author     Michaël Zasso <targos@protonmail.com>  2018-03-07 08:54:53 +0100
committer  Michaël Zasso <targos@protonmail.com>  2018-03-07 16:48:52 +0100
commit     88786fecff336342a56e6f2e7ff3b286be716e47 (patch)
tree       92e6ba5b8ac8dae1a058988d20c9d27bfa654390
parent     4e86f9b5ab83cbabf43839385bf383e6a7ef7d19 (diff)
download   node-new-88786fecff336342a56e6f2e7ff3b286be716e47.tar.gz
deps: update V8 to 6.5.254.31
PR-URL: https://github.com/nodejs/node/pull/18453
Reviewed-By: James M Snell <jasnell@gmail.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: Yang Guo <yangguo@chromium.org>
Reviewed-By: Ali Ijaz Sheikh <ofrobots@google.com>
Reviewed-By: Michael Dawson <michael_dawson@ca.ibm.com>
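The new version string is defined in deps/v8/include/v8-version.h (updated in the diffstat below) and is exposed at runtime through process.versions.v8. A minimal sketch of how to verify the bump, assuming a Node.js build that includes this commit:

    // Print the V8 version bundled with the running Node.js binary;
    // on a build containing this update it should report "6.5.254.31".
    console.log(process.versions.v8);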
-rw-r--r--  deps/v8/.gitignore  4
-rw-r--r--  deps/v8/AUTHORS  5
-rw-r--r--  deps/v8/BUILD.gn  75
-rw-r--r--  deps/v8/ChangeLog  1300
-rw-r--r--  deps/v8/DEPS  47
-rw-r--r--  deps/v8/PRESUBMIT.py  2
-rw-r--r--  deps/v8/build_overrides/build.gni  2
-rw-r--r--  deps/v8/gni/v8.gni  5
-rw-r--r--  deps/v8/gypfiles/all.gyp  1
-rw-r--r--  deps/v8/gypfiles/standalone.gypi  2
-rw-r--r--  deps/v8/include/v8-inspector.h  5
-rw-r--r--  deps/v8/include/v8-platform.h  92
-rw-r--r--  deps/v8/include/v8-version.h  6
-rw-r--r--  deps/v8/include/v8.h  566
-rw-r--r--  deps/v8/infra/config/PRESUBMIT.py  29
-rw-r--r--  deps/v8/infra/config/cq.cfg  46
-rw-r--r--  deps/v8/infra/mb/mb_config.pyl  19
-rw-r--r--  deps/v8/infra/testing/README.md  36
-rw-r--r--  deps/v8/infra/testing/client.v8.pyl  37
-rw-r--r--  deps/v8/infra/testing/tryserver.v8.pyl  22
-rw-r--r--  deps/v8/src/accessors.cc  39
-rw-r--r--  deps/v8/src/allocation.cc  187
-rw-r--r--  deps/v8/src/allocation.h  67
-rw-r--r--  deps/v8/src/api-arguments-inl.h  292
-rw-r--r--  deps/v8/src/api-arguments.cc  32
-rw-r--r--  deps/v8/src/api-arguments.h  81
-rw-r--r--  deps/v8/src/api-natives.cc  10
-rw-r--r--  deps/v8/src/api.cc  979
-rw-r--r--  deps/v8/src/api.h  24
-rw-r--r--  deps/v8/src/arguments.h  2
-rw-r--r--  deps/v8/src/arm/assembler-arm-inl.h  30
-rw-r--r--  deps/v8/src/arm/assembler-arm.cc  130
-rw-r--r--  deps/v8/src/arm/assembler-arm.h  12
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.cc  36
-rw-r--r--  deps/v8/src/arm/codegen-arm.cc  18
-rw-r--r--  deps/v8/src/arm/constants-arm.cc  2
-rw-r--r--  deps/v8/src/arm/constants-arm.h  3
-rw-r--r--  deps/v8/src/arm/deoptimizer-arm.cc  8
-rw-r--r--  deps/v8/src/arm/disasm-arm.cc  44
-rw-r--r--  deps/v8/src/arm/interface-descriptors-arm.cc  7
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc  96
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.h  10
-rw-r--r--  deps/v8/src/arm/simulator-arm.cc  238
-rw-r--r--  deps/v8/src/arm/simulator-arm.h  124
-rw-r--r--  deps/v8/src/arm64/assembler-arm64-inl.h  30
-rw-r--r--  deps/v8/src/arm64/assembler-arm64.cc  56
-rw-r--r--  deps/v8/src/arm64/assembler-arm64.h  15
-rw-r--r--  deps/v8/src/arm64/code-stubs-arm64.cc  152
-rw-r--r--  deps/v8/src/arm64/constants-arm64.h  1
-rw-r--r--  deps/v8/src/arm64/cpu-arm64.cc  2
-rw-r--r--  deps/v8/src/arm64/deoptimizer-arm64.cc  14
-rw-r--r--  deps/v8/src/arm64/disasm-arm64.cc  51
-rw-r--r--  deps/v8/src/arm64/eh-frame-arm64.cc  7
-rw-r--r--  deps/v8/src/arm64/frame-constants-arm64.h  25
-rw-r--r--  deps/v8/src/arm64/instructions-arm64-constants.cc  26
-rw-r--r--  deps/v8/src/arm64/instrument-arm64.cc  4
-rw-r--r--  deps/v8/src/arm64/interface-descriptors-arm64.cc  7
-rw-r--r--  deps/v8/src/arm64/macro-assembler-arm64-inl.h  54
-rw-r--r--  deps/v8/src/arm64/macro-assembler-arm64.cc  329
-rw-r--r--  deps/v8/src/arm64/macro-assembler-arm64.h  82
-rw-r--r--  deps/v8/src/arm64/simulator-arm64.cc  227
-rw-r--r--  deps/v8/src/arm64/simulator-arm64.h  156
-rw-r--r--  deps/v8/src/arm64/simulator-logic-arm64.cc  4
-rw-r--r--  deps/v8/src/arm64/utils-arm64.cc  4
-rw-r--r--  deps/v8/src/asmjs/asm-parser.cc  36
-rw-r--r--  deps/v8/src/asmjs/asm-scanner.cc  2
-rw-r--r--  deps/v8/src/assembler.cc  36
-rw-r--r--  deps/v8/src/assembler.h  28
-rw-r--r--  deps/v8/src/ast/ast-numbering.cc  26
-rw-r--r--  deps/v8/src/ast/ast-traversal-visitor.h  9
-rw-r--r--  deps/v8/src/ast/ast.cc  21
-rw-r--r--  deps/v8/src/ast/ast.h  71
-rw-r--r--  deps/v8/src/ast/prettyprinter.cc  43
-rw-r--r--  deps/v8/src/ast/prettyprinter.h  1
-rw-r--r--  deps/v8/src/ast/scopes.cc  26
-rw-r--r--  deps/v8/src/ast/scopes.h  14
-rw-r--r--  deps/v8/src/bailout-reason.cc  21
-rw-r--r--  deps/v8/src/bailout-reason.h  123
-rw-r--r--  deps/v8/src/base/DEPS  1
-rw-r--r--  deps/v8/src/base/cpu.cc  20
-rw-r--r--  deps/v8/src/base/debug/stack_trace_posix.cc  8
-rw-r--r--  deps/v8/src/base/functional.cc  8
-rw-r--r--  deps/v8/src/base/ieee754.cc  282
-rw-r--r--  deps/v8/src/base/lazy-instance.h  12
-rw-r--r--  deps/v8/src/base/logging.cc  2
-rw-r--r--  deps/v8/src/base/logging.h  18
-rw-r--r--  deps/v8/src/base/macros.h  57
-rw-r--r--  deps/v8/src/base/once.cc  4
-rw-r--r--  deps/v8/src/base/once.h  11
-rw-r--r--  deps/v8/src/base/page-allocator.cc  64
-rw-r--r--  deps/v8/src/base/page-allocator.h  41
-rw-r--r--  deps/v8/src/base/platform/platform-fuchsia.cc  5
-rw-r--r--  deps/v8/src/base/platform/platform-posix.cc  58
-rw-r--r--  deps/v8/src/base/platform/platform-win32.cc  33
-rw-r--r--  deps/v8/src/base/platform/platform.h  68
-rw-r--r--  deps/v8/src/base/platform/semaphore.cc  2
-rw-r--r--  deps/v8/src/base/platform/time.cc  3
-rw-r--r--  deps/v8/src/base/safe_conversions.h  3
-rw-r--r--  deps/v8/src/base/utils/random-number-generator.cc  4
-rw-r--r--  deps/v8/src/base/utils/random-number-generator.h  4
-rw-r--r--  deps/v8/src/bignum.cc  2
-rw-r--r--  deps/v8/src/bootstrapper.cc  182
-rw-r--r--  deps/v8/src/builtins/arm/builtins-arm.cc  91
-rw-r--r--  deps/v8/src/builtins/arm64/builtins-arm64.cc  587
-rw-r--r--  deps/v8/src/builtins/builtins-array-gen.cc  539
-rw-r--r--  deps/v8/src/builtins/builtins-async-gen.cc  1
-rw-r--r--  deps/v8/src/builtins/builtins-collections-gen.cc  5
-rw-r--r--  deps/v8/src/builtins/builtins-constructor-gen.cc  9
-rw-r--r--  deps/v8/src/builtins/builtins-conversion-gen.cc  15
-rw-r--r--  deps/v8/src/builtins/builtins-definitions.h  85
-rw-r--r--  deps/v8/src/builtins/builtins-function-gen.cc  1
-rw-r--r--  deps/v8/src/builtins/builtins-handler-gen.cc  54
-rw-r--r--  deps/v8/src/builtins/builtins-ic-gen.cc  4
-rw-r--r--  deps/v8/src/builtins/builtins-internal-gen.cc  449
-rw-r--r--  deps/v8/src/builtins/builtins-intl-gen.cc  1
-rw-r--r--  deps/v8/src/builtins/builtins-iterator-gen.cc  53
-rw-r--r--  deps/v8/src/builtins/builtins-iterator-gen.h  13
-rw-r--r--  deps/v8/src/builtins/builtins-math-gen.cc  3
-rw-r--r--  deps/v8/src/builtins/builtins-object-gen.cc  321
-rw-r--r--  deps/v8/src/builtins/builtins-object.cc  25
-rw-r--r--  deps/v8/src/builtins/builtins-promise-gen.cc  217
-rw-r--r--  deps/v8/src/builtins/builtins-promise-gen.h  3
-rw-r--r--  deps/v8/src/builtins/builtins-proxy-gen.cc  100
-rw-r--r--  deps/v8/src/builtins/builtins-proxy-gen.h  9
-rw-r--r--  deps/v8/src/builtins/builtins-regexp-gen.cc  59
-rw-r--r--  deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc  10
-rw-r--r--  deps/v8/src/builtins/builtins-string-gen.cc  236
-rw-r--r--  deps/v8/src/builtins/builtins-string-gen.h  17
-rw-r--r--  deps/v8/src/builtins/builtins-string.cc  4
-rw-r--r--  deps/v8/src/builtins/builtins-typedarray-gen.cc  207
-rw-r--r--  deps/v8/src/builtins/builtins-utils-gen.h  3
-rw-r--r--  deps/v8/src/builtins/builtins-utils.h  3
-rw-r--r--  deps/v8/src/builtins/builtins.cc  32
-rw-r--r--  deps/v8/src/builtins/ia32/builtins-ia32.cc  87
-rw-r--r--  deps/v8/src/builtins/mips/builtins-mips.cc  115
-rw-r--r--  deps/v8/src/builtins/mips64/builtins-mips64.cc  115
-rw-r--r--  deps/v8/src/builtins/ppc/builtins-ppc.cc  107
-rw-r--r--  deps/v8/src/builtins/s390/builtins-s390.cc  109
-rw-r--r--  deps/v8/src/builtins/setup-builtins-internal.cc  11
-rw-r--r--  deps/v8/src/builtins/x64/builtins-x64.cc  112
-rw-r--r--  deps/v8/src/cached-powers.cc  174
-rw-r--r--  deps/v8/src/code-factory.cc  26
-rw-r--r--  deps/v8/src/code-factory.h  4
-rw-r--r--  deps/v8/src/code-stub-assembler.cc  1250
-rw-r--r--  deps/v8/src/code-stub-assembler.h  218
-rw-r--r--  deps/v8/src/code-stubs.cc  43
-rw-r--r--  deps/v8/src/code-stubs.h  158
-rw-r--r--  deps/v8/src/compilation-info.cc  2
-rw-r--r--  deps/v8/src/compilation-info.h  6
-rw-r--r--  deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc  2
-rw-r--r--  deps/v8/src/compiler.cc  141
-rw-r--r--  deps/v8/src/compiler.h  8
-rw-r--r--  deps/v8/src/compiler/OWNERS  5
-rw-r--r--  deps/v8/src/compiler/access-builder.cc  8
-rw-r--r--  deps/v8/src/compiler/access-builder.h  3
-rw-r--r--  deps/v8/src/compiler/arm/code-generator-arm.cc  325
-rw-r--r--  deps/v8/src/compiler/arm/instruction-codes-arm.h  1
-rw-r--r--  deps/v8/src/compiler/arm/instruction-scheduler-arm.cc  1
-rw-r--r--  deps/v8/src/compiler/arm/instruction-selector-arm.cc  181
-rw-r--r--  deps/v8/src/compiler/arm64/code-generator-arm64.cc  425
-rw-r--r--  deps/v8/src/compiler/arm64/instruction-codes-arm64.h  9
-rw-r--r--  deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc  28
-rw-r--r--  deps/v8/src/compiler/arm64/instruction-selector-arm64.cc  263
-rw-r--r--  deps/v8/src/compiler/branch-elimination.cc  5
-rw-r--r--  deps/v8/src/compiler/bytecode-graph-builder.cc  47
-rw-r--r--  deps/v8/src/compiler/bytecode-graph-builder.h  8
-rw-r--r--  deps/v8/src/compiler/c-linkage.cc  2
-rw-r--r--  deps/v8/src/compiler/code-assembler.cc  17
-rw-r--r--  deps/v8/src/compiler/code-assembler.h  14
-rw-r--r--  deps/v8/src/compiler/code-generator.cc  23
-rw-r--r--  deps/v8/src/compiler/common-operator-reducer.cc  12
-rw-r--r--  deps/v8/src/compiler/common-operator.cc  97
-rw-r--r--  deps/v8/src/compiler/common-operator.h  22
-rw-r--r--  deps/v8/src/compiler/dead-code-elimination.cc  49
-rw-r--r--  deps/v8/src/compiler/dead-code-elimination.h  30
-rw-r--r--  deps/v8/src/compiler/effect-control-linearizer.cc  510
-rw-r--r--  deps/v8/src/compiler/effect-control-linearizer.h  12
-rw-r--r--  deps/v8/src/compiler/escape-analysis-reducer.cc  45
-rw-r--r--  deps/v8/src/compiler/escape-analysis-reducer.h  2
-rw-r--r--  deps/v8/src/compiler/escape-analysis.cc  19
-rw-r--r--  deps/v8/src/compiler/frame.cc  11
-rw-r--r--  deps/v8/src/compiler/frame.h  49
-rw-r--r--  deps/v8/src/compiler/gap-resolver.cc  14
-rw-r--r--  deps/v8/src/compiler/graph-assembler.cc  36
-rw-r--r--  deps/v8/src/compiler/graph-assembler.h  16
-rw-r--r--  deps/v8/src/compiler/graph-trimmer.h  1
-rw-r--r--  deps/v8/src/compiler/ia32/code-generator-ia32.cc  761
-rw-r--r--  deps/v8/src/compiler/ia32/instruction-codes-ia32.h  43
-rw-r--r--  deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc  53
-rw-r--r--  deps/v8/src/compiler/ia32/instruction-selector-ia32.cc  316
-rw-r--r--  deps/v8/src/compiler/instruction-codes.h  14
-rw-r--r--  deps/v8/src/compiler/instruction-scheduler.cc  30
-rw-r--r--  deps/v8/src/compiler/instruction-selector-impl.h  29
-rw-r--r--  deps/v8/src/compiler/instruction-selector.cc  348
-rw-r--r--  deps/v8/src/compiler/instruction-selector.h  31
-rw-r--r--  deps/v8/src/compiler/instruction.cc  4
-rw-r--r--  deps/v8/src/compiler/instruction.h  14
-rw-r--r--  deps/v8/src/compiler/int64-lowering.cc  15
-rw-r--r--  deps/v8/src/compiler/js-builtin-reducer.cc  742
-rw-r--r--  deps/v8/src/compiler/js-builtin-reducer.h  11
-rw-r--r--  deps/v8/src/compiler/js-call-reducer.cc  2109
-rw-r--r--  deps/v8/src/compiler/js-call-reducer.h  29
-rw-r--r--  deps/v8/src/compiler/js-create-lowering.cc  16
-rw-r--r--  deps/v8/src/compiler/js-generic-lowering.cc  29
-rw-r--r--  deps/v8/src/compiler/js-graph.cc  4
-rw-r--r--  deps/v8/src/compiler/js-graph.h  4
-rw-r--r--  deps/v8/src/compiler/js-inlining-heuristic.cc  3
-rw-r--r--  deps/v8/src/compiler/js-inlining-heuristic.h  2
-rw-r--r--  deps/v8/src/compiler/js-intrinsic-lowering.cc  3
-rw-r--r--  deps/v8/src/compiler/js-native-context-specialization.cc  103
-rw-r--r--  deps/v8/src/compiler/js-operator.cc  43
-rw-r--r--  deps/v8/src/compiler/js-operator.h  48
-rw-r--r--  deps/v8/src/compiler/js-type-hint-lowering.cc  5
-rw-r--r--  deps/v8/src/compiler/js-typed-lowering.cc  121
-rw-r--r--  deps/v8/src/compiler/js-typed-lowering.h  5
-rw-r--r--  deps/v8/src/compiler/linkage.cc  41
-rw-r--r--  deps/v8/src/compiler/linkage.h  27
-rw-r--r--  deps/v8/src/compiler/load-elimination.cc  4
-rw-r--r--  deps/v8/src/compiler/loop-analysis.cc  2
-rw-r--r--  deps/v8/src/compiler/loop-peeling.cc  95
-rw-r--r--  deps/v8/src/compiler/loop-peeling.h  31
-rw-r--r--  deps/v8/src/compiler/loop-variable-optimizer.cc  34
-rw-r--r--  deps/v8/src/compiler/loop-variable-optimizer.h  7
-rw-r--r--  deps/v8/src/compiler/machine-graph-verifier.cc  43
-rw-r--r--  deps/v8/src/compiler/machine-operator-reducer.cc  24
-rw-r--r--  deps/v8/src/compiler/machine-operator.cc  84
-rw-r--r--  deps/v8/src/compiler/machine-operator.h  22
-rw-r--r--  deps/v8/src/compiler/memory-optimizer.cc  2
-rw-r--r--  deps/v8/src/compiler/mips/code-generator-mips.cc  228
-rw-r--r--  deps/v8/src/compiler/mips/instruction-codes-mips.h  1
-rw-r--r--  deps/v8/src/compiler/mips/instruction-selector-mips.cc  169
-rw-r--r--  deps/v8/src/compiler/mips64/code-generator-mips64.cc  245
-rw-r--r--  deps/v8/src/compiler/mips64/instruction-codes-mips64.h  1
-rw-r--r--  deps/v8/src/compiler/mips64/instruction-selector-mips64.cc  190
-rw-r--r--  deps/v8/src/compiler/node-properties.cc  38
-rw-r--r--  deps/v8/src/compiler/node-properties.h  3
-rw-r--r--  deps/v8/src/compiler/opcodes.h  46
-rw-r--r--  deps/v8/src/compiler/operation-typer.cc  10
-rw-r--r--  deps/v8/src/compiler/operation-typer.h  2
-rw-r--r--  deps/v8/src/compiler/pipeline.cc  96
-rw-r--r--  deps/v8/src/compiler/pipeline.h  2
-rw-r--r--  deps/v8/src/compiler/ppc/code-generator-ppc.cc  302
-rw-r--r--  deps/v8/src/compiler/ppc/instruction-selector-ppc.cc  307
-rw-r--r--  deps/v8/src/compiler/property-access-builder.cc  15
-rw-r--r--  deps/v8/src/compiler/raw-machine-assembler.cc  32
-rw-r--r--  deps/v8/src/compiler/raw-machine-assembler.h  7
-rw-r--r--  deps/v8/src/compiler/redundancy-elimination.cc  55
-rw-r--r--  deps/v8/src/compiler/representation-change.cc  67
-rw-r--r--  deps/v8/src/compiler/representation-change.h  23
-rw-r--r--  deps/v8/src/compiler/s390/code-generator-s390.cc  212
-rw-r--r--  deps/v8/src/compiler/s390/instruction-selector-s390.cc  321
-rw-r--r--  deps/v8/src/compiler/simd-scalar-lowering.cc  8
-rw-r--r--  deps/v8/src/compiler/simplified-lowering.cc  135
-rw-r--r--  deps/v8/src/compiler/simplified-lowering.h  2
-rw-r--r--  deps/v8/src/compiler/simplified-operator.cc  315
-rw-r--r--  deps/v8/src/compiler/simplified-operator.h  178
-rw-r--r--  deps/v8/src/compiler/state-values-utils.cc  2
-rw-r--r--  deps/v8/src/compiler/store-store-elimination.cc  4
-rw-r--r--  deps/v8/src/compiler/type-cache.h  2
-rw-r--r--  deps/v8/src/compiler/typer.cc  140
-rw-r--r--  deps/v8/src/compiler/types.cc  2
-rw-r--r--  deps/v8/src/compiler/types.h  3
-rw-r--r--  deps/v8/src/compiler/verifier.cc  59
-rw-r--r--  deps/v8/src/compiler/wasm-compiler.cc  693
-rw-r--r--  deps/v8/src/compiler/wasm-compiler.h  121
-rw-r--r--  deps/v8/src/compiler/wasm-linkage.cc  123
-rw-r--r--  deps/v8/src/compiler/x64/code-generator-x64.cc  479
-rw-r--r--  deps/v8/src/compiler/x64/instruction-codes-x64.h  16
-rw-r--r--  deps/v8/src/compiler/x64/instruction-scheduler-x64.cc  32
-rw-r--r--  deps/v8/src/compiler/x64/instruction-selector-x64.cc  217
-rw-r--r--  deps/v8/src/contexts-inl.h  6
-rw-r--r--  deps/v8/src/contexts.cc  4
-rw-r--r--  deps/v8/src/contexts.h  19
-rw-r--r--  deps/v8/src/conversions.cc  7
-rw-r--r--  deps/v8/src/counters-inl.h  6
-rw-r--r--  deps/v8/src/counters.cc  99
-rw-r--r--  deps/v8/src/counters.h  160
-rw-r--r--  deps/v8/src/d8-posix.cc  14
-rw-r--r--  deps/v8/src/d8.cc  441
-rw-r--r--  deps/v8/src/d8.h  51
-rw-r--r--  deps/v8/src/debug/debug-coverage.cc  33
-rw-r--r--  deps/v8/src/debug/debug-evaluate.cc  38
-rw-r--r--  deps/v8/src/debug/debug-frames.h  8
-rw-r--r--  deps/v8/src/debug/debug-type-profile.cc  5
-rw-r--r--  deps/v8/src/debug/debug.cc  8
-rw-r--r--  deps/v8/src/debug/liveedit.cc  7
-rw-r--r--  deps/v8/src/debug/mirrors.js  2
-rw-r--r--  deps/v8/src/deoptimize-reason.h  40
-rw-r--r--  deps/v8/src/deoptimizer.cc  1194
-rw-r--r--  deps/v8/src/deoptimizer.h  110
-rw-r--r--  deps/v8/src/disassembler.cc  42
-rw-r--r--  deps/v8/src/disassembler.h  2
-rw-r--r--  deps/v8/src/eh-frame.cc  14
-rw-r--r--  deps/v8/src/elements-kind.h  37
-rw-r--r--  deps/v8/src/elements.cc  122
-rw-r--r--  deps/v8/src/elements.h  19
-rw-r--r--  deps/v8/src/execution.cc  65
-rw-r--r--  deps/v8/src/execution.h  13
-rw-r--r--  deps/v8/src/extensions/statistics-extension.cc  18
-rw-r--r--  deps/v8/src/external-reference-table.cc  8
-rw-r--r--  deps/v8/src/factory-inl.h  8
-rw-r--r--  deps/v8/src/factory.cc  71
-rw-r--r--  deps/v8/src/factory.h  20
-rw-r--r--  deps/v8/src/fast-dtoa.cc  7
-rw-r--r--  deps/v8/src/feedback-vector-inl.h  9
-rw-r--r--  deps/v8/src/feedback-vector.cc  151
-rw-r--r--  deps/v8/src/feedback-vector.h  119
-rw-r--r--  deps/v8/src/flag-definitions.h  136
-rw-r--r--  deps/v8/src/flags.cc  78
-rw-r--r--  deps/v8/src/frame-constants.h  8
-rw-r--r--  deps/v8/src/frames-inl.h  12
-rw-r--r--  deps/v8/src/frames.cc  133
-rw-r--r--  deps/v8/src/frames.h  29
-rw-r--r--  deps/v8/src/gdb-jit.cc  60
-rw-r--r--  deps/v8/src/global-handles.cc  2
-rw-r--r--  deps/v8/src/globals.h  351
-rw-r--r--  deps/v8/src/handles.cc  2
-rw-r--r--  deps/v8/src/heap-symbols.h  15
-rw-r--r--  deps/v8/src/heap/array-buffer-collector.cc  3
-rw-r--r--  deps/v8/src/heap/concurrent-marking.cc  21
-rw-r--r--  deps/v8/src/heap/gc-tracer.cc  185
-rw-r--r--  deps/v8/src/heap/gc-tracer.h  69
-rw-r--r--  deps/v8/src/heap/heap-inl.h  29
-rw-r--r--  deps/v8/src/heap/heap.cc  390
-rw-r--r--  deps/v8/src/heap/heap.h  240
-rw-r--r--  deps/v8/src/heap/incremental-marking-job.cc  2
-rw-r--r--  deps/v8/src/heap/incremental-marking.cc  35
-rw-r--r--  deps/v8/src/heap/local-allocator.h  2
-rw-r--r--  deps/v8/src/heap/mark-compact.cc  203
-rw-r--r--  deps/v8/src/heap/mark-compact.h  11
-rw-r--r--  deps/v8/src/heap/object-stats.cc  307
-rw-r--r--  deps/v8/src/heap/object-stats.h  67
-rw-r--r--  deps/v8/src/heap/objects-visiting.h  1
-rw-r--r--  deps/v8/src/heap/scavenge-job.cc  2
-rw-r--r--  deps/v8/src/heap/scavenger-inl.h  2
-rw-r--r--  deps/v8/src/heap/scavenger.cc  14
-rw-r--r--  deps/v8/src/heap/scavenger.h  2
-rw-r--r--  deps/v8/src/heap/setup-heap-internal.cc  18
-rw-r--r--  deps/v8/src/heap/spaces-inl.h  35
-rw-r--r--  deps/v8/src/heap/spaces.cc  768
-rw-r--r--  deps/v8/src/heap/spaces.h  297
-rw-r--r--  deps/v8/src/heap/store-buffer.cc  14
-rw-r--r--  deps/v8/src/heap/store-buffer.h  8
-rw-r--r--  deps/v8/src/heap/stress-marking-observer.cc  21
-rw-r--r--  deps/v8/src/heap/stress-marking-observer.h  26
-rw-r--r--  deps/v8/src/heap/stress-scavenge-observer.cc  94
-rw-r--r--  deps/v8/src/heap/stress-scavenge-observer.h  39
-rw-r--r--  deps/v8/src/heap/sweeper.cc  153
-rw-r--r--  deps/v8/src/heap/sweeper.h  61
-rw-r--r--  deps/v8/src/ia32/assembler-ia32-inl.h  19
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.cc  80
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.h  34
-rw-r--r--  deps/v8/src/ia32/code-stubs-ia32.cc  30
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.cc  12
-rw-r--r--  deps/v8/src/ia32/disasm-ia32.cc  162
-rw-r--r--  deps/v8/src/ia32/interface-descriptors-ia32.cc  7
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.cc  46
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.h  10
-rw-r--r--  deps/v8/src/ia32/simulator-ia32.h  46
-rw-r--r--  deps/v8/src/ic/accessor-assembler.cc  937
-rw-r--r--  deps/v8/src/ic/accessor-assembler.h  91
-rw-r--r--  deps/v8/src/ic/handler-configuration-inl.h  57
-rw-r--r--  deps/v8/src/ic/handler-configuration.cc  327
-rw-r--r--  deps/v8/src/ic/handler-configuration.h  109
-rw-r--r--  deps/v8/src/ic/ic-inl.h  3
-rw-r--r--  deps/v8/src/ic/ic.cc  209
-rw-r--r--  deps/v8/src/ic/ic.h  4
-rw-r--r--  deps/v8/src/ic/keyed-store-generic.cc  44
-rw-r--r--  deps/v8/src/ic/stub-cache.cc  2
-rw-r--r--  deps/v8/src/ic/stub-cache.h  3
-rw-r--r--  deps/v8/src/icu_util.cc  4
-rw-r--r--  deps/v8/src/inspector/BUILD.gn  11
-rw-r--r--  deps/v8/src/inspector/OWNERS  3
-rw-r--r--  deps/v8/src/inspector/injected-script-source.js  28
-rw-r--r--  deps/v8/src/inspector/injected_script_externs.js  6
-rw-r--r--  deps/v8/src/inspector/js_protocol.json  4167
-rw-r--r--  deps/v8/src/inspector/js_protocol.pdl  1370
-rw-r--r--  deps/v8/src/inspector/string-16.cc  32
-rw-r--r--  deps/v8/src/inspector/v8-console-message.cc  5
-rw-r--r--  deps/v8/src/inspector/v8-console-message.h  1
-rw-r--r--  deps/v8/src/inspector/v8-console.cc  22
-rw-r--r--  deps/v8/src/inspector/v8-debugger-agent-impl.cc  34
-rw-r--r--  deps/v8/src/inspector/v8-debugger-agent-impl.h  1
-rw-r--r--  deps/v8/src/inspector/v8-debugger.cc  35
-rw-r--r--  deps/v8/src/inspector/v8-debugger.h  8
-rw-r--r--  deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc  2
-rw-r--r--  deps/v8/src/inspector/v8-injected-script-host.cc  50
-rw-r--r--  deps/v8/src/inspector/v8-injected-script-host.h  2
-rw-r--r--  deps/v8/src/inspector/v8-inspector-session-impl.cc  5
-rw-r--r--  deps/v8/src/inspector/v8-inspector-session-impl.h  4
-rw-r--r--  deps/v8/src/inspector/v8-stack-trace-impl.cc  8
-rw-r--r--  deps/v8/src/inspector/v8-stack-trace-impl.h  1
-rw-r--r--  deps/v8/src/inspector/v8-value-utils.cc  1
-rw-r--r--  deps/v8/src/interface-descriptors.cc  72
-rw-r--r--  deps/v8/src/interface-descriptors.h  80
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-builder.cc  28
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-builder.h  11
-rw-r--r--  deps/v8/src/interpreter/bytecode-generator.cc  400
-rw-r--r--  deps/v8/src/interpreter/bytecode-generator.h  21
-rw-r--r--  deps/v8/src/interpreter/bytecode-register-allocator.h  5
-rw-r--r--  deps/v8/src/interpreter/bytecode-register-optimizer.cc  2
-rw-r--r--  deps/v8/src/interpreter/bytecode-register-optimizer.h  2
-rw-r--r--  deps/v8/src/interpreter/bytecode-register.h  26
-rw-r--r--  deps/v8/src/interpreter/bytecodes.h  9
-rw-r--r--  deps/v8/src/interpreter/constant-array-builder.cc  2
-rw-r--r--  deps/v8/src/interpreter/handler-table-builder.cc  2
-rw-r--r--  deps/v8/src/interpreter/interpreter-assembler.cc  21
-rw-r--r--  deps/v8/src/interpreter/interpreter-assembler.h  6
-rw-r--r--  deps/v8/src/interpreter/interpreter-generator.cc  206
-rw-r--r--  deps/v8/src/interpreter/interpreter-intrinsics-generator.cc  6
-rw-r--r--  deps/v8/src/interpreter/interpreter.cc  6
-rw-r--r--  deps/v8/src/isolate.cc  397
-rw-r--r--  deps/v8/src/isolate.h  64
-rw-r--r--  deps/v8/src/js/array.js  58
-rw-r--r--  deps/v8/src/js/prologue.js  36
-rw-r--r--  deps/v8/src/js/proxy.js  27
-rw-r--r--  deps/v8/src/js/v8natives.js  64
-rw-r--r--  deps/v8/src/json-parser.cc  62
-rw-r--r--  deps/v8/src/json-parser.h  8
-rw-r--r--  deps/v8/src/json-stringifier.cc  68
-rw-r--r--  deps/v8/src/keys.cc  25
-rw-r--r--  deps/v8/src/label.h  5
-rw-r--r--  deps/v8/src/layout-descriptor-inl.h  5
-rw-r--r--  deps/v8/src/layout-descriptor.h  2
-rw-r--r--  deps/v8/src/libplatform/default-platform.cc  6
-rw-r--r--  deps/v8/src/libplatform/default-platform.h  3
-rw-r--r--  deps/v8/src/log-utils.cc  5
-rw-r--r--  deps/v8/src/log-utils.h  6
-rw-r--r--  deps/v8/src/log.cc  98
-rw-r--r--  deps/v8/src/log.h  22
-rw-r--r--  deps/v8/src/lookup.cc  57
-rw-r--r--  deps/v8/src/machine-type.h  2
-rw-r--r--  deps/v8/src/messages.cc  54
-rw-r--r--  deps/v8/src/messages.h  9
-rw-r--r--  deps/v8/src/mips/assembler-mips-inl.h  42
-rw-r--r--  deps/v8/src/mips/assembler-mips.cc  20
-rw-r--r--  deps/v8/src/mips/assembler-mips.h  5
-rw-r--r--  deps/v8/src/mips/code-stubs-mips.cc  48
-rw-r--r--  deps/v8/src/mips/codegen-mips.cc  20
-rw-r--r--  deps/v8/src/mips/disasm-mips.cc  4
-rw-r--r--  deps/v8/src/mips/interface-descriptors-mips.cc  7
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.cc  76
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.h  6
-rw-r--r--  deps/v8/src/mips/simulator-mips.cc  203
-rw-r--r--  deps/v8/src/mips/simulator-mips.h  121
-rw-r--r--  deps/v8/src/mips64/assembler-mips64-inl.h  32
-rw-r--r--  deps/v8/src/mips64/assembler-mips64.cc  19
-rw-r--r--  deps/v8/src/mips64/assembler-mips64.h  5
-rw-r--r--  deps/v8/src/mips64/code-stubs-mips64.cc  48
-rw-r--r--  deps/v8/src/mips64/codegen-mips64.cc  20
-rw-r--r--  deps/v8/src/mips64/disasm-mips64.cc  10
-rw-r--r--  deps/v8/src/mips64/interface-descriptors-mips64.cc  7
-rw-r--r--  deps/v8/src/mips64/macro-assembler-mips64.cc  68
-rw-r--r--  deps/v8/src/mips64/macro-assembler-mips64.h  6
-rw-r--r--  deps/v8/src/mips64/simulator-mips64.cc  238
-rw-r--r--  deps/v8/src/mips64/simulator-mips64.h  132
-rw-r--r--  deps/v8/src/objects-body-descriptors-inl.h  3
-rw-r--r--  deps/v8/src/objects-debug.cc  81
-rw-r--r--  deps/v8/src/objects-inl.h  1727
-rw-r--r--  deps/v8/src/objects-printer.cc  102
-rw-r--r--  deps/v8/src/objects.cc  613
-rw-r--r--  deps/v8/src/objects.h  985
-rw-r--r--  deps/v8/src/objects/bigint.cc  84
-rw-r--r--  deps/v8/src/objects/bigint.h  25
-rw-r--r--  deps/v8/src/objects/code-inl.h  34
-rw-r--r--  deps/v8/src/objects/code.h  21
-rw-r--r--  deps/v8/src/objects/data-handler-inl.h  41
-rw-r--r--  deps/v8/src/objects/data-handler.h  63
-rw-r--r--  deps/v8/src/objects/debug-objects.h  1
-rw-r--r--  deps/v8/src/objects/descriptor-array.h  1
-rw-r--r--  deps/v8/src/objects/fixed-array-inl.h  634
-rw-r--r--  deps/v8/src/objects/fixed-array.h  601
-rw-r--r--  deps/v8/src/objects/hash-table-inl.h  109
-rw-r--r--  deps/v8/src/objects/hash-table.h  34
-rw-r--r--  deps/v8/src/objects/js-array-inl.h  29
-rw-r--r--  deps/v8/src/objects/js-array.h  4
-rw-r--r--  deps/v8/src/objects/js-collection-inl.h  49
-rw-r--r--  deps/v8/src/objects/js-collection.h  162
-rw-r--r--  deps/v8/src/objects/js-regexp.h  15
-rw-r--r--  deps/v8/src/objects/literal-objects.h  1
-rw-r--r--  deps/v8/src/objects/map-inl.h  645
-rw-r--r--  deps/v8/src/objects/map.h  151
-rw-r--r--  deps/v8/src/objects/module.cc  179
-rw-r--r--  deps/v8/src/objects/module.h  16
-rw-r--r--  deps/v8/src/objects/object-macros.h  3
-rw-r--r--  deps/v8/src/objects/scope-info.h  5
-rw-r--r--  deps/v8/src/objects/script-inl.h  35
-rw-r--r--  deps/v8/src/objects/script.h  23
-rw-r--r--  deps/v8/src/objects/shared-function-info-inl.h  2
-rw-r--r--  deps/v8/src/objects/shared-function-info.h  18
-rw-r--r--  deps/v8/src/objects/string-inl.h  31
-rw-r--r--  deps/v8/src/objects/string.h  7
-rw-r--r--  deps/v8/src/ostreams.cc  8
-rw-r--r--  deps/v8/src/parsing/background-parsing-task.cc  3
-rw-r--r--  deps/v8/src/parsing/expression-classifier.h  25
-rw-r--r--  deps/v8/src/parsing/expression-scope-reparenter.cc  8
-rw-r--r--  deps/v8/src/parsing/parse-info.cc  14
-rw-r--r--  deps/v8/src/parsing/parse-info.h  10
-rw-r--r--  deps/v8/src/parsing/parser-base.h  387
-rw-r--r--  deps/v8/src/parsing/parser.cc  350
-rw-r--r--  deps/v8/src/parsing/parser.h  54
-rw-r--r--  deps/v8/src/parsing/pattern-rewriter.cc  10
-rw-r--r--  deps/v8/src/parsing/preparsed-scope-data.cc  6
-rw-r--r--  deps/v8/src/parsing/preparser.cc  20
-rw-r--r--  deps/v8/src/parsing/preparser.h  25
-rw-r--r--  deps/v8/src/parsing/rewriter.cc  4
-rw-r--r--  deps/v8/src/parsing/scanner-character-streams.cc  57
-rw-r--r--  deps/v8/src/parsing/scanner.cc  117
-rw-r--r--  deps/v8/src/parsing/scanner.h  17
-rw-r--r--  deps/v8/src/parsing/token.h  1
-rw-r--r--  deps/v8/src/perf-jit.cc  4
-rw-r--r--  deps/v8/src/ppc/assembler-ppc-inl.h  49
-rw-r--r--  deps/v8/src/ppc/assembler-ppc.cc  81
-rw-r--r--  deps/v8/src/ppc/assembler-ppc.h  5
-rw-r--r--  deps/v8/src/ppc/code-stubs-ppc.cc  33
-rw-r--r--  deps/v8/src/ppc/codegen-ppc.cc  6
-rw-r--r--  deps/v8/src/ppc/interface-descriptors-ppc.cc  7
-rw-r--r--  deps/v8/src/ppc/macro-assembler-ppc.cc  55
-rw-r--r--  deps/v8/src/ppc/macro-assembler-ppc.h  6
-rw-r--r--  deps/v8/src/ppc/simulator-ppc.cc  232
-rw-r--r--  deps/v8/src/ppc/simulator-ppc.h  111
-rw-r--r--  deps/v8/src/profiler/cpu-profiler.cc  4
-rw-r--r--  deps/v8/src/profiler/heap-snapshot-generator.cc  11
-rw-r--r--  deps/v8/src/profiler/heap-snapshot-generator.h  3
-rw-r--r--  deps/v8/src/profiler/profile-generator.cc  8
-rw-r--r--  deps/v8/src/profiler/profiler-listener.cc  2
-rw-r--r--  deps/v8/src/profiler/sampling-heap-profiler.cc  19
-rw-r--r--  deps/v8/src/profiler/tick-sample.cc  12
-rw-r--r--  deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc  35
-rw-r--r--  deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc  36
-rw-r--r--  deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc  32
-rw-r--r--  deps/v8/src/regexp/jsregexp.cc  76
-rw-r--r--  deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc  37
-rw-r--r--  deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc  37
-rw-r--r--  deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc  35
-rw-r--r--  deps/v8/src/regexp/regexp-ast.h  12
-rw-r--r--  deps/v8/src/regexp/regexp-macro-assembler.cc  40
-rw-r--r--  deps/v8/src/regexp/regexp-parser.cc  42
-rw-r--r--  deps/v8/src/regexp/regexp-utils.cc  2
-rw-r--r--  deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc  37
-rw-r--r--  deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc  32
-rw-r--r--  deps/v8/src/runtime/runtime-bigint.cc  3
-rw-r--r--  deps/v8/src/runtime/runtime-classes.cc  18
-rw-r--r--  deps/v8/src/runtime/runtime-compiler.cc  21
-rw-r--r--  deps/v8/src/runtime/runtime-debug.cc  1
-rw-r--r--  deps/v8/src/runtime/runtime-function.cc  4
-rw-r--r--  deps/v8/src/runtime/runtime-internal.cc  18
-rw-r--r--  deps/v8/src/runtime/runtime-module.cc  8
-rw-r--r--  deps/v8/src/runtime/runtime-numbers.cc  10
-rw-r--r--  deps/v8/src/runtime/runtime-object.cc  55
-rw-r--r--  deps/v8/src/runtime/runtime-promise.cc  16
-rw-r--r--  deps/v8/src/runtime/runtime-proxy.cc  8
-rw-r--r--  deps/v8/src/runtime/runtime-regexp.cc  9
-rw-r--r--  deps/v8/src/runtime/runtime-scopes.cc  4
-rw-r--r--  deps/v8/src/runtime/runtime-strings.cc  8
-rw-r--r--  deps/v8/src/runtime/runtime-test.cc  128
-rw-r--r--  deps/v8/src/runtime/runtime-typedarray.cc  100
-rw-r--r--  deps/v8/src/runtime/runtime-wasm.cc  20
-rw-r--r--  deps/v8/src/runtime/runtime.h  40
-rw-r--r--  deps/v8/src/s390/assembler-s390-inl.h  42
-rw-r--r--  deps/v8/src/s390/assembler-s390.cc  38
-rw-r--r--  deps/v8/src/s390/assembler-s390.h  5
-rw-r--r--  deps/v8/src/s390/code-stubs-s390.cc  34
-rw-r--r--  deps/v8/src/s390/codegen-s390.cc  6
-rw-r--r--  deps/v8/src/s390/constants-s390.cc  312
-rw-r--r--  deps/v8/src/s390/interface-descriptors-s390.cc  8
-rw-r--r--  deps/v8/src/s390/macro-assembler-s390.cc  55
-rw-r--r--  deps/v8/src/s390/macro-assembler-s390.h  6
-rw-r--r--  deps/v8/src/s390/simulator-s390.cc  198
-rw-r--r--  deps/v8/src/s390/simulator-s390.h  110
-rw-r--r--  deps/v8/src/safepoint-table.cc  83
-rw-r--r--  deps/v8/src/safepoint-table.h  31
-rw-r--r--  deps/v8/src/simulator-base.cc  95
-rw-r--r--  deps/v8/src/simulator-base.h  163
-rw-r--r--  deps/v8/src/simulator.h  108
-rw-r--r--  deps/v8/src/snapshot/builtin-deserializer-allocator.h  2
-rw-r--r--  deps/v8/src/snapshot/code-serializer.cc  49
-rw-r--r--  deps/v8/src/snapshot/code-serializer.h  2
-rw-r--r--  deps/v8/src/snapshot/default-deserializer-allocator.cc  2
-rw-r--r--  deps/v8/src/snapshot/default-deserializer-allocator.h  2
-rw-r--r--  deps/v8/src/snapshot/deserializer.cc  56
-rw-r--r--  deps/v8/src/snapshot/mksnapshot.cc  2
-rw-r--r--  deps/v8/src/snapshot/partial-serializer.cc  26
-rw-r--r--  deps/v8/src/snapshot/partial-serializer.h  3
-rw-r--r--  deps/v8/src/snapshot/serializer-common.cc  11
-rw-r--r--  deps/v8/src/snapshot/serializer-common.h  3
-rw-r--r--  deps/v8/src/snapshot/serializer.cc  23
-rw-r--r--  deps/v8/src/snapshot/serializer.h  2
-rw-r--r--  deps/v8/src/snapshot/snapshot-common.cc  12
-rw-r--r--  deps/v8/src/snapshot/snapshot-empty.cc  4
-rw-r--r--  deps/v8/src/snapshot/snapshot-source-sink.cc  14
-rw-r--r--  deps/v8/src/snapshot/snapshot.h  2
-rw-r--r--  deps/v8/src/snapshot/startup-deserializer.cc  2
-rw-r--r--  deps/v8/src/snapshot/startup-serializer.cc  39
-rw-r--r--  deps/v8/src/snapshot/startup-serializer.h  14
-rw-r--r--  deps/v8/src/string-stream.cc  2
-rw-r--r--  deps/v8/src/strtod.cc  32
-rw-r--r--  deps/v8/src/third_party/utf8-decoder/LICENSE  19
-rw-r--r--  deps/v8/src/third_party/utf8-decoder/README.v8  18
-rw-r--r--  deps/v8/src/third_party/utf8-decoder/utf8-decoder.h  78
-rw-r--r--  deps/v8/src/tracing/traced-value.cc  2
-rw-r--r--  deps/v8/src/tracing/tracing-category-observer.cc  15
-rw-r--r--  deps/v8/src/transitions.cc  4
-rw-r--r--  deps/v8/src/trap-handler/trap-handler.h  6
-rw-r--r--  deps/v8/src/type-hints.cc  4
-rw-r--r--  deps/v8/src/type-hints.h  2
-rw-r--r--  deps/v8/src/unicode-inl.h  4
-rw-r--r--  deps/v8/src/unicode.cc  325
-rw-r--r--  deps/v8/src/unicode.h  7
-rw-r--r--  deps/v8/src/uri.cc  14
-rw-r--r--  deps/v8/src/utils.h  18
-rw-r--r--  deps/v8/src/v8.cc  15
-rw-r--r--  deps/v8/src/v8.gyp  69
-rw-r--r--  deps/v8/src/value-serializer.cc  46
-rw-r--r--  deps/v8/src/vector-slot-pair.cc  39
-rw-r--r--  deps/v8/src/vector-slot-pair.h  47
-rw-r--r--  deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm-defs.h  23
-rw-r--r--  deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h  202
-rw-r--r--  deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64-defs.h  23
-rw-r--r--  deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h  202
-rw-r--r--  deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32-defs.h  23
-rw-r--r--  deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h  494
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-assembler-defs.h  64
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-assembler.cc  268
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-assembler.h  348
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-compiler.cc  824
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-register.h  242
-rw-r--r--  deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips-defs.h  23
-rw-r--r--  deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h  202
-rw-r--r--  deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64-defs.h  23
-rw-r--r--  deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h  202
-rw-r--r--  deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc-defs.h  23
-rw-r--r--  deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h  202
-rw-r--r--  deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390-defs.h  23
-rw-r--r--  deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h  202
-rw-r--r--  deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64-defs.h  23
-rw-r--r--  deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h  476
-rw-r--r--  deps/v8/src/wasm/decoder.h  31
-rw-r--r--  deps/v8/src/wasm/function-body-decoder-impl.h  263
-rw-r--r--  deps/v8/src/wasm/function-body-decoder.cc  39
-rw-r--r--  deps/v8/src/wasm/function-body-decoder.h  17
-rw-r--r--  deps/v8/src/wasm/memory-tracing.cc  31
-rw-r--r--  deps/v8/src/wasm/memory-tracing.h  31
-rw-r--r--  deps/v8/src/wasm/module-compiler.cc  702
-rw-r--r--  deps/v8/src/wasm/module-compiler.h  20
-rw-r--r--  deps/v8/src/wasm/module-decoder.cc  42
-rw-r--r--  deps/v8/src/wasm/module-decoder.h  36
-rw-r--r--  deps/v8/src/wasm/streaming-decoder.cc  97
-rw-r--r--  deps/v8/src/wasm/wasm-code-manager.cc (renamed from deps/v8/src/wasm/wasm-heap.cc)  279
-rw-r--r--  deps/v8/src/wasm/wasm-code-manager.h (renamed from deps/v8/src/wasm/wasm-heap.h)  68
-rw-r--r--  deps/v8/src/wasm/wasm-code-specialization.cc  30
-rw-r--r--  deps/v8/src/wasm/wasm-code-wrapper.cc  33
-rw-r--r--  deps/v8/src/wasm/wasm-code-wrapper.h  10
-rw-r--r--  deps/v8/src/wasm/wasm-constants.h  83
-rw-r--r--  deps/v8/src/wasm/wasm-debug.cc  64
-rw-r--r--  deps/v8/src/wasm/wasm-engine.cc  23
-rw-r--r--  deps/v8/src/wasm/wasm-engine.h  46
-rw-r--r--  deps/v8/src/wasm/wasm-external-refs.cc  26
-rw-r--r--  deps/v8/src/wasm/wasm-external-refs.h  4
-rw-r--r--  deps/v8/src/wasm/wasm-interpreter.cc  341
-rw-r--r--  deps/v8/src/wasm/wasm-interpreter.h  10
-rw-r--r--  deps/v8/src/wasm/wasm-js.cc  91
-rw-r--r--  deps/v8/src/wasm/wasm-limits.h  7
-rw-r--r--  deps/v8/src/wasm/wasm-memory.cc  87
-rw-r--r--  deps/v8/src/wasm/wasm-memory.h  22
-rw-r--r--  deps/v8/src/wasm/wasm-module-builder.cc  34
-rw-r--r--  deps/v8/src/wasm/wasm-module-builder.h  4
-rw-r--r--  deps/v8/src/wasm/wasm-module.cc  116
-rw-r--r--  deps/v8/src/wasm/wasm-module.h  41
-rw-r--r--  deps/v8/src/wasm/wasm-objects-inl.h  35
-rw-r--r--  deps/v8/src/wasm/wasm-objects.cc  986
-rw-r--r--  deps/v8/src/wasm/wasm-objects.h  204
-rw-r--r--  deps/v8/src/wasm/wasm-opcodes.cc  65
-rw-r--r--  deps/v8/src/wasm/wasm-opcodes.h  229
-rw-r--r--  deps/v8/src/wasm/wasm-serialization.cc  183
-rw-r--r--  deps/v8/src/wasm/wasm-serialization.h  80
-rw-r--r--  deps/v8/src/x64/assembler-x64-inl.h  19
-rw-r--r--  deps/v8/src/x64/assembler-x64.cc  137
-rw-r--r--  deps/v8/src/x64/assembler-x64.h  11
-rw-r--r--  deps/v8/src/x64/code-stubs-x64.cc  28
-rw-r--r--  deps/v8/src/x64/codegen-x64.cc  6
-rw-r--r--  deps/v8/src/x64/deoptimizer-x64.cc  26
-rw-r--r--  deps/v8/src/x64/disasm-x64.cc  150
-rw-r--r--  deps/v8/src/x64/interface-descriptors-x64.cc  7
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc  147
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.h  13
-rw-r--r--  deps/v8/src/x64/simulator-x64.cc  2
-rw-r--r--  deps/v8/src/x64/simulator-x64.h  42
-rw-r--r--  deps/v8/src/zone/accounting-allocator.cc  8
-rw-r--r--  deps/v8/src/zone/zone-containers.h  5
-rw-r--r--  deps/v8/src/zone/zone.cc  9
-rw-r--r--  deps/v8/src/zone/zone.h  7
-rw-r--r--  deps/v8/test/BUILD.gn  23
-rw-r--r--  deps/v8/test/benchmarks/testcfg.py  86
-rw-r--r--  deps/v8/test/bot_default.gyp  1
-rw-r--r--  deps/v8/test/cctest/BUILD.gn  3
-rw-r--r--  deps/v8/test/cctest/assembler-helper-arm.cc  8
-rw-r--r--  deps/v8/test/cctest/assembler-helper-arm.h  27
-rw-r--r--  deps/v8/test/cctest/cctest.cc  2
-rw-r--r--  deps/v8/test/cctest/cctest.gyp  3
-rw-r--r--  deps/v8/test/cctest/cctest.h  22
-rw-r--r--  deps/v8/test/cctest/cctest.status  28
-rw-r--r--  deps/v8/test/cctest/compiler/c-signature.h  143
-rw-r--r--  deps/v8/test/cctest/compiler/call-tester.h  156
-rw-r--r--  deps/v8/test/cctest/compiler/code-assembler-tester.h  5
-rw-r--r--  deps/v8/test/cctest/compiler/codegen-tester.cc  36
-rw-r--r--  deps/v8/test/cctest/compiler/codegen-tester.h  209
-rw-r--r--  deps/v8/test/cctest/compiler/graph-builder-tester.h  45
-rw-r--r--  deps/v8/test/cctest/compiler/test-code-generator.cc  431
-rw-r--r--  deps/v8/test/cctest/compiler/test-js-typed-lowering.cc  2
-rw-r--r--  deps/v8/test/cctest/compiler/test-loop-analysis.cc  2
-rw-r--r--  deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc  4
-rw-r--r--  deps/v8/test/cctest/compiler/test-multiple-return.cc  379
-rw-r--r--  deps/v8/test/cctest/compiler/test-operator.cc  2
-rw-r--r--  deps/v8/test/cctest/compiler/test-representation-change.cc  12
-rw-r--r--  deps/v8/test/cctest/compiler/test-run-jsops.cc  2
-rw-r--r--  deps/v8/test/cctest/compiler/test-run-load-store.cc  614
-rw-r--r--  deps/v8/test/cctest/compiler/test-run-machops.cc  593
-rw-r--r--  deps/v8/test/cctest/compiler/test-run-native-calls.cc  10
-rw-r--r--  deps/v8/test/cctest/compiler/test-run-retpoline.cc  208
-rw-r--r--  deps/v8/test/cctest/compiler/test-run-tail-calls.cc  171
-rw-r--r--  deps/v8/test/cctest/compiler/test-run-wasm-machops.cc  16
-rw-r--r--  deps/v8/test/cctest/compiler/value-helper.h  52
-rw-r--r--  deps/v8/test/cctest/heap/heap-tester.h  1
-rw-r--r--  deps/v8/test/cctest/heap/heap-utils.cc  15
-rw-r--r--  deps/v8/test/cctest/heap/test-embedder-tracing.cc  197
-rw-r--r--  deps/v8/test/cctest/heap/test-heap.cc  18
-rw-r--r--  deps/v8/test/cctest/heap/test-incremental-marking.cc  8
-rw-r--r--  deps/v8/test/cctest/heap/test-mark-compact.cc  2
-rw-r--r--  deps/v8/test/cctest/heap/test-spaces.cc  62
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden  501
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/ClassFields.golden  185
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/DeclareGlobals.golden  6
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden  1143
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden  504
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden  1340
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden  302
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCompoundExpressions.golden  4
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden  8
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden  703
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden  52
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/StaticClassFields.golden  339
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/StoreGlobal.golden  12
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden  2
-rw-r--r--  deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc  14
-rw-r--r--  deps/v8/test/cctest/interpreter/interpreter-tester.h  5
-rw-r--r--  deps/v8/test/cctest/interpreter/test-bytecode-generator.cc  66
-rw-r--r--  deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc  3
-rw-r--r--  deps/v8/test/cctest/interpreter/test-interpreter.cc  119
-rw-r--r--  deps/v8/test/cctest/parsing/test-preparser.cc  20
-rw-r--r--  deps/v8/test/cctest/parsing/test-scanner-streams.cc  18
-rw-r--r--  deps/v8/test/cctest/parsing/test-scanner.cc  8
-rw-r--r--  deps/v8/test/cctest/profiler-extension.cc  3
-rw-r--r--  deps/v8/test/cctest/test-access-checks.cc  2
-rw-r--r--  deps/v8/test/cctest/test-accessors.cc  107
-rw-r--r--  deps/v8/test/cctest/test-allocation.cc  17
-rw-r--r--  deps/v8/test/cctest/test-api-interceptors.cc  46
-rw-r--r--  deps/v8/test/cctest/test-api.cc  266
-rw-r--r--  deps/v8/test/cctest/test-assembler-arm.cc  710
-rw-r--r--  deps/v8/test/cctest/test-assembler-arm64.cc  4168
-rw-r--r--  deps/v8/test/cctest/test-assembler-ia32.cc  13
-rw-r--r--  deps/v8/test/cctest/test-assembler-mips.cc  1431
-rw-r--r--  deps/v8/test/cctest/test-assembler-mips64.cc  1830
-rw-r--r--  deps/v8/test/cctest/test-assembler-ppc.cc  95
-rw-r--r--  deps/v8/test/cctest/test-assembler-s390.cc  55
-rw-r--r--  deps/v8/test/cctest/test-assembler-x64.cc  240
-rw-r--r--  deps/v8/test/cctest/test-atomicops.cc  5
-rw-r--r--  deps/v8/test/cctest/test-bignum.cc  2
-rw-r--r--  deps/v8/test/cctest/test-code-layout.cc  10
-rw-r--r--  deps/v8/test/cctest/test-code-stub-assembler.cc  105
-rw-r--r--  deps/v8/test/cctest/test-code-stubs-arm.cc  6
-rw-r--r--  deps/v8/test/cctest/test-code-stubs-arm64.cc  70
-rw-r--r--  deps/v8/test/cctest/test-code-stubs-ia32.cc  3
-rw-r--r--  deps/v8/test/cctest/test-code-stubs-mips.cc  3
-rw-r--r--  deps/v8/test/cctest/test-code-stubs-mips64.cc  3
-rw-r--r--  deps/v8/test/cctest/test-code-stubs-x64.cc  3
-rw-r--r--  deps/v8/test/cctest/test-code-stubs.cc  2
-rw-r--r--  deps/v8/test/cctest/test-compiler.cc  151
-rw-r--r--  deps/v8/test/cctest/test-conversions.cc  6
-rw-r--r--  deps/v8/test/cctest/test-cpu-profiler.cc  74
-rw-r--r--  deps/v8/test/cctest/test-debug.cc  67
-rw-r--r--  deps/v8/test/cctest/test-disasm-arm.cc  18
-rw-r--r--  deps/v8/test/cctest/test-disasm-arm64.cc  75
-rw-r--r--  deps/v8/test/cctest/test-disasm-ia32.cc  11
-rw-r--r--  deps/v8/test/cctest/test-disasm-mips.cc  17
-rw-r--r--  deps/v8/test/cctest/test-disasm-mips64.cc  19
-rw-r--r--  deps/v8/test/cctest/test-disasm-x64.cc  6
-rw-r--r--  deps/v8/test/cctest/test-diy-fp.cc  6
-rw-r--r--  deps/v8/test/cctest/test-double.cc  14
-rw-r--r--  deps/v8/test/cctest/test-feedback-vector.cc  38
-rw-r--r--  deps/v8/test/cctest/test-field-type-tracking.cc  2
-rw-r--r--  deps/v8/test/cctest/test-func-name-inference.cc  28
-rw-r--r--  deps/v8/test/cctest/test-heap-profiler.cc  36
-rw-r--r--  deps/v8/test/cctest/test-inobject-slack-tracking.cc  6
-rw-r--r--  deps/v8/test/cctest/test-log-stack-tracer.cc  2
-rw-r--r--  deps/v8/test/cctest/test-log.cc  21
-rw-r--r--  deps/v8/test/cctest/test-macro-assembler-arm.cc  22
-rw-r--r--  deps/v8/test/cctest/test-macro-assembler-mips.cc  127
-rw-r--r--  deps/v8/test/cctest/test-macro-assembler-mips64.cc  197
-rw-r--r--  deps/v8/test/cctest/test-macro-assembler-x64.cc  16
-rw-r--r--  deps/v8/test/cctest/test-modules.cc  185
-rw-r--r--  deps/v8/test/cctest/test-parsing.cc  972
-rw-r--r--  deps/v8/test/cctest/test-platform.cc  39
-rw-r--r--  deps/v8/test/cctest/test-profile-generator.cc  3
-rw-r--r--  deps/v8/test/cctest/test-regexp.cc  10
-rw-r--r--  deps/v8/test/cctest/test-run-wasm-relocation-arm.cc  2
-rw-r--r--  deps/v8/test/cctest/test-run-wasm-relocation-arm64.cc  2
-rw-r--r--  deps/v8/test/cctest/test-run-wasm-relocation-ia32.cc  2
-rw-r--r--  deps/v8/test/cctest/test-run-wasm-relocation-x64.cc  2
-rw-r--r--  deps/v8/test/cctest/test-serialize.cc  399
-rw-r--r--  deps/v8/test/cctest/test-strings.cc  6
-rw-r--r--  deps/v8/test/cctest/test-strtod.cc  4
-rw-r--r--  deps/v8/test/cctest/test-sync-primitives-arm.cc  16
-rw-r--r--  deps/v8/test/cctest/test-sync-primitives-arm64.cc  10
-rw-r--r--  deps/v8/test/cctest/test-thread-termination.cc  8
-rw-r--r--  deps/v8/test/cctest/test-traced-value.cc  4
-rw-r--r--  deps/v8/test/cctest/test-typedarrays.cc  74
-rw-r--r--  deps/v8/test/cctest/test-types.cc  34
-rw-r--r--  deps/v8/test/cctest/test-unboxed-doubles.cc  2
-rw-r--r--  deps/v8/test/cctest/test-usecounters.cc  25
-rw-r--r--  deps/v8/test/cctest/test-utils-arm64.cc  4
-rw-r--r--  deps/v8/test/cctest/test-utils-arm64.h  2
-rw-r--r--  deps/v8/test/cctest/test-utils.cc  6
-rw-r--r--  deps/v8/test/cctest/test-weakmaps.cc  25
-rw-r--r--  deps/v8/test/cctest/testcfg.py  44
-rw-r--r--  deps/v8/test/cctest/trace-extension.cc  4
-rw-r--r--  deps/v8/test/cctest/unicode-helpers.h  12
-rw-r--r--  deps/v8/test/cctest/wasm/test-c-wasm-entry.cc  3
-rw-r--r--  deps/v8/test/cctest/wasm/test-run-wasm-64.cc  493
-rw-r--r--  deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc  12
-rw-r--r--  deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc  12
-rw-r--r--  deps/v8/test/cctest/wasm/test-run-wasm-module.cc  143
-rw-r--r--  deps/v8/test/cctest/wasm/test-run-wasm-simd.cc  557
-rw-r--r--  deps/v8/test/cctest/wasm/test-run-wasm.cc  267
-rw-r--r--  deps/v8/test/cctest/wasm/test-streaming-compilation.cc  123
-rw-r--r--  deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc  32
-rw-r--r--  deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc  6
-rw-r--r--  deps/v8/test/cctest/wasm/wasm-run-utils.cc  133
-rw-r--r--  deps/v8/test/cctest/wasm/wasm-run-utils.h  43
-rw-r--r--  deps/v8/test/common/wasm/wasm-macro-gen.h  25
-rw-r--r--  deps/v8/test/common/wasm/wasm-module-runner.cc  6
-rw-r--r--  deps/v8/test/d8_default.gyp  31
-rw-r--r--  deps/v8/test/d8_default.isolate  18
-rw-r--r--  deps/v8/test/debugger/debug/debug-modules-set-variable-value.js  4
-rw-r--r--  deps/v8/test/debugger/debug/harmony/modules-debug-scopes2.js  10
-rw-r--r--  deps/v8/test/debugger/debug/regress/regress-1853.js  3
-rw-r--r--  deps/v8/test/debugger/debug/regress/regress-crbug-481896.js  4
-rw-r--r--  deps/v8/test/debugger/testcfg.py  66
-rw-r--r--  deps/v8/test/default.gyp  1
-rw-r--r--  deps/v8/test/fuzzer/fuzzer-support.cc  9
-rw-r--r--  deps/v8/test/fuzzer/fuzzer.gyp  30
-rw-r--r--  deps/v8/test/fuzzer/fuzzer.isolate  2
-rw-r--r--  deps/v8/test/fuzzer/multi-return.cc  346
-rw-r--r--  deps/v8/test/fuzzer/multi_return/README.md  4
-rw-r--r--  deps/v8/test/fuzzer/regexp.cc  2
-rw-r--r--  deps/v8/test/fuzzer/testcfg.py  64
-rw-r--r--  deps/v8/test/fuzzer/wasm-async.cc  2
-rw-r--r--  deps/v8/test/fuzzer/wasm-compile.cc  335
-rw-r--r--  deps/v8/test/fuzzer/wasm-fuzzer-common.cc  159
-rw-r--r--  deps/v8/test/inspector/debugger/async-chains-expected.txt  152
-rw-r--r--  deps/v8/test/inspector/debugger/async-chains.js  77
-rw-r--r--  deps/v8/test/inspector/debugger/async-for-await-of-promise-stack.js  2
-rw-r--r--  deps/v8/test/inspector/debugger/evaluate-on-call-frame-in-module-expected.txt  228
-rw-r--r--  deps/v8/test/inspector/debugger/evaluate-on-call-frame-in-module.js  30
-rw-r--r--  deps/v8/test/inspector/debugger/external-stack-trace.js  1
-rw-r--r--  deps/v8/test/inspector/debugger/for-of-loops-expected.txt  8
-rw-r--r--  deps/v8/test/inspector/debugger/get-possible-breakpoints-master-expected.txt  12
-rw-r--r--  deps/v8/test/inspector/debugger/pause-on-async-call-expected.txt  78
-rw-r--r--  deps/v8/test/inspector/debugger/pause-on-async-call.js  60
-rw-r--r--  deps/v8/test/inspector/debugger/return-break-locations-expected.txt  6
-rw-r--r--  deps/v8/test/inspector/debugger/set-breakpoint-ignore-hint-when-no-location-expected.txt  11
-rw-r--r--  deps/v8/test/inspector/debugger/set-breakpoint-ignore-hint-when-no-location.js  27
-rw-r--r--  deps/v8/test/inspector/debugger/step-into-external-async-task-same-context-expected.txt  14
-rw-r--r--  deps/v8/test/inspector/debugger/step-into-external-async-task-same-context.js  81
-rw-r--r--  deps/v8/test/inspector/debugger/wasm-scripts-expected.txt  7
-rw-r--r--  deps/v8/test/inspector/inspector-test.cc  45
-rw-r--r--  deps/v8/test/inspector/protocol-test.js  8
-rw-r--r--  deps/v8/test/inspector/runtime/console-methods-expected.txt  100
-rw-r--r--  deps/v8/test/inspector/runtime/console-methods.js  3
-rw-r--r--  deps/v8/test/inspector/runtime/console-time-repeat-expected.txt  76
-rw-r--r--  deps/v8/test/inspector/runtime/console-time-repeat.js  20
-rw-r--r--  deps/v8/test/inspector/runtime/evaluate-async-expected.txt  16
-rw-r--r--  deps/v8/test/inspector/runtime/get-properties-expected.txt  37
-rw-r--r--  deps/v8/test/inspector/runtime/get-properties-on-proxy-expected.txt  173
-rw-r--r--  deps/v8/test/inspector/runtime/get-properties-on-proxy.js  209
-rw-r--r--  deps/v8/test/inspector/runtime/get-properties.js  94
-rw-r--r--  deps/v8/test/inspector/runtime/regression-736302-expected.txt  2
-rw-r--r--  deps/v8/test/inspector/runtime/runtime-evaluate-null-property-expected.txt  18
-rw-r--r--  deps/v8/test/inspector/runtime/runtime-evaluate-null-property.js  14
-rw-r--r--  deps/v8/test/inspector/testcfg.py  102
-rw-r--r--  deps/v8/test/intl/testcfg.py  61
-rw-r--r--  deps/v8/test/js-perf-test/Array/every.js  60
-rw-r--r--  deps/v8/test/js-perf-test/Array/filter.js  108
-rw-r--r--  deps/v8/test/js-perf-test/Array/find-index.js  63
-rw-r--r--  deps/v8/test/js-perf-test/Array/find.js  63
-rw-r--r--  deps/v8/test/js-perf-test/Array/for-each.js  62
-rw-r--r--  deps/v8/test/js-perf-test/Array/join.js  36
-rw-r--r--  deps/v8/test/js-perf-test/Array/map.js  128
-rw-r--r--  deps/v8/test/js-perf-test/Array/of.js  117
-rw-r--r--  deps/v8/test/js-perf-test/Array/reduce-right.js  61
-rw-r--r--  deps/v8/test/js-perf-test/Array/reduce.js  61
-rw-r--r--  deps/v8/test/js-perf-test/Array/run.js  73
-rw-r--r--  deps/v8/test/js-perf-test/Array/some.js  60
-rw-r--r--  deps/v8/test/js-perf-test/Array/to-string.js  37
-rw-r--r--  deps/v8/test/js-perf-test/JSTests.json  60
-rw-r--r--  deps/v8/test/js-perf-test/Strings/run.js  1
-rw-r--r--  deps/v8/test/js-perf-test/Strings/string-indexof.js  69
-rw-r--r--  deps/v8/test/js-perf-test/TypedArrays/slice-nospecies.js  34
-rw-r--r--  deps/v8/test/js-perf-test/TypedArrays/subarray-nospecies.js  34
-rw-r--r--  deps/v8/test/message/fail/array-binding-pattern-await1.js  9
-rw-r--r--  deps/v8/test/message/fail/array-binding-pattern-await1.out  4
-rw-r--r--  deps/v8/test/message/fail/class-field-static-constructor.js  2
-rw-r--r--  deps/v8/test/message/fail/class-field-static-prototype.js  2
-rw-r--r--  deps/v8/test/message/fail/modules-cycle1.out  4
-rw-r--r--  deps/v8/test/message/fail/modules-cycle2.out  4
-rw-r--r--  deps/v8/test/message/fail/modules-cycle3.out  4
-rw-r--r--  deps/v8/test/message/fail/modules-cycle4.out  4
-rw-r--r--  deps/v8/test/message/fail/modules-cycle5.out  4
-rw-r--r--  deps/v8/test/message/fail/modules-cycle6.out  4
-rw-r--r--  deps/v8/test/message/fail/modules-import1.out  4
-rw-r--r--  deps/v8/test/message/fail/modules-import2.out  4
-rw-r--r--  deps/v8/test/message/fail/modules-import3.out  4
-rw-r--r--  deps/v8/test/message/fail/modules-import4.out  4
-rw-r--r--  deps/v8/test/message/fail/modules-import5.out  4
-rw-r--r--  deps/v8/test/message/fail/modules-import6.out  4
-rw-r--r--  deps/v8/test/message/fail/modules-star-conflict1.out  4
-rw-r--r--  deps/v8/test/message/fail/modules-star-conflict2.out  4
-rw-r--r--  deps/v8/test/message/fail/modules-star-default.out  4
-rw-r--r--  deps/v8/test/message/fail/object-binding-pattern-await1.js  9
-rw-r--r--  deps/v8/test/message/fail/object-binding-pattern-await1.out  4
-rw-r--r--  deps/v8/test/message/message.status  11
-rw-r--r--  deps/v8/test/message/object-binding-pattern-await-computed-name.js  9
-rw-r--r--  deps/v8/test/message/object-binding-pattern-await-computed-name.out (renamed from deps/v8/tools/release/testdata/v8/base/trace_event/common/common)  0
-rw-r--r--  deps/v8/test/message/testcfg.py  112
-rw-r--r--  deps/v8/test/message/wasm-trace-memory-interpreted.out  14
-rw-r--r--  deps/v8/test/message/wasm-trace-memory-liftoff.js  7
-rw-r--r--  deps/v8/test/message/wasm-trace-memory-liftoff.out  9
-rw-r--r--  deps/v8/test/message/wasm-trace-memory.js  2
-rw-r--r--  deps/v8/test/message/wasm-trace-memory.out  18
-rw-r--r--  deps/v8/test/mjsunit/array-iteration.js  95
-rw-r--r--  deps/v8/test/mjsunit/array-reduce.js  684
-rw-r--r--  deps/v8/test/mjsunit/code-coverage-block-noopt.js  2
-rw-r--r--  deps/v8/test/mjsunit/code-coverage-block-opt.js  4
-rw-r--r--  deps/v8/test/mjsunit/code-coverage-block.js  25
-rw-r--r--  deps/v8/test/mjsunit/compiler/array-multiple-receiver-maps.js  122
-rw-r--r--  deps/v8/test/mjsunit/compiler/deopt-array-builtins.js  148
-rw-r--r--  deps/v8/test/mjsunit/compiler/deopt-array-push.js  97
-rw-r--r--  deps/v8/test/mjsunit/compiler/escape-analysis-13.js  2
-rw-r--r--  deps/v8/test/mjsunit/compiler/escape-analysis-15.js  2
-rw-r--r--  deps/v8/test/mjsunit/compiler/escape-analysis-phi-type.js  2
-rw-r--r--  deps/v8/test/mjsunit/compiler/materialize-dictionary-properties.js  18
-rw-r--r--  deps/v8/test/mjsunit/compiler/materialize-mutable-heap-number.js  22
-rw-r--r--  deps/v8/test/mjsunit/compiler/new-cons-string.js  71
-rw-r--r--  deps/v8/test/mjsunit/compiler/regress-786521.js  23
-rw-r--r--  deps/v8/test/mjsunit/compiler/regress-793863.js  12
-rw-r--r--  deps/v8/test/mjsunit/compiler/regress-796041.js  35
-rw-r--r--  deps/v8/test/mjsunit/compiler/regress-797596.js  30
-rw-r--r--  deps/v8/test/mjsunit/compiler/regress-801097.js  19
-rw-r--r--  deps/v8/test/mjsunit/compiler/varargs.js  49
-rw-r--r--  deps/v8/test/mjsunit/constant-folding-2.js  4
-rw-r--r--  deps/v8/test/mjsunit/d8/.gitignore  1
-rw-r--r--  deps/v8/test/mjsunit/d8/d8-os.js (renamed from deps/v8/test/mjsunit/d8-os.js)  0
-rw-r--r--  deps/v8/test/mjsunit/d8/d8-performance-now.js (renamed from deps/v8/test/mjsunit/d8-performance-now.js)  0
-rw-r--r--  deps/v8/test/mjsunit/d8/d8-worker-sharedarraybuffer.js (renamed from deps/v8/test/mjsunit/d8-worker-sharedarraybuffer.js)  0
-rw-r--r--  deps/v8/test/mjsunit/d8/d8-worker-spawn-worker.js (renamed from deps/v8/test/mjsunit/d8-worker-spawn-worker.js)  0
-rw-r--r--  deps/v8/test/mjsunit/d8/d8-worker.js (renamed from deps/v8/test/mjsunit/d8-worker.js)  0
-rw-r--r--  deps/v8/test/mjsunit/d8/enable-tracing.js  8
-rw-r--r--  deps/v8/test/mjsunit/deserialize-reference.js  2
-rw-r--r--  deps/v8/test/mjsunit/dictionary-prototypes.js  409
-rw-r--r--  deps/v8/test/mjsunit/es6/array-find.js  34
-rw-r--r--  deps/v8/test/mjsunit/es6/array-findindex.js  34
-rw-r--r--  deps/v8/test/mjsunit/es6/array-iterator-turbo.js  2
-rw-r--r--  deps/v8/test/mjsunit/es6/call-with-spread-modify-next.js  4
-rw-r--r--  deps/v8/test/mjsunit/es6/computed-property-names-object-literals-methods.js  2
-rw-r--r--  deps/v8/test/mjsunit/es6/destructuring-assignment.js  44
-rw-r--r--  deps/v8/test/mjsunit/es6/iteration-semantics.js  8
-rw-r--r--  deps/v8/test/mjsunit/es6/reflect-construct.js  2
-rw-r--r--  deps/v8/test/mjsunit/es6/spread-call.js  18
-rw-r--r--  deps/v8/test/mjsunit/es6/super-with-spread-modify-next.js  4
-rw-r--r--  deps/v8/test/mjsunit/es6/typedarray.js  24
-rw-r--r--  deps/v8/test/mjsunit/es8/object-entries.js  33
-rw-r--r--  deps/v8/test/mjsunit/es8/regress/regress-794744.js  8
-rw-r--r--  deps/v8/test/mjsunit/global-prototypes.js  354
-rw-r--r--  deps/v8/test/mjsunit/harmony/async-for-of-non-iterable.js  1
-rw-r--r--  deps/v8/test/mjsunit/harmony/async-from-sync-iterator.js  2
-rw-r--r--  deps/v8/test/mjsunit/harmony/async-generators-basic.js  2
-rw-r--r--  deps/v8/test/mjsunit/harmony/async-generators-resume-return.js  2
-rw-r--r--  deps/v8/test/mjsunit/harmony/async-generators-return.js  2
-rw-r--r--  deps/v8/test/mjsunit/harmony/async-generators-yield.js  2
-rw-r--r--  deps/v8/test/mjsunit/harmony/bigint/as-int-n.js  18
-rw-r--r--  deps/v8/test/mjsunit/harmony/bigint/basics.js  10
-rw-r--r--  deps/v8/test/mjsunit/harmony/bigint/comparisons.js  2
-rw-r--r--  deps/v8/test/mjsunit/harmony/bigint/dec.js  3
-rw-r--r--  deps/v8/test/mjsunit/harmony/bigint/exp.js  43
-rw-r--r--  deps/v8/test/mjsunit/harmony/bigint/inc.js  3
-rw-r--r--  deps/v8/test/mjsunit/harmony/bigint/json.js  2
-rw-r--r--  deps/v8/test/mjsunit/harmony/bigint/neg.js  3
-rw-r--r--  deps/v8/test/mjsunit/harmony/bigint/not.js  3
-rw-r--r--  deps/v8/test/mjsunit/harmony/bigint/regressions.js  4
-rw-r--r--  deps/v8/test/mjsunit/harmony/bigint/tonumber.js  2
-rw-r--r--  deps/v8/test/mjsunit/harmony/bigint/too-big-literal.js  14
-rw-r--r--  deps/v8/test/mjsunit/harmony/bigint/turbo.js  193
-rw-r--r--  deps/v8/test/mjsunit/harmony/for-await-of.js  2
-rw-r--r--  deps/v8/test/mjsunit/harmony/modules-import-15.js  3
-rw-r--r--  deps/v8/test/mjsunit/harmony/optional-catch-binding-breaks.js  65
-rw-r--r--  deps/v8/test/mjsunit/harmony/optional-catch-binding.js  39
-rw-r--r--  deps/v8/test/mjsunit/harmony/promise-prototype-finally.js  10
-rw-r--r--  deps/v8/test/mjsunit/harmony/public-instance-class-fields.js  82
-rw-r--r--  deps/v8/test/mjsunit/harmony/public-static-class-fields.js  130
-rw-r--r--  deps/v8/test/mjsunit/harmony/regexp-named-captures.js  123
-rw-r--r--  deps/v8/test/mjsunit/harmony/regress/regress-6322.js  2
-rw-r--r--  deps/v8/test/mjsunit/harmony/regress/regress-772649.js  2
-rw-r--r--  deps/v8/test/mjsunit/harmony/sharedarraybuffer.js  3
-rw-r--r--  deps/v8/test/mjsunit/harmony/symbol-async-iterator.js  2
-rw-r--r--  deps/v8/test/mjsunit/ic-lookup-on-receiver.js  44
-rw-r--r--  deps/v8/test/mjsunit/mjsunit.status  59
-rw-r--r--  deps/v8/test/mjsunit/optimized-array-every.js  520
-rw-r--r--  deps/v8/test/mjsunit/optimized-array-find.js  460
-rw-r--r--  deps/v8/test/mjsunit/optimized-array-findindex.js  460
-rw-r--r--  deps/v8/test/mjsunit/optimized-array-some.js  502
-rw-r--r--  deps/v8/test/mjsunit/optimized-filter.js  53
-rw-r--r--  deps/v8/test/mjsunit/optimized-foreach.js  50
-rw-r--r--  deps/v8/test/mjsunit/optimized-map.js  53
-rw-r--r--  deps/v8/test/mjsunit/regress/modules-skip-regress-797581-1.js  5
-rw-r--r--  deps/v8/test/mjsunit/regress/modules-skip-regress-797581-2.js  5
-rw-r--r--  deps/v8/test/mjsunit/regress/modules-skip-regress-797581-3.js  5
-rw-r--r--  deps/v8/test/mjsunit/regress/modules-skip-regress-797581-4.js  5
-rw-r--r--  deps/v8/test/mjsunit/regress/modules-skip-regress-797581-5.js  6
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-2646.js  2
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-370827.js  2
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-599717.js  2
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-791334.js  8
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-791958.js  15
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-793588.js  13
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-796427.js  7
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-797481.js  10
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-797581.js  29
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-800538.js  6
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-801171.js  20
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-801772.js  9
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-802060.js  24
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-789764.js  15
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-791245-1.js  18
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-791245-2.js  18
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-795922.js  9
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-798644.js  21
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-800077.js  6
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-800810.js  13
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-807096.js  27
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-808192.js  32
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-813427.js  49
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-v8-7245.js  6
-rw-r--r--  deps/v8/test/mjsunit/regress/wasm/regress-791810.js  21
-rw-r--r--  deps/v8/test/mjsunit/regress/wasm/regress-793551.js  20
-rw-r--r--  deps/v8/test/mjsunit/regress/wasm/regress-797846.js  14
-rw-r--r--  deps/v8/test/mjsunit/regress/wasm/regress-800756.js  15
-rw-r--r--  deps/v8/test/mjsunit/regress/wasm/regress-801850.js  11
-rw-r--r--  deps/v8/test/mjsunit/regress/wasm/regress-802244.js  22
-rw-r--r--  deps/v8/test/mjsunit/regress/wasm/regress-808980.js  28
-rw-r--r--  deps/v8/test/mjsunit/serialize-after-execute.js  15
-rw-r--r--  deps/v8/test/mjsunit/serialize-embedded-error.js  2
-rw-r--r--  deps/v8/test/mjsunit/serialize-ic.js  2
-rw-r--r--  deps/v8/test/mjsunit/testcfg.py  75
-rw-r--r--  deps/v8/test/mjsunit/wasm/errors.js  18
-rw-r--r--  deps/v8/test/mjsunit/wasm/grow-memory-detaching.js  65
-rw-r--r--  deps/v8/test/mjsunit/wasm/indirect-tables.js  101
-rw-r--r--  deps/v8/test/mjsunit/wasm/lazy-compilation.js  39
-rw-r--r--  deps/v8/test/mjsunit/wasm/many-parameters.js  9
-rw-r--r--  deps/v8/test/mjsunit/wasm/module-memory.js  23
-rw-r--r--  deps/v8/test/mjsunit/wasm/shared-memory.js  39
-rw-r--r--  deps/v8/test/mjsunit/wasm/trap-location.js  2
-rw-r--r--  deps/v8/test/mjsunit/wasm/wasm-module-builder.js  25
-rw-r--r--  deps/v8/test/mkgrokdump/mkgrokdump.cc  9
-rw-r--r--  deps/v8/test/mkgrokdump/testcfg.py  60
-rw-r--r--  deps/v8/test/mozilla/mozilla.status  8
-rw-r--r--  deps/v8/test/mozilla/testcfg.py  53
-rw-r--r--  deps/v8/test/optimize_for_size.gyp  1
-rw-r--r--  deps/v8/test/perf.isolate  2
-rw-r--r--  deps/v8/test/preparser/testcfg.py  68
-rw-r--r--  deps/v8/test/promises-aplus/README  29
-rw-r--r--  deps/v8/test/promises-aplus/lib/adapter.js  41
-rw-r--r--  deps/v8/test/promises-aplus/lib/assert.js  97
-rw-r--r--  deps/v8/test/promises-aplus/lib/global.js  67
-rw-r--r--  deps/v8/test/promises-aplus/lib/mocha.js  255
-rw-r--r--  deps/v8/test/promises-aplus/lib/require.js  50
-rw-r--r--  deps/v8/test/promises-aplus/lib/run-tests.js  29
-rw-r--r--  deps/v8/test/promises-aplus/promises-aplus.status  31
-rw-r--r--  deps/v8/test/promises-aplus/testcfg.py  99
-rw-r--r--  deps/v8/test/test262/local-tests/test/language/expressions/class/fields-inner-arrow-err-contains-arguments.js  26
-rw-r--r--  deps/v8/test/test262/local-tests/test/language/expressions/class/fields-inner-arrow-eval-err-contains-arguments.js  33
-rw-r--r--  deps/v8/test/test262/local-tests/test/language/expressions/class/fields-inner-eval-arrow-err-contains-arguments.js  30
-rw-r--r--  deps/v8/test/test262/local-tests/test/language/statements/class/fields-inner-arrow-err-contains-arguments.js  26
-rw-r--r--  deps/v8/test/test262/local-tests/test/language/statements/class/fields-inner-arrow-eval-err-contains-arguments.js  34
-rw-r--r--  deps/v8/test/test262/local-tests/test/language/statements/class/fields-inner-eval-arrow-err-contains-arguments.js  30
-rw-r--r--  deps/v8/test/test262/test262.status  47
-rw-r--r--  deps/v8/test/test262/testcfg.py  277
-rw-r--r--  deps/v8/test/unittests/BUILD.gn  4
-rw-r--r--  deps/v8/test/unittests/allocation-unittest.cc  164
-rw-r--r--  deps/v8/test/unittests/api/access-check-unittest.cc  9
-rw-r--r--  deps/v8/test/unittests/asmjs/asm-scanner-unittest.cc  8
-rw-r--r--  deps/v8/test/unittests/base/bits-unittest.cc  60
-rw-r--r--  deps/v8/test/unittests/base/logging-unittest.cc  46
-rw-r--r--  deps/v8/test/unittests/base/ostreams-unittest.cc  4
-rw-r--r--  deps/v8/test/unittests/base/platform/platform-unittest.cc  116
-rw-r--r--  deps/v8/test/unittests/bigint-unittest.cc  2
-rw-r--r--  deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc  45
-rw-r--r--  deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc  309
-rw-r--r--  deps/v8/test/unittests/compiler/graph-unittest.cc  3
-rw-r--r--deps/v8/test/unittests/compiler/graph-unittest.h3
-rw-r--r--deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc11
-rw-r--r--deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc6
-rw-r--r--deps/v8/test/unittests/compiler/int64-lowering-unittest.cc8
-rw-r--r--deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc3
-rw-r--r--deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc16
-rw-r--r--deps/v8/test/unittests/compiler/loop-peeling-unittest.cc21
-rw-r--r--deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc180
-rw-r--r--deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc12
-rw-r--r--deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc28
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.cc1
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.h1
-rw-r--r--deps/v8/test/unittests/compiler/regalloc/move-optimizer-unittest.cc3
-rw-r--r--deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc89
-rw-r--r--deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc29
-rw-r--r--deps/v8/test/unittests/compiler/state-values-utils-unittest.cc4
-rw-r--r--deps/v8/test/unittests/compiler/typer-unittest.cc4
-rw-r--r--deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc23
-rw-r--r--deps/v8/test/unittests/counters-unittest.cc62
-rw-r--r--deps/v8/test/unittests/eh-frame-iterator-unittest.cc18
-rw-r--r--deps/v8/test/unittests/eh-frame-writer-unittest.cc8
-rw-r--r--deps/v8/test/unittests/heap/gc-tracer-unittest.cc121
-rw-r--r--deps/v8/test/unittests/heap/heap-unittest.cc2
-rw-r--r--deps/v8/test/unittests/heap/marking-unittest.cc14
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc19
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc5
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc29
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-node-unittest.cc8
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc9
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-utils.h20
-rw-r--r--deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc4
-rw-r--r--deps/v8/test/unittests/object-unittest.cc4
-rw-r--r--deps/v8/test/unittests/parser/ast-value-unittest.cc4
-rw-r--r--deps/v8/test/unittests/testcfg.py70
-rw-r--r--deps/v8/test/unittests/unicode-unittest.cc476
-rw-r--r--deps/v8/test/unittests/unittests.gyp4
-rw-r--r--deps/v8/test/unittests/unittests.isolate3
-rw-r--r--deps/v8/test/unittests/value-serializer-unittest.cc521
-rw-r--r--deps/v8/test/unittests/wasm/decoder-unittest.cc32
-rw-r--r--deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc127
-rw-r--r--deps/v8/test/unittests/wasm/leb-helper-unittest.cc8
-rw-r--r--deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc6
-rw-r--r--deps/v8/test/unittests/wasm/module-decoder-unittest.cc70
-rw-r--r--deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc70
-rw-r--r--deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc (renamed from deps/v8/test/unittests/wasm/wasm-heap-unittest.cc)4
-rw-r--r--deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc2
-rw-r--r--deps/v8/test/wasm-spec-tests/testcfg.py20
-rw-r--r--deps/v8/test/wasm-spec-tests/tests.tar.gz.sha12
-rw-r--r--deps/v8/test/webkit/date-constructor-expected.txt55
-rw-r--r--deps/v8/test/webkit/date-constructor.js58
-rw-r--r--deps/v8/test/webkit/testcfg.py124
-rw-r--r--deps/v8/third_party/binutils/Linux_ia32/binutils.tar.bz2.sha12
-rw-r--r--deps/v8/third_party/binutils/Linux_x64/binutils.tar.bz2.sha12
-rw-r--r--deps/v8/third_party/inspector_protocol/CodeGenerator.py55
-rw-r--r--deps/v8/third_party/inspector_protocol/ConvertProtocolToJSON.py183
-rw-r--r--deps/v8/third_party/inspector_protocol/README.v82
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/DispatcherBase_cpp.template44
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/DispatcherBase_h.template3
-rw-r--r--deps/v8/third_party/inspector_protocol/templates/TypeBuilder_h.template4
-rw-r--r--deps/v8/tools/BUILD.gn1
-rw-r--r--deps/v8/tools/callstats.html2
-rwxr-xr-xdeps/v8/tools/callstats.py1
-rw-r--r--deps/v8/tools/foozzie/testdata/failure_output.txt2
-rwxr-xr-xdeps/v8/tools/foozzie/v8_foozzie.py34
-rw-r--r--deps/v8/tools/foozzie/v8_suppressions.py65
-rw-r--r--deps/v8/tools/gdbinit2
-rw-r--r--deps/v8/tools/gen-postmortem-metadata.py4
-rw-r--r--deps/v8/tools/heap-stats/README.md15
-rw-r--r--deps/v8/tools/heap-stats/categories.js167
-rw-r--r--deps/v8/tools/heap-stats/details-selection.html72
-rw-r--r--deps/v8/tools/heap-stats/details-selection.js211
-rw-r--r--deps/v8/tools/heap-stats/global-timeline.html16
-rw-r--r--deps/v8/tools/heap-stats/global-timeline.js135
-rw-r--r--deps/v8/tools/heap-stats/index.html88
-rw-r--r--deps/v8/tools/heap-stats/trace-file-reader.html26
-rw-r--r--deps/v8/tools/heap-stats/trace-file-reader.js300
-rwxr-xr-xdeps/v8/tools/js2c.py12
-rwxr-xr-xdeps/v8/tools/map-processor41
-rw-r--r--deps/v8/tools/map-processor-driver.js33
-rw-r--r--deps/v8/tools/map-processor.html1254
-rw-r--r--deps/v8/tools/map-processor.js717
-rwxr-xr-xdeps/v8/tools/node/backport_node.py (renamed from deps/v8/tools/release/backport_node.py)11
-rwxr-xr-xdeps/v8/tools/node/build_gn.py82
-rwxr-xr-xdeps/v8/tools/node/fetch_deps.py96
-rwxr-xr-xdeps/v8/tools/node/node_common.py43
-rwxr-xr-xdeps/v8/tools/node/test_backport_node.py (renamed from deps/v8/tools/release/test_backport_node.py)3
-rwxr-xr-xdeps/v8/tools/node/test_update_node.py (renamed from deps/v8/tools/release/test_update_node.py)1
-rw-r--r--deps/v8/tools/node/testdata/node/deps/v8/.gitignore (renamed from deps/v8/tools/release/testdata/node/deps/v8/.gitignore)0
-rw-r--r--deps/v8/tools/node/testdata/node/deps/v8/baz/delete_me (renamed from deps/v8/tools/release/testdata/node/deps/v8/baz/delete_me)0
-rw-r--r--deps/v8/tools/node/testdata/node/deps/v8/baz/v8_foo (renamed from deps/v8/tools/release/testdata/node/deps/v8/baz/v8_foo)0
-rw-r--r--deps/v8/tools/node/testdata/node/deps/v8/delete_me (renamed from deps/v8/tools/release/testdata/node/deps/v8/delete_me)0
-rw-r--r--deps/v8/tools/node/testdata/node/deps/v8/include/v8-version.h (renamed from deps/v8/tools/release/testdata/node/deps/v8/include/v8-version.h)0
-rw-r--r--deps/v8/tools/node/testdata/node/deps/v8/v8_foo (renamed from deps/v8/tools/release/testdata/node/deps/v8/v8_foo)0
-rw-r--r--deps/v8/tools/node/testdata/v8/.gitignore (renamed from deps/v8/tools/release/testdata/v8/.gitignore)0
-rw-r--r--deps/v8/tools/node/testdata/v8/base/trace_event/common/common0
-rw-r--r--deps/v8/tools/node/testdata/v8/baz/v8_foo (renamed from deps/v8/tools/release/testdata/v8/baz/v8_foo)0
-rw-r--r--deps/v8/tools/node/testdata/v8/baz/v8_new (renamed from deps/v8/tools/release/testdata/v8/baz/v8_new)0
-rw-r--r--deps/v8/tools/node/testdata/v8/new/v8_new (renamed from deps/v8/tools/release/testdata/v8/new/v8_new)0
-rw-r--r--deps/v8/tools/node/testdata/v8/v8_foo (renamed from deps/v8/tools/release/testdata/v8/v8_foo)0
-rw-r--r--deps/v8/tools/node/testdata/v8/v8_new (renamed from deps/v8/tools/release/testdata/v8/v8_new)0
-rwxr-xr-xdeps/v8/tools/node/update_node.py167
-rwxr-xr-xdeps/v8/tools/parse-processor41
-rw-r--r--deps/v8/tools/parse-processor-driver.js33
-rw-r--r--deps/v8/tools/parse-processor.html337
-rw-r--r--deps/v8/tools/parse-processor.js918
-rwxr-xr-xdeps/v8/tools/perf-compare.py1
-rwxr-xr-xdeps/v8/tools/perf-to-html.py1
-rw-r--r--deps/v8/tools/predictable_wrapper.py66
-rwxr-xr-xdeps/v8/tools/presubmit.py14
-rwxr-xr-xdeps/v8/tools/process-heap-prof.py120
-rw-r--r--deps/v8/tools/run-num-fuzzer.isolate1
-rwxr-xr-xdeps/v8/tools/run_perf.py33
-rw-r--r--deps/v8/tools/testrunner/PRESUBMIT.py8
-rw-r--r--deps/v8/tools/testrunner/base_runner.py141
-rwxr-xr-xdeps/v8/tools/testrunner/deopt_fuzzer.py75
-rwxr-xr-xdeps/v8/tools/testrunner/gc_fuzzer.py113
-rw-r--r--deps/v8/tools/testrunner/local/command.py171
-rw-r--r--deps/v8/tools/testrunner/local/commands.py152
-rw-r--r--deps/v8/tools/testrunner/local/execution.py275
-rw-r--r--deps/v8/tools/testrunner/local/junit_output.py9
-rw-r--r--deps/v8/tools/testrunner/local/perfdata.py17
-rw-r--r--deps/v8/tools/testrunner/local/pool.py22
-rw-r--r--deps/v8/tools/testrunner/local/progress.py185
-rw-r--r--deps/v8/tools/testrunner/local/statusfile.py79
-rw-r--r--deps/v8/tools/testrunner/local/testsuite.py328
-rwxr-xr-xdeps/v8/tools/testrunner/local/testsuite_unittest.py54
-rw-r--r--deps/v8/tools/testrunner/local/utils.py2
-rw-r--r--deps/v8/tools/testrunner/local/variants.py36
-rw-r--r--deps/v8/tools/testrunner/local/verbose.py57
-rw-r--r--deps/v8/tools/testrunner/objects/context.py6
-rw-r--r--deps/v8/tools/testrunner/objects/output.py3
-rw-r--r--deps/v8/tools/testrunner/objects/predictable.py57
-rw-r--r--deps/v8/tools/testrunner/objects/testcase.py271
-rw-r--r--deps/v8/tools/testrunner/outproc/__init__.py3
-rw-r--r--deps/v8/tools/testrunner/outproc/base.py166
-rw-r--r--deps/v8/tools/testrunner/outproc/message.py56
-rw-r--r--deps/v8/tools/testrunner/outproc/mkgrokdump.py31
-rw-r--r--deps/v8/tools/testrunner/outproc/mozilla.py33
-rw-r--r--deps/v8/tools/testrunner/outproc/test262.py54
-rw-r--r--deps/v8/tools/testrunner/outproc/webkit.py18
-rwxr-xr-xdeps/v8/tools/testrunner/standard_runner.py250
-rw-r--r--deps/v8/tools/testrunner/testproc/__init__.py3
-rw-r--r--deps/v8/tools/testrunner/testproc/base.py207
-rw-r--r--deps/v8/tools/testrunner/testproc/execution.py92
-rw-r--r--deps/v8/tools/testrunner/testproc/filter.py83
-rw-r--r--deps/v8/tools/testrunner/testproc/loader.py27
-rw-r--r--deps/v8/tools/testrunner/testproc/progress.py385
-rw-r--r--deps/v8/tools/testrunner/testproc/rerun.py59
-rw-r--r--deps/v8/tools/testrunner/testproc/result.py97
-rw-r--r--deps/v8/tools/testrunner/testproc/shard.py30
-rw-r--r--deps/v8/tools/testrunner/testproc/variant.py68
-rw-r--r--deps/v8/tools/turbolizer/code-view.js2
-rw-r--r--deps/v8/tools/turbolizer/graph-view.js32
-rw-r--r--deps/v8/tools/turbolizer/index.html16
-rw-r--r--deps/v8/tools/turbolizer/turbo-visualizer.css147
-rw-r--r--deps/v8/tools/turbolizer/turbo-visualizer.js255
-rw-r--r--deps/v8/tools/turbolizer/view.js12
-rw-r--r--deps/v8/tools/unittests/PRESUBMIT.py9
-rwxr-xr-xdeps/v8/tools/unittests/predictable_wrapper_test.py57
-rwxr-xr-x[-rw-r--r--]deps/v8/tools/unittests/run_perf_test.py40
-rwxr-xr-xdeps/v8/tools/unittests/run_tests_test.py667
-rw-r--r--deps/v8/tools/unittests/testdata/expected_test_results1.json107
-rw-r--r--deps/v8/tools/unittests/testdata/expected_test_results2.json74
-rw-r--r--deps/v8/tools/unittests/testdata/predictable_mocked.py28
-rw-r--r--deps/v8/tools/unittests/testdata/testroot1/d8_mocked.py16
-rw-r--r--deps/v8/tools/unittests/testdata/testroot1/test/sweet/sweet.status35
-rw-r--r--deps/v8/tools/unittests/testdata/testroot1/test/sweet/testcfg.py31
-rw-r--r--deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json18
-rw-r--r--deps/v8/tools/unittests/testdata/testroot2/d8_mocked.py29
-rw-r--r--deps/v8/tools/unittests/testdata/testroot2/test/sweet/sweet.status6
-rw-r--r--deps/v8/tools/unittests/testdata/testroot2/test/sweet/testcfg.py31
-rw-r--r--deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json18
-rwxr-xr-xdeps/v8/tools/v8-rolls.sh120
-rw-r--r--deps/v8/tools/v8heapconst.py370
-rwxr-xr-xdeps/v8/tools/wasm/update-wasm-spec-tests.sh29
-rw-r--r--deps/v8/tools/whitespace.txt7
1284 files changed, 70884 insertions, 43157 deletions
diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore
index 85ff179226..f07fc1cb62 100644
--- a/deps/v8/.gitignore
+++ b/deps/v8/.gitignore
@@ -50,9 +50,6 @@
/test/fuzzer/wasm_corpus
/test/fuzzer/wasm_corpus.tar.gz
/test/mozilla/data
-/test/promises-aplus/promises-tests
-/test/promises-aplus/promises-tests.tar.gz
-/test/promises-aplus/sinon
/test/test262/data
/test/test262/data.tar
/test/test262/harness
@@ -94,6 +91,7 @@ TAGS
bsuite
compile_commands.json
d8
+!/test/mjsunit/d8
d8_g
gccauses
gcsuspects
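Note on the .gitignore hunk above: the added `!/test/mjsunit/d8` entry is a gitignore negation. The bare `d8` pattern immediately before it ignores the built d8 shell wherever it appears, and the leading `!` re-includes the checked-in test directory of the same name. Schematically (semantics only, not part of this patch):

    d8                 # ignore anything named d8, e.g. the built shell binary
    !/test/mjsunit/d8  # ...but keep this tracked test directory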
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index 08391a5566..b2b01df888 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -96,6 +96,7 @@ Luis Reis <luis.m.reis@gmail.com>
Luke Zarko <lukezarko@gmail.com>
Maciej Małecki <me@mmalecki.com>
Marcin Cieślak <saper@marcincieslak.com>
+Marcin Wiącek <marcin@mwiacek.com>
Mateusz Czeladka <mateusz.szczap@gmail.com>
Mathias Bynens <mathias@qiwi.be>
Matt Hanselman <mjhanselman@gmail.com>
@@ -106,6 +107,7 @@ Michael Smith <mike@w3.org>
Michaël Zasso <mic.besace@gmail.com>
Mike Gilbert <floppymaster@gmail.com>
Mike Pennisi <mike@mikepennisi.com>
+Mikhail Gusarov <dottedmag@dottedmag.net>
Milton Chiang <milton.chiang@mediatek.com>
Myeong-bo Shim <m0609.shim@samsung.com>
Nicolas Antonius Ernst Leopold Maria Kaiser <nikai@nikai.net>
@@ -118,6 +120,7 @@ Peter Rybin <peter.rybin@gmail.com>
Peter Varga <pvarga@inf.u-szeged.hu>
Peter Wong <peter.wm.wong@gmail.com>
Paul Lind <plind44@gmail.com>
+Qingyan Li <qingyan.liqy@alibaba-inc.com>
Qiuyi Zhang <qiuyi.zqy@alibaba-inc.com>
Rafal Krypa <rafal@krypa.net>
Refael Ackermann <refack@gmail.com>
@@ -133,6 +136,8 @@ Sanjoy Das <sanjoy@playingwithpointers.com>
Seo Sanghyeon <sanxiyn@gmail.com>
Stefan Penner <stefan.penner@gmail.com>
Sylvestre Ledru <sledru@mozilla.com>
+Taketoshi Aono <brn@b6n.ch>
+Tiancheng "Timothy" Gu <timothygu99@gmail.com>
Tobias Burnus <burnus@net-b.de>
Victor Costan <costan@gmail.com>
Vlad Burlik <vladbph@gmail.com>
diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn
index 8492cb5f62..4970765972 100644
--- a/deps/v8/BUILD.gn
+++ b/deps/v8/BUILD.gn
@@ -89,6 +89,9 @@ declare_args() {
# Sets -dV8_CONCURRENT_MARKING
v8_enable_concurrent_marking = true
+ # Enables various testing features.
+ v8_enable_test_features = ""
+
# Build the snapshot with unwinding information for perf.
# Sets -dV8_USE_SNAPSHOT_WITH_UNWINDING_INFO.
v8_perf_prof_unwinding_info = false
@@ -133,8 +136,6 @@ declare_args() {
# while rolling in a new version of V8.
v8_check_microtasks_scopes_consistency = ""
- v8_monolithic = false
-
# Enable mitigations for executing untrusted code.
v8_untrusted_code_mitigations = true
}
@@ -152,6 +153,9 @@ if (v8_enable_disassembler == "") {
if (v8_enable_trace_maps == "") {
v8_enable_trace_maps = is_debug
}
+if (v8_enable_test_features == "") {
+ v8_enable_test_features = is_debug || dcheck_always_on
+}
if (v8_enable_v8_checks == "") {
v8_enable_v8_checks = is_debug
}
@@ -278,6 +282,10 @@ config("features") {
if (v8_enable_trace_feedback_updates) {
defines += [ "V8_TRACE_FEEDBACK_UPDATES" ]
}
+ if (v8_enable_test_features) {
+ defines += [ "V8_ENABLE_ALLOCATION_TIMEOUT" ]
+ defines += [ "V8_ENABLE_FORCE_SLOW_PATH" ]
+ }
if (v8_enable_v8_checks) {
defines += [ "V8_ENABLE_CHECKS" ]
}
@@ -511,6 +519,12 @@ config("toolchain") {
# TODO(hans): Remove once http://crbug.com/428099 is resolved.
"-Winconsistent-missing-override",
]
+
+ if (v8_current_cpu != "mips" && v8_current_cpu != "mipsel") {
+ # We exclude MIPS because the IsMipsArchVariant macro causes trouble.
+ cflags += [ "-Wunreachable-code" ]
+ }
+
if (v8_current_cpu == "x64" || v8_current_cpu == "arm64" ||
v8_current_cpu == "mips64el") {
cflags += [ "-Wshorten-64-to-32" ]
@@ -575,12 +589,10 @@ action("js2c") {
"src/js/macros.py",
"src/messages.h",
"src/js/prologue.js",
- "src/js/v8natives.js",
"src/js/array.js",
"src/js/typedarray.js",
"src/js/messages.js",
"src/js/spread.js",
- "src/js/proxy.js",
"src/debug/mirrors.js",
"src/debug/debug.js",
"src/debug/liveedit.js",
@@ -755,6 +767,10 @@ action("postmortem-metadata") {
"src/objects-inl.h",
"src/objects/code-inl.h",
"src/objects/code.h",
+ "src/objects/data-handler.h",
+ "src/objects/data-handler-inl.h",
+ "src/objects/fixed-array-inl.h",
+ "src/objects/fixed-array.h",
"src/objects/js-array-inl.h",
"src/objects/js-array.h",
"src/objects/js-regexp-inl.h",
@@ -1680,6 +1696,10 @@ v8_source_set("v8_base") {
"src/heap/spaces.h",
"src/heap/store-buffer.cc",
"src/heap/store-buffer.h",
+ "src/heap/stress-marking-observer.cc",
+ "src/heap/stress-marking-observer.h",
+ "src/heap/stress-scavenge-observer.cc",
+ "src/heap/stress-scavenge-observer.h",
"src/heap/sweeper.cc",
"src/heap/sweeper.h",
"src/heap/worklist.h",
@@ -1803,6 +1823,8 @@ v8_source_set("v8_base") {
"src/objects/debug-objects.h",
"src/objects/descriptor-array.h",
"src/objects/dictionary.h",
+ "src/objects/fixed-array-inl.h",
+ "src/objects/fixed-array.h",
"src/objects/frame-array-inl.h",
"src/objects/frame-array.h",
"src/objects/hash-table-inl.h",
@@ -1811,6 +1833,8 @@ v8_source_set("v8_base") {
"src/objects/intl-objects.h",
"src/objects/js-array-inl.h",
"src/objects/js-array.h",
+ "src/objects/js-collection-inl.h",
+ "src/objects/js-collection.h",
"src/objects/js-regexp-inl.h",
"src/objects/js-regexp.h",
"src/objects/literal-objects-inl.h",
@@ -1974,6 +1998,8 @@ v8_source_set("v8_base") {
"src/safepoint-table.h",
"src/setup-isolate.h",
"src/signature.h",
+ "src/simulator-base.cc",
+ "src/simulator-base.h",
"src/simulator.h",
"src/snapshot/builtin-deserializer-allocator.cc",
"src/snapshot/builtin-deserializer-allocator.h",
@@ -2032,6 +2058,7 @@ v8_source_set("v8_base") {
"src/string-stream.h",
"src/strtod.cc",
"src/strtod.h",
+ "src/third_party/utf8-decoder/utf8-decoder.h",
"src/tracing/trace-event.cc",
"src/tracing/trace-event.h",
"src/tracing/traced-value.cc",
@@ -2066,6 +2093,8 @@ v8_source_set("v8_base") {
"src/v8threads.h",
"src/value-serializer.cc",
"src/value-serializer.h",
+ "src/vector-slot-pair.cc",
+ "src/vector-slot-pair.h",
"src/vector.h",
"src/version.cc",
"src/version.h",
@@ -2073,9 +2102,11 @@ v8_source_set("v8_base") {
"src/visitors.h",
"src/vm-state-inl.h",
"src/vm-state.h",
+ "src/wasm/baseline/liftoff-assembler-defs.h",
"src/wasm/baseline/liftoff-assembler.cc",
"src/wasm/baseline/liftoff-assembler.h",
"src/wasm/baseline/liftoff-compiler.cc",
+ "src/wasm/baseline/liftoff-register.h",
"src/wasm/compilation-manager.cc",
"src/wasm/compilation-manager.h",
"src/wasm/decoder.h",
@@ -2097,15 +2128,18 @@ v8_source_set("v8_base") {
"src/wasm/streaming-decoder.h",
"src/wasm/wasm-api.cc",
"src/wasm/wasm-api.h",
+ "src/wasm/wasm-code-manager.cc",
+ "src/wasm/wasm-code-manager.h",
"src/wasm/wasm-code-specialization.cc",
"src/wasm/wasm-code-specialization.h",
"src/wasm/wasm-code-wrapper.cc",
"src/wasm/wasm-code-wrapper.h",
+ "src/wasm/wasm-constants.h",
"src/wasm/wasm-debug.cc",
+ "src/wasm/wasm-engine.cc",
+ "src/wasm/wasm-engine.h",
"src/wasm/wasm-external-refs.cc",
"src/wasm/wasm-external-refs.h",
- "src/wasm/wasm-heap.cc",
- "src/wasm/wasm-heap.h",
"src/wasm/wasm-interpreter.cc",
"src/wasm/wasm-interpreter.h",
"src/wasm/wasm-js.cc",
@@ -2184,7 +2218,6 @@ v8_source_set("v8_base") {
"src/ia32/sse-instr.h",
"src/regexp/ia32/regexp-macro-assembler-ia32.cc",
"src/regexp/ia32/regexp-macro-assembler-ia32.h",
- "src/wasm/baseline/ia32/liftoff-assembler-ia32-defs.h",
"src/wasm/baseline/ia32/liftoff-assembler-ia32.h",
]
} else if (v8_current_cpu == "x64") {
@@ -2199,7 +2232,6 @@ v8_source_set("v8_base") {
"src/regexp/x64/regexp-macro-assembler-x64.cc",
"src/regexp/x64/regexp-macro-assembler-x64.h",
"src/third_party/valgrind/valgrind.h",
- "src/wasm/baseline/x64/liftoff-assembler-x64-defs.h",
"src/wasm/baseline/x64/liftoff-assembler-x64.h",
"src/x64/assembler-x64-inl.h",
"src/x64/assembler-x64.cc",
@@ -2253,7 +2285,6 @@ v8_source_set("v8_base") {
"src/debug/arm/debug-arm.cc",
"src/regexp/arm/regexp-macro-assembler-arm.cc",
"src/regexp/arm/regexp-macro-assembler-arm.h",
- "src/wasm/baseline/arm/liftoff-assembler-arm-defs.h",
"src/wasm/baseline/arm/liftoff-assembler-arm.h",
]
} else if (v8_current_cpu == "arm64") {
@@ -2299,7 +2330,6 @@ v8_source_set("v8_base") {
"src/debug/arm64/debug-arm64.cc",
"src/regexp/arm64/regexp-macro-assembler-arm64.cc",
"src/regexp/arm64/regexp-macro-assembler-arm64.h",
- "src/wasm/baseline/arm64/liftoff-assembler-arm64-defs.h",
"src/wasm/baseline/arm64/liftoff-assembler-arm64.h",
]
if (use_jumbo_build) {
@@ -2336,7 +2366,6 @@ v8_source_set("v8_base") {
"src/mips/simulator-mips.h",
"src/regexp/mips/regexp-macro-assembler-mips.cc",
"src/regexp/mips/regexp-macro-assembler-mips.h",
- "src/wasm/baseline/mips/liftoff-assembler-mips-defs.h",
"src/wasm/baseline/mips/liftoff-assembler-mips.h",
]
} else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
@@ -2366,7 +2395,6 @@ v8_source_set("v8_base") {
"src/mips64/simulator-mips64.h",
"src/regexp/mips64/regexp-macro-assembler-mips64.cc",
"src/regexp/mips64/regexp-macro-assembler-mips64.h",
- "src/wasm/baseline/mips64/liftoff-assembler-mips64-defs.h",
"src/wasm/baseline/mips64/liftoff-assembler-mips64.h",
]
} else if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") {
@@ -2396,7 +2424,6 @@ v8_source_set("v8_base") {
"src/ppc/simulator-ppc.h",
"src/regexp/ppc/regexp-macro-assembler-ppc.cc",
"src/regexp/ppc/regexp-macro-assembler-ppc.h",
- "src/wasm/baseline/ppc/liftoff-assembler-ppc-defs.h",
"src/wasm/baseline/ppc/liftoff-assembler-ppc.h",
]
} else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
@@ -2426,7 +2453,6 @@ v8_source_set("v8_base") {
"src/s390/macro-assembler-s390.h",
"src/s390/simulator-s390.cc",
"src/s390/simulator-s390.h",
- "src/wasm/baseline/s390/liftoff-assembler-s390-defs.h",
"src/wasm/baseline/s390/liftoff-assembler-s390.h",
]
}
@@ -2506,6 +2532,8 @@ v8_component("v8_libbase") {
"src/base/once.cc",
"src/base/once.h",
"src/base/optional.h",
+ "src/base/page-allocator.cc",
+ "src/base/page-allocator.h",
"src/base/platform/condition-variable.cc",
"src/base/platform/condition-variable.h",
"src/base/platform/elapsed-timer.h",
@@ -2812,6 +2840,7 @@ group("v8_fuzzers") {
testonly = true
deps = [
":v8_simple_json_fuzzer",
+ ":v8_simple_multi_return_fuzzer",
":v8_simple_parser_fuzzer",
":v8_simple_regexp_fuzzer",
":v8_simple_wasm_async_fuzzer",
@@ -3062,6 +3091,24 @@ v8_source_set("json_fuzzer") {
v8_fuzzer("json_fuzzer") {
}
+v8_source_set("multi_return_fuzzer") {
+ sources = [
+ "test/fuzzer/multi-return.cc",
+ ]
+
+ deps = [
+ ":fuzzer_support",
+ ]
+
+ configs = [
+ ":external_config",
+ ":internal_config_base",
+ ]
+}
+
+v8_fuzzer("multi_return_fuzzer") {
+}
+
v8_source_set("parser_fuzzer") {
sources = [
"test/fuzzer/parser.cc",
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index 248b42b8d0..1fe3135a01 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,1303 @@
+2018-01-17: Version 6.5.254
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-17: Version 6.5.253
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-17: Version 6.5.252
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-17: Version 6.5.251
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-17: Version 6.5.250
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-17: Version 6.5.249
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-17: Version 6.5.248
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-17: Version 6.5.247
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-17: Version 6.5.246
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-16: Version 6.5.245
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-16: Version 6.5.244
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-16: Version 6.5.243
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-16: Version 6.5.242
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-16: Version 6.5.241
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-16: Version 6.5.240
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-16: Version 6.5.239
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-16: Version 6.5.238
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-16: Version 6.5.237
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-16: Version 6.5.236
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-16: Version 6.5.235
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-16: Version 6.5.234
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-16: Version 6.5.233
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-16: Version 6.5.232
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-16: Version 6.5.231
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-16: Version 6.5.230
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-16: Version 6.5.229
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-16: Version 6.5.228
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-16: Version 6.5.227
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-16: Version 6.5.226
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-16: Version 6.5.225
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-16: Version 6.5.224
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-16: Version 6.5.223
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-16: Version 6.5.222
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-16: Version 6.5.221
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-16: Version 6.5.220
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-15: Version 6.5.219
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-15: Version 6.5.218
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-15: Version 6.5.217
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-15: Version 6.5.216
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-15: Version 6.5.215
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-15: Version 6.5.214
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-15: Version 6.5.213
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-15: Version 6.5.212
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-15: Version 6.5.211
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-15: Version 6.5.210
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-15: Version 6.5.209
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-15: Version 6.5.208
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-15: Version 6.5.207
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-15: Version 6.5.206
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-15: Version 6.5.205
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-15: Version 6.5.204
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-15: Version 6.5.203
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-15: Version 6.5.202
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-15: Version 6.5.201
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-15: Version 6.5.200
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-15: Version 6.5.199
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-14: Version 6.5.198
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-13: Version 6.5.197
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-12: Version 6.5.196
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-12: Version 6.5.195
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-12: Version 6.5.194
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-12: Version 6.5.193
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-12: Version 6.5.192
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-12: Version 6.5.191
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-12: Version 6.5.190
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-12: Version 6.5.189
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-11: Version 6.5.188
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-11: Version 6.5.187
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-11: Version 6.5.186
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-11: Version 6.5.185
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-11: Version 6.5.184
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-11: Version 6.5.183
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-11: Version 6.5.182
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-11: Version 6.5.181
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-11: Version 6.5.180
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-11: Version 6.5.179
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-11: Version 6.5.178
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-11: Version 6.5.177
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-11: Version 6.5.176
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-11: Version 6.5.175
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-11: Version 6.5.174
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-11: Version 6.5.173
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-10: Version 6.5.172
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-10: Version 6.5.171
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-10: Version 6.5.170
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-10: Version 6.5.169
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-10: Version 6.5.168
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-10: Version 6.5.167
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-10: Version 6.5.166
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-10: Version 6.5.165
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-10: Version 6.5.164
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-10: Version 6.5.163
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-09: Version 6.5.162
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-09: Version 6.5.161
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-09: Version 6.5.160
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-09: Version 6.5.159
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-09: Version 6.5.158
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-09: Version 6.5.157
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-09: Version 6.5.156
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-09: Version 6.5.155
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-09: Version 6.5.154
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-09: Version 6.5.153
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-09: Version 6.5.152
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-09: Version 6.5.151
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-08: Version 6.5.150
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-08: Version 6.5.149
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-08: Version 6.5.148
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-08: Version 6.5.147
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-05: Version 6.5.146
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-05: Version 6.5.145
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-05: Version 6.5.144
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-05: Version 6.5.143
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-05: Version 6.5.142
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-05: Version 6.5.141
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-05: Version 6.5.140
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-04: Version 6.5.139
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-04: Version 6.5.138
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-04: Version 6.5.137
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-04: Version 6.5.136
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-04: Version 6.5.135
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-04: Version 6.5.134
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-04: Version 6.5.133
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-04: Version 6.5.132
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-04: Version 6.5.131
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-04: Version 6.5.130
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-04: Version 6.5.129
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-04: Version 6.5.128
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-04: Version 6.5.127
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-04: Version 6.5.126
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-03: Version 6.5.125
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-03: Version 6.5.124
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-02: Version 6.5.123
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-02: Version 6.5.122
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-02: Version 6.5.121
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-02: Version 6.5.120
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-02: Version 6.5.119
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-02: Version 6.5.118
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-02: Version 6.5.117
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-29: Version 6.5.116
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-29: Version 6.5.115
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-29: Version 6.5.114
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-29: Version 6.5.113
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-29: Version 6.5.112
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-29: Version 6.5.111
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-28: Version 6.5.110
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-28: Version 6.5.109
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-27: Version 6.5.108
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-22: Version 6.5.107
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-22: Version 6.5.106
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-22: Version 6.5.105
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-22: Version 6.5.104
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-21: Version 6.5.103
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-21: Version 6.5.102
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-20: Version 6.5.101
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-20: Version 6.5.100
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-20: Version 6.5.99
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-20: Version 6.5.98
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-20: Version 6.5.97
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-20: Version 6.5.96
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-20: Version 6.5.95
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-20: Version 6.5.94
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-20: Version 6.5.93
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-20: Version 6.5.92
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-20: Version 6.5.91
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-20: Version 6.5.90
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-19: Version 6.5.89
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-19: Version 6.5.88
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-19: Version 6.5.87
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-19: Version 6.5.86
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-19: Version 6.5.85
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-19: Version 6.5.84
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-19: Version 6.5.83
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-19: Version 6.5.82
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-19: Version 6.5.81
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-19: Version 6.5.80
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-19: Version 6.5.79
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-19: Version 6.5.78
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-19: Version 6.5.77
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-18: Version 6.5.76
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-18: Version 6.5.75
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-18: Version 6.5.74
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-16: Version 6.5.73
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-15: Version 6.5.72
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-15: Version 6.5.71
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-15: Version 6.5.70
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-14: Version 6.5.69
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-14: Version 6.5.68
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-13: Version 6.5.67
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-13: Version 6.5.66
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-13: Version 6.5.65
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-12: Version 6.5.64
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-12: Version 6.5.63
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-12: Version 6.5.62
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-12: Version 6.5.61
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-12: Version 6.5.60
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-12: Version 6.5.59
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-12: Version 6.5.58
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-12: Version 6.5.57
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-12: Version 6.5.56
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-12: Version 6.5.55
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-12: Version 6.5.54
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-12: Version 6.5.53
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-11: Version 6.5.52
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-11: Version 6.5.51
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-11: Version 6.5.50
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-11: Version 6.5.49
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-11: Version 6.5.48
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-11: Version 6.5.47
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-11: Version 6.5.46
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-11: Version 6.5.45
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-11: Version 6.5.44
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-11: Version 6.5.43
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-11: Version 6.5.42
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-10: Version 6.5.41
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-10: Version 6.5.40
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-09: Version 6.5.39
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-09: Version 6.5.38
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-08: Version 6.5.37
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-08: Version 6.5.36
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-08: Version 6.5.35
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-08: Version 6.5.34
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-08: Version 6.5.33
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-08: Version 6.5.32
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-08: Version 6.5.31
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-08: Version 6.5.30
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-08: Version 6.5.29
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-08: Version 6.5.28
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-08: Version 6.5.27
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-07: Version 6.5.26
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-07: Version 6.5.25
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-07: Version 6.5.24
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-07: Version 6.5.23
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-07: Version 6.5.22
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-07: Version 6.5.21
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-07: Version 6.5.20
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-07: Version 6.5.19
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-07: Version 6.5.18
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-07: Version 6.5.17
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-07: Version 6.5.16
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-07: Version 6.5.15
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-06: Version 6.5.14
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-06: Version 6.5.13
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-05: Version 6.5.12
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-05: Version 6.5.11
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-05: Version 6.5.10
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-05: Version 6.5.9
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-05: Version 6.5.8
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-04: Version 6.5.7
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-04: Version 6.5.6
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-04: Version 6.5.5
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-04: Version 6.5.4
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-04: Version 6.5.3
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-04: Version 6.5.2
+
+ Performance and stability improvements on all platforms.
+
+
+2017-12-03: Version 6.5.1
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-30: Version 6.4.394
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-30: Version 6.4.393
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-29: Version 6.4.392
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-29: Version 6.4.391
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-29: Version 6.4.390
+
+ Performance and stability improvements on all platforms.
+
+
+2017-11-29: Version 6.4.389
+
+ Performance and stability improvements on all platforms.
+
+
2017-11-29: Version 6.4.388
Performance and stability improvements on all platforms.
diff --git a/deps/v8/DEPS b/deps/v8/DEPS
index 0d6b49d3b4..bc9e4e0a90 100644
--- a/deps/v8/DEPS
+++ b/deps/v8/DEPS
@@ -5,27 +5,32 @@
vars = {
'checkout_instrumented_libraries': False,
'chromium_url': 'https://chromium.googlesource.com',
+ 'build_for_node': False,
}
deps = {
'v8/build':
- Var('chromium_url') + '/chromium/src/build.git' + '@' + '9338ce52d0b9bcef34c38285fbd5023b62739fac',
+ Var('chromium_url') + '/chromium/src/build.git' + '@' + 'b3a78cd03a95c30ff10f863f736249eb04f0f34d',
'v8/tools/gyp':
Var('chromium_url') + '/external/gyp.git' + '@' + 'd61a9397e668fa9843c4aa7da9e79460fe590bfb',
'v8/third_party/icu':
- Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '741688ebf328da9adc52505248bf4e2ef868722c',
+ Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'c8ca2962b46670ec89071ffd1291688983cd319c',
'v8/third_party/instrumented_libraries':
- Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '28417458ac4dc79f68915079d0f283f682504cc0',
+ Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + 'b7578b4132cf73ca3265e2ee0b7bd0a422a54ebf',
'v8/buildtools':
- Var('chromium_url') + '/chromium/buildtools.git' + '@' + '505de88083136eefd056e5ee4ca0f01fe9b33de8',
+ Var('chromium_url') + '/chromium/buildtools.git' + '@' + '6fe4a3251488f7af86d64fc25cf442e817cf6133',
'v8/base/trace_event/common':
Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '0e9a47d74970bee1bbfc063c47215406f8918699',
+ 'v8/third_party/android_ndk': {
+ 'url': Var('chromium_url') + '/android_ndk.git' + '@' + 'e951c37287c7d8cd915bf8d4149fd4a06d808b55',
+ 'condition': 'checkout_android',
+ },
'v8/third_party/android_tools': {
- 'url': Var('chromium_url') + '/android_tools.git' + '@' + 'a2e9bc7c1b41d983577907df51d339fb1e0fd02f',
+ 'url': Var('chromium_url') + '/android_tools.git' + '@' + 'c78b25872734e0038ae2a333edc645cd96bc232d',
'condition': 'checkout_android',
},
'v8/third_party/catapult': {
- 'url': Var('chromium_url') + '/catapult.git' + '@' + '11d7efb857ae77eff1cea4640e3f3d9ac49cba0a',
+ 'url': Var('chromium_url') + '/catapult.git' + '@' + 'b4826a52853c9c2778d496f6c6fa853f777f94df',
'condition': 'checkout_android',
},
'v8/third_party/colorama/src': {
@@ -37,7 +42,7 @@ deps = {
'v8/third_party/markupsafe':
Var('chromium_url') + '/chromium/src/third_party/markupsafe.git' + '@' + '8f45f5cfa0009d2a70589bcda0349b8cb2b72783',
'v8/tools/swarming_client':
- Var('chromium_url') + '/infra/luci/client-py.git' + '@' + '4bd9152f8a975d57c972c071dfb4ddf668e02200',
+ Var('chromium_url') + '/infra/luci/client-py.git' + '@' + '88229872dd17e71658fe96763feaa77915d8cbd6',
'v8/testing/gtest':
Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '6f8a66431cb592dad629028a50b3dd418a408c87',
'v8/testing/gmock':
@@ -47,15 +52,15 @@ deps = {
'v8/test/mozilla/data':
Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be',
'v8/test/test262/data':
- Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '5d4c667b271a9b39d0de73aef5ffe6879c6f8811',
+ Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '8311965251953d4745aeb68c98fb71fab2eac1d0',
'v8/test/test262/harness':
Var('chromium_url') + '/external/github.com/test262-utils/test262-harness-py.git' + '@' + '0f2acdd882c84cff43b9d60df7574a1901e2cdcd',
'v8/tools/clang':
- Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '8688d267571de76a56746324dcc249bf4232b85a',
+ Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '27088876ff821e8a1518383576a43662a3255d56',
'v8/tools/luci-go':
- Var('chromium_url') + '/chromium/src/tools/luci-go.git' + '@' + '45a8a51fda92e123619a69e7644d9c64a320b0c1',
+ Var('chromium_url') + '/chromium/src/tools/luci-go.git' + '@' + 'd882048313f6f51df29856406fa03b620c1d0205',
'v8/test/wasm-js':
- Var('chromium_url') + '/external/github.com/WebAssembly/spec.git' + '@' + 'a7e226a92e660a3d5413cfea4269824f513259d2',
+ Var('chromium_url') + '/external/github.com/WebAssembly/spec.git' + '@' + 'a25083ac7076b05e3f304ec9e093ef1b1ee09422',
}
recursedeps = [
@@ -93,7 +98,7 @@ hooks = [
{
'name': 'clang_format_win',
'pattern': '.',
- 'condition': 'host_os == "win"',
+ 'condition': 'host_os == "win" and build_for_node != True',
'action': [ 'download_from_google_storage',
'--no_resume',
'--platform=win32',
@@ -105,7 +110,7 @@ hooks = [
{
'name': 'clang_format_mac',
'pattern': '.',
- 'condition': 'host_os == "mac"',
+ 'condition': 'host_os == "mac" and build_for_node != True',
'action': [ 'download_from_google_storage',
'--no_resume',
'--platform=darwin',
@@ -117,7 +122,7 @@ hooks = [
{
'name': 'clang_format_linux',
'pattern': '.',
- 'condition': 'host_os == "linux"',
+ 'condition': 'host_os == "linux" and build_for_node != True',
'action': [ 'download_from_google_storage',
'--no_resume',
'--platform=linux*',
@@ -129,6 +134,7 @@ hooks = [
{
'name': 'gcmole',
'pattern': '.',
+ 'condition': 'build_for_node != True',
# TODO(machenbach): Insert condition and remove GYP_DEFINES dependency.
'action': [
'python',
@@ -138,6 +144,7 @@ hooks = [
{
'name': 'jsfunfuzz',
'pattern': '.',
+ 'condition': 'build_for_node != True',
# TODO(machenbach): Insert condition and remove GYP_DEFINES dependency.
'action': [
'python',
@@ -148,7 +155,7 @@ hooks = [
{
'name': 'luci-go_win',
'pattern': '.',
- 'condition': 'host_os == "win"',
+ 'condition': 'host_os == "win" and build_for_node != True',
'action': [ 'download_from_google_storage',
'--no_resume',
'--platform=win32',
@@ -160,7 +167,7 @@ hooks = [
{
'name': 'luci-go_mac',
'pattern': '.',
- 'condition': 'host_os == "mac"',
+ 'condition': 'host_os == "mac" and build_for_node != True',
'action': [ 'download_from_google_storage',
'--no_resume',
'--platform=darwin',
@@ -172,7 +179,7 @@ hooks = [
{
'name': 'luci-go_linux',
'pattern': '.',
- 'condition': 'host_os == "linux"',
+ 'condition': 'host_os == "linux" and build_for_node != True',
'action': [ 'download_from_google_storage',
'--no_resume',
'--platform=linux*',
@@ -221,6 +228,7 @@ hooks = [
{
'name': 'wasm_spec_tests',
'pattern': '.',
+ 'condition': 'build_for_node != True',
'action': [ 'download_from_google_storage',
'--no_resume',
'--no_auth',
@@ -232,6 +240,7 @@ hooks = [
{
'name': 'closure_compiler',
'pattern': '.',
+ 'condition': 'build_for_node != True',
'action': [ 'download_from_google_storage',
'--no_resume',
'--no_auth',
@@ -246,6 +255,7 @@ hooks = [
# change.
'name': 'sysroot',
'pattern': '.',
+ 'condition': 'build_for_node != True',
'action': [
'python',
'v8/build/linux/sysroot_scripts/install-sysroot.py',
@@ -287,7 +297,7 @@ hooks = [
{
'name': 'binutils',
'pattern': 'v8/third_party/binutils',
- 'condition': 'host_os == "linux"',
+ 'condition': 'host_os == "linux" and build_for_node != True',
'action': [
'python',
'v8/third_party/binutils/download.py',
@@ -313,6 +323,7 @@ hooks = [
# A change to a .gyp, .gypi, or to GYP itself should run the generator.
'name': 'regyp_if_needed',
'pattern': '.',
+ 'condition': 'build_for_node != True',
'action': ['python', 'v8/gypfiles/gyp_v8', '--running-as-hook'],
},
# Download and initialize "vpython" VirtualEnv environment packages.
diff --git a/deps/v8/PRESUBMIT.py b/deps/v8/PRESUBMIT.py
index a595220a09..b69e8f5089 100644
--- a/deps/v8/PRESUBMIT.py
+++ b/deps/v8/PRESUBMIT.py
@@ -430,6 +430,6 @@ def PostUploadHook(cl, change, output_api):
return output_api.EnsureCQIncludeTrybotsAreAdded(
cl,
[
- 'master.tryserver.v8:v8_linux_noi18n_rel_ng'
+ 'luci.v8.try:v8_linux_noi18n_rel_ng'
],
'Automatically added noi18n trybots to run tests on CQ.')
diff --git a/deps/v8/build_overrides/build.gni b/deps/v8/build_overrides/build.gni
index b656fce61a..e4dcf1cc46 100644
--- a/deps/v8/build_overrides/build.gni
+++ b/deps/v8/build_overrides/build.gni
@@ -8,7 +8,7 @@ build_with_chromium = false
# Uncomment these to specify a different NDK location and version in
# non-Chromium builds.
-# default_android_ndk_root = "//third_party/android_tools/ndk"
+# default_android_ndk_root = "//third_party/android_ndk"
# default_android_ndk_version = "r10e"
# Some non-Chromium builds don't support building java targets.
diff --git a/deps/v8/gni/v8.gni b/deps/v8/gni/v8.gni
index 4b8292a244..ce9953ac68 100644
--- a/deps/v8/gni/v8.gni
+++ b/deps/v8/gni/v8.gni
@@ -45,6 +45,9 @@ declare_args() {
# Use static libraries instead of source_sets.
v8_static_library = false
+
+ # Enable monolithic static library for embedders.
+ v8_monolithic = false
}
if (v8_use_external_startup_data == "") {
@@ -97,7 +100,7 @@ if (v8_code_coverage && !is_clang) {
]
}
-if (is_posix && v8_enable_backtrace) {
+if (is_posix && (v8_enable_backtrace || v8_monolithic)) {
v8_remove_configs += [ "//build/config/gcc:symbol_visibility_hidden" ]
v8_add_configs += [ "//build/config/gcc:symbol_visibility_default" ]
}
diff --git a/deps/v8/gypfiles/all.gyp b/deps/v8/gypfiles/all.gyp
index 593ba2a795..2f9cf858c0 100644
--- a/deps/v8/gypfiles/all.gyp
+++ b/deps/v8/gypfiles/all.gyp
@@ -33,6 +33,7 @@
'../test/benchmarks/benchmarks.gyp:*',
'../test/debugger/debugger.gyp:*',
'../test/default.gyp:*',
+ '../test/d8_default.gyp:*',
'../test/intl/intl.gyp:*',
'../test/message/message.gyp:*',
'../test/mjsunit/mjsunit.gyp:*',
diff --git a/deps/v8/gypfiles/standalone.gypi b/deps/v8/gypfiles/standalone.gypi
index 7a45dc615f..ec47f1c0ab 100644
--- a/deps/v8/gypfiles/standalone.gypi
+++ b/deps/v8/gypfiles/standalone.gypi
@@ -296,7 +296,7 @@
'variables': {
# The Android toolchain needs to use the absolute path to the NDK
# because it is used at different levels in the GYP files.
- 'android_ndk_root%': '<(base_dir)/third_party/android_tools/ndk/',
+ 'android_ndk_root%': '<(base_dir)/third_party/android_ndk/',
'android_host_arch%': "<!(uname -m | sed -e 's/i[3456]86/x86/')",
# Version of the NDK. Used to ensure full rebuilds on NDK rolls.
'android_ndk_version%': 'r12b',
diff --git a/deps/v8/include/v8-inspector.h b/deps/v8/include/v8-inspector.h
index 5478e127f9..6de8234fb8 100644
--- a/deps/v8/include/v8-inspector.h
+++ b/deps/v8/include/v8-inspector.h
@@ -149,8 +149,9 @@ class V8_EXPORT V8InspectorSession {
// Remote objects.
virtual std::unique_ptr<protocol::Runtime::API::RemoteObject> wrapObject(
- v8::Local<v8::Context>, v8::Local<v8::Value>,
- const StringView& groupName) = 0;
+ v8::Local<v8::Context>, v8::Local<v8::Value>, const StringView& groupName,
+ bool generatePreview) = 0;
+
virtual bool unwrapObject(std::unique_ptr<StringBuffer>* error,
const StringView& objectId, v8::Local<v8::Value>*,
v8::Local<v8::Context>*,
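Callers of wrapObject now decide whether the inspector eagerly builds an object preview. A sketch of the updated call, assuming a hypothetical `session`, `context`, and `value` supplied by the embedder:

```
// Sketch: passing the new generatePreview argument added above.
std::unique_ptr<v8_inspector::protocol::Runtime::API::RemoteObject> remote =
    session->wrapObject(context, value, v8_inspector::StringView(),
                        /*generatePreview=*/true);
```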
diff --git a/deps/v8/include/v8-platform.h b/deps/v8/include/v8-platform.h
index 43420a972c..2bb14df93e 100644
--- a/deps/v8/include/v8-platform.h
+++ b/deps/v8/include/v8-platform.h
@@ -167,6 +167,74 @@ class TracingController {
};
/**
+ * A V8 memory page allocator.
+ *
+ * Can be implemented by an embedder to manage large host OS allocations.
+ */
+class PageAllocator {
+ public:
+ virtual ~PageAllocator() = default;
+
+ /**
+ * Gets the page granularity for AllocatePages and FreePages. Addresses and
+ * lengths for those calls should be multiples of AllocatePageSize().
+ */
+ virtual size_t AllocatePageSize() = 0;
+
+ /**
+ * Gets the page granularity for SetPermissions and ReleasePages. Addresses
+ * and lengths for those calls should be multiples of CommitPageSize().
+ */
+ virtual size_t CommitPageSize() = 0;
+
+ /**
+ * Sets the random seed so that GetRandomMmapAddr() will generate repeatable
+ * sequences of random mmap addresses.
+ */
+ virtual void SetRandomMmapSeed(int64_t seed) = 0;
+
+ /**
+ * Returns a randomized address, suitable for memory allocation under ASLR.
+ * The address will be aligned to AllocatePageSize.
+ */
+ virtual void* GetRandomMmapAddr() = 0;
+
+ /**
+ * Memory permissions.
+ */
+ enum Permission {
+ kNoAccess,
+ kReadWrite,
+ // TODO(hpayer): Remove this flag. Memory should never be rwx.
+ kReadWriteExecute,
+ kReadExecute
+ };
+
+ /**
+ * Allocates memory in range with the given alignment and permission.
+ */
+ virtual void* AllocatePages(void* address, size_t length, size_t alignment,
+ Permission permissions) = 0;
+
+ /**
+ * Frees memory in a range that was allocated by a call to AllocatePages.
+ */
+ virtual bool FreePages(void* address, size_t length) = 0;
+
+ /**
+ * Releases memory in a range that was allocated by a call to AllocatePages.
+ */
+ virtual bool ReleasePages(void* address, size_t length,
+ size_t new_length) = 0;
+
+ /**
+ * Sets permissions on pages in an allocated range.
+ */
+ virtual bool SetPermissions(void* address, size_t length,
+ Permission permissions) = 0;
+};
+
+/**
* V8 Platform abstraction layer.
*
* The embedder has to provide an implementation of this interface before
@@ -187,13 +255,35 @@ class Platform {
virtual ~Platform() = default;
/**
+ * Allows the embedder to manage memory page allocations.
+ */
+ virtual PageAllocator* GetPageAllocator() {
+ // TODO(bbudge) Make this abstract after all embedders implement this.
+ return nullptr;
+ }
+
+ /**
* Enables the embedder to respond in cases where V8 can't allocate large
* blocks of memory. V8 retries the failed allocation once after calling this
* method. On success, execution continues; otherwise V8 exits with a fatal
* error.
* Embedder overrides of this function must NOT call back into V8.
*/
- virtual void OnCriticalMemoryPressure() {}
+ virtual void OnCriticalMemoryPressure() {
+ // TODO(bbudge) Remove this when embedders override the following method.
+ // See crbug.com/634547.
+ }
+
+ /**
+ * Enables the embedder to respond in cases where V8 can't allocate large
+ * memory regions. The |length| parameter is the amount of memory needed.
+ * Returns true if memory is now available. Returns false if no memory could
+ * be made available. V8 will retry allocations until this method returns
+ * false.
+ *
+ * Embedder overrides of this function must NOT call back into V8.
+ */
+ virtual bool OnCriticalMemoryPressure(size_t length) { return false; }
/**
* Gets the number of threads that are used to execute background tasks. Is
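The new PageAllocator interface lets an embedder take over V8's page-level memory management, returned from an override of Platform::GetPageAllocator(). A minimal sketch of an embedder-side implementation, assuming POSIX mmap and 4 KiB pages; `MmapPageAllocator` is a hypothetical name, not part of this patch:

```
#include <sys/mman.h>
#include "include/v8-platform.h"

// A sketch only: assumes 4 KiB OS pages and leans on mmap's default
// page alignment rather than honoring arbitrary |alignment| requests.
class MmapPageAllocator : public v8::PageAllocator {
 public:
  size_t AllocatePageSize() override { return 4096; }  // assumed page size
  size_t CommitPageSize() override { return 4096; }
  void SetRandomMmapSeed(int64_t seed) override {}     // no-op in this sketch
  void* GetRandomMmapAddr() override { return nullptr; }  // let the OS choose
  void* AllocatePages(void* address, size_t length, size_t alignment,
                      Permission permissions) override {
    void* result = mmap(address, length, ToProt(permissions),
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    return result == MAP_FAILED ? nullptr : result;
  }
  bool FreePages(void* address, size_t length) override {
    return munmap(address, length) == 0;
  }
  bool ReleasePages(void* address, size_t length, size_t new_length) override {
    // Unmap the tail beyond |new_length|.
    return munmap(static_cast<char*>(address) + new_length,
                  length - new_length) == 0;
  }
  bool SetPermissions(void* address, size_t length,
                      Permission permissions) override {
    return mprotect(address, length, ToProt(permissions)) == 0;
  }

 private:
  static int ToProt(Permission permission) {
    switch (permission) {
      case kNoAccess:         return PROT_NONE;
      case kReadWrite:        return PROT_READ | PROT_WRITE;
      case kReadWriteExecute: return PROT_READ | PROT_WRITE | PROT_EXEC;
      case kReadExecute:      return PROT_READ | PROT_EXEC;
    }
    return PROT_NONE;
  }
};
```

The overloaded OnCriticalMemoryPressure(size_t) added in the same hunk complements this: it gives the embedder a chance to free memory of a known size before V8 gives up on an allocation.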
diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h
index cb85da2cd7..6cc98294ec 100644
--- a/deps/v8/include/v8-version.h
+++ b/deps/v8/include/v8-version.h
@@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 6
-#define V8_MINOR_VERSION 4
-#define V8_BUILD_NUMBER 388
-#define V8_PATCH_LEVEL 46
+#define V8_MINOR_VERSION 5
+#define V8_BUILD_NUMBER 254
+#define V8_PATCH_LEVEL 31
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index c09f610333..acb3efbc71 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -313,6 +313,7 @@ class Local {
friend class String;
friend class Object;
friend class Context;
+ friend class Isolate;
friend class Private;
template<class F> friend class internal::CustomArguments;
friend Local<Primitive> Undefined(Isolate* isolate);
@@ -1129,15 +1130,15 @@ class V8_EXPORT Location {
};
/**
- * This is an unfinished experimental feature, and is only exposed
- * here for internal testing purposes. DO NOT USE.
- *
* A compiled JavaScript module.
*/
class V8_EXPORT Module {
public:
/**
* The different states a module can be in.
+ * This corresponds to the states used in ECMAScript except that "evaluated"
+ * is split into kEvaluated and kErrored, indicating success and failure,
+ * respectively.
*/
enum Status {
kUninstantiated,
@@ -1191,9 +1192,6 @@ class V8_EXPORT Module {
* instantiation. (In the case where the callback throws an exception, that
* exception is propagated.)
*/
- V8_DEPRECATED("Use Maybe<bool> version",
- bool Instantiate(Local<Context> context,
- ResolveCallback callback));
V8_WARN_UNUSED_RESULT Maybe<bool> InstantiateModule(Local<Context> context,
ResolveCallback callback);
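With the experimental warning removed, this is now the documented way to compile and run ES modules. A sketch under the 6.5 API, assuming `isolate` and `context` are set up by the embedder and the module has no imports; `ResolveNothing` and `RunModule` are hypothetical names:

```
// Sketch: resolver for a dependency-free module. Real embedders map
// |specifier| to other compiled modules here.
v8::MaybeLocal<v8::Module> ResolveNothing(v8::Local<v8::Context> context,
                                          v8::Local<v8::String> specifier,
                                          v8::Local<v8::Module> referrer) {
  return v8::MaybeLocal<v8::Module>();  // no imports expected in this sketch
}

void RunModule(v8::Isolate* isolate, v8::Local<v8::Context> context,
               v8::Local<v8::String> source_text) {
  // The trailing True() marks the origin as a module for CompileModule.
  v8::ScriptOrigin origin(
      v8::String::NewFromUtf8(isolate, "module.mjs"),
      v8::Local<v8::Integer>(), v8::Local<v8::Integer>(),
      v8::Local<v8::Boolean>(), v8::Local<v8::Integer>(),
      v8::Local<v8::Value>(), v8::Local<v8::Boolean>(),
      v8::Local<v8::Boolean>(), v8::True(isolate));
  v8::ScriptCompiler::Source source(source_text, origin);
  v8::Local<v8::Module> module;
  if (!v8::ScriptCompiler::CompileModule(isolate, &source).ToLocal(&module))
    return;
  // InstantiateModule returns Maybe<bool>; failure also shows up as the
  // kErrored status described above.
  if (module->InstantiateModule(context, ResolveNothing).FromMaybe(false)) {
    v8::Local<v8::Value> result;
    (void)module->Evaluate(context).ToLocal(&result);
  }
}
```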
@@ -1423,7 +1421,8 @@ class V8_EXPORT ScriptCompiler {
kConsumeParserCache,
kProduceCodeCache,
kProduceFullCodeCache,
- kConsumeCodeCache
+ kConsumeCodeCache,
+ kEagerCompile
};
/**
@@ -1443,7 +1442,8 @@ class V8_EXPORT ScriptCompiler {
kNoCacheBecauseExtensionModule,
kNoCacheBecausePacScript,
kNoCacheBecauseInDocumentWrite,
- kNoCacheBecauseResourceWithNoCacheHandler
+ kNoCacheBecauseResourceWithNoCacheHandler,
+ kNoCacheBecauseDeferredProduceCodeCache
};
/**
@@ -1459,11 +1459,6 @@ class V8_EXPORT ScriptCompiler {
* \return Compiled script object (context independent; for running it must be
* bound to a context).
*/
- static V8_DEPRECATED("Use maybe version",
- Local<UnboundScript> CompileUnbound(
- Isolate* isolate, Source* source,
- CompileOptions options = kNoCompileOptions,
- NoCacheReason no_cache_reason = kNoCacheNoReason));
static V8_WARN_UNUSED_RESULT MaybeLocal<UnboundScript> CompileUnboundScript(
Isolate* isolate, Source* source,
CompileOptions options = kNoCompileOptions,
@@ -1480,11 +1475,6 @@ class V8_EXPORT ScriptCompiler {
* when this function was called. When run it will always use this
* context.
*/
- static V8_DEPRECATED(
- "Use maybe version",
- Local<Script> Compile(Isolate* isolate, Source* source,
- CompileOptions options = kNoCompileOptions,
- NoCacheReason no_cache_reason = kNoCacheNoReason));
static V8_WARN_UNUSED_RESULT MaybeLocal<Script> Compile(
Local<Context> context, Source* source,
CompileOptions options = kNoCompileOptions,
@@ -1512,11 +1502,6 @@ class V8_EXPORT ScriptCompiler {
* (ScriptStreamingTask has been run). V8 doesn't construct the source string
* during streaming, so the embedder needs to pass the full source here.
*/
- static V8_DEPRECATED("Use maybe version",
- Local<Script> Compile(Isolate* isolate,
- StreamedSource* source,
- Local<String> full_source_string,
- const ScriptOrigin& origin));
static V8_WARN_UNUSED_RESULT MaybeLocal<Script> Compile(
Local<Context> context, StreamedSource* source,
Local<String> full_source_string, const ScriptOrigin& origin);
@@ -1542,9 +1527,6 @@ class V8_EXPORT ScriptCompiler {
static uint32_t CachedDataVersionTag();
/**
- * This is an unfinished experimental feature, and is only exposed
- * here for internal testing purposes. DO NOT USE.
- *
* Compile an ES module, returning a Module that encapsulates
* the compiled code.
*
@@ -1576,6 +1558,14 @@ class V8_EXPORT ScriptCompiler {
Local<String> arguments[], size_t context_extension_count,
Local<Object> context_extensions[]);
+ /**
+ * Creates and returns code cache for the specified unbound_script.
+ * This will return nullptr if the script cannot be serialized. The
+ * CachedData returned by this function should be owned by the caller.
+ */
+ static CachedData* CreateCodeCache(Local<UnboundScript> unbound_script,
+ Local<String> source);
+
private:
static V8_WARN_UNUSED_RESULT MaybeLocal<UnboundScript> CompileUnboundInternal(
Isolate* isolate, Source* source, CompileOptions options,
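A sketch of the cache-producing flow that CreateCodeCache enables, assuming `isolate` and a `source_text` Local<String> supplied by the embedder; a later run would feed the persisted bytes back via kConsumeCodeCache:

```
// Sketch: produce a code cache. The returned CachedData is owned by
// the caller, and may be nullptr if the script cannot be serialized.
v8::ScriptCompiler::Source source(source_text);
v8::Local<v8::UnboundScript> unbound;
if (v8::ScriptCompiler::CompileUnboundScript(isolate, &source)
        .ToLocal(&unbound)) {
  v8::ScriptCompiler::CachedData* cache =
      v8::ScriptCompiler::CreateCodeCache(unbound, source_text);
  if (cache != nullptr) {
    // persist cache->data / cache->length somewhere, then release it
    delete cache;
  }
}
```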
@@ -1647,7 +1637,6 @@ class V8_EXPORT Message {
* Returns the index within the line of the last character where
* the error occurred.
*/
- V8_DEPRECATED("Use maybe version", int GetEndColumn() const);
V8_WARN_UNUSED_RESULT Maybe<int> GetEndColumn(Local<Context> context) const;
/**
@@ -1704,11 +1693,6 @@ class V8_EXPORT StackTrace {
int GetFrameCount() const;
/**
- * Returns StackTrace as a v8::Array that contains StackFrame objects.
- */
- V8_DEPRECATED("Use native API instead", Local<Array> AsArray());
-
- /**
* Grab a snapshot of the current JavaScript execution stack.
*
* \param frame_limit The maximum number of stack frames we want to capture.
@@ -1829,8 +1813,6 @@ class V8_EXPORT JSON {
* \param json_string The string to parse.
* \return The corresponding value if successfully parsed.
*/
- static V8_DEPRECATED("Use the maybe version taking context",
- Local<Value> Parse(Local<String> json_string));
static V8_DEPRECATE_SOON("Use the maybe version taking context",
MaybeLocal<Value> Parse(Isolate* isolate,
Local<String> json_string));
@@ -2366,34 +2348,24 @@ class V8_EXPORT Value : public Data {
Local<Number> ToNumber(Isolate* isolate) const);
V8_DEPRECATE_SOON("Use maybe version",
Local<String> ToString(Isolate* isolate) const);
- V8_DEPRECATED("Use maybe version",
- Local<String> ToDetailString(Isolate* isolate) const);
V8_DEPRECATE_SOON("Use maybe version",
Local<Object> ToObject(Isolate* isolate) const);
V8_DEPRECATE_SOON("Use maybe version",
Local<Integer> ToInteger(Isolate* isolate) const);
- V8_DEPRECATED("Use maybe version",
- Local<Uint32> ToUint32(Isolate* isolate) const);
V8_DEPRECATE_SOON("Use maybe version",
Local<Int32> ToInt32(Isolate* isolate) const);
inline V8_DEPRECATE_SOON("Use maybe version",
Local<Boolean> ToBoolean() const);
- inline V8_DEPRECATED("Use maybe version", Local<Number> ToNumber() const);
inline V8_DEPRECATE_SOON("Use maybe version", Local<String> ToString() const);
- inline V8_DEPRECATED("Use maybe version",
- Local<String> ToDetailString() const);
inline V8_DEPRECATE_SOON("Use maybe version", Local<Object> ToObject() const);
inline V8_DEPRECATE_SOON("Use maybe version",
Local<Integer> ToInteger() const);
- inline V8_DEPRECATED("Use maybe version", Local<Uint32> ToUint32() const);
- inline V8_DEPRECATED("Use maybe version", Local<Int32> ToInt32() const);
/**
* Attempts to convert a string to an array index.
* Returns an empty handle if the conversion fails.
*/
- V8_DEPRECATED("Use maybe version", Local<Uint32> ToArrayIndex() const);
V8_WARN_UNUSED_RESULT MaybeLocal<Uint32> ToArrayIndex(
Local<Context> context) const;
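The surviving conversions all take a context and return Maybe types. A sketch of the pattern that replaces the deleted overloads, with `value` and `context` supplied by the embedder:

```
// Sketch: context-taking conversions instead of the removed overloads.
v8::Local<v8::String> str;
if (value->ToString(context).ToLocal(&str)) {
  // use |str|
}
v8::Maybe<uint32_t> maybe_index = value->Uint32Value(context);
if (maybe_index.IsJust()) {
  uint32_t index = maybe_index.FromJust();  // safe: IsJust() checked above
  (void)index;
}
```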
@@ -2724,13 +2696,6 @@ class V8_EXPORT String : public Name {
Isolate* isolate, const char* data, v8::NewStringType type,
int length = -1);
- /** Allocates a new string from Latin-1 data.*/
- static V8_DEPRECATED(
- "Use maybe version",
- Local<String> NewFromOneByte(Isolate* isolate, const uint8_t* data,
- NewStringType type = kNormalString,
- int length = -1));
-
/** Allocates a new string from Latin-1 data. Only returns an empty value
* when length > kMaxLength. **/
static V8_WARN_UNUSED_RESULT MaybeLocal<String> NewFromOneByte(
@@ -2764,9 +2729,6 @@ class V8_EXPORT String : public Name {
* should the underlying buffer be deallocated or modified except through the
* destructor of the external string resource.
*/
- static V8_DEPRECATED("Use maybe version",
- Local<String> NewExternal(
- Isolate* isolate, ExternalStringResource* resource));
static V8_WARN_UNUSED_RESULT MaybeLocal<String> NewExternalTwoByte(
Isolate* isolate, ExternalStringResource* resource);
@@ -2951,8 +2913,12 @@ class V8_EXPORT Private : public Data {
*/
static Local<Private> ForApi(Isolate* isolate, Local<String> name);
+ V8_INLINE static Private* Cast(Data* data);
+
private:
Private();
+
+ static void CheckCast(Data* that);
};
@@ -3152,19 +3118,6 @@ class V8_EXPORT Object : public Value {
V8_WARN_UNUSED_RESULT Maybe<bool> DefineProperty(
Local<Context> context, Local<Name> key, PropertyDescriptor& descriptor);
- // Sets an own property on this object bypassing interceptors and
- // overriding accessors or read-only properties.
- //
- // Note that if the object has an interceptor the property will be set
- // locally, but since the interceptor takes precedence the local property
- // will only be returned if the interceptor doesn't return a value.
- //
- // Note also that this only works for named properties.
- V8_DEPRECATED("Use CreateDataProperty / DefineOwnProperty",
- Maybe<bool> ForceSet(Local<Context> context, Local<Value> key,
- Local<Value> value,
- PropertyAttribute attribs = None));
-
V8_DEPRECATE_SOON("Use maybe version", Local<Value> Get(Local<Value> key));
V8_WARN_UNUSED_RESULT MaybeLocal<Value> Get(Local<Context> context,
Local<Value> key);
@@ -3178,16 +3131,12 @@ class V8_EXPORT Object : public Value {
* any combination of ReadOnly, DontEnum and DontDelete. Returns
* None when the property doesn't exist.
*/
- V8_DEPRECATED("Use maybe version",
- PropertyAttribute GetPropertyAttributes(Local<Value> key));
V8_WARN_UNUSED_RESULT Maybe<PropertyAttribute> GetPropertyAttributes(
Local<Context> context, Local<Value> key);
/**
* Returns Object.getOwnPropertyDescriptor as per ES2016 section 19.1.2.6.
*/
- V8_DEPRECATED("Use maybe version",
- Local<Value> GetOwnPropertyDescriptor(Local<Name> key));
V8_WARN_UNUSED_RESULT MaybeLocal<Value> GetOwnPropertyDescriptor(
Local<Context> context, Local<Name> key);
@@ -3214,28 +3163,12 @@ class V8_EXPORT Object : public Value {
V8_WARN_UNUSED_RESULT Maybe<bool> Delete(Local<Context> context,
Local<Value> key);
- V8_DEPRECATED("Use maybe version", bool Has(uint32_t index));
V8_WARN_UNUSED_RESULT Maybe<bool> Has(Local<Context> context,
uint32_t index);
- V8_DEPRECATED("Use maybe version", bool Delete(uint32_t index));
V8_WARN_UNUSED_RESULT Maybe<bool> Delete(Local<Context> context,
uint32_t index);
- V8_DEPRECATED("Use maybe version",
- bool SetAccessor(Local<String> name,
- AccessorGetterCallback getter,
- AccessorSetterCallback setter = 0,
- Local<Value> data = Local<Value>(),
- AccessControl settings = DEFAULT,
- PropertyAttribute attribute = None));
- V8_DEPRECATED("Use maybe version",
- bool SetAccessor(Local<Name> name,
- AccessorNameGetterCallback getter,
- AccessorNameSetterCallback setter = 0,
- Local<Value> data = Local<Value>(),
- AccessControl settings = DEFAULT,
- PropertyAttribute attribute = None));
V8_WARN_UNUSED_RESULT Maybe<bool> SetAccessor(Local<Context> context,
Local<Name> name,
AccessorNameGetterCallback getter,
@@ -3314,7 +3247,6 @@ class V8_EXPORT Object : public Value {
* be skipped by __proto__ and it does not consult the security
* handler.
*/
- V8_DEPRECATED("Use maybe version", bool SetPrototype(Local<Value> prototype));
V8_WARN_UNUSED_RESULT Maybe<bool> SetPrototype(Local<Context> context,
Local<Value> prototype);
@@ -3329,7 +3261,6 @@ class V8_EXPORT Object : public Value {
* This is different from Value::ToString() that may call
* user-defined toString function. This one does not.
*/
- V8_DEPRECATED("Use maybe version", Local<String> ObjectProtoToString());
V8_WARN_UNUSED_RESULT MaybeLocal<String> ObjectProtoToString(
Local<Context> context);
@@ -3380,9 +3311,6 @@ class V8_EXPORT Object : public Value {
void SetAlignedPointerInInternalFields(int argc, int indices[],
void* values[]);
- // Testers for local properties.
- V8_DEPRECATED("Use maybe version", bool HasOwnProperty(Local<String> key));
-
/**
* HasOwnProperty() is like JavaScript's Object.prototype.hasOwnProperty().
*
@@ -3422,9 +3350,6 @@ class V8_EXPORT Object : public Value {
* If result.IsEmpty() no real property was located in the prototype chain.
* This means interceptors in the prototype chain are not called.
*/
- V8_DEPRECATED(
- "Use maybe version",
- Local<Value> GetRealNamedPropertyInPrototypeChain(Local<String> key));
V8_WARN_UNUSED_RESULT MaybeLocal<Value> GetRealNamedPropertyInPrototypeChain(
Local<Context> context, Local<Name> key);
@@ -3433,10 +3358,6 @@ class V8_EXPORT Object : public Value {
* which can be None or any combination of ReadOnly, DontEnum and DontDelete.
* Interceptors in the prototype chain are not called.
*/
- V8_DEPRECATED(
- "Use maybe version",
- Maybe<PropertyAttribute> GetRealNamedPropertyAttributesInPrototypeChain(
- Local<String> key));
V8_WARN_UNUSED_RESULT Maybe<PropertyAttribute>
GetRealNamedPropertyAttributesInPrototypeChain(Local<Context> context,
Local<Name> key);
@@ -3446,8 +3367,6 @@ class V8_EXPORT Object : public Value {
* in the prototype chain.
* This means interceptors in the prototype chain are not called.
*/
- V8_DEPRECATED("Use maybe version",
- Local<Value> GetRealNamedProperty(Local<String> key));
V8_WARN_UNUSED_RESULT MaybeLocal<Value> GetRealNamedProperty(
Local<Context> context, Local<Name> key);
@@ -3456,9 +3375,6 @@ class V8_EXPORT Object : public Value {
* None or any combination of ReadOnly, DontEnum and DontDelete.
* Interceptors in the prototype chain are not called.
*/
- V8_DEPRECATED("Use maybe version",
- Maybe<PropertyAttribute> GetRealNamedPropertyAttributes(
- Local<String> key));
V8_WARN_UNUSED_RESULT Maybe<PropertyAttribute> GetRealNamedPropertyAttributes(
Local<Context> context, Local<Name> key);
@@ -3511,9 +3427,6 @@ class V8_EXPORT Object : public Value {
* Call an Object as a function if a callback is set by the
* ObjectTemplate::SetCallAsFunctionHandler method.
*/
- V8_DEPRECATED("Use maybe version",
- Local<Value> CallAsFunction(Local<Value> recv, int argc,
- Local<Value> argv[]));
V8_WARN_UNUSED_RESULT MaybeLocal<Value> CallAsFunction(Local<Context> context,
Local<Value> recv,
int argc,
@@ -3524,8 +3437,6 @@ class V8_EXPORT Object : public Value {
* ObjectTemplate::SetCallAsFunctionHandler method.
* Note: This method behaves like the Function::NewInstance method.
*/
- V8_DEPRECATED("Use maybe version",
- Local<Value> CallAsConstructor(int argc, Local<Value> argv[]));
V8_WARN_UNUSED_RESULT MaybeLocal<Value> CallAsConstructor(
Local<Context> context, int argc, Local<Value> argv[]);
@@ -3554,16 +3465,6 @@ class V8_EXPORT Array : public Object {
uint32_t Length() const;
/**
- * Clones an element at index |index|. Returns an empty
- * handle if cloning fails (for any reason).
- */
- V8_DEPRECATED("Cloning is not supported.",
- Local<Object> CloneElementAt(uint32_t index));
- V8_DEPRECATED("Cloning is not supported.",
- MaybeLocal<Object> CloneElementAt(Local<Context> context,
- uint32_t index));
-
- /**
* Creates a JavaScript array with the given length. If the length
* is negative the returned array will have length 0.
*/
@@ -3885,12 +3786,9 @@ class V8_EXPORT Function : public Object {
Local<Function> New(Isolate* isolate, FunctionCallback callback,
Local<Value> data = Local<Value>(), int length = 0));
- V8_DEPRECATED("Use maybe version",
- Local<Object> NewInstance(int argc, Local<Value> argv[]) const);
V8_WARN_UNUSED_RESULT MaybeLocal<Object> NewInstance(
Local<Context> context, int argc, Local<Value> argv[]) const;
- V8_DEPRECATED("Use maybe version", Local<Object> NewInstance() const);
V8_WARN_UNUSED_RESULT MaybeLocal<Object> NewInstance(
Local<Context> context) const {
return NewInstance(context, 0, nullptr);
@@ -3938,11 +3836,6 @@ class V8_EXPORT Function : public Object {
int GetScriptColumnNumber() const;
/**
- * Tells whether this function is builtin.
- */
- V8_DEPRECATED("this should no longer be used.", bool IsBuiltin() const);
-
- /**
* Returns scriptId.
*/
int ScriptId() const;
@@ -3983,8 +3876,8 @@ class V8_EXPORT Promise : public Object {
/**
* Create a new resolver, along with an associated promise in pending state.
*/
- static V8_DEPRECATE_SOON("Use maybe version",
- Local<Resolver> New(Isolate* isolate));
+ static V8_DEPRECATED("Use maybe version",
+ Local<Resolver> New(Isolate* isolate));
static V8_WARN_UNUSED_RESULT MaybeLocal<Resolver> New(
Local<Context> context);
@@ -3997,11 +3890,11 @@ class V8_EXPORT Promise : public Object {
* Resolve/reject the associated promise with a given value.
* Ignored if the promise is no longer pending.
*/
- V8_DEPRECATE_SOON("Use maybe version", void Resolve(Local<Value> value));
+ V8_DEPRECATED("Use maybe version", void Resolve(Local<Value> value));
V8_WARN_UNUSED_RESULT Maybe<bool> Resolve(Local<Context> context,
Local<Value> value);
- V8_DEPRECATE_SOON("Use maybe version", void Reject(Local<Value> value));
+ V8_DEPRECATED("Use maybe version", void Reject(Local<Value> value));
V8_WARN_UNUSED_RESULT Maybe<bool> Reject(Local<Context> context,
Local<Value> value);
@@ -4018,13 +3911,9 @@ class V8_EXPORT Promise : public Object {
* an argument. If the promise is already resolved/rejected, the handler is
* invoked at the end of turn.
*/
- V8_DEPRECATED("Use maybe version",
- Local<Promise> Catch(Local<Function> handler));
V8_WARN_UNUSED_RESULT MaybeLocal<Promise> Catch(Local<Context> context,
Local<Function> handler);
- V8_DEPRECATED("Use maybe version",
- Local<Promise> Then(Local<Function> handler));
V8_WARN_UNUSED_RESULT MaybeLocal<Promise> Then(Local<Context> context,
Local<Function> handler);
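The Maybe-based resolver flow these deprecations push embedders toward, as a sketch with `isolate` and `context` assumed:

```
// Sketch: create a resolver, hand out its promise, resolve via the
// context-taking API whose result must be consumed.
v8::Local<v8::Promise::Resolver> resolver;
if (v8::Promise::Resolver::New(context).ToLocal(&resolver)) {
  v8::Local<v8::Promise> promise = resolver->GetPromise();
  bool resolved = resolver->Resolve(context, v8::Number::New(isolate, 42))
                      .FromMaybe(false);
  (void)promise;
  (void)resolved;
}
```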
@@ -4133,7 +4022,7 @@ class V8_EXPORT PropertyDescriptor {
*/
class V8_EXPORT Proxy : public Object {
public:
- Local<Object> GetTarget();
+ Local<Value> GetTarget();
Local<Value> GetHandler();
bool IsRevoked();
void Revoke();
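GetTarget's return type widens from Local<Object> to Local<Value>, so callers should re-check what kind of value comes back. A sketch with a hypothetical `proxy`:

```
// Sketch: the widened return type of Proxy::GetTarget.
v8::Local<v8::Value> target = proxy->GetTarget();
if (target->IsObject()) {
  // treat as an object
}
```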
@@ -4918,7 +4807,6 @@ class V8_EXPORT NumberObject : public Object {
class V8_EXPORT BooleanObject : public Object {
public:
static Local<Value> New(Isolate* isolate, bool value);
- V8_DEPRECATED("Pass an isolate", static Local<Value> New(bool value));
bool ValueOf() const;
@@ -5460,7 +5348,8 @@ typedef bool (*AccessCheckCallback)(Local<Context> accessing_context,
* v8::Local<v8::ObjectTemplate> instance_t = t->InstanceTemplate();
* instance_t->SetAccessor(String::NewFromUtf8(isolate, "instance_accessor"),
* InstanceAccessorCallback);
- * instance_t->SetNamedPropertyHandler(PropertyHandlerCallback);
+ * instance_t->SetHandler(
+ * NamedPropertyHandlerConfiguration(PropertyHandlerCallback));
* instance_t->Set(String::NewFromUtf8(isolate, "instance_property"),
* Number::New(isolate, 3));
*
@@ -5638,8 +5527,12 @@ class V8_EXPORT FunctionTemplate : public Template {
*/
bool HasInstance(Local<Value> object);
+ V8_INLINE static FunctionTemplate* Cast(Data* data);
+
private:
FunctionTemplate();
+
+ static void CheckCast(Data* that);
friend class Context;
friend class ObjectTemplate;
};
@@ -5786,7 +5679,6 @@ class V8_EXPORT ObjectTemplate : public Template {
static Local<ObjectTemplate> New(
Isolate* isolate,
Local<FunctionTemplate> constructor = Local<FunctionTemplate>());
- static V8_DEPRECATED("Use isolate version", Local<ObjectTemplate> New());
/** Get a template included in the snapshot by index. */
static MaybeLocal<ObjectTemplate> FromSnapshot(Isolate* isolate,
@@ -5858,13 +5750,16 @@ class V8_EXPORT ObjectTemplate : public Template {
* \param data A piece of data that will be passed to the callbacks
* whenever they are invoked.
*/
- // TODO(dcarney): deprecate
- void SetNamedPropertyHandler(NamedPropertyGetterCallback getter,
- NamedPropertySetterCallback setter = 0,
- NamedPropertyQueryCallback query = 0,
- NamedPropertyDeleterCallback deleter = 0,
- NamedPropertyEnumeratorCallback enumerator = 0,
- Local<Value> data = Local<Value>());
+ V8_DEPRECATE_SOON(
+ "Use SetHandler(const NamedPropertyHandlerConfiguration) "
+ "with the kOnlyInterceptStrings flag set.",
+ void SetNamedPropertyHandler(
+ NamedPropertyGetterCallback getter,
+ NamedPropertySetterCallback setter = 0,
+ NamedPropertyQueryCallback query = 0,
+ NamedPropertyDeleterCallback deleter = 0,
+ NamedPropertyEnumeratorCallback enumerator = 0,
+ Local<Value> data = Local<Value>()));
/**
* Sets a named property handler on the object template.
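A sketch of the replacement API named in the deprecation message above, with a hypothetical `GetterCallback` and `InstallInterceptor`:

```
// Sketch: SetHandler with kOnlyInterceptStrings, the migration target
// for SetNamedPropertyHandler.
void GetterCallback(v8::Local<v8::Name> name,
                    const v8::PropertyCallbackInfo<v8::Value>& info) {
  // produce a value via info.GetReturnValue().Set(...)
}

void InstallInterceptor(v8::Local<v8::ObjectTemplate> templ) {
  templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
      GetterCallback, nullptr, nullptr, nullptr, nullptr,
      v8::Local<v8::Value>(),
      v8::PropertyHandlerFlags::kOnlyInterceptStrings));
}
```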
@@ -5984,10 +5879,13 @@ class V8_EXPORT ObjectTemplate : public Template {
*/
void SetImmutableProto();
+ V8_INLINE static ObjectTemplate* Cast(Data* data);
+
private:
ObjectTemplate();
static Local<ObjectTemplate> New(internal::Isolate* isolate,
Local<FunctionTemplate> constructor);
+ static void CheckCast(Data* that);
friend class FunctionTemplate;
};
@@ -6005,8 +5903,12 @@ class V8_EXPORT Signature : public Data {
Isolate* isolate,
Local<FunctionTemplate> receiver = Local<FunctionTemplate>());
+ V8_INLINE static Signature* Cast(Data* data);
+
private:
Signature();
+
+ static void CheckCast(Data* that);
};
@@ -6020,8 +5922,12 @@ class V8_EXPORT AccessorSignature : public Data {
Isolate* isolate,
Local<FunctionTemplate> receiver = Local<FunctionTemplate>());
+ V8_INLINE static AccessorSignature* Cast(Data* data);
+
private:
AccessorSignature();
+
+ static void CheckCast(Data* that);
};
@@ -6215,8 +6121,6 @@ class V8_EXPORT Exception {
* or capture the current stack trace if not available.
*/
static Local<Message> CreateMessage(Isolate* isolate, Local<Value> exception);
- V8_DEPRECATED("Use version with an Isolate*",
- static Local<Message> CreateMessage(Local<Value> exception));
/**
* Returns the original stack trace that was captured at the creation time
@@ -6338,11 +6242,6 @@ class PromiseRejectMessage {
V8_INLINE PromiseRejectEvent GetEvent() const { return event_; }
V8_INLINE Local<Value> GetValue() const { return value_; }
- V8_DEPRECATED("Use v8::Exception::CreateMessage(GetValue())->GetStackTrace()",
- V8_INLINE Local<StackTrace> GetStackTrace() const) {
- return stack_trace_;
- }
-
private:
Local<Promise> promise_;
PromiseRejectEvent event_;
@@ -6496,6 +6395,8 @@ class V8_EXPORT HeapStatistics {
size_t heap_size_limit() { return heap_size_limit_; }
size_t malloced_memory() { return malloced_memory_; }
size_t peak_malloced_memory() { return peak_malloced_memory_; }
+ size_t number_of_native_contexts() { return number_of_native_contexts_; }
+ size_t number_of_detached_contexts() { return number_of_detached_contexts_; }
/**
* Returns a 0/1 boolean, which signifies whether the V8 overwrite heap
@@ -6513,6 +6414,8 @@ class V8_EXPORT HeapStatistics {
size_t malloced_memory_;
size_t peak_malloced_memory_;
bool does_zap_garbage_;
+ size_t number_of_native_contexts_;
+ size_t number_of_detached_contexts_;
friend class V8;
friend class Isolate;
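Reading the two new counters is straightforward; a sketch assuming an initialized `isolate`:

```
// Sketch: the new context counters in HeapStatistics.
v8::HeapStatistics stats;
isolate->GetHeapStatistics(&stats);
size_t live = stats.number_of_native_contexts();
size_t detached = stats.number_of_detached_contexts();  // may hint at leaks
(void)live;
(void)detached;
```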
@@ -7064,6 +6967,7 @@ class V8_EXPORT Isolate {
kErrorPrepareStackTrace = 44,
kErrorStackTraceLimit = 45,
kWebAssemblyInstantiation = 46,
+ kDeoptimizerDisableSpeculation = 47,
// If you add new values here, you'll also need to update Chromium's:
// web_feature.mojom, UseCounterCallback.cpp, and enums.xml. V8 changes to
@@ -7125,9 +7029,6 @@ class V8_EXPORT Isolate {
HostImportModuleDynamicallyCallback callback);
/**
- * This is an unfinished experimental feature, and is only exposed
- * here for internal testing purposes. DO NOT USE.
- *
 * This specifies the callback called by the upcoming import.meta
 * language feature to retrieve host-defined metadata for a module.
*/
@@ -7203,6 +7104,14 @@ class V8_EXPORT Isolate {
V8_INLINE static uint32_t GetNumberOfDataSlots();
/**
+ * Returns data that was previously attached to the isolate snapshot via
+ * SnapshotCreator, and removes the reference to it. A repeated call with
+ * the same index returns an empty MaybeLocal.
+ */
+ template <class T>
+ V8_INLINE MaybeLocal<T> GetDataFromSnapshotOnce(size_t index);
+
+ /**
* Get statistics about the heap memory usage.
*/
void GetHeapStatistics(HeapStatistics* heap_statistics);
@@ -7315,7 +7224,7 @@ class V8_EXPORT Isolate {
* context of the top-most JavaScript frame. If there are no
* JavaScript frames an empty handle is returned.
*/
- V8_DEPRECATE_SOON(
+ V8_DEPRECATED(
"Calling context concept is not compatible with tail calls, and will be "
"removed.",
Local<Context> GetCallingContext());
@@ -7492,7 +7401,7 @@ class V8_EXPORT Isolate {
* further callbacks.
*/
void AddCallCompletedCallback(CallCompletedCallback callback);
- V8_DEPRECATE_SOON(
+ V8_DEPRECATED(
"Use callback with parameter",
void AddCallCompletedCallback(DeprecatedCallCompletedCallback callback));
@@ -7500,10 +7409,9 @@ class V8_EXPORT Isolate {
* Removes callback that was installed by AddCallCompletedCallback.
*/
void RemoveCallCompletedCallback(CallCompletedCallback callback);
- V8_DEPRECATE_SOON(
- "Use callback with parameter",
- void RemoveCallCompletedCallback(
- DeprecatedCallCompletedCallback callback));
+ V8_DEPRECATED("Use callback with parameter",
+ void RemoveCallCompletedCallback(
+ DeprecatedCallCompletedCallback callback));
/**
* Set the PromiseHook callback for various promise lifecycle
@@ -7518,38 +7426,36 @@ class V8_EXPORT Isolate {
void SetPromiseRejectCallback(PromiseRejectCallback callback);
/**
- * Experimental: Runs the Microtask Work Queue until empty
+ * Runs the Microtask Work Queue until empty.
* Any exceptions thrown by microtask callbacks are swallowed.
*/
void RunMicrotasks();
/**
- * Experimental: Enqueues the callback to the Microtask Work Queue
+ * Enqueues the callback to the Microtask Work Queue
*/
void EnqueueMicrotask(Local<Function> microtask);
/**
- * Experimental: Enqueues the callback to the Microtask Work Queue
+ * Enqueues the callback to the Microtask Work Queue
*/
void EnqueueMicrotask(MicrotaskCallback microtask, void* data = NULL);
/**
- * Experimental: Controls how Microtasks are invoked. See MicrotasksPolicy
- * for details.
+ * Controls how Microtasks are invoked. See MicrotasksPolicy for details.
*/
void SetMicrotasksPolicy(MicrotasksPolicy policy);
- V8_DEPRECATE_SOON("Use SetMicrotasksPolicy",
- void SetAutorunMicrotasks(bool autorun));
+ V8_DEPRECATED("Use SetMicrotasksPolicy",
+ void SetAutorunMicrotasks(bool autorun));
/**
- * Experimental: Returns the policy controlling how Microtasks are invoked.
+ * Returns the policy controlling how Microtasks are invoked.
*/
MicrotasksPolicy GetMicrotasksPolicy() const;
- V8_DEPRECATE_SOON("Use GetMicrotasksPolicy",
- bool WillAutorunMicrotasks() const);
+ V8_DEPRECATED("Use GetMicrotasksPolicy", bool WillAutorunMicrotasks() const);
/**
- * Experimental: adds a callback to notify the host application after
+ * Adds a callback to notify the host application after
* microtasks were run. The callback is triggered by explicit RunMicrotasks
* call or automatic microtasks execution (see SetAutorunMicrotasks).
*
@@ -7603,9 +7509,6 @@ class V8_EXPORT Isolate {
*/
bool IdleNotificationDeadline(double deadline_in_seconds);
- V8_DEPRECATED("use IdleNotificationDeadline()",
- bool IdleNotification(int idle_time_in_ms));
-
/**
* Optional notification that the system is running low on memory.
* V8 uses these notifications to attempt to free memory.
@@ -7847,6 +7750,7 @@ class V8_EXPORT Isolate {
template <class K, class V, class Traits>
friend class PersistentValueMapBase;
+ internal::Object** GetDataFromSnapshotOnce(size_t index);
void ReportExternalAllocationLimitReached();
void CheckMemoryPressure();
};
@@ -7886,17 +7790,6 @@ typedef uintptr_t (*ReturnAddressLocationResolver)(
*/
class V8_EXPORT V8 {
public:
- /** Set the callback to invoke in case of fatal errors. */
- V8_INLINE static V8_DEPRECATED(
- "Use isolate version",
- void SetFatalErrorHandler(FatalErrorCallback that));
-
- /**
- * Check if V8 is dead and therefore unusable. This is the case after
- * fatal errors such as out-of-memory situations.
- */
- V8_INLINE static V8_DEPRECATED("Use isolate version", bool IsDead());
-
/**
* Hand startup data to V8, in case the embedder has chosen to build
* V8 with external startup data.
@@ -7937,35 +7830,6 @@ class V8_EXPORT V8 {
/** Set the callback to invoke in case of Dcheck failures. */
static void SetDcheckErrorHandler(DcheckErrorCallback that);
- /**
- * Adds a message listener.
- *
- * The same message listener can be added more than once and in that
- * case it will be called more than once for each message.
- *
- * If data is specified, it will be passed to the callback when it is called.
- * Otherwise, the exception object will be passed to the callback instead.
- */
- V8_INLINE static V8_DEPRECATED(
- "Use isolate version",
- bool AddMessageListener(MessageCallback that,
- Local<Value> data = Local<Value>()));
-
- /**
- * Remove all message listeners from the specified callback function.
- */
- V8_INLINE static V8_DEPRECATED(
- "Use isolate version", void RemoveMessageListeners(MessageCallback that));
-
- /**
- * Tells V8 to capture current stack trace when uncaught exception occurs
- * and report it to the message listeners. The option is off by default.
- */
- V8_INLINE static V8_DEPRECATED(
- "Use isolate version",
- void SetCaptureStackTraceForUncaughtExceptions(
- bool capture, int frame_limit = 10,
- StackTrace::StackTraceOptions options = StackTrace::kOverview));
/**
* Sets V8 flags from a string.
@@ -7982,11 +7846,6 @@ class V8_EXPORT V8 {
/** Get the version string. */
static const char* GetVersion();
- /** Callback function for reporting failed access checks.*/
- V8_INLINE static V8_DEPRECATED(
- "Use isolate version",
- void SetFailedAccessCheckCallbackFunction(FailedAccessCheckCallback));
-
/**
* Initializes V8. This function needs to be called before the first Isolate
* is created. It always returns true.
@@ -8007,51 +7866,6 @@ class V8_EXPORT V8 {
ReturnAddressLocationResolver return_address_resolver);
/**
- * Forcefully terminate the current thread of JavaScript execution
- * in the given isolate.
- *
- * This method can be used by any thread even if that thread has not
- * acquired the V8 lock with a Locker object.
- *
- * \param isolate The isolate in which to terminate the current JS execution.
- */
- V8_INLINE static V8_DEPRECATED("Use isolate version",
- void TerminateExecution(Isolate* isolate));
-
- /**
- * Is V8 terminating JavaScript execution.
- *
- * Returns true if JavaScript execution is currently terminating
- * because of a call to TerminateExecution. In that case there are
- * still JavaScript frames on the stack and the termination
- * exception is still active.
- *
- * \param isolate The isolate in which to check.
- */
- V8_INLINE static V8_DEPRECATED(
- "Use isolate version",
- bool IsExecutionTerminating(Isolate* isolate = NULL));
-
- /**
- * Resume execution capability in the given isolate, whose execution
- * was previously forcefully terminated using TerminateExecution().
- *
- * When execution is forcefully terminated using TerminateExecution(),
- * the isolate can not resume execution until all JavaScript frames
- * have propagated the uncatchable exception which is generated. This
- * method allows the program embedding the engine to handle the
- * termination event and resume execution capability, even if
- * JavaScript frames remain on the stack.
- *
- * This method can be used by any thread even if that thread has not
- * acquired the V8 lock with a Locker object.
- *
- * \param isolate The isolate in which to resume execution capability.
- */
- V8_INLINE static V8_DEPRECATED(
- "Use isolate version", void CancelTerminateExecution(Isolate* isolate));
-
- /**
* Releases any resources used by v8 and stops any utility threads
* that may be running. Note that disposing v8 is permanent, it
* cannot be reinitialized.
@@ -8063,26 +7877,6 @@ class V8_EXPORT V8 {
static bool Dispose();
/**
- * Iterates through all external resources referenced from current isolate
- * heap. GC is not invoked prior to iterating, therefore there is no
- * guarantee that visited objects are still alive.
- */
- V8_INLINE static V8_DEPRECATED(
- "Use isolate version",
- void VisitExternalResources(ExternalResourceVisitor* visitor));
-
- /**
- * Initialize the ICU library bundled with V8. The embedder should only
- * invoke this method when using the bundled ICU. Returns true on success.
- *
- * If V8 was compiled with the ICU data in an external file, the location
- * of the data file has to be provided.
- */
- V8_DEPRECATE_SOON(
- "Use version with default location.",
- static bool InitializeICU(const char* icu_data_file = nullptr));
-
- /**
* Initialize the ICU library bundled with V8. The embedder should only
* invoke this method when using the bundled ICU. If V8 was compiled with
* the ICU data in an external file and when the default location of that
@@ -8254,6 +8048,24 @@ class V8_EXPORT SnapshotCreator {
size_t AddTemplate(Local<Template> template_obj);
/**
+ * Attach arbitrary V8::Data to the context snapshot, which can be retrieved
+ * via Context::GetDataFromSnapshot after deserialization. This data does not
+ * survive when a new snapshot is created from an existing snapshot.
+ * \returns the index for retrieval.
+ */
+ template <class T>
+ V8_INLINE size_t AddData(Local<Context> context, Local<T> object);
+
+ /**
+ * Attach arbitrary V8::Data to the isolate snapshot, which can be retrieved
+ * via Isolate::GetDataFromSnapshot after deserialization. This data does not
+ * survive when a new snapshot is created from an existing snapshot.
+ * \returns the index for retrieval.
+ */
+ template <class T>
+ V8_INLINE size_t AddData(Local<T> object);
+
+ /**
 * Creates a snapshot data blob.
* This must not be called from within a handle scope.
* \param function_code_handling whether to include compiled function code
@@ -8268,6 +8080,9 @@ class V8_EXPORT SnapshotCreator {
void operator=(const SnapshotCreator&) = delete;
private:
+ size_t AddData(Local<Context> context, internal::Object* object);
+ size_t AddData(internal::Object* object);
+
void* data_;
};
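AddData and GetDataFromSnapshotOnce pair up across serialization. A sketch assuming a SnapshotCreator `creator` set up per the embedder docs and, later, a `context` deserialized from its blob; the Isolate overload works the same way:

```
// Sketch: attach data at snapshot time, retrieve it exactly once after
// deserialization.
size_t index;
{
  v8::Isolate* isolate = creator.GetIsolate();
  v8::HandleScope scope(isolate);
  v8::Local<v8::Context> snap_context = v8::Context::New(isolate);
  index = creator.AddData(snap_context, v8::Number::New(isolate, 42));
  creator.SetDefaultContext(snap_context);
}
// ... in a later process, after deserializing the blob ...
v8::Local<v8::Number> n;
if (context->GetDataFromSnapshotOnce<v8::Number>(index).ToLocal(&n)) {
  // a second call with the same |index| returns an empty MaybeLocal
}
```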
@@ -8390,13 +8205,6 @@ class V8_EXPORT TryCatch {
* all TryCatch blocks should be stack allocated because the memory
* location itself is compared against JavaScript try/catch blocks.
*/
- V8_DEPRECATED("Use isolate version", TryCatch());
-
- /**
- * Creates a new try/catch block and registers it with v8. Note that
- * all TryCatch blocks should be stack allocated because the memory
- * location itself is compared against JavaScript try/catch blocks.
- */
TryCatch(Isolate* isolate);
/**
@@ -8454,7 +8262,7 @@ class V8_EXPORT TryCatch {
* Returns the .stack property of the thrown object. If no .stack
* property is present an empty handle is returned.
*/
- V8_DEPRECATE_SOON("Use maybe version.", Local<Value> StackTrace() const);
+ V8_DEPRECATED("Use maybe version.", Local<Value> StackTrace() const);
V8_WARN_UNUSED_RESULT MaybeLocal<Value> StackTrace(
Local<Context> context) const;
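With the isolate-less constructor gone, exception handling looks like this sketch, with `isolate`, `context`, and a compiled `script` assumed:

```
// Sketch: isolate-taking TryCatch plus the context-taking StackTrace.
v8::TryCatch try_catch(isolate);
v8::Local<v8::Value> result;
if (!script->Run(context).ToLocal(&result)) {
  v8::Local<v8::Value> stack;
  if (try_catch.StackTrace(context).ToLocal(&stack)) {
    // report the thrown object's .stack property
  }
}
```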
@@ -8763,9 +8571,12 @@ class V8_EXPORT Context {
void SetErrorMessageForCodeGenerationFromStrings(Local<String> message);
/**
- * Estimate the memory in bytes retained by this context.
+ * Returns data that was previously attached to the context snapshot via
+ * SnapshotCreator, and removes the reference to it. A repeated call with
+ * the same index returns an empty MaybeLocal.
*/
- V8_DEPRECATED("no longer supported", size_t EstimatedSize());
+ template <class T>
+ V8_INLINE MaybeLocal<T> GetDataFromSnapshotOnce(size_t index);
/**
* Stack-allocated class which sets the execution context for all
@@ -8809,6 +8620,7 @@ class V8_EXPORT Context {
friend class Object;
friend class Function;
+ internal::Object** GetDataFromSnapshotOnce(size_t index);
Local<Value> SlowGetEmbedderData(int index);
void* SlowGetAlignedPointerFromEmbedderData(int index);
};
@@ -9072,9 +8884,9 @@ class Internals {
static const int kFirstNonstringType = 0x80;
static const int kOddballType = 0x83;
static const int kForeignType = 0x87;
- static const int kJSSpecialApiObjectType = 0xbc;
- static const int kJSApiObjectType = 0xc0;
- static const int kJSObjectType = 0xc1;
+ static const int kJSSpecialApiObjectType = 0x410;
+ static const int kJSApiObjectType = 0x420;
+ static const int kJSObjectType = 0x421;
static const int kUndefinedOddballKind = 5;
static const int kNullOddballKind = 3;
@@ -9186,6 +8998,29 @@ class Internals {
}
};
+// Only perform cast check for types derived from v8::Data since
+// other types do not implement the Cast method.
+template <bool PerformCheck>
+struct CastCheck {
+ template <class T>
+ static void Perform(T* data);
+};
+
+template <>
+template <class T>
+void CastCheck<true>::Perform(T* data) {
+ T::Cast(data);
+}
+
+template <>
+template <class T>
+void CastCheck<false>::Perform(T* data) {}
+
+template <class T>
+V8_INLINE void PerformCastCheck(T* data) {
+ CastCheck<std::is_base_of<Data, T>::value>::Perform(data);
+}
+
} // namespace internal
@@ -9649,6 +9484,33 @@ void Template::Set(Isolate* isolate, const char* name, Local<Data> value) {
value);
}
+FunctionTemplate* FunctionTemplate::Cast(Data* data) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(data);
+#endif
+ return reinterpret_cast<FunctionTemplate*>(data);
+}
+
+ObjectTemplate* ObjectTemplate::Cast(Data* data) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(data);
+#endif
+ return reinterpret_cast<ObjectTemplate*>(data);
+}
+
+Signature* Signature::Cast(Data* data) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(data);
+#endif
+ return reinterpret_cast<Signature*>(data);
+}
+
+AccessorSignature* AccessorSignature::Cast(Data* data) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(data);
+#endif
+ return reinterpret_cast<AccessorSignature*>(data);
+}
Local<Value> Object::GetInternalField(int index) {
#ifndef V8_ENABLE_CHECKS
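The Data-derived Cast methods defined above mirror the long-standing Value casts further down. A sketch of a checked downcast; `AsFunctionTemplate` is a hypothetical helper:

```
// Sketch: with V8_ENABLE_CHECKS defined, a wrong cast trips CheckCast
// instead of silently reinterpreting memory.
v8::FunctionTemplate* AsFunctionTemplate(v8::Local<v8::Data> data) {
  return v8::FunctionTemplate::Cast(*data);
}
```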
@@ -9826,24 +9688,12 @@ Local<Boolean> Value::ToBoolean() const {
}
-Local<Number> Value::ToNumber() const {
- return ToNumber(Isolate::GetCurrent()->GetCurrentContext())
- .FromMaybe(Local<Number>());
-}
-
-
Local<String> Value::ToString() const {
return ToString(Isolate::GetCurrent()->GetCurrentContext())
.FromMaybe(Local<String>());
}
-Local<String> Value::ToDetailString() const {
- return ToDetailString(Isolate::GetCurrent()->GetCurrentContext())
- .FromMaybe(Local<String>());
-}
-
-
Local<Object> Value::ToObject() const {
return ToObject(Isolate::GetCurrent()->GetCurrentContext())
.FromMaybe(Local<Object>());
@@ -9856,18 +9706,6 @@ Local<Integer> Value::ToInteger() const {
}
-Local<Uint32> Value::ToUint32() const {
- return ToUint32(Isolate::GetCurrent()->GetCurrentContext())
- .FromMaybe(Local<Uint32>());
-}
-
-
-Local<Int32> Value::ToInt32() const {
- return ToInt32(Isolate::GetCurrent()->GetCurrentContext())
- .FromMaybe(Local<Int32>());
-}
-
-
Boolean* Boolean::Cast(v8::Value* value) {
#ifdef V8_ENABLE_CHECKS
CheckCast(value);
@@ -9892,6 +9730,14 @@ Symbol* Symbol::Cast(v8::Value* value) {
}
+Private* Private::Cast(Data* data) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(data);
+#endif
+ return reinterpret_cast<Private*>(data);
+}
+
+
Number* Number::Cast(v8::Value* value) {
#ifdef V8_ENABLE_CHECKS
CheckCast(value);
@@ -10251,6 +10097,12 @@ uint32_t Isolate::GetNumberOfDataSlots() {
return I::kNumIsolateDataSlots;
}
+template <class T>
+MaybeLocal<T> Isolate::GetDataFromSnapshotOnce(size_t index) {
+ T* data = reinterpret_cast<T*>(GetDataFromSnapshotOnce(index));
+ if (data) internal::PerformCastCheck(data);
+ return Local<T>(data);
+}
int64_t Isolate::AdjustAmountOfExternalAllocatedMemory(
int64_t change_in_bytes) {
@@ -10310,63 +10162,25 @@ void* Context::GetAlignedPointerFromEmbedderData(int index) {
#endif
}
-bool V8::IsDead() {
- Isolate* isolate = Isolate::GetCurrent();
- return isolate->IsDead();
-}
-
-
-bool V8::AddMessageListener(MessageCallback that, Local<Value> data) {
- Isolate* isolate = Isolate::GetCurrent();
- return isolate->AddMessageListener(that, data);
-}
-
-
-void V8::RemoveMessageListeners(MessageCallback that) {
- Isolate* isolate = Isolate::GetCurrent();
- isolate->RemoveMessageListeners(that);
-}
-
-
-void V8::SetFailedAccessCheckCallbackFunction(
- FailedAccessCheckCallback callback) {
- Isolate* isolate = Isolate::GetCurrent();
- isolate->SetFailedAccessCheckCallbackFunction(callback);
-}
-
-
-void V8::SetCaptureStackTraceForUncaughtExceptions(
- bool capture, int frame_limit, StackTrace::StackTraceOptions options) {
- Isolate* isolate = Isolate::GetCurrent();
- isolate->SetCaptureStackTraceForUncaughtExceptions(capture, frame_limit,
- options);
-}
-
-
-void V8::SetFatalErrorHandler(FatalErrorCallback callback) {
- Isolate* isolate = Isolate::GetCurrent();
- isolate->SetFatalErrorHandler(callback);
-}
-
-void V8::TerminateExecution(Isolate* isolate) { isolate->TerminateExecution(); }
-
-
-bool V8::IsExecutionTerminating(Isolate* isolate) {
- if (isolate == NULL) {
- isolate = Isolate::GetCurrent();
- }
- return isolate->IsExecutionTerminating();
+template <class T>
+MaybeLocal<T> Context::GetDataFromSnapshotOnce(size_t index) {
+ T* data = reinterpret_cast<T*>(GetDataFromSnapshotOnce(index));
+ if (data) internal::PerformCastCheck(data);
+ return Local<T>(data);
}
-
-void V8::CancelTerminateExecution(Isolate* isolate) {
- isolate->CancelTerminateExecution();
+template <class T>
+size_t SnapshotCreator::AddData(Local<Context> context, Local<T> object) {
+ T* object_ptr = *object;
+ internal::Object** p = reinterpret_cast<internal::Object**>(object_ptr);
+ return AddData(context, *p);
}
-
-void V8::VisitExternalResources(ExternalResourceVisitor* visitor) {
- Isolate* isolate = Isolate::GetCurrent();
- isolate->VisitExternalResources(visitor);
+template <class T>
+size_t SnapshotCreator::AddData(Local<T> object) {
+ T* object_ptr = *object;
+ internal::Object** p = reinterpret_cast<internal::Object**>(object_ptr);
+ return AddData(*p);
}
/**
diff --git a/deps/v8/infra/config/PRESUBMIT.py b/deps/v8/infra/config/PRESUBMIT.py
new file mode 100644
index 0000000000..3d20f403f6
--- /dev/null
+++ b/deps/v8/infra/config/PRESUBMIT.py
@@ -0,0 +1,29 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Presubmit script for changes in the infrastructure configs.
+
+See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
+for more details about the presubmit API built into gcl.
+"""
+
+
+def _CommonChecks(input_api, output_api):
+ """Checks common to both upload and commit."""
+ results = []
+ results.extend(
+ input_api.canned_checks.CheckChangedLUCIConfigs(input_api, output_api))
+ return results
+
+
+def CheckChangeOnUpload(input_api, output_api):
+ results = []
+ results.extend(_CommonChecks(input_api, output_api))
+ return results
+
+
+def CheckChangeOnCommit(input_api, output_api):
+ results = []
+ results.extend(_CommonChecks(input_api, output_api))
+ return results
diff --git a/deps/v8/infra/config/cq.cfg b/deps/v8/infra/config/cq.cfg
index dbc20d5f80..d57b117f10 100644
--- a/deps/v8/infra/config/cq.cfg
+++ b/deps/v8/infra/config/cq.cfg
@@ -25,23 +25,12 @@ verifiers {
name: "luci.v8.try"
builders { name: "v8_android_arm_compile_rel" }
builders { name: "v8_fuchsia_rel_ng" }
- builders { name: "v8_linux64_gcc_compile_dbg" }
- builders { name: "v8_linux_gcc_compile_rel" }
- builders { name: "v8_linux_shared_compile_rel" }
- builders { name: "v8_presubmit" }
- builders {
- name: "v8_win64_msvc_compile_rel"
- experiment_percentage: 20
- }
- }
- buckets {
- name: "master.tryserver.v8"
- builders { name: "v8_node_linux64_rel" }
builders { name: "v8_linux64_asan_rel_ng" }
builders {
name: "v8_linux64_asan_rel_ng_triggered"
triggered_by: "v8_linux64_asan_rel_ng"
}
+ builders { name: "v8_linux64_gcc_compile_dbg" }
builders { name: "v8_linux64_gyp_rel_ng" }
builders {
name: "v8_linux64_gyp_rel_ng_triggered"
@@ -52,6 +41,10 @@ verifiers {
name: "v8_linux64_rel_ng_triggered"
triggered_by: "v8_linux64_rel_ng"
}
+ builders {
+ name: "v8_linux64_sanitizer_coverage_rel"
+ experiment_percentage: 100
+ }
builders { name: "v8_linux64_verify_csa_rel_ng" }
builders {
name: "v8_linux64_verify_csa_rel_ng_triggered"
@@ -67,14 +60,19 @@ verifiers {
name: "v8_linux_arm_rel_ng_triggered"
triggered_by: "v8_linux_arm_rel_ng"
}
+ builders {
+ name: "v8_linux_blink_rel"
+ experiment_percentage: 100
+ }
builders { name: "v8_linux_chromium_gn_rel" }
builders { name: "v8_linux_dbg_ng" }
builders {
name: "v8_linux_dbg_ng_triggered"
triggered_by: "v8_linux_dbg_ng"
}
- builders { name: "v8_linux_mipsel_compile_rel" }
+ builders { name: "v8_linux_gcc_compile_rel" }
builders { name: "v8_linux_mips64el_compile_rel" }
+ builders { name: "v8_linux_mipsel_compile_rel" }
builders { name: "v8_linux_nodcheck_rel_ng" }
builders {
name: "v8_linux_nodcheck_rel_ng_triggered"
@@ -85,6 +83,7 @@ verifiers {
name: "v8_linux_rel_ng_triggered"
triggered_by: "v8_linux_rel_ng"
}
+ builders { name: "v8_linux_shared_compile_rel" }
builders { name: "v8_linux_verify_csa_rel_ng" }
builders {
name: "v8_linux_verify_csa_rel_ng_triggered"
@@ -95,6 +94,12 @@ verifiers {
name: "v8_mac_rel_ng_triggered"
triggered_by: "v8_mac_rel_ng"
}
+ builders { name: "v8_node_linux64_rel" }
+ builders { name: "v8_presubmit" }
+ builders {
+ name: "v8_win64_msvc_compile_rel"
+ experiment_percentage: 20
+ }
builders { name: "v8_win64_rel_ng" }
builders {
name: "v8_win64_rel_ng_triggered"
@@ -111,21 +116,6 @@ verifiers {
name: "v8_win_rel_ng_triggered"
triggered_by: "v8_win_rel_ng"
}
- builders {
- name: "v8_linux_blink_rel"
- experiment_percentage: 100
- }
- builders {
- name: "v8_linux64_sanitizer_coverage_rel"
- experiment_percentage: 100
- }
- }
- buckets {
- name: "master.tryserver.chromium.win"
- builders {
- name: "win_chromium_compile_dbg_ng"
- experiment_percentage: 100
- }
}
}
diff --git a/deps/v8/infra/mb/mb_config.pyl b/deps/v8/infra/mb/mb_config.pyl
index afc7225148..e025c98f9e 100644
--- a/deps/v8/infra/mb/mb_config.pyl
+++ b/deps/v8/infra/mb/mb_config.pyl
@@ -204,7 +204,8 @@
'v8_linux_gcc_rel': 'gn_release_x86_gcc_minimal_symbols',
'v8_linux_shared_compile_rel': 'gn_release_x86_shared_verify_heap',
'v8_linux64_gcc_compile_dbg': 'gn_debug_x64_gcc',
- 'v8_linux64_rel_ng': 'gn_release_x64_trybot',
+ 'v8_linux64_fyi_rel_ng': 'gn_release_x64_test_features_trybot',
+ 'v8_linux64_rel_ng': 'gn_release_x64_test_features_trybot',
'v8_linux64_verify_csa_rel_ng': 'gn_release_x64_verify_csa',
'v8_linux64_gyp_rel_ng': 'gyp_release_x64',
'v8_linux64_asan_rel_ng': 'gn_release_x64_asan_minimal_symbols',
@@ -428,6 +429,8 @@
'gn', 'release_bot', 'x64', 'minimal_symbols', 'swarming'],
'gn_release_x64_trybot': [
'gn', 'release_trybot', 'x64', 'swarming'],
+ 'gn_release_x64_test_features_trybot': [
+ 'gn', 'release_trybot', 'x64', 'swarming', 'v8_enable_test_features'],
'gn_release_x64_tsan': [
'gn', 'release_bot', 'x64', 'tsan', 'swarming'],
'gn_release_x64_tsan_concurrent_marking': [
@@ -555,6 +558,7 @@
},
'asan': {
+ 'mixins': ['v8_enable_test_features'],
'gn_args': 'is_asan=true',
'gyp_defines': 'clang=1 asan=1',
},
@@ -565,12 +569,14 @@
},
'cfi': {
+ 'mixins': ['v8_enable_test_features'],
'gn_args': ('is_cfi=true use_cfi_cast=true use_cfi_diag=true '
'use_cfi_recover=false'),
'gyp_defines': 'cfi_vptr=1 cfi_diag=1',
},
'cfi_clusterfuzz': {
+ 'mixins': ['v8_enable_test_features'],
'gn_args': ('is_cfi=true use_cfi_cast=true use_cfi_diag=true '
'use_cfi_recover=true'),
'gyp_defines': 'cfi_vptr=1 cfi_diag=1',
@@ -647,6 +653,7 @@
},
'lsan': {
+ 'mixins': ['v8_enable_test_features'],
'gn_args': 'is_lsan=true',
'gyp_defines': 'lsan=1',
},
@@ -662,11 +669,13 @@
},
'msan': {
+ 'mixins': ['v8_enable_test_features'],
'gn_args': ('is_msan=true msan_track_origins=2 '
'use_prebuilt_instrumented_libraries=true'),
},
'msan_no_origins': {
+ 'mixins': ['v8_enable_test_features'],
'gn_args': ('is_msan=true msan_track_origins=0 '
'use_prebuilt_instrumented_libraries=true'),
},
@@ -756,11 +765,13 @@
},
'tsan': {
+ 'mixins': ['v8_enable_test_features'],
'gn_args': 'is_tsan=true',
'gyp_defines': 'clang=1 tsan=1',
},
'ubsan_vptr': {
+ 'mixins': ['v8_enable_test_features'],
# TODO(krasin): Remove is_ubsan_no_recover=true when
# https://llvm.org/bugs/show_bug.cgi?id=25569 is fixed and just use
# ubsan_vptr instead.
@@ -768,6 +779,7 @@
},
'ubsan_vptr_recover': {
+ 'mixins': ['v8_enable_test_features'],
# Ubsan vptr with recovery.
'gn_args': 'is_ubsan_vptr=true is_ubsan_no_recover=false',
},
@@ -782,6 +794,7 @@
},
'v8_correctness_fuzzer': {
+ 'mixins': ['v8_enable_test_features'],
'gn_args': 'v8_correctness_fuzzer=true v8_multi_arch_build=true',
},
@@ -795,6 +808,10 @@
'gyp_defines': 'v8_enable_slow_dchecks=1',
},
+ 'v8_enable_test_features': {
+ 'gn_args': 'v8_enable_test_features=true',
+ },
+
'v8_enable_verify_predictable': {
'gn_args': 'v8_enable_verify_predictable=true',
'gyp_defines': 'v8_enable_verify_predictable=1',
diff --git a/deps/v8/infra/testing/README.md b/deps/v8/infra/testing/README.md
index 3099062477..8658768cac 100644
--- a/deps/v8/infra/testing/README.md
+++ b/deps/v8/infra/testing/README.md
@@ -1,8 +1,15 @@
# Src-side test specifications
-The infra/testing folder in V8 contains test specifications, consumed and
-executed by the continuous infrastructure. Every master has an optional file
-named `<mastername>.pyl`. E.g. `tryserver.v8.pyl`.
+Src-side test specifications enable developers to quickly add tests running on
+specific bots on V8's continuous infrastructure (CI) or tryserver. Features to
+be tested must live behind runtime flags, which are mapped to named testing
+variants specified [here](https://chromium.googlesource.com/v8/v8/+/master/tools/testrunner/local/variants.py).
+Changes to src-side test specifications go through the CQ like any other CL
+and require the tests added for the specific trybots to pass.
+
+The test specifications are defined in a V8-side folder called infra/testing.
+Every master has an optional file named `<mastername>.pyl`. E.g.
+`tryserver.v8.pyl`.
The structure of each file is:
```
@@ -21,10 +28,10 @@ The structure of each file is:
The `<buildername>` is a string name of the builder to execute the tests.
`<test-spec name>` is a label defining a test specification matching the
[infra-side](https://chromium.googlesource.com/chromium/tools/build/+/master/scripts/slave/recipe_modules/v8/testing.py#58).
-The `<variant name>` is a testing variant as specified in
-`v8/tools/testrunner/local/variants.py`. `<number of shards>` is optional
-(default 1), but can be provided to increase the swarming shards for
-long-running tests.
+The `<variant name>` is a testing variant specified
+[here](https://chromium.googlesource.com/v8/v8/+/master/tools/testrunner/local/variants.py).
+`<number of shards>` is optional (default 1), but can be provided to increase
+the swarming shards for long-running tests.
Example:
```
@@ -47,4 +54,17 @@ tryserver.v8:
client.v8:
V8 Linux64
V8 Linux64 - debug
-``` \ No newline at end of file
+```
+
+Please only add tests that are expected to pass, or skip failing tests via the
+status file for the selected testing variants only. If you want to add FYI
+tests (i.e. tests that neither close the tree nor block the CQ), you can do so
+for the following set of bots:
+
+```
+tryserver.v8:
+ v8_linux64_fyi_rel_ng_triggered
+client.v8:
+ V8 Linux64 - fyi
+ V8 Linux64 - debug - fyi
+```
diff --git a/deps/v8/infra/testing/client.v8.pyl b/deps/v8/infra/testing/client.v8.pyl
index 80d75a920a..ab1744fc78 100644
--- a/deps/v8/infra/testing/client.v8.pyl
+++ b/deps/v8/infra/testing/client.v8.pyl
@@ -10,4 +10,39 @@
# 'V8 Linux64 - debug': [
# {'name': 'benchmarks', 'variant': 'default', 'shards': 1},
# ],
-} \ No newline at end of file
+
+ 'V8 Linux - debug': [
+ {'name': 'd8testing', 'variant': 'code_serializer', 'shards': 1},
+ {'name': 'mozilla', 'variant': 'code_serializer', 'shards': 1},
+ {'name': 'test262_variants', 'variant': 'code_serializer', 'shards': 1},
+ {'name': 'benchmarks', 'variant': 'code_serializer', 'shards': 1},
+ ],
+ 'V8 Linux - gc stress': [
+ {'name': 'mjsunit', 'variant': 'slow_path', 'shards': 2},
+ ],
+ 'V8 Linux64': [
+ {'name': 'v8testing', 'variant': 'minor_mc', 'shards': 1},
+ ],
+ 'V8 Linux64 - debug': [
+ {'name': 'v8testing', 'variant': 'minor_mc', 'shards': 1},
+ {'name': 'v8testing', 'variant': 'slow_path', 'shards': 1},
+ ],
+ 'V8 Linux64 ASAN': [
+ {'name': 'v8testing', 'variant': 'slow_path', 'shards': 1},
+ ],
+ 'V8 Linux64 TSAN': [
+ {'name': 'v8testing', 'variant': 'slow_path', 'shards': 1},
+ ],
+ 'V8 Linux64 - fyi': [
+ {'name': 'v8testing', 'variant': 'infra_staging', 'shards': 1},
+ {'name': 'test262_variants', 'variant': 'infra_staging', 'shards': 2},
+ {'name': 'mjsunit', 'variant': 'stress_sampling', 'shards': 1},
+ {'name': 'webkit', 'variant': 'stress_sampling', 'shards': 1},
+ ],
+ 'V8 Linux64 - debug - fyi': [
+ {'name': 'v8testing', 'variant': 'infra_staging', 'shards': 2},
+ {'name': 'test262_variants', 'variant': 'infra_staging', 'shards': 3},
+ {'name': 'mjsunit', 'variant': 'stress_sampling', 'shards': 1},
+ {'name': 'webkit', 'variant': 'stress_sampling', 'shards': 1},
+ ],
+}
diff --git a/deps/v8/infra/testing/tryserver.v8.pyl b/deps/v8/infra/testing/tryserver.v8.pyl
index f296779c4e..ee6abae5d5 100644
--- a/deps/v8/infra/testing/tryserver.v8.pyl
+++ b/deps/v8/infra/testing/tryserver.v8.pyl
@@ -7,4 +7,24 @@
# 'v8_linux64_rel_ng_triggered': [
# {'name': 'benchmarks', 'variant': 'default', 'shards': 1},
# ],
-} \ No newline at end of file
+
+ 'v8_linux64_fyi_rel_ng_triggered': [
+ {'name': 'v8testing', 'variant': 'infra_staging', 'shards': 2},
+ {'name': 'test262_variants', 'variant': 'infra_staging', 'shards': 2},
+ {'name': 'mjsunit', 'variant': 'stress_sampling', 'shards': 1},
+ {'name': 'webkit', 'variant': 'stress_sampling', 'shards': 1},
+ ],
+ 'v8_linux64_rel_ng_triggered': [
+ {'name': 'v8testing', 'variant': 'minor_mc', 'shards': 1},
+ {'name': 'v8testing', 'variant': 'slow_path', 'shards': 1},
+ ],
+ 'v8_linux_gc_stress_dbg': [
+ {'name': 'mjsunit', 'variant': 'slow_path', 'shards': 2},
+ ],
+ 'v8_linux64_asan_rel_ng_triggered': [
+ {'name': 'v8testing', 'variant': 'slow_path', 'shards': 1},
+ ],
+ 'v8_linux64_tsan_rel': [
+ {'name': 'v8testing', 'variant': 'slow_path', 'shards': 1},
+ ],
+}
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index adaa0be3c6..eb89288685 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -102,7 +102,7 @@ void Accessors::ReconfigureToDataProperty(
const v8::PropertyCallbackInfo<v8::Boolean>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
RuntimeCallTimerScope stats_scope(
- isolate, &RuntimeCallStats::ReconfigureToDataProperty);
+ isolate, RuntimeCallCounterId::kReconfigureToDataProperty);
HandleScope scope(isolate);
Handle<Object> receiver = Utils::OpenHandle(*info.This());
Handle<JSObject> holder =
@@ -147,7 +147,8 @@ void Accessors::ArrayLengthGetter(
v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::ArrayLengthGetter);
+ RuntimeCallTimerScope timer(isolate,
+ RuntimeCallCounterId::kArrayLengthGetter);
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
JSArray* holder = JSArray::cast(*Utils::OpenHandle(*info.Holder()));
@@ -159,7 +160,8 @@ void Accessors::ArrayLengthSetter(
v8::Local<v8::Name> name, v8::Local<v8::Value> val,
const v8::PropertyCallbackInfo<v8::Boolean>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::ArrayLengthSetter);
+ RuntimeCallTimerScope timer(isolate,
+ RuntimeCallCounterId::kArrayLengthSetter);
HandleScope scope(isolate);
DCHECK(Utils::OpenHandle(*name)->SameValue(isolate->heap()->length_string()));
@@ -272,7 +274,8 @@ void Accessors::StringLengthGetter(
v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::StringLengthGetter);
+ RuntimeCallTimerScope timer(isolate,
+ RuntimeCallCounterId::kStringLengthGetter);
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
@@ -546,9 +549,8 @@ void Accessors::ScriptEvalFromScriptGetter(
Handle<Script> script(
Script::cast(Handle<JSValue>::cast(object)->value()), isolate);
Handle<Object> result = isolate->factory()->undefined_value();
- if (!script->eval_from_shared()->IsUndefined(isolate)) {
- Handle<SharedFunctionInfo> eval_from_shared(
- SharedFunctionInfo::cast(script->eval_from_shared()));
+ if (script->has_eval_from_shared()) {
+ Handle<SharedFunctionInfo> eval_from_shared(script->eval_from_shared());
if (eval_from_shared->script()->IsScript()) {
Handle<Script> eval_from_script(Script::cast(eval_from_shared->script()));
result = Script::GetWrapper(eval_from_script);
@@ -608,9 +610,8 @@ void Accessors::ScriptEvalFromFunctionNameGetter(
Handle<Script> script(
Script::cast(Handle<JSValue>::cast(object)->value()), isolate);
Handle<Object> result = isolate->factory()->undefined_value();
- if (!script->eval_from_shared()->IsUndefined(isolate)) {
- Handle<SharedFunctionInfo> shared(
- SharedFunctionInfo::cast(script->eval_from_shared()));
+ if (script->has_eval_from_shared()) {
+ Handle<SharedFunctionInfo> shared(script->eval_from_shared());
// Find the name of the function calling eval.
result = Handle<Object>(shared->name(), isolate);
}
@@ -644,7 +645,7 @@ void Accessors::FunctionPrototypeGetter(
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
RuntimeCallTimerScope timer(isolate,
- &RuntimeCallStats::FunctionPrototypeGetter);
+ RuntimeCallCounterId::kFunctionPrototypeGetter);
HandleScope scope(isolate);
Handle<JSFunction> function =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
@@ -657,7 +658,7 @@ void Accessors::FunctionPrototypeSetter(
const v8::PropertyCallbackInfo<v8::Boolean>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
RuntimeCallTimerScope timer(isolate,
- &RuntimeCallStats::FunctionPrototypeSetter);
+ RuntimeCallCounterId::kFunctionPrototypeSetter);
HandleScope scope(isolate);
Handle<Object> value = Utils::OpenHandle(*val);
Handle<JSFunction> object =
@@ -681,7 +682,8 @@ void Accessors::FunctionLengthGetter(
v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::FunctionLengthGetter);
+ RuntimeCallTimerScope timer(isolate,
+ RuntimeCallCounterId::kFunctionLengthGetter);
HandleScope scope(isolate);
Handle<JSFunction> function =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
@@ -950,16 +952,17 @@ class FrameFunctionIterator {
private:
MaybeHandle<JSFunction> next() {
while (true) {
- inlined_frame_index_--;
- if (inlined_frame_index_ == -1) {
+ if (inlined_frame_index_ <= 0) {
if (!frame_iterator_.done()) {
frame_iterator_.Advance();
frames_.clear();
+ inlined_frame_index_ = -1;
GetFrames();
}
if (inlined_frame_index_ == -1) return MaybeHandle<JSFunction>();
- inlined_frame_index_--;
}
+
+ --inlined_frame_index_;
Handle<JSFunction> next_function =
frames_[inlined_frame_index_].AsJavaScript().function();
// Skip functions from other origins.
@@ -1057,7 +1060,7 @@ void Accessors::BoundFunctionLengthGetter(
v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
RuntimeCallTimerScope timer(isolate,
- &RuntimeCallStats::BoundFunctionLengthGetter);
+ RuntimeCallCounterId::kBoundFunctionLengthGetter);
HandleScope scope(isolate);
Handle<JSBoundFunction> function =
Handle<JSBoundFunction>::cast(Utils::OpenHandle(*info.Holder()));
@@ -1084,7 +1087,7 @@ void Accessors::BoundFunctionNameGetter(
v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
RuntimeCallTimerScope timer(isolate,
- &RuntimeCallStats::BoundFunctionNameGetter);
+ RuntimeCallCounterId::kBoundFunctionNameGetter);
HandleScope scope(isolate);
Handle<JSBoundFunction> function =
Handle<JSBoundFunction>::cast(Utils::OpenHandle(*info.Holder()));
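The accessors.cc changes above are a mechanical migration: every RuntimeCallTimerScope now takes a plain RuntimeCallCounterId enum value instead of a pointer-to-member into RuntimeCallStats. A minimal standalone sketch of the pattern follows; RuntimeCallStats, RuntimeCallTimerScope, and the counter names here are simplified stand-ins for illustration, not the real V8 types.

```
#include <array>
#include <chrono>
#include <cstddef>
#include <cstdio>

// Simplified stand-ins for V8's runtime-call-stats types (assumed shapes,
// not the real implementation).
enum class RuntimeCallCounterId {
  kArrayLengthGetter,
  kArrayLengthSetter,
  kNumCounters
};

struct RuntimeCallStats {
  // A flat array indexed by counter id, instead of one named member counter
  // per callback; this is what the enum-id handle makes possible.
  std::array<long long,
             static_cast<size_t>(RuntimeCallCounterId::kNumCounters)>
      micros{};
};

class RuntimeCallTimerScope {
 public:
  RuntimeCallTimerScope(RuntimeCallStats* stats, RuntimeCallCounterId id)
      : stats_(stats), id_(id), start_(std::chrono::steady_clock::now()) {}
  ~RuntimeCallTimerScope() {
    auto elapsed = std::chrono::steady_clock::now() - start_;
    stats_->micros[static_cast<size_t>(id_)] +=
        std::chrono::duration_cast<std::chrono::microseconds>(elapsed).count();
  }

 private:
  RuntimeCallStats* stats_;
  RuntimeCallCounterId id_;
  std::chrono::steady_clock::time_point start_;
};

int main() {
  RuntimeCallStats stats;
  {
    RuntimeCallTimerScope timer(&stats,
                                RuntimeCallCounterId::kArrayLengthGetter);
    // ... work attributed to the getter counter ...
  }
  std::printf("getter: %lld us\n",
              stats.micros[static_cast<size_t>(
                  RuntimeCallCounterId::kArrayLengthGetter)]);
}
```

An enum id is trivially copyable and switch-friendly, which is what allows the counter table to become a flat array indexed by id rather than a set of named members.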
diff --git a/deps/v8/src/allocation.cc b/deps/v8/src/allocation.cc
index ab7b33a085..e17de159c1 100644
--- a/deps/v8/src/allocation.cc
+++ b/deps/v8/src/allocation.cc
@@ -6,7 +6,9 @@
#include <stdlib.h> // For free, malloc.
#include "src/base/bits.h"
+#include "src/base/lazy-instance.h"
#include "src/base/logging.h"
+#include "src/base/page-allocator.h"
#include "src/base/platform/platform.h"
#include "src/utils.h"
#include "src/v8.h"
@@ -38,26 +40,44 @@ void* AlignedAllocInternal(size_t size, size_t alignment) {
return ptr;
}
+// TODO(bbudge) Simplify this once all embedders implement a page allocator.
+struct InitializePageAllocator {
+ static void Construct(void* page_allocator_ptr_arg) {
+ auto page_allocator_ptr =
+ reinterpret_cast<v8::PageAllocator**>(page_allocator_ptr_arg);
+ v8::PageAllocator* page_allocator =
+ V8::GetCurrentPlatform()->GetPageAllocator();
+ if (page_allocator == nullptr) {
+ static v8::base::PageAllocator default_allocator;
+ page_allocator = &default_allocator;
+ }
+ *page_allocator_ptr = page_allocator;
+ }
+};
+
+static base::LazyInstance<v8::PageAllocator*, InitializePageAllocator>::type
+ page_allocator = LAZY_INSTANCE_INITIALIZER;
+
+v8::PageAllocator* GetPageAllocator() { return page_allocator.Get(); }
+
+// We will attempt allocation this many times. After each failure, we call
+// OnCriticalMemoryPressure to try to free some memory.
+const int kAllocationTries = 2;
+
} // namespace
void* Malloced::New(size_t size) {
- void* result = malloc(size);
+ void* result = AllocWithRetry(size);
if (result == nullptr) {
- V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
- result = malloc(size);
- if (result == nullptr) {
- V8::FatalProcessOutOfMemory("Malloced operator new");
- }
+ V8::FatalProcessOutOfMemory("Malloced operator new");
}
return result;
}
-
void Malloced::Delete(void* p) {
free(p);
}
-
char* StrDup(const char* str) {
int length = StrLength(str);
char* result = NewArray<char>(length + 1);
@@ -66,7 +86,6 @@ char* StrDup(const char* str) {
return result;
}
-
char* StrNDup(const char* str, int n) {
int length = StrLength(str);
if (n < length) length = n;
@@ -76,22 +95,31 @@ char* StrNDup(const char* str, int n) {
return result;
}
+void* AllocWithRetry(size_t size) {
+ void* result = nullptr;
+ for (int i = 0; i < kAllocationTries; ++i) {
+ result = malloc(size);
+ if (result != nullptr) break;
+ if (!OnCriticalMemoryPressure(size)) break;
+ }
+ return result;
+}
void* AlignedAlloc(size_t size, size_t alignment) {
DCHECK_LE(V8_ALIGNOF(void*), alignment);
DCHECK(base::bits::IsPowerOfTwo(alignment));
- void* ptr = AlignedAllocInternal(size, alignment);
- if (ptr == nullptr) {
- V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
- ptr = AlignedAllocInternal(size, alignment);
- if (ptr == nullptr) {
- V8::FatalProcessOutOfMemory("AlignedAlloc");
- }
+ void* result = nullptr;
+ for (int i = 0; i < kAllocationTries; ++i) {
+ result = AlignedAllocInternal(size, alignment);
+ if (result != nullptr) break;
+ if (!OnCriticalMemoryPressure(size + alignment)) break;
}
- return ptr;
+ if (result == nullptr) {
+ V8::FatalProcessOutOfMemory("AlignedAlloc");
+ }
+ return result;
}
-
void AlignedFree(void *ptr) {
#if V8_OS_WIN
_aligned_free(ptr);
@@ -103,27 +131,88 @@ void AlignedFree(void *ptr) {
#endif
}
-byte* AllocateSystemPage(void* address, size_t* allocated) {
- size_t page_size = base::OS::AllocatePageSize();
- void* result = base::OS::Allocate(address, page_size, page_size,
- base::OS::MemoryPermission::kReadWrite);
+size_t AllocatePageSize() { return GetPageAllocator()->AllocatePageSize(); }
+
+size_t CommitPageSize() { return GetPageAllocator()->CommitPageSize(); }
+
+void SetRandomMmapSeed(int64_t seed) {
+ GetPageAllocator()->SetRandomMmapSeed(seed);
+}
+
+void* GetRandomMmapAddr() { return GetPageAllocator()->GetRandomMmapAddr(); }
+
+void* AllocatePages(void* address, size_t size, size_t alignment,
+ PageAllocator::Permission access) {
+ void* result = nullptr;
+ for (int i = 0; i < kAllocationTries; ++i) {
+ result =
+ GetPageAllocator()->AllocatePages(address, size, alignment, access);
+ if (result != nullptr) break;
+ size_t request_size = size + alignment - AllocatePageSize();
+ if (!OnCriticalMemoryPressure(request_size)) break;
+ }
+#if defined(LEAK_SANITIZER)
+ if (result != nullptr) {
+ __lsan_register_root_region(result, size);
+ }
+#endif
+ return result;
+}
+
+bool FreePages(void* address, const size_t size) {
+ bool result = GetPageAllocator()->FreePages(address, size);
+#if defined(LEAK_SANITIZER)
+ if (result) {
+ __lsan_unregister_root_region(address, size);
+ }
+#endif
+ return result;
+}
+
+bool ReleasePages(void* address, size_t size, size_t new_size) {
+ DCHECK_LT(new_size, size);
+ bool result = GetPageAllocator()->ReleasePages(address, size, new_size);
+#if defined(LEAK_SANITIZER)
+ if (result) {
+ __lsan_unregister_root_region(address, size);
+ __lsan_register_root_region(address, new_size);
+ }
+#endif
+ return result;
+}
+
+bool SetPermissions(void* address, size_t size,
+ PageAllocator::Permission access) {
+ return GetPageAllocator()->SetPermissions(address, size, access);
+}
+
+byte* AllocatePage(void* address, size_t* allocated) {
+ size_t page_size = AllocatePageSize();
+ void* result =
+ AllocatePages(address, page_size, page_size, PageAllocator::kReadWrite);
if (result != nullptr) *allocated = page_size;
return static_cast<byte*>(result);
}
+bool OnCriticalMemoryPressure(size_t length) {
+ // TODO(bbudge) Rework retry logic once embedders implement the more
+ // informative overload.
+ if (!V8::GetCurrentPlatform()->OnCriticalMemoryPressure(length)) {
+ V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
+ }
+ return true;
+}
+
VirtualMemory::VirtualMemory() : address_(nullptr), size_(0) {}
VirtualMemory::VirtualMemory(size_t size, void* hint, size_t alignment)
: address_(nullptr), size_(0) {
- size_t page_size = base::OS::AllocatePageSize();
+ size_t page_size = AllocatePageSize();
size_t alloc_size = RoundUp(size, page_size);
- address_ = base::OS::Allocate(hint, alloc_size, alignment,
- base::OS::MemoryPermission::kNoAccess);
+ address_ =
+ AllocatePages(hint, alloc_size, alignment, PageAllocator::kNoAccess);
if (address_ != nullptr) {
size_ = alloc_size;
-#if defined(LEAK_SANITIZER)
- __lsan_register_root_region(address_, size_);
-#endif
}
}
@@ -139,9 +228,9 @@ void VirtualMemory::Reset() {
}
bool VirtualMemory::SetPermissions(void* address, size_t size,
- base::OS::MemoryPermission access) {
+ PageAllocator::Permission access) {
CHECK(InVM(address, size));
- bool result = base::OS::SetPermissions(address, size, access);
+ bool result = v8::internal::SetPermissions(address, size, access);
DCHECK(result);
USE(result);
return result;
@@ -149,8 +238,7 @@ bool VirtualMemory::SetPermissions(void* address, size_t size,
size_t VirtualMemory::Release(void* free_start) {
DCHECK(IsReserved());
- DCHECK(IsAddressAligned(static_cast<Address>(free_start),
- base::OS::CommitPageSize()));
+ DCHECK(IsAddressAligned(static_cast<Address>(free_start), CommitPageSize()));
// Notice: Order is important here. The VirtualMemory object might live
// inside the allocated region.
const size_t free_size = size_ - (reinterpret_cast<size_t>(free_start) -
@@ -159,11 +247,7 @@ size_t VirtualMemory::Release(void* free_start) {
DCHECK_LT(address_, free_start);
DCHECK_LT(free_start, reinterpret_cast<void*>(
reinterpret_cast<size_t>(address_) + size_));
-#if defined(LEAK_SANITIZER)
- __lsan_unregister_root_region(address_, size_);
- __lsan_register_root_region(address_, size_ - free_size);
-#endif
- CHECK(base::OS::Release(free_start, free_size));
+ CHECK(ReleasePages(address_, size_, size_ - free_size));
size_ -= free_size;
return free_size;
}
@@ -176,10 +260,7 @@ void VirtualMemory::Free() {
size_t size = size_;
CHECK(InVM(address, size));
Reset();
-#if defined(LEAK_SANITIZER)
- __lsan_unregister_root_region(address, size);
-#endif
- CHECK(base::OS::Free(address, size));
+ CHECK(FreePages(address, size));
}
void VirtualMemory::TakeControl(VirtualMemory* from) {
@@ -190,30 +271,22 @@ void VirtualMemory::TakeControl(VirtualMemory* from) {
}
bool AllocVirtualMemory(size_t size, void* hint, VirtualMemory* result) {
- VirtualMemory first_try(size, hint);
- if (first_try.IsReserved()) {
- result->TakeControl(&first_try);
+ VirtualMemory vm(size, hint);
+ if (vm.IsReserved()) {
+ result->TakeControl(&vm);
return true;
}
-
- V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
- VirtualMemory second_try(size, hint);
- result->TakeControl(&second_try);
- return result->IsReserved();
+ return false;
}
bool AlignedAllocVirtualMemory(size_t size, size_t alignment, void* hint,
VirtualMemory* result) {
- VirtualMemory first_try(size, hint, alignment);
- if (first_try.IsReserved()) {
- result->TakeControl(&first_try);
+ VirtualMemory vm(size, hint, alignment);
+ if (vm.IsReserved()) {
+ result->TakeControl(&vm);
return true;
}
-
- V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
- VirtualMemory second_try(size, hint, alignment);
- result->TakeControl(&second_try);
- return result->IsReserved();
+ return false;
}
} // namespace internal
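The recurring change in allocation.cc is one retry loop replacing several hand-rolled allocate/notify/allocate sequences: attempt the allocation up to kAllocationTries times, calling the platform's memory-pressure hook between attempts, and let the caller decide whether a final nullptr is fatal. A compact sketch of that shape; OnCriticalMemoryPressureHook below is a stand-in for the embedder callback, not V8's actual API.

```
#include <cstddef>
#include <cstdlib>

// Stand-ins for the platform hook and retry count used in allocation.cc.
constexpr int kAllocationTries = 2;

// Hypothetical embedder hook: returns true if it may have freed memory.
static bool OnCriticalMemoryPressureHook(size_t /*length*/) { return true; }

// Same shape as AllocWithRetry: retry after asking the embedder to free
// memory, and leave the out-of-memory decision to the caller.
void* AllocWithRetrySketch(size_t size) {
  void* result = nullptr;
  for (int i = 0; i < kAllocationTries; ++i) {
    result = std::malloc(size);
    if (result != nullptr) break;                    // success
    if (!OnCriticalMemoryPressureHook(size)) break;  // nothing to free
  }
  return result;  // may be nullptr; caller decides whether that is fatal
}

int main() {
  void* p = AllocWithRetrySketch(64);
  std::free(p);
}
```

Keeping the loop itself non-fatal is what lets Malloced::New and AlignedAlloc layer their own out-of-memory behavior on top of it.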
diff --git a/deps/v8/src/allocation.h b/deps/v8/src/allocation.h
index 668a0e6037..9bb47c8f05 100644
--- a/deps/v8/src/allocation.h
+++ b/deps/v8/src/allocation.h
@@ -72,14 +72,68 @@ class FreeStoreAllocationPolicy {
INLINE(static void Delete(void* p)) { Malloced::Delete(p); }
};
+// Performs a malloc, with retry logic on failure. Returns nullptr on failure.
+// Call free to release memory allocated with this function.
+void* AllocWithRetry(size_t size);
void* AlignedAlloc(size_t size, size_t alignment);
void AlignedFree(void *ptr);
-// Allocates a single system memory page with read/write permissions. The
-// address parameter is a hint. Returns the base address of the memory, or null
-// on failure. Permissions can be changed on the base address.
-byte* AllocateSystemPage(void* address, size_t* allocated);
+// Gets the page granularity for AllocatePages and FreePages. Addresses returned
+// by AllocatePages and AllocatePage are aligned to this size.
+V8_EXPORT_PRIVATE size_t AllocatePageSize();
+
+// Gets the granularity at which the permissions and release calls can be made.
+V8_EXPORT_PRIVATE size_t CommitPageSize();
+
+// Sets the random seed so that GetRandomMmapAddr() will generate repeatable
+// sequences of random mmap addresses.
+V8_EXPORT_PRIVATE void SetRandomMmapSeed(int64_t seed);
+
+// Generate a random address to be used for hinting allocation calls.
+V8_EXPORT_PRIVATE void* GetRandomMmapAddr();
+
+// Allocates memory. Permissions are set according to the access argument.
+// |address| is a hint. |size| and |alignment| must be multiples of
+// AllocatePageSize(). Returns the address of the allocated memory, with the
+// specified size and alignment, or nullptr on failure.
+V8_EXPORT_PRIVATE
+V8_WARN_UNUSED_RESULT void* AllocatePages(void* address, size_t size,
+ size_t alignment,
+ PageAllocator::Permission access);
+
+// Frees memory allocated by a call to AllocatePages. |address| and |size| must
+// be multiples of AllocatePageSize(). Returns true on success, otherwise false.
+V8_EXPORT_PRIVATE
+V8_WARN_UNUSED_RESULT bool FreePages(void* address, const size_t size);
+
+// Releases memory that is no longer needed. The range specified by |address|
+// and |size| must be an allocated memory region. |size| and |new_size| must be
+// multiples of CommitPageSize(). Memory from |new_size| to |size| is released.
+// Released memory is left in an undefined state, so it should not be accessed.
+// Returns true on success, otherwise false.
+V8_EXPORT_PRIVATE
+V8_WARN_UNUSED_RESULT bool ReleasePages(void* address, size_t size,
+ size_t new_size);
+
+// Sets permissions according to |access|. |address| and |size| must be
+// multiples of CommitPageSize(). Setting permission to kNoAccess may
+// cause the memory contents to be lost. Returns true on success, otherwise
+// false.
+V8_EXPORT_PRIVATE
+V8_WARN_UNUSED_RESULT bool SetPermissions(void* address, size_t size,
+ PageAllocator::Permission access);
+
+// Convenience function that allocates a single system page with read and write
+// permissions. |address| is a hint. Returns the base address of the memory and
+// the page size via |allocated| on success. Returns nullptr on failure.
+V8_EXPORT_PRIVATE
+V8_WARN_UNUSED_RESULT byte* AllocatePage(void* address, size_t* allocated);
+
+// Function that may release reserved memory regions to allow failed allocations
+// to succeed. |length| is the amount of memory needed. Returns |true| if memory
+// could be released, false otherwise.
+V8_EXPORT_PRIVATE bool OnCriticalMemoryPressure(size_t length);
// Represents and controls an area of reserved memory.
class V8_EXPORT_PRIVATE VirtualMemory {
@@ -90,8 +144,7 @@ class V8_EXPORT_PRIVATE VirtualMemory {
// Reserves virtual memory containing an area of the given size that is
// aligned per alignment. This may not be at the position returned by
// address().
- VirtualMemory(size_t size, void* hint,
- size_t alignment = base::OS::AllocatePageSize());
+ VirtualMemory(size_t size, void* hint, size_t alignment = AllocatePageSize());
// Construct a virtual memory by assigning it some already mapped address
// and size.
@@ -131,7 +184,7 @@ class V8_EXPORT_PRIVATE VirtualMemory {
// Sets permissions according to the access argument. address and size must be
// multiples of CommitPageSize(). Returns true on success, otherwise false.
bool SetPermissions(void* address, size_t size,
- base::OS::MemoryPermission access);
+ PageAllocator::Permission access);
// Releases memory after |free_start|. Returns the number of bytes released.
size_t Release(void* free_start);
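The new free functions in allocation.h describe a reserve, commit, decommit, free life cycle on top of whatever v8::PageAllocator the embedder supplies. On POSIX systems the same life cycle can be sketched directly with mmap, mprotect, and munmap; this is only an analogy for illustration, since the in-tree implementation goes through GetPageAllocator().

```
#include <sys/mman.h>
#include <unistd.h>
#include <cstdio>

// POSIX-only sketch of the reserve -> commit -> decommit -> free life cycle
// that AllocatePages / SetPermissions / FreePages expose inside V8.
int main() {
  size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  size_t size = 4 * page;

  // "AllocatePages(..., kNoAccess)": reserve address space, no permissions.
  void* base =
      mmap(nullptr, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED) return 1;

  // "SetPermissions(..., kReadWrite)": commit one page for actual use.
  if (mprotect(base, page, PROT_READ | PROT_WRITE) != 0) return 1;
  static_cast<char*>(base)[0] = 42;

  // "SetPermissions(..., kNoAccess)": decommit; contents may be lost.
  mprotect(base, page, PROT_NONE);

  // "FreePages": return the whole reservation.
  munmap(base, size);
  std::printf("page size: %zu\n", page);
}
```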
diff --git a/deps/v8/src/api-arguments-inl.h b/deps/v8/src/api-arguments-inl.h
index 4035e715c1..b8336f97c4 100644
--- a/deps/v8/src/api-arguments-inl.h
+++ b/deps/v8/src/api-arguments-inl.h
@@ -13,146 +13,248 @@
namespace v8 {
namespace internal {
-#define SIDE_EFFECT_CHECK(ISOLATE, F, RETURN_TYPE) \
- do { \
- if (ISOLATE->needs_side_effect_check() && \
- !PerformSideEffectCheck(ISOLATE, FUNCTION_ADDR(F))) { \
- return Handle<RETURN_TYPE>(); \
- } \
- } while (false)
-
-#define FOR_EACH_CALLBACK_TABLE_MAPPING_1_NAME(F) \
- F(AccessorNameGetterCallback, "get", v8::Value, Object) \
- F(GenericNamedPropertyQueryCallback, "has", v8::Integer, Object) \
- F(GenericNamedPropertyDeleterCallback, "delete", v8::Boolean, Object)
-
-#define WRITE_CALL_1_NAME(Function, type, ApiReturn, InternalReturn) \
- Handle<InternalReturn> PropertyCallbackArguments::Call(Function f, \
- Handle<Name> name) { \
+#define FOR_EACH_CALLBACK(F) \
+ F(Query, query, Object, v8::Integer) \
+ F(Deleter, deleter, Object, v8::Boolean)
+
+#define PREPARE_CALLBACK_INFO(ISOLATE, F, RETURN_VALUE, API_RETURN_TYPE) \
+ if (ISOLATE->needs_side_effect_check() && \
+ !PerformSideEffectCheck(ISOLATE, FUNCTION_ADDR(F))) { \
+ return RETURN_VALUE(); \
+ } \
+ VMState<EXTERNAL> state(ISOLATE); \
+ ExternalCallbackScope call_scope(ISOLATE, FUNCTION_ADDR(F)); \
+ PropertyCallbackInfo<API_RETURN_TYPE> callback_info(begin());
+
+#define CREATE_NAMED_CALLBACK(Function, type, ReturnType, ApiReturnType) \
+ Handle<ReturnType> PropertyCallbackArguments::CallNamed##Function( \
+ Handle<InterceptorInfo> interceptor, Handle<Name> name) { \
+ DCHECK(interceptor->is_named()); \
+ DCHECK(!name->IsPrivate()); \
+ DCHECK_IMPLIES(name->IsSymbol(), interceptor->can_intercept_symbols()); \
Isolate* isolate = this->isolate(); \
- SIDE_EFFECT_CHECK(isolate, f, InternalReturn); \
- RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Function); \
- VMState<EXTERNAL> state(isolate); \
- ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
- PropertyCallbackInfo<ApiReturn> info(begin()); \
+ RuntimeCallTimerScope timer( \
+ isolate, RuntimeCallCounterId::kNamed##Function##Callback); \
+ DCHECK(!name->IsPrivate()); \
+ GenericNamedProperty##Function##Callback f = \
+ ToCData<GenericNamedProperty##Function##Callback>( \
+ interceptor->type()); \
+ PREPARE_CALLBACK_INFO(isolate, f, Handle<ReturnType>, ApiReturnType); \
LOG(isolate, \
- ApiNamedPropertyAccess("interceptor-named-" type, holder(), *name)); \
- f(v8::Utils::ToLocal(name), info); \
- return GetReturnValue<InternalReturn>(isolate); \
+ ApiNamedPropertyAccess("interceptor-named-" #type, holder(), *name)); \
+ f(v8::Utils::ToLocal(name), callback_info); \
+ return GetReturnValue<ReturnType>(isolate); \
}
-FOR_EACH_CALLBACK_TABLE_MAPPING_1_NAME(WRITE_CALL_1_NAME)
-
-#undef FOR_EACH_CALLBACK_TABLE_MAPPING_1_NAME
-#undef WRITE_CALL_1_NAME
+FOR_EACH_CALLBACK(CREATE_NAMED_CALLBACK)
+#undef CREATE_NAMED_CALLBACK
-#define FOR_EACH_CALLBACK_TABLE_MAPPING_1_INDEX(F) \
- F(IndexedPropertyGetterCallback, "get", v8::Value, Object) \
- F(IndexedPropertyQueryCallback, "has", v8::Integer, Object) \
- F(IndexedPropertyDeleterCallback, "delete", v8::Boolean, Object)
-
-#define WRITE_CALL_1_INDEX(Function, type, ApiReturn, InternalReturn) \
- Handle<InternalReturn> PropertyCallbackArguments::Call(Function f, \
- uint32_t index) { \
+#define CREATE_INDEXED_CALLBACK(Function, type, ReturnType, ApiReturnType) \
+ Handle<ReturnType> PropertyCallbackArguments::CallIndexed##Function( \
+ Handle<InterceptorInfo> interceptor, uint32_t index) { \
+ DCHECK(!interceptor->is_named()); \
Isolate* isolate = this->isolate(); \
- SIDE_EFFECT_CHECK(isolate, f, InternalReturn); \
- RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Function); \
- VMState<EXTERNAL> state(isolate); \
- ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
- PropertyCallbackInfo<ApiReturn> info(begin()); \
- LOG(isolate, ApiIndexedPropertyAccess("interceptor-indexed-" type, \
+ RuntimeCallTimerScope timer( \
+ isolate, RuntimeCallCounterId::kIndexed##Function##Callback); \
+ IndexedProperty##Function##Callback f = \
+ ToCData<IndexedProperty##Function##Callback>(interceptor->type()); \
+ PREPARE_CALLBACK_INFO(isolate, f, Handle<ReturnType>, ApiReturnType); \
+ LOG(isolate, ApiIndexedPropertyAccess("interceptor-indexed-" #type, \
holder(), index)); \
- f(index, info); \
- return GetReturnValue<InternalReturn>(isolate); \
+ f(index, callback_info); \
+ return GetReturnValue<ReturnType>(isolate); \
}
-FOR_EACH_CALLBACK_TABLE_MAPPING_1_INDEX(WRITE_CALL_1_INDEX)
+FOR_EACH_CALLBACK(CREATE_INDEXED_CALLBACK)
+
+#undef FOR_EACH_CALLBACK
+#undef CREATE_INDEXED_CALLBACK
+
+Handle<Object> PropertyCallbackArguments::CallNamedGetter(
+ Handle<InterceptorInfo> interceptor, Handle<Name> name) {
+ DCHECK(interceptor->is_named());
+ DCHECK_IMPLIES(name->IsSymbol(), interceptor->can_intercept_symbols());
+ DCHECK(!name->IsPrivate());
+ Isolate* isolate = this->isolate();
+ RuntimeCallTimerScope timer(isolate,
+ RuntimeCallCounterId::kNamedGetterCallback);
+ LOG(isolate,
+ ApiNamedPropertyAccess("interceptor-named-getter", holder(), *name));
+ GenericNamedPropertyGetterCallback f =
+ ToCData<GenericNamedPropertyGetterCallback>(interceptor->getter());
+ return BasicCallNamedGetterCallback(f, name);
+}
+
+Handle<Object> PropertyCallbackArguments::CallNamedDescriptor(
+ Handle<InterceptorInfo> interceptor, Handle<Name> name) {
+ DCHECK(interceptor->is_named());
+ DCHECK_IMPLIES(name->IsSymbol(), interceptor->can_intercept_symbols());
+ Isolate* isolate = this->isolate();
+ RuntimeCallTimerScope timer(isolate,
+ RuntimeCallCounterId::kNamedDescriptorCallback);
+ LOG(isolate,
+ ApiNamedPropertyAccess("interceptor-named-descriptor", holder(), *name));
+ GenericNamedPropertyDescriptorCallback f =
+ ToCData<GenericNamedPropertyDescriptorCallback>(
+ interceptor->descriptor());
+ return BasicCallNamedGetterCallback(f, name);
+}
+
+Handle<Object> PropertyCallbackArguments::BasicCallNamedGetterCallback(
+ GenericNamedPropertyGetterCallback f, Handle<Name> name) {
+ DCHECK(!name->IsPrivate());
+ Isolate* isolate = this->isolate();
+ PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value);
+ f(v8::Utils::ToLocal(name), callback_info);
+ return GetReturnValue<Object>(isolate);
+}
-#undef FOR_EACH_CALLBACK_TABLE_MAPPING_1_INDEX
-#undef WRITE_CALL_1_INDEX
+Handle<Object> PropertyCallbackArguments::CallNamedSetter(
+ Handle<InterceptorInfo> interceptor, Handle<Name> name,
+ Handle<Object> value) {
+ DCHECK_IMPLIES(name->IsSymbol(), interceptor->can_intercept_symbols());
+ GenericNamedPropertySetterCallback f =
+ ToCData<GenericNamedPropertySetterCallback>(interceptor->setter());
+ return CallNamedSetterCallback(f, name, value);
+}
-Handle<Object> PropertyCallbackArguments::Call(
+Handle<Object> PropertyCallbackArguments::CallNamedSetterCallback(
GenericNamedPropertySetterCallback f, Handle<Name> name,
Handle<Object> value) {
+ DCHECK(!name->IsPrivate());
Isolate* isolate = this->isolate();
- SIDE_EFFECT_CHECK(isolate, f, Object);
- RuntimeCallTimerScope timer(
- isolate, &RuntimeCallStats::GenericNamedPropertySetterCallback);
- VMState<EXTERNAL> state(isolate);
- ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
- PropertyCallbackInfo<v8::Value> info(begin());
+ RuntimeCallTimerScope timer(isolate,
+ RuntimeCallCounterId::kNamedSetterCallback);
+ PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value);
LOG(isolate,
ApiNamedPropertyAccess("interceptor-named-set", holder(), *name));
- f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), info);
+ f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), callback_info);
return GetReturnValue<Object>(isolate);
}
-Handle<Object> PropertyCallbackArguments::Call(
- GenericNamedPropertyDefinerCallback f, Handle<Name> name,
+Handle<Object> PropertyCallbackArguments::CallNamedDefiner(
+ Handle<InterceptorInfo> interceptor, Handle<Name> name,
const v8::PropertyDescriptor& desc) {
+ DCHECK(interceptor->is_named());
+ DCHECK(!name->IsPrivate());
+ DCHECK_IMPLIES(name->IsSymbol(), interceptor->can_intercept_symbols());
Isolate* isolate = this->isolate();
- SIDE_EFFECT_CHECK(isolate, f, Object);
- RuntimeCallTimerScope timer(
- isolate, &RuntimeCallStats::GenericNamedPropertyDefinerCallback);
- VMState<EXTERNAL> state(isolate);
- ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
- PropertyCallbackInfo<v8::Value> info(begin());
+ RuntimeCallTimerScope timer(isolate,
+ RuntimeCallCounterId::kNamedDefinerCallback);
+ GenericNamedPropertyDefinerCallback f =
+ ToCData<GenericNamedPropertyDefinerCallback>(interceptor->definer());
+ PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value);
LOG(isolate,
ApiNamedPropertyAccess("interceptor-named-define", holder(), *name));
- f(v8::Utils::ToLocal(name), desc, info);
+ f(v8::Utils::ToLocal(name), desc, callback_info);
return GetReturnValue<Object>(isolate);
}
-Handle<Object> PropertyCallbackArguments::Call(IndexedPropertySetterCallback f,
- uint32_t index,
- Handle<Object> value) {
+Handle<Object> PropertyCallbackArguments::CallIndexedSetter(
+ Handle<InterceptorInfo> interceptor, uint32_t index, Handle<Object> value) {
+ DCHECK(!interceptor->is_named());
Isolate* isolate = this->isolate();
- SIDE_EFFECT_CHECK(isolate, f, Object);
RuntimeCallTimerScope timer(isolate,
- &RuntimeCallStats::IndexedPropertySetterCallback);
- VMState<EXTERNAL> state(isolate);
- ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
- PropertyCallbackInfo<v8::Value> info(begin());
+ RuntimeCallCounterId::kIndexedSetterCallback);
+ IndexedPropertySetterCallback f =
+ ToCData<IndexedPropertySetterCallback>(interceptor->setter());
+ PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value);
LOG(isolate,
ApiIndexedPropertyAccess("interceptor-indexed-set", holder(), index));
- f(index, v8::Utils::ToLocal(value), info);
+ f(index, v8::Utils::ToLocal(value), callback_info);
return GetReturnValue<Object>(isolate);
}
-Handle<Object> PropertyCallbackArguments::Call(
- IndexedPropertyDefinerCallback f, uint32_t index,
+Handle<Object> PropertyCallbackArguments::CallIndexedDefiner(
+ Handle<InterceptorInfo> interceptor, uint32_t index,
const v8::PropertyDescriptor& desc) {
+ DCHECK(!interceptor->is_named());
Isolate* isolate = this->isolate();
- SIDE_EFFECT_CHECK(isolate, f, Object);
- RuntimeCallTimerScope timer(
- isolate, &RuntimeCallStats::IndexedPropertyDefinerCallback);
- VMState<EXTERNAL> state(isolate);
- ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
- PropertyCallbackInfo<v8::Value> info(begin());
+ RuntimeCallTimerScope timer(isolate,
+ RuntimeCallCounterId::kIndexedDefinerCallback);
+ IndexedPropertyDefinerCallback f =
+ ToCData<IndexedPropertyDefinerCallback>(interceptor->definer());
+ PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value);
LOG(isolate,
ApiIndexedPropertyAccess("interceptor-indexed-define", holder(), index));
- f(index, desc, info);
+ f(index, desc, callback_info);
return GetReturnValue<Object>(isolate);
}
-void PropertyCallbackArguments::Call(AccessorNameSetterCallback f,
- Handle<Name> name, Handle<Object> value) {
+Handle<Object> PropertyCallbackArguments::CallIndexedGetter(
+ Handle<InterceptorInfo> interceptor, uint32_t index) {
+ DCHECK(!interceptor->is_named());
Isolate* isolate = this->isolate();
- if (isolate->needs_side_effect_check() &&
- !PerformSideEffectCheck(isolate, FUNCTION_ADDR(f))) {
- return;
- }
RuntimeCallTimerScope timer(isolate,
- &RuntimeCallStats::AccessorNameSetterCallback);
- VMState<EXTERNAL> state(isolate);
- ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
- PropertyCallbackInfo<void> info(begin());
+ RuntimeCallCounterId::kNamedGetterCallback);
LOG(isolate,
- ApiNamedPropertyAccess("interceptor-named-set", holder(), *name));
- f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), info);
+ ApiIndexedPropertyAccess("interceptor-indexed-getter", holder(), index));
+ IndexedPropertyGetterCallback f =
+ ToCData<IndexedPropertyGetterCallback>(interceptor->getter());
+ return BasicCallIndexedGetterCallback(f, index);
+}
+
+Handle<Object> PropertyCallbackArguments::CallIndexedDescriptor(
+ Handle<InterceptorInfo> interceptor, uint32_t index) {
+ DCHECK(!interceptor->is_named());
+ Isolate* isolate = this->isolate();
+ RuntimeCallTimerScope timer(isolate,
+ RuntimeCallCounterId::kIndexedDescriptorCallback);
+ LOG(isolate, ApiIndexedPropertyAccess("interceptor-indexed-descriptor",
+ holder(), index));
+ IndexedPropertyDescriptorCallback f =
+ ToCData<IndexedPropertyDescriptorCallback>(interceptor->descriptor());
+ return BasicCallIndexedGetterCallback(f, index);
+}
+
+Handle<Object> PropertyCallbackArguments::BasicCallIndexedGetterCallback(
+ IndexedPropertyGetterCallback f, uint32_t index) {
+ Isolate* isolate = this->isolate();
+ PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value);
+ f(index, callback_info);
+ return GetReturnValue<Object>(isolate);
+}
+
+Handle<JSObject> PropertyCallbackArguments::CallPropertyEnumerator(
+ Handle<InterceptorInfo> interceptor) {
+ // For now there is a single enumerator for indexed and named properties.
+ IndexedPropertyEnumeratorCallback f =
+ v8::ToCData<IndexedPropertyEnumeratorCallback>(interceptor->enumerator());
+ // TODO(cbruni): assert same type for indexed and named callback.
+ Isolate* isolate = this->isolate();
+ PREPARE_CALLBACK_INFO(isolate, f, Handle<JSObject>, v8::Array);
+ f(callback_info);
+ return GetReturnValue<JSObject>(isolate);
+}
+
+// -------------------------------------------------------------------------
+// Accessors
+
+Handle<Object> PropertyCallbackArguments::CallAccessorGetter(
+ Handle<AccessorInfo> info, Handle<Name> name) {
+ Isolate* isolate = this->isolate();
+ RuntimeCallTimerScope timer(isolate,
+ RuntimeCallCounterId::kAccessorGetterCallback);
+ LOG(isolate, ApiNamedPropertyAccess("accessor-getter", holder(), *name));
+ AccessorNameGetterCallback f =
+ ToCData<AccessorNameGetterCallback>(info->getter());
+ return BasicCallNamedGetterCallback(f, name);
+}
+
+void PropertyCallbackArguments::CallAccessorSetter(
+ Handle<AccessorInfo> accessor_info, Handle<Name> name,
+ Handle<Object> value) {
+ Isolate* isolate = this->isolate();
+ RuntimeCallTimerScope timer(isolate,
+ RuntimeCallCounterId::kAccessorSetterCallback);
+ AccessorNameSetterCallback f =
+ ToCData<AccessorNameSetterCallback>(accessor_info->setter());
+ PREPARE_CALLBACK_INFO(isolate, f, void, void);
+ LOG(isolate, ApiNamedPropertyAccess("accessor-setter", holder(), *name));
+ f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), callback_info);
}
-#undef SIDE_EFFECT_CHECK
+#undef PREPARE_CALLBACK_INFO
} // namespace internal
} // namespace v8
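The refactored api-arguments-inl.h drives its Query and Deleter wrappers from a single FOR_EACH_CALLBACK list, a classic X-macro: one row per callback kind, stamped through a generator macro so named and indexed variants cannot drift apart. A toy, self-contained illustration of the technique; the demo callback names and signatures are invented, not V8's.

```
#include <cstdio>

// One row per callback kind, stamped through different generator macros,
// the same X-macro technique as FOR_EACH_CALLBACK in api-arguments-inl.h.
#define FOR_EACH_DEMO_CALLBACK(F) \
  F(Query, int)                   \
  F(Deleter, bool)

// Generator 1: declare a function per row.
#define DECLARE_DEMO(Name, Ret) Ret CallNamed##Name(const char* property);
FOR_EACH_DEMO_CALLBACK(DECLARE_DEMO)
#undef DECLARE_DEMO

// Generator 2: define each function with the shared boilerplate.
#define DEFINE_DEMO(Name, Ret)                                  \
  Ret CallNamed##Name(const char* property) {                   \
    std::printf("interceptor-named-" #Name ": %s\n", property); \
    return Ret{};                                               \
  }
FOR_EACH_DEMO_CALLBACK(DEFINE_DEMO)
#undef DEFINE_DEMO

int main() {
  CallNamedQuery("length");
  CallNamedDeleter("length");
}
```

Adding a new callback kind then means adding one row to the list, and every generator picks it up automatically.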
diff --git a/deps/v8/src/api-arguments.cc b/deps/v8/src/api-arguments.cc
index c7c54e5de1..1302e32b66 100644
--- a/deps/v8/src/api-arguments.cc
+++ b/deps/v8/src/api-arguments.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/api-arguments.h"
+#include "src/api-arguments-inl.h"
#include "src/debug/debug.h"
#include "src/objects-inl.h"
@@ -18,7 +19,7 @@ Handle<Object> FunctionCallbackArguments::Call(FunctionCallback f) {
!isolate->debug()->PerformSideEffectCheckForCallback(FUNCTION_ADDR(f))) {
return Handle<Object>();
}
- RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::FunctionCallback);
+ RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::kFunctionCallback);
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
FunctionCallbackInfo<v8::Value> info(begin(), argv_, argc_);
@@ -26,19 +27,22 @@ Handle<Object> FunctionCallbackArguments::Call(FunctionCallback f) {
return GetReturnValue<Object>(isolate);
}
-Handle<JSObject> PropertyCallbackArguments::Call(
- IndexedPropertyEnumeratorCallback f) {
- Isolate* isolate = this->isolate();
- if (isolate->needs_side_effect_check() &&
- !isolate->debug()->PerformSideEffectCheckForCallback(FUNCTION_ADDR(f))) {
- return Handle<JSObject>();
- }
- RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::PropertyCallback);
- VMState<EXTERNAL> state(isolate);
- ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
- PropertyCallbackInfo<v8::Array> info(begin());
- f(info);
- return GetReturnValue<JSObject>(isolate);
+Handle<JSObject> PropertyCallbackArguments::CallNamedEnumerator(
+ Handle<InterceptorInfo> interceptor) {
+ DCHECK(interceptor->is_named());
+ LOG(isolate(), ApiObjectAccess("interceptor-named-enumerator", holder()));
+ RuntimeCallTimerScope timer(isolate(),
+ RuntimeCallCounterId::kNamedEnumeratorCallback);
+ return CallPropertyEnumerator(interceptor);
+}
+
+Handle<JSObject> PropertyCallbackArguments::CallIndexedEnumerator(
+ Handle<InterceptorInfo> interceptor) {
+ DCHECK(!interceptor->is_named());
+ LOG(isolate(), ApiObjectAccess("interceptor-indexed-enumerator", holder()));
+ RuntimeCallTimerScope timer(isolate(),
+ RuntimeCallCounterId::kIndexedEnumeratorCallback);
+ return CallPropertyEnumerator(interceptor);
}
bool PropertyCallbackArguments::PerformSideEffectCheck(Isolate* isolate,
diff --git a/deps/v8/src/api-arguments.h b/deps/v8/src/api-arguments.h
index 179d787941..42d58b88a8 100644
--- a/deps/v8/src/api-arguments.h
+++ b/deps/v8/src/api-arguments.h
@@ -99,6 +99,54 @@ class PropertyCallbackArguments
DCHECK(values[T::kIsolateIndex]->IsSmi());
}
+ // -------------------------------------------------------------------------
+ // Accessor Callbacks
+ // Also used for AccessorSetterCallback.
+ inline void CallAccessorSetter(Handle<AccessorInfo> info, Handle<Name> name,
+ Handle<Object> value);
+ // Also used for AccessorGetterCallback, AccessorNameGetterCallback.
+ inline Handle<Object> CallAccessorGetter(Handle<AccessorInfo> info,
+ Handle<Name> name);
+
+ // -------------------------------------------------------------------------
+ // Named Interceptor Callbacks
+ inline Handle<Object> CallNamedQuery(Handle<InterceptorInfo> interceptor,
+ Handle<Name> name);
+ inline Handle<Object> CallNamedGetter(Handle<InterceptorInfo> interceptor,
+ Handle<Name> name);
+ inline Handle<Object> CallNamedSetter(Handle<InterceptorInfo> interceptor,
+ Handle<Name> name,
+ Handle<Object> value);
+ inline Handle<Object> CallNamedSetterCallback(
+ GenericNamedPropertySetterCallback callback, Handle<Name> name,
+ Handle<Object> value);
+ inline Handle<Object> CallNamedDefiner(Handle<InterceptorInfo> interceptor,
+ Handle<Name> name,
+ const v8::PropertyDescriptor& desc);
+ inline Handle<Object> CallNamedDeleter(Handle<InterceptorInfo> interceptor,
+ Handle<Name> name);
+ inline Handle<Object> CallNamedDescriptor(Handle<InterceptorInfo> interceptor,
+ Handle<Name> name);
+ Handle<JSObject> CallNamedEnumerator(Handle<InterceptorInfo> interceptor);
+
+ // -------------------------------------------------------------------------
+ // Indexed Interceptor Callbacks
+ inline Handle<Object> CallIndexedQuery(Handle<InterceptorInfo> interceptor,
+ uint32_t index);
+ inline Handle<Object> CallIndexedGetter(Handle<InterceptorInfo> interceptor,
+ uint32_t index);
+ inline Handle<Object> CallIndexedSetter(Handle<InterceptorInfo> interceptor,
+ uint32_t index, Handle<Object> value);
+ inline Handle<Object> CallIndexedDefiner(Handle<InterceptorInfo> interceptor,
+ uint32_t index,
+ const v8::PropertyDescriptor& desc);
+ inline Handle<Object> CallIndexedDeleter(Handle<InterceptorInfo> interceptor,
+ uint32_t index);
+ inline Handle<Object> CallIndexedDescriptor(
+ Handle<InterceptorInfo> interceptor, uint32_t index);
+ Handle<JSObject> CallIndexedEnumerator(Handle<InterceptorInfo> interceptor);
+
+ private:
/*
* The following Call functions wrap the calling of all callbacks to handle
* calling either the old or the new style callbacks depending on which one
@@ -107,35 +155,14 @@ class PropertyCallbackArguments
* and used if it's been set to anything inside the callback.
* New style callbacks always use the return value.
*/
- Handle<JSObject> Call(IndexedPropertyEnumeratorCallback f);
-
- inline Handle<Object> Call(AccessorNameGetterCallback f, Handle<Name> name);
- inline Handle<Object> Call(GenericNamedPropertyQueryCallback f,
- Handle<Name> name);
- inline Handle<Object> Call(GenericNamedPropertyDeleterCallback f,
- Handle<Name> name);
-
- inline Handle<Object> Call(IndexedPropertyGetterCallback f, uint32_t index);
- inline Handle<Object> Call(IndexedPropertyQueryCallback f, uint32_t index);
- inline Handle<Object> Call(IndexedPropertyDeleterCallback f, uint32_t index);
-
- inline Handle<Object> Call(GenericNamedPropertySetterCallback f,
- Handle<Name> name, Handle<Object> value);
+ inline Handle<JSObject> CallPropertyEnumerator(
+ Handle<InterceptorInfo> interceptor);
- inline Handle<Object> Call(GenericNamedPropertyDefinerCallback f,
- Handle<Name> name,
- const v8::PropertyDescriptor& desc);
+ inline Handle<Object> BasicCallIndexedGetterCallback(
+ IndexedPropertyGetterCallback f, uint32_t index);
+ inline Handle<Object> BasicCallNamedGetterCallback(
+ GenericNamedPropertyGetterCallback f, Handle<Name> name);
- inline Handle<Object> Call(IndexedPropertySetterCallback f, uint32_t index,
- Handle<Object> value);
-
- inline Handle<Object> Call(IndexedPropertyDefinerCallback f, uint32_t index,
- const v8::PropertyDescriptor& desc);
-
- inline void Call(AccessorNameSetterCallback f, Handle<Name> name,
- Handle<Object> value);
-
- private:
inline JSObject* holder() {
return JSObject::cast(this->begin()[T::kHolderIndex]);
}
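In api-arguments.h the anonymous Call(...) overload set becomes named CallNamedX/CallIndexedX entry points that accept the InterceptorInfo itself and extract the raw callback internally (via ToCData in the real code), so a call site can no longer pair the wrong callback with an operation. A toy model of that design, with plain function pointers standing in for the V8 callback types.

```
#include <cstdio>

// Toy stand-in for InterceptorInfo: owns the raw callbacks.
struct InterceptorInfo {
  int (*getter)(const char* name);
  bool (*deleter)(const char* name);
};

// After the refactor: the entry point is named for what it does and pulls
// the matching callback out of the interceptor itself.
int CallNamedGetter(const InterceptorInfo& info, const char* name) {
  return info.getter(name);
}
bool CallNamedDeleter(const InterceptorInfo& info, const char* name) {
  return info.deleter(name);
}

int main() {
  InterceptorInfo info{
      [](const char* n) { std::printf("get %s\n", n); return 1; },
      [](const char* n) { std::printf("delete %s\n", n); return true; }};
  CallNamedGetter(info, "x");
  CallNamedDeleter(info, "x");
}
```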
diff --git a/deps/v8/src/api-natives.cc b/deps/v8/src/api-natives.cc
index 93698c9f52..b8f03a89a8 100644
--- a/deps/v8/src/api-natives.cc
+++ b/deps/v8/src/api-natives.cc
@@ -705,7 +705,7 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
// that is undetectable but not callable, we need to update the types.h
// to allow encoding this.
CHECK(!obj->instance_call_handler()->IsUndefined(isolate));
- map->set_is_undetectable();
+ map->set_is_undetectable(true);
}
// Mark as needs_access_check if needed.
@@ -716,20 +716,20 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
// Set interceptor information in the map.
if (!obj->named_property_handler()->IsUndefined(isolate)) {
- map->set_has_named_interceptor();
+ map->set_has_named_interceptor(true);
map->set_may_have_interesting_symbols(true);
}
if (!obj->indexed_property_handler()->IsUndefined(isolate)) {
- map->set_has_indexed_interceptor();
+ map->set_has_indexed_interceptor(true);
}
// Mark instance as callable in the map.
if (!obj->instance_call_handler()->IsUndefined(isolate)) {
- map->set_is_callable();
+ map->set_is_callable(true);
map->set_is_constructor(true);
}
- if (immutable_proto) map->set_immutable_proto(true);
+ if (immutable_proto) map->set_is_immutable_proto(true);
return result;
}
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index 856be6368b..147cc397f2 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -8,9 +8,6 @@
#ifdef V8_USE_ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#endif // V8_USE_ADDRESS_SANITIZER
-#if defined(LEAK_SANITIZER)
-#include <sanitizer/lsan_interface.h>
-#endif // defined(LEAK_SANITIZER)
#include <cmath> // For isnan.
#include <limits>
#include <vector>
@@ -84,6 +81,7 @@
#include "src/vm-state-inl.h"
#include "src/wasm/compilation-manager.h"
#include "src/wasm/streaming-decoder.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-result.h"
#include "src/wasm/wasm-serialization.h"
@@ -110,9 +108,9 @@ namespace v8 {
 * TODO(jochen): Remove calls from API methods to DO_NOT_USE macros.
*/
-#define LOG_API(isolate, class_name, function_name) \
- i::RuntimeCallTimerScope _runtime_timer( \
- isolate, &i::RuntimeCallStats::API_##class_name##_##function_name); \
+#define LOG_API(isolate, class_name, function_name) \
+ i::RuntimeCallTimerScope _runtime_timer( \
+ isolate, i::RuntimeCallCounterId::kAPI_##class_name##_##function_name); \
LOG(isolate, ApiEntryCall("v8::" #class_name "::" #function_name))
#define ENTER_V8_DO_NOT_USE(isolate) i::VMState<v8::OTHER> __state__((isolate))
@@ -326,9 +324,9 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool is_heap_oom) {
if (isolate == nullptr) {
// On a background thread -> we cannot retrieve memory information from the
// Isolate. Write easy-to-recognize values on the stack.
- memset(last_few_messages, 0x0badc0de, Heap::kTraceRingBufferSize + 1);
- memset(js_stacktrace, 0x0badc0de, Heap::kStacktraceBufferSize + 1);
- memset(&heap_stats, 0xbadc0de, sizeof(heap_stats));
+ memset(last_few_messages, 0x0BADC0DE, Heap::kTraceRingBufferSize + 1);
+ memset(js_stacktrace, 0x0BADC0DE, Heap::kStacktraceBufferSize + 1);
+ memset(&heap_stats, 0xBADC0DE, sizeof(heap_stats));
// Note that the embedder's oom handler won't be called in this case. We
// just crash.
FATAL("API fatal error handler returned after process out of memory");
@@ -404,7 +402,10 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool is_heap_oom) {
void Utils::ReportApiFailure(const char* location, const char* message) {
i::Isolate* isolate = i::Isolate::Current();
- FatalErrorCallback callback = isolate->exception_behavior();
+ FatalErrorCallback callback = nullptr;
+ if (isolate != nullptr) {
+ callback = isolate->exception_behavior();
+ }
if (callback == nullptr) {
base::OS::PrintError("\n#\n# Fatal error in %s\n# %s\n#\n\n", location,
message);
@@ -483,23 +484,34 @@ namespace {
class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
public:
virtual void* Allocate(size_t length) {
- void* data = AllocateUninitialized(length);
- return data == nullptr ? data : memset(data, 0, length);
+#if V8_OS_AIX && _LINUX_SOURCE_COMPAT
+  // Workaround for a GCC bug on AIX
+ // See: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79839
+ void* data = __linux_calloc(length, 1);
+#else
+ void* data = calloc(length, 1);
+#endif
+ return data;
}
- virtual void* AllocateUninitialized(size_t length) { return malloc(length); }
+
+ virtual void* AllocateUninitialized(size_t length) {
+#if V8_OS_AIX && _LINUX_SOURCE_COMPAT
+ // Work around for GCC bug on AIX
+ // See: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79839
+ void* data = __linux_malloc(length);
+#else
+ void* data = malloc(length);
+#endif
+ return data;
+ }
+
virtual void Free(void* data, size_t) { free(data); }
virtual void* Reserve(size_t length) {
- size_t page_size = base::OS::AllocatePageSize();
+ size_t page_size = i::AllocatePageSize();
size_t allocated = RoundUp(length, page_size);
- void* address =
- base::OS::Allocate(base::OS::GetRandomMmapAddr(), allocated, page_size,
- base::OS::MemoryPermission::kNoAccess);
-#if defined(LEAK_SANITIZER)
- if (address != nullptr) {
- __lsan_register_root_region(address, allocated);
- }
-#endif
+ void* address = i::AllocatePages(i::GetRandomMmapAddr(), allocated,
+ page_size, PageAllocator::kNoAccess);
return address;
}
@@ -510,7 +522,9 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
return Free(data, length);
}
case v8::ArrayBuffer::Allocator::AllocationMode::kReservation: {
- CHECK(base::OS::Free(data, length));
+ size_t page_size = i::AllocatePageSize();
+ size_t allocated = RoundUp(length, page_size);
+ CHECK(i::FreePages(data, allocated));
return;
}
}
@@ -521,11 +535,11 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
v8::ArrayBuffer::Allocator::Protection protection) {
DCHECK(protection == v8::ArrayBuffer::Allocator::Protection::kNoAccess ||
protection == v8::ArrayBuffer::Allocator::Protection::kReadWrite);
- base::OS::MemoryPermission permission =
+ PageAllocator::Permission permission =
(protection == v8::ArrayBuffer::Allocator::Protection::kReadWrite)
- ? base::OS::MemoryPermission::kReadWrite
- : base::OS::MemoryPermission::kNoAccess;
- CHECK(base::OS::SetPermissions(data, length, permission));
+ ? PageAllocator::kReadWrite
+ : PageAllocator::kNoAccess;
+ CHECK(i::SetPermissions(data, length, permission));
}
};
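The ArrayBufferAllocator change swaps memset(malloc(length), 0, length) for calloc(length, 1): the allocator can satisfy calloc from pages the OS already zeroed, so large zero-initialized buffers avoid a second pass over memory. A stripped-down sketch of just that trade; DemoAllocator is a simplified stand-in, not the v8::ArrayBuffer::Allocator interface.

```
#include <cstdlib>

// Simplified stand-in showing why Allocate moved from memset(malloc(...))
// to calloc: calloc can map fresh zero pages without writing to them,
// while memset always touches every byte.
struct DemoAllocator {
  void* Allocate(size_t length) { return std::calloc(length, 1); }
  void* AllocateUninitialized(size_t length) { return std::malloc(length); }
  void Free(void* data) { std::free(data); }
};

int main() {
  DemoAllocator a;
  void* zeroed = a.Allocate(1 << 20);            // guaranteed zero-filled
  void* raw = a.AllocateUninitialized(1 << 20);  // contents indeterminate
  a.Free(zeroed);
  a.Free(raw);
}
```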
@@ -562,7 +576,6 @@ struct SnapshotCreatorData {
: isolate_(isolate),
default_context_(),
contexts_(isolate),
- templates_(isolate),
created_(false) {}
static SnapshotCreatorData* cast(void* data) {
@@ -574,7 +587,6 @@ struct SnapshotCreatorData {
Persistent<Context> default_context_;
SerializeInternalFieldsCallback default_embedder_fields_serializer_;
PersistentValueVector<Context> contexts_;
- PersistentValueVector<Template> templates_;
std::vector<SerializeInternalFieldsCallback> embedder_fields_serializers_;
bool created_;
};
@@ -634,23 +646,81 @@ size_t SnapshotCreator::AddContext(Local<Context> context,
DCHECK(!data->created_);
Isolate* isolate = data->isolate_;
CHECK_EQ(isolate, context->GetIsolate());
- size_t index = static_cast<int>(data->contexts_.Size());
+ size_t index = data->contexts_.Size();
data->contexts_.Append(context);
data->embedder_fields_serializers_.push_back(callback);
return index;
}
size_t SnapshotCreator::AddTemplate(Local<Template> template_obj) {
- DCHECK(!template_obj.IsEmpty());
+ return AddData(template_obj);
+}
+
+size_t SnapshotCreator::AddData(i::Object* object) {
+ DCHECK_NOT_NULL(object);
SnapshotCreatorData* data = SnapshotCreatorData::cast(data_);
DCHECK(!data->created_);
- DCHECK_EQ(reinterpret_cast<i::Isolate*>(data->isolate_),
- Utils::OpenHandle(*template_obj)->GetIsolate());
- size_t index = static_cast<int>(data->templates_.Size());
- data->templates_.Append(template_obj);
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(data->isolate_);
+ i::HandleScope scope(isolate);
+ i::Handle<i::Object> obj(object, isolate);
+ i::Handle<i::ArrayList> list;
+ if (!isolate->heap()->serialized_objects()->IsArrayList()) {
+ list = i::ArrayList::New(isolate, 1);
+ } else {
+ list = i::Handle<i::ArrayList>(
+ i::ArrayList::cast(isolate->heap()->serialized_objects()));
+ }
+ size_t index = static_cast<size_t>(list->Length());
+ list = i::ArrayList::Add(list, obj);
+ isolate->heap()->SetSerializedObjects(*list);
return index;
}
+size_t SnapshotCreator::AddData(Local<Context> context, i::Object* object) {
+ DCHECK_NOT_NULL(object);
+ DCHECK(!SnapshotCreatorData::cast(data_)->created_);
+ i::Handle<i::Context> ctx = Utils::OpenHandle(*context);
+ i::Isolate* isolate = ctx->GetIsolate();
+ i::HandleScope scope(isolate);
+ i::Handle<i::Object> obj(object, isolate);
+ i::Handle<i::ArrayList> list;
+ if (!ctx->serialized_objects()->IsArrayList()) {
+ list = i::ArrayList::New(isolate, 1);
+ } else {
+ list =
+ i::Handle<i::ArrayList>(i::ArrayList::cast(ctx->serialized_objects()));
+ }
+ size_t index = static_cast<size_t>(list->Length());
+ list = i::ArrayList::Add(list, obj);
+ ctx->set_serialized_objects(*list);
+ return index;
+}
+
+namespace {
+void ConvertSerializedObjectsToFixedArray(Local<Context> context) {
+ i::Handle<i::Context> ctx = Utils::OpenHandle(*context);
+ i::Isolate* isolate = ctx->GetIsolate();
+ if (!ctx->serialized_objects()->IsArrayList()) {
+ ctx->set_serialized_objects(isolate->heap()->empty_fixed_array());
+ } else {
+ i::Handle<i::ArrayList> list(i::ArrayList::cast(ctx->serialized_objects()));
+ i::Handle<i::FixedArray> elements = i::ArrayList::Elements(list);
+ ctx->set_serialized_objects(*elements);
+ }
+}
+
+void ConvertSerializedObjectsToFixedArray(i::Isolate* isolate) {
+ if (!isolate->heap()->serialized_objects()->IsArrayList()) {
+ isolate->heap()->SetSerializedObjects(isolate->heap()->empty_fixed_array());
+ } else {
+ i::Handle<i::ArrayList> list(
+ i::ArrayList::cast(isolate->heap()->serialized_objects()));
+ i::Handle<i::FixedArray> elements = i::ArrayList::Elements(list);
+ isolate->heap()->SetSerializedObjects(*elements);
+ }
+}
+} // anonymous namespace
+
StartupData SnapshotCreator::CreateBlob(
SnapshotCreator::FunctionCodeHandling function_code_handling) {
SnapshotCreatorData* data = SnapshotCreatorData::cast(data_);
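SnapshotCreator::AddData replaces the dedicated templates_ list with a generic ArrayList on the heap (one per isolate and one per context) whose current length becomes the index handed back to the embedder; ConvertSerializedObjectsToFixedArray then freezes each list into a FixedArray before serialization. The append-and-return-index contract in a few lines of plain C++, with std::vector standing in for ArrayList.

```
#include <cstdio>
#include <string>
#include <vector>

// std::vector standing in for V8's ArrayList: AddData appends and hands the
// caller the index it will use to fetch the object back after
// deserialization.
static std::vector<std::string> serialized_objects;

size_t AddData(std::string object) {
  size_t index = serialized_objects.size();
  serialized_objects.push_back(std::move(object));
  return index;
}

int main() {
  size_t a = AddData("template A");
  size_t b = AddData("template B");
  // Indices are stable and densely packed, like the FixedArray the
  // ArrayList is converted to in CreateBlob.
  std::printf("%zu -> %s\n", a, serialized_objects[a].c_str());
  std::printf("%zu -> %s\n", b, serialized_objects[b].c_str());
}
```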
@@ -661,15 +731,16 @@ StartupData SnapshotCreator::CreateBlob(
int num_additional_contexts = static_cast<int>(data->contexts_.Size());
{
- int num_templates = static_cast<int>(data->templates_.Size());
i::HandleScope scope(isolate);
- i::Handle<i::FixedArray> templates =
- isolate->factory()->NewFixedArray(num_templates, i::TENURED);
- for (int i = 0; i < num_templates; i++) {
- templates->set(i, *v8::Utils::OpenHandle(*data->templates_.Get(i)));
+ // Convert list of context-independent data to FixedArray.
+ ConvertSerializedObjectsToFixedArray(isolate);
+
+ // Convert lists of context-dependent data to FixedArray.
+ ConvertSerializedObjectsToFixedArray(
+ data->default_context_.Get(data->isolate_));
+ for (int i = 0; i < num_additional_contexts; i++) {
+ ConvertSerializedObjectsToFixedArray(data->contexts_.Get(i));
}
- isolate->heap()->SetSerializedTemplates(*templates);
- data->templates_.Clear();
// We need to store the global proxy size upfront in case we need the
// bootstrapper to create a global proxy before we deserialize the context.
@@ -695,13 +766,13 @@ StartupData SnapshotCreator::CreateBlob(
i::DisallowHeapAllocation no_gc_from_here_on;
- std::vector<i::Object*> contexts;
- contexts.reserve(num_additional_contexts);
- i::Object* default_context;
+ int num_contexts = num_additional_contexts + 1;
+ std::vector<i::Context*> contexts;
+ contexts.reserve(num_contexts);
{
i::HandleScope scope(isolate);
- default_context =
- *v8::Utils::OpenHandle(*data->default_context_.Get(data->isolate_));
+ contexts.push_back(
+ *v8::Utils::OpenHandle(*data->default_context_.Get(data->isolate_)));
data->default_context_.Reset();
for (int i = 0; i < num_additional_contexts; i++) {
i::Handle<i::Context> context =
@@ -711,6 +782,10 @@ StartupData SnapshotCreator::CreateBlob(
data->contexts_.Clear();
}
+ // Check that values referenced by global/eternal handles are accounted for.
+ i::SerializedHandleChecker handle_checker(isolate, &contexts);
+ CHECK(handle_checker.CheckGlobalAndEternalHandles());
+
// Complete in-object slack tracking for all functions.
i::HeapIterator heap_iterator(isolate->heap());
while (i::HeapObject* current_obj = heap_iterator.next()) {
@@ -724,26 +799,18 @@ StartupData SnapshotCreator::CreateBlob(
// Serialize each context with a new partial serializer.
std::vector<i::SnapshotData*> context_snapshots;
- context_snapshots.reserve(num_additional_contexts + 1);
+ context_snapshots.reserve(num_contexts);
// TODO(6593): generalize rehashing, and remove this flag.
bool can_be_rehashed = true;
- {
- // The default context is created with a handler for embedder fields which
- // determines how they are handled if encountered during serialization.
+ for (int i = 0; i < num_contexts; i++) {
+ bool is_default_context = i == 0;
i::PartialSerializer partial_serializer(
isolate, &startup_serializer,
- data->default_embedder_fields_serializer_);
- partial_serializer.Serialize(&default_context, false);
- can_be_rehashed = can_be_rehashed && partial_serializer.can_be_rehashed();
- context_snapshots.push_back(new i::SnapshotData(&partial_serializer));
- }
-
- for (int i = 0; i < num_additional_contexts; i++) {
- i::PartialSerializer partial_serializer(
- isolate, &startup_serializer, data->embedder_fields_serializers_[i]);
- partial_serializer.Serialize(&contexts[i], true);
+ is_default_context ? data->default_embedder_fields_serializer_
+ : data->embedder_fields_serializers_[i - 1]);
+ partial_serializer.Serialize(&contexts[i], !is_default_context);
can_be_rehashed = can_be_rehashed && partial_serializer.can_be_rehashed();
context_snapshots.push_back(new i::SnapshotData(&partial_serializer));
}
@@ -767,6 +834,7 @@ StartupData SnapshotCreator::CreateBlob(
delete context_snapshot;
}
data->created_ = true;
+
return result;
}
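// Annotation (editor's sketch, not part of this patch): the usual tail of
// the flow sketched above. CreateBlob() first converts every
// serialized-objects ArrayList into a FixedArray, then serializes the
// default context and all additional contexts in a single loop.
v8::StartupData blob =
    creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);
v8::Isolate::CreateParams params;
params.snapshot_blob = &blob;
params.array_buffer_allocator =
    v8::ArrayBuffer::Allocator::NewDefaultAllocator();
v8::Isolate* deserialized = v8::Isolate::New(params);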
@@ -911,7 +979,8 @@ void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory,
uint64_t virtual_memory_limit) {
set_max_semi_space_size_in_kb(
i::Heap::ComputeMaxSemiSpaceSize(physical_memory));
- set_max_old_space_size(i::Heap::ComputeMaxOldGenerationSize(physical_memory));
+ set_max_old_space_size(
+ static_cast<int>(i::Heap::ComputeMaxOldGenerationSize(physical_memory)));
set_max_zone_pool_size(i::AccountingAllocator::kMaxPoolSize);
if (virtual_memory_limit > 0 && i::kRequiresCodeRange) {
@@ -926,9 +995,7 @@ void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory,
void SetResourceConstraints(i::Isolate* isolate,
const ResourceConstraints& constraints) {
size_t semi_space_size = constraints.max_semi_space_size_in_kb();
- size_t old_space_size =
- static_cast<size_t>(
- static_cast<unsigned int>(constraints.max_old_space_size()));
+ int old_space_size = constraints.max_old_space_size();
size_t code_range_size = constraints.code_range_size();
size_t max_pool_size = constraints.max_zone_pool_size();
if (semi_space_size != 0 || old_space_size != 0 || code_range_size != 0) {
@@ -1409,10 +1476,10 @@ Local<FunctionTemplate> FunctionTemplate::New(
MaybeLocal<FunctionTemplate> FunctionTemplate::FromSnapshot(Isolate* isolate,
size_t index) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i::FixedArray* templates = i_isolate->heap()->serialized_templates();
+ i::FixedArray* serialized_objects = i_isolate->heap()->serialized_objects();
int int_index = static_cast<int>(index);
- if (int_index < templates->length()) {
- i::Object* info = templates->get(int_index);
+ if (int_index < serialized_objects->length()) {
+ i::Object* info = serialized_objects->get(int_index);
if (info->IsFunctionTemplateInfo()) {
return Utils::ToLocal(i::Handle<i::FunctionTemplateInfo>(
i::FunctionTemplateInfo::cast(info)));
@@ -1593,10 +1660,6 @@ Local<ObjectTemplate> ObjectTemplate::New(
}
-Local<ObjectTemplate> ObjectTemplate::New() {
- return New(i::Isolate::Current(), Local<FunctionTemplate>());
-}
-
static Local<ObjectTemplate> ObjectTemplateNew(
i::Isolate* isolate, v8::Local<FunctionTemplate> constructor,
bool do_not_cache) {
@@ -1626,10 +1689,10 @@ Local<ObjectTemplate> ObjectTemplate::New(
MaybeLocal<ObjectTemplate> ObjectTemplate::FromSnapshot(Isolate* isolate,
size_t index) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i::FixedArray* templates = i_isolate->heap()->serialized_templates();
+ i::FixedArray* serialized_objects = i_isolate->heap()->serialized_objects();
int int_index = static_cast<int>(index);
- if (int_index < templates->length()) {
- i::Object* info = templates->get(int_index);
+ if (int_index < serialized_objects->length()) {
+ i::Object* info = serialized_objects->get(int_index);
if (info->IsObjectTemplateInfo()) {
return Utils::ToLocal(
i::Handle<i::ObjectTemplateInfo>(i::ObjectTemplateInfo::cast(info)));
@@ -1748,11 +1811,10 @@ static i::Handle<i::InterceptorInfo> CreateInterceptorInfo(
i::Isolate* isolate, Getter getter, Setter setter, Query query,
Descriptor descriptor, Deleter remover, Enumerator enumerator,
Definer definer, Local<Value> data, PropertyHandlerFlags flags) {
- DCHECK(query == nullptr ||
- descriptor == nullptr); // Either intercept attributes or descriptor.
- DCHECK(query == nullptr ||
- definer ==
- nullptr); // Only use descriptor callback with definer callback.
+ // Either intercept attributes or descriptor.
+ DCHECK(query == nullptr || descriptor == nullptr);
+ // Only use descriptor callback with definer callback.
+ DCHECK(query == nullptr || definer == nullptr);
auto obj = i::Handle<i::InterceptorInfo>::cast(
isolate->factory()->NewStruct(i::INTERCEPTOR_INFO_TYPE, i::TENURED));
obj->set_flags(0);
@@ -1781,6 +1843,32 @@ static i::Handle<i::InterceptorInfo> CreateInterceptorInfo(
template <typename Getter, typename Setter, typename Query, typename Descriptor,
typename Deleter, typename Enumerator, typename Definer>
+static i::Handle<i::InterceptorInfo> CreateNamedInterceptorInfo(
+ i::Isolate* isolate, Getter getter, Setter setter, Query query,
+ Descriptor descriptor, Deleter remover, Enumerator enumerator,
+ Definer definer, Local<Value> data, PropertyHandlerFlags flags) {
+ auto interceptor =
+ CreateInterceptorInfo(isolate, getter, setter, query, descriptor, remover,
+ enumerator, definer, data, flags);
+ interceptor->set_is_named(true);
+ return interceptor;
+}
+
+template <typename Getter, typename Setter, typename Query, typename Descriptor,
+ typename Deleter, typename Enumerator, typename Definer>
+static i::Handle<i::InterceptorInfo> CreateIndexedInterceptorInfo(
+ i::Isolate* isolate, Getter getter, Setter setter, Query query,
+ Descriptor descriptor, Deleter remover, Enumerator enumerator,
+ Definer definer, Local<Value> data, PropertyHandlerFlags flags) {
+ auto interceptor =
+ CreateInterceptorInfo(isolate, getter, setter, query, descriptor, remover,
+ enumerator, definer, data, flags);
+ interceptor->set_is_named(false);
+ return interceptor;
+}
+
+template <typename Getter, typename Setter, typename Query, typename Descriptor,
+ typename Deleter, typename Enumerator, typename Definer>
static void ObjectTemplateSetNamedPropertyHandler(
ObjectTemplate* templ, Getter getter, Setter setter, Query query,
Descriptor descriptor, Deleter remover, Enumerator enumerator,
@@ -1790,11 +1878,13 @@ static void ObjectTemplateSetNamedPropertyHandler(
i::HandleScope scope(isolate);
auto cons = EnsureConstructor(isolate, templ);
EnsureNotInstantiated(cons, "ObjectTemplateSetNamedPropertyHandler");
- auto obj = CreateInterceptorInfo(isolate, getter, setter, query, descriptor,
- remover, enumerator, definer, data, flags);
+ auto obj =
+ CreateNamedInterceptorInfo(isolate, getter, setter, query, descriptor,
+ remover, enumerator, definer, data, flags);
cons->set_named_property_handler(*obj);
}
+// TODO(cbruni): deprecate.
void ObjectTemplate::SetNamedPropertyHandler(
NamedPropertyGetterCallback getter, NamedPropertySetterCallback setter,
NamedPropertyQueryCallback query, NamedPropertyDeleterCallback remover,
@@ -1867,12 +1957,12 @@ void ObjectTemplate::SetAccessCheckCallbackAndHandler(
i::Handle<i::AccessCheckInfo>::cast(struct_info);
SET_FIELD_WRAPPED(info, set_callback, callback);
- auto named_interceptor = CreateInterceptorInfo(
+ auto named_interceptor = CreateNamedInterceptorInfo(
isolate, named_handler.getter, named_handler.setter, named_handler.query,
named_handler.descriptor, named_handler.deleter, named_handler.enumerator,
named_handler.definer, named_handler.data, named_handler.flags);
info->set_named_interceptor(*named_interceptor);
- auto indexed_interceptor = CreateInterceptorInfo(
+ auto indexed_interceptor = CreateIndexedInterceptorInfo(
isolate, indexed_handler.getter, indexed_handler.setter,
indexed_handler.query, indexed_handler.descriptor,
indexed_handler.deleter, indexed_handler.enumerator,
@@ -1895,10 +1985,10 @@ void ObjectTemplate::SetHandler(
i::HandleScope scope(isolate);
auto cons = EnsureConstructor(isolate, this);
EnsureNotInstantiated(cons, "v8::ObjectTemplate::SetHandler");
- auto obj = CreateInterceptorInfo(isolate, config.getter, config.setter,
- config.query, config.descriptor,
- config.deleter, config.enumerator,
- config.definer, config.data, config.flags);
+ auto obj = CreateIndexedInterceptorInfo(
+ isolate, config.getter, config.setter, config.query, config.descriptor,
+ config.deleter, config.enumerator, config.definer, config.data,
+ config.flags);
cons->set_indexed_property_handler(*obj);
}
@@ -2239,11 +2329,6 @@ Local<Value> Module::GetModuleNamespace() {
int Module::GetIdentityHash() const { return Utils::OpenHandle(this)->hash(); }
-bool Module::Instantiate(Local<Context> context,
- Module::ResolveCallback callback) {
- return InstantiateModule(context, callback).FromMaybe(false);
-}
-
Maybe<bool> Module::InstantiateModule(Local<Context> context,
Module::ResolveCallback callback) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
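// Annotation (editor's migration sketch, not part of this patch): with the
// bool-returning Instantiate() removed, callers use the Maybe-based variant
// and handle failure explicitly. The resolver shape below matches the
// ResolveCallback typedef of this API version; `module` and `context` are
// assumed to be in scope.
v8::MaybeLocal<v8::Module> ResolveImport(v8::Local<v8::Context> context,
                                         v8::Local<v8::String> specifier,
                                         v8::Local<v8::Module> referrer);
if (module->InstantiateModule(context, ResolveImport).FromMaybe(false)) {
  // Linked successfully; the module can now be Evaluate()d.
}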
@@ -2361,18 +2446,6 @@ MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundScript(
return CompileUnboundInternal(v8_isolate, source, options, no_cache_reason);
}
-Local<UnboundScript> ScriptCompiler::CompileUnbound(
- Isolate* v8_isolate, Source* source, CompileOptions options,
- NoCacheReason no_cache_reason) {
- Utils::ApiCheck(
- !source->GetResourceOptions().IsModule(),
- "v8::ScriptCompiler::CompileUnbound",
- "v8::ScriptCompiler::CompileModule must be used to compile modules");
- RETURN_TO_LOCAL_UNCHECKED(
- CompileUnboundInternal(v8_isolate, source, options, no_cache_reason),
- UnboundScript);
-}
-
MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
Source* source,
CompileOptions options,
@@ -2389,13 +2462,6 @@ MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
return result->BindToCurrentContext();
}
-Local<Script> ScriptCompiler::Compile(Isolate* v8_isolate, Source* source,
- CompileOptions options,
- NoCacheReason no_cache_reason) {
- auto context = v8_isolate->GetCurrentContext();
- RETURN_TO_LOCAL_UNCHECKED(Compile(context, source, options), Script);
-}
-
MaybeLocal<Module> ScriptCompiler::CompileModule(Isolate* isolate,
Source* source) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
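// Annotation (editor's migration sketch, not part of this patch): with the
// unchecked CompileUnbound()/Compile() entry points removed, callers go
// through the MaybeLocal-returning variants. Assumes `isolate`,
// `source_text` and `origin` are in scope.
v8::ScriptCompiler::Source source(source_text, origin);
v8::Local<v8::UnboundScript> unbound;
if (!v8::ScriptCompiler::CompileUnboundScript(isolate, &source)
         .ToLocal(&unbound)) {
  // Compilation failed; the pending exception is on the isolate.
}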
@@ -2459,57 +2525,27 @@ MaybeLocal<Function> ScriptCompiler::CompileFunctionInContext(
PREPARE_FOR_EXECUTION(v8_context, ScriptCompiler, CompileFunctionInContext,
Function);
TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.ScriptCompiler");
- i::Handle<i::String> source_string;
- auto factory = isolate->factory();
- if (arguments_count) {
- source_string = factory->NewStringFromStaticChars("(function(");
- for (size_t i = 0; i < arguments_count; ++i) {
- IsIdentifierHelper helper;
- if (!helper.Check(*Utils::OpenHandle(*arguments[i]))) {
- return Local<Function>();
- }
- has_pending_exception =
- !factory->NewConsString(source_string,
- Utils::OpenHandle(*arguments[i]))
- .ToHandle(&source_string);
- RETURN_ON_FAILED_EXECUTION(Function);
- if (i + 1 == arguments_count) continue;
- has_pending_exception =
- !factory->NewConsString(source_string,
- factory->LookupSingleCharacterStringFromCode(
- ',')).ToHandle(&source_string);
- RETURN_ON_FAILED_EXECUTION(Function);
- }
- i::Handle<i::String> brackets;
- brackets = factory->NewStringFromStaticChars("){");
- has_pending_exception = !factory->NewConsString(source_string, brackets)
- .ToHandle(&source_string);
- RETURN_ON_FAILED_EXECUTION(Function);
- } else {
- source_string = factory->NewStringFromStaticChars("(function(){");
- }
-
- int scope_position = source_string->length();
- has_pending_exception =
- !factory->NewConsString(source_string,
- Utils::OpenHandle(*source->source_string))
- .ToHandle(&source_string);
- RETURN_ON_FAILED_EXECUTION(Function);
- // Include \n in case the source contains a line end comment.
- auto brackets = factory->NewStringFromStaticChars("\n})");
- has_pending_exception =
- !factory->NewConsString(source_string, brackets).ToHandle(&source_string);
- RETURN_ON_FAILED_EXECUTION(Function);
i::Handle<i::Context> context = Utils::OpenHandle(*v8_context);
i::Handle<i::SharedFunctionInfo> outer_info(context->closure()->shared(),
isolate);
+
+ i::Handle<i::JSFunction> fun;
+ i::Handle<i::FixedArray> arguments_list =
+ isolate->factory()->NewFixedArray(static_cast<int>(arguments_count));
+ for (int i = 0; i < static_cast<int>(arguments_count); i++) {
+ IsIdentifierHelper helper;
+ i::Handle<i::String> argument = Utils::OpenHandle(*arguments[i]);
+ if (!helper.Check(*argument)) return Local<Function>();
+ arguments_list->set(i, *argument);
+ }
+
for (size_t i = 0; i < context_extension_count; ++i) {
i::Handle<i::JSReceiver> extension =
Utils::OpenHandle(*context_extensions[i]);
if (!extension->IsJSObject()) return Local<Function>();
i::Handle<i::JSFunction> closure(context->closure(), isolate);
- context = factory->NewWithContext(
+ context = isolate->factory()->NewWithContext(
closure, context,
i::ScopeInfo::CreateForWithScope(
isolate, context->IsNativeContext()
@@ -2519,8 +2555,6 @@ MaybeLocal<Function> ScriptCompiler::CompileFunctionInContext(
}
i::Handle<i::Object> name_obj;
- int eval_scope_position = 0;
- int eval_position = i::kNoSourcePosition;
int line_offset = 0;
int column_offset = 0;
if (!source->resource_name.IsEmpty()) {
@@ -2532,27 +2566,15 @@ MaybeLocal<Function> ScriptCompiler::CompileFunctionInContext(
if (!source->resource_column_offset.IsEmpty()) {
column_offset = static_cast<int>(source->resource_column_offset->Value());
}
- i::Handle<i::JSFunction> fun;
- has_pending_exception =
- !i::Compiler::GetFunctionFromEval(
- source_string, outer_info, context, i::LanguageMode::kSloppy,
- i::ONLY_SINGLE_FUNCTION_LITERAL, i::kNoSourcePosition,
- eval_scope_position, eval_position, line_offset,
- column_offset - scope_position, name_obj, source->resource_options)
- .ToHandle(&fun);
- if (has_pending_exception) {
- isolate->ReportPendingMessages();
- }
- RETURN_ON_FAILED_EXECUTION(Function);
- i::Handle<i::Object> result;
+ i::Handle<i::JSFunction> result;
has_pending_exception =
- !i::Execution::Call(isolate, fun,
- Utils::OpenHandle(*v8_context->Global()), 0,
- nullptr).ToHandle(&result);
+ !i::Compiler::GetWrappedFunction(
+ Utils::OpenHandle(*source->source_string), arguments_list, context,
+ line_offset, column_offset, name_obj, source->resource_options)
+ .ToHandle(&result);
RETURN_ON_FAILED_EXECUTION(Function);
- RETURN_ESCAPED(
- Utils::CallableToLocal(i::Handle<i::JSFunction>::cast(result)));
+ RETURN_ESCAPED(Utils::CallableToLocal(result));
}
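// Annotation (editor's sketch, not part of this patch): embedder-visible
// behavior is unchanged; the body is now compiled as a wrapped function by
// the compiler instead of being pasted into a "(function(...){...})" eval
// string, so no scope-position fixups are needed. Assumes `isolate`,
// `context` and a HandleScope are in scope.
v8::Local<v8::String> arg_names[] = {
    v8::String::NewFromUtf8(isolate, "event", v8::NewStringType::kNormal)
        .ToLocalChecked()};
v8::ScriptCompiler::Source source(
    v8::String::NewFromUtf8(isolate, "return event * 2;",
                            v8::NewStringType::kNormal)
        .ToLocalChecked());
v8::Local<v8::Function> fn;
if (v8::ScriptCompiler::CompileFunctionInContext(context, &source, 1,
                                                 arg_names, 0, nullptr)
        .ToLocal(&fn)) {
  // fn is equivalent to: function(event) { return event * 2; }
}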
@@ -2587,6 +2609,9 @@ MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
i::StreamedSource* source = v8_source->impl();
i::Handle<i::String> str = Utils::OpenHandle(*(full_source_string));
i::Handle<i::Script> script = isolate->factory()->NewScript(str);
+ if (isolate->NeedsSourcePositionsForProfiling()) {
+ i::Script::InitLineEnds(script);
+ }
if (!origin.ResourceName().IsEmpty()) {
script->set_name(*Utils::OpenHandle(*(origin.ResourceName())));
}
@@ -2643,23 +2668,49 @@ MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
RETURN_ESCAPED(bound);
}
-
-Local<Script> ScriptCompiler::Compile(Isolate* v8_isolate,
- StreamedSource* v8_source,
- Local<String> full_source_string,
- const ScriptOrigin& origin) {
- auto context = v8_isolate->GetCurrentContext();
- RETURN_TO_LOCAL_UNCHECKED(
- Compile(context, v8_source, full_source_string, origin), Script);
-}
-
-
uint32_t ScriptCompiler::CachedDataVersionTag() {
return static_cast<uint32_t>(base::hash_combine(
internal::Version::Hash(), internal::FlagList::Hash(),
static_cast<uint32_t>(internal::CpuFeatures::SupportedFeatures())));
}
+ScriptCompiler::CachedData* ScriptCompiler::CreateCodeCache(
+ Local<UnboundScript> unbound_script, Local<String> source) {
+ i::Handle<i::SharedFunctionInfo> shared =
+ i::Handle<i::SharedFunctionInfo>::cast(
+ Utils::OpenHandle(*unbound_script));
+ i::Isolate* isolate = shared->GetIsolate();
+ TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.Execute");
+ base::ElapsedTimer timer;
+ if (i::FLAG_profile_deserialization) {
+ timer.Start();
+ }
+ i::HistogramTimerScope histogram_timer(
+ isolate->counters()->compile_serialize());
+ i::RuntimeCallTimerScope runtimeTimer(
+ isolate, i::RuntimeCallCounterId::kCompileSerialize);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileSerialize");
+
+ DCHECK(shared->is_toplevel());
+ i::Handle<i::Script> script(i::Script::cast(shared->script()));
+ // TODO(7110): Enable serialization of Asm modules once the AsmWasmData is
+ // context independent.
+ if (script->ContainsAsmModule()) return nullptr;
+ if (isolate->debug()->is_loaded()) return nullptr;
+
+ i::ScriptData* script_data =
+ i::CodeSerializer::Serialize(isolate, shared, Utils::OpenHandle(*source));
+ CachedData* result = new CachedData(
+ script_data->data(), script_data->length(), CachedData::BufferOwned);
+ script_data->ReleaseDataOwnership();
+ delete script_data;
+
+ if (i::FLAG_profile_deserialization) {
+ i::PrintF("[Serializing took %0.3f ms]\n",
+ timer.Elapsed().InMillisecondsF());
+ }
+ return result;
+}
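// Annotation (editor's usage sketch, not part of this patch): the new
// explicit cache producer returns nullptr for scripts containing asm.js
// modules and while the debugger is loaded, per the checks above. Assumes
// `unbound` and `source_text` from a prior compile.
v8::ScriptCompiler::CachedData* cache =
    v8::ScriptCompiler::CreateCodeCache(unbound, source_text);
if (cache != nullptr) {
  // The Source takes ownership of the cache; consume it on a later compile.
  v8::ScriptCompiler::Source cached_source(source_text, cache);
  v8::Local<v8::UnboundScript> from_cache;
  if (v8::ScriptCompiler::CompileUnboundScript(
          isolate, &cached_source, v8::ScriptCompiler::kConsumeCodeCache)
          .ToLocal(&from_cache)) {
    // Deserialized from the cache instead of reparsing the source.
  }
}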
MaybeLocal<Script> Script::Compile(Local<Context> context, Local<String> source,
ScriptOrigin* origin) {
@@ -2691,24 +2742,6 @@ Local<Script> Script::Compile(v8::Local<String> source,
// --- E x c e p t i o n s ---
-
-v8::TryCatch::TryCatch()
- : isolate_(i::Isolate::Current()),
- next_(isolate_->try_catch_handler()),
- is_verbose_(false),
- can_continue_(true),
- capture_message_(true),
- rethrow_(false),
- has_terminated_(false) {
- ResetInternal();
- // Special handling for simulators which have a separate JS stack.
- js_stack_comparable_address_ =
- reinterpret_cast<void*>(i::SimulatorStack::RegisterCTryCatch(
- isolate_, i::GetCurrentStackPosition()));
- isolate_->RegisterTryCatchHandler(this);
-}
-
-
v8::TryCatch::TryCatch(v8::Isolate* isolate)
: isolate_(reinterpret_cast<i::Isolate*>(isolate)),
next_(isolate_->try_catch_handler()),
@@ -2963,13 +2996,6 @@ Maybe<int> Message::GetEndColumn(Local<Context> context) const {
}
-int Message::GetEndColumn() const {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
- const int default_value = kNoColumnInfo;
- return GetEndColumn(context).FromMaybe(default_value);
-}
-
-
bool Message::IsSharedCrossOrigin() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
@@ -3030,65 +3056,6 @@ int StackTrace::GetFrameCount() const {
return Utils::OpenHandle(this)->length();
}
-namespace {
-i::Handle<i::JSObject> NewFrameObject(i::Isolate* isolate,
- i::Handle<i::StackFrameInfo> frame) {
- i::Handle<i::JSObject> frame_obj =
- isolate->factory()->NewJSObject(isolate->object_function());
- i::JSObject::AddProperty(
- frame_obj, handle(isolate->heap()->line_string()),
- handle(i::Smi::FromInt(frame->line_number() + 1), isolate), i::NONE);
- i::JSObject::AddProperty(
- frame_obj, handle(isolate->heap()->column_string()),
- handle(i::Smi::FromInt(frame->column_number() + 1), isolate), i::NONE);
- i::JSObject::AddProperty(frame_obj,
- isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("scriptId")),
- handle(i::Smi::FromInt(frame->script_id()), isolate),
- i::NONE);
- i::JSObject::AddProperty(frame_obj,
- isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("scriptName")),
- handle(frame->script_name(), isolate), i::NONE);
- i::JSObject::AddProperty(frame_obj,
- isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("scriptNameOrSourceURL")),
- handle(frame->script_name_or_source_url(), isolate),
- i::NONE);
- i::JSObject::AddProperty(frame_obj,
- isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("functionName")),
- handle(frame->function_name(), isolate), i::NONE);
- i::JSObject::AddProperty(frame_obj,
- isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("isEval")),
- isolate->factory()->ToBoolean(frame->is_eval()),
- i::NONE);
- i::JSObject::AddProperty(
- frame_obj,
- isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("isConstructor")),
- isolate->factory()->ToBoolean(frame->is_constructor()), i::NONE);
- return frame_obj;
-}
-} // namespace
-
-Local<Array> StackTrace::AsArray() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- i::Handle<i::FixedArray> self = Utils::OpenHandle(this);
- int frame_count = self->length();
- i::Handle<i::FixedArray> frames =
- isolate->factory()->NewFixedArray(frame_count);
- for (int i = 0; i < frame_count; ++i) {
- auto obj = handle(self->get(i), isolate);
- auto frame = i::Handle<i::StackFrameInfo>::cast(obj);
- i::Handle<i::JSObject> frame_obj = NewFrameObject(isolate, frame);
- frames->set(i, *frame_obj);
- }
- return Utils::ToLocal(isolate->factory()->NewJSArrayWithElements(
- frames, i::PACKED_ELEMENTS, frame_count));
-}
-
Local<StackTrace> StackTrace::CurrentStackTrace(
Isolate* isolate,
@@ -3193,10 +3160,6 @@ MaybeLocal<Value> JSON::Parse(Local<Context> context,
RETURN_ESCAPED(result);
}
-Local<Value> JSON::Parse(Local<String> json_string) {
- RETURN_TO_LOCAL_UNCHECKED(Parse(Local<Context>(), json_string), Value);
-}
-
MaybeLocal<String> JSON::Stringify(Local<Context> context,
Local<Value> json_object,
Local<String> gap) {
@@ -3707,12 +3670,6 @@ MaybeLocal<String> Value::ToDetailString(Local<Context> context) const {
}
-Local<String> Value::ToDetailString(Isolate* isolate) const {
- RETURN_TO_LOCAL_UNCHECKED(ToDetailString(isolate->GetCurrentContext()),
- String);
-}
-
-
MaybeLocal<Object> Value::ToObject(Local<Context> context) const {
auto obj = Utils::OpenHandle(this);
if (obj->IsJSReceiver()) return ToApiHandle<Object>(obj);
@@ -3806,11 +3763,6 @@ MaybeLocal<Uint32> Value::ToUint32(Local<Context> context) const {
}
-Local<Uint32> Value::ToUint32(Isolate* isolate) const {
- RETURN_TO_LOCAL_UNCHECKED(ToUint32(isolate->GetCurrentContext()), Uint32);
-}
-
-
void i::Internals::CheckInitializedImpl(v8::Isolate* external_isolate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(external_isolate);
Utils::ApiCheck(isolate != nullptr && !isolate->IsDead(),
@@ -3866,6 +3818,15 @@ void v8::Symbol::CheckCast(v8::Value* that) {
}
+void v8::Private::CheckCast(v8::Data* that) {
+ i::Handle<i::Object> obj = Utils::OpenHandle(that);
+ Utils::ApiCheck(obj->IsSymbol() &&
+ i::Handle<i::Symbol>::cast(obj)->is_private(),
+ "v8::Private::Cast",
+ "Could not convert to private");
+}
+
+
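// Annotation (editor's sketch, not part of this patch): Private becomes
// castable from v8::Data, which is what the templated
// GetDataFromSnapshotOnce<T>() helpers rely on when recovering snapshot
// data. A hedged example, assuming `index` came from AddData():
v8::Local<v8::Private> key;
if (isolate->GetDataFromSnapshotOnce<v8::Private>(index).ToLocal(&key)) {
  // `key` was registered with SnapshotCreator::AddData() at snapshot time.
}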
void v8::Number::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
Utils::ApiCheck(obj->IsNumber(),
@@ -4180,17 +4141,6 @@ MaybeLocal<Uint32> Value::ToArrayIndex(Local<Context> context) const {
}
-Local<Uint32> Value::ToArrayIndex() const {
- auto self = Utils::OpenHandle(this);
- if (self->IsSmi()) {
- if (i::Smi::ToInt(*self) >= 0) return Utils::Uint32ToLocal(self);
- return Local<Uint32>();
- }
- auto context = ContextFromHeapObject(self);
- RETURN_TO_LOCAL_UNCHECKED(ToArrayIndex(context), Uint32);
-}
-
-
Maybe<bool> Value::Equals(Local<Context> context, Local<Value> that) const {
auto self = Utils::OpenHandle(this);
auto other = Utils::OpenHandle(*that);
@@ -4469,39 +4419,6 @@ Maybe<bool> v8::Object::DefineProperty(v8::Local<v8::Context> context,
return success;
}
-MUST_USE_RESULT
-static i::MaybeHandle<i::Object> DefineObjectProperty(
- i::Handle<i::JSObject> js_object, i::Handle<i::Object> key,
- i::Handle<i::Object> value, i::PropertyAttributes attrs) {
- i::Isolate* isolate = js_object->GetIsolate();
- bool success = false;
- i::LookupIterator it = i::LookupIterator::PropertyOrElement(
- isolate, js_object, key, &success, i::LookupIterator::OWN);
- if (!success) return i::MaybeHandle<i::Object>();
-
- return i::JSObject::DefineOwnPropertyIgnoreAttributes(
- &it, value, attrs, i::JSObject::FORCE_FIELD);
-}
-
-
-Maybe<bool> v8::Object::ForceSet(v8::Local<v8::Context> context,
- v8::Local<Value> key, v8::Local<Value> value,
- v8::PropertyAttribute attribs) {
- auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
- ENTER_V8_NO_SCRIPT(isolate, context, Object, ForceSet, Nothing<bool>(),
- i::HandleScope);
- auto self = i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
- auto key_obj = Utils::OpenHandle(*key);
- auto value_obj = Utils::OpenHandle(*value);
- has_pending_exception =
- DefineObjectProperty(self, key_obj, value_obj,
- static_cast<i::PropertyAttributes>(attribs))
- .is_null();
- RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
- return Just(true);
-}
-
-
Maybe<bool> v8::Object::SetPrivate(Local<Context> context, Local<Private> key,
Local<Value> value) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
@@ -4595,12 +4512,6 @@ Maybe<PropertyAttribute> v8::Object::GetPropertyAttributes(
}
-PropertyAttribute v8::Object::GetPropertyAttributes(v8::Local<Value> key) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
- return GetPropertyAttributes(context, key)
- .FromMaybe(static_cast<PropertyAttribute>(i::NONE));
-}
-
MaybeLocal<Value> v8::Object::GetOwnPropertyDescriptor(Local<Context> context,
Local<Name> key) {
PREPARE_FOR_EXECUTION(context, Object, GetOwnPropertyDescriptor, Value);
@@ -4618,11 +4529,6 @@ MaybeLocal<Value> v8::Object::GetOwnPropertyDescriptor(Local<Context> context,
RETURN_ESCAPED(Utils::ToLocal(desc.ToObject(isolate)));
}
-Local<Value> v8::Object::GetOwnPropertyDescriptor(Local<Name> key) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
- RETURN_TO_LOCAL_UNCHECKED(GetOwnPropertyDescriptor(context, key), Value);
-}
-
Local<Value> v8::Object::GetPrototype() {
auto isolate = Utils::OpenHandle(this)->GetIsolate();
@@ -4650,11 +4556,6 @@ Maybe<bool> v8::Object::SetPrototype(Local<Context> context,
}
-bool v8::Object::SetPrototype(Local<Value> value) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
- return SetPrototype(context, value).FromMaybe(false);
-}
-
Local<Object> v8::Object::FindInstanceInPrototypeChain(
v8::Local<FunctionTemplate> tmpl) {
auto self = Utils::OpenHandle(this);
@@ -4733,12 +4634,6 @@ MaybeLocal<String> v8::Object::ObjectProtoToString(Local<Context> context) {
}
-Local<String> v8::Object::ObjectProtoToString() {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
- RETURN_TO_LOCAL_UNCHECKED(ObjectProtoToString(context), String);
-}
-
-
Local<String> v8::Object::GetConstructorName() {
auto self = Utils::OpenHandle(this);
i::Handle<i::String> name = i::JSReceiver::GetConstructorName(self);
@@ -4850,12 +4745,6 @@ Maybe<bool> v8::Object::Delete(Local<Context> context, uint32_t index) {
}
-bool v8::Object::Delete(uint32_t index) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
- return Delete(context, index).FromMaybe(false);
-}
-
-
Maybe<bool> v8::Object::Has(Local<Context> context, uint32_t index) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
ENTER_V8(isolate, context, Object, Has, Nothing<bool>(), i::HandleScope);
@@ -4867,11 +4756,6 @@ Maybe<bool> v8::Object::Has(Local<Context> context, uint32_t index) {
}
-bool v8::Object::Has(uint32_t index) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
- return Has(context, index).FromMaybe(false);
-}
-
template <typename Getter, typename Setter, typename Data>
static Maybe<bool> ObjectSetAccessor(Local<Context> context, Object* self,
Local<Name> name, Getter getter,
@@ -4918,27 +4802,6 @@ Maybe<bool> Object::SetAccessor(Local<Context> context, Local<Name> name,
}
-bool Object::SetAccessor(Local<String> name, AccessorGetterCallback getter,
- AccessorSetterCallback setter, v8::Local<Value> data,
- AccessControl settings, PropertyAttribute attributes) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
- return ObjectSetAccessor(context, this, name, getter, setter, data, settings,
- attributes, i::FLAG_disable_old_api_accessors)
- .FromMaybe(false);
-}
-
-
-bool Object::SetAccessor(Local<Name> name, AccessorNameGetterCallback getter,
- AccessorNameSetterCallback setter,
- v8::Local<Value> data, AccessControl settings,
- PropertyAttribute attributes) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
- return ObjectSetAccessor(context, this, name, getter, setter, data, settings,
- attributes, i::FLAG_disable_old_api_accessors)
- .FromMaybe(false);
-}
-
-
void Object::SetAccessorProperty(Local<Name> name, Local<Function> getter,
Local<Function> setter,
PropertyAttribute attribute,
@@ -4992,12 +4855,6 @@ Maybe<bool> v8::Object::HasOwnProperty(Local<Context> context, uint32_t index) {
return result;
}
-bool v8::Object::HasOwnProperty(Local<String> key) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
- return HasOwnProperty(context, key).FromMaybe(false);
-}
-
-
Maybe<bool> v8::Object::HasRealNamedProperty(Local<Context> context,
Local<Name> key) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
@@ -5099,14 +4956,6 @@ MaybeLocal<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
}
-Local<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
- Local<String> key) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
- RETURN_TO_LOCAL_UNCHECKED(GetRealNamedPropertyInPrototypeChain(context, key),
- Value);
-}
-
-
Maybe<PropertyAttribute>
v8::Object::GetRealNamedPropertyAttributesInPrototypeChain(
Local<Context> context, Local<Name> key) {
@@ -5133,13 +4982,6 @@ v8::Object::GetRealNamedPropertyAttributesInPrototypeChain(
}
-Maybe<PropertyAttribute>
-v8::Object::GetRealNamedPropertyAttributesInPrototypeChain(Local<String> key) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
- return GetRealNamedPropertyAttributesInPrototypeChain(context, key);
-}
-
-
MaybeLocal<Value> v8::Object::GetRealNamedProperty(Local<Context> context,
Local<Name> key) {
PREPARE_FOR_EXECUTION(context, Object, GetRealNamedProperty, Value);
@@ -5156,12 +4998,6 @@ MaybeLocal<Value> v8::Object::GetRealNamedProperty(Local<Context> context,
}
-Local<Value> v8::Object::GetRealNamedProperty(Local<String> key) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
- RETURN_TO_LOCAL_UNCHECKED(GetRealNamedProperty(context, key), Value);
-}
-
-
Maybe<PropertyAttribute> v8::Object::GetRealNamedPropertyAttributes(
Local<Context> context, Local<Name> key) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
@@ -5183,13 +5019,6 @@ Maybe<PropertyAttribute> v8::Object::GetRealNamedPropertyAttributes(
}
-Maybe<PropertyAttribute> v8::Object::GetRealNamedPropertyAttributes(
- Local<String> key) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
- return GetRealNamedPropertyAttributes(context, key);
-}
-
-
Local<v8::Object> v8::Object::Clone() {
auto self = i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
auto isolate = self->GetIsolate();
@@ -5245,15 +5074,6 @@ MaybeLocal<Value> Object::CallAsFunction(Local<Context> context,
}
-Local<v8::Value> Object::CallAsFunction(v8::Local<v8::Value> recv, int argc,
- v8::Local<v8::Value> argv[]) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
- Local<Value>* argv_cast = reinterpret_cast<Local<Value>*>(argv);
- RETURN_TO_LOCAL_UNCHECKED(CallAsFunction(context, recv, argc, argv_cast),
- Value);
-}
-
-
MaybeLocal<Value> Object::CallAsConstructor(Local<Context> context, int argc,
Local<Value> argv[]) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
@@ -5272,13 +5092,6 @@ MaybeLocal<Value> Object::CallAsConstructor(Local<Context> context, int argc,
}
-Local<v8::Value> Object::CallAsConstructor(int argc,
- v8::Local<v8::Value> argv[]) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
- Local<Value>* argv_cast = reinterpret_cast<Local<Value>*>(argv);
- RETURN_TO_LOCAL_UNCHECKED(CallAsConstructor(context, argc, argv_cast), Value);
-}
-
MaybeLocal<Function> Function::New(Local<Context> context,
FunctionCallback callback, Local<Value> data,
int length, ConstructorBehavior behavior) {
@@ -5300,12 +5113,6 @@ Local<Function> Function::New(Isolate* v8_isolate, FunctionCallback callback,
}
-Local<v8::Object> Function::NewInstance() const {
- return NewInstance(Isolate::GetCurrent()->GetCurrentContext(), 0, nullptr)
- .FromMaybe(Local<Object>());
-}
-
-
MaybeLocal<Object> Function::NewInstance(Local<Context> context, int argc,
v8::Local<v8::Value> argv[]) const {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
@@ -5324,13 +5131,6 @@ MaybeLocal<Object> Function::NewInstance(Local<Context> context, int argc,
}
-Local<v8::Object> Function::NewInstance(int argc,
- v8::Local<v8::Value> argv[]) const {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
- RETURN_TO_LOCAL_UNCHECKED(NewInstance(context, argc, argv), Object);
-}
-
-
MaybeLocal<v8::Value> Function::Call(Local<Context> context,
v8::Local<v8::Value> recv, int argc,
v8::Local<v8::Value> argv[]) {
@@ -5340,6 +5140,8 @@ MaybeLocal<v8::Value> Function::Call(Local<Context> context,
InternalEscapableScope);
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
auto self = Utils::OpenHandle(this);
+ Utils::ApiCheck(!self.is_null(), "v8::Function::Call",
+ "Function to be called is a null pointer");
i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Object**));
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);

@@ -5474,16 +5276,6 @@ int Function::GetScriptColumnNumber() const {
}
-bool Function::IsBuiltin() const {
- auto self = Utils::OpenHandle(this);
- if (!self->IsJSFunction()) {
- return false;
- }
- auto func = i::Handle<i::JSFunction>::cast(self);
- return !func->shared()->IsUserJavaScript();
-}
-
-
int Function::ScriptId() const {
auto self = Utils::OpenHandle(this);
if (!self->IsJSFunction()) {
@@ -6397,7 +6189,9 @@ HeapStatistics::HeapStatistics()
heap_size_limit_(0),
malloced_memory_(0),
peak_malloced_memory_(0),
- does_zap_garbage_(0) {}
+ does_zap_garbage_(0),
+ number_of_native_contexts_(0),
+ number_of_detached_contexts_(0) {}
HeapSpaceStatistics::HeapSpaceStatistics(): space_name_(0),
space_size_(0),
@@ -6415,10 +6209,6 @@ HeapObjectStatistics::HeapObjectStatistics()
HeapCodeStatistics::HeapCodeStatistics()
: code_and_metadata_size_(0), bytecode_and_metadata_size_(0) {}
-bool v8::V8::InitializeICU(const char* icu_data_file) {
- return i::InitializeICU(icu_data_file);
-}
-
bool v8::V8::InitializeICUDefaultLocation(const char* exec_path,
const char* icu_data_file) {
return i::InitializeICUDefaultLocation(exec_path, icu_data_file);
@@ -6724,7 +6514,31 @@ void Context::SetErrorMessageForCodeGenerationFromStrings(Local<String> error) {
context->set_error_message_for_code_gen_from_strings(*error_handle);
}
-size_t Context::EstimatedSize() { return 0; }
+namespace {
+i::Object** GetSerializedDataFromFixedArray(i::Isolate* isolate,
+ i::FixedArray* list, size_t index) {
+ if (index < static_cast<size_t>(list->length())) {
+ int int_index = static_cast<int>(index);
+ i::Object* object = list->get(int_index);
+ if (!object->IsTheHole(isolate)) {
+ list->set_the_hole(isolate, int_index);
+ // Shrink the list so that the last element is not the hole.
+ int last = list->length() - 1;
+ while (last >= 0 && list->is_the_hole(isolate, last)) last--;
+ list->Shrink(last + 1);
+ return i::Handle<i::Object>(object, isolate).location();
+ }
+ }
+ return nullptr;
+}
+} // anonymous namespace
+
+i::Object** Context::GetDataFromSnapshotOnce(size_t index) {
+ auto context = Utils::OpenHandle(this);
+ i::Isolate* i_isolate = context->GetIsolate();
+ i::FixedArray* list = i::FixedArray::cast(context->serialized_objects());
+ return GetSerializedDataFromFixedArray(i_isolate, list, index);
+}
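// Annotation (editor's sketch, not part of this patch): "Once" is literal.
// The helper above holes the slot after the first read and shrinks trailing
// holes off the list, so fetching the same index twice yields an empty
// MaybeLocal:
v8::Local<v8::Object> obj;
bool first = context->GetDataFromSnapshotOnce<v8::Object>(0).ToLocal(&obj);
bool again = context->GetDataFromSnapshotOnce<v8::Object>(0).ToLocal(&obj);
// If index 0 was populated at snapshot time: first == true, again == false.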
MaybeLocal<v8::Object> ObjectTemplate::NewInstance(Local<Context> context) {
PREPARE_FOR_EXECUTION(context, ObjectTemplate, NewInstance, Object);
@@ -6742,6 +6556,29 @@ Local<v8::Object> ObjectTemplate::NewInstance() {
RETURN_TO_LOCAL_UNCHECKED(NewInstance(context), Object);
}
+void v8::ObjectTemplate::CheckCast(Data* that) {
+ i::Handle<i::Object> obj = Utils::OpenHandle(that);
+ Utils::ApiCheck(obj->IsObjectTemplateInfo(), "v8::ObjectTemplate::Cast",
+ "Could not convert to object template");
+}
+
+void v8::FunctionTemplate::CheckCast(Data* that) {
+ i::Handle<i::Object> obj = Utils::OpenHandle(that);
+ Utils::ApiCheck(obj->IsFunctionTemplateInfo(), "v8::FunctionTemplate::Cast",
+ "Could not convert to function template");
+}
+
+void v8::Signature::CheckCast(Data* that) {
+ i::Handle<i::Object> obj = Utils::OpenHandle(that);
+ Utils::ApiCheck(obj->IsFunctionTemplateInfo(), "v8::Signature::Cast",
+ "Could not convert to signature");
+}
+
+void v8::AccessorSignature::CheckCast(Data* that) {
+ i::Handle<i::Object> obj = Utils::OpenHandle(that);
+ Utils::ApiCheck(obj->IsFunctionTemplateInfo(), "v8::AccessorSignature::Cast",
+ "Could not convert to accessor signature");
+}
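// Annotation (editor's sketch, not part of this patch): these casts make
// templates retrievable through the generic snapshot-data path as well as
// the dedicated FromSnapshot() helpers, since AddTemplate() now just
// forwards to AddData(). A hedged example:
v8::Local<v8::ObjectTemplate> tmpl;
if (isolate->GetDataFromSnapshotOnce<v8::ObjectTemplate>(index)
        .ToLocal(&tmpl)) {
  // Same entry FromSnapshot() would see, but this path clears the slot.
}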
MaybeLocal<v8::Function> FunctionTemplate::GetFunction(Local<Context> context) {
PREPARE_FOR_EXECUTION(context, FunctionTemplate, GetFunction, Function);
@@ -6915,16 +6752,6 @@ MaybeLocal<String> String::NewFromUtf8(Isolate* isolate, const char* data,
}
-Local<String> String::NewFromOneByte(Isolate* isolate,
- const uint8_t* data,
- NewStringType type,
- int length) {
- NEW_STRING(isolate, String, NewFromOneByte, uint8_t, data,
- static_cast<v8::NewStringType>(type), length);
- RETURN_TO_LOCAL_UNCHECKED(result, String);
-}
-
-
MaybeLocal<String> String::NewFromOneByte(Isolate* isolate, const uint8_t* data,
v8::NewStringType type, int length) {
NEW_STRING(isolate, String, NewFromOneByte, uint8_t, data, type, length);
@@ -6991,12 +6818,6 @@ MaybeLocal<String> v8::String::NewExternalTwoByte(
}
-Local<String> v8::String::NewExternal(
- Isolate* isolate, v8::String::ExternalStringResource* resource) {
- RETURN_TO_LOCAL_UNCHECKED(NewExternalTwoByte(isolate, resource), String);
-}
-
-
MaybeLocal<String> v8::String::NewExternalOneByte(
Isolate* isolate, v8::String::ExternalOneByteStringResource* resource) {
CHECK(resource && resource->data());
@@ -7133,11 +6954,6 @@ Local<v8::Value> v8::BooleanObject::New(Isolate* isolate, bool value) {
}
-Local<v8::Value> v8::BooleanObject::New(bool value) {
- return New(Isolate::GetCurrent(), value);
-}
-
-
bool v8::BooleanObject::ValueOf() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
@@ -7306,27 +7122,6 @@ uint32_t v8::Array::Length() const {
}
-MaybeLocal<Object> Array::CloneElementAt(Local<Context> context,
- uint32_t index) {
- PREPARE_FOR_EXECUTION(context, Array, CloneElementAt, Object);
- auto self = Utils::OpenHandle(this);
- if (!self->HasObjectElements()) return Local<Object>();
- i::FixedArray* elms = i::FixedArray::cast(self->elements());
- i::Object* paragon = elms->get(index);
- if (!paragon->IsJSObject()) return Local<Object>();
- i::Handle<i::JSObject> paragon_handle(i::JSObject::cast(paragon));
- Local<Object> result;
- has_pending_exception =
- !ToLocal<Object>(isolate->factory()->CopyJSObject(paragon_handle),
- &result);
- RETURN_ON_FAILED_EXECUTION(Object);
- RETURN_ESCAPED(result);
-}
-
-
-Local<Object> Array::CloneElementAt(uint32_t index) { return Local<Object>(); }
-
-
Local<v8::Map> v8::Map::New(Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, Map, New);
@@ -7643,12 +7438,6 @@ MaybeLocal<Promise> Promise::Catch(Local<Context> context,
}
-Local<Promise> Promise::Catch(Local<Function> handler) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
- RETURN_TO_LOCAL_UNCHECKED(Catch(context, handler), Promise);
-}
-
-
MaybeLocal<Promise> Promise::Then(Local<Context> context,
Local<Function> handler) {
PREPARE_FOR_EXECUTION(context, Promise, Then, Promise);
@@ -7663,12 +7452,6 @@ MaybeLocal<Promise> Promise::Then(Local<Context> context,
}
-Local<Promise> Promise::Then(Local<Function> handler) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
- RETURN_TO_LOCAL_UNCHECKED(Then(context, handler), Promise);
-}
-
-
bool Promise::HasHandler() {
i::Handle<i::JSReceiver> promise = Utils::OpenHandle(this);
i::Isolate* isolate = promise->GetIsolate();
@@ -7700,9 +7483,9 @@ Promise::PromiseState Promise::State() {
return static_cast<PromiseState>(js_promise->status());
}
-Local<Object> Proxy::GetTarget() {
+Local<Value> Proxy::GetTarget() {
i::Handle<i::JSProxy> self = Utils::OpenHandle(this);
- i::Handle<i::JSReceiver> target(self->target());
+ i::Handle<i::Object> target(self->target(), self->GetIsolate());
return Utils::ToLocal(target);
}
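// Annotation (editor's sketch, not part of this patch): the wider
// Local<Value> return type reflects that the target slot is not guaranteed
// to hold an object; after Proxy::Revoke() it is null. A hedged example:
v8::Local<v8::Value> target = proxy->GetTarget();
if (!target->IsNull()) {
  // Live proxy; `target` is the wrapped receiver.
}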
@@ -7742,8 +7525,8 @@ Local<String> WasmCompiledModule::GetWasmWireBytes() {
i::Handle<i::WasmModuleObject> obj =
i::Handle<i::WasmModuleObject>::cast(Utils::OpenHandle(this));
i::Handle<i::WasmCompiledModule> compiled_part =
- i::handle(i::WasmCompiledModule::cast(obj->compiled_module()));
- i::Handle<i::String> wire_bytes(compiled_part->module_bytes());
+ i::handle(obj->compiled_module());
+ i::Handle<i::String> wire_bytes(compiled_part->shared()->module_bytes());
return Local<String>::Cast(Utils::ToLocal(wire_bytes));
}
@@ -7782,20 +7565,7 @@ WasmCompiledModule::SerializedModule WasmCompiledModule::Serialize() {
i::Handle<i::WasmModuleObject>::cast(Utils::OpenHandle(this));
i::Handle<i::WasmCompiledModule> compiled_part =
i::handle(i::WasmCompiledModule::cast(obj->compiled_module()));
- if (i::FLAG_wasm_jit_to_native) {
- i::Isolate* isolate = obj->GetIsolate();
-
- return i::wasm::NativeModuleSerializer::SerializeWholeModule(isolate,
- compiled_part);
- } else {
- std::unique_ptr<i::ScriptData> script_data =
- i::WasmCompiledModuleSerializer::SerializeWasmModule(obj->GetIsolate(),
- compiled_part);
- script_data->ReleaseDataOwnership();
-
- size_t size = static_cast<size_t>(script_data->length());
- return {std::unique_ptr<const uint8_t[]>(script_data->data()), size};
- }
+ return i::wasm::SerializeNativeModule(obj->GetIsolate(), compiled_part);
}
MaybeLocal<WasmCompiledModule> WasmCompiledModule::Deserialize(
@@ -7803,25 +7573,14 @@ MaybeLocal<WasmCompiledModule> WasmCompiledModule::Deserialize(
const WasmCompiledModule::CallerOwnedBuffer& serialized_module,
const WasmCompiledModule::CallerOwnedBuffer& wire_bytes) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i::MaybeHandle<i::FixedArray> maybe_compiled_part;
- if (i::FLAG_wasm_jit_to_native) {
- maybe_compiled_part =
- i::wasm::NativeModuleDeserializer::DeserializeFullBuffer(
- i_isolate, {serialized_module.first, serialized_module.second},
- {wire_bytes.first, wire_bytes.second});
- } else {
- int size = static_cast<int>(serialized_module.second);
- i::ScriptData sc(serialized_module.first, size);
- maybe_compiled_part =
- i::WasmCompiledModuleSerializer::DeserializeWasmModule(
- i_isolate, &sc, {wire_bytes.first, wire_bytes.second});
- }
- i::Handle<i::FixedArray> compiled_part;
- if (!maybe_compiled_part.ToHandle(&compiled_part)) {
+ i::MaybeHandle<i::WasmCompiledModule> maybe_compiled_module =
+ i::wasm::DeserializeNativeModule(
+ i_isolate, {serialized_module.first, serialized_module.second},
+ {wire_bytes.first, wire_bytes.second});
+ i::Handle<i::WasmCompiledModule> compiled_module;
+ if (!maybe_compiled_module.ToHandle(&compiled_module)) {
return MaybeLocal<WasmCompiledModule>();
}
- i::Handle<i::WasmCompiledModule> compiled_module =
- handle(i::WasmCompiledModule::cast(*compiled_part));
return Local<WasmCompiledModule>::Cast(
Utils::ToLocal(i::Handle<i::JSObject>::cast(
i::WasmModuleObject::New(i_isolate, compiled_module))));
@@ -7866,8 +7625,10 @@ WasmModuleObjectBuilderStreaming::WasmModuleObjectBuilderStreaming(
i::Handle<i::JSPromise> promise = Utils::OpenHandle(*GetPromise());
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
streaming_decoder_ =
- i_isolate->wasm_compilation_manager()->StartStreamingCompilation(
- i_isolate, handle(i_isolate->context()), promise);
+ i_isolate->wasm_engine()
+ ->compilation_manager()
+ ->StartStreamingCompilation(i_isolate, handle(i_isolate->context()),
+ promise);
}
}
@@ -7907,7 +7668,8 @@ void WasmModuleObjectBuilderStreaming::Finish() {
// will be resolved when we move to true streaming compilation.
i::wasm::AsyncCompile(reinterpret_cast<i::Isolate*>(isolate_),
Utils::OpenHandle(*promise_.Get(isolate_)),
- {wire_bytes.get(), wire_bytes.get() + total_size_});
+ {wire_bytes.get(), wire_bytes.get() + total_size_},
+ false);
}
void WasmModuleObjectBuilderStreaming::Abort(Local<Value> exception) {
@@ -7917,6 +7679,12 @@ void WasmModuleObjectBuilderStreaming::Abort(Local<Value> exception) {
if (promise->State() != v8::Promise::kPending) return;
if (i::FLAG_wasm_stream_compilation) streaming_decoder_->Abort();
+ // If there is no exception, we do not reject the promise: an empty
+ // exception indicates that we are inside a ScriptForbiddenScope, where we
+ // may neither reject the promise nor execute any other JavaScript code at
+ // the moment.
+ if (exception.IsEmpty()) return;
+
Local<Promise::Resolver> resolver = promise.As<Promise::Resolver>();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate_);
i::HandleScope scope(i_isolate);
@@ -7973,6 +7741,14 @@ v8::ArrayBuffer::Contents v8::ArrayBuffer::Externalize() {
Utils::ApiCheck(!self->is_external(), "v8_ArrayBuffer_Externalize",
"ArrayBuffer already externalized");
self->set_is_external(true);
+ if (self->has_guard_region()) {
+ // Since this is being externalized, the Wasm Allocation Tracker can no
+ // longer track it.
+ //
+ // TODO(eholk): Find a way to track this across externalization
+ isolate->wasm_engine()->allocation_tracker()->ReleaseAddressSpace(
+ self->allocation_length());
+ }
isolate->heap()->UnregisterArrayBuffer(*self);
return GetContents();
@@ -8188,6 +7964,14 @@ v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::Externalize() {
Utils::ApiCheck(!self->is_external(), "v8_SharedArrayBuffer_Externalize",
"SharedArrayBuffer already externalized");
self->set_is_external(true);
+ if (self->has_guard_region()) {
+ // Since this is being externalized, the Wasm Allocation Tracker can no
+ // longer track it.
+ //
+ // TODO(eholk): Find a way to track this across externalization
+ isolate->wasm_engine()->allocation_tracker()->ReleaseAddressSpace(
+ self->allocation_length());
+ }
isolate->heap()->UnregisterArrayBuffer(*self);
return GetContents();
}
@@ -8197,14 +7981,14 @@ v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::GetContents() {
i::Handle<i::JSArrayBuffer> self = Utils::OpenHandle(this);
size_t byte_length = static_cast<size_t>(self->byte_length()->Number());
Contents contents;
+ contents.allocation_base_ = self->allocation_base();
+ contents.allocation_length_ = self->allocation_length();
+ contents.allocation_mode_ =
+ self->has_guard_region()
+ ? ArrayBufferAllocator::Allocator::AllocationMode::kReservation
+ : ArrayBufferAllocator::Allocator::AllocationMode::kNormal;
contents.data_ = self->backing_store();
contents.byte_length_ = byte_length;
- // SharedArrayBuffers never have guard regions, so their allocation and data
- // are equivalent.
- contents.allocation_base_ = self->backing_store();
- contents.allocation_length_ = byte_length;
- contents.allocation_mode_ =
- ArrayBufferAllocator::Allocator::AllocationMode::kNormal;
return contents;
}
@@ -8727,6 +8511,11 @@ Isolate::SuppressMicrotaskExecutionScope::~SuppressMicrotaskExecutionScope() {
isolate_->handle_scope_implementer()->DecrementCallDepth();
}
+i::Object** Isolate::GetDataFromSnapshotOnce(size_t index) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(this);
+ i::FixedArray* list = i_isolate->heap()->serialized_objects();
+ return GetSerializedDataFromFixedArray(i_isolate, list, index);
+}
void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
@@ -8742,6 +8531,9 @@ void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) {
isolate->allocator()->GetCurrentMemoryUsage();
heap_statistics->peak_malloced_memory_ =
isolate->allocator()->GetMaxMemoryUsage();
+ heap_statistics->number_of_native_contexts_ = heap->NumberOfNativeContexts();
+ heap_statistics->number_of_detached_contexts_ =
+ heap->NumberOfDetachedContexts();
heap_statistics->does_zap_garbage_ = heap->ShouldZapGarbage();
}
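// Annotation (editor's sketch, not part of this patch): the two new
// counters surface alongside the existing heap numbers; a steadily growing
// detached-context count is a common signal of context leaks.
v8::HeapStatistics stats;
isolate->GetHeapStatistics(&stats);
size_t live_contexts = stats.number_of_native_contexts();
size_t leaked_contexts = stats.number_of_detached_contexts();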
@@ -8870,7 +8662,6 @@ void Isolate::RemoveCallCompletedCallback(CallCompletedCallback callback) {
isolate->RemoveCallCompletedCallback(callback);
}
-
void Isolate::AddCallCompletedCallback(
DeprecatedCallCompletedCallback callback) {
AddCallCompletedCallback(reinterpret_cast<CallCompletedCallback>(callback));
@@ -8985,15 +8776,6 @@ void Isolate::SetAddHistogramSampleFunction(
}
-bool Isolate::IdleNotification(int idle_time_in_ms) {
- // Returning true tells the caller that it need not
- // continue to call IdleNotification.
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- if (!i::FLAG_use_idle_notification) return true;
- return isolate->heap()->IdleNotification(idle_time_in_ms);
-}
-
-
bool Isolate::IdleNotificationDeadline(double deadline_in_seconds) {
// Returning true tells the caller that it need not
// continue to call IdleNotification.
@@ -9346,14 +9128,6 @@ Local<Message> Exception::CreateMessage(Isolate* isolate,
}
-Local<Message> Exception::CreateMessage(Local<Value> exception) {
- i::Handle<i::Object> obj = Utils::OpenHandle(*exception);
- if (!obj->IsHeapObject()) return Local<Message>();
- i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
- return CreateMessage(reinterpret_cast<Isolate*>(isolate), exception);
-}
-
-
Local<StackTrace> Exception::GetStackTrace(Local<Value> exception) {
i::Handle<i::Object> obj = Utils::OpenHandle(*exception);
if (!obj->IsJSObject()) return Local<StackTrace>();
@@ -9664,9 +9438,9 @@ bool debug::Script::GetPossibleBreakpoints(
CHECK(!start.IsEmpty());
i::Handle<i::Script> script = Utils::OpenHandle(this);
if (script->type() == i::Script::TYPE_WASM) {
- i::Handle<i::WasmCompiledModule> compiled_module(
- i::WasmCompiledModule::cast(script->wasm_compiled_module()));
- return compiled_module->GetPossibleBreakpoints(start, end, locations);
+ i::WasmSharedModuleData* shared =
+ i::WasmCompiledModule::cast(script->wasm_compiled_module())->shared();
+ return shared->GetPossibleBreakpoints(start, end, locations);
}
i::Script::InitLineEnds(script);
@@ -9715,6 +9489,7 @@ int debug::Script::GetSourceOffset(const debug::Location& location) const {
i::Handle<i::Script> script = Utils::OpenHandle(this);
if (script->type() == i::Script::TYPE_WASM) {
return i::WasmCompiledModule::cast(script->wasm_compiled_module())
+ ->shared()
->GetFunctionOffset(location.GetLineNumber()) +
location.GetColumnNumber();
}
@@ -9784,8 +9559,9 @@ int debug::WasmScript::NumFunctions() const {
DCHECK_EQ(i::Script::TYPE_WASM, script->type());
i::WasmCompiledModule* compiled_module =
i::WasmCompiledModule::cast(script->wasm_compiled_module());
- DCHECK_GE(i::kMaxInt, compiled_module->module()->functions.size());
- return static_cast<int>(compiled_module->module()->functions.size());
+ i::wasm::WasmModule* module = compiled_module->shared()->module();
+ DCHECK_GE(i::kMaxInt, module->functions.size());
+ return static_cast<int>(module->functions.size());
}
int debug::WasmScript::NumImportedFunctions() const {
@@ -9794,8 +9570,9 @@ int debug::WasmScript::NumImportedFunctions() const {
DCHECK_EQ(i::Script::TYPE_WASM, script->type());
i::WasmCompiledModule* compiled_module =
i::WasmCompiledModule::cast(script->wasm_compiled_module());
- DCHECK_GE(i::kMaxInt, compiled_module->module()->num_imported_functions);
- return static_cast<int>(compiled_module->module()->num_imported_functions);
+ i::wasm::WasmModule* module = compiled_module->shared()->module();
+ DCHECK_GE(i::kMaxInt, module->num_imported_functions);
+ return static_cast<int>(module->num_imported_functions);
}
std::pair<int, int> debug::WasmScript::GetFunctionRange(
@@ -9805,10 +9582,10 @@ std::pair<int, int> debug::WasmScript::GetFunctionRange(
DCHECK_EQ(i::Script::TYPE_WASM, script->type());
i::WasmCompiledModule* compiled_module =
i::WasmCompiledModule::cast(script->wasm_compiled_module());
+ i::wasm::WasmModule* module = compiled_module->shared()->module();
DCHECK_LE(0, function_index);
- DCHECK_GT(compiled_module->module()->functions.size(), function_index);
- i::wasm::WasmFunction& func =
- compiled_module->module()->functions[function_index];
+ DCHECK_GT(module->functions.size(), function_index);
+ i::wasm::WasmFunction& func = module->functions[function_index];
DCHECK_GE(i::kMaxInt, func.code.offset());
DCHECK_GE(i::kMaxInt, func.code.end_offset());
return std::make_pair(static_cast<int>(func.code.offset()),
@@ -9822,7 +9599,7 @@ debug::WasmDisassembly debug::WasmScript::DisassembleFunction(
DCHECK_EQ(i::Script::TYPE_WASM, script->type());
i::WasmCompiledModule* compiled_module =
i::WasmCompiledModule::cast(script->wasm_compiled_module());
- return compiled_module->DisassembleFunction(function_index);
+ return compiled_module->shared()->DisassembleFunction(function_index);
}
debug::Location::Location(int line_number, int column_number)
@@ -9851,9 +9628,6 @@ void debug::GetLoadedScripts(v8::Isolate* v8_isolate,
PersistentValueVector<debug::Script>& scripts) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- // TODO(kozyatinskiy): remove this GC once tests are dealt with.
- isolate->heap()->CollectAllGarbage(i::Heap::kMakeHeapIterableMask,
- i::GarbageCollectionReason::kDebugger);
{
i::DisallowHeapAllocation no_gc;
i::Script::Iterator iterator(isolate);
@@ -10913,7 +10687,7 @@ void InvokeAccessorGetterCallback(
// Leaving JavaScript.
Isolate* isolate = reinterpret_cast<Isolate*>(info.GetIsolate());
RuntimeCallTimerScope timer(isolate,
- &RuntimeCallStats::AccessorGetterCallback);
+ RuntimeCallCounterId::kAccessorGetterCallback);
Address getter_address = reinterpret_cast<Address>(reinterpret_cast<intptr_t>(
getter));
VMState<EXTERNAL> state(isolate);
@@ -10926,7 +10700,7 @@ void InvokeFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>& info,
v8::FunctionCallback callback) {
Isolate* isolate = reinterpret_cast<Isolate*>(info.GetIsolate());
RuntimeCallTimerScope timer(isolate,
- &RuntimeCallStats::InvokeFunctionCallback);
+ RuntimeCallCounterId::kInvokeFunctionCallback);
Address callback_address =
reinterpret_cast<Address>(reinterpret_cast<intptr_t>(callback));
VMState<EXTERNAL> state(isolate);
@@ -10934,6 +10708,25 @@ void InvokeFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>& info,
callback(info);
}
+// Undefine macros for jumbo build.
+#undef LOG_API
+#undef ENTER_V8_DO_NOT_USE
+#undef ENTER_V8_HELPER_DO_NOT_USE
+#undef PREPARE_FOR_DEBUG_INTERFACE_EXECUTION_WITH_ISOLATE
+#undef PREPARE_FOR_EXECUTION_WITH_CONTEXT
+#undef PREPARE_FOR_EXECUTION
+#undef ENTER_V8
+#undef ENTER_V8_NO_SCRIPT
+#undef ENTER_V8_NO_SCRIPT_NO_EXCEPTION
+#undef ENTER_V8_FOR_NEW_CONTEXT
+#undef EXCEPTION_BAILOUT_CHECK_SCOPED_DO_NOT_USE
+#undef RETURN_ON_FAILED_EXECUTION
+#undef RETURN_ON_FAILED_EXECUTION_PRIMITIVE
+#undef RETURN_TO_LOCAL_UNCHECKED
+#undef RETURN_ESCAPED
+#undef SET_FIELD_WRAPPED
+#undef NEW_STRING
+#undef CALLBACK_SETTER
} // namespace internal
} // namespace v8
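
Note: the #undef block closing api.cc above exists for jumbo (unity) builds, where several .cc files are concatenated into one translation unit, so a file-local macro would otherwise leak into, or collide with, the next file. A minimal sketch of the hazard, with hypothetical macro and function names rather than the real V8 ones:

#include <cstdio>

// --- contents of a.cc ---
#define ENTER_SCOPE(name) printf("enter %s\n", name)
void FromA() { ENTER_SCOPE("a"); }
// Without this #undef, b.cc's different definition below would be an
// ill-formed macro redefinition once the files are concatenated.
#undef ENTER_SCOPE

// --- contents of b.cc ---
#define ENTER_SCOPE(name) printf(">> %s\n", name)
void FromB() { ENTER_SCOPE("b"); }
#undef ENTER_SCOPE

int main() {
  FromA();
  FromB();
  return 0;
}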
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index 0a70ac83e4..7bd03c37da 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -11,6 +11,7 @@
#include "src/detachable-vector.h"
#include "src/factory.h"
#include "src/isolate.h"
+#include "src/objects/js-collection.h"
namespace v8 {
@@ -404,6 +405,7 @@ class HandleScopeImplementer {
call_depth_(0),
microtasks_depth_(0),
microtasks_suppressions_(0),
+ entered_contexts_count_(0),
entered_context_count_during_microtasks_(0),
#ifdef DEBUG
debug_microtasks_depth_(0),
@@ -530,6 +532,7 @@ class HandleScopeImplementer {
int call_depth_;
int microtasks_depth_;
int microtasks_suppressions_;
+ size_t entered_contexts_count_;
size_t entered_context_count_during_microtasks_;
#ifdef DEBUG
int debug_microtasks_depth_;
@@ -545,10 +548,25 @@ class HandleScopeImplementer {
friend class DeferredHandles;
friend class DeferredHandleScope;
+ friend class HandleScopeImplementerOffsets;
DISALLOW_COPY_AND_ASSIGN(HandleScopeImplementer);
};
+class HandleScopeImplementerOffsets {
+ public:
+ enum Offsets {
+ kMicrotaskContext = offsetof(HandleScopeImplementer, microtask_context_),
+ kEnteredContexts = offsetof(HandleScopeImplementer, entered_contexts_),
+ kEnteredContextsCount =
+ offsetof(HandleScopeImplementer, entered_contexts_count_),
+ kEnteredContextCountDuringMicrotasks = offsetof(
+ HandleScopeImplementer, entered_context_count_during_microtasks_)
+ };
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(HandleScopeImplementerOffsets);
+};
const int kHandleBlockSize = v8::internal::KB - 2; // fit in one page
@@ -583,9 +601,13 @@ bool HandleScopeImplementer::HasSavedContexts() {
void HandleScopeImplementer::EnterContext(Handle<Context> context) {
entered_contexts_.push_back(*context);
+ entered_contexts_count_ = entered_contexts_.size();
}
-void HandleScopeImplementer::LeaveContext() { entered_contexts_.pop_back(); }
+void HandleScopeImplementer::LeaveContext() {
+ entered_contexts_.pop_back();
+ entered_contexts_count_ = entered_contexts_.size();
+}
bool HandleScopeImplementer::LastEnteredContextWas(Handle<Context> context) {
return !entered_contexts_.empty() && entered_contexts_.back() == *context;
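
Note: the new entered_contexts_count_ field and the HandleScopeImplementerOffsets enum work together. Generated code cannot call size() on the vector, so a plain size_t mirror is refreshed on every EnterContext/LeaveContext and its byte offset is published via offsetof. A hedged sketch of the same pattern with illustrative names (offsetof on a non-standard-layout type is only conditionally supported; the real code takes the same liberty):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

struct ScopeData {
  std::vector<int> entered;
  size_t entered_count = 0;  // mirrored on every push/pop

  void Enter(int ctx) {
    entered.push_back(ctx);
    entered_count = entered.size();
  }
  void Leave() {
    entered.pop_back();
    entered_count = entered.size();
  }
};

constexpr size_t kEnteredCountOffset = offsetof(ScopeData, entered_count);

int main() {
  ScopeData data;
  data.Enter(1);
  data.Enter(2);
  // What generated code would do: read the count through the raw offset.
  auto base = reinterpret_cast<const uint8_t*>(&data);
  size_t count;
  std::memcpy(&count, base + kEnteredCountOffset, sizeof(count));
  assert(count == 2);
  return 0;
}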
diff --git a/deps/v8/src/arguments.h b/deps/v8/src/arguments.h
index 3d58b8249b..d01e77314a 100644
--- a/deps/v8/src/arguments.h
+++ b/deps/v8/src/arguments.h
@@ -85,7 +85,7 @@ double ClobberDoubleRegisters(double x1, double x2, double x3, double x4);
\
V8_NOINLINE static Type Stats_##Name(int args_length, Object** args_object, \
Isolate* isolate) { \
- RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Name); \
+ RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::k##Name); \
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"), \
"V8.Runtime_" #Name); \
Arguments args(args_length, args_object); \
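
Note: the change from &RuntimeCallStats::Name to RuntimeCallCounterId::k##Name swaps a pointer-to-member key for a small enum id that can index a flat counter table. A minimal sketch of that shape, assuming a simplified stats layout; the names here are illustrative, not the real RuntimeCallStats API:

#include <array>
#include <chrono>
#include <cstdio>

enum class CounterId { kAccessorGetterCallback, kInvokeFunctionCallback, kNumCounters };

struct Stats {
  std::array<long long, static_cast<size_t>(CounterId::kNumCounters)> micros{};
};

class TimerScope {
 public:
  TimerScope(Stats* stats, CounterId id)
      : stats_(stats), id_(id), start_(std::chrono::steady_clock::now()) {}
  ~TimerScope() {
    auto elapsed = std::chrono::steady_clock::now() - start_;
    stats_->micros[static_cast<size_t>(id_)] +=
        std::chrono::duration_cast<std::chrono::microseconds>(elapsed).count();
  }

 private:
  Stats* stats_;
  CounterId id_;
  std::chrono::steady_clock::time_point start_;
};

int main() {
  Stats stats;
  { TimerScope timer(&stats, CounterId::kInvokeFunctionCallback); }
  printf("%lld us\n", stats.micros[1]);
  return 0;
}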
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index ce6b759d30..f420f2e5cb 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -68,7 +68,7 @@ void RelocInfo::apply(intptr_t delta) {
Address RelocInfo::target_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
- return Assembler::target_address_at(pc_, host_);
+ return Assembler::target_address_at(pc_, constant_pool_);
}
Address RelocInfo::target_address_address() {
@@ -85,7 +85,7 @@ Address RelocInfo::target_address_address() {
Address RelocInfo::constant_pool_entry_address() {
DCHECK(IsInConstantPool());
- return Assembler::constant_pool_entry_address(pc_, host_->constant_pool());
+ return Assembler::constant_pool_entry_address(pc_, constant_pool_);
}
@@ -95,21 +95,21 @@ int RelocInfo::target_address_size() {
HeapObject* RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return HeapObject::cast(
- reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_)));
+ return HeapObject::cast(reinterpret_cast<Object*>(
+ Assembler::target_address_at(pc_, constant_pool_)));
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Handle<HeapObject>(
- reinterpret_cast<HeapObject**>(Assembler::target_address_at(pc_, host_)));
+ return Handle<HeapObject>(reinterpret_cast<HeapObject**>(
+ Assembler::target_address_at(pc_, constant_pool_)));
}
void RelocInfo::set_target_object(HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(target->GetIsolate(), pc_, host_,
+ Assembler::set_target_address_at(target->GetIsolate(), pc_, constant_pool_,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
@@ -122,7 +122,7 @@ void RelocInfo::set_target_object(HeapObject* target,
Address RelocInfo::target_external_reference() {
DCHECK(rmode_ == EXTERNAL_REFERENCE);
- return Assembler::target_address_at(pc_, host_);
+ return Assembler::target_address_at(pc_, constant_pool_);
}
@@ -158,7 +158,7 @@ void RelocInfo::WipeOut(Isolate* isolate) {
if (IsInternalReference(rmode_)) {
Memory::Address_at(pc_) = nullptr;
} else {
- Assembler::set_target_address_at(isolate, pc_, host_, nullptr);
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_, nullptr);
}
}
@@ -382,18 +382,6 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
}
}
-Address Assembler::target_address_at(Address pc, Code* code) {
- Address constant_pool = code ? code->constant_pool() : nullptr;
- return target_address_at(pc, constant_pool);
-}
-
-void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
- Address target,
- ICacheFlushMode icache_flush_mode) {
- Address constant_pool = code ? code->constant_pool() : nullptr;
- set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
-}
-
EnsureSpace::EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); }
} // namespace internal
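
Note: the RelocInfo changes above follow one pattern: the nullable Code* host parameter is replaced by the constant pool address itself, and the two deleted Assembler overloads that derived the pool from the host go away. A simplified before/after model with stand-in types:

#include <cassert>
#include <cstdint>
#include <cstring>

using Address = uint8_t*;

// New shape: the constant pool base is an explicit address.
int32_t TargetAt(Address constant_pool) {
  int32_t target;
  std::memcpy(&target, constant_pool, sizeof(target));  // model: slot 0 holds it
  return target;
}

// What the deleted overloads did: derive the pool from a nullable Code* host
// and forward, hiding a null check at every call site.
struct Code {
  uint8_t pool[16] = {};
  Address constant_pool() { return pool; }
};

int32_t TargetAtOld(Code* code) {
  Address constant_pool = code ? code->constant_pool() : nullptr;
  return TargetAt(constant_pool);
}

int main() {
  Code code;
  int32_t value = 42;
  std::memcpy(code.pool, &value, sizeof(value));
  assert(TargetAt(code.constant_pool()) == 42);
  assert(TargetAtOld(&code) == 42);
  return 0;
}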
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 8c22974ca3..a615d67496 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -72,7 +72,7 @@ static unsigned CpuFeaturesFromCommandLine() {
" armv7+sudiv\n"
" armv7\n"
" armv6\n");
- CHECK(false);
+ FATAL("arm-arch");
}
// If any of the old (deprecated) flags are specified, print a warning, but
@@ -339,21 +339,23 @@ bool RelocInfo::IsInConstantPool() {
}
Address RelocInfo::embedded_address() const {
- return Assembler::target_address_at(pc_, host_);
+ return Assembler::target_address_at(pc_, constant_pool_);
}
uint32_t RelocInfo::embedded_size() const {
- return reinterpret_cast<uint32_t>(Assembler::target_address_at(pc_, host_));
+ return reinterpret_cast<uint32_t>(
+ Assembler::target_address_at(pc_, constant_pool_));
}
void RelocInfo::set_embedded_address(Isolate* isolate, Address address,
ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate, pc_, host_, address, flush_mode);
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_, address,
+ flush_mode);
}
void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate, pc_, host_,
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_,
reinterpret_cast<Address>(size), flush_mode);
}
@@ -474,7 +476,6 @@ void NeonMemOperand::SetAlignment(int align) {
break;
default:
UNREACHABLE();
- align_ = 0;
break;
}
}
@@ -519,23 +520,23 @@ const Instr kBlxRegMask =
const Instr kBlxRegPattern =
B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX;
const Instr kBlxIp = al | kBlxRegPattern | ip.code();
-const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
-const Instr kMovMvnPattern = 0xd * B21;
+const Instr kMovMvnMask = 0x6D * B21 | 0xF * B16;
+const Instr kMovMvnPattern = 0xD * B21;
const Instr kMovMvnFlip = B22;
-const Instr kMovLeaveCCMask = 0xdff * B16;
-const Instr kMovLeaveCCPattern = 0x1a0 * B16;
+const Instr kMovLeaveCCMask = 0xDFF * B16;
+const Instr kMovLeaveCCPattern = 0x1A0 * B16;
const Instr kMovwPattern = 0x30 * B20;
const Instr kMovtPattern = 0x34 * B20;
const Instr kMovwLeaveCCFlip = 0x5 * B21;
-const Instr kMovImmedMask = 0x7f * B21;
-const Instr kMovImmedPattern = 0x1d * B21;
-const Instr kOrrImmedMask = 0x7f * B21;
-const Instr kOrrImmedPattern = 0x1c * B21;
-const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
+const Instr kMovImmedMask = 0x7F * B21;
+const Instr kMovImmedPattern = 0x1D * B21;
+const Instr kOrrImmedMask = 0x7F * B21;
+const Instr kOrrImmedPattern = 0x1C * B21;
+const Instr kCmpCmnMask = 0xDD * B20 | 0xF * B12;
const Instr kCmpCmnPattern = 0x15 * B20;
const Instr kCmpCmnFlip = B21;
const Instr kAddSubFlip = 0x6 * B21;
-const Instr kAndBicFlip = 0xe * B21;
+const Instr kAndBicFlip = 0xE * B21;
// A mask for the Rd register for push, pop, ldr, str instructions.
const Instr kLdrRegFpOffsetPattern = al | B26 | L | Offset | fp.code() * B16;
@@ -543,7 +544,7 @@ const Instr kStrRegFpOffsetPattern = al | B26 | Offset | fp.code() * B16;
const Instr kLdrRegFpNegOffsetPattern =
al | B26 | L | NegOffset | fp.code() * B16;
const Instr kStrRegFpNegOffsetPattern = al | B26 | NegOffset | fp.code() * B16;
-const Instr kLdrStrInstrTypeMask = 0xffff0000;
+const Instr kLdrStrInstrTypeMask = 0xFFFF0000;
Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
: AssemblerBase(isolate_data, buffer, buffer_size),
@@ -1046,7 +1047,7 @@ bool FitsShifter(uint32_t imm32, uint32_t* rotate_imm, uint32_t* immed_8,
// imm32 must be unsigned.
for (int rot = 0; rot < 16; rot++) {
uint32_t imm8 = base::bits::RotateLeft32(imm32, 2 * rot);
- if ((imm8 <= 0xff)) {
+ if ((imm8 <= 0xFF)) {
*rotate_imm = rot;
*immed_8 = imm8;
return true;
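
Note: the loop above is the classic ARM operand-2 test: an immediate is encodable iff it is an 8-bit value rotated right by an even amount, so rotating the candidate left by 2*rot and testing for a byte covers all cases. A standalone copy with worked checks (the helper mirrors base::bits::RotateLeft32):

#include <cassert>
#include <cstdint>

static uint32_t RotateLeft32(uint32_t v, unsigned n) {
  n &= 31;
  return n == 0 ? v : (v << n) | (v >> (32 - n));
}

bool FitsShifter(uint32_t imm32, uint32_t* rotate_imm, uint32_t* immed_8) {
  for (uint32_t rot = 0; rot < 16; rot++) {
    uint32_t imm8 = RotateLeft32(imm32, 2 * rot);
    if (imm8 <= 0xFF) {
      *rotate_imm = rot;
      *immed_8 = imm8;
      return true;
    }
  }
  return false;
}

int main() {
  uint32_t rot, imm8;
  assert(FitsShifter(0x3FC00, &rot, &imm8));  // 0xFF << 10: encodable
  assert(imm8 == 0xFF);
  assert(!FitsShifter(0x101, &rot, &imm8));   // needs 9 significant bits
  return 0;
}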
@@ -1172,7 +1173,7 @@ void Assembler::Move32BitImmediate(Register rd, const Operand& x,
if (CpuFeatures::IsSupported(ARMv7)) {
uint32_t imm32 = static_cast<uint32_t>(x.immediate());
CpuFeatureScope scope(this, ARMv7);
- movw(target, imm32 & 0xffff, cond);
+ movw(target, imm32 & 0xFFFF, cond);
movt(target, imm32 >> 16, cond);
}
if (target.code() != rd.code()) {
@@ -1187,7 +1188,7 @@ void Assembler::Move32BitImmediate(Register rd, const Operand& x,
immediate = x.immediate();
}
ConstantPoolAddEntry(pc_offset(), x.rmode_, immediate);
- ldr(rd, MemOperand(pc, 0), cond);
+ ldr_pcrel(rd, 0, cond);
}
}
@@ -1234,7 +1235,7 @@ void Assembler::AddrMode1(Instr instr, Register rd, Register rn,
// This means that finding the even number of trailing zeroes of the
// immediate allows us to more efficiently split it:
int trailing_zeroes = base::bits::CountTrailingZeros(imm) & ~1u;
- uint32_t mask = (0xff << trailing_zeroes);
+ uint32_t mask = (0xFF << trailing_zeroes);
add(rd, rd, Operand(imm & mask), LeaveCC, cond);
imm = imm & ~mask;
} while (!ImmediateFitsAddrMode1Instruction(imm));
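
Note: the splitting strategy in the comment above peels off the lowest eight significant bits at an even bit position each round, so every piece passes the FitsShifter() test on its own. A C++ model with a round-trip check; at most four pieces are ever needed for a 32-bit value:

#include <cassert>
#include <cstdint>

static int CountTrailingZeros(uint32_t v) { return v ? __builtin_ctz(v) : 32; }

int SplitImmediate(uint32_t imm, uint32_t pieces[4]) {
  int n = 0;
  while (imm != 0) {
    int trailing_zeroes = CountTrailingZeros(imm) & ~1u;  // even position
    uint32_t mask = 0xFFu << trailing_zeroes;
    pieces[n++] = imm & mask;  // this piece is a valid rotated immediate
    imm &= ~mask;
  }
  return n;
}

int main() {
  uint32_t pieces[4];
  int n = SplitImmediate(0x12345678, pieces);
  assert(n == 4);
  uint32_t sum = 0;
  for (int i = 0; i < n; i++) sum += pieces[i];
  assert(sum == 0x12345678);  // adds of the pieces rebuild the immediate
  return 0;
}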
@@ -1294,6 +1295,9 @@ bool Assembler::AddrMode1TryEncodeOperand(Instr* instr, const Operand& x) {
void Assembler::AddrMode2(Instr instr, Register rd, const MemOperand& x) {
DCHECK((instr & ~(kCondMask | B | L)) == B26);
+ // This method does not handle pc-relative addresses. ldr_pcrel() should be
+ // used instead.
+ DCHECK(x.rn_ != pc);
int am = x.am_;
if (!x.rm_.is_valid()) {
// Immediate offset.
@@ -1331,6 +1335,9 @@ void Assembler::AddrMode2(Instr instr, Register rd, const MemOperand& x) {
void Assembler::AddrMode3(Instr instr, Register rd, const MemOperand& x) {
DCHECK((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7));
DCHECK(x.rn_.is_valid());
+ // This method does not handle pc-relative addresses. ldr_pcrel() should be
+ // used instead.
+ DCHECK(x.rn_ != pc);
int am = x.am_;
bool is_load = (instr & L) == L;
if (!x.rm_.is_valid()) {
@@ -1353,7 +1360,7 @@ void Assembler::AddrMode3(Instr instr, Register rd, const MemOperand& x) {
return;
}
DCHECK_GE(offset_8, 0); // no masking needed
- instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
+ instr |= B | (offset_8 >> 4) * B8 | (offset_8 & 0xF);
} else if (x.shift_imm_ != 0) {
// Scaled register offsets are not supported, compute the offset separately
// to a scratch register.
@@ -1709,8 +1716,8 @@ void Assembler::sdiv(Register dst, Register src1, Register src2,
Condition cond) {
DCHECK(dst != pc && src1 != pc && src2 != pc);
DCHECK(IsEnabled(SUDIV));
- emit(cond | B26 | B25| B24 | B20 | dst.code()*B16 | 0xf * B12 |
- src2.code()*B8 | B4 | src1.code());
+ emit(cond | B26 | B25 | B24 | B20 | dst.code() * B16 | 0xF * B12 |
+ src2.code() * B8 | B4 | src1.code());
}
@@ -1718,7 +1725,7 @@ void Assembler::udiv(Register dst, Register src1, Register src2,
Condition cond) {
DCHECK(dst != pc && src1 != pc && src2 != pc);
DCHECK(IsEnabled(SUDIV));
- emit(cond | B26 | B25 | B24 | B21 | B20 | dst.code() * B16 | 0xf * B12 |
+ emit(cond | B26 | B25 | B24 | B21 | B20 | dst.code() * B16 | 0xF * B12 |
src2.code() * B8 | B4 | src1.code());
}
@@ -1742,7 +1749,7 @@ void Assembler::smmla(Register dst, Register src1, Register src2, Register srcA,
void Assembler::smmul(Register dst, Register src1, Register src2,
Condition cond) {
DCHECK(dst != pc && src1 != pc && src2 != pc);
- emit(cond | B26 | B25 | B24 | B22 | B20 | dst.code() * B16 | 0xf * B12 |
+ emit(cond | B26 | B25 | B24 | B22 | B20 | dst.code() * B16 | 0xF * B12 |
src2.code() * B8 | B4 | src1.code());
}
@@ -1824,8 +1831,8 @@ void Assembler::usat(Register dst,
sh = 1;
}
- emit(cond | 0x6*B24 | 0xe*B20 | satpos*B16 | dst.code()*B12 |
- src.shift_imm_*B7 | sh*B6 | 0x1*B4 | src.rm_.code());
+ emit(cond | 0x6 * B24 | 0xE * B20 | satpos * B16 | dst.code() * B12 |
+ src.shift_imm_ * B7 | sh * B6 | 0x1 * B4 | src.rm_.code());
}
@@ -1844,8 +1851,8 @@ void Assembler::ubfx(Register dst,
DCHECK(dst != pc && src != pc);
DCHECK((lsb >= 0) && (lsb <= 31));
DCHECK((width >= 1) && (width <= (32 - lsb)));
- emit(cond | 0xf*B23 | B22 | B21 | (width - 1)*B16 | dst.code()*B12 |
- lsb*B7 | B6 | B4 | src.code());
+ emit(cond | 0xF * B23 | B22 | B21 | (width - 1) * B16 | dst.code() * B12 |
+ lsb * B7 | B6 | B4 | src.code());
}
@@ -1863,8 +1870,8 @@ void Assembler::sbfx(Register dst,
DCHECK(dst != pc && src != pc);
DCHECK((lsb >= 0) && (lsb <= 31));
DCHECK((width >= 1) && (width <= (32 - lsb)));
- emit(cond | 0xf*B23 | B21 | (width - 1)*B16 | dst.code()*B12 |
- lsb*B7 | B6 | B4 | src.code());
+ emit(cond | 0xF * B23 | B21 | (width - 1) * B16 | dst.code() * B12 |
+ lsb * B7 | B6 | B4 | src.code());
}
@@ -1878,7 +1885,7 @@ void Assembler::bfc(Register dst, int lsb, int width, Condition cond) {
DCHECK((lsb >= 0) && (lsb <= 31));
DCHECK((width >= 1) && (width <= (32 - lsb)));
int msb = lsb + width - 1;
- emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 | 0xf);
+ emit(cond | 0x1F * B22 | msb * B16 | dst.code() * B12 | lsb * B7 | B4 | 0xF);
}
@@ -1896,7 +1903,7 @@ void Assembler::bfi(Register dst,
DCHECK((lsb >= 0) && (lsb <= 31));
DCHECK((width >= 1) && (width <= (32 - lsb)));
int msb = lsb + width - 1;
- emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 |
+ emit(cond | 0x1F * B22 | msb * B16 | dst.code() * B12 | lsb * B7 | B4 |
src.code());
}
@@ -2073,8 +2080,8 @@ void Assembler::mrs(Register dst, SRegister s, Condition cond) {
void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
Condition cond) {
- DCHECK_NE(fields & 0x000f0000, 0); // At least one field must be set.
- DCHECK(((fields & 0xfff0ffff) == CPSR) || ((fields & 0xfff0ffff) == SPSR));
+ DCHECK_NE(fields & 0x000F0000, 0); // At least one field must be set.
+ DCHECK(((fields & 0xFFF0FFFF) == CPSR) || ((fields & 0xFFF0FFFF) == SPSR));
Instr instr;
if (src.IsImmediate()) {
// Immediate.
@@ -2159,13 +2166,23 @@ void Assembler::strd(Register src1, Register src2,
AddrMode3(cond | B7 | B6 | B5 | B4, src1, dst);
}
+void Assembler::ldr_pcrel(Register dst, int imm12, Condition cond) {
+ AddrMode am = Offset;
+ if (imm12 < 0) {
+ imm12 = -imm12;
+ am = NegOffset;
+ }
+ DCHECK(is_uint12(imm12));
+ emit(cond | B26 | am | L | pc.code() * B16 | dst.code() * B12 | imm12);
+}
+
// Load/Store exclusive instructions.
void Assembler::ldrex(Register dst, Register src, Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.75.
// cond(31-28) | 00011001(27-20) | Rn(19-16) | Rt(15-12) | 111110011111(11-0)
DCHECK(dst != pc);
DCHECK(src != pc);
- emit(cond | B24 | B23 | B20 | src.code() * B16 | dst.code() * B12 | 0xf9f);
+ emit(cond | B24 | B23 | B20 | src.code() * B16 | dst.code() * B12 | 0xF9F);
}
void Assembler::strex(Register src1, Register src2, Register dst,
@@ -2178,7 +2195,7 @@ void Assembler::strex(Register src1, Register src2, Register dst,
DCHECK(src2 != pc);
DCHECK(src1 != dst);
DCHECK(src1 != src2);
- emit(cond | B24 | B23 | dst.code() * B16 | src1.code() * B12 | 0xf9 * B4 |
+ emit(cond | B24 | B23 | dst.code() * B16 | src1.code() * B12 | 0xF9 * B4 |
src2.code());
}
@@ -2188,7 +2205,7 @@ void Assembler::ldrexb(Register dst, Register src, Condition cond) {
DCHECK(dst != pc);
DCHECK(src != pc);
emit(cond | B24 | B23 | B22 | B20 | src.code() * B16 | dst.code() * B12 |
- 0xf9f);
+ 0xF9F);
}
void Assembler::strexb(Register src1, Register src2, Register dst,
@@ -2202,7 +2219,7 @@ void Assembler::strexb(Register src1, Register src2, Register dst,
DCHECK(src1 != dst);
DCHECK(src1 != src2);
emit(cond | B24 | B23 | B22 | dst.code() * B16 | src1.code() * B12 |
- 0xf9 * B4 | src2.code());
+ 0xF9 * B4 | src2.code());
}
void Assembler::ldrexh(Register dst, Register src, Condition cond) {
@@ -2211,7 +2228,7 @@ void Assembler::ldrexh(Register dst, Register src, Condition cond) {
DCHECK(dst != pc);
DCHECK(src != pc);
emit(cond | B24 | B23 | B22 | B21 | B20 | src.code() * B16 |
- dst.code() * B12 | 0xf9f);
+ dst.code() * B12 | 0xF9F);
}
void Assembler::strexh(Register src1, Register src2, Register dst,
@@ -2225,7 +2242,7 @@ void Assembler::strexh(Register src1, Register src2, Register dst,
DCHECK(src1 != dst);
DCHECK(src1 != src2);
emit(cond | B24 | B23 | B22 | B21 | dst.code() * B16 | src1.code() * B12 |
- 0xf9 * B4 | src2.code());
+ 0xF9 * B4 | src2.code());
}
// Preload instructions.
@@ -2242,8 +2259,8 @@ void Assembler::pld(const MemOperand& address) {
U = 0;
}
DCHECK_LT(offset, 4096);
- emit(kSpecialCondition | B26 | B24 | U | B22 | B20 | address.rn().code()*B16 |
- 0xf*B12 | offset);
+ emit(kSpecialCondition | B26 | B24 | U | B22 | B20 |
+ address.rn().code() * B16 | 0xF * B12 | offset);
}
@@ -2305,7 +2322,7 @@ void Assembler::stop(const char* msg, Condition cond, int32_t code) {
void Assembler::bkpt(uint32_t imm16) {
DCHECK(is_uint16(imm16));
- emit(al | B24 | B21 | (imm16 >> 4)*B8 | BKPT | (imm16 & 0xf));
+ emit(al | B24 | B21 | (imm16 >> 4) * B8 | BKPT | (imm16 & 0xF));
}
@@ -2318,7 +2335,7 @@ void Assembler::svc(uint32_t imm24, Condition cond) {
void Assembler::dmb(BarrierOption option) {
if (CpuFeatures::IsSupported(ARMv7)) {
// Details available in ARM DDI 0406C.b, A8-378.
- emit(kSpecialCondition | 0x57ff * B12 | 5 * B4 | option);
+ emit(kSpecialCondition | 0x57FF * B12 | 5 * B4 | option);
} else {
// Details available in ARM DDI 0406C.b, B3-1750.
// CP15DMB: CRn=c7, opc1=0, CRm=c10, opc2=5, Rt is ignored.
@@ -2330,7 +2347,7 @@ void Assembler::dmb(BarrierOption option) {
void Assembler::dsb(BarrierOption option) {
if (CpuFeatures::IsSupported(ARMv7)) {
// Details available in ARM DDI 0406C.b, A8-380.
- emit(kSpecialCondition | 0x57ff * B12 | 4 * B4 | option);
+ emit(kSpecialCondition | 0x57FF * B12 | 4 * B4 | option);
} else {
// Details available in ARM DDI 0406C.b, B3-1750.
// CP15DSB: CRn=c7, opc1=0, CRm=c10, opc2=4, Rt is ignored.
@@ -2342,7 +2359,7 @@ void Assembler::dsb(BarrierOption option) {
void Assembler::isb(BarrierOption option) {
if (CpuFeatures::IsSupported(ARMv7)) {
// Details available in ARM DDI 0406C.b, A8-389.
- emit(kSpecialCondition | 0x57ff * B12 | 6 * B4 | option);
+ emit(kSpecialCondition | 0x57FF * B12 | 6 * B4 | option);
} else {
// Details available in ARM DDI 0406C.b, B3-1750.
// CP15ISB: CRn=c7, opc1=0, CRm=c5, opc2=4, Rt is ignored.
@@ -2728,7 +2745,7 @@ void Assembler::vstm(BlockAddrMode am, Register base, SwVfpRegister first,
static void DoubleAsTwoUInt32(Double d, uint32_t* lo, uint32_t* hi) {
uint64_t i = d.AsUint64();
- *lo = i & 0xffffffff;
+ *lo = i & 0xFFFFFFFF;
*hi = i >> 32;
}
@@ -2757,12 +2774,12 @@ static bool FitsVmovFPImmediate(Double d, uint32_t* encoding) {
DoubleAsTwoUInt32(d, &lo, &hi);
// The most obvious constraint is the long block of zeroes.
- if ((lo != 0) || ((hi & 0xffff) != 0)) {
+ if ((lo != 0) || ((hi & 0xFFFF) != 0)) {
return false;
}
// Bits 61:54 must be all clear or all set.
- if (((hi & 0x3fc00000) != 0) && ((hi & 0x3fc00000) != 0x3fc00000)) {
+ if (((hi & 0x3FC00000) != 0) && ((hi & 0x3FC00000) != 0x3FC00000)) {
return false;
}
@@ -2773,7 +2790,7 @@ static bool FitsVmovFPImmediate(Double d, uint32_t* encoding) {
// Create the encoded immediate in the form:
// [00000000,0000abcd,00000000,0000efgh]
- *encoding = (hi >> 16) & 0xf; // Low nybble.
+ *encoding = (hi >> 16) & 0xF; // Low nybble.
*encoding |= (hi >> 4) & 0x70000; // Low three bits of the high nybble.
*encoding |= (hi >> 12) & 0x80000; // Top bit of the high nybble.
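
Note: FitsVmovFPImmediate() tests whether a double fits VFP's 8-bit [abcdefgh] immediate form. A self-contained version, including the complement check on bit 62 that falls between the quoted hunks (that rule comes from the VFPExpandImm format, where B = ~b), with 1.0 worked through:

#include <cassert>
#include <cstdint>
#include <cstring>

bool FitsVmovFPImmediate(double d, uint32_t* encoding) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  uint32_t lo = static_cast<uint32_t>(bits & 0xFFFFFFFF);
  uint32_t hi = static_cast<uint32_t>(bits >> 32);
  // The low 48 bits of the mantissa must be zero.
  if ((lo != 0) || ((hi & 0xFFFF) != 0)) return false;
  // Bits 61:54 must be all clear or all set (the repeated 'b').
  if (((hi & 0x3FC00000) != 0) && ((hi & 0x3FC00000) != 0x3FC00000)) return false;
  // Bit 62 ('B') must be the complement of bit 61 ('b').
  if (((hi ^ (hi << 1)) & 0x40000000) == 0) return false;
  // Pack as [00000000,0000abcd,00000000,0000efgh].
  *encoding = (hi >> 16) & 0xF;       // efgh
  *encoding |= (hi >> 4) & 0x70000;   // bcd
  *encoding |= (hi >> 12) & 0x80000;  // a
  return true;
}

int main() {
  uint32_t enc = 0;
  assert(FitsVmovFPImmediate(1.0, &enc) && enc == 0x70000);  // abcdefgh = 0x70
  assert(FitsVmovFPImmediate(-2.0, &enc));
  assert(!FitsVmovFPImmediate(0.1, &enc));  // mantissa tail is nonzero
  return 0;
}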
@@ -2852,8 +2869,7 @@ void Assembler::vmov(const DwVfpRegister dst, Double imm,
// We only have one spare scratch register.
mov(scratch, Operand(lo));
vmov(dst, VmovIndexLo, scratch);
- if (((lo & 0xffff) == (hi & 0xffff)) &&
- CpuFeatures::IsSupported(ARMv7)) {
+ if (((lo & 0xFFFF) == (hi & 0xFFFF)) && CpuFeatures::IsSupported(ARMv7)) {
CpuFeatureScope scope(this, ARMv7);
movt(scratch, hi >> 16);
} else {
@@ -3193,7 +3209,7 @@ void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
dst.split_code(&vd, &d);
int imm5 = 32 - fraction_bits;
int i = imm5 & 1;
- int imm4 = (imm5 >> 1) & 0xf;
+ int imm4 = (imm5 >> 1) & 0xF;
emit(cond | 0xE*B24 | B23 | d*B22 | 0x3*B20 | B19 | 0x2*B16 |
vd*B12 | 0x5*B9 | B8 | B7 | B6 | i*B5 | imm4);
}
@@ -4973,12 +4989,12 @@ Instr Assembler::GetMovWPattern() { return kMovwPattern; }
Instr Assembler::EncodeMovwImmediate(uint32_t immediate) {
DCHECK_LT(immediate, 0x10000);
- return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
+ return ((immediate & 0xF000) << 4) | (immediate & 0xFFF);
}
Instr Assembler::PatchMovwImmediate(Instr instruction, uint32_t immediate) {
- instruction &= ~EncodeMovwImmediate(0xffff);
+ instruction &= ~EncodeMovwImmediate(0xFFFF);
return instruction | EncodeMovwImmediate(immediate);
}
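
Note: the movw helpers above split a 16-bit immediate into the instruction's imm4:imm12 fields, so encoding moves the top four bits up by four and patching masks out the old fields before OR-ing in the new ones. A standalone copy with a worked example:

#include <cassert>
#include <cstdint>

uint32_t EncodeMovwImmediate(uint32_t immediate) {
  assert(immediate < 0x10000);
  return ((immediate & 0xF000) << 4) | (immediate & 0xFFF);
}

uint32_t PatchMovwImmediate(uint32_t instruction, uint32_t immediate) {
  instruction &= ~EncodeMovwImmediate(0xFFFF);  // clear imm4 and imm12
  return instruction | EncodeMovwImmediate(immediate);
}

int main() {
  // 0xABCD splits into imm4 = 0xA (bits 19:16) and imm12 = 0xBCD (bits 11:0).
  assert(EncodeMovwImmediate(0xABCD) == 0xA0BCD);
  // Re-patching an encoded instruction replaces only the immediate fields.
  uint32_t movw = 0xE3000000 | EncodeMovwImmediate(0x1234);  // movw r0, #0x1234
  assert(PatchMovwImmediate(movw, 0xABCD) == (0xE3000000 | 0xA0BCD));
  return 0;
}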
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index 9d8cb4c05c..8b95aad886 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -173,6 +173,7 @@ GENERAL_REGISTERS(DECLARE_REGISTER)
#undef DECLARE_REGISTER
constexpr Register no_reg = Register::no_reg();
+constexpr bool kPadArguments = false;
constexpr bool kSimpleFPAliasing = false;
constexpr bool kSimdMaskRegisters = false;
@@ -652,10 +653,6 @@ class Assembler : public AssemblerBase {
INLINE(static void set_target_address_at(
Isolate* isolate, Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
- INLINE(static Address target_address_at(Address pc, Code* code));
- INLINE(static void set_target_address_at(
- Isolate* isolate, Address pc, Code* code, Address target,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
@@ -907,6 +904,9 @@ class Assembler : public AssemblerBase {
Register src2,
const MemOperand& dst, Condition cond = al);
+ // Load literal from a pc-relative address.
+ void ldr_pcrel(Register dst, int imm12, Condition cond = al);
+
// Load/Store exclusive instructions
void ldrex(Register dst, Register src, Condition cond = al);
void strex(Register src1, Register src2, Register dst, Condition cond = al);
@@ -1344,6 +1344,10 @@ class Assembler : public AssemblerBase {
void pop();
+ void vpush(QwNeonRegister src, Condition cond = al) {
+ vstm(db_w, sp, src.low(), src.high(), cond);
+ }
+
void vpush(DwVfpRegister src, Condition cond = al) {
vstm(db_w, sp, src, src, cond);
}
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index 2add525abd..ee706c7656 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -83,7 +83,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
if (masm->emit_debug_code()) {
// Scratch is exponent - 1.
__ cmp(scratch, Operand(30 - 1));
- __ Check(ge, kUnexpectedValue);
+ __ Check(ge, AbortReason::kUnexpectedValue);
}
// We don't have to handle cases where 0 <= exponent <= 20 for which we would
@@ -116,8 +116,8 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// double_high LSR 31 equals zero.
// New result = (result eor 0) + 0 = result.
// If the input was negative, we have to negate the result.
- // Input_high ASR 31 equals 0xffffffff and double_high LSR 31 equals 1.
- // New result = (result eor 0xffffffff) + 1 = 0 - result.
+ // Input_high ASR 31 equals 0xFFFFFFFF and double_high LSR 31 equals 1.
+ // New result = (result eor 0xFFFFFFFF) + 1 = 0 - result.
__ eor(result_reg, result_reg, Operand(double_high, ASR, 31));
__ add(result_reg, result_reg, Operand(double_high, LSR, 31));
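
Note: the eor/add pair described in the comment above is branchless conditional negation: with the sign replicated by an arithmetic shift, x ^ 0 + 0 leaves x alone, while x ^ 0xFFFFFFFF + 1 computes -x. A small model (assuming arithmetic right shift of negative values, as on V8's supported toolchains):

#include <cassert>
#include <cstdint>

int32_t ApplySign(int32_t magnitude, int32_t input_high) {
  int32_t asr = input_high >> 31;  // arithmetic: 0, or all ones if negative
  int32_t lsr = static_cast<int32_t>(static_cast<uint32_t>(input_high) >> 31);
  return (magnitude ^ asr) + lsr;  // x^0 + 0 == x; x^~0 + 1 == -x
}

int main() {
  assert(ApplySign(123, 0x40000000) == 123);  // positive double_high: unchanged
  assert(ApplySign(123, INT32_MIN) == -123);  // sign bit set: negated
  return 0;
}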
@@ -414,6 +414,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Set up the reserved register for 0.0.
__ vmov(kDoubleRegZero, Double(0.0));
+ __ InitializeRootRegister();
+
// Get address of argv, see stm above.
// r0: code entry
// r1: function
@@ -509,12 +511,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// r2: receiver
// r3: argc
// r4: argv
- if (type() == StackFrame::CONSTRUCT_ENTRY) {
- __ Call(BUILTIN_CODE(isolate(), JSConstructEntryTrampoline),
- RelocInfo::CODE_TARGET);
- } else {
- __ Call(BUILTIN_CODE(isolate(), JSEntryTrampoline), RelocInfo::CODE_TARGET);
- }
+ __ Call(EntryTrampoline(), RelocInfo::CODE_TARGET);
// Unlink this frame from the handler chain.
__ PopStackHandler();
@@ -681,7 +678,7 @@ static void CreateArrayDispatch(MacroAssembler* masm,
}
// If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
} else {
UNREACHABLE();
}
@@ -723,7 +720,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
if (FLAG_debug_code) {
__ ldr(r5, FieldMemOperand(r2, 0));
__ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
- __ Assert(eq, kExpectedAllocationSite);
+ __ Assert(eq, AbortReason::kExpectedAllocationSite);
}
// Save the resulting elements kind in type info. We can't just store r3
@@ -747,7 +744,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
}
// If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
} else {
UNREACHABLE();
}
@@ -824,9 +821,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ ldr(r4, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ tst(r4, Operand(kSmiTagMask));
- __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction);
__ CompareObjectType(r4, r4, r5, MAP_TYPE);
- __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
// We should either have undefined in r2 or a valid AllocationSite
__ AssertUndefinedOrAllocationSite(r2, r4);
@@ -904,9 +901,9 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
__ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ tst(r3, Operand(kSmiTagMask));
- __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction);
__ CompareObjectType(r3, r3, r4, MAP_TYPE);
- __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
}
// Figure out the right elements kind
@@ -922,8 +919,9 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
__ cmp(r3, Operand(PACKED_ELEMENTS));
__ b(eq, &done);
__ cmp(r3, Operand(HOLEY_ELEMENTS));
- __ Assert(eq,
- kInvalidElementsKindForInternalArrayOrInternalPackedArray);
+ __ Assert(
+ eq,
+ AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray);
__ bind(&done);
}
@@ -1025,7 +1023,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
if (__ emit_debug_code()) {
__ ldr(r1, MemOperand(r9, kLevelOffset));
__ cmp(r1, r6);
- __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
+ __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall);
}
__ sub(r6, r6, Operand(1));
__ str(r6, MemOperand(r9, kLevelOffset));
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index f7e29ace49..9fb2eb4e8d 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -24,8 +24,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
return stub;
#else
size_t allocated = 0;
- byte* buffer =
- AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
+ byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return stub;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@@ -170,8 +169,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, allocated);
- CHECK(base::OS::SetPermissions(buffer, allocated,
- base::OS::MemoryPermission::kReadExecute));
+ CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
@@ -184,8 +182,7 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
return stub;
#else
size_t allocated = 0;
- byte* buffer =
- AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
+ byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return stub;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@@ -261,8 +258,7 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
masm.GetCode(isolate, &desc);
Assembler::FlushICache(isolate, buffer, allocated);
- CHECK(base::OS::SetPermissions(buffer, allocated,
- base::OS::MemoryPermission::kReadExecute));
+ CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<MemCopyUint16Uint8Function>(buffer);
#endif
}
@@ -273,8 +269,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
return nullptr;
#else
size_t allocated = 0;
- byte* buffer =
- AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
+ byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@@ -290,8 +285,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, allocated);
- CHECK(base::OS::SetPermissions(buffer, allocated,
- base::OS::MemoryPermission::kReadExecute));
+ CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}
diff --git a/deps/v8/src/arm/constants-arm.cc b/deps/v8/src/arm/constants-arm.cc
index c788d33ef2..b50948fc36 100644
--- a/deps/v8/src/arm/constants-arm.cc
+++ b/deps/v8/src/arm/constants-arm.cc
@@ -20,7 +20,7 @@ Float64 Instruction::DoubleImmedVmov() const {
// where B = ~b. Only the high 16 bits are affected.
uint64_t high16;
high16 = (Bits(17, 16) << 4) | Bits(3, 0); // xxxxxxxx,xxcdefgh.
- high16 |= (0xff * Bit(18)) << 6; // xxbbbbbb,bbxxxxxx.
+ high16 |= (0xFF * Bit(18)) << 6; // xxbbbbbb,bbxxxxxx.
high16 |= (Bit(18) ^ 1) << 14; // xBxxxxxx,xxxxxxxx.
high16 |= Bit(19) << 15; // axxxxxxx,xxxxxxxx.
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index 20cf8e4d5e..1c865afb09 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -34,9 +34,6 @@ inline int DecodeConstantPoolLength(int instr) {
return ((instr >> 4) & 0xfff0) | (instr & 0xf);
}
-// Used in code age prologue - ldr(pc, MemOperand(pc, -4))
-const int kCodeAgeJumpInstruction = 0xe51ff004;
-
// Number of registers in normal ARM mode.
const int kNumRegisters = 16;
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index 81224c5fcb..9a21ef862c 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -245,9 +245,9 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
// Note that registers are still live when jumping to an entry.
// We need to be able to generate immediates up to kMaxNumberOfEntries. On
- // ARMv7, we can use movw (with a maximum immediate of 0xffff). On ARMv6, we
+ // ARMv7, we can use movw (with a maximum immediate of 0xFFFF). On ARMv6, we
// need two instructions.
- STATIC_ASSERT((kMaxNumberOfEntries - 1) <= 0xffff);
+ STATIC_ASSERT((kMaxNumberOfEntries - 1) <= 0xFFFF);
UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
if (CpuFeatures::IsSupported(ARMv7)) {
@@ -263,7 +263,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
__ bind(&done);
} else {
// We want to keep table_entry_size_ == 8 (since this is the common case),
- // but we need two instructions to load most immediates over 0xff. To handle
+ // but we need two instructions to load most immediates over 0xFF. To handle
// this, we set the low byte in the main table, and then set the high byte
// in a separate table if necessary.
Label high_fixes[256];
@@ -272,7 +272,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
for (int i = 0; i < count(); i++) {
int start = masm()->pc_offset();
USE(start);
- __ mov(scratch, Operand(i & 0xff)); // Set the low byte.
+ __ mov(scratch, Operand(i & 0xFF)); // Set the low byte.
__ b(&high_fixes[i >> 8]); // Jump to the secondary table.
DCHECK_EQ(table_entry_size_, masm()->pc_offset() - start);
}
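
Note: the ARMv6 path above keeps each 8-byte main-table entry small by materializing only the low byte of the entry index and branching to one of up to 256 shared tails that fix up the high byte. A model of what the generated pair computes:

#include <cassert>

// The main-table pair "mov scratch, #(i & 0xFF); b high_fixes[i >> 8]" plus a
// tail that ORs in (high << 8) reconstruct the full entry index:
int EntryIndex(int low_byte, int high_fix) { return low_byte | (high_fix << 8); }

int main() {
  for (int i = 0; i < 600; i++) {  // more entries than one byte can name
    assert(EntryIndex(i & 0xFF, i >> 8) == i);
  }
  return 0;
}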
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index 05adc37f61..9951136561 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -541,7 +541,7 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
// 'msg: for simulator break instructions
DCHECK(STRING_STARTS_WITH(format, "msg"));
byte* str =
- reinterpret_cast<byte*>(instr->InstructionBits() & 0x0fffffff);
+ reinterpret_cast<byte*>(instr->InstructionBits() & 0x0FFFFFFF);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"%s", converter_.NameInCode(str));
return 3;
@@ -819,7 +819,7 @@ void Decoder::DecodeType01(Instruction* instr) {
Unknown(instr); // not used by V8
}
}
- } else if ((instr->Bit(20) == 0) && ((instr->Bits(7, 4) & 0xd) == 0xd)) {
+ } else if ((instr->Bit(20) == 0) && ((instr->Bits(7, 4) & 0xD) == 0xD)) {
// ldrd, strd
switch (instr->PUField()) {
case da_x: {
@@ -905,7 +905,7 @@ void Decoder::DecodeType01(Instruction* instr) {
}
} else if ((type == 0) && instr->IsMiscType0()) {
if ((instr->Bits(27, 23) == 2) && (instr->Bits(21, 20) == 2) &&
- (instr->Bits(15, 4) == 0xf00)) {
+ (instr->Bits(15, 4) == 0xF00)) {
Format(instr, "msr'cond 'spec_reg'spec_reg_fields, 'rm");
} else if ((instr->Bits(27, 23) == 2) && (instr->Bits(21, 20) == 0) &&
(instr->Bits(11, 0) == 0)) {
@@ -1285,8 +1285,8 @@ void Decoder::DecodeType3(Instruction* instr) {
}
} else {
// PU == 0b01, BW == 0b11, Bits(9, 6) != 0b0001
- if ((instr->Bits(20, 16) == 0x1f) &&
- (instr->Bits(11, 4) == 0xf3)) {
+ if ((instr->Bits(20, 16) == 0x1F) &&
+ (instr->Bits(11, 4) == 0xF3)) {
Format(instr, "rbit'cond 'rd, 'rm");
} else {
UNREACHABLE();
@@ -1561,7 +1561,7 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
const char* rt_name = converter_.NameOfCPURegister(instr->RtValue());
if (instr->Bit(23) == 0) {
int opc1_opc2 = (instr->Bits(22, 21) << 2) | instr->Bits(6, 5);
- if ((opc1_opc2 & 0xb) == 0) {
+ if ((opc1_opc2 & 0xB) == 0) {
// NeonS32/NeonU32
if (instr->Bit(21) == 0x0) {
Format(instr, "vmov'cond.32 'Dd[0], 'rt");
@@ -1597,7 +1597,7 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
}
} else if ((instr->VLValue() == 0x1) && (instr->VCValue() == 0x1)) {
int opc1_opc2 = (instr->Bits(22, 21) << 2) | instr->Bits(6, 5);
- if ((opc1_opc2 & 0xb) == 0) {
+ if ((opc1_opc2 & 0xB) == 0) {
// NeonS32 / NeonU32
if (instr->Bit(21) == 0x0) {
Format(instr, "vmov'cond.32 'rt, 'Dd[0]");
@@ -1972,7 +1972,7 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
- case 0xa: {
+ case 0xA: {
// vpmin/vpmax.s<size> Dd, Dm, Dn.
const char* op = instr->Bit(4) == 1 ? "vpmin" : "vpmax";
out_buffer_pos_ +=
@@ -1980,14 +1980,14 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
op, size, Vd, Vn, Vm);
break;
}
- case 0xb: {
+ case 0xB: {
// vpadd.i<size> Dd, Dm, Dn.
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "vpadd.i%d d%d, d%d, d%d",
size, Vd, Vn, Vm);
break;
}
- case 0xd: {
+ case 0xD: {
if (instr->Bit(4) == 0) {
const char* op = (instr->Bits(21, 20) == 0) ? "vadd" : "vsub";
// vadd/vsub.f32 Qd, Qm, Qn.
@@ -1998,7 +1998,7 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
- case 0xe: {
+ case 0xE: {
if (instr->Bits(21, 20) == 0 && instr->Bit(4) == 0) {
// vceq.f32 Qd, Qm, Qn.
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
@@ -2008,7 +2008,7 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
- case 0xf: {
+ case 0xF: {
if (instr->Bit(20) == 0 && instr->Bit(6) == 1) {
if (instr->Bit(4) == 1) {
// vrecps/vrsqrts.f32 Qd, Qm, Qn.
@@ -2158,7 +2158,7 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
- case 0xa: {
+ case 0xA: {
// vpmin/vpmax.u<size> Dd, Dm, Dn.
const char* op = instr->Bit(4) == 1 ? "vpmin" : "vpmax";
out_buffer_pos_ +=
@@ -2166,7 +2166,7 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
op, size, Vd, Vn, Vm);
break;
}
- case 0xd: {
+ case 0xD: {
if (instr->Bits(21, 20) == 0 && instr->Bit(6) == 1 &&
instr->Bit(4) == 1) {
// vmul.f32 Qd, Qm, Qn
@@ -2182,7 +2182,7 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
- case 0xe: {
+ case 0xE: {
if (instr->Bit(20) == 0 && instr->Bit(4) == 0) {
const char* op = (instr->Bit(21) == 0) ? "vcge" : "vcgt";
// vcge/vcgt.f32 Qd, Qm, Qn.
@@ -2332,12 +2332,12 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
instr->Bit(6) == 1) {
int size = kBitsPerByte * (1 << instr->Bits(19, 18));
char type = instr->Bit(10) != 0 ? 'f' : 's';
- if (instr->Bits(9, 6) == 0xd) {
+ if (instr->Bits(9, 6) == 0xD) {
// vabs<type>.<size> Qd, Qm.
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "vabs.%c%d q%d, q%d",
type, size, Vd, Vm);
- } else if (instr->Bits(9, 6) == 0xf) {
+ } else if (instr->Bits(9, 6) == 0xF) {
// vneg<type>.<size> Qd, Qm.
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "vneg.%c%d q%d, q%d",
@@ -2423,7 +2423,7 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
break;
case 0xA:
case 0xB:
- if ((instr->Bits(22, 20) == 5) && (instr->Bits(15, 12) == 0xf)) {
+ if ((instr->Bits(22, 20) == 5) && (instr->Bits(15, 12) == 0xF)) {
const char* rn_name = converter_.NameOfCPURegister(instr->Bits(19, 16));
int offset = instr->Bits(11, 0);
if (offset == 0) {
@@ -2601,14 +2601,6 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
"constant pool begin (length %d)",
DecodeConstantPoolLength(instruction_bits));
return Instruction::kInstrSize;
- } else if (instruction_bits == kCodeAgeJumpInstruction) {
- // The code age prologue has a constant immediately following the jump
- // instruction.
- Instruction* target = Instruction::At(instr_ptr + Instruction::kInstrSize);
- DecodeType2(instr);
- SNPrintF(out_buffer_ + out_buffer_pos_,
- " (0x%08x)", target->InstructionBits());
- return 2 * Instruction::kInstrSize;
}
switch (instr->TypeValue()) {
case 0:
diff --git a/deps/v8/src/arm/interface-descriptors-arm.cc b/deps/v8/src/arm/interface-descriptors-arm.cc
index 20ef0e37bc..6b7498fde5 100644
--- a/deps/v8/src/arm/interface-descriptors-arm.cc
+++ b/deps/v8/src/arm/interface-descriptors-arm.cc
@@ -45,8 +45,6 @@ const Register LoadDescriptor::SlotRegister() { return r0; }
const Register LoadWithVectorDescriptor::VectorRegister() { return r3; }
-const Register LoadICProtoArrayDescriptor::HandlerRegister() { return r4; }
-
const Register StoreDescriptor::ReceiverRegister() { return r1; }
const Register StoreDescriptor::NameRegister() { return r2; }
const Register StoreDescriptor::ValueRegister() { return r0; }
@@ -204,6 +202,11 @@ void TransitionElementsKindDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void AbortJSDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r1};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 8575b0336c..30190d3f34 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -224,44 +224,6 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Call(code.address(), rmode, cond, mode);
}
-void MacroAssembler::CallDeoptimizer(Address target) {
- BlockConstPoolScope block_const_pool(this);
-
- uintptr_t target_raw = reinterpret_cast<uintptr_t>(target);
-
- // Use ip directly instead of using UseScratchRegisterScope, as we do not
- // preserve scratch registers across calls.
-
- // We use blx, like a call, but it does not return here. The link register is
- // used by the deoptimizer to work out what called it.
- if (CpuFeatures::IsSupported(ARMv7)) {
- CpuFeatureScope scope(this, ARMv7);
- movw(ip, target_raw & 0xffff);
- movt(ip, (target_raw >> 16) & 0xffff);
- blx(ip);
- } else {
- // We need to load a literal, but we can't use the usual constant pool
- // because we call this from a patcher, and cannot afford the guard
- // instruction and other administrative overhead.
- ldr(ip, MemOperand(pc, (2 * kInstrSize) - kPcLoadDelta));
- blx(ip);
- dd(target_raw);
- }
-}
-
-int MacroAssembler::CallDeoptimizerSize() {
- // ARMv7+:
- // movw ip, ...
- // movt ip, ...
- // blx ip @ This never returns.
- //
- // ARMv6:
- // ldr ip, =address
- // blx ip @ This never returns.
- // .word address
- return 3 * kInstrSize;
-}
-
void TurboAssembler::Ret(Condition cond) { bx(lr, cond); }
void TurboAssembler::Drop(int count, Condition cond) {
@@ -608,7 +570,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
Register scratch = temps.Acquire();
ldr(scratch, MemOperand(address));
cmp(scratch, value);
- Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+ Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
}
if (remembered_set_action == OMIT_REMEMBERED_SET &&
@@ -985,7 +947,7 @@ void TurboAssembler::LslPair(Register dst_low, Register dst_high,
rsb(scratch, shift, Operand(32), SetCC);
b(gt, &less_than_32);
// If shift >= 32
- and_(scratch, shift, Operand(0x1f));
+ and_(scratch, shift, Operand(0x1F));
lsl(dst_high, src_low, Operand(scratch));
mov(dst_low, Operand(0));
jmp(&done);
@@ -1010,7 +972,7 @@ void TurboAssembler::LslPair(Register dst_low, Register dst_high,
Move(dst_high, src_low);
Move(dst_low, Operand(0));
} else if (shift >= 32) {
- shift &= 0x1f;
+ shift &= 0x1F;
lsl(dst_high, src_low, Operand(shift));
mov(dst_low, Operand(0));
} else {
@@ -1031,7 +993,7 @@ void TurboAssembler::LsrPair(Register dst_low, Register dst_high,
rsb(scratch, shift, Operand(32), SetCC);
b(gt, &less_than_32);
// If shift >= 32
- and_(scratch, shift, Operand(0x1f));
+ and_(scratch, shift, Operand(0x1F));
lsr(dst_low, src_high, Operand(scratch));
mov(dst_high, Operand(0));
jmp(&done);
@@ -1054,7 +1016,7 @@ void TurboAssembler::LsrPair(Register dst_low, Register dst_high,
mov(dst_low, src_high);
mov(dst_high, Operand(0));
} else if (shift > 32) {
- shift &= 0x1f;
+ shift &= 0x1F;
lsr(dst_low, src_high, Operand(shift));
mov(dst_high, Operand(0));
} else if (shift == 0) {
@@ -1078,7 +1040,7 @@ void TurboAssembler::AsrPair(Register dst_low, Register dst_high,
rsb(scratch, shift, Operand(32), SetCC);
b(gt, &less_than_32);
// If shift >= 32
- and_(scratch, shift, Operand(0x1f));
+ and_(scratch, shift, Operand(0x1F));
asr(dst_low, src_high, Operand(scratch));
asr(dst_high, src_high, Operand(31));
jmp(&done);
@@ -1100,7 +1062,7 @@ void TurboAssembler::AsrPair(Register dst_low, Register dst_high,
mov(dst_low, src_high);
asr(dst_high, src_high, Operand(31));
} else if (shift > 32) {
- shift &= 0x1f;
+ shift &= 0x1F;
asr(dst_low, src_high, Operand(shift));
asr(dst_high, src_high, Operand(31));
} else if (shift == 0) {
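
Note: the LslPair/LsrPair/AsrPair hunks above only touch hex casing, but the lowering they sit in is worth pinning down: a 64-bit shift on 32-bit registers splits into shift == 0, < 32, == 32, and > 32 cases, with shift & 0x1F recovering the residual amount. A C model of the arithmetic-right case, cross-checked against native 64-bit arithmetic (assumes arithmetic >> on negative int32_t):

#include <cassert>
#include <cstdint>

void AsrPair(uint32_t src_low, uint32_t src_high, int shift,
             uint32_t* dst_low, uint32_t* dst_high) {
  int32_t high = static_cast<int32_t>(src_high);
  if (shift == 0) {
    *dst_low = src_low;
    *dst_high = src_high;
  } else if (shift == 32) {
    *dst_low = src_high;
    *dst_high = static_cast<uint32_t>(high >> 31);  // sign fill
  } else if (shift > 32) {
    shift &= 0x1F;
    *dst_low = static_cast<uint32_t>(high >> shift);
    *dst_high = static_cast<uint32_t>(high >> 31);
  } else {
    // The low word takes bits from both halves.
    *dst_low = (src_low >> shift) | (src_high << (32 - shift));
    *dst_high = static_cast<uint32_t>(high >> shift);
  }
}

int main() {
  uint32_t lo, hi;
  int64_t x = -0x123456789ALL;
  AsrPair(static_cast<uint32_t>(x), static_cast<uint32_t>(x >> 32), 36, &lo, &hi);
  int64_t expect = x >> 36;
  assert(lo == static_cast<uint32_t>(expect));
  assert(hi == static_cast<uint32_t>(expect >> 32));
  return 0;
}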
@@ -1218,7 +1180,6 @@ int TurboAssembler::ActivationFrameAlignment() {
#endif // V8_HOST_ARCH_ARM
}
-
void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
bool argument_count_is_length) {
ConstantPoolUnavailableScope constant_pool_unavailable(this);
@@ -1244,6 +1205,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate())));
ldr(cp, MemOperand(scratch));
#ifdef DEBUG
+ mov(r3, Operand(Context::kInvalidContext));
mov(scratch,
Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate())));
str(r3, MemOperand(scratch));
@@ -1307,7 +1269,7 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
if (FLAG_debug_code) {
cmp(src_reg, dst_reg);
- Check(lo, kStackAccessBelowStackPointer);
+ Check(lo, AbortReason::kStackAccessBelowStackPointer);
}
// Restore caller's frame pointer and return address now as they will be
@@ -1539,15 +1501,15 @@ void MacroAssembler::MaybeDropFrames() {
void MacroAssembler::PushStackHandler() {
// Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+ Push(Smi::kZero); // Padding.
// Link the current handler as the next handler.
mov(r6,
Operand(ExternalReference(IsolateAddressId::kHandlerAddress, isolate())));
ldr(r5, MemOperand(r6));
push(r5);
-
// Set this new handler as the current one.
str(sp, MemOperand(r6));
}
@@ -1560,8 +1522,8 @@ void MacroAssembler::PopStackHandler() {
pop(r1);
mov(scratch,
Operand(ExternalReference(IsolateAddressId::kHandlerAddress, isolate())));
- add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
str(r1, MemOperand(scratch));
+ add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
}
@@ -1660,9 +1622,9 @@ void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- // If result is not saturated (0x7fffffff or 0x80000000), we are done.
+ // If result is not saturated (0x7FFFFFFF or 0x80000000), we are done.
sub(scratch, result, Operand(1));
- cmp(scratch, Operand(0x7ffffffe));
+ cmp(scratch, Operand(0x7FFFFFFE));
b(lt, done);
}
@@ -1765,12 +1727,12 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
}
}
-void TurboAssembler::Assert(Condition cond, BailoutReason reason) {
+void TurboAssembler::Assert(Condition cond, AbortReason reason) {
if (emit_debug_code())
Check(cond, reason);
}
-void TurboAssembler::Check(Condition cond, BailoutReason reason) {
+void TurboAssembler::Check(Condition cond, AbortReason reason) {
Label L;
b(cond, &L);
Abort(reason);
@@ -1778,11 +1740,11 @@ void TurboAssembler::Check(Condition cond, BailoutReason reason) {
bind(&L);
}
-void TurboAssembler::Abort(BailoutReason reason) {
+void TurboAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
#ifdef DEBUG
- const char* msg = GetBailoutReason(reason);
+ const char* msg = GetAbortReason(reason);
if (msg != nullptr) {
RecordComment("Abort message: ");
RecordComment(msg);
@@ -1873,7 +1835,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
- Check(ne, kOperandIsASmi);
+ Check(ne, AbortReason::kOperandIsASmi);
}
}
@@ -1882,7 +1844,7 @@ void MacroAssembler::AssertSmi(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
- Check(eq, kOperandIsNotSmi);
+ Check(eq, AbortReason::kOperandIsNotASmi);
}
}
@@ -1890,11 +1852,11 @@ void MacroAssembler::AssertFixedArray(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
- Check(ne, kOperandIsASmiAndNotAFixedArray);
+ Check(ne, AbortReason::kOperandIsASmiAndNotAFixedArray);
push(object);
CompareObjectType(object, object, object, FIXED_ARRAY_TYPE);
pop(object);
- Check(eq, kOperandIsNotAFixedArray);
+ Check(eq, AbortReason::kOperandIsNotAFixedArray);
}
}
@@ -1902,11 +1864,11 @@ void MacroAssembler::AssertFunction(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
- Check(ne, kOperandIsASmiAndNotAFunction);
+ Check(ne, AbortReason::kOperandIsASmiAndNotAFunction);
push(object);
CompareObjectType(object, object, object, JS_FUNCTION_TYPE);
pop(object);
- Check(eq, kOperandIsNotAFunction);
+ Check(eq, AbortReason::kOperandIsNotAFunction);
}
}
@@ -1915,18 +1877,18 @@ void MacroAssembler::AssertBoundFunction(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
- Check(ne, kOperandIsASmiAndNotABoundFunction);
+ Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction);
push(object);
CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
pop(object);
- Check(eq, kOperandIsNotABoundFunction);
+ Check(eq, AbortReason::kOperandIsNotABoundFunction);
}
}
void MacroAssembler::AssertGeneratorObject(Register object) {
if (!emit_debug_code()) return;
tst(object, Operand(kSmiTagMask));
- Check(ne, kOperandIsASmiAndNotAGeneratorObject);
+ Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject);
// Load map
Register map = object;
@@ -1945,7 +1907,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
bind(&do_check);
// Restore generator object to register and perform assertion
pop(object);
- Check(eq, kOperandIsNotAGeneratorObject);
+ Check(eq, AbortReason::kOperandIsNotAGeneratorObject);
}
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
@@ -1957,7 +1919,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
b(eq, &done_checking);
ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
- Assert(eq, kExpectedUndefinedOrCell);
+ Assert(eq, AbortReason::kExpectedUndefinedOrCell);
bind(&done_checking);
}
}
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 2f97869621..cf731cbedb 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -294,13 +294,13 @@ class TurboAssembler : public Assembler {
// Calls Abort(msg) if the condition cond is not satisfied.
// Use --debug_code to enable.
- void Assert(Condition cond, BailoutReason reason);
+ void Assert(Condition cond, AbortReason reason);
// Like Assert(), but always enabled.
- void Check(Condition cond, BailoutReason reason);
+ void Check(Condition cond, AbortReason reason);
// Print a message to stdout and abort execution.
- void Abort(BailoutReason msg);
+ void Abort(AbortReason msg);
inline bool AllowThisStubCall(CodeStub* stub);
@@ -579,10 +579,6 @@ class MacroAssembler : public TurboAssembler {
MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object);
- // Used for patching in calls to the deoptimizer.
- void CallDeoptimizer(Address target);
- static int CallDeoptimizerSize();
-
// Swap two registers. If the scratch register is omitted then a slightly
// less efficient form using xor instead of mov is emitted.
void Swap(Register reg1, Register reg2, Register scratch = no_reg,
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index 8ab6cb6b5c..52fe902237 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -259,11 +259,9 @@ void ArmDebugger::Debug() {
for (int i = 0; i < DwVfpRegister::NumRegisters(); i++) {
dvalue = GetVFPDoubleRegisterValue(i);
uint64_t as_words = bit_cast<uint64_t>(dvalue);
- PrintF("%3s: %f 0x%08x %08x\n",
- VFPRegisters::Name(i, true),
- dvalue,
- static_cast<uint32_t>(as_words >> 32),
- static_cast<uint32_t>(as_words & 0xffffffff));
+ PrintF("%3s: %f 0x%08x %08x\n", VFPRegisters::Name(i, true),
+ dvalue, static_cast<uint32_t>(as_words >> 32),
+ static_cast<uint32_t>(as_words & 0xFFFFFFFF));
}
} else {
if (GetValue(arg1, &value)) {
@@ -273,11 +271,9 @@ void ArmDebugger::Debug() {
PrintF("%s: %f 0x%08x\n", arg1, svalue, as_word);
} else if (GetVFPDoubleValue(arg1, &dvalue)) {
uint64_t as_words = bit_cast<uint64_t>(dvalue);
- PrintF("%s: %f 0x%08x %08x\n",
- arg1,
- dvalue,
+ PrintF("%s: %f 0x%08x %08x\n", arg1, dvalue,
static_cast<uint32_t>(as_words >> 32),
- static_cast<uint32_t>(as_words & 0xffffffff));
+ static_cast<uint32_t>(as_words & 0xFFFFFFFF));
} else {
PrintF("%s unrecognized\n", arg1);
}
@@ -575,6 +571,10 @@ void Simulator::set_last_debugger_input(char* input) {
last_debugger_input_ = input;
}
+void Simulator::SetRedirectInstruction(Instruction* instruction) {
+ instruction->SetInstructionBits(al | (0xF * B24) | kCallRtRedirected);
+}
+
void Simulator::FlushICache(base::CustomMatcherHashMap* i_cache,
void* start_addr, size_t size) {
intptr_t start = reinterpret_cast<intptr_t>(start_addr);
@@ -644,21 +644,12 @@ void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
}
-void Simulator::Initialize(Isolate* isolate) {
- if (isolate->simulator_initialized()) return;
- isolate->set_simulator_initialized(true);
- ::v8::internal::ExternalReference::set_redirector(isolate,
- &RedirectExternalReference);
-}
-
-
Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
i_cache_ = isolate_->simulator_i_cache();
if (i_cache_ == nullptr) {
i_cache_ = new base::CustomMatcherHashMap(&ICacheMatch);
isolate_->set_simulator_i_cache(i_cache_);
}
- Initialize(isolate);
// Set up simulator support first. Some of this information is needed to
// setup the architecture state.
size_t stack_size = 1 * 1024*1024; // allocate 1MB for stack
@@ -715,100 +706,6 @@ Simulator::~Simulator() {
free(stack_);
}
-// When the generated code calls an external reference we need to catch that in
-// the simulator. The external reference will be a function compiled for the
-// host architecture. We need to call that function instead of trying to
-// execute it with the simulator. We do that by redirecting the external
-// reference to a svc (Supervisor Call) instruction that is handled by
-// the simulator. We write the original destination of the jump just at a known
-// offset from the svc instruction so the simulator knows what to call.
-class Redirection {
- public:
- Redirection(Isolate* isolate, void* external_function,
- ExternalReference::Type type)
- : external_function_(external_function),
- swi_instruction_(al | (0xf * B24) | kCallRtRedirected),
- type_(type),
- next_(nullptr) {
- next_ = isolate->simulator_redirection();
- Simulator::current(isolate)->
- FlushICache(isolate->simulator_i_cache(),
- reinterpret_cast<void*>(&swi_instruction_),
- Instruction::kInstrSize);
- isolate->set_simulator_redirection(this);
- }
-
- void* address_of_swi_instruction() {
- return reinterpret_cast<void*>(&swi_instruction_);
- }
-
- void* external_function() { return external_function_; }
- ExternalReference::Type type() { return type_; }
-
- static Redirection* Get(Isolate* isolate, void* external_function,
- ExternalReference::Type type) {
- Redirection* current = isolate->simulator_redirection();
- for (; current != nullptr; current = current->next_) {
- if (current->external_function_ == external_function &&
- current->type_ == type) {
- return current;
- }
- }
- return new Redirection(isolate, external_function, type);
- }
-
- static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
- char* addr_of_swi = reinterpret_cast<char*>(swi_instruction);
- char* addr_of_redirection =
- addr_of_swi - offsetof(Redirection, swi_instruction_);
- return reinterpret_cast<Redirection*>(addr_of_redirection);
- }
-
- static void* ReverseRedirection(int32_t reg) {
- Redirection* redirection = FromSwiInstruction(
- reinterpret_cast<Instruction*>(reinterpret_cast<void*>(reg)));
- return redirection->external_function();
- }
-
- static void DeleteChain(Redirection* redirection) {
- while (redirection != nullptr) {
- Redirection* next = redirection->next_;
- delete redirection;
- redirection = next;
- }
- }
-
- private:
- void* external_function_;
- uint32_t swi_instruction_;
- ExternalReference::Type type_;
- Redirection* next_;
-};
-
-
-// static
-void Simulator::TearDown(base::CustomMatcherHashMap* i_cache,
- Redirection* first) {
- Redirection::DeleteChain(first);
- if (i_cache != nullptr) {
- for (base::HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
- entry = i_cache->Next(entry)) {
- delete static_cast<CachePage*>(entry->value);
- }
- delete i_cache;
- }
-}
-
-
-void* Simulator::RedirectExternalReference(Isolate* isolate,
- void* external_function,
- ExternalReference::Type type) {
- base::LockGuard<base::Mutex> lock_guard(
- isolate->simulator_redirection_mutex());
- Redirection* redirection = Redirection::Get(isolate, external_function, type);
- return redirection->address_of_swi_instruction();
-}
-
// Get the active Simulator for the current thread.
Simulator* Simulator::current(Isolate* isolate) {
@@ -1035,9 +932,9 @@ void Simulator::SetFpResult(const double& result) {
void Simulator::TrashCallerSaveRegisters() {
// We don't trash the registers with the return value.
- registers_[2] = 0x50Bad4U;
- registers_[3] = 0x50Bad4U;
- registers_[12] = 0x50Bad4U;
+ registers_[2] = 0x50BAD4U;
+ registers_[3] = 0x50BAD4U;
+ registers_[12] = 0x50BAD4U;
}
@@ -1292,7 +1189,7 @@ void Simulator::SetVFlag(bool val) {
bool Simulator::CarryFrom(int32_t left, int32_t right, int32_t carry) {
uint32_t uleft = static_cast<uint32_t>(left);
uint32_t uright = static_cast<uint32_t>(right);
- uint32_t urest = 0xffffffffU - uleft;
+ uint32_t urest = 0xFFFFFFFFU - uleft;
return (uright > urest) ||
(carry && (((uright + 1) > urest) || (uright > (urest - 1))));
@@ -1409,7 +1306,7 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
case ASR: {
if (shift_amount == 0) {
if (result < 0) {
- result = 0xffffffff;
+ result = 0xFFFFFFFF;
*carry_out = true;
} else {
result = 0;
@@ -1468,7 +1365,7 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
} else {
// by register
int rs = instr->RsValue();
- shift_amount = get_register(rs) &0xff;
+ shift_amount = get_register(rs) & 0xFF;
switch (shift) {
case ASR: {
if (shift_amount == 0) {
@@ -1481,7 +1378,7 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
DCHECK_GE(shift_amount, 32);
if (result < 0) {
*carry_out = true;
- result = 0xffffffff;
+ result = 0xFFFFFFFF;
} else {
*carry_out = false;
result = 0;
@@ -1739,7 +1636,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
bool stack_aligned =
(get_register(sp)
& (::v8::internal::FLAG_sim_stack_alignment - 1)) == 0;
- Redirection* redirection = Redirection::FromSwiInstruction(instr);
+ Redirection* redirection = Redirection::FromInstruction(instr);
int32_t arg0 = get_register(r0);
int32_t arg1 = get_register(r1);
int32_t arg2 = get_register(r2);
@@ -1982,7 +1879,7 @@ Float32 Simulator::canonicalizeNaN(Float32 value) {
double Simulator::canonicalizeNaN(double value) {
// Default NaN value, see "NaN handling" in "IEEE 754 standard implementation
// choices" of the ARM Reference Manual.
- constexpr uint64_t kDefaultNaN = V8_UINT64_C(0x7FF8000000000000);
+ constexpr uint64_t kDefaultNaN = uint64_t{0x7FF8000000000000};
if (FPSCR_default_NaN_mode_ && std::isnan(value)) {
value = bit_cast<double>(kDefaultNaN);
}
@@ -1993,7 +1890,7 @@ Float64 Simulator::canonicalizeNaN(Float64 value) {
// Default NaN value, see "NaN handling" in "IEEE 754 standard implementation
// choices" of the ARM Reference Manual.
constexpr Float64 kDefaultNaN =
- Float64::FromBits(V8_UINT64_C(0x7FF8000000000000));
+ Float64::FromBits(uint64_t{0x7FF8000000000000});
return FPSCR_default_NaN_mode_ && value.is_nan() ? kDefaultNaN : value;
}
@@ -2036,7 +1933,7 @@ void Simulator::DisableStop(uint32_t code) {
void Simulator::IncreaseStopCounter(uint32_t code) {
DCHECK_LE(code, kMaxStopCode);
DCHECK(isWatchedStop(code));
- if ((watched_stops_[code].count & ~(1 << 31)) == 0x7fffffff) {
+ if ((watched_stops_[code].count & ~(1 << 31)) == 0x7FFFFFFF) {
PrintF("Stop counter for code %i has overflowed.\n"
"Enabling this code and reseting the counter to 0.\n", code);
watched_stops_[code].count = 0;
@@ -2137,14 +2034,14 @@ void Simulator::DecodeType01(Instruction* instr) {
int64_t right_op = static_cast<int32_t>(rs_val);
uint64_t result = left_op * right_op;
hi_res = static_cast<int32_t>(result >> 32);
- lo_res = static_cast<int32_t>(result & 0xffffffff);
+ lo_res = static_cast<int32_t>(result & 0xFFFFFFFF);
} else {
// unsigned multiply
uint64_t left_op = static_cast<uint32_t>(rm_val);
uint64_t right_op = static_cast<uint32_t>(rs_val);
uint64_t result = left_op * right_op;
hi_res = static_cast<int32_t>(result >> 32);
- lo_res = static_cast<int32_t>(result & 0xffffffff);
+ lo_res = static_cast<int32_t>(result & 0xFFFFFFFF);
}
set_register(rd_lo, lo_res);
set_register(rd_hi, hi_res);
@@ -2316,7 +2213,7 @@ void Simulator::DecodeType01(Instruction* instr) {
}
}
}
- if (((instr->Bits(7, 4) & 0xd) == 0xd) && (instr->Bit(20) == 0)) {
+ if (((instr->Bits(7, 4) & 0xD) == 0xD) && (instr->Bit(20) == 0)) {
DCHECK_EQ(rd % 2, 0);
if (instr->HasH()) {
// The strd instruction.
@@ -2357,7 +2254,7 @@ void Simulator::DecodeType01(Instruction* instr) {
}
} else if ((type == 0) && instr->IsMiscType0()) {
if ((instr->Bits(27, 23) == 2) && (instr->Bits(21, 20) == 2) &&
- (instr->Bits(15, 4) == 0xf00)) {
+ (instr->Bits(15, 4) == 0xF00)) {
// MSR
int rm = instr->RmValue();
DCHECK_NE(pc, rm); // UNPREDICTABLE
@@ -2569,8 +2466,8 @@ void Simulator::DecodeType01(Instruction* instr) {
SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, false));
} else {
// Format(instr, "movt'cond 'rd, 'imm").
- alu_out = (get_register(rd) & 0xffff) |
- (instr->ImmedMovwMovtValue() << 16);
+ alu_out =
+ (get_register(rd) & 0xFFFF) | (instr->ImmedMovwMovtValue() << 16);
set_register(rd, alu_out);
}
break;
@@ -2987,8 +2884,8 @@ void Simulator::DecodeType3(Instruction* instr) {
}
} else {
// PU == 0b01, BW == 0b11, Bits(9, 6) != 0b0001
- if ((instr->Bits(20, 16) == 0x1f) &&
- (instr->Bits(11, 4) == 0xf3)) {
+ if ((instr->Bits(20, 16) == 0x1F) &&
+ (instr->Bits(11, 4) == 0xF3)) {
// Rbit.
uint32_t rm_val = get_register(instr->RmValue());
set_register(rd, base::bits::ReverseBits(rm_val));
@@ -3084,7 +2981,7 @@ void Simulator::DecodeType3(Instruction* instr) {
uint32_t rd_val =
static_cast<uint32_t>(get_register(instr->RdValue()));
uint32_t bitcount = msbit - lsbit + 1;
- uint32_t mask = 0xffffffffu >> (32 - bitcount);
+ uint32_t mask = 0xFFFFFFFFu >> (32 - bitcount);
rd_val &= ~(mask << lsbit);
if (instr->RmValue() != 15) {
// bfi - bitfield insert.
@@ -3422,7 +3319,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
int vd = instr->VFPNRegValue(kDoublePrecision);
int rt = instr->RtValue();
int opc1_opc2 = (instr->Bits(22, 21) << 2) | instr->Bits(6, 5);
- if ((opc1_opc2 & 0xb) == 0) {
+ if ((opc1_opc2 & 0xB) == 0) {
// NeonS32/NeonU32
uint32_t data[2];
get_d_register(vd, data);
@@ -3500,7 +3397,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
int opc1_opc2 = (instr->Bits(22, 21) << 2) | instr->Bits(6, 5);
uint64_t data;
get_d_register(vn, &data);
- if ((opc1_opc2 & 0xb) == 0) {
+ if ((opc1_opc2 & 0xB) == 0) {
// NeonS32 / NeonU32
int32_t int_data[2];
memcpy(int_data, &data, sizeof(int_data));
@@ -3514,14 +3411,14 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
int i = opc1_opc2 & 0x7;
int shift = i * kBitsPerByte;
uint32_t scalar = (data >> shift) & 0xFFu;
- if (!u && (scalar & 0x80) != 0) scalar |= 0xffffff00;
+ if (!u && (scalar & 0x80) != 0) scalar |= 0xFFFFFF00;
set_register(rt, scalar);
} else if ((opc1_opc2 & 0x1) != 0) {
// NeonS16 / NeonU16
int i = (opc1_opc2 >> 1) & 0x3;
int shift = i * kBitsPerByte * kShortSize;
uint32_t scalar = (data >> shift) & 0xFFFFu;
- if (!u && (scalar & 0x8000) != 0) scalar |= 0xffff0000;
+ if (!u && (scalar & 0x8000) != 0) scalar |= 0xFFFF0000;
set_register(rt, scalar);
} else {
UNREACHABLE(); // Not used by V8.
@@ -3702,7 +3599,7 @@ bool get_inv_op_vfp_flag(VFPRoundingMode mode,
double val,
bool unsigned_) {
DCHECK((mode == RN) || (mode == RM) || (mode == RZ));
- double max_uint = static_cast<double>(0xffffffffu);
+ double max_uint = static_cast<double>(0xFFFFFFFFu);
double max_int = static_cast<double>(kMaxInt);
double min_int = static_cast<double>(kMinInt);
@@ -3744,7 +3641,7 @@ int VFPConversionSaturate(double val, bool unsigned_res) {
return 0;
} else {
if (unsigned_res) {
- return (val < 0) ? 0 : 0xffffffffu;
+ return (val < 0) ? 0 : 0xFFFFFFFFu;
} else {
return (val < 0) ? kMinInt : kMaxInt;
}
@@ -4496,7 +4393,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
- case 0xa: {
+ case 0xA: {
// vpmin/vpmax.s<size> Dd, Dm, Dn.
NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
bool min = instr->Bit(4) != 0;
@@ -4516,7 +4413,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
- case 0xb: {
+ case 0xB: {
// vpadd.i<size> Dd, Dm, Dn.
NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
switch (size) {
@@ -4535,7 +4432,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
- case 0xd: {
+ case 0xD: {
if (instr->Bit(4) == 0) {
float src1[4], src2[4];
get_neon_register(Vn, src1);
@@ -4555,7 +4452,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
- case 0xe: {
+ case 0xE: {
if (instr->Bits(21, 20) == 0 && instr->Bit(4) == 0) {
// vceq.f32.
float src1[4], src2[4];
@@ -4571,7 +4468,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
- case 0xf: {
+ case 0xF: {
if (instr->Bit(20) == 0 && instr->Bit(6) == 1) {
float src1[4], src2[4];
get_neon_register(Vn, src1);
@@ -4862,7 +4759,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
- case 0xa: {
+ case 0xA: {
// vpmin/vpmax.u<size> Dd, Dm, Dn.
NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
bool min = instr->Bit(4) != 0;
@@ -4882,7 +4779,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
- case 0xd: {
+ case 0xD: {
if (instr->Bits(21, 20) == 0 && instr->Bit(6) == 1 &&
instr->Bit(4) == 1) {
// vmul.f32 Qd, Qn, Qm
@@ -4902,7 +4799,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
- case 0xe: {
+ case 0xE: {
if (instr->Bit(20) == 0 && instr->Bit(4) == 0) {
// vcge/vcgt.f32 Qd, Qm, Qn
bool ge = instr->Bit(21) == 0;
@@ -5014,15 +4911,15 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
if ((imm4 & 0x1) != 0) {
size = 8;
index = imm4 >> 1;
- mask = 0xffu;
+ mask = 0xFFu;
} else if ((imm4 & 0x2) != 0) {
size = 16;
index = imm4 >> 2;
- mask = 0xffffu;
+ mask = 0xFFFFu;
} else {
size = 32;
index = imm4 >> 3;
- mask = 0xffffffffu;
+ mask = 0xFFFFFFFFu;
}
uint64_t d_data;
get_d_register(vm, &d_data);
@@ -5275,7 +5172,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
NeonSize size = static_cast<NeonSize>(instr->Bits(19, 18));
- if (instr->Bits(9, 6) == 0xd) {
+ if (instr->Bits(9, 6) == 0xD) {
// vabs<type>.<size> Qd, Qm
if (instr->Bit(10) != 0) {
// floating point (clear sign bits)
@@ -5302,7 +5199,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
break;
}
}
- } else if (instr->Bits(9, 6) == 0xf) {
+ } else if (instr->Bits(9, 6) == 0xF) {
// vneg<type>.<size> Qd, Qm (signed integer)
if (instr->Bit(10) != 0) {
// floating point (toggle sign bits)
@@ -5561,7 +5458,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
break;
case 0xA:
case 0xB:
- if ((instr->Bits(22, 20) == 5) && (instr->Bits(15, 12) == 0xf)) {
+ if ((instr->Bits(22, 20) == 5) && (instr->Bits(15, 12) == 0xF)) {
// pld: ignore instruction.
} else if (instr->SpecialValue() == 0xA && instr->Bits(22, 20) == 7) {
// dsb, dmb, isb: ignore instruction for now.
@@ -5893,18 +5790,16 @@ void Simulator::CallInternal(byte* entry) {
set_register(r11, r11_val);
}
-
-int32_t Simulator::Call(byte* entry, int argument_count, ...) {
- va_list parameters;
- va_start(parameters, argument_count);
+intptr_t Simulator::CallImpl(byte* entry, int argument_count,
+ const intptr_t* arguments) {
// Set up arguments
// First four arguments passed in registers.
- DCHECK_GE(argument_count, 4);
- set_register(r0, va_arg(parameters, int32_t));
- set_register(r1, va_arg(parameters, int32_t));
- set_register(r2, va_arg(parameters, int32_t));
- set_register(r3, va_arg(parameters, int32_t));
+ int reg_arg_count = std::min(4, argument_count);
+ if (reg_arg_count > 0) set_register(r0, arguments[0]);
+ if (reg_arg_count > 1) set_register(r1, arguments[1]);
+ if (reg_arg_count > 2) set_register(r2, arguments[2]);
+ if (reg_arg_count > 3) set_register(r3, arguments[3]);
// Remaining arguments passed on stack.
int original_stack = get_register(sp);
@@ -5914,11 +5809,8 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) {
entry_stack &= -base::OS::ActivationFrameAlignment();
}
// Store remaining arguments on stack, from low to high memory.
- intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
- for (int i = 4; i < argument_count; i++) {
- stack_argument[i - 4] = va_arg(parameters, int32_t);
- }
- va_end(parameters);
+ memcpy(reinterpret_cast<intptr_t*>(entry_stack), arguments + reg_arg_count,
+ (argument_count - reg_arg_count) * sizeof(*arguments));
set_register(sp, entry_stack);
CallInternal(entry);
@@ -5927,12 +5819,10 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) {
CHECK_EQ(entry_stack, get_register(sp));
set_register(sp, original_stack);
- int32_t result = get_register(r0);
- return result;
+ return get_register(r0);
}
-
-void Simulator::CallFP(byte* entry, double d0, double d1) {
+int32_t Simulator::CallFPImpl(byte* entry, double d0, double d1) {
if (use_eabi_hardfloat()) {
set_d_register_from_double(0, d0);
set_d_register_from_double(1, d1);
@@ -5941,13 +5831,7 @@ void Simulator::CallFP(byte* entry, double d0, double d1) {
set_register_pair_from_double(2, &d1);
}
CallInternal(entry);
-}
-
-
-int32_t Simulator::CallFPReturnsInt(byte* entry, double d0, double d1) {
- CallFP(entry, d0, d1);
- int32_t result = get_register(r0);
- return result;
+ return get_register(r0);
}
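
The hunks above dissolve the ARM-local redirection machinery into the shared SimulatorBase; the only architecture-specific piece left is SetRedirectInstruction, which writes the trap encoding (al | (0xF * B24) | kCallRtRedirected). For readers new to this code, here is a rough, self-contained sketch of the trampoline idea the deleted comment block describes; the names and types are illustrative stand-ins, not V8's API:

```cpp
#include <cstddef>
#include <cstdint>

// Illustrative sketch (not V8's types): each redirected host function owns
// a node containing a trap instruction. Generated code "calls"
// &trap_instruction; when the simulator executes the trap, it recovers the
// node, and with it the real host function, from the trap's own address.
struct RedirectionSketch {
  void* host_function;        // Real C++ function to invoke.
  uint32_t trap_instruction;  // svc-style encoding the simulator decodes.
  RedirectionSketch* next;    // Intrusive list of live redirections.

  static RedirectionSketch* FromTrapAddress(void* trap_pc) {
    char* node = reinterpret_cast<char*>(trap_pc) -
                 offsetof(RedirectionSketch, trap_instruction);
    return reinterpret_cast<RedirectionSketch*>(node);
  }
};
```

When the simulator hits the trap, FromTrapAddress() on the faulting pc yields the owning node, and the simulator calls host_function natively on the simulated code's behalf.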
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h
index 26889018b5..1cb11ffd96 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/arm/simulator-arm.h
@@ -2,11 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
// Declares a Simulator for ARM instructions if we are not generating a native
// ARM binary. This Simulator allows us to run and debug ARM code generation on
// regular desktop machines.
-// V8 calls into generated code by "calling" the CALL_GENERATED_CODE macro,
+// V8 calls into generated code by using the GeneratedCode class,
// which will start execution in the Simulator or forward to the real entry
// on an ARM HW platform.
@@ -18,56 +17,13 @@
#include "src/base/platform/mutex.h"
#include "src/boxed-float.h"
-#if !defined(USE_SIMULATOR)
-// Running without a simulator on a native arm platform.
-
-namespace v8 {
-namespace internal {
-
-// When running without a simulator we call the entry directly.
-#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
- (entry(p0, p1, p2, p3, p4))
-
-typedef int (*arm_regexp_matcher)(String*, int, const byte*, const byte*, int*,
- int, Address, int, Isolate*);
-
-// Call the generated regexp code directly. The code at the entry address
-// should act as a function matching the type arm_regexp_matcher.
-#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
- p7, p8) \
- (FUNCTION_CAST<arm_regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, p8))
-
-// The stack limit beyond which we will throw stack overflow errors in
-// generated code. Because generated code on arm uses the C stack, we
-// just use the C stack limit.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
- uintptr_t c_limit) {
- USE(isolate);
- return c_limit;
- }
-
- static inline uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
- uintptr_t try_catch_address) {
- USE(isolate);
- return try_catch_address;
- }
-
- static inline void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
- USE(isolate);
- }
-};
-
-} // namespace internal
-} // namespace v8
-
-#else // !defined(USE_SIMULATOR)
+#if defined(USE_SIMULATOR)
// Running with a simulator.
#include "src/arm/constants-arm.h"
#include "src/assembler.h"
#include "src/base/hashmap.h"
+#include "src/simulator-base.h"
namespace v8 {
namespace internal {
@@ -102,8 +58,7 @@ class CachePage {
char validity_map_[kValidityMapSize]; // One byte per line.
};
-
-class Simulator {
+class Simulator : public SimulatorBase {
public:
friend class ArmDebugger;
enum Register {
@@ -134,7 +89,7 @@ class Simulator {
// The currently executing Simulator instance. Potentially there can be one
// for each native thread.
- static Simulator* current(v8::internal::Isolate* isolate);
+ V8_EXPORT_PRIVATE static Simulator* current(v8::internal::Isolate* isolate);
// Accessors for register state. Reading the pc value adheres to the ARM
// architecture specification and is off by a 8 from the currently executing
@@ -203,18 +158,16 @@ class Simulator {
// Executes ARM instructions until the PC reaches end_sim_pc.
void Execute();
- // Call on program start.
- static void Initialize(Isolate* isolate);
-
- static void TearDown(base::CustomMatcherHashMap* i_cache, Redirection* first);
+ template <typename Return, typename... Args>
+ Return Call(byte* entry, Args... args) {
+ return VariadicCall<Return>(this, &Simulator::CallImpl, entry, args...);
+ }
- // V8 generally calls into generated JS code with 5 parameters and into
- // generated RegExp code with 7 parameters. This is a convenience function,
- // which sets up the simulator state and grabs the result on return.
- int32_t Call(byte* entry, int argument_count, ...);
// Alternative: call a 2-argument double function.
- void CallFP(byte* entry, double d0, double d1);
- int32_t CallFPReturnsInt(byte* entry, double d0, double d1);
+ template <typename Return>
+ Return CallFP(byte* entry, double d0, double d1) {
+ return ConvertReturn<Return>(CallFPImpl(entry, d0, d1));
+ }
// Push an address onto the JS stack.
uintptr_t PushAddress(uintptr_t address);
@@ -226,6 +179,9 @@ class Simulator {
void set_last_debugger_input(char* input);
char* last_debugger_input() { return last_debugger_input_; }
+ // Redirection support.
+ static void SetRedirectInstruction(Instruction* instruction);
+
// ICache checking.
static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
size_t size);
@@ -255,6 +211,10 @@ class Simulator {
end_sim_pc = -2
};
+ V8_EXPORT_PRIVATE intptr_t CallImpl(byte* entry, int argument_count,
+ const intptr_t* arguments);
+ intptr_t CallFPImpl(byte* entry, double d0, double d1);
+
// Unsupported instructions use Format to print an error and stop execution.
void Format(Instruction* instr, const char* format);
@@ -369,11 +329,6 @@ class Simulator {
static CachePage* GetCachePage(base::CustomMatcherHashMap* i_cache,
void* page);
- // Runtime call support. Uses the isolate in a thread-safe way.
- static void* RedirectExternalReference(
- Isolate* isolate, void* external_function,
- v8::internal::ExternalReference::Type type);
-
// Handle arguments and return value for runtime FP functions.
void GetFpArgs(double* x, double* y, int32_t* z);
void SetFpResult(const double& result);
@@ -541,45 +496,8 @@ class Simulator {
static base::LazyInstance<GlobalMonitor>::type global_monitor_;
};
-
-// When running with the simulator transition into simulated execution at this
-// point.
-#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
- reinterpret_cast<Object*>(Simulator::current(isolate)->Call( \
- FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
-
-#define CALL_GENERATED_FP_INT(isolate, entry, p0, p1) \
- Simulator::current(isolate)->CallFPReturnsInt(FUNCTION_ADDR(entry), p0, p1)
-
-#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
- p7, p8) \
- Simulator::current(isolate)->Call(entry, 9, p0, p1, p2, p3, p4, p5, p6, p7, \
- p8)
-
-// The simulator has its own stack. Thus it has a different stack limit from
-// the C-based native code. The JS-based limit normally points near the end of
-// the simulator stack. When the C-based limit is exhausted we reflect that by
-// lowering the JS-based limit as well, to make stack checks trigger.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
- uintptr_t c_limit) {
- return Simulator::current(isolate)->StackLimit(c_limit);
- }
-
- static inline uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
- uintptr_t try_catch_address) {
- Simulator* sim = Simulator::current(isolate);
- return sim->PushAddress(try_catch_address);
- }
-
- static inline void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
- Simulator::current(isolate)->PopAddress();
- }
-};
-
} // namespace internal
} // namespace v8
-#endif // !defined(USE_SIMULATOR)
+#endif // defined(USE_SIMULATOR)
#endif // V8_ARM_SIMULATOR_ARM_H_
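
With the varargs Call(entry, argument_count, ...) gone, callers now use the Call template above, which funnels every invocation through CallImpl via VariadicCall. Below is a minimal stand-alone sketch of that marshalling pattern, assuming integral register-width arguments; CallImplSketch is a hypothetical stub standing in for the real register/stack setup:

```cpp
#include <cstdint>
#include <cstdio>

// Stand-in for Simulator::CallImpl: the real one places args[0..3] in
// r0-r3 and spills the rest to the simulated stack before executing at
// the entry point. Here it just echoes what it received.
static intptr_t CallImplSketch(const intptr_t* args, int count) {
  for (int i = 0; i < count; ++i) {
    std::printf("arg[%d] = %lld\n", i, static_cast<long long>(args[i]));
  }
  return count > 0 ? args[0] : 0;  // Pretend r0 holds the result.
}

template <typename Return, typename... Args>
Return CallSketch(Args... raw_args) {
  // Pack the argument list into register-width slots; the trailing 0
  // keeps the array non-empty for zero-argument calls.
  intptr_t packed[] = {static_cast<intptr_t>(raw_args)..., 0};
  return static_cast<Return>(CallImplSketch(packed, sizeof...(Args)));
}

int main() {
  int r = CallSketch<int>(10, 20, 30, 40, 50);  // 4 register args, 1 stack arg
  std::printf("result = %d\n", r);
  return 0;
}
```

The type-safety win over the old va_arg loop is that each argument is converted where its static type is still known, instead of being reinterpreted inside the callee.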
diff --git a/deps/v8/src/arm64/assembler-arm64-inl.h b/deps/v8/src/arm64/assembler-arm64-inl.h
index 70d50eb330..11c4bbf33f 100644
--- a/deps/v8/src/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/assembler-arm64-inl.h
@@ -532,12 +532,6 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
}
-Address Assembler::target_address_at(Address pc, Code* code) {
- Address constant_pool = code ? code->constant_pool() : nullptr;
- return target_address_at(pc, constant_pool);
-}
-
-
Address Assembler::target_address_from_return_address(Address pc) {
// Returns the address of the call target from the return address that will
// be returned to after a call.
@@ -615,14 +609,6 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
}
-void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
- Address target,
- ICacheFlushMode icache_flush_mode) {
- Address constant_pool = code ? code->constant_pool() : nullptr;
- set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
-}
-
-
int RelocInfo::target_address_size() {
return kPointerSize;
}
@@ -630,7 +616,7 @@ int RelocInfo::target_address_size() {
Address RelocInfo::target_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
- return Assembler::target_address_at(pc_, host_);
+ return Assembler::target_address_at(pc_, constant_pool_);
}
Address RelocInfo::target_address_address() {
@@ -647,21 +633,21 @@ Address RelocInfo::constant_pool_entry_address() {
HeapObject* RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return HeapObject::cast(
- reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_)));
+ return HeapObject::cast(reinterpret_cast<Object*>(
+ Assembler::target_address_at(pc_, constant_pool_)));
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Handle<HeapObject>(
- reinterpret_cast<HeapObject**>(Assembler::target_address_at(pc_, host_)));
+ return Handle<HeapObject>(reinterpret_cast<HeapObject**>(
+ Assembler::target_address_at(pc_, constant_pool_)));
}
void RelocInfo::set_target_object(HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(target->GetIsolate(), pc_, host_,
+ Assembler::set_target_address_at(target->GetIsolate(), pc_, constant_pool_,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
@@ -674,7 +660,7 @@ void RelocInfo::set_target_object(HeapObject* target,
Address RelocInfo::target_external_reference() {
DCHECK(rmode_ == EXTERNAL_REFERENCE);
- return Assembler::target_address_at(pc_, host_);
+ return Assembler::target_address_at(pc_, constant_pool_);
}
@@ -711,7 +697,7 @@ void RelocInfo::WipeOut(Isolate* isolate) {
if (IsInternalReference(rmode_)) {
Memory::Address_at(pc_) = nullptr;
} else {
- Assembler::set_target_address_at(isolate, pc_, host_, nullptr);
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_, nullptr);
}
}
diff --git a/deps/v8/src/arm64/assembler-arm64.cc b/deps/v8/src/arm64/assembler-arm64.cc
index 2093a89df6..a031884e1f 100644
--- a/deps/v8/src/arm64/assembler-arm64.cc
+++ b/deps/v8/src/arm64/assembler-arm64.cc
@@ -147,9 +147,6 @@ CPURegList CPURegList::GetSafepointSavedRegisters() {
// is a caller-saved register according to the procedure call standard.
list.Combine(18);
- // Drop jssp as the stack pointer doesn't need to be included.
- list.Remove(28);
-
// Add the link register (x30) to the safepoint list.
list.Combine(30);
@@ -186,7 +183,8 @@ uint32_t RelocInfo::embedded_size() const {
void RelocInfo::set_embedded_address(Isolate* isolate, Address address,
ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate, pc_, host_, address, flush_mode);
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_, address,
+ flush_mode);
}
void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
@@ -2636,7 +2634,7 @@ Instr Assembler::LoadStoreStructAddrModeField(const MemOperand& addr) {
} else {
// The immediate post index addressing mode is indicated by rm = 31.
// The immediate is implied by the number of vector registers used.
- addr_field |= (0x1f << Rm_offset);
+ addr_field |= (0x1F << Rm_offset);
}
} else {
DCHECK(addr.IsImmediateOffset() && (addr.offset() == 0));
@@ -3003,7 +3001,7 @@ void Assembler::fmov(const VRegister& vd, double imm) {
} else {
DCHECK(vd.Is2D());
Instr op = NEONModifiedImmediate_MOVI | NEONModifiedImmediateOpBit;
- Emit(NEON_Q | op | ImmNEONFP(imm) | NEONCmode(0xf) | Rd(vd));
+ Emit(NEON_Q | op | ImmNEONFP(imm) | NEONCmode(0xF) | Rd(vd));
}
}
@@ -3015,7 +3013,7 @@ void Assembler::fmov(const VRegister& vd, float imm) {
DCHECK(vd.Is2S() | vd.Is4S());
Instr op = NEONModifiedImmediate_MOVI;
Instr q = vd.Is4S() ? NEON_Q : 0;
- Emit(q | op | ImmNEONFP(imm) | NEONCmode(0xf) | Rd(vd));
+ Emit(q | op | ImmNEONFP(imm) | NEONCmode(0xF) | Rd(vd));
}
}
@@ -3596,15 +3594,15 @@ void Assembler::movi(const VRegister& vd, const uint64_t imm, Shift shift,
DCHECK_EQ(shift_amount, 0);
int imm8 = 0;
for (int i = 0; i < 8; ++i) {
- int byte = (imm >> (i * 8)) & 0xff;
- DCHECK((byte == 0) || (byte == 0xff));
- if (byte == 0xff) {
+ int byte = (imm >> (i * 8)) & 0xFF;
+ DCHECK((byte == 0) || (byte == 0xFF));
+ if (byte == 0xFF) {
imm8 |= (1 << i);
}
}
Instr q = vd.Is2D() ? NEON_Q : 0;
Emit(q | NEONModImmOp(1) | NEONModifiedImmediate_MOVI |
- ImmNEONabcdefgh(imm8) | NEONCmode(0xe) | Rd(vd));
+ ImmNEONabcdefgh(imm8) | NEONCmode(0xE) | Rd(vd));
} else if (shift == LSL) {
NEONModifiedImmShiftLsl(vd, static_cast<int>(imm), shift_amount,
NEONModifiedImmediate_MOVI);
@@ -3953,7 +3951,7 @@ uint32_t Assembler::FPToImm8(double imm) {
// bit6: 0b00.0000
uint64_t bit6 = ((bits >> 61) & 0x1) << 6;
// bit5_to_0: 00cd.efgh
- uint64_t bit5_to_0 = (bits >> 48) & 0x3f;
+ uint64_t bit5_to_0 = (bits >> 48) & 0x3F;
return static_cast<uint32_t>(bit7 | bit6 | bit5_to_0);
}
@@ -3971,7 +3969,7 @@ void Assembler::MoveWide(const Register& rd, uint64_t imm, int shift,
// Check that the top 32 bits are zero (a positive 32-bit number) or top
// 33 bits are one (a negative 32-bit number, sign extended to 64 bits).
DCHECK(((imm >> kWRegSizeInBits) == 0) ||
- ((imm >> (kWRegSizeInBits - 1)) == 0x1ffffffff));
+ ((imm >> (kWRegSizeInBits - 1)) == 0x1FFFFFFFF));
imm &= kWRegMask;
}
@@ -3984,16 +3982,16 @@ void Assembler::MoveWide(const Register& rd, uint64_t imm, int shift,
// Calculate a new immediate and shift combination to encode the immediate
// argument.
shift = 0;
- if ((imm & ~0xffffUL) == 0) {
+ if ((imm & ~0xFFFFUL) == 0) {
// Nothing to do.
- } else if ((imm & ~(0xffffUL << 16)) == 0) {
+ } else if ((imm & ~(0xFFFFUL << 16)) == 0) {
imm >>= 16;
shift = 1;
- } else if ((imm & ~(0xffffUL << 32)) == 0) {
+ } else if ((imm & ~(0xFFFFUL << 32)) == 0) {
DCHECK(rd.Is64Bits());
imm >>= 32;
shift = 2;
- } else if ((imm & ~(0xffffUL << 48)) == 0) {
+ } else if ((imm & ~(0xFFFFUL << 48)) == 0) {
DCHECK(rd.Is64Bits());
imm >>= 48;
shift = 3;
@@ -4247,7 +4245,7 @@ void Assembler::NEONModifiedImmShiftMsl(const VRegister& vd, const int imm8,
DCHECK(is_uint8(imm8));
int cmode_0 = (shift_amount >> 4) & 1;
- int cmode = 0xc | cmode_0;
+ int cmode = 0xC | cmode_0;
Instr q = vd.IsQ() ? NEON_Q : 0;
@@ -4343,7 +4341,7 @@ void Assembler::DataProcExtendedRegister(const Register& rd,
bool Assembler::IsImmAddSub(int64_t immediate) {
return is_uint12(immediate) ||
- (is_uint12(immediate >> 12) && ((immediate & 0xfff) == 0));
+ (is_uint12(immediate >> 12) && ((immediate & 0xFFF) == 0));
}
void Assembler::LoadStore(const CPURegister& rt,
@@ -4526,7 +4524,7 @@ bool Assembler::IsImmLogical(uint64_t value,
clz_a = CountLeadingZeros(a, kXRegSizeInBits);
int clz_c = CountLeadingZeros(c, kXRegSizeInBits);
d = clz_a - clz_c;
- mask = ((V8_UINT64_C(1) << d) - 1);
+ mask = ((uint64_t{1} << d) - 1);
out_n = 0;
} else {
// Handle degenerate cases.
@@ -4547,7 +4545,7 @@ bool Assembler::IsImmLogical(uint64_t value,
// the general case above, and set the N bit in the output.
clz_a = CountLeadingZeros(a, kXRegSizeInBits);
d = 64;
- mask = ~V8_UINT64_C(0);
+ mask = ~uint64_t{0};
out_n = 1;
}
}
@@ -4596,7 +4594,7 @@ bool Assembler::IsImmLogical(uint64_t value,
// Count the set bits in our basic stretch. The special case of clz(0) == -1
// makes the answer come out right for stretches that reach the very top of
- // the word (e.g. numbers like 0xffffc00000000000).
+ // the word (e.g. numbers like 0xFFFFC00000000000).
int clz_b = (b == 0) ? -1 : CountLeadingZeros(b, kXRegSizeInBits);
int s = clz_a - clz_b;
@@ -4628,7 +4626,7 @@ bool Assembler::IsImmLogical(uint64_t value,
//
// So we 'or' (-d << 1) with our computed s to form imms.
*n = out_n;
- *imm_s = ((-d << 1) | (s - 1)) & 0x3f;
+ *imm_s = ((-d << 1) | (s - 1)) & 0x3F;
*imm_r = r;
return true;
@@ -4645,13 +4643,13 @@ bool Assembler::IsImmFP32(float imm) {
// aBbb.bbbc.defg.h000.0000.0000.0000.0000
uint32_t bits = bit_cast<uint32_t>(imm);
// bits[19..0] are cleared.
- if ((bits & 0x7ffff) != 0) {
+ if ((bits & 0x7FFFF) != 0) {
return false;
}
// bits[29..25] are all set or all cleared.
- uint32_t b_pattern = (bits >> 16) & 0x3e00;
- if (b_pattern != 0 && b_pattern != 0x3e00) {
+ uint32_t b_pattern = (bits >> 16) & 0x3E00;
+ if (b_pattern != 0 && b_pattern != 0x3E00) {
return false;
}
@@ -4670,13 +4668,13 @@ bool Assembler::IsImmFP64(double imm) {
// 0000.0000.0000.0000.0000.0000.0000.0000
uint64_t bits = bit_cast<uint64_t>(imm);
// bits[47..0] are cleared.
- if ((bits & 0xffffffffffffL) != 0) {
+ if ((bits & 0xFFFFFFFFFFFFL) != 0) {
return false;
}
// bits[61..54] are all set or all cleared.
- uint32_t b_pattern = (bits >> 48) & 0x3fc0;
- if (b_pattern != 0 && b_pattern != 0x3fc0) {
+ uint32_t b_pattern = (bits >> 48) & 0x3FC0;
+ if (b_pattern != 0 && b_pattern != 0x3FC0) {
return false;
}
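
Most of this file is mechanical hex-case cleanup, but the MoveWide hunk is worth pausing on: a movz/movn/movk immediate is a 16-bit payload shifted left by 0, 16, 32 or 48 bits, and the code walks those four masks to find the populated half-word. A compact sketch of that selection logic (not the assembler's real interface):

```cpp
#include <cstdint>
#include <cstdio>

// Sketch of MoveWide's shift selection: find which 16-bit half-word of
// `imm` carries the payload, assuming exactly one does.
static bool EncodeMoveWide(uint64_t imm, int* imm16, int* hw_shift) {
  for (int shift = 0; shift < 4; ++shift) {
    uint64_t mask = uint64_t{0xFFFF} << (16 * shift);
    if ((imm & ~mask) == 0) {
      *imm16 = static_cast<int>(imm >> (16 * shift));
      *hw_shift = shift;
      return true;
    }
  }
  return false;  // Needs a movz/movk sequence, not a single instruction.
}

int main() {
  int imm16, hw;
  if (EncodeMoveWide(uint64_t{0xFFFF} << 32, &imm16, &hw)) {
    std::printf("movz x0, #0x%x, lsl #%d\n", imm16, 16 * hw);  // lsl #32
  }
  return 0;
}
```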
diff --git a/deps/v8/src/arm64/assembler-arm64.h b/deps/v8/src/arm64/assembler-arm64.h
index bfdab599a3..2deae8aaa4 100644
--- a/deps/v8/src/arm64/assembler-arm64.h
+++ b/deps/v8/src/arm64/assembler-arm64.h
@@ -39,7 +39,8 @@ namespace internal {
#define ALLOCATABLE_GENERAL_REGISTERS(R) \
R(x0) R(x1) R(x2) R(x3) R(x4) R(x5) R(x6) R(x7) \
R(x8) R(x9) R(x10) R(x11) R(x12) R(x13) R(x14) R(x15) \
- R(x18) R(x19) R(x20) R(x21) R(x22) R(x23) R(x24) R(x27)
+ R(x18) R(x19) R(x20) R(x21) R(x22) R(x23) R(x24) R(x27) \
+ R(x28)
#define FLOAT_REGISTERS(V) \
V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) \
@@ -295,6 +296,7 @@ class Register : public CPURegister {
static_assert(IS_TRIVIALLY_COPYABLE(Register),
"Register can efficiently be passed by value");
+constexpr bool kPadArguments = true;
constexpr bool kSimpleFPAliasing = true;
constexpr bool kSimdMaskRegisters = false;
@@ -479,13 +481,6 @@ ALIAS_REGISTER(Register, root, x26);
ALIAS_REGISTER(Register, rr, x26);
// Context pointer register.
ALIAS_REGISTER(Register, cp, x27);
-// We use a register as a JS stack pointer to overcome the restriction on the
-// architectural SP alignment.
-// We chose x28 because it is contiguous with the other specific purpose
-// registers.
-STATIC_ASSERT(kJSSPCode == 28);
-ALIAS_REGISTER(Register, jssp, x28);
-ALIAS_REGISTER(Register, wjssp, w28);
ALIAS_REGISTER(Register, fp, x29);
ALIAS_REGISTER(Register, lr, x30);
ALIAS_REGISTER(Register, xzr, x31);
@@ -1001,10 +996,6 @@ class Assembler : public AssemblerBase {
inline static void set_target_address_at(
Isolate* isolate, Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
- static inline Address target_address_at(Address pc, Code* code);
- static inline void set_target_address_at(
- Isolate* isolate, Address pc, Code* code, Address target,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// Return the code target address at a call site from the return address of
// that call in the instruction stream.
diff --git a/deps/v8/src/arm64/code-stubs-arm64.cc b/deps/v8/src/arm64/code-stubs-arm64.cc
index 1ad50e5112..52f92b6af9 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.cc
+++ b/deps/v8/src/arm64/code-stubs-arm64.cc
@@ -30,7 +30,7 @@ namespace internal {
void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
__ Mov(x5, Operand(x0, LSL, kPointerSizeLog2));
- __ Str(x1, MemOperand(jssp, x5));
+ __ Str(x1, MemOperand(__ StackPointer(), x5));
__ Push(x1, x2);
__ Add(x0, x0, Operand(3));
__ TailCallRuntime(Runtime::kNewArray);
@@ -42,7 +42,6 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
Register result = destination();
DCHECK(result.Is64Bits());
- DCHECK(jssp.Is(masm->StackPointer()));
UseScratchRegisterScope temps(masm);
Register scratch1 = temps.AcquireX();
@@ -75,7 +74,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
if (masm->emit_debug_code()) {
__ Cmp(exponent, HeapNumber::kExponentBias + 63);
// Exponents less than this should have been handled by the Fcvt case.
- __ Check(ge, kUnexpectedValue);
+ __ Check(ge, AbortReason::kUnexpectedValue);
}
// Isolate the mantissa bits, and set the implicit '1'.
@@ -100,8 +99,8 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
void MathPowStub::Generate(MacroAssembler* masm) {
// Stack on entry:
- // jssp[0]: Exponent (as a tagged value).
- // jssp[1]: Base (as a tagged value).
+ // sp[0]: Exponent (as a tagged value).
+ // sp[1]: Base (as a tagged value).
//
// The (tagged) result will be returned in x0, as a heap number.
@@ -276,15 +275,14 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// The stack on entry holds the arguments and the receiver, with the receiver
// at the highest address:
//
- // jssp]argc-1]: receiver
- // jssp[argc-2]: arg[argc-2]
+  //  sp[argc-1]: receiver
+ // sp[argc-2]: arg[argc-2]
// ... ...
- // jssp[1]: arg[1]
- // jssp[0]: arg[0]
+ // sp[1]: arg[1]
+ // sp[0]: arg[0]
//
// The arguments are in reverse order, so that arg[argc-2] is actually the
// first argument to the target function and arg[0] is the last.
- DCHECK(jssp.Is(__ StackPointer()));
const Register& argc_input = x0;
const Register& target_input = x1;
@@ -385,7 +383,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ Ldr(temp, MemOperand(fp, ExitFrameConstants::kSPOffset));
__ Ldr(temp, MemOperand(temp, -static_cast<int64_t>(kXRegSize)));
__ Cmp(temp, x12);
- __ Check(eq, kReturnAddressNotFoundInFrame);
+ __ Check(eq, AbortReason::kReturnAddressNotFoundInFrame);
}
// Call the builtin.
@@ -415,8 +413,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ Peek(argc, 2 * kPointerSize);
__ Peek(target, 3 * kPointerSize);
- __ LeaveExitFrame(save_doubles(), x10);
- DCHECK(jssp.Is(__ StackPointer()));
+ __ LeaveExitFrame(save_doubles(), x10, x9);
if (!argv_in_register()) {
// Drop the remaining stack slots and return from the stub.
__ DropArguments(x11);
@@ -424,10 +421,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ AssertFPCRState();
__ Ret();
- // The stack pointer is still csp if we aren't returning, and the frame
- // hasn't changed (except for the return address).
- __ SetStackPointer(csp);
-
// Handling of exception.
__ Bind(&exception_returned);
@@ -453,18 +446,16 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ CallCFunction(find_handler, 3);
}
- // We didn't execute a return case, so the stack frame hasn't been updated
- // (except for the return address slot). However, we don't need to initialize
- // jssp because the throw method will immediately overwrite it when it
- // unwinds the stack.
- __ SetStackPointer(jssp);
-
// Retrieve the handler context, SP and FP.
__ Mov(cp, Operand(pending_handler_context_address));
__ Ldr(cp, MemOperand(cp));
- __ Mov(jssp, Operand(pending_handler_sp_address));
- __ Ldr(jssp, MemOperand(jssp));
- __ Mov(csp, jssp);
+ {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.AcquireX();
+ __ Mov(scratch, Operand(pending_handler_sp_address));
+ __ Ldr(scratch, MemOperand(scratch));
+ __ Mov(csp, scratch);
+ }
__ Mov(fp, Operand(pending_handler_fp_address));
__ Ldr(fp, MemOperand(fp));
@@ -481,9 +472,8 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ Br(x10);
}
-
// This is the entry point from C++. 5 arguments are provided in x0-x4.
-// See use of the CALL_GENERATED_CODE macro for example in src/execution.cc.
+// See the use of JSEntryFunction in src/execution.cc for an example.
// Input:
// x0: code entry.
// x1: function.
@@ -493,7 +483,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Output:
// x0: result.
void JSEntryStub::Generate(MacroAssembler* masm) {
- DCHECK(jssp.Is(__ StackPointer()));
Register code_entry = x0;
// Enable instruction instrumentation. This only works on the simulator, and
@@ -502,21 +491,16 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
Label invoke, handler_entry, exit;
- // Push callee-saved registers and synchronize the system stack pointer (csp)
- // and the JavaScript stack pointer (jssp).
- //
- // We must not write to jssp until after the PushCalleeSavedRegisters()
- // call, since jssp is itself a callee-saved register.
- __ SetStackPointer(csp);
__ PushCalleeSavedRegisters();
- __ Mov(jssp, csp);
- __ SetStackPointer(jssp);
ProfileEntryHookStub::MaybeCallEntryHook(masm);
// Set up the reserved register for 0.0.
__ Fmov(fp_zero, 0.0);
+ // Initialize the root array register
+ __ InitializeRootRegister();
+
// Build an entry frame (see layout below).
StackFrame::Type marker = type();
int64_t bad_frame_pointer = -1L; // Bad frame pointer to fail if it is used.
@@ -527,7 +511,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Push(x13, x12, xzr, x10);
// Set up fp.
- __ Sub(fp, jssp, EntryFrameConstants::kCallerFPOffset);
+ __ Sub(fp, __ StackPointer(), EntryFrameConstants::kCallerFPOffset);
// Push the JS entry frame marker. Also set js_entry_sp if this is the
// outermost JS call.
@@ -546,14 +530,15 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Str(fp, MemOperand(x10));
__ Bind(&done);
- __ Push(x12);
+ __ Push(x12, padreg);
// The frame set up looks like this:
- // jssp[0] : JS entry frame marker.
- // jssp[1] : C entry FP.
- // jssp[2] : stack frame marker.
- // jssp[3] : stack frame marker.
- // jssp[4] : bad frame pointer 0xfff...ff <- fp points here.
+ // sp[0] : padding.
+ // sp[1] : JS entry frame marker.
+ // sp[2] : C entry FP.
+ // sp[3] : stack frame marker.
+ // sp[4] : stack frame marker.
+ // sp[5] : bad frame pointer 0xFFF...FF <- fp points here.
// Jump to a faked try block that does the invoke, with a faked catch
// block that sets the pending exception.
@@ -583,8 +568,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Bind(&invoke);
// Push new stack handler.
- DCHECK(jssp.Is(__ StackPointer()));
- static_assert(StackHandlerConstants::kSize == 1 * kPointerSize,
+ static_assert(StackHandlerConstants::kSize == 2 * kPointerSize,
"Unexpected offset for StackHandlerConstants::kSize");
static_assert(StackHandlerConstants::kNextOffset == 0 * kPointerSize,
"Unexpected offset for StackHandlerConstants::kNextOffset");
@@ -592,10 +576,15 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Link the current handler as the next handler.
__ Mov(x11, ExternalReference(IsolateAddressId::kHandlerAddress, isolate()));
__ Ldr(x10, MemOperand(x11));
- __ Push(x10);
+ __ Push(padreg, x10);
// Set this new handler as the current one.
- __ Str(jssp, MemOperand(x11));
+ {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.AcquireX();
+ __ Mov(scratch, __ StackPointer());
+ __ Str(scratch, MemOperand(x11));
+ }
// If an exception not caught by another handler occurs, this handler
// returns control to the code after the B(&invoke) above, which
@@ -612,37 +601,32 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// x2: receiver.
// x3: argc.
// x4: argv.
-
- if (type() == StackFrame::CONSTRUCT_ENTRY) {
- __ Call(BUILTIN_CODE(isolate(), JSConstructEntryTrampoline),
- RelocInfo::CODE_TARGET);
- } else {
- __ Call(BUILTIN_CODE(isolate(), JSEntryTrampoline), RelocInfo::CODE_TARGET);
- }
+ __ Call(EntryTrampoline(), RelocInfo::CODE_TARGET);
// Pop the stack handler and unlink this frame from the handler chain.
static_assert(StackHandlerConstants::kNextOffset == 0 * kPointerSize,
"Unexpected offset for StackHandlerConstants::kNextOffset");
- __ Pop(x10);
+ __ Pop(x10, padreg);
__ Mov(x11, ExternalReference(IsolateAddressId::kHandlerAddress, isolate()));
- __ Drop(StackHandlerConstants::kSize - kXRegSize, kByteSizeInBytes);
+ __ Drop(StackHandlerConstants::kSlotCount - 2);
__ Str(x10, MemOperand(x11));
__ Bind(&exit);
// x0 holds the result.
// The stack pointer points to the top of the entry frame pushed on entry from
// C++ (at the beginning of this stub):
- // jssp[0] : JS entry frame marker.
- // jssp[1] : C entry FP.
- // jssp[2] : stack frame marker.
- // jssp[3] : stack frmae marker.
- // jssp[4] : bad frame pointer 0xfff...ff <- fp points here.
+ // sp[0] : padding.
+ // sp[1] : JS entry frame marker.
+ // sp[2] : C entry FP.
+ // sp[3] : stack frame marker.
+ // sp[4] : stack frame marker.
+ // sp[5] : bad frame pointer 0xFFF...FF <- fp points here.
// Check if the current stack frame is marked as the outermost JS frame.
Label non_outermost_js_2;
{
Register c_entry_fp = x11;
- __ Pop(x10, c_entry_fp);
+ __ PeekPair(x10, c_entry_fp, 1 * kPointerSize);
__ Cmp(x10, StackFrame::OUTERMOST_JSENTRY_FRAME);
__ B(ne, &non_outermost_js_2);
__ Mov(x12, ExternalReference(js_entry_sp));
@@ -656,21 +640,17 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
}
// Reset the stack to the callee saved registers.
- __ Drop(-EntryFrameConstants::kCallerFPOffset, kByteSizeInBytes);
+ static_assert(EntryFrameConstants::kFixedFrameSize % (2 * kPointerSize) == 0,
+ "Size of entry frame is not a multiple of 16 bytes");
+ __ Drop(EntryFrameConstants::kFixedFrameSize / kPointerSize);
// Restore the callee-saved registers and return.
- DCHECK(jssp.Is(__ StackPointer()));
- __ Mov(csp, jssp);
- __ SetStackPointer(csp);
__ PopCalleeSavedRegisters();
- // After this point, we must not modify jssp because it is a callee-saved
- // register which we have just restored.
__ Ret();
}
-// The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
-// a "Push lr" instruction, followed by a call.
+// The entry hook is a Push (stp) instruction, followed by a call.
static const unsigned int kProfileEntryHookCallSize =
- Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
+ (1 * kInstructionSize) + Assembler::kCallSizeWithRelocation;
void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
Zone* zone) {
@@ -748,14 +728,6 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
void DirectCEntryStub::Generate(MacroAssembler* masm) {
- // When calling into C++ code the stack pointer must be csp.
- // Therefore this code must use csp for peek/poke operations when the
- // stub is generated. When the stub is called
- // (via DirectCEntryStub::GenerateCall), the caller must setup an ExitFrame
- // and configure the stack pointer *before* doing the call.
- const Register old_stack_pointer = __ StackPointer();
- __ SetStackPointer(csp);
-
// Put return address on the stack (accessible to GC through exit frame pc).
__ Poke(lr, 0);
// Call the C++ function.
@@ -764,8 +736,6 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) {
__ Peek(lr, 0);
__ AssertFPCRState();
__ Ret();
-
- __ SetStackPointer(old_stack_pointer);
}
void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
@@ -806,7 +776,7 @@ static void CreateArrayDispatch(MacroAssembler* masm,
}
// If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
} else {
UNREACHABLE();
@@ -856,7 +826,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
__ Ldr(x10, FieldMemOperand(allocation_site, 0));
__ JumpIfNotRoot(x10, Heap::kAllocationSiteMapRootIndex,
&normal_sequence);
- __ Assert(eq, kExpectedAllocationSite);
+ __ Assert(eq, AbortReason::kExpectedAllocationSite);
}
// Save the resulting elements kind in type info. We can't just store 'kind'
@@ -884,7 +854,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
}
// If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
} else {
UNREACHABLE();
}
@@ -972,7 +942,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ JumpIfSmi(x10, &unexpected_map);
__ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
__ Bind(&unexpected_map);
- __ Abort(kUnexpectedInitialMapForArrayFunction);
+ __ Abort(AbortReason::kUnexpectedInitialMapForArrayFunction);
__ Bind(&map_ok);
// We should either have undefined in the allocation_site register or a
@@ -1069,7 +1039,7 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
__ JumpIfSmi(x10, &unexpected_map);
__ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
__ Bind(&unexpected_map);
- __ Abort(kUnexpectedInitialMapForArrayFunction);
+ __ Abort(AbortReason::kUnexpectedInitialMapForArrayFunction);
__ Bind(&map_ok);
}
@@ -1085,7 +1055,9 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
Label done;
__ Cmp(x3, PACKED_ELEMENTS);
__ Ccmp(x3, HOLEY_ELEMENTS, ZFlag, ne);
- __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray);
+ __ Assert(
+ eq,
+ AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray);
}
Label fast_elements_case;
@@ -1202,7 +1174,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
if (__ emit_debug_code()) {
__ Ldr(w1, MemOperand(handle_scope_base, kLevelOffset));
__ Cmp(w1, level_reg);
- __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
+ __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall);
}
__ Sub(level_reg, level_reg, 1);
__ Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
@@ -1218,7 +1190,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ Peek(x21, (spill_offset + 2) * kXRegSize);
__ Peek(x22, (spill_offset + 3) * kXRegSize);
- __ LeaveExitFrame(false, x1);
+ __ LeaveExitFrame(false, x1, x5);
// Check if the function scheduled an exception.
__ Mov(x5, ExternalReference::scheduled_exception_address(isolate));
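
One pattern recurs throughout this stub rewrite: odd-length pushes are paired with padreg (__ Push(x12, padreg), __ Push(padreg, x10)) and StackHandlerConstants grows from one slot to two. The reason is that with jssp retired, every frame now lives on csp, which AArch64 requires to be 16-byte aligned at each memory access, so every frame piece must occupy an even number of 8-byte slots. A compile-time restatement of that invariant, assuming kPointerSize == 8:

```cpp
#include <cstddef>

// Sketch of the alignment rule behind the padreg pushes: frames built
// from 8-byte slots on csp must contain an even slot count so that the
// stack pointer stays 16-byte aligned.
constexpr std::size_t kPointerSize = 8;        // arm64 pointer width
constexpr std::size_t kStackHandlerSlots = 2;  // next-handler link + padding

static_assert((kStackHandlerSlots * kPointerSize) % 16 == 0,
              "stack handler must keep csp 16-byte aligned");
```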
diff --git a/deps/v8/src/arm64/constants-arm64.h b/deps/v8/src/arm64/constants-arm64.h
index f945830045..b02dd5d2d7 100644
--- a/deps/v8/src/arm64/constants-arm64.h
+++ b/deps/v8/src/arm64/constants-arm64.h
@@ -101,7 +101,6 @@ const int kIp1Code = 17;
const int kFramePointerRegCode = 29;
const int kLinkRegCode = 30;
const int kZeroRegCode = 31;
-const int kJSSPCode = 28;
const int kSPRegInternalCode = 63;
const unsigned kRegCodeMask = 0x1f;
const unsigned kShiftAmountWRegMask = 0x1f;
diff --git a/deps/v8/src/arm64/cpu-arm64.cc b/deps/v8/src/arm64/cpu-arm64.cc
index d4cb200de6..26ec06e094 100644
--- a/deps/v8/src/arm64/cpu-arm64.cc
+++ b/deps/v8/src/arm64/cpu-arm64.cc
@@ -31,7 +31,7 @@ class CacheLineSizes {
uint32_t ExtractCacheLineSize(int cache_line_size_shift) const {
// The cache type register holds the size of cache lines in words as a
// power of two.
- return 4 << ((cache_type_register_ >> cache_line_size_shift) & 0xf);
+ return 4 << ((cache_type_register_ >> cache_line_size_shift) & 0xF);
}
uint32_t cache_type_register_;
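
For context, the field extracted here is the log2 of the cache line size measured in 4-byte words, so the size in bytes is 4 << field. A stand-alone sketch with a made-up register value:

```cpp
#include <cstdint>
#include <cstdio>

// Sketch of the CTR_EL0-style decoding above; the register value below is
// fabricated for illustration.
static uint32_t CacheLineSizeBytes(uint32_t cache_type_register, int shift) {
  uint32_t log2_words = (cache_type_register >> shift) & 0xF;
  return 4u << log2_words;  // 4 bytes per word, 2^log2_words words per line.
}

int main() {
  // A field value of 4 means 2^4 = 16 words, i.e. 64-byte cache lines.
  std::printf("%u bytes\n", CacheLineSizeBytes(4u << 16, 16));
  return 0;
}
```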
diff --git a/deps/v8/src/arm64/deoptimizer-arm64.cc b/deps/v8/src/arm64/deoptimizer-arm64.cc
index 5f372eadd2..8269e8e50a 100644
--- a/deps/v8/src/arm64/deoptimizer-arm64.cc
+++ b/deps/v8/src/arm64/deoptimizer-arm64.cc
@@ -108,11 +108,9 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ PushCPURegList(saved_float_registers);
// We save all the registers except sp, lr and the masm scratches.
- CPURegList saved_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 27);
+ CPURegList saved_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 28);
saved_registers.Remove(ip0);
saved_registers.Remove(ip1);
- // TODO(arm): padding here can be replaced with jssp/x28 when allocatable.
- saved_registers.Combine(padreg);
saved_registers.Combine(fp);
DCHECK_EQ(saved_registers.Count() % 2, 0);
__ PushCPURegList(saved_registers);
@@ -220,8 +218,12 @@ void Deoptimizer::TableEntryGenerator::Generate() {
}
__ Pop(x4, padreg); // Restore deoptimizer object (class Deoptimizer).
- __ Ldr(__ StackPointer(),
- MemOperand(x4, Deoptimizer::caller_frame_top_offset()));
+ {
+ UseScratchRegisterScope temps(masm());
+ Register scratch = temps.AcquireX();
+ __ Ldr(scratch, MemOperand(x4, Deoptimizer::caller_frame_top_offset()));
+ __ Mov(__ StackPointer(), scratch);
+ }
// Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop,
@@ -324,7 +326,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
if (__ emit_debug_code()) {
    // Ensure the entry_id looks sensible, i.e. 0 <= entry_id < count().
__ Cmp(entry_id, count());
- __ Check(lo, kOffsetOutOfRange);
+ __ Check(lo, AbortReason::kOffsetOutOfRange);
}
}
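
The scratch-register detour around caller_frame_top_offset is forced by the instruction encoding: in A64 loads and stores, register number 31 in the Rt field denotes xzr, not sp, so "ldr sp, [...]" cannot be expressed; the value is loaded into a scratch and then moved, since mov can target sp. A sketch that makes the Rt ambiguity concrete; the base opcode is the standard LDR (64-bit, unsigned offset) encoding, and this is of course not V8's encoder:

```cpp
#include <cstdint>
#include <cstdio>

// LDR Xt, [Xn, #imm] base opcode; imm12 sits at bits 21..10, Rn at 9..5,
// Rt at 4..0. Passing rt == 31 yields "ldr xzr, ..." (a discarded load),
// which is why sp must be written via mov instead.
constexpr uint32_t kLdrXBase = 0xF9400000;
constexpr uint32_t EncodeLdr(unsigned rt, unsigned rn, unsigned imm12) {
  return kLdrXBase | (imm12 << 10) | (rn << 5) | rt;
}

int main() {
  std::printf("ldr x31 (= xzr), [x4]: 0x%08X\n", EncodeLdr(31, 4, 0));
  return 0;
}
```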
diff --git a/deps/v8/src/arm64/disasm-arm64.cc b/deps/v8/src/arm64/disasm-arm64.cc
index c9b2c9a4aa..41c654b214 100644
--- a/deps/v8/src/arm64/disasm-arm64.cc
+++ b/deps/v8/src/arm64/disasm-arm64.cc
@@ -256,27 +256,26 @@ void DisassemblingDecoder::VisitLogicalImmediate(Instruction* instr) {
bool DisassemblingDecoder::IsMovzMovnImm(unsigned reg_size, uint64_t value) {
DCHECK((reg_size == kXRegSizeInBits) ||
- ((reg_size == kWRegSizeInBits) && (value <= 0xffffffff)));
+ ((reg_size == kWRegSizeInBits) && (value <= 0xFFFFFFFF)));
// Test for movz: 16-bits set at positions 0, 16, 32 or 48.
- if (((value & 0xffffffffffff0000UL) == 0UL) ||
- ((value & 0xffffffff0000ffffUL) == 0UL) ||
- ((value & 0xffff0000ffffffffUL) == 0UL) ||
- ((value & 0x0000ffffffffffffUL) == 0UL)) {
+ if (((value & 0xFFFFFFFFFFFF0000UL) == 0UL) ||
+ ((value & 0xFFFFFFFF0000FFFFUL) == 0UL) ||
+ ((value & 0xFFFF0000FFFFFFFFUL) == 0UL) ||
+ ((value & 0x0000FFFFFFFFFFFFUL) == 0UL)) {
return true;
}
// Test for movn: NOT(16-bits set at positions 0, 16, 32 or 48).
if ((reg_size == kXRegSizeInBits) &&
- (((value & 0xffffffffffff0000UL) == 0xffffffffffff0000UL) ||
- ((value & 0xffffffff0000ffffUL) == 0xffffffff0000ffffUL) ||
- ((value & 0xffff0000ffffffffUL) == 0xffff0000ffffffffUL) ||
- ((value & 0x0000ffffffffffffUL) == 0x0000ffffffffffffUL))) {
+ (((value & 0xFFFFFFFFFFFF0000UL) == 0xFFFFFFFFFFFF0000UL) ||
+ ((value & 0xFFFFFFFF0000FFFFUL) == 0xFFFFFFFF0000FFFFUL) ||
+ ((value & 0xFFFF0000FFFFFFFFUL) == 0xFFFF0000FFFFFFFFUL) ||
+ ((value & 0x0000FFFFFFFFFFFFUL) == 0x0000FFFFFFFFFFFFUL))) {
return true;
}
- if ((reg_size == kWRegSizeInBits) &&
- (((value & 0xffff0000) == 0xffff0000) ||
- ((value & 0x0000ffff) == 0x0000ffff))) {
+ if ((reg_size == kWRegSizeInBits) && (((value & 0xFFFF0000) == 0xFFFF0000) ||
+ ((value & 0x0000FFFF) == 0x0000FFFF))) {
return true;
}
return false;
@@ -3332,8 +3331,6 @@ void DisassemblingDecoder::AppendRegisterNameToOutput(const CPURegister& reg) {
// Filter special registers
if (reg.IsX() && (reg.code() == 27)) {
AppendToOutput("cp");
- } else if (reg.IsX() && (reg.code() == 28)) {
- AppendToOutput("jssp");
} else if (reg.IsX() && (reg.code() == 29)) {
AppendToOutput("fp");
} else if (reg.IsX() && (reg.code() == 30)) {
@@ -3469,7 +3466,7 @@ int DisassemblingDecoder::SubstituteRegisterField(Instruction* instr,
case 'e':
// This is register Rm, but using a 4-bit specifier. Used in NEON
// by-element instructions.
- reg_num = (instr->Rm() & 0xf);
+ reg_num = (instr->Rm() & 0xF);
break;
case 'a':
reg_num = instr->Ra();
@@ -3545,8 +3542,6 @@ int DisassemblingDecoder::SubstituteRegisterField(Instruction* instr,
return field_len;
default:
UNREACHABLE();
- reg_type = CPURegister::kRegister;
- reg_size = kXRegSizeInBits;
}
if ((reg_type == CPURegister::kRegister) && (reg_num == kZeroRegCode) &&
@@ -3569,7 +3564,7 @@ int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr,
uint64_t imm = static_cast<uint64_t>(instr->ImmMoveWide())
<< (16 * instr->ShiftMoveWide());
if (format[5] == 'N') imm = ~imm;
- if (!instr->SixtyFourBits()) imm &= UINT64_C(0xffffffff);
+ if (!instr->SixtyFourBits()) imm &= UINT64_C(0xFFFFFFFF);
AppendToOutput("#0x%" PRIx64, imm);
} else {
DCHECK_EQ(format[5], 'L');
@@ -3696,7 +3691,7 @@ int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr,
vm_index = (vm_index << 1) | instr->NEONM();
}
AppendToOutput("%d", vm_index);
- return strlen("IVByElemIndex");
+ return static_cast<int>(strlen("IVByElemIndex"));
}
case 'I': { // INS element.
if (strncmp(format, "IVInsIndex", strlen("IVInsIndex")) == 0) {
@@ -3709,11 +3704,11 @@ int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr,
rn_index = imm4 >> tz;
if (strncmp(format, "IVInsIndex1", strlen("IVInsIndex1")) == 0) {
AppendToOutput("%d", rd_index);
- return strlen("IVInsIndex1");
+ return static_cast<int>(strlen("IVInsIndex1"));
} else if (strncmp(format, "IVInsIndex2",
strlen("IVInsIndex2")) == 0) {
AppendToOutput("%d", rn_index);
- return strlen("IVInsIndex2");
+ return static_cast<int>(strlen("IVInsIndex2"));
}
}
return 0;
@@ -3728,38 +3723,38 @@ int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr,
0) {
AppendToOutput("#0x%" PRIx32 " (%.4f)", instr->ImmNEONabcdefgh(),
instr->ImmNEONFP32());
- return strlen("IVMIImmFPSingle");
+ return static_cast<int>(strlen("IVMIImmFPSingle"));
} else if (strncmp(format, "IVMIImmFPDouble",
strlen("IVMIImmFPDouble")) == 0) {
AppendToOutput("#0x%" PRIx32 " (%.4f)", instr->ImmNEONabcdefgh(),
instr->ImmNEONFP64());
- return strlen("IVMIImmFPDouble");
+ return static_cast<int>(strlen("IVMIImmFPDouble"));
} else if (strncmp(format, "IVMIImm8", strlen("IVMIImm8")) == 0) {
uint64_t imm8 = instr->ImmNEONabcdefgh();
AppendToOutput("#0x%" PRIx64, imm8);
- return strlen("IVMIImm8");
+ return static_cast<int>(strlen("IVMIImm8"));
} else if (strncmp(format, "IVMIImm", strlen("IVMIImm")) == 0) {
uint64_t imm8 = instr->ImmNEONabcdefgh();
uint64_t imm = 0;
for (int i = 0; i < 8; ++i) {
if (imm8 & (1 << i)) {
- imm |= (UINT64_C(0xff) << (8 * i));
+ imm |= (UINT64_C(0xFF) << (8 * i));
}
}
AppendToOutput("#0x%" PRIx64, imm);
- return strlen("IVMIImm");
+ return static_cast<int>(strlen("IVMIImm"));
} else if (strncmp(format, "IVMIShiftAmt1",
strlen("IVMIShiftAmt1")) == 0) {
int cmode = instr->NEONCmode();
int shift_amount = 8 * ((cmode >> 1) & 3);
AppendToOutput("#%d", shift_amount);
- return strlen("IVMIShiftAmt1");
+ return static_cast<int>(strlen("IVMIShiftAmt1"));
} else if (strncmp(format, "IVMIShiftAmt2",
strlen("IVMIShiftAmt2")) == 0) {
int cmode = instr->NEONCmode();
int shift_amount = 8 << (cmode & 1);
AppendToOutput("#%d", shift_amount);
- return strlen("IVMIShiftAmt2");
+ return static_cast<int>(strlen("IVMIShiftAmt2"));
} else {
UNIMPLEMENTED();
return 0;
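The strlen() casts above all address the same thing: strlen() returns size_t while these substitute routines return int, so the narrowing must be spelled out to compile cleanly at high warning levels. A minimal sketch, assuming ordinary C++:

    #include <cstring>

    // Explicit narrowing; the implicit size_t -> int conversion warns
    // under -Wconversion and MSVC /W4.
    int FieldLength(const char* field) {
      return static_cast<int>(strlen(field));
    }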
diff --git a/deps/v8/src/arm64/eh-frame-arm64.cc b/deps/v8/src/arm64/eh-frame-arm64.cc
index 507cbd1c2b..48909d5b2d 100644
--- a/deps/v8/src/arm64/eh-frame-arm64.cc
+++ b/deps/v8/src/arm64/eh-frame-arm64.cc
@@ -9,7 +9,6 @@ namespace v8 {
namespace internal {
static const int kX0DwarfCode = 0;
-static const int kJsSpDwarfCode = 28;
static const int kFpDwarfCode = 29;
static const int kLrDwarfCode = 30;
static const int kCSpDwarfCode = 31;
@@ -29,13 +28,11 @@ void EhFrameWriter::WriteInitialStateInCie() {
// static
int EhFrameWriter::RegisterToDwarfCode(Register name) {
switch (name.code()) {
- case kRegCode_x28:
- return kJsSpDwarfCode;
case kRegCode_x29:
return kFpDwarfCode;
case kRegCode_x30:
return kLrDwarfCode;
- case kRegCode_x31:
+ case kSPRegInternalCode:
return kCSpDwarfCode;
case kRegCode_x0:
return kX0DwarfCode;
@@ -54,8 +51,6 @@ const char* EhFrameDisassembler::DwarfRegisterCodeToString(int code) {
return "fp";
case kLrDwarfCode:
return "lr";
- case kJsSpDwarfCode:
- return "jssp";
case kCSpDwarfCode:
return "csp"; // This could be zr as well
default:
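With x28 no longer reserved as jssp, only fp, lr and csp need special DWARF codes. A reduced sketch of the mapping after this patch, assuming plain C++ (the value 63 for kSPRegInternalCode is an assumption made for illustration, not something this diff states):

    // DWARF codes from the hunk above; 63 is a guess at V8's internal
    // encoding for csp.
    int RegisterToDwarfCodeSketch(int code) {
      const int kSPRegInternalCode = 63;  // assumed sentinel for csp
      if (code == 0) return 0;            // x0
      if (code == 29) return 29;          // fp
      if (code == 30) return 30;          // lr
      if (code == kSPRegInternalCode) return 31;  // csp
      return -1;  // unhandled in this sketch
    }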
diff --git a/deps/v8/src/arm64/frame-constants-arm64.h b/deps/v8/src/arm64/frame-constants-arm64.h
index 882a57a851..a337079786 100644
--- a/deps/v8/src/arm64/frame-constants-arm64.h
+++ b/deps/v8/src/arm64/frame-constants-arm64.h
@@ -8,10 +8,31 @@
namespace v8 {
namespace internal {
+// The layout of an EntryFrame is as follows:
+//
+// slot Entry frame
+// +---------------------+-----------------------
+// 0 | bad frame pointer | <-- frame ptr
+// | (0xFFF.. FF) |
+// |- - - - - - - - - - -|
+// 1 | stack frame marker |
+// | (ENTRY) |
+// |- - - - - - - - - - -|
+// 2 | stack frame marker |
+// | (0) |
+// |- - - - - - - - - - -|
+// 3 | C entry FP |
+// |- - - - - - - - - - -|
+// 4 | JS entry frame |
+// | marker |
+// |- - - - - - - - - - -|
+// 5 | padding | <-- stack ptr
+// -----+---------------------+-----------------------
+//
class EntryFrameConstants : public AllStatic {
public:
- static const int kCallerFPOffset =
- -(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
+ static const int kCallerFPOffset = -3 * kPointerSize;
+ static const int kFixedFrameSize = 6 * kPointerSize;
};
class ExitFrameConstants : public TypedFrameConstants {
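The two new constants follow directly from the diagram: the C entry FP sits three slots below the frame pointer, and the whole frame spans six slots, keeping it 16-byte aligned. A host-side check, assuming kPointerSize == 8 on arm64:

    static_assert(-3 * 8 == -24, "kCallerFPOffset: C entry FP is 3 slots below fp");
    static_assert((6 * 8) % 16 == 0, "kFixedFrameSize: 6 slots stay 16-byte aligned");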
diff --git a/deps/v8/src/arm64/instructions-arm64-constants.cc b/deps/v8/src/arm64/instructions-arm64-constants.cc
index 5f1b49fbdc..0a15287417 100644
--- a/deps/v8/src/arm64/instructions-arm64-constants.cc
+++ b/deps/v8/src/arm64/instructions-arm64-constants.cc
@@ -21,26 +21,26 @@ namespace internal {
// then move this code back into instructions-arm64.cc with the same types
// that client code uses.
-extern const uint16_t kFP16PositiveInfinity = 0x7c00;
-extern const uint16_t kFP16NegativeInfinity = 0xfc00;
-extern const uint32_t kFP32PositiveInfinity = 0x7f800000;
-extern const uint32_t kFP32NegativeInfinity = 0xff800000;
-extern const uint64_t kFP64PositiveInfinity = 0x7ff0000000000000UL;
-extern const uint64_t kFP64NegativeInfinity = 0xfff0000000000000UL;
+extern const uint16_t kFP16PositiveInfinity = 0x7C00;
+extern const uint16_t kFP16NegativeInfinity = 0xFC00;
+extern const uint32_t kFP32PositiveInfinity = 0x7F800000;
+extern const uint32_t kFP32NegativeInfinity = 0xFF800000;
+extern const uint64_t kFP64PositiveInfinity = 0x7FF0000000000000UL;
+extern const uint64_t kFP64NegativeInfinity = 0xFFF0000000000000UL;
// This value is a signalling NaN as both a double and as a float (taking the
// least-significant word).
-extern const uint64_t kFP64SignallingNaN = 0x7ff000007f800001;
-extern const uint32_t kFP32SignallingNaN = 0x7f800001;
+extern const uint64_t kFP64SignallingNaN = 0x7FF000007F800001;
+extern const uint32_t kFP32SignallingNaN = 0x7F800001;
// A similar value, but as a quiet NaN.
-extern const uint64_t kFP64QuietNaN = 0x7ff800007fc00001;
-extern const uint32_t kFP32QuietNaN = 0x7fc00001;
+extern const uint64_t kFP64QuietNaN = 0x7FF800007FC00001;
+extern const uint32_t kFP32QuietNaN = 0x7FC00001;
// The default NaN values (for FPCR.DN=1).
-extern const uint64_t kFP64DefaultNaN = 0x7ff8000000000000UL;
-extern const uint32_t kFP32DefaultNaN = 0x7fc00000;
-extern const uint16_t kFP16DefaultNaN = 0x7e00;
+extern const uint64_t kFP64DefaultNaN = 0x7FF8000000000000UL;
+extern const uint32_t kFP32DefaultNaN = 0x7FC00000;
+extern const uint16_t kFP16DefaultNaN = 0x7E00;
} // namespace internal
} // namespace v8
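These are the standard IEEE-754 encodings, only recased; a quick host-side check, assuming standard C++ (the asserts are illustrative):

    #include <cassert>
    #include <cstdint>
    #include <cstring>
    #include <limits>

    int main() {
      float inf = std::numeric_limits<float>::infinity();
      uint32_t bits;
      memcpy(&bits, &inf, sizeof(bits));  // type-pun safely via memcpy
      assert(bits == 0x7F800000u);        // kFP32PositiveInfinity
      // 0x7FC00000 adds the quiet bit (mantissa MSB) on top of the
      // infinity exponent: the default NaN used when FPCR.DN=1.
      return 0;
    }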
diff --git a/deps/v8/src/arm64/instrument-arm64.cc b/deps/v8/src/arm64/instrument-arm64.cc
index 9fc2adb6f7..8e9cce7197 100644
--- a/deps/v8/src/arm64/instrument-arm64.cc
+++ b/deps/v8/src/arm64/instrument-arm64.cc
@@ -189,8 +189,8 @@ void Instrument::DumpEventMarker(unsigned marker) {
// line.
static Counter* counter = GetCounter("Instruction");
- fprintf(output_stream_, "# %c%c @ %" PRId64 "\n", marker & 0xff,
- (marker >> 8) & 0xff, counter->count());
+ fprintf(output_stream_, "# %c%c @ %" PRId64 "\n", marker & 0xFF,
+ (marker >> 8) & 0xFF, counter->count());
}
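The marker is two ASCII characters packed into one integer, printed low byte first; a tiny sketch, assuming plain C++:

    #include <cstdio>

    int main() {
      unsigned marker = ('B' << 8) | 'R';  // hypothetical "RB" marker
      printf("# %c%c\n", marker & 0xFF, (marker >> 8) & 0xFF);  // "# RB"
    }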
diff --git a/deps/v8/src/arm64/interface-descriptors-arm64.cc b/deps/v8/src/arm64/interface-descriptors-arm64.cc
index 300d42d565..17b058bd01 100644
--- a/deps/v8/src/arm64/interface-descriptors-arm64.cc
+++ b/deps/v8/src/arm64/interface-descriptors-arm64.cc
@@ -45,8 +45,6 @@ const Register LoadDescriptor::SlotRegister() { return x0; }
const Register LoadWithVectorDescriptor::VectorRegister() { return x3; }
-const Register LoadICProtoArrayDescriptor::HandlerRegister() { return x4; }
-
const Register StoreDescriptor::ReceiverRegister() { return x1; }
const Register StoreDescriptor::NameRegister() { return x2; }
const Register StoreDescriptor::ValueRegister() { return x0; }
@@ -209,6 +207,11 @@ void TransitionElementsKindDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void AbortJSDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {x1};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
diff --git a/deps/v8/src/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
index 9bef2b378b..0861551d89 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
@@ -1048,7 +1048,6 @@ void MacroAssembler::AlignAndSetCSPForFrame() {
DCHECK_GE(sp_alignment, 16);
DCHECK(base::bits::IsPowerOfTwo(sp_alignment));
Bic(csp, StackPointer(), sp_alignment - 1);
- SetStackPointer(csp);
}
void TurboAssembler::BumpSystemStackPointer(const Operand& space) {
@@ -1140,22 +1139,6 @@ void MacroAssembler::SmiUntagToFloat(VRegister dst, Register src) {
Scvtf(dst, src, kSmiShift);
}
-
-void MacroAssembler::SmiTagAndPush(Register src) {
- STATIC_ASSERT((static_cast<unsigned>(kSmiShift) == kWRegSizeInBits) &&
- (static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits) &&
- (kSmiTag == 0));
- Push(src.W(), wzr);
-}
-
-
-void MacroAssembler::SmiTagAndPush(Register src1, Register src2) {
- STATIC_ASSERT((static_cast<unsigned>(kSmiShift) == kWRegSizeInBits) &&
- (static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits) &&
- (kSmiTag == 0));
- Push(src1.W(), wzr, src2.W(), wzr);
-}
-
void TurboAssembler::JumpIfSmi(Register value, Label* smi_label,
Label* not_smi_label) {
STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
@@ -1222,7 +1205,7 @@ void MacroAssembler::ObjectTag(Register tagged_obj, Register obj) {
if (emit_debug_code()) {
Label ok;
Tbz(obj, 0, &ok);
- Abort(kObjectTagged);
+ Abort(AbortReason::kObjectTagged);
Bind(&ok);
}
Orr(tagged_obj, obj, kHeapObjectTag);
@@ -1234,7 +1217,7 @@ void MacroAssembler::ObjectUntag(Register untagged_obj, Register obj) {
if (emit_debug_code()) {
Label ok;
Tbnz(obj, 0, &ok);
- Abort(kObjectNotTagged);
+ Abort(AbortReason::kObjectNotTagged);
Bind(&ok);
}
Bic(untagged_obj, obj, kHeapObjectTag);
@@ -1246,7 +1229,10 @@ void TurboAssembler::Push(Handle<HeapObject> handle) {
UseScratchRegisterScope temps(this);
Register tmp = temps.AcquireX();
Mov(tmp, Operand(handle));
- Push(tmp);
+ // This is only used in test-heap.cc, for generating code that is not
+ // executed. Push a padding slot together with the handle here, to
+ // satisfy the alignment requirement.
+ Push(padreg, tmp);
}
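The padreg pairing exists because csp, unlike the old jssp, must stay 16-byte aligned at every access, so single slots are always pushed as pairs. A sketch of the invariant, assuming plain C++ (not V8 code; Push(padreg, tmp) places its first operand at the higher address):

    #include <cassert>
    #include <cstdint>

    void PushOneSlot(uint64_t*& sp, uint64_t value, uint64_t pad) {
      sp -= 2;        // always move sp by a whole 16-byte pair
      sp[1] = pad;    // padding slot, higher address
      sp[0] = value;  // the real value on top
    }

    int main() {
      alignas(16) uint64_t stack[8];
      uint64_t* sp = stack + 8;
      PushOneSlot(sp, 42, 0);
      assert(reinterpret_cast<uintptr_t>(sp) % 16 == 0);  // csp invariant
    }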
void TurboAssembler::Push(Smi* smi) {
@@ -1355,21 +1341,31 @@ void TurboAssembler::Drop(const Register& count, uint64_t unit_size) {
void TurboAssembler::DropArguments(const Register& count,
ArgumentsCountMode mode) {
+ int extra_slots = 1; // Padding slot.
if (mode == kCountExcludesReceiver) {
- UseScratchRegisterScope temps(this);
- Register tmp = temps.AcquireX();
- Add(tmp, count, 1);
- Drop(tmp);
- } else {
- Drop(count);
+ // Add a slot for the receiver.
+ ++extra_slots;
+ }
+ UseScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireX();
+ Add(tmp, count, extra_slots);
+ Bic(tmp, tmp, 1);
+ Drop(tmp, kXRegSize);
+}
+
+void TurboAssembler::DropArguments(int64_t count, ArgumentsCountMode mode) {
+ if (mode == kCountExcludesReceiver) {
+ // Add a slot for the receiver.
+ ++count;
}
+ Drop(RoundUp(count, 2), kXRegSize);
}
-void TurboAssembler::DropSlots(int64_t count, uint64_t unit_size) {
- Drop(count, unit_size);
+void TurboAssembler::DropSlots(int64_t count) {
+ Drop(RoundUp(count, 2), kXRegSize);
}
-void TurboAssembler::PushArgument(const Register& arg) { Push(arg); }
+void TurboAssembler::PushArgument(const Register& arg) { Push(padreg, arg); }
void MacroAssembler::DropBySMI(const Register& count_smi, uint64_t unit_size) {
DCHECK(unit_size == 0 || base::bits::IsPowerOfTwo(unit_size));
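The Add/Bic pair in DropArguments above is a branch-free round-up: (count + extra) & ~1 equals RoundUp(count, 2) when extra is 1 (padding only) and RoundUp(count + 1, 2) when extra is 2 (padding plus receiver). A host-side check, assuming plain C++:

    #include <cassert>
    #include <cstdint>

    uint64_t SlotsToDrop(uint64_t count, bool count_excludes_receiver) {
      uint64_t extra = count_excludes_receiver ? 2 : 1;
      // Mirrors Add(tmp, count, extra); Bic(tmp, tmp, 1).
      return (count + extra) & ~uint64_t{1};
    }

    int main() {
      assert(SlotsToDrop(3, false) == 4);  // 3 slots pad out to 4
      assert(SlotsToDrop(4, false) == 4);  // already even, unchanged
      assert(SlotsToDrop(3, true) == 4);   // 3 args + receiver = 4 slots
    }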
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc
index 5f69f0e1e2..3869046f74 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/macro-assembler-arm64.cc
@@ -44,7 +44,6 @@ TurboAssembler::TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
#endif
tmp_list_(DefaultTmpList()),
fptmp_list_(DefaultFPTmpList()),
- sp_(jssp),
use_real_aborts_(true) {
if (create_code_object == CodeObjectRequired::kYes) {
code_object_ =
@@ -160,7 +159,7 @@ void TurboAssembler::LogicalMacro(const Register& rd, const Register& rn,
UNREACHABLE();
}
} else if ((rd.Is64Bits() && (immediate == -1L)) ||
- (rd.Is32Bits() && (immediate == 0xffffffffL))) {
+ (rd.Is32Bits() && (immediate == 0xFFFFFFFFL))) {
switch (op) {
case AND:
Mov(rd, rn);
@@ -252,15 +251,15 @@ void TurboAssembler::Mov(const Register& rd, uint64_t imm) {
// Generic immediate case. Imm will be represented by
// [imm3, imm2, imm1, imm0], where each imm is 16 bits.
// A move-zero or move-inverted is generated for the first non-zero or
- // non-0xffff immX, and a move-keep for subsequent non-zero immX.
+ // non-0xFFFF immX, and a move-keep for subsequent non-zero immX.
uint64_t ignored_halfword = 0;
bool invert_move = false;
- // If the number of 0xffff halfwords is greater than the number of 0x0000
+ // If the number of 0xFFFF halfwords is greater than the number of 0x0000
// halfwords, it's more efficient to use move-inverted.
if (CountClearHalfWords(~imm, reg_size) >
CountClearHalfWords(imm, reg_size)) {
- ignored_halfword = 0xffffL;
+ ignored_halfword = 0xFFFFL;
invert_move = true;
}
@@ -274,11 +273,11 @@ void TurboAssembler::Mov(const Register& rd, uint64_t imm) {
DCHECK_EQ(reg_size % 16, 0);
bool first_mov_done = false;
for (int i = 0; i < (rd.SizeInBits() / 16); i++) {
- uint64_t imm16 = (imm >> (16 * i)) & 0xffffL;
+ uint64_t imm16 = (imm >> (16 * i)) & 0xFFFFL;
if (imm16 != ignored_halfword) {
if (!first_mov_done) {
if (invert_move) {
- movn(temp, (~imm16) & 0xffffL, 16 * i);
+ movn(temp, (~imm16) & 0xFFFFL, 16 * i);
} else {
movz(temp, imm16, 16 * i);
}
@@ -356,18 +355,18 @@ void TurboAssembler::Mov(const Register& rd, const Operand& operand,
void TurboAssembler::Movi16bitHelper(const VRegister& vd, uint64_t imm) {
DCHECK(is_uint16(imm));
- int byte1 = (imm & 0xff);
- int byte2 = ((imm >> 8) & 0xff);
+ int byte1 = (imm & 0xFF);
+ int byte2 = ((imm >> 8) & 0xFF);
if (byte1 == byte2) {
movi(vd.Is64Bits() ? vd.V8B() : vd.V16B(), byte1);
} else if (byte1 == 0) {
movi(vd, byte2, LSL, 8);
} else if (byte2 == 0) {
movi(vd, byte1);
- } else if (byte1 == 0xff) {
- mvni(vd, ~byte2 & 0xff, LSL, 8);
- } else if (byte2 == 0xff) {
- mvni(vd, ~byte1 & 0xff);
+ } else if (byte1 == 0xFF) {
+ mvni(vd, ~byte2 & 0xFF, LSL, 8);
+ } else if (byte2 == 0xFF) {
+ mvni(vd, ~byte1 & 0xFF);
} else {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireW();
@@ -382,11 +381,11 @@ void TurboAssembler::Movi32bitHelper(const VRegister& vd, uint64_t imm) {
uint8_t bytes[sizeof(imm)];
memcpy(bytes, &imm, sizeof(imm));
- // All bytes are either 0x00 or 0xff.
+ // All bytes are either 0x00 or 0xFF.
{
bool all0orff = true;
for (int i = 0; i < 4; ++i) {
- if ((bytes[i] != 0) && (bytes[i] != 0xff)) {
+ if ((bytes[i] != 0) && (bytes[i] != 0xFF)) {
all0orff = false;
break;
}
@@ -400,47 +399,47 @@ void TurboAssembler::Movi32bitHelper(const VRegister& vd, uint64_t imm) {
// Of the 4 bytes, only one byte is non-zero.
for (int i = 0; i < 4; i++) {
- if ((imm & (0xff << (i * 8))) == imm) {
+ if ((imm & (0xFF << (i * 8))) == imm) {
movi(vd, bytes[i], LSL, i * 8);
return;
}
}
- // Of the 4 bytes, only one byte is not 0xff.
+ // Of the 4 bytes, only one byte is not 0xFF.
for (int i = 0; i < 4; i++) {
- uint32_t mask = ~(0xff << (i * 8));
+ uint32_t mask = ~(0xFF << (i * 8));
if ((imm & mask) == mask) {
- mvni(vd, ~bytes[i] & 0xff, LSL, i * 8);
+ mvni(vd, ~bytes[i] & 0xFF, LSL, i * 8);
return;
}
}
// Immediate is of the form 0x00MMFFFF.
- if ((imm & 0xff00ffff) == 0x0000ffff) {
+ if ((imm & 0xFF00FFFF) == 0x0000FFFF) {
movi(vd, bytes[2], MSL, 16);
return;
}
// Immediate is of the form 0x0000MMFF.
- if ((imm & 0xffff00ff) == 0x000000ff) {
+ if ((imm & 0xFFFF00FF) == 0x000000FF) {
movi(vd, bytes[1], MSL, 8);
return;
}
// Immediate is of the form 0xFFMM0000.
- if ((imm & 0xff00ffff) == 0xff000000) {
- mvni(vd, ~bytes[2] & 0xff, MSL, 16);
+ if ((imm & 0xFF00FFFF) == 0xFF000000) {
+ mvni(vd, ~bytes[2] & 0xFF, MSL, 16);
return;
}
// Immediate is of the form 0xFFFFMM00.
- if ((imm & 0xffff00ff) == 0xffff0000) {
- mvni(vd, ~bytes[1] & 0xff, MSL, 8);
+ if ((imm & 0xFFFF00FF) == 0xFFFF0000) {
+ mvni(vd, ~bytes[1] & 0xFF, MSL, 8);
return;
}
// Top and bottom 16-bits are equal.
- if (((imm >> 16) & 0xffff) == (imm & 0xffff)) {
- Movi16bitHelper(vd.Is64Bits() ? vd.V4H() : vd.V8H(), imm & 0xffff);
+ if (((imm >> 16) & 0xFFFF) == (imm & 0xFFFF)) {
+ Movi16bitHelper(vd.Is64Bits() ? vd.V4H() : vd.V8H(), imm & 0xFFFF);
return;
}
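The MSL ("shift ones in") forms fill the low bits with ones, which is why each mask above pairs one byte position with an all-ones tail. One of the tests in isolation, assuming plain C++:

    #include <cassert>
    #include <cstdint>

    // 0x00MMFFFF matches movi vd, #MM, MSL #16.
    bool MatchesMoviMsl16(uint32_t imm) {
      return (imm & 0xFF00FFFF) == 0x0000FFFF;
    }

    int main() {
      assert(MatchesMoviMsl16(0x0042FFFF));   // byte 0x42, ones below
      assert(!MatchesMoviMsl16(0x0042FF00));  // tail not all ones
    }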
@@ -454,12 +453,12 @@ void TurboAssembler::Movi32bitHelper(const VRegister& vd, uint64_t imm) {
}
void TurboAssembler::Movi64bitHelper(const VRegister& vd, uint64_t imm) {
- // All bytes are either 0x00 or 0xff.
+ // All bytes are either 0x00 or 0xFF.
{
bool all0orff = true;
for (int i = 0; i < 8; ++i) {
- int byteval = (imm >> (i * 8)) & 0xff;
- if (byteval != 0 && byteval != 0xff) {
+ int byteval = (imm >> (i * 8)) & 0xFF;
+ if (byteval != 0 && byteval != 0xFF) {
all0orff = false;
break;
}
@@ -471,8 +470,8 @@ void TurboAssembler::Movi64bitHelper(const VRegister& vd, uint64_t imm) {
}
// Top and bottom 32-bits are equal.
- if (((imm >> 32) & 0xffffffff) == (imm & 0xffffffff)) {
- Movi32bitHelper(vd.Is64Bits() ? vd.V2S() : vd.V4S(), imm & 0xffffffff);
+ if (((imm >> 32) & 0xFFFFFFFF) == (imm & 0xFFFFFFFF)) {
+ Movi32bitHelper(vd.Is64Bits() ? vd.V2S() : vd.V4S(), imm & 0xFFFFFFFF);
return;
}
@@ -547,7 +546,7 @@ unsigned TurboAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
DCHECK_EQ(reg_size % 8, 0);
int count = 0;
for (unsigned i = 0; i < (reg_size / 16); i++) {
- if ((imm & 0xffff) == 0) {
+ if ((imm & 0xFFFF) == 0) {
count++;
}
imm >>= 16;
@@ -563,9 +562,8 @@ bool TurboAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
}
-
// The movn instruction can generate immediates containing an arbitrary 16-bit
-// half-word, with remaining bits set, eg. 0xffff1234, 0xffff1234ffffffff.
+// half-word, with remaining bits set, e.g. 0xFFFF1234, 0xFFFF1234FFFFFFFF.
bool TurboAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
return IsImmMovz(~imm, reg_size);
}
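movz suits values that are mostly 0x0000 halfwords; movn materializes the bitwise NOT, so it suits values that are mostly 0xFFFF halfwords. A worked example, assuming plain C++ and reusing the counting logic from the hunk above:

    #include <cassert>
    #include <cstdint>

    unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size) {
      unsigned count = 0;
      for (unsigned i = 0; i < reg_size / 16; i++) {
        if ((imm & 0xFFFF) == 0) count++;
        imm >>= 16;
      }
      return count;
    }

    int main() {
      // 0xFFFF1234FFFFFFFF has no clear halfwords, but its complement
      // 0x0000EDCB00000000 has three, so a single movn can encode it.
      assert(CountClearHalfWords(~UINT64_C(0xFFFF1234FFFFFFFF), 64) == 3);
    }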
@@ -1375,7 +1373,7 @@ void TurboAssembler::Poke(const CPURegister& src, const Operand& offset) {
DCHECK_GE(offset.ImmediateValue(), 0);
} else if (emit_debug_code()) {
Cmp(xzr, offset);
- Check(le, kStackAccessBelowStackPointer);
+ Check(le, AbortReason::kStackAccessBelowStackPointer);
}
Str(src, MemOperand(StackPointer(), offset));
@@ -1387,7 +1385,7 @@ void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
DCHECK_GE(offset.ImmediateValue(), 0);
} else if (emit_debug_code()) {
Cmp(xzr, offset);
- Check(le, kStackAccessBelowStackPointer);
+ Check(le, AbortReason::kStackAccessBelowStackPointer);
}
Ldr(dst, MemOperand(StackPointer(), offset));
@@ -1426,7 +1424,7 @@ void MacroAssembler::PushCalleeSavedRegisters() {
stp(d8, d9, tos);
stp(x29, x30, tos);
- stp(x27, x28, tos); // x28 = jssp
+ stp(x27, x28, tos);
stp(x25, x26, tos);
stp(x23, x24, tos);
stp(x21, x22, tos);
@@ -1448,7 +1446,7 @@ void MacroAssembler::PopCalleeSavedRegisters() {
ldp(x21, x22, tos);
ldp(x23, x24, tos);
ldp(x25, x26, tos);
- ldp(x27, x28, tos); // x28 = jssp
+ ldp(x27, x28, tos);
ldp(x29, x30, tos);
ldp(d8, d9, tos);
@@ -1479,7 +1477,7 @@ void TurboAssembler::AssertStackConsistency() {
{ DontEmitDebugCodeScope dont_emit_debug_code_scope(this);
// Restore StackPointer().
sub(StackPointer(), csp, StackPointer());
- Abort(kTheCurrentStackPointerIsBelowCsp);
+ Abort(AbortReason::kTheCurrentStackPointerIsBelowCsp);
}
bind(&ok);
@@ -1531,7 +1529,7 @@ void TurboAssembler::CopyDoubleWords(Register dst, Register src, Register count,
Subs(pointer1, pointer1, pointer2);
B(lt, &pointer1_below_pointer2);
Cmp(pointer1, count);
- Check(ge, kOffsetOutOfRange);
+ Check(ge, AbortReason::kOffsetOutOfRange);
Bind(&pointer1_below_pointer2);
Add(pointer1, pointer1, pointer2);
}
@@ -1595,7 +1593,7 @@ void TurboAssembler::AssertFPCRState(Register fpcr) {
B(eq, &done);
Bind(&unexpected_mode);
- Abort(kUnexpectedFPCRMode);
+ Abort(AbortReason::kUnexpectedFPCRMode);
Bind(&done);
}
@@ -1632,7 +1630,7 @@ void TurboAssembler::Move(Register dst, Register src) { Mov(dst, src); }
void TurboAssembler::Move(Register dst, Handle<HeapObject> x) { Mov(dst, x); }
void TurboAssembler::Move(Register dst, Smi* src) { Mov(dst, src); }
-void TurboAssembler::AssertSmi(Register object, BailoutReason reason) {
+void TurboAssembler::AssertSmi(Register object, AbortReason reason) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
Tst(object, kSmiTagMask);
@@ -1640,7 +1638,7 @@ void TurboAssembler::AssertSmi(Register object, BailoutReason reason) {
}
}
-void MacroAssembler::AssertNotSmi(Register object, BailoutReason reason) {
+void MacroAssembler::AssertNotSmi(Register object, AbortReason reason) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
Tst(object, kSmiTagMask);
@@ -1650,44 +1648,44 @@ void MacroAssembler::AssertNotSmi(Register object, BailoutReason reason) {
void MacroAssembler::AssertFixedArray(Register object) {
if (emit_debug_code()) {
- AssertNotSmi(object, kOperandIsASmiAndNotAFixedArray);
+ AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotAFixedArray);
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
CompareObjectType(object, temp, temp, FIXED_ARRAY_TYPE);
- Check(eq, kOperandIsNotAFixedArray);
+ Check(eq, AbortReason::kOperandIsNotAFixedArray);
}
}
void MacroAssembler::AssertFunction(Register object) {
if (emit_debug_code()) {
- AssertNotSmi(object, kOperandIsASmiAndNotAFunction);
+ AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotAFunction);
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
CompareObjectType(object, temp, temp, JS_FUNCTION_TYPE);
- Check(eq, kOperandIsNotAFunction);
+ Check(eq, AbortReason::kOperandIsNotAFunction);
}
}
void MacroAssembler::AssertBoundFunction(Register object) {
if (emit_debug_code()) {
- AssertNotSmi(object, kOperandIsASmiAndNotABoundFunction);
+ AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotABoundFunction);
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
CompareObjectType(object, temp, temp, JS_BOUND_FUNCTION_TYPE);
- Check(eq, kOperandIsNotABoundFunction);
+ Check(eq, AbortReason::kOperandIsNotABoundFunction);
}
}
void MacroAssembler::AssertGeneratorObject(Register object) {
if (!emit_debug_code()) return;
- AssertNotSmi(object, kOperandIsASmiAndNotAGeneratorObject);
+ AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotAGeneratorObject);
// Load map
UseScratchRegisterScope temps(this);
@@ -1704,7 +1702,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
bind(&do_check);
// Restore generator object to register and perform assertion
- Check(eq, kOperandIsNotAGeneratorObject);
+ Check(eq, AbortReason::kOperandIsNotAGeneratorObject);
}
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
@@ -1716,7 +1714,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
JumpIfRoot(object, Heap::kUndefinedValueRootIndex, &done_checking);
Ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
- Assert(eq, kExpectedUndefinedOrCell);
+ Assert(eq, AbortReason::kExpectedUndefinedOrCell);
Bind(&done_checking);
}
}
@@ -1726,7 +1724,7 @@ void TurboAssembler::AssertPositiveOrZero(Register value) {
Label done;
int sign_bit = value.Is64Bits() ? kXSignBit : kWSignBit;
Tbz(value, sign_bit, &done);
- Abort(kUnexpectedNegativeValue);
+ Abort(AbortReason::kUnexpectedNegativeValue);
Bind(&done);
}
}
@@ -1855,72 +1853,14 @@ void TurboAssembler::CallCFunction(Register function, int num_of_reg_args,
DCHECK_LE(num_of_double_args + num_of_reg_args, 2);
}
- // We rely on the frame alignment being 16 bytes, which means we never need
- // to align the CSP by an unknown number of bytes and we always know the delta
- // between the stack pointer and the frame pointer.
- DCHECK_EQ(ActivationFrameAlignment(), 16);
-
- // If the stack pointer is not csp, we need to derive an aligned csp from the
- // current stack pointer.
- const Register old_stack_pointer = StackPointer();
- if (!csp.Is(old_stack_pointer)) {
- AssertStackConsistency();
-
- int sp_alignment = ActivationFrameAlignment();
- // The current stack pointer is a callee saved register, and is preserved
- // across the call.
- DCHECK(kCalleeSaved.IncludesAliasOf(old_stack_pointer));
-
- // If more than eight arguments are passed to the function, we expect the
- // ninth argument onwards to have been placed on the csp-based stack
- // already. We assume csp already points to the last stack-passed argument
- // in that case.
- // Otherwise, align and synchronize the system stack pointer with jssp.
- if (num_of_reg_args <= kRegisterPassedArguments) {
- Bic(csp, old_stack_pointer, sp_alignment - 1);
- }
- SetStackPointer(csp);
- }
-
// Call directly. The function called cannot cause a GC, or allow preemption,
// so the return address in the link register stays correct.
Call(function);
- if (csp.Is(old_stack_pointer)) {
- if (num_of_reg_args > kRegisterPassedArguments) {
- // Drop the register passed arguments.
- int claim_slots = RoundUp(num_of_reg_args - kRegisterPassedArguments, 2);
- Drop(claim_slots);
- }
- } else {
- DCHECK(jssp.Is(old_stack_pointer));
- if (emit_debug_code()) {
- UseScratchRegisterScope temps(this);
- Register temp = temps.AcquireX();
-
- if (num_of_reg_args > kRegisterPassedArguments) {
- // We don't need to drop stack arguments, as the stack pointer will be
- // jssp when returning from this function. However, in debug builds, we
- // can check that jssp is as expected.
- int claim_slots =
- RoundUp(num_of_reg_args - kRegisterPassedArguments, 2);
-
- // Check jssp matches the previous value on the stack.
- Ldr(temp, MemOperand(csp, claim_slots * kPointerSize));
- Cmp(jssp, temp);
- Check(eq, kTheStackWasCorruptedByMacroAssemblerCall);
- } else {
- // Because the stack pointer must be aligned on a 16-byte boundary, the
- // aligned csp can be up to 12 bytes below the jssp. This is the case
- // where we only pushed one W register on top of an aligned jssp.
- Sub(temp, csp, old_stack_pointer);
- // We want temp <= 0 && temp >= -12.
- Cmp(temp, 0);
- Ccmp(temp, -12, NFlag, le);
- Check(ge, kTheStackWasCorruptedByMacroAssemblerCall);
- }
- }
- SetStackPointer(old_stack_pointer);
+ if (num_of_reg_args > kRegisterPassedArguments) {
+ // Drop the register passed arguments.
+ int claim_slots = RoundUp(num_of_reg_args - kRegisterPassedArguments, 2);
+ Drop(claim_slots);
}
}
@@ -1997,10 +1937,10 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode) {
// Addresses are 48 bits so we never need to load the upper 16 bits.
uint64_t imm = reinterpret_cast<uint64_t>(target);
// If we don't use ARM tagged addresses, the 16 higher bits must be 0.
- DCHECK_EQ((imm >> 48) & 0xffff, 0);
- movz(temp, (imm >> 0) & 0xffff, 0);
- movk(temp, (imm >> 16) & 0xffff, 16);
- movk(temp, (imm >> 32) & 0xffff, 32);
+ DCHECK_EQ((imm >> 48) & 0xFFFF, 0);
+ movz(temp, (imm >> 0) & 0xFFFF, 0);
+ movk(temp, (imm >> 16) & 0xFFFF, 16);
+ movk(temp, (imm >> 32) & 0xFFFF, 32);
} else {
Ldr(temp, Immediate(reinterpret_cast<intptr_t>(target), rmode));
}
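The movz/movk sequence relies on AArch64 virtual addresses fitting in 48 bits, so only three 16-bit chunks are ever loaded. A host-side sketch of what the three instructions assemble, assuming plain C++:

    #include <cassert>
    #include <cstdint>

    uint64_t BuildAddress(uint64_t imm) {
      assert(((imm >> 48) & 0xFFFF) == 0);  // mirrors the DCHECK above
      uint64_t reg = (imm >> 0) & 0xFFFF;   // movz temp, #imm16_0
      reg |= ((imm >> 16) & 0xFFFF) << 16;  // movk temp, #imm16_1, lsl #16
      reg |= ((imm >> 32) & 0xFFFF) << 32;  // movk temp, #imm16_2, lsl #32
      return reg;
    }

    int main() {
      assert(BuildAddress(UINT64_C(0x0000123456789ABC)) ==
             UINT64_C(0x0000123456789ABC));
    }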
@@ -2160,23 +2100,32 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
// after we drop current frame. We add kPointerSize to count the receiver
// argument which is not included into formal parameters count.
Register dst_reg = scratch0;
- add(dst_reg, fp, Operand(caller_args_count_reg, LSL, kPointerSizeLog2));
- add(dst_reg, dst_reg,
- Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
+ Add(dst_reg, fp, Operand(caller_args_count_reg, LSL, kPointerSizeLog2));
+ Add(dst_reg, dst_reg, StandardFrameConstants::kCallerSPOffset + kPointerSize);
+ // Round dst_reg up to a multiple of 16 bytes, so that we overwrite any
+ // potential padding.
+ Add(dst_reg, dst_reg, 15);
+ Bic(dst_reg, dst_reg, 15);
Register src_reg = caller_args_count_reg;
// Calculate the end of source area. +kPointerSize is for the receiver.
if (callee_args_count.is_reg()) {
- add(src_reg, jssp, Operand(callee_args_count.reg(), LSL, kPointerSizeLog2));
- add(src_reg, src_reg, Operand(kPointerSize));
+ Add(src_reg, StackPointer(),
+ Operand(callee_args_count.reg(), LSL, kPointerSizeLog2));
+ Add(src_reg, src_reg, kPointerSize);
} else {
- add(src_reg, jssp,
- Operand((callee_args_count.immediate() + 1) * kPointerSize));
+ Add(src_reg, StackPointer(),
+ (callee_args_count.immediate() + 1) * kPointerSize);
}
+ // Round src_reg up to a multiple of 16 bytes, so we include any potential
+ // padding in the copy.
+ Add(src_reg, src_reg, 15);
+ Bic(src_reg, src_reg, 15);
+
if (FLAG_debug_code) {
Cmp(src_reg, dst_reg);
- Check(lo, kStackAccessBelowStackPointer);
+ Check(lo, AbortReason::kStackAccessBelowStackPointer);
}
// Restore caller's frame pointer and return address now as they will be
@@ -2196,12 +2145,11 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
Ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
Str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
bind(&entry);
- Cmp(jssp, src_reg);
+ Cmp(StackPointer(), src_reg);
B(ne, &loop);
// Leave current frame.
- Mov(jssp, dst_reg);
- SetStackPointer(jssp);
+ Mov(StackPointer(), dst_reg);
AssertStackConsistency();
}
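Add(reg, reg, 15) followed by Bic(reg, reg, 15) is a plain round-up to the next 16-byte boundary, which is what lets the copy cover any padding slot. A check, assuming standard C++:

    #include <cassert>
    #include <cstdint>

    uint64_t RoundUp16(uint64_t x) { return (x + 15) & ~uint64_t{15}; }

    int main() {
      assert(RoundUp16(40) == 48);  // sweeps in a potential 8-byte pad slot
      assert(RoundUp16(48) == 48);  // already aligned, unchanged
    }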
@@ -2412,12 +2360,12 @@ void TurboAssembler::TryConvertDoubleToInt64(Register result,
// the modulo operation on an integer register so we convert to a 64-bit
// integer.
//
- // Fcvtzs will saturate to INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff)
+ // Fcvtzs will saturate to INT64_MIN (0x800...00) or INT64_MAX (0x7FF...FF)
// when the double is out of range. NaNs and infinities will be converted to 0
// (as ECMA-262 requires).
Fcvtzs(result.X(), double_input);
- // The values INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff) are not
+ // The values INT64_MIN (0x800...00) or INT64_MAX (0x7FF...FF) are not
// representable using a double, so if the result is one of those then we know
// that saturation occurred, and we need to manually handle the conversion.
//
@@ -2437,17 +2385,6 @@ void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
// contain our truncated int32 result.
TryConvertDoubleToInt64(result, double_input, &done);
- const Register old_stack_pointer = StackPointer();
- if (csp.Is(old_stack_pointer)) {
- // This currently only happens during compiler-unittest. If it arises
- // during regular code generation the DoubleToI stub should be updated to
- // cope with csp and have an extra parameter indicating which stack pointer
- // it should use.
- Push(jssp, xzr); // Push xzr to maintain csp required 16-bytes alignment.
- Mov(jssp, csp);
- SetStackPointer(jssp);
- }
-
// If we fell through then inline version didn't succeed - call stub instead.
Push(lr, double_input);
@@ -2458,13 +2395,6 @@ void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
DCHECK_EQ(xzr.SizeInBytes(), double_input.SizeInBytes());
Pop(xzr, lr); // xzr to drop the double input on the stack.
- if (csp.Is(old_stack_pointer)) {
- Mov(csp, jssp);
- SetStackPointer(csp);
- AssertStackConsistency();
- Pop(xzr, jssp);
- }
-
Bind(&done);
// Keep our invariant that the upper 32 bits are zero.
Uxtw(result.W(), result.W());
@@ -2472,7 +2402,7 @@ void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
void TurboAssembler::Prologue() {
Push(lr, fp, cp, x1);
- Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
+ Add(fp, StackPointer(), StandardFrameConstants::kFixedFrameSizeFromFp);
}
void TurboAssembler::EnterFrame(StackFrame::Type type) {
@@ -2481,15 +2411,14 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
Register code_reg = temps.AcquireX();
if (type == StackFrame::INTERNAL) {
- DCHECK(jssp.Is(StackPointer()));
Mov(type_reg, StackFrame::TypeToMarker(type));
Mov(code_reg, Operand(CodeObject()));
Push(lr, fp, type_reg, code_reg);
- Add(fp, jssp, InternalFrameConstants::kFixedFrameSizeFromFp);
- // jssp[4] : lr
- // jssp[3] : fp
- // jssp[1] : type
- // jssp[0] : [code object]
+ Add(fp, StackPointer(), InternalFrameConstants::kFixedFrameSizeFromFp);
+ // sp[4] : lr
+ // sp[3] : fp
+ // sp[1] : type
+ // sp[0] : [code object]
} else if (type == StackFrame::WASM_COMPILED) {
DCHECK(csp.Is(StackPointer()));
Mov(type_reg, StackFrame::TypeToMarker(type));
@@ -2502,7 +2431,6 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
// csp[0] : for alignment
} else {
DCHECK_EQ(type, StackFrame::CONSTRUCT);
- DCHECK(jssp.Is(StackPointer()));
Mov(type_reg, StackFrame::TypeToMarker(type));
// Users of this frame type push a context pointer after the type field,
@@ -2511,11 +2439,12 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
// The context pointer isn't part of the fixed frame, so add an extra slot
// to account for it.
- Add(fp, jssp, TypedFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
- // jssp[3] : lr
- // jssp[2] : fp
- // jssp[1] : type
- // jssp[0] : cp
+ Add(fp, StackPointer(),
+ TypedFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
+ // sp[3] : lr
+ // sp[2] : fp
+ // sp[1] : type
+ // sp[0] : cp
}
}
@@ -2526,10 +2455,9 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) {
AssertStackConsistency();
Pop(fp, lr);
} else {
- DCHECK(jssp.Is(StackPointer()));
// Drop the execution stack down to the frame pointer and restore
// the caller frame pointer and return address.
- Mov(jssp, fp);
+ Mov(StackPointer(), fp);
AssertStackConsistency();
Pop(fp, lr);
}
@@ -2560,7 +2488,6 @@ void MacroAssembler::ExitFrameRestoreFPRegs() {
void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
int extra_space,
StackFrame::Type frame_type) {
- DCHECK(jssp.Is(StackPointer()));
DCHECK(frame_type == StackFrame::EXIT ||
frame_type == StackFrame::BUILTIN_EXIT);
@@ -2576,7 +2503,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
// fp[-8]: STUB marker
// fp[-16]: Space reserved for SPOffset.
// fp[-24]: CodeObject()
- // jssp -> fp[-32]: padding
+ // sp -> fp[-32]: padding
STATIC_ASSERT((2 * kPointerSize) == ExitFrameConstants::kCallerSPOffset);
STATIC_ASSERT((1 * kPointerSize) == ExitFrameConstants::kCallerPCOffset);
STATIC_ASSERT((0 * kPointerSize) == ExitFrameConstants::kCallerFPOffset);
@@ -2610,23 +2537,11 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
// fp[-16]: Space reserved for SPOffset.
// fp[-24]: CodeObject()
// fp[-24 - fp_size]: Saved doubles (if save_doubles is true).
- // jssp[8]: Extra space reserved for caller (if extra_space != 0).
- // jssp -> jssp[0]: Space reserved for the return address.
+ // sp[8]: Extra space reserved for caller (if extra_space != 0).
+ // sp -> sp[0]: Space reserved for the return address.
- // Align and synchronize the system stack pointer with jssp.
- AlignAndSetCSPForFrame();
DCHECK(csp.Is(StackPointer()));
- // fp[8]: CallerPC (lr)
- // fp -> fp[0]: CallerFP (old fp)
- // fp[-8]: STUB marker
- // fp[-16]: Space reserved for SPOffset.
- // fp[-24]: CodeObject()
- // fp[-24 - fp_size]: Saved doubles (if save_doubles is true).
- // csp[8]: Memory reserved for the caller if extra_space != 0.
- // Alignment padding, if necessary.
- // csp -> csp[0]: Space reserved for the return address.
-
// ExitFrame::GetStateForFramePointer expects to find the return address at
// the memory address immediately below the pointer stored in SPOffset.
// It is not safe to derive much else from SPOffset, because the size of the
@@ -2638,7 +2553,8 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
// Leave the current exit frame.
void MacroAssembler::LeaveExitFrame(bool restore_doubles,
- const Register& scratch) {
+ const Register& scratch,
+ const Register& scratch2) {
DCHECK(csp.Is(StackPointer()));
if (restore_doubles) {
@@ -2652,9 +2568,10 @@ void MacroAssembler::LeaveExitFrame(bool restore_doubles,
if (emit_debug_code()) {
// Also emit debug code to clear the cp in the top frame.
+ Mov(scratch2, Operand(Context::kInvalidContext));
Mov(scratch, Operand(ExternalReference(IsolateAddressId::kContextAddress,
isolate())));
- Str(xzr, MemOperand(scratch));
+ Str(scratch2, MemOperand(scratch));
}
// Clear the frame pointer from the top frame.
Mov(scratch, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress,
@@ -2665,8 +2582,7 @@ void MacroAssembler::LeaveExitFrame(bool restore_doubles,
// fp[8]: CallerPC (lr)
// fp -> fp[0]: CallerFP (old fp)
// fp[...]: The rest of the frame.
- Mov(jssp, fp);
- SetStackPointer(jssp);
+ Mov(csp, fp);
AssertStackConsistency();
Pop(fp, lr);
}
@@ -2830,14 +2746,12 @@ void MacroAssembler::PushSafepointRegisters() {
int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
// Make sure the safepoint registers list is what we expect.
- DCHECK_EQ(CPURegList::GetSafepointSavedRegisters().list(), 0x6ffcffff);
+ DCHECK_EQ(CPURegList::GetSafepointSavedRegisters().list(), 0x6FFCFFFF);
// Safepoint registers are stored contiguously on the stack, but not all the
// registers are saved. The following registers are excluded:
// - x16 and x17 (ip0 and ip1) because they shouldn't be preserved outside of
// the macro assembler.
- // - x28 (jssp) because JS stack pointer doesn't need to be included in
- // safepoint registers.
// - x31 (csp) because the system stack pointer doesn't need to be included
// in safepoint registers.
//
@@ -2845,12 +2759,9 @@ int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
// safepoint register slots.
if ((reg_code >= 0) && (reg_code <= 15)) {
return reg_code;
- } else if ((reg_code >= 18) && (reg_code <= 27)) {
+ } else if ((reg_code >= 18) && (reg_code <= 30)) {
// Skip ip0 and ip1.
return reg_code - 2;
- } else if ((reg_code == 29) || (reg_code == 30)) {
- // Also skip jssp.
- return reg_code - 3;
} else {
// This register has no safepoint register slot.
UNREACHABLE();
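With jssp gone, only ip0/ip1 (x16/x17) and csp lack safepoint slots, so x18 through x30 now shift down by a constant two. The new mapping in isolation, assuming plain C++:

    #include <cassert>

    int SafepointIndexSketch(int reg_code) {
      if (reg_code >= 0 && reg_code <= 15) return reg_code;
      if (reg_code >= 18 && reg_code <= 30) return reg_code - 2;  // skip ip0/ip1
      return -1;  // ip0, ip1 and csp have no slot
    }

    int main() {
      assert(SafepointIndexSketch(18) == 16);  // first register after the gap
      assert(SafepointIndexSketch(29) == 27);  // fp gets an ordinary slot now
    }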
@@ -2909,7 +2820,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
Label ok;
Tst(scratch, kPointerSize - 1);
B(eq, &ok);
- Abort(kUnalignedCellInWriteBarrier);
+ Abort(AbortReason::kUnalignedCellInWriteBarrier);
Bind(&ok);
}
@@ -2975,11 +2886,9 @@ void TurboAssembler::CallRecordWriteStub(
Register fp_mode_parameter(callable.descriptor().GetRegisterParameter(
RecordWriteDescriptor::kFPMode));
- Push(object);
- Push(address);
+ Push(object, address);
- Pop(slot_parameter);
- Pop(object_parameter);
+ Pop(slot_parameter, object_parameter);
Mov(isolate_parameter, ExternalReference::isolate_address(isolate()));
Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
@@ -3008,7 +2917,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
Ldr(temp, MemOperand(address));
Cmp(temp, value);
- Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+ Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
}
// First, check if a write barrier is even needed. The tests below
@@ -3052,7 +2961,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
}
}
-void TurboAssembler::Assert(Condition cond, BailoutReason reason) {
+void TurboAssembler::Assert(Condition cond, AbortReason reason) {
if (emit_debug_code()) {
Check(cond, reason);
}
@@ -3060,14 +2969,14 @@ void TurboAssembler::Assert(Condition cond, BailoutReason reason) {
void MacroAssembler::AssertRegisterIsRoot(Register reg,
Heap::RootListIndex index,
- BailoutReason reason) {
+ AbortReason reason) {
if (emit_debug_code()) {
CompareRoot(reg, index);
Check(eq, reason);
}
}
-void TurboAssembler::Check(Condition cond, BailoutReason reason) {
+void TurboAssembler::Check(Condition cond, AbortReason reason) {
Label ok;
B(cond, &ok);
Abort(reason);
@@ -3075,10 +2984,10 @@ void TurboAssembler::Check(Condition cond, BailoutReason reason) {
Bind(&ok);
}
-void TurboAssembler::Abort(BailoutReason reason) {
+void TurboAssembler::Abort(AbortReason reason) {
#ifdef DEBUG
RecordComment("Abort message: ");
- RecordComment(GetBailoutReason(reason));
+ RecordComment(GetAbortReason(reason));
if (FLAG_trap_on_abort) {
Brk(0);
@@ -3086,13 +2995,6 @@ void TurboAssembler::Abort(BailoutReason reason) {
}
#endif
- // Abort is used in some contexts where csp is the stack pointer. In order to
- // simplify the CallRuntime code, make sure that jssp is the stack pointer.
- // There is no risk of register corruption here because Abort doesn't return.
- Register old_stack_pointer = StackPointer();
- SetStackPointer(jssp);
- Mov(jssp, old_stack_pointer);
-
// We need some scratch registers for the MacroAssembler, so make sure we have
// some. This is safe here because Abort never returns.
RegList old_tmp_list = TmpList()->list();
@@ -3128,11 +3030,10 @@ void TurboAssembler::Abort(BailoutReason reason) {
{
BlockPoolsScope scope(this);
Bind(&msg_address);
- EmitStringData(GetBailoutReason(reason));
+ EmitStringData(GetAbortReason(reason));
}
}
- SetStackPointer(old_stack_pointer);
TmpList()->set_list(old_tmp_list);
}
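The BailoutReason to AbortReason rename also explains the qualified spellings at every call site: AbortReason appears to be a scoped enum, whose enumerators do not leak into the surrounding namespace. A sketch of the difference, with hypothetical names:

    enum PlainReason { kOldStyle };         // kOldStyle visible unqualified
    enum class ScopedReason { kNewStyle };  // must write ScopedReason::kNewStyle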
@@ -3266,7 +3167,7 @@ void MacroAssembler::PrintfNoPreserve(const char * format,
// We don't pass any arguments on the stack, but we still need to align the C
// stack pointer to a 16-byte boundary for PCS compliance.
if (!csp.Is(StackPointer())) {
- Bic(csp, StackPointer(), 0xf);
+ Bic(csp, StackPointer(), 0xF);
}
CallPrintf(arg_count, pcs);
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.h b/deps/v8/src/arm64/macro-assembler-arm64.h
index 035558fd81..47c08f2622 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64.h
@@ -216,12 +216,6 @@ class TurboAssembler : public Assembler {
bool allow_macro_instructions() const { return allow_macro_instructions_; }
#endif
- // Set the current stack pointer, but don't generate any code.
- inline void SetStackPointer(const Register& stack_pointer) {
- DCHECK(!TmpList()->IncludesAliasOf(stack_pointer));
- sp_ = stack_pointer;
- }
-
// Activation support.
void EnterFrame(StackFrame::Type type);
void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) {
@@ -574,17 +568,18 @@ class TurboAssembler : public Assembler {
// Calls Abort(msg) if the condition cond is not satisfied.
// Use --debug_code to enable.
- void Assert(Condition cond, BailoutReason reason);
+ void Assert(Condition cond, AbortReason reason);
- void AssertSmi(Register object, BailoutReason reason = kOperandIsNotASmi);
+ void AssertSmi(Register object,
+ AbortReason reason = AbortReason::kOperandIsNotASmi);
// Like Assert(), but always enabled.
- void Check(Condition cond, BailoutReason reason);
+ void Check(Condition cond, AbortReason reason);
inline void Debug(const char* message, uint32_t code, Instr params = BREAK);
// Print a message to stderr and abort execution.
- void Abort(BailoutReason reason);
+ void Abort(AbortReason reason);
// If emit_debug_code() is true, emit a run-time check to ensure that
// StackPointer() does not point below the system stack pointer.
@@ -619,8 +614,8 @@ class TurboAssembler : public Assembler {
static CPURegList DefaultTmpList();
static CPURegList DefaultFPTmpList();
- // Return the current stack pointer, as set by SetStackPointer.
- inline const Register& StackPointer() const { return sp_; }
+ // Return the stack pointer.
+ inline const Register& StackPointer() const { return csp; }
// Move macros.
inline void Mvn(const Register& rd, uint64_t imm);
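Since csp is now the only stack pointer, StackPointer() collapses from a member lookup to a constant reference. A shape-only sketch, assuming stand-in types rather than V8's:

    struct Register {};
    const Register csp{};

    struct Before {
      const Register& StackPointer() const { return sp_; }  // switchable
      Register sp_;
    };

    struct After {
      const Register& StackPointer() const { return csp; }  // always csp
    };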
@@ -711,25 +706,22 @@ class TurboAssembler : public Assembler {
inline void Drop(int64_t count, uint64_t unit_size = kXRegSize);
inline void Drop(const Register& count, uint64_t unit_size = kXRegSize);
- // Drop arguments from stack without actually accessing memory.
- // This will currently drop 'count' arguments from the stack.
+ // Drop 'count' arguments from the stack, rounded up to a multiple of two,
+ // without actually accessing memory.
// We assume the size of the arguments is the pointer size.
// An optional mode argument is passed, which can indicate we need to
// explicitly add the receiver to the count.
- // TODO(arm64): Update this to round up the number of bytes dropped to
- // a multiple of 16, so that we can remove jssp.
enum ArgumentsCountMode { kCountIncludesReceiver, kCountExcludesReceiver };
inline void DropArguments(const Register& count,
ArgumentsCountMode mode = kCountIncludesReceiver);
+ inline void DropArguments(int64_t count,
+ ArgumentsCountMode mode = kCountIncludesReceiver);
- // Drop slots from stack without actually accessing memory.
- // This will currently drop 'count' slots of the given size from the stack.
- // TODO(arm64): Update this to round up the number of bytes dropped to
- // a multiple of 16, so that we can remove jssp.
- inline void DropSlots(int64_t count, uint64_t unit_size = kXRegSize);
+ // Drop 'count' slots from the stack, rounded up to a multiple of two,
+ // without actually accessing memory.
+ inline void DropSlots(int64_t count);
- // Push a single argument to the stack.
- // TODO(arm64): Update this to push a padding slot above the argument.
+ // Push a single argument, with padding, to the stack.
inline void PushArgument(const Register& arg);
// Re-synchronizes the system stack pointer (csp) with the current stack
@@ -769,8 +761,7 @@ class TurboAssembler : public Assembler {
LS_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION
- // Push or pop up to 4 registers of the same width to or from the stack,
- // using the current stack pointer as set by SetStackPointer.
+ // Push or pop up to 4 registers of the same width to or from the stack.
//
// If an argument register is 'NoReg', all further arguments are also assumed
// to be 'NoReg', and are thus not pushed or popped.
@@ -784,9 +775,8 @@ class TurboAssembler : public Assembler {
// It is not valid to pop into the same register more than once in one
// operation, not even into the zero register.
//
- // If the current stack pointer (as set by SetStackPointer) is csp, then it
- // must be aligned to 16 bytes on entry and the total size of the specified
- // registers must also be a multiple of 16 bytes.
+ // The stack pointer must be aligned to 16 bytes on entry and the total size
+ // of the specified registers must also be a multiple of 16 bytes.
//
// Even if the current stack pointer is not the system stack pointer (csp),
// Push (and derived methods) will still modify the system stack pointer in
@@ -1291,9 +1281,6 @@ class TurboAssembler : public Assembler {
CPURegList tmp_list_;
CPURegList fptmp_list_;
- // The register to use as a stack pointer for stack operations.
- Register sp_;
-
bool use_real_aborts_;
// Helps resolve branching to labels potentially out of range.
@@ -1707,10 +1694,6 @@ class MacroAssembler : public TurboAssembler {
//
// Note that registers are not checked for invalid values. Use this method
// only if you know that the GC won't try to examine the values on the stack.
- //
- // This method must not be called unless the current stack pointer (as set by
- // SetStackPointer) is the system stack pointer (csp), and is aligned to
- // ActivationFrameAlignment().
void PushCalleeSavedRegisters();
// Restore the callee-saved registers (as defined by AAPCS64).
@@ -1719,10 +1702,6 @@ class MacroAssembler : public TurboAssembler {
// thus come from higher addresses.
// Floating-point registers are popped after general-purpose registers, and
// thus come from higher addresses.
- //
- // This method must not be called unless the current stack pointer (as set by
- // SetStackPointer) is the system stack pointer (csp), and is aligned to
- // ActivationFrameAlignment().
void PopCalleeSavedRegisters();
// Align csp for a frame, as per ActivationFrameAlignment, and make it the
@@ -1752,10 +1731,6 @@ class MacroAssembler : public TurboAssembler {
inline void SmiUntagToDouble(VRegister dst, Register src);
inline void SmiUntagToFloat(VRegister dst, Register src);
- // Tag and push in one step.
- inline void SmiTagAndPush(Register src);
- inline void SmiTagAndPush(Register src1, Register src2);
-
inline void JumpIfNotSmi(Register value, Label* not_smi_label);
inline void JumpIfBothSmi(Register value1, Register value2,
Label* both_smi_label,
@@ -1771,7 +1746,8 @@ class MacroAssembler : public TurboAssembler {
Label* not_smi_label);
// Abort execution if argument is a smi, enabled via --debug-code.
- void AssertNotSmi(Register object, BailoutReason reason = kOperandIsASmi);
+ void AssertNotSmi(Register object,
+ AbortReason reason = AbortReason::kOperandIsASmi);
inline void ObjectTag(Register tagged_obj, Register obj);
inline void ObjectUntag(Register untagged_obj, Register obj);
@@ -1948,19 +1924,14 @@ class MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// Frames.
- // The stack pointer has to switch between csp and jssp when setting up and
- // destroying the exit frame. Hence preserving/restoring the registers is
- // slightly more complicated than simple push/pop operations.
void ExitFramePreserveFPRegs();
void ExitFrameRestoreFPRegs();
// Enter exit frame. Exit frames are used when calling C code from generated
// (JavaScript) code.
//
- // The stack pointer must be jssp on entry, and will be set to csp by this
- // function. The frame pointer is also configured, but the only other
- // registers modified by this function are the provided scratch register, and
- // jssp.
+ // The only registers modified by this function are the provided scratch
+ // register, the frame pointer and the stack pointer.
//
// The 'extra_space' argument can be used to allocate some space in the exit
// frame that will be ignored by the GC. This space will be reserved in the
@@ -1989,10 +1960,10 @@ class MacroAssembler : public TurboAssembler {
// * Preserved doubles are restored (if restore_doubles is true).
// * The frame information is removed from the top frame.
// * The exit frame is dropped.
- // * The stack pointer is reset to jssp.
//
// The stack pointer must be csp on entry.
- void LeaveExitFrame(bool save_doubles, const Register& scratch);
+ void LeaveExitFrame(bool save_doubles, const Register& scratch,
+ const Register& scratch2);
// Load the global proxy from the current context.
void LoadGlobalProxy(Register dst) {
@@ -2042,9 +2013,8 @@ class MacroAssembler : public TurboAssembler {
// Debugging.
void AssertRegisterIsRoot(
- Register reg,
- Heap::RootListIndex index,
- BailoutReason reason = kRegisterDidNotMatchExpectedRoot);
+ Register reg, Heap::RootListIndex index,
+ AbortReason reason = AbortReason::kRegisterDidNotMatchExpectedRoot);
// Abort if the specified register contains the invalid color bit pattern.
// The pattern must be in bits [1:0] of 'reg' register.
diff --git a/deps/v8/src/arm64/simulator-arm64.cc b/deps/v8/src/arm64/simulator-arm64.cc
index c01741c31e..d0c464dfbe 100644
--- a/deps/v8/src/arm64/simulator-arm64.cc
+++ b/deps/v8/src/arm64/simulator-arm64.cc
@@ -98,13 +98,6 @@ SimSystemRegister SimSystemRegister::DefaultValueFor(SystemRegister id) {
}
-void Simulator::Initialize(Isolate* isolate) {
- if (isolate->simulator_initialized()) return;
- isolate->set_simulator_initialized(true);
- ExternalReference::set_redirector(isolate, &RedirectExternalReference);
-}
-
-
// Get the active Simulator for the current thread.
Simulator* Simulator::current(Isolate* isolate) {
Isolate::PerIsolateThreadData* isolate_data =
@@ -124,8 +117,7 @@ Simulator* Simulator::current(Isolate* isolate) {
return sim;
}
-
-void Simulator::CallVoid(byte* entry, CallArgument* args) {
+void Simulator::CallImpl(byte* entry, CallArgument* args) {
int index_x = 0;
int index_d = 0;
@@ -167,63 +159,6 @@ void Simulator::CallVoid(byte* entry, CallArgument* args) {
set_sp(original_stack);
}
-
-int64_t Simulator::CallInt64(byte* entry, CallArgument* args) {
- CallVoid(entry, args);
- return xreg(0);
-}
-
-
-double Simulator::CallDouble(byte* entry, CallArgument* args) {
- CallVoid(entry, args);
- return dreg(0);
-}
-
-
-int64_t Simulator::CallJS(byte* entry,
- Object* new_target,
- Object* target,
- Object* revc,
- int64_t argc,
- Object*** argv) {
- CallArgument args[] = {
- CallArgument(new_target),
- CallArgument(target),
- CallArgument(revc),
- CallArgument(argc),
- CallArgument(argv),
- CallArgument::End()
- };
- return CallInt64(entry, args);
-}
-
-
-int64_t Simulator::CallRegExp(byte* entry,
- String* input,
- int64_t start_offset,
- const byte* input_start,
- const byte* input_end,
- int* output,
- int64_t output_size,
- Address stack_base,
- int64_t direct_call,
- Isolate* isolate) {
- CallArgument args[] = {
- CallArgument(input),
- CallArgument(start_offset),
- CallArgument(input_start),
- CallArgument(input_end),
- CallArgument(output),
- CallArgument(output_size),
- CallArgument(stack_base),
- CallArgument(direct_call),
- CallArgument(isolate),
- CallArgument::End()
- };
- return CallInt64(entry, args);
-}
-
-
void Simulator::CheckPCSComplianceAndRun() {
// Adjust JS-based stack limit to C-based stack limit.
isolate_->stack_guard()->AdjustStackLimitForSimulator();
@@ -350,6 +285,11 @@ uintptr_t Simulator::StackLimit(uintptr_t c_limit) const {
return stack_limit_ + 1024;
}
+void Simulator::SetRedirectInstruction(Instruction* instruction) {
+ instruction->SetInstructionBits(
+ HLT | Assembler::ImmException(kImmExceptionIsRedirectedCall));
+}
+
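All the Redirection plumbing deleted below moved into architecture-independent simulator code; the only arm64-specific piece left is planting an HLT whose immediate marks a redirected host call. A sketch of the encoding, assuming plain C++ (the marker value is made up, not V8's):

    #include <cstdint>

    // AArch64 HLT #imm16 encodes as 0xD4400000 | (imm16 << 5).
    constexpr uint32_t kHltBase = 0xD4400000u;
    constexpr uint32_t kImmRedirectSketch = 0xCA11u;  // assumed marker value

    constexpr uint32_t RedirectInstr() {
      return kHltBase | (kImmRedirectSketch << 5);
    }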
Simulator::Simulator(Decoder<DispatchingDecoderVisitor>* decoder,
Isolate* isolate, FILE* stream)
: decoder_(decoder),
@@ -392,7 +332,7 @@ void Simulator::Init(FILE* stream) {
stack_limit_ = stack_ + stack_protection_size_;
uintptr_t tos = stack_ + stack_size_ - stack_protection_size_;
// The stack pointer must be 16-byte aligned.
- set_sp(tos & ~0xfUL);
+ set_sp(tos & ~0xFUL);
stream_ = stream;
print_disasm_ = new PrintDisassembler(stream_);
@@ -412,11 +352,11 @@ void Simulator::ResetState() {
// Reset registers to 0.
pc_ = nullptr;
for (unsigned i = 0; i < kNumberOfRegisters; i++) {
- set_xreg(i, 0xbadbeef);
+ set_xreg(i, 0xBADBEEF);
}
for (unsigned i = 0; i < kNumberOfVRegisters; i++) {
// Set FP registers to a value that is NaN in both 32-bit and 64-bit FP.
- set_dreg_bits(i, 0x7ff000007f800001UL);
+ set_dreg_bits(i, 0x7FF000007F800001UL);
}
// Returning to address 0 exits the Simulator.
set_lr(kEndOfSimAddress);
@@ -458,82 +398,6 @@ void Simulator::RunFrom(Instruction* start) {
}
-// When the generated code calls an external reference we need to catch that in
-// the simulator. The external reference will be a function compiled for the
-// host architecture. We need to call that function instead of trying to
-// execute it with the simulator. We do that by redirecting the external
-// reference to a svc (Supervisor Call) instruction that is handled by
-// the simulator. We write the original destination of the jump just at a known
-// offset from the svc instruction so the simulator knows what to call.
-class Redirection {
- public:
- Redirection(Isolate* isolate, void* external_function,
- ExternalReference::Type type)
- : external_function_(external_function), type_(type), next_(nullptr) {
- redirect_call_.SetInstructionBits(
- HLT | Assembler::ImmException(kImmExceptionIsRedirectedCall));
- next_ = isolate->simulator_redirection();
- // TODO(all): Simulator flush I cache
- isolate->set_simulator_redirection(this);
- }
-
- void* address_of_redirect_call() {
- return reinterpret_cast<void*>(&redirect_call_);
- }
-
- template <typename T>
- T external_function() { return reinterpret_cast<T>(external_function_); }
-
- ExternalReference::Type type() { return type_; }
-
- static Redirection* Get(Isolate* isolate, void* external_function,
- ExternalReference::Type type) {
- Redirection* current = isolate->simulator_redirection();
- for (; current != nullptr; current = current->next_) {
- if (current->external_function_ == external_function &&
- current->type_ == type) {
- return current;
- }
- }
- return new Redirection(isolate, external_function, type);
- }
-
- static Redirection* FromHltInstruction(Instruction* redirect_call) {
- char* addr_of_hlt = reinterpret_cast<char*>(redirect_call);
- char* addr_of_redirection =
- addr_of_hlt - offsetof(Redirection, redirect_call_);
- return reinterpret_cast<Redirection*>(addr_of_redirection);
- }
-
- static void* ReverseRedirection(int64_t reg) {
- Redirection* redirection =
- FromHltInstruction(reinterpret_cast<Instruction*>(reg));
- return redirection->external_function<void*>();
- }
-
- static void DeleteChain(Redirection* redirection) {
- while (redirection != nullptr) {
- Redirection* next = redirection->next_;
- delete redirection;
- redirection = next;
- }
- }
-
- private:
- void* external_function_;
- Instruction redirect_call_;
- ExternalReference::Type type_;
- Redirection* next_;
-};
-
-
-// static
-void Simulator::TearDown(base::CustomMatcherHashMap* i_cache,
- Redirection* first) {
- Redirection::DeleteChain(first);
-}
-
-
// Calls into the V8 runtime are based on this very simple interface.
// Note: To be able to return two values from some calls the code in runtime.cc
// uses the ObjectPair structure.
@@ -561,20 +425,20 @@ typedef void (*SimulatorRuntimeProfilingGetterCall)(int64_t arg0, int64_t arg1,
void* arg2);
void Simulator::DoRuntimeCall(Instruction* instr) {
- Redirection* redirection = Redirection::FromHltInstruction(instr);
+ Redirection* redirection = Redirection::FromInstruction(instr);
// The called C code might itself call simulated code, so any
// caller-saved registers (including lr) could still be clobbered by a
// redirected call.
Instruction* return_address = lr();
- int64_t external = redirection->external_function<int64_t>();
+ int64_t external =
+ reinterpret_cast<int64_t>(redirection->external_function());
- TraceSim("Call to host function at %p\n",
- redirection->external_function<void*>());
+ TraceSim("Call to host function at %p\n", redirection->external_function());
// SP must be 16-byte-aligned at the call interface.
- bool stack_alignment_exception = ((sp() & 0xf) != 0);
+ bool stack_alignment_exception = ((sp() & 0xF) != 0);
if (stack_alignment_exception) {
TraceSim(" with unaligned stack 0x%016" PRIx64 ".\n", sp());
FATAL("ALIGNMENT EXCEPTION");
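The 0xF mask above is a plain 16-byte alignment test; for example:

    // sp() == ...0x10 -> (sp & 0xF) == 0x0: aligned, the call proceeds
    // sp() == ...0x18 -> (sp & 0xF) == 0x8: misaligned, FATAL above fires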
@@ -761,28 +625,17 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
set_pc(return_address);
}
-
-void* Simulator::RedirectExternalReference(Isolate* isolate,
- void* external_function,
- ExternalReference::Type type) {
- base::LockGuard<base::Mutex> lock_guard(
- isolate->simulator_redirection_mutex());
- Redirection* redirection = Redirection::Get(isolate, external_function, type);
- return redirection->address_of_redirect_call();
-}
-
-
const char* Simulator::xreg_names[] = {
-"x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
-"x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
-"ip0", "ip1", "x18", "x19", "x20", "x21", "x22", "x23",
-"x24", "x25", "x26", "cp", "jssp", "fp", "lr", "xzr", "csp"};
+ "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8",
+ "x9", "x10", "x11", "x12", "x13", "x14", "x15", "ip0", "ip1",
+ "x18", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26",
+ "cp", "x28", "fp", "lr", "xzr", "csp"};
const char* Simulator::wreg_names[] = {
-"w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7",
-"w8", "w9", "w10", "w11", "w12", "w13", "w14", "w15",
-"w16", "w17", "w18", "w19", "w20", "w21", "w22", "w23",
-"w24", "w25", "w26", "wcp", "wjssp", "wfp", "wlr", "wzr", "wcsp"};
+ "w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8",
+ "w9", "w10", "w11", "w12", "w13", "w14", "w15", "w16", "w17",
+ "w18", "w19", "w20", "w21", "w22", "w23", "w24", "w25", "w26",
+ "wcp", "w28", "wfp", "wlr", "wzr", "wcsp"};
const char* Simulator::sreg_names[] = {
"s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
@@ -1294,9 +1147,9 @@ void Simulator::PrintRegister(unsigned code, Reg31Mode r31mode) {
// a floating-point interpretation or a memory access annotation).
void Simulator::PrintVRegisterRawHelper(unsigned code, int bytes, int lsb) {
// The template for vector types:
- // "# v{code}: 0xffeeddccbbaa99887766554433221100".
+ // "# v{code}: 0xFFEEDDCCBBAA99887766554433221100".
// An example with bytes=4 and lsb=8:
- // "# v{code}: 0xbbaa9988 ".
+ // "# v{code}: 0xBBAA9988 ".
fprintf(stream_, "# %s%5s: %s", clr_vreg_name, VRegNameForCode(code),
clr_vreg_value);
@@ -1393,8 +1246,8 @@ void Simulator::PrintVRegisterFPHelper(unsigned code,
void Simulator::PrintRegisterRawHelper(unsigned code, Reg31Mode r31mode,
int size_in_bytes) {
// The template for all supported sizes.
- // "# x{code}: 0xffeeddccbbaa9988"
- // "# w{code}: 0xbbaa9988"
+ // "# x{code}: 0xFFEEDDCCBBAA9988"
+ // "# w{code}: 0xBBAA9988"
// "# w{code}<15:0>: 0x9988"
// "# w{code}<7:0>: 0x88"
unsigned padding_chars = (kXRegSize - size_in_bytes) * 2;
@@ -2367,8 +2220,8 @@ void Simulator::VisitMoveWideImmediate(Instruction* instr) {
unsigned reg_code = instr->Rd();
int64_t prev_xn_val = is_64_bits ? xreg(reg_code)
: wreg(reg_code);
- new_xn_val = (prev_xn_val & ~(0xffffL << shift)) | shifted_imm16;
- break;
+ new_xn_val = (prev_xn_val & ~(0xFFFFL << shift)) | shifted_imm16;
+ break;
}
case MOVZ_w:
case MOVZ_x: {
@@ -2532,14 +2385,14 @@ static int64_t MultiplyHighSigned(int64_t u, int64_t v) {
uint64_t u0, v0, w0;
int64_t u1, v1, w1, w2, t;
- u0 = u & 0xffffffffL;
+ u0 = u & 0xFFFFFFFFL;
u1 = u >> 32;
- v0 = v & 0xffffffffL;
+ v0 = v & 0xFFFFFFFFL;
v1 = v >> 32;
w0 = u0 * v0;
t = u1 * v0 + (w0 >> 32);
- w1 = t & 0xffffffffL;
+ w1 = t & 0xFFFFFFFFL;
w2 = t >> 32;
w1 = u0 * v1 + w1;
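A short worked note on the decomposition used here: writing u = u1*2^32 + u0 and v = v1*2^32 + v0,

    u*v = u1*v1*2^64 + (u1*v0 + u0*v1)*2^32 + u0*v0

so the high 64 bits are u1*v1 plus the carries out of the two middle partial products, which is exactly what the running values t, w1 and w2 accumulate.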
@@ -3344,7 +3197,7 @@ void Simulator::Debug() {
int next_arg = 1;
if (strcmp(cmd, "stack") == 0) {
- cur = reinterpret_cast<int64_t*>(jssp());
+ cur = reinterpret_cast<int64_t*>(sp());
} else { // "mem"
int64_t value;
@@ -3381,7 +3234,7 @@ void Simulator::Debug() {
PrintF(" (");
if ((value & kSmiTagMask) == 0) {
STATIC_ASSERT(kSmiValueSize == 32);
- int32_t untagged = (value >> kSmiShift) & 0xffffffff;
+ int32_t untagged = (value >> kSmiShift) & 0xFFFFFFFF;
PrintF("smi %" PRId32, untagged);
} else {
obj->ShortPrint();
@@ -4344,7 +4197,7 @@ void Simulator::VisitNEONByIndexedElement(Instruction* instr) {
int rm_reg = instr->Rm();
int index = (instr->NEONH() << 1) | instr->NEONL();
if (instr->NEONSize() == 1) {
- rm_reg &= 0xf;
+ rm_reg &= 0xF;
index = (index << 1) | instr->NEONM();
}
@@ -4909,9 +4762,9 @@ void Simulator::VisitNEONModifiedImmediate(Instruction* instr) {
case 0x6:
vform = (q == 1) ? kFormat4S : kFormat2S;
if (cmode_0 == 0) {
- imm = imm8 << 8 | 0x000000ff;
+ imm = imm8 << 8 | 0x000000FF;
} else {
- imm = imm8 << 16 | 0x0000ffff;
+ imm = imm8 << 16 | 0x0000FFFF;
}
break;
case 0x7:
@@ -4923,10 +4776,10 @@ void Simulator::VisitNEONModifiedImmediate(Instruction* instr) {
imm = 0;
for (int i = 0; i < 8; ++i) {
if (imm8 & (1 << i)) {
- imm |= (UINT64_C(0xff) << (8 * i));
+ imm |= (UINT64_C(0xFF) << (8 * i));
}
}
- } else { // cmode_0 == 1, cmode == 0xf.
+ } else { // cmode_0 == 1, cmode == 0xF.
if (op_bit == 0) {
vform = q ? kFormat4S : kFormat2S;
imm = bit_cast<uint32_t>(instr->ImmNEONFP32());
@@ -4934,7 +4787,7 @@ void Simulator::VisitNEONModifiedImmediate(Instruction* instr) {
vform = kFormat2D;
imm = bit_cast<uint64_t>(instr->ImmNEONFP64());
} else {
- DCHECK((q == 0) && (op_bit == 1) && (cmode == 0xf));
+ DCHECK((q == 0) && (op_bit == 1) && (cmode == 0xF));
VisitUnallocated(instr);
}
}
@@ -5278,7 +5131,7 @@ void Simulator::VisitNEONScalarByIndexedElement(Instruction* instr) {
int rm_reg = instr->Rm();
int index = (instr->NEONH() << 1) | instr->NEONL();
if (instr->NEONSize() == 1) {
- rm_reg &= 0xf;
+ rm_reg &= 0xF;
index = (index << 1) | instr->NEONM();
}
diff --git a/deps/v8/src/arm64/simulator-arm64.h b/deps/v8/src/arm64/simulator-arm64.h
index 0411c0bc96..a8f229d764 100644
--- a/deps/v8/src/arm64/simulator-arm64.h
+++ b/deps/v8/src/arm64/simulator-arm64.h
@@ -16,56 +16,13 @@
#include "src/assembler.h"
#include "src/base/compiler-specific.h"
#include "src/globals.h"
+#include "src/simulator-base.h"
#include "src/utils.h"
namespace v8 {
namespace internal {
-#if !defined(USE_SIMULATOR)
-
-// Running without a simulator on a native ARM64 platform.
-// When running without a simulator we call the entry directly.
-#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
- (entry(p0, p1, p2, p3, p4))
-
-typedef int (*arm64_regexp_matcher)(String* input,
- int64_t start_offset,
- const byte* input_start,
- const byte* input_end,
- int* output,
- int64_t output_size,
- Address stack_base,
- int64_t direct_call,
- Isolate* isolate);
-
-// Call the generated regexp code directly. The code at the entry address
-// should act as a function matching the type arm64_regexp_matcher.
-#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
- p7, p8) \
- (FUNCTION_CAST<arm64_regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, \
- p8))
-
-// When running without a simulator there is nothing to do.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
- uintptr_t c_limit) {
- USE(isolate);
- return c_limit;
- }
-
- static uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
- uintptr_t try_catch_address) {
- USE(isolate);
- return try_catch_address;
- }
-
- static void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
- USE(isolate);
- }
-};
-
-#else // !defined(USE_SIMULATOR)
+#if defined(USE_SIMULATOR)
// Assemble the specified IEEE-754 components into the target type and apply
// appropriate rounding.
@@ -269,6 +226,10 @@ T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa,
}
}
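A hedged usage sketch for FPRound as declared in the hunk header above (FPTieEven is the simulator's round-to-nearest-ties-to-even rounding-mode constant; treat its exact spelling as an assumption):

    // Assemble the IEEE-754 components sign/exponent/mantissa into a
    // float, rounding to nearest with ties to even:
    float f = FPRound<float>(sign, exponent, mantissa, FPTieEven);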
+class CachePage {
+ // TODO(all): Simulate instruction cache.
+};
+
// Representation of memory, with typed getters and setters for access.
class SimMemory {
public:
@@ -680,8 +641,11 @@ class LogicVRegister {
bool round_[kQRegSize];
};
-class Simulator : public DecoderVisitor {
+// Using multiple inheritance here is permitted because {DecoderVisitor} is a
+// pure interface class with only pure virtual methods.
+class Simulator : public DecoderVisitor, public SimulatorBase {
public:
+ static void SetRedirectInstruction(Instruction* instruction);
static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
size_t size) {
USE(i_cache);
@@ -696,42 +660,7 @@ class Simulator : public DecoderVisitor {
// System functions.
- static void Initialize(Isolate* isolate);
-
- static void TearDown(base::CustomMatcherHashMap* i_cache, Redirection* first);
-
- static Simulator* current(v8::internal::Isolate* isolate);
-
- class CallArgument;
-
- // Call an arbitrary function taking an arbitrary number of arguments. The
- // varargs list must be a sequence of CallArgument values terminated by
- // CallArgument::End().
- void CallVoid(byte* entry, CallArgument* args);
-
- // Like CallVoid, but expect a return value.
- int64_t CallInt64(byte* entry, CallArgument* args);
- double CallDouble(byte* entry, CallArgument* args);
-
- // V8 calls into generated JS code with 5 parameters and into
- // generated RegExp code with 10 parameters. These are convenience functions,
- // which set up the simulator state and grab the result on return.
- int64_t CallJS(byte* entry,
- Object* new_target,
- Object* target,
- Object* recv,
- int64_t argc,
- Object*** argv);
- int64_t CallRegExp(byte* entry,
- String* input,
- int64_t start_offset,
- const byte* input_start,
- const byte* input_end,
- int* output,
- int64_t output_size,
- Address stack_base,
- int64_t direct_call,
- Isolate* isolate);
+ V8_EXPORT_PRIVATE static Simulator* current(v8::internal::Isolate* isolate);
// A wrapper class that stores an argument for one of the above Call
// functions.
@@ -787,6 +716,14 @@ class Simulator : public DecoderVisitor {
CallArgument() { type_ = NO_ARG; }
};
+ // Call an arbitrary function taking an arbitrary number of arguments.
+ template <typename Return, typename... Args>
+ Return Call(byte* entry, Args... args) {
+ // Convert all arguments to CallArgument.
+ CallArgument call_args[] = {CallArgument(args)..., CallArgument::End()};
+ CallImpl(entry, call_args);
+ return ReadReturn<Return>();
+ }
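A usage sketch for the new variadic Call (entry and the argument values are illustrative):

    Simulator* sim = Simulator::current(isolate);
    // Integer results are read back from x0 via ReadReturn<int64_t>():
    int64_t r = sim->Call<int64_t>(entry, int64_t{1}, int64_t{2});
    // Floating-point results are read back from d0:
    double d = sim->Call<double>(entry, 1.5);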
// Start the debugging command line.
void Debug();
@@ -806,10 +743,6 @@ class Simulator : public DecoderVisitor {
void ResetState();
- // Runtime call support. Uses the isolate in a thread-safe way.
- static void* RedirectExternalReference(Isolate* isolate,
- void* external_function,
- ExternalReference::Type type);
void DoRuntimeCall(Instruction* instr);
// Run the simulator.
@@ -958,7 +891,6 @@ class Simulator : public DecoderVisitor {
inline SimVRegister& vreg(unsigned code) { return vregisters_[code]; }
int64_t sp() { return xreg(31, Reg31IsStackPointer); }
- int64_t jssp() { return xreg(kJSSPCode, Reg31IsStackPointer); }
int64_t fp() {
return xreg(kFramePointerRegCode, Reg31IsStackPointer);
}
@@ -2345,6 +2277,21 @@ class Simulator : public DecoderVisitor {
private:
void Init(FILE* stream);
+ V8_EXPORT_PRIVATE void CallImpl(byte* entry, CallArgument* args);
+
+ // Read floating point return values.
+ template <typename T>
+ typename std::enable_if<std::is_floating_point<T>::value, T>::type
+ ReadReturn() {
+ return static_cast<T>(dreg(0));
+ }
+ // Read non-float return values.
+ template <typename T>
+ typename std::enable_if<!std::is_floating_point<T>::value, T>::type
+ ReadReturn() {
+ return ConvertReturn<T>(xreg(0));
+ }
+
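The two ReadReturn overloads above rely on std::enable_if: for any given T exactly one signature is well-formed, so overload resolution silently picks the d0 path for floating-point types and the x0 path for everything else. For example, inside Call:

    double d = ReadReturn<double>();    // first overload, reads dreg(0)
    int64_t i = ReadReturn<int64_t>();  // second overload, reads xreg(0)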
template <typename T>
static T FPDefaultNaN();
@@ -2407,40 +2354,7 @@ inline float Simulator::FPDefaultNaN<float>() {
return kFP32DefaultNaN;
}
-// When running with the simulator transition into simulated execution at this
-// point.
-#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
- reinterpret_cast<Object*>(Simulator::current(isolate)->CallJS( \
- FUNCTION_ADDR(entry), p0, p1, p2, p3, p4))
-
-#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
- p7, p8) \
- static_cast<int>(Simulator::current(isolate)->CallRegExp( \
- entry, p0, p1, p2, p3, p4, p5, p6, p7, p8))
-
-// The simulator has its own stack. Thus it has a different stack limit from
-// the C-based native code. The JS-based limit normally points near the end of
-// the simulator stack. When the C-based limit is exhausted we reflect that by
-// lowering the JS-based limit as well, to make stack checks trigger.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
- uintptr_t c_limit) {
- return Simulator::current(isolate)->StackLimit(c_limit);
- }
-
- static uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
- uintptr_t try_catch_address) {
- Simulator* sim = Simulator::current(isolate);
- return sim->PushAddress(try_catch_address);
- }
-
- static void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
- Simulator::current(isolate)->PopAddress();
- }
-};
-
-#endif // !defined(USE_SIMULATOR)
+#endif // defined(USE_SIMULATOR)
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/arm64/simulator-logic-arm64.cc b/deps/v8/src/arm64/simulator-logic-arm64.cc
index 03d1d37df9..9ee5ea6cc8 100644
--- a/deps/v8/src/arm64/simulator-logic-arm64.cc
+++ b/deps/v8/src/arm64/simulator-logic-arm64.cc
@@ -3986,9 +3986,9 @@ T Simulator::FPRecipEstimate(T op, FPRounding rounding) {
} else {
// Return FPMaxNormal(sign).
if (sizeof(T) == sizeof(float)) {
- return float_pack(sign, 0xfe, 0x07fffff);
+ return float_pack(sign, 0xFE, 0x07FFFFF);
} else {
- return double_pack(sign, 0x7fe, 0x0fffffffffffffl);
+ return double_pack(sign, 0x7FE, 0x0FFFFFFFFFFFFFl);
}
}
} else {
diff --git a/deps/v8/src/arm64/utils-arm64.cc b/deps/v8/src/arm64/utils-arm64.cc
index 8ef8420001..f8804d8b93 100644
--- a/deps/v8/src/arm64/utils-arm64.cc
+++ b/deps/v8/src/arm64/utils-arm64.cc
@@ -98,7 +98,7 @@ int CountTrailingZeros(uint64_t value, int width) {
return static_cast<int>(base::bits::CountTrailingZeros64(value));
}
return static_cast<int>(base::bits::CountTrailingZeros32(
- static_cast<uint32_t>(value & 0xfffffffff)));
+ static_cast<uint32_t>(value & 0xFFFFFFFFF)));
}
@@ -108,7 +108,7 @@ int CountSetBits(uint64_t value, int width) {
return static_cast<int>(base::bits::CountPopulation(value));
}
return static_cast<int>(
- base::bits::CountPopulation(static_cast<uint32_t>(value & 0xfffffffff)));
+ base::bits::CountPopulation(static_cast<uint32_t>(value & 0xFFFFFFFFF)));
}
int LowestSetBitPosition(uint64_t value) {
diff --git a/deps/v8/src/asmjs/asm-parser.cc b/deps/v8/src/asmjs/asm-parser.cc
index c38c52220d..6be80bf7af 100644
--- a/deps/v8/src/asmjs/asm-parser.cc
+++ b/deps/v8/src/asmjs/asm-parser.cc
@@ -292,8 +292,7 @@ void AsmJsParser::Begin(AsmJsScanner::token_t label) {
void AsmJsParser::Loop(AsmJsScanner::token_t label) {
BareBegin(BlockKind::kLoop, label);
- int position = static_cast<int>(scanner_.Position());
- DCHECK_EQ(position, scanner_.Position());
+ size_t position = scanner_.Position();
current_function_builder_->AddAsmWasmOffset(position, position);
current_function_builder_->EmitWithU8(kExprLoop, kLocalVoid);
}
@@ -450,7 +449,7 @@ void AsmJsParser::ValidateModuleVar(bool mutable_variable) {
DeclareGlobal(info, mutable_variable, AsmType::Double(), kWasmF64,
WasmInitExpr(dvalue));
} else if (CheckForUnsigned(&uvalue)) {
- if (uvalue > 0x7fffffff) {
+ if (uvalue > 0x7FFFFFFF) {
FAIL("Numeric literal out of range");
}
DeclareGlobal(info, mutable_variable,
@@ -461,7 +460,7 @@ void AsmJsParser::ValidateModuleVar(bool mutable_variable) {
DeclareGlobal(info, mutable_variable, AsmType::Double(), kWasmF64,
WasmInitExpr(-dvalue));
} else if (CheckForUnsigned(&uvalue)) {
- if (uvalue > 0x7fffffff) {
+ if (uvalue > 0x7FFFFFFF) {
FAIL("Numeric literal out of range");
}
DeclareGlobal(info, mutable_variable,
@@ -742,8 +741,7 @@ void AsmJsParser::ValidateFunction() {
return_type_ = nullptr;
// Record start of the function, used as position for the stack check.
- int start_position = static_cast<int>(scanner_.Position());
- current_function_builder_->SetAsmFunctionStartPosition(start_position);
+ current_function_builder_->SetAsmFunctionStartPosition(scanner_.Position());
CachedVector<AsmType*> params(cached_asm_type_p_vectors_);
ValidateFunctionParams(&params);
@@ -902,7 +900,7 @@ void AsmJsParser::ValidateFunctionLocals(size_t param_count,
current_function_builder_->EmitF64Const(-dvalue);
current_function_builder_->EmitSetLocal(info->index);
} else if (CheckForUnsigned(&uvalue)) {
- if (uvalue > 0x7fffffff) {
+ if (uvalue > 0x7FFFFFFF) {
FAIL("Numeric literal out of range");
}
info->kind = VarKind::kLocal;
@@ -954,7 +952,7 @@ void AsmJsParser::ValidateFunctionLocals(size_t param_count,
current_function_builder_->EmitF32Const(dvalue);
current_function_builder_->EmitSetLocal(info->index);
} else if (CheckForUnsigned(&uvalue)) {
- if (uvalue > 0x7fffffff) {
+ if (uvalue > 0x7FFFFFFF) {
FAIL("Numeric literal out of range");
}
info->kind = VarKind::kLocal;
@@ -1337,7 +1335,7 @@ void AsmJsParser::ValidateCase() {
FAIL("Expected numeric literal");
}
// TODO(bradnelson): Share negation plumbing.
- if ((negate && uvalue > 0x80000000) || (!negate && uvalue > 0x7fffffff)) {
+ if ((negate && uvalue > 0x80000000) || (!negate && uvalue > 0x7FFFFFFF)) {
FAIL("Numeric literal out of range");
}
int32_t value = static_cast<int32_t>(uvalue);
@@ -1398,11 +1396,11 @@ AsmType* AsmJsParser::NumericLiteral() {
current_function_builder_->EmitF64Const(dvalue);
return AsmType::Double();
} else if (CheckForUnsigned(&uvalue)) {
- if (uvalue <= 0x7fffffff) {
+ if (uvalue <= 0x7FFFFFFF) {
current_function_builder_->EmitI32Const(static_cast<int32_t>(uvalue));
return AsmType::FixNum();
} else {
- DCHECK_LE(uvalue, 0xffffffff);
+ DCHECK_LE(uvalue, 0xFFFFFFFF);
current_function_builder_->EmitI32Const(static_cast<int32_t>(uvalue));
return AsmType::Unsigned();
}
@@ -1553,7 +1551,7 @@ AsmType* AsmJsParser::UnaryExpression() {
if (Check('-')) {
uint32_t uvalue;
if (CheckForUnsigned(&uvalue)) {
- // TODO(bradnelson): was supposed to be 0x7fffffff, check errata.
+ // TODO(bradnelson): was supposed to be 0x7FFFFFFF, check errata.
if (uvalue <= 0x80000000) {
current_function_builder_->EmitI32Const(-static_cast<int32_t>(uvalue));
} else {
@@ -1621,7 +1619,7 @@ AsmType* AsmJsParser::UnaryExpression() {
if (!ret->IsA(AsmType::Intish())) {
FAILn("operator ~ expects intish");
}
- current_function_builder_->EmitI32Const(0xffffffff);
+ current_function_builder_->EmitI32Const(0xFFFFFFFF);
current_function_builder_->Emit(kExprI32Xor);
ret = AsmType::Signed();
}
@@ -2066,8 +2064,8 @@ AsmType* AsmJsParser::ParenthesizedExpression() {
AsmType* AsmJsParser::ValidateCall() {
AsmType* return_type = call_coercion_;
call_coercion_ = nullptr;
- int call_pos = static_cast<int>(scanner_.Position());
- int to_number_pos = static_cast<int>(call_coercion_position_);
+ size_t call_pos = scanner_.Position();
+ size_t to_number_pos = call_coercion_position_;
bool allow_peek = (call_coercion_deferred_position_ == scanner_.Position());
AsmJsScanner::token_t function_name = Consume();
@@ -2113,7 +2111,7 @@ AsmType* AsmJsParser::ValidateCall() {
tmp.emplace(this);
current_function_builder_->EmitSetLocal(tmp->get());
// The position of function table calls is after the table lookup.
- call_pos = static_cast<int>(scanner_.Position());
+ call_pos = scanner_.Position();
} else {
VarInfo* function_info = GetVarInfo(function_name);
if (function_info->kind == VarKind::kUnused) {
@@ -2176,7 +2174,7 @@ AsmType* AsmJsParser::ValidateCall() {
(return_type == nullptr || return_type->IsA(AsmType::Float()))) {
DCHECK_NULL(call_coercion_deferred_);
call_coercion_deferred_ = AsmType::Signed();
- to_number_pos = static_cast<int>(scanner_.Position());
+ to_number_pos = scanner_.Position();
return_type = AsmType::Signed();
} else if (return_type == nullptr) {
to_number_pos = call_pos; // No conversion.
@@ -2395,9 +2393,9 @@ void AsmJsParser::ValidateHeapAccess() {
// TODO(bradnelson): Check more things.
// TODO(mstarzinger): Clarify and explain where this limit is coming from,
// as it is not mandated by the spec directly.
- if (offset > 0x7fffffff ||
+ if (offset > 0x7FFFFFFF ||
static_cast<uint64_t>(offset) * static_cast<uint64_t>(size) >
- 0x7fffffff) {
+ 0x7FFFFFFF) {
FAIL("Heap access out of range");
}
if (Check(']')) {
diff --git a/deps/v8/src/asmjs/asm-scanner.cc b/deps/v8/src/asmjs/asm-scanner.cc
index 910fe37546..af41208ead 100644
--- a/deps/v8/src/asmjs/asm-scanner.cc
+++ b/deps/v8/src/asmjs/asm-scanner.cc
@@ -15,7 +15,7 @@ namespace internal {
namespace {
// Cap number of identifiers to ensure we can assign both global and
// local ones a token id in the range of an int32_t.
-static const int kMaxIdentifierCount = 0xf000000;
+static const int kMaxIdentifierCount = 0xF000000;
};
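A quick sanity check on this cap: 0xF000000 = 15 * 2^24 = 251,658,240, so two disjoint id ranges of that size (one for globals, one for locals) still sit comfortably below INT32_MAX = 2,147,483,647.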
AsmJsScanner::AsmJsScanner(Utf16CharacterStream* stream)
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index 90d7ac3ff8..1b83735bc9 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -131,14 +131,14 @@ static struct V8_ALIGNED(16) {
static struct V8_ALIGNED(16) {
uint64_t a;
uint64_t b;
-} double_absolute_constant = {V8_UINT64_C(0x7FFFFFFFFFFFFFFF),
- V8_UINT64_C(0x7FFFFFFFFFFFFFFF)};
+} double_absolute_constant = {uint64_t{0x7FFFFFFFFFFFFFFF},
+ uint64_t{0x7FFFFFFFFFFFFFFF}};
static struct V8_ALIGNED(16) {
uint64_t a;
uint64_t b;
-} double_negate_constant = {V8_UINT64_C(0x8000000000000000),
- V8_UINT64_C(0x8000000000000000)};
+} double_negate_constant = {uint64_t{0x8000000000000000},
+ uint64_t{0x8000000000000000}};
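These 16-byte-aligned constants are the standard abs/negate bit masks for IEEE-754 doubles: ANDing with 0x7FFFFFFFFFFFFFFF clears the sign bit, XORing with 0x8000000000000000 flips it. A minimal plain-C++ sketch of the idea (the generated code applies the same masks with SIMD ops; this sketch needs <cstring> and <cstdint>):

    double BitwiseAbs(double x) {
      uint64_t bits;
      std::memcpy(&bits, &x, sizeof bits);
      bits &= uint64_t{0x7FFFFFFFFFFFFFFF};  // clear the sign bit
      std::memcpy(&x, &bits, sizeof x);
      return x;
    }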
const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
@@ -351,7 +351,7 @@ void RelocInfo::set_target_address(Isolate* isolate, Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
- Assembler::set_target_address_at(isolate, pc_, host_, target,
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_, target,
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr &&
IsCodeTarget(rmode_)) {
@@ -801,6 +801,16 @@ ExternalReference ExternalReference::builtins_address(Isolate* isolate) {
return ExternalReference(isolate->builtins()->builtins_table_address());
}
+ExternalReference ExternalReference::handle_scope_implementer_address(
+ Isolate* isolate) {
+ return ExternalReference(isolate->handle_scope_implementer_address());
+}
+
+ExternalReference ExternalReference::pending_microtask_count_address(
+ Isolate* isolate) {
+ return ExternalReference(isolate->pending_microtask_count_address());
+}
+
ExternalReference ExternalReference::interpreter_dispatch_table_address(
Isolate* isolate) {
return ExternalReference(isolate->interpreter()->dispatch_table_address());
@@ -1002,6 +1012,16 @@ ExternalReference ExternalReference::wasm_word64_popcnt(Isolate* isolate) {
Redirect(isolate, FUNCTION_ADDR(wasm::word64_popcnt_wrapper)));
}
+ExternalReference ExternalReference::wasm_word32_rol(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::word32_rol_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_word32_ror(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::word32_ror_wrapper)));
+}
+
static void f64_acos_wrapper(double* param) {
WriteDoubleValue(param, base::ieee754::acos(ReadDoubleValue(param)));
}
@@ -1514,6 +1534,12 @@ ExternalReference ExternalReference::runtime_function_table_address(
const_cast<Runtime::Function*>(Runtime::RuntimeFunctionTable(isolate)));
}
+ExternalReference ExternalReference::invalidate_prototype_chains_function(
+ Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(JSObject::InvalidatePrototypeChains)));
+}
+
double power_helper(Isolate* isolate, double x, double y) {
int y_int = static_cast<int>(y);
if (y == y_int) {
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index 1e8365dcee..0cebdbc2d7 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -36,6 +36,7 @@
#define V8_ASSEMBLER_H_
#include <forward_list>
+#include <iosfwd>
#include "src/allocation.h"
#include "src/builtins/builtins.h"
@@ -54,9 +55,6 @@ namespace v8 {
class ApiFunction;
namespace internal {
-namespace wasm {
-class WasmCode;
-}
// Forward declarations.
class Isolate;
@@ -486,6 +484,7 @@ class RelocInfo {
Mode rmode() const { return rmode_; }
intptr_t data() const { return data_; }
Code* host() const { return host_; }
+ Address constant_pool() const { return constant_pool_; }
// Apply a relocation by delta bytes. When the code object is moved, PC
// relative addresses have to be updated as well as absolute addresses
@@ -625,9 +624,6 @@ class RelocInfo {
byte* pc_;
Mode rmode_;
intptr_t data_;
- // TODO(mtrofin): try to remove host_, if all we need is the constant_pool_
- // or a few other attributes, like the start address, etc. This is so that
- // we can reuse RelocInfo for WasmCode without having a modal design.
Code* host_;
Address constant_pool_ = nullptr;
friend class RelocIterator;
@@ -830,6 +826,9 @@ class ExternalReference BASE_EMBEDDED {
// The builtins table as an external reference, used by lazy deserialization.
static ExternalReference builtins_address(Isolate* isolate);
+ static ExternalReference handle_scope_implementer_address(Isolate* isolate);
+ static ExternalReference pending_microtask_count_address(Isolate* isolate);
+
// One-of-a-kind references. These references are not part of a general
// pattern. This means that they have to be added to the
// ExternalReferenceTable in serialize.cc manually.
@@ -875,6 +874,8 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference wasm_word64_ctz(Isolate* isolate);
static ExternalReference wasm_word32_popcnt(Isolate* isolate);
static ExternalReference wasm_word64_popcnt(Isolate* isolate);
+ static ExternalReference wasm_word32_rol(Isolate* isolate);
+ static ExternalReference wasm_word32_ror(Isolate* isolate);
static ExternalReference wasm_float64_pow(Isolate* isolate);
static ExternalReference wasm_set_thread_in_wasm_flag(Isolate* isolate);
static ExternalReference wasm_clear_thread_in_wasm_flag(Isolate* isolate);
@@ -1019,6 +1020,9 @@ class ExternalReference BASE_EMBEDDED {
V8_EXPORT_PRIVATE static ExternalReference runtime_function_table_address(
Isolate* isolate);
+ static ExternalReference invalidate_prototype_chains_function(
+ Isolate* isolate);
+
Address address() const { return reinterpret_cast<Address>(address_); }
// Used to read out the last step action of the debugger.
@@ -1328,16 +1332,24 @@ class RegisterBase {
int bit() const { return 1 << code(); }
- inline bool operator==(SubType other) const {
+ inline constexpr bool operator==(SubType other) const {
return reg_code_ == other.reg_code_;
}
- inline bool operator!=(SubType other) const { return !(*this == other); }
+ inline constexpr bool operator!=(SubType other) const {
+ return reg_code_ != other.reg_code_;
+ }
protected:
explicit constexpr RegisterBase(int code) : reg_code_(code) {}
int reg_code_;
};
+template <typename SubType, int kAfterLastRegister>
+inline std::ostream& operator<<(std::ostream& os,
+ RegisterBase<SubType, kAfterLastRegister> reg) {
+ return reg.is_valid() ? os << "r" << reg.code() : os << "<invalid reg>";
+}
+
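A usage sketch for the streaming operator added above (Register::from_code is the usual construction helper, and the example assumes <iostream>; treat both as assumptions):

    Register r = Register::from_code(3);
    std::cout << r;  // prints "r3"; an invalid register prints "<invalid reg>"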
} // namespace internal
} // namespace v8
#endif // V8_ASSEMBLER_H_
diff --git a/deps/v8/src/ast/ast-numbering.cc b/deps/v8/src/ast/ast-numbering.cc
index 0736e543e2..ade1a85349 100644
--- a/deps/v8/src/ast/ast-numbering.cc
+++ b/deps/v8/src/ast/ast-numbering.cc
@@ -16,10 +16,7 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
public:
AstNumberingVisitor(uintptr_t stack_limit, Zone* zone,
Compiler::EagerInnerFunctionLiterals* eager_literals)
- : zone_(zone),
- eager_literals_(eager_literals),
- suspend_count_(0),
- dont_optimize_reason_(kNoReason) {
+ : zone_(zone), eager_literals_(eager_literals), suspend_count_(0) {
InitializeAstVisitor(stack_limit);
}
@@ -39,19 +36,12 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
void VisitArguments(ZoneList<Expression*>* arguments);
void VisitLiteralProperty(LiteralProperty* property);
- void DisableOptimization(BailoutReason reason) {
- dont_optimize_reason_ = reason;
- }
-
- BailoutReason dont_optimize_reason() const { return dont_optimize_reason_; }
-
Zone* zone() const { return zone_; }
Zone* zone_;
Compiler::EagerInnerFunctionLiterals* eager_literals_;
int suspend_count_;
FunctionKind function_kind_;
- BailoutReason dont_optimize_reason_;
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
DISALLOW_COPY_AND_ASSIGN(AstNumberingVisitor);
@@ -80,7 +70,6 @@ void AstNumberingVisitor::VisitDebuggerStatement(DebuggerStatement* node) {
void AstNumberingVisitor::VisitNativeFunctionLiteral(
NativeFunctionLiteral* node) {
- DisableOptimization(kNativeFunctionLiteral);
}
void AstNumberingVisitor::VisitDoExpression(DoExpression* node) {
@@ -206,6 +195,11 @@ void AstNumberingVisitor::VisitProperty(Property* node) {
Visit(node->obj());
}
+void AstNumberingVisitor::VisitResolvedProperty(ResolvedProperty* node) {
+ Visit(node->object());
+ Visit(node->property());
+}
+
void AstNumberingVisitor::VisitAssignment(Assignment* node) {
Visit(node->target());
Visit(node->value());
@@ -262,6 +256,7 @@ void AstNumberingVisitor::VisitForInStatement(ForInStatement* node) {
void AstNumberingVisitor::VisitForOfStatement(ForOfStatement* node) {
Visit(node->assign_iterator()); // Not part of loop.
+ Visit(node->assign_next());
node->set_first_suspend_id(suspend_count_);
Visit(node->next_result());
Visit(node->result_done());
@@ -326,11 +321,6 @@ void AstNumberingVisitor::VisitObjectLiteral(ObjectLiteral* node) {
for (int i = 0; i < node->properties()->length(); i++) {
VisitLiteralProperty(node->properties()->at(i));
}
- node->InitDepthAndFlags();
- // Mark all computed expressions that are bound to a key that
- // is shadowed by a later occurrence of the same key. For the
- // marked expressions, no store code will be is emitted.
- node->CalculateEmitStore(zone_);
}
void AstNumberingVisitor::VisitLiteralProperty(LiteralProperty* node) {
@@ -342,7 +332,6 @@ void AstNumberingVisitor::VisitArrayLiteral(ArrayLiteral* node) {
for (int i = 0; i < node->values()->length(); i++) {
Visit(node->values()->at(i));
}
- node->InitDepthAndFlags();
}
void AstNumberingVisitor::VisitCall(Call* node) {
@@ -402,7 +391,6 @@ bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
VisitDeclarations(scope->declarations());
VisitStatements(node->body());
- node->set_dont_optimize_reason(dont_optimize_reason());
node->set_suspend_count(suspend_count_);
return !HasStackOverflow();
diff --git a/deps/v8/src/ast/ast-traversal-visitor.h b/deps/v8/src/ast/ast-traversal-visitor.h
index 6ad4df357c..3679ec762a 100644
--- a/deps/v8/src/ast/ast-traversal-visitor.h
+++ b/deps/v8/src/ast/ast-traversal-visitor.h
@@ -243,6 +243,7 @@ void AstTraversalVisitor<Subclass>::VisitForStatement(ForStatement* stmt) {
template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitForInStatement(ForInStatement* stmt) {
PROCESS_NODE(stmt);
+ RECURSE(Visit(stmt->each()));
RECURSE(Visit(stmt->enumerable()));
RECURSE(Visit(stmt->body()));
}
@@ -392,6 +393,14 @@ void AstTraversalVisitor<Subclass>::VisitProperty(Property* expr) {
}
template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitResolvedProperty(
+ ResolvedProperty* expr) {
+ PROCESS_EXPRESSION(expr);
+ RECURSE_EXPRESSION(VisitVariableProxy(expr->object()));
+ RECURSE_EXPRESSION(VisitVariableProxy(expr->property()));
+}
+
+template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitCall(Call* expr) {
PROCESS_EXPRESSION(expr);
RECURSE_EXPRESSION(Visit(expr->expression()));
diff --git a/deps/v8/src/ast/ast.cc b/deps/v8/src/ast/ast.cc
index 710cbb40a5..da14d87475 100644
--- a/deps/v8/src/ast/ast.cc
+++ b/deps/v8/src/ast/ast.cc
@@ -514,18 +514,17 @@ bool ArrayLiteral::is_empty() const {
}
int ArrayLiteral::InitDepthAndFlags() {
- DCHECK_LT(first_spread_index_, 0);
if (is_initialized()) return depth();
- int constants_length = values()->length();
+ int constants_length =
+ first_spread_index_ >= 0 ? first_spread_index_ : values()->length();
// Fill in the literals.
- bool is_simple = true;
+ bool is_simple = first_spread_index_ < 0;
int depth_acc = 1;
int array_index = 0;
for (; array_index < constants_length; array_index++) {
Expression* element = values()->at(array_index);
- DCHECK(!element->IsSpread());
MaterializedLiteral* literal = element->AsMaterializedLiteral();
if (literal != nullptr) {
int subliteral_depth = literal->InitDepthAndFlags() + 1;
@@ -546,11 +545,10 @@ int ArrayLiteral::InitDepthAndFlags() {
}
void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
- DCHECK_LT(first_spread_index_, 0);
-
if (!constant_elements_.is_null()) return;
- int constants_length = values()->length();
+ int constants_length =
+ first_spread_index_ >= 0 ? first_spread_index_ : values()->length();
ElementsKind kind = FIRST_FAST_ELEMENTS_KIND;
Handle<FixedArray> fixed_array =
isolate->factory()->NewFixedArrayWithHoles(constants_length);
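A concrete example of the new spread handling in both functions above: for [1, 2, ...xs, 3], first_spread_index_ is 2, so constants_length is 2 and only the leading 1 and 2 are treated as constants; the literal is no longer is_simple, and the spread and everything after it are materialized dynamically.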
@@ -614,11 +612,6 @@ bool ArrayLiteral::IsFastCloningSupported() const {
ConstructorBuiltins::kMaximumClonedShallowArrayElements;
}
-void ArrayLiteral::RewindSpreads() {
- values_->Rewind(first_spread_index_);
- first_spread_index_ = -1;
-}
-
bool MaterializedLiteral::IsSimple() const {
if (IsArrayLiteral()) return AsArrayLiteral()->is_simple();
if (IsObjectLiteral()) return AsObjectLiteral()->is_simple();
@@ -812,6 +805,10 @@ Call::CallType Call::GetCallType() const {
}
}
+ if (expression()->IsResolvedProperty()) {
+ return RESOLVED_PROPERTY_CALL;
+ }
+
return OTHER_CALL;
}
diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h
index 1ca192a462..f608621d3b 100644
--- a/deps/v8/src/ast/ast.h
+++ b/deps/v8/src/ast/ast.h
@@ -94,6 +94,7 @@ namespace internal {
V(Literal) \
V(NativeFunctionLiteral) \
V(Property) \
+ V(ResolvedProperty) \
V(RewritableExpression) \
V(Spread) \
V(SuperCallReference) \
@@ -590,11 +591,13 @@ class ForInStatement final : public ForEachStatement {
class ForOfStatement final : public ForEachStatement {
public:
void Initialize(Statement* body, Variable* iterator,
- Expression* assign_iterator, Expression* next_result,
- Expression* result_done, Expression* assign_each) {
+ Expression* assign_iterator, Expression* assign_next,
+ Expression* next_result, Expression* result_done,
+ Expression* assign_each) {
ForEachStatement::Initialize(body);
iterator_ = iterator;
assign_iterator_ = assign_iterator;
+ assign_next_ = assign_next;
next_result_ = next_result;
result_done_ = result_done;
assign_each_ = assign_each;
@@ -609,6 +612,9 @@ class ForOfStatement final : public ForEachStatement {
return assign_iterator_;
}
+ // iteratorRecord.next = iterator.next
+ Expression* assign_next() const { return assign_next_; }
+
// result = iterator.next() // with type check
Expression* next_result() const {
return next_result_;
@@ -624,6 +630,12 @@ class ForOfStatement final : public ForEachStatement {
return assign_each_;
}
+ void set_assign_iterator(Expression* e) { assign_iterator_ = e; }
+ void set_assign_next(Expression* e) { assign_next_ = e; }
+ void set_next_result(Expression* e) { next_result_ = e; }
+ void set_result_done(Expression* e) { result_done_ = e; }
+ void set_assign_each(Expression* e) { assign_each_ = e; }
+
private:
friend class AstNodeFactory;
@@ -637,6 +649,7 @@ class ForOfStatement final : public ForEachStatement {
Variable* iterator_;
Expression* assign_iterator_;
+ Expression* assign_next_;
Expression* next_result_;
Expression* result_done_;
Expression* assign_each_;
@@ -1450,22 +1463,23 @@ class ArrayLiteral final : public AggregateLiteral {
}
// Provide a mechanism for iterating through values to rewrite spreads.
- ZoneList<Expression*>::iterator FirstSpread() const {
+ ZoneList<Expression*>::iterator FirstSpreadOrEndValue() const {
return (first_spread_index_ >= 0) ? values_->begin() + first_spread_index_
: values_->end();
}
+ ZoneList<Expression*>::iterator BeginValue() const {
+ return values_->begin();
+ }
ZoneList<Expression*>::iterator EndValue() const { return values_->end(); }
- // Rewind an array literal omitting everything from the first spread on.
- void RewindSpreads();
-
private:
friend class AstNodeFactory;
ArrayLiteral(ZoneList<Expression*>* values, int first_spread_index, int pos)
: AggregateLiteral(pos, kArrayLiteral),
first_spread_index_(first_spread_index),
- values_(values) {}
+ values_(values) {
+ }
int first_spread_index_;
Handle<ConstantElementsPair> constant_elements_;
@@ -1606,6 +1620,25 @@ class Property final : public Expression {
Expression* key_;
};
+// ResolvedProperty pairs a receiver field with a value field. It allows Call
+// to support arbitrary receivers while still taking advantage of TypeFeedback.
+class ResolvedProperty final : public Expression {
+ public:
+ VariableProxy* object() const { return object_; }
+ VariableProxy* property() const { return property_; }
+
+ void set_object(VariableProxy* e) { object_ = e; }
+ void set_property(VariableProxy* e) { property_ = e; }
+
+ private:
+ friend class AstNodeFactory;
+
+ ResolvedProperty(VariableProxy* obj, VariableProxy* property, int pos)
+ : Expression(pos, kResolvedProperty), object_(obj), property_(property) {}
+
+ VariableProxy* object_;
+ VariableProxy* property_;
+};
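A hedged sketch of building one of these nodes through the factory method added further down in this diff (obj and prop are illustrative VariableProxy pointers):

    ResolvedProperty* rp = factory->NewResolvedProperty(obj, prop, pos);
    // A Call whose expression() is rp is classified as
    // RESOLVED_PROPERTY_CALL by Call::GetCallType().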
class Call final : public Expression {
public:
@@ -1632,6 +1665,7 @@ class Call final : public Expression {
NAMED_SUPER_PROPERTY_CALL,
KEYED_SUPER_PROPERTY_CALL,
SUPER_CALL,
+ RESOLVED_PROPERTY_CALL,
OTHER_CALL
};
@@ -1697,11 +1731,10 @@ class CallNew final : public Expression {
ZoneList<Expression*>* arguments_;
};
-
// The CallRuntime class does not represent any official JavaScript
// language construct. Instead it is used to call a C or JS function
// with a set of arguments. This is used from the builtins that are
-// implemented in JavaScript (see "v8natives.js").
+// implemented in JavaScript.
class CallRuntime final : public Expression {
public:
ZoneList<Expression*>* arguments() const { return arguments_; }
@@ -2104,7 +2137,6 @@ class YieldStar final : public Suspend {
// - One for awaiting the iterator result yielded by the delegated iterator
// (await_delegated_iterator_output_suspend_id)
int await_iterator_close_suspend_id() const {
- DCHECK_NE(-1, await_iterator_close_suspend_id_);
return await_iterator_close_suspend_id_;
}
void set_await_iterator_close_suspend_id(int id) {
@@ -2112,7 +2144,6 @@ class YieldStar final : public Suspend {
}
int await_delegated_iterator_output_suspend_id() const {
- DCHECK_NE(-1, await_delegated_iterator_output_suspend_id_);
return await_delegated_iterator_output_suspend_id_;
}
void set_await_delegated_iterator_output_suspend_id(int id) {
@@ -2168,7 +2199,8 @@ class FunctionLiteral final : public Expression {
kAnonymousExpression,
kNamedExpression,
kDeclaration,
- kAccessorOrMethod
+ kAccessorOrMethod,
+ kWrapped,
};
enum IdType { kIdTypeInvalid = -1, kIdTypeTopLevel = 0 };
@@ -2199,6 +2231,7 @@ class FunctionLiteral final : public Expression {
bool is_anonymous_expression() const {
return function_type() == kAnonymousExpression;
}
+ bool is_wrapped() const { return function_type() == kWrapped; }
LanguageMode language_mode() const;
static bool NeedsHomeObject(Expression* expr);
@@ -2274,7 +2307,9 @@ class FunctionLiteral final : public Expression {
}
FunctionKind kind() const;
- bool dont_optimize() { return dont_optimize_reason() != kNoReason; }
+ bool dont_optimize() {
+ return dont_optimize_reason() != BailoutReason::kNoReason;
+ }
BailoutReason dont_optimize_reason() {
return DontOptimizeReasonField::decode(bit_field_);
}
@@ -2337,14 +2372,14 @@ class FunctionLiteral final : public Expression {
Pretenure::encode(false) |
HasDuplicateParameters::encode(has_duplicate_parameters ==
kHasDuplicateParameters) |
- DontOptimizeReasonField::encode(kNoReason) |
+ DontOptimizeReasonField::encode(BailoutReason::kNoReason) |
RequiresInstanceFieldsInitializer::encode(false);
if (eager_compile_hint == kShouldEagerCompile) SetShouldEagerCompile();
DCHECK_EQ(body == nullptr, expected_property_count < 0);
}
class FunctionTypeBits
- : public BitField<FunctionType, Expression::kNextBitFieldIndex, 2> {};
+ : public BitField<FunctionType, Expression::kNextBitFieldIndex, 3> {};
class Pretenure : public BitField<bool, FunctionTypeBits::kNext, 1> {};
class HasDuplicateParameters : public BitField<bool, Pretenure::kNext, 1> {};
class DontOptimizeReasonField
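The width bump from 2 to 3 bits in FunctionTypeBits above is forced by the new kWrapped enumerator: FunctionType now has five values, and two bits can encode only four.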
@@ -2993,6 +3028,12 @@ class AstNodeFactory final BASE_EMBEDDED {
return new (zone_) Property(obj, key, pos);
}
+ ResolvedProperty* NewResolvedProperty(VariableProxy* obj,
+ VariableProxy* property,
+ int pos = kNoSourcePosition) {
+ return new (zone_) ResolvedProperty(obj, property, pos);
+ }
+
Call* NewCall(Expression* expression, ZoneList<Expression*>* arguments,
int pos, Call::PossiblyEval possibly_eval = Call::NOT_EVAL) {
return new (zone_) Call(expression, arguments, pos, possibly_eval);
diff --git a/deps/v8/src/ast/prettyprinter.cc b/deps/v8/src/ast/prettyprinter.cc
index f01ade8896..374c848289 100644
--- a/deps/v8/src/ast/prettyprinter.cc
+++ b/deps/v8/src/ast/prettyprinter.cc
@@ -26,6 +26,7 @@ CallPrinter::CallPrinter(Isolate* isolate, bool is_user_js)
is_iterator_error_ = false;
is_async_iterator_error_ = false;
is_user_js_ = is_user_js;
+ function_kind_ = kNormalFunction;
InitializeAstVisitor(isolate);
}
@@ -187,7 +188,10 @@ void CallPrinter::VisitDebuggerStatement(DebuggerStatement* node) {}
void CallPrinter::VisitFunctionLiteral(FunctionLiteral* node) {
+ FunctionKind last_function_kind = function_kind_;
+ function_kind_ = node->kind();
FindStatements(node->body());
+ function_kind_ = last_function_kind;
}
@@ -250,7 +254,17 @@ void CallPrinter::VisitArrayLiteral(ArrayLiteral* node) {
Print("[");
for (int i = 0; i < node->values()->length(); i++) {
if (i != 0) Print(",");
- Find(node->values()->at(i), true);
+ Expression* subexpr = node->values()->at(i);
+ Spread* spread = subexpr->AsSpread();
+ if (spread != nullptr && !found_ &&
+ position_ == spread->expression()->position()) {
+ found_ = true;
+ is_iterator_error_ = true;
+ Find(spread->expression(), true);
+ done_ = true;
+ return;
+ }
+ Find(subexpr, true);
}
Print("]");
}
@@ -277,7 +291,17 @@ void CallPrinter::VisitCompoundAssignment(CompoundAssignment* node) {
void CallPrinter::VisitYield(Yield* node) { Find(node->expression()); }
-void CallPrinter::VisitYieldStar(YieldStar* node) { Find(node->expression()); }
+void CallPrinter::VisitYieldStar(YieldStar* node) {
+ if (!found_ && position_ == node->expression()->position()) {
+ found_ = true;
+ if (IsAsyncFunction(function_kind_))
+ is_async_iterator_error_ = true;
+ else
+ is_iterator_error_ = true;
+ Print("yield* ");
+ }
+ Find(node->expression());
+}
void CallPrinter::VisitAwait(Await* node) { Find(node->expression()); }
@@ -302,6 +326,7 @@ void CallPrinter::VisitProperty(Property* node) {
}
}
+void CallPrinter::VisitResolvedProperty(ResolvedProperty* node) {}
void CallPrinter::VisitCall(Call* node) {
bool was_found = false;
@@ -960,8 +985,10 @@ void AstPrinter::VisitTryCatchStatement(TryCatchStatement* node) {
UNREACHABLE();
}
Print(" %s\n", prediction);
- PrintLiteralWithModeIndented("CATCHVAR", node->scope()->catch_variable(),
- node->scope()->catch_variable()->raw_name());
+ if (node->scope()) {
+ PrintLiteralWithModeIndented("CATCHVAR", node->scope()->catch_variable(),
+ node->scope()->catch_variable()->raw_name());
+ }
PrintIndentedVisit("CATCH", node->catch_block());
}
@@ -1223,6 +1250,14 @@ void AstPrinter::VisitProperty(Property* node) {
}
}
+void AstPrinter::VisitResolvedProperty(ResolvedProperty* node) {
+ EmbeddedVector<char, 128> buf;
+ SNPrintF(buf, "RESOLVED-PROPERTY");
+ IndentedScope indent(this, buf.start(), node->position());
+
+ PrintIndentedVisit("RECEIVER", node->object());
+ PrintIndentedVisit("PROPERTY", node->property());
+}
void AstPrinter::VisitCall(Call* node) {
EmbeddedVector<char, 128> buf;
diff --git a/deps/v8/src/ast/prettyprinter.h b/deps/v8/src/ast/prettyprinter.h
index 97c2437877..d93137b7cf 100644
--- a/deps/v8/src/ast/prettyprinter.h
+++ b/deps/v8/src/ast/prettyprinter.h
@@ -50,6 +50,7 @@ class CallPrinter final : public AstVisitor<CallPrinter> {
bool is_iterator_error_;
bool is_async_iterator_error_;
bool is_call_error_;
+ FunctionKind function_kind_;
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
protected:
diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc
index d012ec90f1..8f2f85080c 100644
--- a/deps/v8/src/ast/scopes.cc
+++ b/deps/v8/src/ast/scopes.cc
@@ -147,8 +147,6 @@ Scope::Scope(Zone* zone, Scope* outer_scope, ScopeType scope_type)
DCHECK_NE(SCRIPT_SCOPE, scope_type);
SetDefaults();
set_language_mode(outer_scope->language_mode());
- force_context_allocation_ =
- !is_function_scope() && outer_scope->has_forced_context_allocation();
outer_scope_->AddInnerScope(this);
}
@@ -649,8 +647,8 @@ void DeclarationScope::Analyze(ParseInfo* info) {
RuntimeCallTimerScope runtimeTimer(
info->runtime_call_stats(),
info->on_background_thread()
- ? &RuntimeCallStats::CompileBackgroundScopeAnalysis
- : &RuntimeCallStats::CompileScopeAnalysis);
+ ? RuntimeCallCounterId::kCompileBackgroundScopeAnalysis
+ : RuntimeCallCounterId::kCompileScopeAnalysis);
DCHECK_NOT_NULL(info->literal());
DeclarationScope* scope = info->literal()->scope();
@@ -1370,12 +1368,8 @@ bool Scope::AllowsLazyParsingWithoutUnresolvedVariables(
if (s->is_catch_scope()) continue;
// With scopes do not introduce variables that need allocation.
if (s->is_with_scope()) continue;
- // Module scopes context-allocate all variables, and have no
- // {this} or {arguments} variables whose existence depends on
- // references to them.
- if (s->is_module_scope()) continue;
- // Only block scopes and function scopes should disallow preparsing.
- DCHECK(s->is_block_scope() || s->is_function_scope());
+ DCHECK(s->is_module_scope() || s->is_block_scope() ||
+ s->is_function_scope());
return false;
}
return true;
@@ -1443,6 +1437,10 @@ bool Scope::NeedsScopeInfo() const {
return NeedsContext();
}
+bool Scope::ShouldBanArguments() {
+ return GetReceiverScope()->should_ban_arguments();
+}
+
DeclarationScope* Scope::GetReceiverScope() {
Scope* scope = this;
while (!scope->is_script_scope() &&
@@ -1734,9 +1732,6 @@ void Scope::Print(int n) {
if (scope->was_lazily_parsed()) Indent(n1, "// lazily parsed\n");
if (scope->ShouldEagerCompile()) Indent(n1, "// will be compiled\n");
}
- if (has_forced_context_allocation()) {
- Indent(n1, "// forces context allocation\n");
- }
if (num_stack_slots_ > 0) {
Indent(n1, "// ");
PrintF("%d stack slots\n", num_stack_slots_);
@@ -2111,11 +2106,8 @@ bool Scope::MustAllocateInContext(Variable* var) {
// an eval() call or a runtime with lookup), it must be allocated in the
// context.
//
- // Exceptions: If the scope as a whole has forced context allocation, all
- // variables will have context allocation, even temporaries. Otherwise
- // temporary variables are always stack-allocated. Catch-bound variables are
+ // Temporary variables are always stack-allocated. Catch-bound variables are
// always context-allocated.
- if (has_forced_context_allocation()) return true;
if (var->mode() == TEMPORARY) return false;
if (is_catch_scope()) return true;
if ((is_script_scope() || is_eval_scope()) &&
diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h
index bcfd2187df..d2e8886319 100644
--- a/deps/v8/src/ast/scopes.h
+++ b/deps/v8/src/ast/scopes.h
@@ -334,14 +334,6 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
bool is_hidden() const { return is_hidden_; }
void set_is_hidden() { is_hidden_ = true; }
- // In some cases we want to force context allocation for a whole scope.
- void ForceContextAllocation() {
- DCHECK(!already_resolved_);
- force_context_allocation_ = true;
- }
- bool has_forced_context_allocation() const {
- return force_context_allocation_;
- }
void ForceContextAllocationForParameters() {
DCHECK(!already_resolved_);
force_context_allocation_for_parameters_ = true;
@@ -404,6 +396,8 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
return static_cast<Variable*>(variables_.Start()->value);
}
+ bool ShouldBanArguments();
+
// ---------------------------------------------------------------------------
// Variable allocation.
@@ -704,6 +698,10 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
bool asm_module() const { return asm_module_; }
void set_asm_module();
+ bool should_ban_arguments() const {
+ return IsClassFieldsInitializerFunction(function_kind());
+ }
+
void DeclareThis(AstValueFactory* ast_value_factory);
void DeclareArguments(AstValueFactory* ast_value_factory);
void DeclareDefaultFunctionVariables(AstValueFactory* ast_value_factory);
diff --git a/deps/v8/src/bailout-reason.cc b/deps/v8/src/bailout-reason.cc
index ac7bb929b9..7cf983861c 100644
--- a/deps/v8/src/bailout-reason.cc
+++ b/deps/v8/src/bailout-reason.cc
@@ -8,13 +8,24 @@
namespace v8 {
namespace internal {
-const char* GetBailoutReason(BailoutReason reason) {
- DCHECK_LT(reason, kLastErrorMessage);
#define ERROR_MESSAGES_TEXTS(C, T) T,
+
+const char* GetBailoutReason(BailoutReason reason) {
+ DCHECK_LT(reason, BailoutReason::kLastErrorMessage);
+ DCHECK_GE(reason, BailoutReason::kNoReason);
static const char* error_messages_[] = {
- ERROR_MESSAGES_LIST(ERROR_MESSAGES_TEXTS)};
-#undef ERROR_MESSAGES_TEXTS
- return error_messages_[reason];
+ BAILOUT_MESSAGES_LIST(ERROR_MESSAGES_TEXTS)};
+ return error_messages_[static_cast<int>(reason)];
}
+
+const char* GetAbortReason(AbortReason reason) {
+ DCHECK_LT(reason, AbortReason::kLastErrorMessage);
+ DCHECK_GE(reason, AbortReason::kNoReason);
+ static const char* error_messages_[] = {
+ ABORT_MESSAGES_LIST(ERROR_MESSAGES_TEXTS)};
+ return error_messages_[static_cast<int>(reason)];
+}
+
+#undef ERROR_MESSAGES_TEXTS
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/bailout-reason.h b/deps/v8/src/bailout-reason.h
index 2bb92e1a2b..c8e81c69d4 100644
--- a/deps/v8/src/bailout-reason.h
+++ b/deps/v8/src/bailout-reason.h
@@ -8,158 +8,117 @@
namespace v8 {
namespace internal {
-// TODO(svenpanne) introduce an AbortReason and partition this list
-#define ERROR_MESSAGES_LIST(V) \
+#define ABORT_MESSAGES_LIST(V) \
V(kNoReason, "no reason") \
\
V(k32BitValueInRegisterIsNotZeroExtended, \
"32 bit value in register is not zero-extended") \
+ V(kAPICallReturnedInvalidObject, "API call returned invalid object") \
V(kAllocatingNonEmptyPackedArray, "Allocating non-empty packed array") \
V(kAllocationIsNotDoubleAligned, "Allocation is not double aligned") \
- V(kAPICallReturnedInvalidObject, "API call returned invalid object") \
- V(kBailedOutDueToDependencyChange, "Bailed out due to dependency change") \
- V(kClassConstructorFunction, "Class constructor function") \
- V(kClassLiteral, "Class literal") \
- V(kCodeGenerationFailed, "Code generation failed") \
V(kCodeObjectNotProperlyPatched, "Code object not properly patched") \
- V(kComputedPropertyName, "Computed property name") \
- V(kContextAllocatedArguments, "Context-allocated arguments") \
- V(kDebuggerStatement, "DebuggerStatement") \
- V(kDeclarationInCatchContext, "Declaration in catch context") \
- V(kDeclarationInWithContext, "Declaration in with context") \
- V(kDynamicImport, "Dynamic module import") \
- V(kCyclicObjectStateDetectedInEscapeAnalysis, \
- "Cyclic object state detected by escape analysis") \
- V(kEval, "eval") \
V(kExpectedAllocationSite, "Expected allocation site") \
- V(kExpectedBooleanValue, "Expected boolean value") \
V(kExpectedFeedbackVector, "Expected feedback vector") \
- V(kExpectedHeapNumber, "Expected HeapNumber") \
- V(kExpectedNonIdenticalObjects, "Expected non-identical objects") \
V(kExpectedOptimizationSentinel, \
"Expected optimized code cell or optimization sentinel") \
- V(kExpectedNewSpaceObject, "Expected new space object") \
V(kExpectedUndefinedOrCell, "Expected undefined or cell in register") \
- V(kForOfStatement, "ForOfStatement") \
- V(kFunctionBeingDebugged, "Function is being debugged") \
- V(kFunctionCallsEval, "Function calls eval") \
V(kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, \
"The function_data field should be a BytecodeArray on interpreter entry") \
- V(kGenerator, "Generator") \
- V(kGetIterator, "GetIterator") \
- V(kGraphBuildingFailed, "Optimized graph construction failed") \
- V(kHeapNumberMapRegisterClobbered, "HeapNumberMap register clobbered") \
- V(kIndexIsNegative, "Index is negative") \
- V(kIndexIsTooLarge, "Index is too large") \
- V(kInputGPRIsExpectedToHaveUpper32Cleared, \
- "Input GPR is expected to have upper32 cleared") \
V(kInputStringTooLong, "Input string too long") \
V(kInvalidBytecode, "Invalid bytecode") \
V(kInvalidElementsKindForInternalArrayOrInternalPackedArray, \
"Invalid ElementsKind for InternalArray or InternalPackedArray") \
- V(kInvalidFullCodegenState, "invalid full-codegen state") \
V(kInvalidHandleScopeLevel, "Invalid HandleScope level") \
V(kInvalidJumpTableIndex, "Invalid jump table index") \
V(kInvalidRegisterFileInGenerator, "invalid register file in generator") \
- V(kLiveEdit, "LiveEdit") \
V(kMissingBytecodeArray, "Missing bytecode array from function") \
- V(kNativeFunctionLiteral, "Native function literal") \
- V(kNoCasesLeft, "No cases left") \
- V(kNonObject, "Non-object value") \
- V(kNotEnoughVirtualRegistersRegalloc, \
- "Not enough virtual registers (regalloc)") \
+ V(kObjectNotTagged, "The object is not tagged") \
+ V(kObjectTagged, "The object is tagged") \
V(kOffsetOutOfRange, "Offset out of range") \
+ V(kOperandIsASmi, "Operand is a smi") \
V(kOperandIsASmiAndNotABoundFunction, \
"Operand is a smi and not a bound function") \
V(kOperandIsASmiAndNotAFixedArray, "Operand is a smi and not a fixed array") \
V(kOperandIsASmiAndNotAFunction, "Operand is a smi and not a function") \
V(kOperandIsASmiAndNotAGeneratorObject, \
"Operand is a smi and not a generator object") \
- V(kOperandIsASmi, "Operand is a smi") \
V(kOperandIsNotABoundFunction, "Operand is not a bound function") \
V(kOperandIsNotAFixedArray, "Operand is not a fixed array") \
V(kOperandIsNotAFunction, "Operand is not a function") \
V(kOperandIsNotAGeneratorObject, "Operand is not a generator object") \
V(kOperandIsNotASmi, "Operand is not a smi") \
- V(kOperandIsNotSmi, "Operand is not smi") \
- V(kObjectTagged, "The object is tagged") \
- V(kObjectNotTagged, "The object is not tagged") \
- V(kOptimizationDisabled, "Optimization disabled") \
- V(kOptimizationDisabledForTest, "Optimization disabled for test") \
V(kReceivedInvalidReturnAddress, "Received invalid return address") \
- V(kReferenceToAVariableWhichRequiresDynamicLookup, \
- "Reference to a variable which requires dynamic lookup") \
- V(kReferenceToModuleVariable, "Reference to module-allocated variable") \
V(kRegisterDidNotMatchExpectedRoot, "Register did not match expected root") \
V(kRegisterWasClobbered, "Register was clobbered") \
- V(kRememberedSetPointerInNewSpace, "Remembered set pointer is in new space") \
- V(kRestParameter, "Rest parameters") \
V(kReturnAddressNotFoundInFrame, "Return address not found in frame") \
- V(kSpreadCall, "Call with spread argument") \
+ V(kShouldNotDirectlyEnterOsrFunction, \
+ "Should not directly enter OSR-compiled function") \
V(kStackAccessBelowStackPointer, "Stack access below stack pointer") \
V(kStackFrameTypesMustMatch, "Stack frame types must match") \
- V(kSuperReference, "Super reference") \
- V(kTailCall, "Tail call") \
V(kTheCurrentStackPointerIsBelowCsp, \
"The current stack pointer is below csp") \
V(kTheStackWasCorruptedByMacroAssemblerCall, \
"The stack was corrupted by MacroAssembler::Call()") \
- V(kTooManyParameters, "Too many parameters") \
- V(kTryCatchStatement, "TryCatchStatement") \
- V(kTryFinallyStatement, "TryFinallyStatement") \
- V(kUnalignedAllocationInNewSpace, "Unaligned allocation in new space") \
V(kUnalignedCellInWriteBarrier, "Unaligned cell in write barrier") \
- V(kUnexpectedColorFound, "Unexpected color bit pattern found") \
V(kUnexpectedElementsKindInArrayConstructor, \
"Unexpected ElementsKind in array constructor") \
- V(kUnexpectedFallthroughFromCharCodeAtSlowCase, \
- "Unexpected fallthrough from CharCodeAt slow case") \
- V(kUnexpectedFallThroughFromStringComparison, \
- "Unexpected fall-through from string comparison") \
- V(kUnexpectedFallthroughToCharCodeAtSlowCase, \
- "Unexpected fallthrough to CharCodeAt slow case") \
+ V(kUnexpectedFPCRMode, "Unexpected FPCR mode.") \
+ V(kUnexpectedFunctionIDForInvokeIntrinsic, \
+ "Unexpected runtime function id for the InvokeIntrinsic bytecode") \
+ V(kUnexpectedInitialMapForArrayFunction, \
+ "Unexpected initial map for Array function") \
V(kUnexpectedInitialMapForArrayFunction1, \
"Unexpected initial map for Array function (1)") \
V(kUnexpectedInitialMapForArrayFunction2, \
"Unexpected initial map for Array function (2)") \
- V(kUnexpectedInitialMapForArrayFunction, \
- "Unexpected initial map for Array function") \
V(kUnexpectedInitialMapForInternalArrayFunction, \
"Unexpected initial map for InternalArray function") \
V(kUnexpectedLevelAfterReturnFromApiCall, \
"Unexpected level after return from api call") \
V(kUnexpectedNegativeValue, "Unexpected negative value") \
- V(kUnexpectedFunctionIDForInvokeIntrinsic, \
- "Unexpected runtime function id for the InvokeIntrinsic bytecode") \
- V(kUnexpectedFPCRMode, "Unexpected FPCR mode.") \
- V(kUnexpectedStackDepth, "Unexpected operand stack depth in full-codegen") \
+ V(kUnexpectedReturnFromFrameDropper, \
+ "Unexpectedly returned from dropping frames") \
+ V(kUnexpectedReturnFromThrow, "Unexpectedly returned from a throw") \
+ V(kUnexpectedReturnFromWasmTrap, \
+ "Should not return after throwing a wasm trap") \
V(kUnexpectedStackPointer, "The stack pointer is not the expected value") \
- V(kUnexpectedStringType, "Unexpected string type") \
V(kUnexpectedValue, "Unexpected value") \
V(kUnsupportedModuleOperation, "Unsupported module operation") \
V(kUnsupportedNonPrimitiveCompare, "Unsupported non-primitive compare") \
- V(kUnexpectedReturnFromFrameDropper, \
- "Unexpectedly returned from dropping frames") \
- V(kUnexpectedReturnFromThrow, "Unexpectedly returned from a throw") \
- V(kVariableResolvedToWithContext, "Variable resolved to with context") \
- V(kWithStatement, "WithStatement") \
- V(kWrongFunctionContext, "Wrong context passed to function") \
V(kWrongAddressOrValuePassedToRecordWrite, \
"Wrong address or value passed to RecordWrite") \
V(kWrongArgumentCountForInvokeIntrinsic, \
"Wrong number of arguments for intrinsic") \
- V(kShouldNotDirectlyEnterOsrFunction, \
- "Should not directly enter OSR-compiled function") \
- V(kUnexpectedReturnFromWasmTrap, \
- "Should not return after throwing a wasm trap")
+ V(kWrongFunctionContext, "Wrong context passed to function")
+
+#define BAILOUT_MESSAGES_LIST(V) \
+ V(kNoReason, "no reason") \
+ \
+ V(kBailedOutDueToDependencyChange, "Bailed out due to dependency change") \
+ V(kCodeGenerationFailed, "Code generation failed") \
+ V(kCyclicObjectStateDetectedInEscapeAnalysis, \
+ "Cyclic object state detected by escape analysis") \
+ V(kFunctionBeingDebugged, "Function is being debugged") \
+ V(kGraphBuildingFailed, "Optimized graph construction failed") \
+ V(kLiveEdit, "LiveEdit") \
+ V(kNativeFunctionLiteral, "Native function literal") \
+ V(kNotEnoughVirtualRegistersRegalloc, \
+ "Not enough virtual registers (regalloc)") \
+ V(kOptimizationDisabled, "Optimization disabled") \
+ V(kOptimizationDisabledForTest, "Optimization disabled for test")
#define ERROR_MESSAGES_CONSTANTS(C, T) C,
-enum BailoutReason {
- ERROR_MESSAGES_LIST(ERROR_MESSAGES_CONSTANTS) kLastErrorMessage
+enum class BailoutReason {
+ BAILOUT_MESSAGES_LIST(ERROR_MESSAGES_CONSTANTS) kLastErrorMessage
+};
+
+enum class AbortReason {
+ ABORT_MESSAGES_LIST(ERROR_MESSAGES_CONSTANTS) kLastErrorMessage
};
#undef ERROR_MESSAGES_CONSTANTS
const char* GetBailoutReason(BailoutReason reason);
+const char* GetAbortReason(AbortReason reason);
} // namespace internal
} // namespace v8
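
The two lists above are consumed through an X-macro: each V(Name, Message) entry expands once into an enum constant and once into a message-table entry, which is how GetBailoutReason/GetAbortReason can stay table-driven. A minimal sketch of the pattern, with illustrative names rather than V8's:

// Sketch of the X-macro pattern used above; MY_REASONS_LIST and the
// helper macros here are illustrative, not the V8 names.
#include <cstdio>

#define MY_REASONS_LIST(V)          \
  V(kNoReason, "no reason")         \
  V(kOutOfMemory, "out of memory")

#define DECLARE_ENUM(Name, Message) Name,
enum class MyReason { MY_REASONS_LIST(DECLARE_ENUM) kLastReason };
#undef DECLARE_ENUM

#define DECLARE_MESSAGE(Name, Message) Message,
static const char* kMyReasonMessages[] = {MY_REASONS_LIST(DECLARE_MESSAGE)};
#undef DECLARE_MESSAGE

const char* GetMyReason(MyReason r) {
  return kMyReasonMessages[static_cast<int>(r)];
}

int main() { std::printf("%s\n", GetMyReason(MyReason::kOutOfMemory)); }
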
diff --git a/deps/v8/src/base/DEPS b/deps/v8/src/base/DEPS
index 60db5959fd..a9c31c20d6 100644
--- a/deps/v8/src/base/DEPS
+++ b/deps/v8/src/base/DEPS
@@ -1,6 +1,7 @@
include_rules = [
"-include",
"+include/v8config.h",
+ "+include/v8-platform.h",
"-src",
"+src/base",
]
diff --git a/deps/v8/src/base/cpu.cc b/deps/v8/src/base/cpu.cc
index f449612e6a..22e0511dc7 100644
--- a/deps/v8/src/base/cpu.cc
+++ b/deps/v8/src/base/cpu.cc
@@ -356,12 +356,12 @@ CPU::CPU()
// Interpret CPU feature information.
if (num_ids > 0) {
__cpuid(cpu_info, 1);
- stepping_ = cpu_info[0] & 0xf;
- model_ = ((cpu_info[0] >> 4) & 0xf) + ((cpu_info[0] >> 12) & 0xf0);
- family_ = (cpu_info[0] >> 8) & 0xf;
+ stepping_ = cpu_info[0] & 0xF;
+ model_ = ((cpu_info[0] >> 4) & 0xF) + ((cpu_info[0] >> 12) & 0xF0);
+ family_ = (cpu_info[0] >> 8) & 0xF;
type_ = (cpu_info[0] >> 12) & 0x3;
- ext_model_ = (cpu_info[0] >> 16) & 0xf;
- ext_family_ = (cpu_info[0] >> 20) & 0xff;
+ ext_model_ = (cpu_info[0] >> 16) & 0xF;
+ ext_family_ = (cpu_info[0] >> 20) & 0xFF;
has_fpu_ = (cpu_info[3] & 0x00000001) != 0;
has_cmov_ = (cpu_info[3] & 0x00008000) != 0;
has_mmx_ = (cpu_info[3] & 0x00800000) != 0;
@@ -378,16 +378,16 @@ CPU::CPU()
if (family_ == 0x6) {
switch (model_) {
- case 0x1c: // SLT
+ case 0x1C: // SLT
case 0x26:
case 0x36:
case 0x27:
case 0x35:
case 0x37: // SLM
- case 0x4a:
- case 0x4d:
- case 0x4c: // AMT
- case 0x6e:
+ case 0x4A:
+ case 0x4D:
+ case 0x4C: // AMT
+ case 0x6E:
is_atom_ = true;
}
}
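
The masks being uppercased here decode EAX of CPUID leaf 1: stepping in bits 3:0, base model in bits 7:4, family in bits 11:8, and the extended model in bits 19:16, which is folded into the high nibble of the model. A hedged sketch of that decoding, with a hard-coded EAX value standing in for a real __cpuid call:

// Sketch of decoding CPUID leaf-1 EAX, mirroring the masks above.
#include <cstdint>
#include <cstdio>

int main() {
  uint32_t eax = 0x000306A9;  // hypothetical EAX value, not a real query
  int stepping = eax & 0xF;
  int model = ((eax >> 4) & 0xF) + ((eax >> 12) & 0xF0);  // base + ext << 4
  int family = (eax >> 8) & 0xF;
  std::printf("family %d model 0x%X stepping %d\n", family, model, stepping);
}
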
diff --git a/deps/v8/src/base/debug/stack_trace_posix.cc b/deps/v8/src/base/debug/stack_trace_posix.cc
index 67f86c634f..ec3add1682 100644
--- a/deps/v8/src/base/debug/stack_trace_posix.cc
+++ b/deps/v8/src/base/debug/stack_trace_posix.cc
@@ -400,7 +400,7 @@ char* itoa_r(intptr_t i, char* buf, size_t sz, int base, size_t padding) {
if (n > sz) return nullptr;
if (base < 2 || base > 16) {
- buf[0] = '\000';
+ buf[0] = '\0';
return nullptr;
}
@@ -415,7 +415,7 @@ char* itoa_r(intptr_t i, char* buf, size_t sz, int base, size_t padding) {
// Make sure we can write the '-' character.
if (++n > sz) {
- buf[0] = '\000';
+ buf[0] = '\0';
return nullptr;
}
*start++ = '-';
@@ -427,7 +427,7 @@ char* itoa_r(intptr_t i, char* buf, size_t sz, int base, size_t padding) {
do {
// Make sure there is still enough space left in our output buffer.
if (++n > sz) {
- buf[0] = '\000';
+ buf[0] = '\0';
return nullptr;
}
@@ -439,7 +439,7 @@ char* itoa_r(intptr_t i, char* buf, size_t sz, int base, size_t padding) {
} while (j > 0 || padding > 0);
// Terminate the output with a NUL character.
- *ptr = '\000';
+ *ptr = '\0';
// Conversion to ASCII actually resulted in the digits being in reverse
// order. We can't easily generate them in forward order, as we can't tell
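
itoa_r avoids snprintf because it runs while handling a fault, where only async-signal-safe calls are allowed; it emits digits least-significant-first and relies on a reversal afterwards. A simplified, unsigned-only sketch of that scheme (the padding and negative handling of the real function are omitted):

// Simplified sketch of the reverse-then-swap conversion used by itoa_r.
#include <cstddef>
#include <cstdio>

static void utoa_base(unsigned v, char* buf, size_t sz, unsigned base) {
  static const char kDigits[] = "0123456789abcdef";
  size_t n = 0;
  do {
    if (n + 1 >= sz) { buf[0] = '\0'; return; }  // out of space
    buf[n++] = kDigits[v % base];                // least significant first
    v /= base;
  } while (v != 0);
  buf[n] = '\0';
  for (size_t i = 0, j = n - 1; i < j; ++i, --j) {  // reverse in place
    char t = buf[i]; buf[i] = buf[j]; buf[j] = t;
  }
}

int main() {
  char buf[16];
  utoa_base(48879, buf, sizeof(buf), 16);
  std::printf("%s\n", buf);  // beef
}
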
diff --git a/deps/v8/src/base/functional.cc b/deps/v8/src/base/functional.cc
index 80a7585bcc..dffb91f3cc 100644
--- a/deps/v8/src/base/functional.cc
+++ b/deps/v8/src/base/functional.cc
@@ -69,8 +69,8 @@ V8_INLINE size_t hash_value_unsigned(T v) {
// This code was taken from MurmurHash.
size_t hash_combine(size_t seed, size_t value) {
#if V8_HOST_ARCH_32_BIT
- const uint32_t c1 = 0xcc9e2d51;
- const uint32_t c2 = 0x1b873593;
+ const uint32_t c1 = 0xCC9E2D51;
+ const uint32_t c2 = 0x1B873593;
value *= c1;
value = bits::RotateRight32(value, 15);
@@ -78,9 +78,9 @@ size_t hash_combine(size_t seed, size_t value) {
seed ^= value;
seed = bits::RotateRight32(seed, 13);
- seed = seed * 5 + 0xe6546b64;
+ seed = seed * 5 + 0xE6546B64;
#else
- const uint64_t m = V8_UINT64_C(0xc6a4a7935bd1e995);
+ const uint64_t m = uint64_t{0xC6A4A7935BD1E995};
const uint32_t r = 47;
value *= m;
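
The hunk above ends mid-function; per the "taken from MurmurHash" comment, the 64-bit path is assumed here to continue with MurmurHash2's standard mixing steps (value ^= value >> 47; value *= m; seed ^= value; seed *= m). A standalone sketch under that assumption:

// Standalone sketch of the 64-bit hash_combine path; the steps after the
// visible "value *= m;" are assumed to follow MurmurHash2's finalizer.
#include <cstdint>
#include <cstdio>

static uint64_t HashCombine(uint64_t seed, uint64_t value) {
  const uint64_t m = uint64_t{0xC6A4A7935BD1E995};
  const uint32_t r = 47;
  value *= m;
  value ^= value >> r;
  value *= m;
  seed ^= value;
  seed *= m;
  return seed;
}

int main() {
  // Order matters: combining 1 then 2 differs from combining 2 then 1.
  std::printf("%016llx\n", static_cast<unsigned long long>(
                               HashCombine(HashCombine(0, 1), 2)));
  std::printf("%016llx\n", static_cast<unsigned long long>(
                               HashCombine(HashCombine(0, 2), 1)));
}
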
diff --git a/deps/v8/src/base/ieee754.cc b/deps/v8/src/base/ieee754.cc
index ca0c6d1314..54f7e2e6aa 100644
--- a/deps/v8/src/base/ieee754.cc
+++ b/deps/v8/src/base/ieee754.cc
@@ -225,16 +225,16 @@ int32_t __ieee754_rem_pio2(double x, double *y) {
z = 0;
GET_HIGH_WORD(hx, x); /* high word of x */
- ix = hx & 0x7fffffff;
- if (ix <= 0x3fe921fb) { /* |x| ~<= pi/4 , no need for reduction */
+ ix = hx & 0x7FFFFFFF;
+ if (ix <= 0x3FE921FB) { /* |x| ~<= pi/4 , no need for reduction */
y[0] = x;
y[1] = 0;
return 0;
}
- if (ix < 0x4002d97c) { /* |x| < 3pi/4, special case with n=+-1 */
+ if (ix < 0x4002D97C) { /* |x| < 3pi/4, special case with n=+-1 */
if (hx > 0) {
z = x - pio2_1;
- if (ix != 0x3ff921fb) { /* 33+53 bit pi is good enough */
+ if (ix != 0x3FF921FB) { /* 33+53 bit pi is good enough */
y[0] = z - pio2_1t;
y[1] = (z - y[0]) - pio2_1t;
} else { /* near pi/2, use 33+33+53 bit pi */
@@ -245,7 +245,7 @@ int32_t __ieee754_rem_pio2(double x, double *y) {
return 1;
} else { /* negative x */
z = x + pio2_1;
- if (ix != 0x3ff921fb) { /* 33+53 bit pi is good enough */
+ if (ix != 0x3FF921FB) { /* 33+53 bit pi is good enough */
y[0] = z + pio2_1t;
y[1] = (z - y[0]) + pio2_1t;
} else { /* near pi/2, use 33+33+53 bit pi */
@@ -256,7 +256,7 @@ int32_t __ieee754_rem_pio2(double x, double *y) {
return -1;
}
}
- if (ix <= 0x413921fb) { /* |x| ~<= 2^19*(pi/2), medium size */
+ if (ix <= 0x413921FB) { /* |x| ~<= 2^19*(pi/2), medium size */
t = fabs(x);
n = static_cast<int32_t>(t * invpio2 + half);
fn = static_cast<double>(n);
@@ -269,7 +269,7 @@ int32_t __ieee754_rem_pio2(double x, double *y) {
j = ix >> 20;
y[0] = r - w;
GET_HIGH_WORD(high, y[0]);
- i = j - ((high >> 20) & 0x7ff);
+ i = j - ((high >> 20) & 0x7FF);
if (i > 16) { /* 2nd iteration needed, good to 118 */
t = r;
w = fn * pio2_2;
@@ -277,7 +277,7 @@ int32_t __ieee754_rem_pio2(double x, double *y) {
w = fn * pio2_2t - ((t - r) - w);
y[0] = r - w;
GET_HIGH_WORD(high, y[0]);
- i = j - ((high >> 20) & 0x7ff);
+ i = j - ((high >> 20) & 0x7FF);
      if (i > 49) { /* 3rd iteration needed, 151 bits acc */
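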
t = r; /* will cover all possible cases */
w = fn * pio2_3;
@@ -299,7 +299,7 @@ int32_t __ieee754_rem_pio2(double x, double *y) {
/*
* all other (large) arguments
*/
- if (ix >= 0x7ff00000) { /* x is inf or NaN */
+ if (ix >= 0x7FF00000) { /* x is inf or NaN */
y[0] = y[1] = x - x;
return 0;
}
@@ -331,7 +331,7 @@ int32_t __ieee754_rem_pio2(double x, double *y) {
*
* Algorithm
* 1. Since cos(-x) = cos(x), we need only to consider positive x.
- * 2. if x < 2^-27 (hx<0x3e400000 0), return 1 with inexact if x!=0.
+ * 2. if x < 2^-27 (hx<0x3E400000 0), return 1 with inexact if x!=0.
* 3. cos(x) is approximated by a polynomial of degree 14 on
* [0,pi/4]
* 4 14
@@ -370,8 +370,8 @@ V8_INLINE double __kernel_cos(double x, double y) {
double a, iz, z, r, qx;
int32_t ix;
GET_HIGH_WORD(ix, x);
- ix &= 0x7fffffff; /* ix = |x|'s high word*/
- if (ix < 0x3e400000) { /* if x < 2**27 */
+ ix &= 0x7FFFFFFF; /* ix = |x|'s high word*/
+ if (ix < 0x3E400000) { /* if x < 2**27 */
if (static_cast<int>(x) == 0) return one; /* generate inexact */
}
z = x * x;
@@ -379,7 +379,7 @@ V8_INLINE double __kernel_cos(double x, double y) {
if (ix < 0x3FD33333) { /* if |x| < 0.3 */
return one - (0.5 * z - (z * r - x * y));
} else {
- if (ix > 0x3fe90000) { /* x > 0.78125 */
+ if (ix > 0x3FE90000) { /* x > 0.78125 */
qx = 0.28125;
} else {
INSERT_WORDS(qx, ix - 0x00200000, 0); /* x/4 */
@@ -585,16 +585,16 @@ recompute:
iq[i] = 0x1000000 - j;
}
} else {
- iq[i] = 0xffffff - j;
+ iq[i] = 0xFFFFFF - j;
}
}
if (q0 > 0) { /* rare case: chance is 1 in 12 */
switch (q0) {
case 1:
- iq[jz - 1] &= 0x7fffff;
+ iq[jz - 1] &= 0x7FFFFF;
break;
case 2:
- iq[jz - 1] &= 0x3fffff;
+ iq[jz - 1] &= 0x3FFFFF;
break;
}
}
@@ -706,7 +706,7 @@ recompute:
*
* Algorithm
* 1. Since sin(-x) = -sin(x), we need only to consider positive x.
- * 2. if x < 2^-27 (hx<0x3e400000 0), return x with inexact if x!=0.
+ * 2. if x < 2^-27 (hx<0x3E400000 0), return x with inexact if x!=0.
* 3. sin(x) is approximated by a polynomial of degree 13 on
* [0,pi/4]
* 3 13
@@ -738,8 +738,8 @@ V8_INLINE double __kernel_sin(double x, double y, int iy) {
double z, r, v;
int32_t ix;
GET_HIGH_WORD(ix, x);
- ix &= 0x7fffffff; /* high word of x */
- if (ix < 0x3e400000) { /* |x| < 2**-27 */
+ ix &= 0x7FFFFFFF; /* high word of x */
+ if (ix < 0x3E400000) { /* |x| < 2**-27 */
if (static_cast<int>(x) == 0) return x;
} /* generate inexact */
z = x * x;
@@ -761,7 +761,7 @@ V8_INLINE double __kernel_sin(double x, double y, int iy) {
*
* Algorithm
* 1. Since tan(-x) = -tan(x), we need only to consider positive x.
- * 2. if x < 2^-28 (hx<0x3e300000 0), return x with inexact if x!=0.
+ * 2. if x < 2^-28 (hx<0x3E300000 0), return x with inexact if x!=0.
 * 3. tan(x) is approximated by an odd polynomial of degree 27 on
* [0,0.67434]
* 3 27
@@ -813,8 +813,8 @@ double __kernel_tan(double x, double y, int iy) {
int32_t ix, hx;
GET_HIGH_WORD(hx, x); /* high word of x */
- ix = hx & 0x7fffffff; /* high word of |x| */
- if (ix < 0x3e300000) { /* x < 2**-28 */
+ ix = hx & 0x7FFFFFFF; /* high word of |x| */
+ if (ix < 0x3E300000) { /* x < 2**-28 */
if (static_cast<int>(x) == 0) { /* generate inexact */
uint32_t low;
GET_LOW_WORD(low, x);
@@ -934,11 +934,11 @@ double acos(double x) {
double z, p, q, r, w, s, c, df;
int32_t hx, ix;
GET_HIGH_WORD(hx, x);
- ix = hx & 0x7fffffff;
- if (ix >= 0x3ff00000) { /* |x| >= 1 */
+ ix = hx & 0x7FFFFFFF;
+ if (ix >= 0x3FF00000) { /* |x| >= 1 */
uint32_t lx;
GET_LOW_WORD(lx, x);
- if (((ix - 0x3ff00000) | lx) == 0) { /* |x|==1 */
+ if (((ix - 0x3FF00000) | lx) == 0) { /* |x|==1 */
if (hx > 0)
return 0.0; /* acos(1) = 0 */
else
@@ -946,8 +946,8 @@ double acos(double x) {
}
return (x - x) / (x - x); /* acos(|x|>1) is NaN */
}
- if (ix < 0x3fe00000) { /* |x| < 0.5 */
- if (ix <= 0x3c600000) return pio2_hi + pio2_lo; /*if|x|<2**-57*/
+ if (ix < 0x3FE00000) { /* |x| < 0.5 */
+ if (ix <= 0x3C600000) return pio2_hi + pio2_lo; /*if|x|<2**-57*/
z = x * x;
p = z * (pS0 + z * (pS1 + z * (pS2 + z * (pS3 + z * (pS4 + z * pS5)))));
q = one + z * (qS1 + z * (qS2 + z * (qS3 + z * qS4)));
@@ -996,15 +996,15 @@ double acosh(double x) {
int32_t hx;
uint32_t lx;
EXTRACT_WORDS(hx, lx, x);
- if (hx < 0x3ff00000) { /* x < 1 */
+ if (hx < 0x3FF00000) { /* x < 1 */
return (x - x) / (x - x);
- } else if (hx >= 0x41b00000) { /* x > 2**28 */
- if (hx >= 0x7ff00000) { /* x is inf of NaN */
+ } else if (hx >= 0x41B00000) { /* x > 2**28 */
+    if (hx >= 0x7FF00000) { /* x is inf or NaN */
return x + x;
} else {
return log(x) + ln2; /* acosh(huge)=log(2x) */
}
- } else if (((hx - 0x3ff00000) | lx) == 0) {
+ } else if (((hx - 0x3FF00000) | lx) == 0) {
return 0.0; /* acosh(1) = 0 */
} else if (hx > 0x40000000) { /* 2**28 > x > 2 */
t = x * x;
@@ -1067,15 +1067,15 @@ double asin(double x) {
t = 0;
GET_HIGH_WORD(hx, x);
- ix = hx & 0x7fffffff;
- if (ix >= 0x3ff00000) { /* |x|>= 1 */
+ ix = hx & 0x7FFFFFFF;
+ if (ix >= 0x3FF00000) { /* |x|>= 1 */
uint32_t lx;
GET_LOW_WORD(lx, x);
- if (((ix - 0x3ff00000) | lx) == 0) /* asin(1)=+-pi/2 with inexact */
+ if (((ix - 0x3FF00000) | lx) == 0) /* asin(1)=+-pi/2 with inexact */
return x * pio2_hi + x * pio2_lo;
return (x - x) / (x - x); /* asin(|x|>1) is NaN */
- } else if (ix < 0x3fe00000) { /* |x|<0.5 */
- if (ix < 0x3e400000) { /* if |x| < 2**-27 */
+ } else if (ix < 0x3FE00000) { /* |x|<0.5 */
+ if (ix < 0x3E400000) { /* if |x| < 2**-27 */
if (huge + x > one) return x; /* return x with inexact if x!=0*/
} else {
t = x * x;
@@ -1127,12 +1127,12 @@ double asinh(double x) {
double t, w;
int32_t hx, ix;
GET_HIGH_WORD(hx, x);
- ix = hx & 0x7fffffff;
- if (ix >= 0x7ff00000) return x + x; /* x is inf or NaN */
- if (ix < 0x3e300000) { /* |x|<2**-28 */
+ ix = hx & 0x7FFFFFFF;
+ if (ix >= 0x7FF00000) return x + x; /* x is inf or NaN */
+ if (ix < 0x3E300000) { /* |x|<2**-28 */
if (huge + x > one) return x; /* return x inexact except 0 */
}
- if (ix > 0x41b00000) { /* |x| > 2**28 */
+ if (ix > 0x41B00000) { /* |x| > 2**28 */
w = log(fabs(x)) + ln2;
} else if (ix > 0x40000000) { /* 2**28 > |x| > 2.0 */
t = fabs(x);
@@ -1202,26 +1202,26 @@ double atan(double x) {
int32_t ix, hx, id;
GET_HIGH_WORD(hx, x);
- ix = hx & 0x7fffffff;
+ ix = hx & 0x7FFFFFFF;
if (ix >= 0x44100000) { /* if |x| >= 2^66 */
uint32_t low;
GET_LOW_WORD(low, x);
- if (ix > 0x7ff00000 || (ix == 0x7ff00000 && (low != 0)))
+ if (ix > 0x7FF00000 || (ix == 0x7FF00000 && (low != 0)))
return x + x; /* NaN */
if (hx > 0)
return atanhi[3] + *(volatile double *)&atanlo[3];
else
return -atanhi[3] - *(volatile double *)&atanlo[3];
}
- if (ix < 0x3fdc0000) { /* |x| < 0.4375 */
- if (ix < 0x3e400000) { /* |x| < 2^-27 */
+ if (ix < 0x3FDC0000) { /* |x| < 0.4375 */
+ if (ix < 0x3E400000) { /* |x| < 2^-27 */
if (huge + x > one) return x; /* raise inexact */
}
id = -1;
} else {
x = fabs(x);
- if (ix < 0x3ff30000) { /* |x| < 1.1875 */
- if (ix < 0x3fe60000) { /* 7/16 <=|x|<11/16 */
+ if (ix < 0x3FF30000) { /* |x| < 1.1875 */
+ if (ix < 0x3FE60000) { /* 7/16 <=|x|<11/16 */
id = 0;
x = (2.0 * x - one) / (2.0 + x);
} else { /* 11/16<=|x|< 19/16 */
@@ -1294,14 +1294,14 @@ double atan2(double y, double x) {
uint32_t lx, ly;
EXTRACT_WORDS(hx, lx, x);
- ix = hx & 0x7fffffff;
+ ix = hx & 0x7FFFFFFF;
EXTRACT_WORDS(hy, ly, y);
- iy = hy & 0x7fffffff;
- if (((ix | ((lx | -static_cast<int32_t>(lx)) >> 31)) > 0x7ff00000) ||
- ((iy | ((ly | -static_cast<int32_t>(ly)) >> 31)) > 0x7ff00000)) {
+ iy = hy & 0x7FFFFFFF;
+ if (((ix | ((lx | -static_cast<int32_t>(lx)) >> 31)) > 0x7FF00000) ||
+ ((iy | ((ly | -static_cast<int32_t>(ly)) >> 31)) > 0x7FF00000)) {
return x + y; /* x or y is NaN */
}
- if (((hx - 0x3ff00000) | lx) == 0) return atan(y); /* x=1.0 */
+ if (((hx - 0x3FF00000) | lx) == 0) return atan(y); /* x=1.0 */
m = ((hy >> 31) & 1) | ((hx >> 30) & 2); /* 2*sign(x)+sign(y) */
/* when y = 0 */
@@ -1320,8 +1320,8 @@ double atan2(double y, double x) {
if ((ix | lx) == 0) return (hy < 0) ? -pi_o_2 - tiny : pi_o_2 + tiny;
/* when x is INF */
- if (ix == 0x7ff00000) {
- if (iy == 0x7ff00000) {
+ if (ix == 0x7FF00000) {
+ if (iy == 0x7FF00000) {
switch (m) {
case 0:
return pi_o_4 + tiny; /* atan(+INF,+INF) */
@@ -1346,7 +1346,7 @@ double atan2(double y, double x) {
}
}
/* when y is INF */
- if (iy == 0x7ff00000) return (hy < 0) ? -pi_o_2 - tiny : pi_o_2 + tiny;
+ if (iy == 0x7FF00000) return (hy < 0) ? -pi_o_2 - tiny : pi_o_2 + tiny;
/* compute y/x */
k = (iy - ix) >> 20;
@@ -1408,10 +1408,10 @@ double cos(double x) {
GET_HIGH_WORD(ix, x);
/* |x| ~< pi/4 */
- ix &= 0x7fffffff;
- if (ix <= 0x3fe921fb) {
+ ix &= 0x7FFFFFFF;
+ if (ix <= 0x3FE921FB) {
return __kernel_cos(x, z);
- } else if (ix >= 0x7ff00000) {
+ } else if (ix >= 0x7FF00000) {
/* cos(Inf or NaN) is NaN */
return x - x;
} else {
@@ -1497,18 +1497,18 @@ double exp(double x) {
one = 1.0,
halF[2] = {0.5, -0.5},
o_threshold = 7.09782712893383973096e+02, /* 0x40862E42, 0xFEFA39EF */
- u_threshold = -7.45133219101941108420e+02, /* 0xc0874910, 0xD52D3051 */
- ln2HI[2] = {6.93147180369123816490e-01, /* 0x3fe62e42, 0xfee00000 */
- -6.93147180369123816490e-01}, /* 0xbfe62e42, 0xfee00000 */
- ln2LO[2] = {1.90821492927058770002e-10, /* 0x3dea39ef, 0x35793c76 */
- -1.90821492927058770002e-10}, /* 0xbdea39ef, 0x35793c76 */
- invln2 = 1.44269504088896338700e+00, /* 0x3ff71547, 0x652b82fe */
+ u_threshold = -7.45133219101941108420e+02, /* 0xC0874910, 0xD52D3051 */
+ ln2HI[2] = {6.93147180369123816490e-01, /* 0x3FE62E42, 0xFEE00000 */
+ -6.93147180369123816490e-01}, /* 0xBFE62E42, 0xFEE00000 */
+ ln2LO[2] = {1.90821492927058770002e-10, /* 0x3DEA39EF, 0x35793C76 */
+ -1.90821492927058770002e-10}, /* 0xBDEA39EF, 0x35793C76 */
+ invln2 = 1.44269504088896338700e+00, /* 0x3FF71547, 0x652B82FE */
P1 = 1.66666666666666019037e-01, /* 0x3FC55555, 0x5555553E */
P2 = -2.77777777770155933842e-03, /* 0xBF66C16C, 0x16BEBD93 */
P3 = 6.61375632143793436117e-05, /* 0x3F11566A, 0xAF25DE2C */
P4 = -1.65339022054652515390e-06, /* 0xBEBBBD41, 0xC5D26BF1 */
P5 = 4.13813679705723846039e-08, /* 0x3E663769, 0x72BEA4D0 */
- E = 2.718281828459045; /* 0x4005bf0a, 0x8b145769 */
+ E = 2.718281828459045; /* 0x4005BF0A, 0x8B145769 */
static volatile double
huge = 1.0e+300,
@@ -1521,14 +1521,14 @@ double exp(double x) {
GET_HIGH_WORD(hx, x);
xsb = (hx >> 31) & 1; /* sign bit of x */
- hx &= 0x7fffffff; /* high word of |x| */
+ hx &= 0x7FFFFFFF; /* high word of |x| */
/* filter out non-finite argument */
if (hx >= 0x40862E42) { /* if |x|>=709.78... */
- if (hx >= 0x7ff00000) {
+ if (hx >= 0x7FF00000) {
uint32_t lx;
GET_LOW_WORD(lx, x);
- if (((hx & 0xfffff) | lx) != 0)
+ if (((hx & 0xFFFFF) | lx) != 0)
return x + x; /* NaN */
else
return (xsb == 0) ? x : 0.0; /* exp(+-inf)={inf,0} */
@@ -1538,7 +1538,7 @@ double exp(double x) {
}
/* argument reduction */
- if (hx > 0x3fd62e42) { /* if |x| > 0.5 ln2 */
+ if (hx > 0x3FD62E42) { /* if |x| > 0.5 ln2 */
if (hx < 0x3FF0A2B2) { /* and |x| < 1.5 ln2 */
/* TODO(rtoy): We special case exp(1) here to return the correct
* value of E, as the computation below would get the last bit
@@ -1555,7 +1555,7 @@ double exp(double x) {
lo = t * ln2LO[0];
}
STRICT_ASSIGN(double, x, hi - lo);
- } else if (hx < 0x3e300000) { /* when |x|<2**-28 */
+ } else if (hx < 0x3E300000) { /* when |x|<2**-28 */
if (huge + x > one) return one + x; /* trigger inexact */
} else {
k = 0;
@@ -1564,9 +1564,9 @@ double exp(double x) {
/* x is now in primary range */
t = x * x;
if (k >= -1021) {
- INSERT_WORDS(twopk, 0x3ff00000 + (k << 20), 0);
+ INSERT_WORDS(twopk, 0x3FF00000 + (k << 20), 0);
} else {
- INSERT_WORDS(twopk, 0x3ff00000 + ((k + 1000) << 20), 0);
+ INSERT_WORDS(twopk, 0x3FF00000 + ((k + 1000) << 20), 0);
}
c = x - t * (P1 + t * (P2 + t * (P3 + t * (P4 + t * P5))));
if (k == 0) {
@@ -1607,13 +1607,13 @@ double atanh(double x) {
int32_t hx, ix;
uint32_t lx;
EXTRACT_WORDS(hx, lx, x);
- ix = hx & 0x7fffffff;
- if ((ix | ((lx | -static_cast<int32_t>(lx)) >> 31)) > 0x3ff00000) /* |x|>1 */
+ ix = hx & 0x7FFFFFFF;
+ if ((ix | ((lx | -static_cast<int32_t>(lx)) >> 31)) > 0x3FF00000) /* |x|>1 */
return (x - x) / (x - x);
- if (ix == 0x3ff00000) return x / zero;
- if (ix < 0x3e300000 && (huge + x) > zero) return x; /* x<2**-28 */
+ if (ix == 0x3FF00000) return x / zero;
+ if (ix < 0x3E300000 && (huge + x) > zero) return x; /* x<2**-28 */
SET_HIGH_WORD(x, ix);
- if (ix < 0x3fe00000) { /* x < 0.5 */
+ if (ix < 0x3FE00000) { /* x < 0.5 */
t = x + x;
t = 0.5 * log1p(t + t * x / (one - x));
} else {
@@ -1699,21 +1699,21 @@ double log(double x) {
k = 0;
if (hx < 0x00100000) { /* x < 2**-1022 */
- if (((hx & 0x7fffffff) | lx) == 0)
+ if (((hx & 0x7FFFFFFF) | lx) == 0)
return -two54 / vzero; /* log(+-0)=-inf */
if (hx < 0) return (x - x) / zero; /* log(-#) = NaN */
k -= 54;
x *= two54; /* subnormal number, scale up x */
GET_HIGH_WORD(hx, x);
}
- if (hx >= 0x7ff00000) return x + x;
+ if (hx >= 0x7FF00000) return x + x;
k += (hx >> 20) - 1023;
- hx &= 0x000fffff;
- i = (hx + 0x95f64) & 0x100000;
- SET_HIGH_WORD(x, hx | (i ^ 0x3ff00000)); /* normalize x or x/2 */
+ hx &= 0x000FFFFF;
+ i = (hx + 0x95F64) & 0x100000;
+ SET_HIGH_WORD(x, hx | (i ^ 0x3FF00000)); /* normalize x or x/2 */
k += (i >> 20);
f = x - 1.0;
- if ((0x000fffff & (2 + hx)) < 3) { /* -2**-20 <= f < 2**-20 */
+ if ((0x000FFFFF & (2 + hx)) < 3) { /* -2**-20 <= f < 2**-20 */
if (f == zero) {
if (k == 0) {
return zero;
@@ -1733,9 +1733,9 @@ double log(double x) {
s = f / (2.0 + f);
dk = static_cast<double>(k);
z = s * s;
- i = hx - 0x6147a;
+ i = hx - 0x6147A;
w = z * z;
- j = 0x6b851 - hx;
+ j = 0x6B851 - hx;
t1 = w * (Lg2 + w * (Lg4 + w * Lg6));
t2 = z * (Lg1 + w * (Lg3 + w * (Lg5 + w * Lg7)));
i |= j;
@@ -1838,30 +1838,30 @@ double log1p(double x) {
int32_t k, hx, hu, ax;
GET_HIGH_WORD(hx, x);
- ax = hx & 0x7fffffff;
+ ax = hx & 0x7FFFFFFF;
k = 1;
if (hx < 0x3FDA827A) { /* 1+x < sqrt(2)+ */
- if (ax >= 0x3ff00000) { /* x <= -1.0 */
+ if (ax >= 0x3FF00000) { /* x <= -1.0 */
if (x == -1.0)
return -two54 / vzero; /* log1p(-1)=+inf */
else
return (x - x) / (x - x); /* log1p(x<-1)=NaN */
}
- if (ax < 0x3e200000) { /* |x| < 2**-29 */
+ if (ax < 0x3E200000) { /* |x| < 2**-29 */
if (two54 + x > zero /* raise inexact */
- && ax < 0x3c900000) /* |x| < 2**-54 */
+ && ax < 0x3C900000) /* |x| < 2**-54 */
return x;
else
return x - x * x * 0.5;
}
- if (hx > 0 || hx <= static_cast<int32_t>(0xbfd2bec4)) {
+ if (hx > 0 || hx <= static_cast<int32_t>(0xBFD2BEC4)) {
k = 0;
f = x;
hu = 1;
} /* sqrt(2)/2- <= 1+x < sqrt(2)+ */
}
- if (hx >= 0x7ff00000) return x + x;
+ if (hx >= 0x7FF00000) return x + x;
if (k != 0) {
if (hx < 0x43400000) {
STRICT_ASSIGN(double, u, 1.0 + x);
@@ -1875,7 +1875,7 @@ double log1p(double x) {
k = (hu >> 20) - 1023;
c = 0;
}
- hu &= 0x000fffff;
+ hu &= 0x000FFFFF;
/*
* The approximation to sqrt(2) used in thresholds is not
 * critical. However, the ones used above must give less
 * strict bounds than the one here so that the k==0 case is
 * never reached from here, since here we have committed to
* never reached from here, since here we have committed to
* using the correction term but don't use it if k==0.
*/
- if (hu < 0x6a09e) { /* u ~< sqrt(2) */
- SET_HIGH_WORD(u, hu | 0x3ff00000); /* normalize u */
+ if (hu < 0x6A09E) { /* u ~< sqrt(2) */
+ SET_HIGH_WORD(u, hu | 0x3FF00000); /* normalize u */
} else {
k += 1;
- SET_HIGH_WORD(u, hu | 0x3fe00000); /* normalize u/2 */
+ SET_HIGH_WORD(u, hu | 0x3FE00000); /* normalize u/2 */
hu = (0x00100000 - hu) >> 2;
}
f = u - 1.0;
@@ -2012,8 +2012,8 @@ static inline double k_log1p(double f) {
double log2(double x) {
static const double
two54 = 1.80143985094819840000e+16, /* 0x43500000, 0x00000000 */
- ivln2hi = 1.44269504072144627571e+00, /* 0x3ff71547, 0x65200000 */
- ivln2lo = 1.67517131648865118353e-10; /* 0x3de705fc, 0x2eefa200 */
+ ivln2hi = 1.44269504072144627571e+00, /* 0x3FF71547, 0x65200000 */
+ ivln2lo = 1.67517131648865118353e-10; /* 0x3DE705FC, 0x2EEFA200 */
static const double zero = 0.0;
static volatile double vzero = 0.0;
@@ -2026,19 +2026,19 @@ double log2(double x) {
k = 0;
if (hx < 0x00100000) { /* x < 2**-1022 */
- if (((hx & 0x7fffffff) | lx) == 0)
+ if (((hx & 0x7FFFFFFF) | lx) == 0)
return -two54 / vzero; /* log(+-0)=-inf */
if (hx < 0) return (x - x) / zero; /* log(-#) = NaN */
k -= 54;
x *= two54; /* subnormal number, scale up x */
GET_HIGH_WORD(hx, x);
}
- if (hx >= 0x7ff00000) return x + x;
- if (hx == 0x3ff00000 && lx == 0) return zero; /* log(1) = +0 */
+ if (hx >= 0x7FF00000) return x + x;
+ if (hx == 0x3FF00000 && lx == 0) return zero; /* log(1) = +0 */
k += (hx >> 20) - 1023;
- hx &= 0x000fffff;
- i = (hx + 0x95f64) & 0x100000;
- SET_HIGH_WORD(x, hx | (i ^ 0x3ff00000)); /* normalize x or x/2 */
+ hx &= 0x000FFFFF;
+ i = (hx + 0x95F64) & 0x100000;
+ SET_HIGH_WORD(x, hx | (i ^ 0x3FF00000)); /* normalize x or x/2 */
k += (i >> 20);
y = static_cast<double>(k);
f = x - 1.0;
@@ -2133,7 +2133,7 @@ double log10(double x) {
k = 0;
if (hx < 0x00100000) { /* x < 2**-1022 */
- if (((hx & 0x7fffffff) | lx) == 0)
+ if (((hx & 0x7FFFFFFF) | lx) == 0)
return -two54 / vzero; /* log(+-0)=-inf */
if (hx < 0) return (x - x) / zero; /* log(-#) = NaN */
k -= 54;
@@ -2141,12 +2141,12 @@ double log10(double x) {
GET_HIGH_WORD(hx, x);
GET_LOW_WORD(lx, x);
}
- if (hx >= 0x7ff00000) return x + x;
- if (hx == 0x3ff00000 && lx == 0) return zero; /* log(1) = +0 */
+ if (hx >= 0x7FF00000) return x + x;
+ if (hx == 0x3FF00000 && lx == 0) return zero; /* log(1) = +0 */
k += (hx >> 20) - 1023;
i = (k & 0x80000000) >> 31;
- hx = (hx & 0x000fffff) | ((0x3ff - i) << 20);
+ hx = (hx & 0x000FFFFF) | ((0x3FF - i) << 20);
y = k + i;
SET_HIGH_WORD(x, hx);
SET_LOW_WORD(x, lx);
@@ -2254,9 +2254,9 @@ double expm1(double x) {
one = 1.0,
tiny = 1.0e-300,
o_threshold = 7.09782712893383973096e+02, /* 0x40862E42, 0xFEFA39EF */
- ln2_hi = 6.93147180369123816490e-01, /* 0x3fe62e42, 0xfee00000 */
- ln2_lo = 1.90821492927058770002e-10, /* 0x3dea39ef, 0x35793c76 */
- invln2 = 1.44269504088896338700e+00, /* 0x3ff71547, 0x652b82fe */
+ ln2_hi = 6.93147180369123816490e-01, /* 0x3FE62E42, 0xFEE00000 */
+ ln2_lo = 1.90821492927058770002e-10, /* 0x3DEA39EF, 0x35793C76 */
+ invln2 = 1.44269504088896338700e+00, /* 0x3FF71547, 0x652B82FE */
/* Scaled Q's: Qn_here = 2**n * Qn_above, for R(2*z) where z = hxs =
x*x/2: */
Q1 = -3.33333333333331316428e-02, /* BFA11111 111110F4 */
@@ -2273,15 +2273,15 @@ double expm1(double x) {
GET_HIGH_WORD(hx, x);
xsb = hx & 0x80000000; /* sign bit of x */
- hx &= 0x7fffffff; /* high word of |x| */
+ hx &= 0x7FFFFFFF; /* high word of |x| */
/* filter out huge and non-finite argument */
if (hx >= 0x4043687A) { /* if |x|>=56*ln2 */
if (hx >= 0x40862E42) { /* if |x|>=709.78... */
- if (hx >= 0x7ff00000) {
+ if (hx >= 0x7FF00000) {
uint32_t low;
GET_LOW_WORD(low, x);
- if (((hx & 0xfffff) | low) != 0)
+ if (((hx & 0xFFFFF) | low) != 0)
return x + x; /* NaN */
else
return (xsb == 0) ? x : -1.0; /* exp(+-inf)={inf,-1} */
@@ -2295,7 +2295,7 @@ double expm1(double x) {
}
/* argument reduction */
- if (hx > 0x3fd62e42) { /* if |x| > 0.5 ln2 */
+ if (hx > 0x3FD62E42) { /* if |x| > 0.5 ln2 */
if (hx < 0x3FF0A2B2) { /* and |x| < 1.5 ln2 */
if (xsb == 0) {
hi = x - ln2_hi;
@@ -2314,7 +2314,7 @@ double expm1(double x) {
}
STRICT_ASSIGN(double, x, hi - lo);
c = (hi - x) - lo;
- } else if (hx < 0x3c900000) { /* when |x|<2**-54, return x */
+ } else if (hx < 0x3C900000) { /* when |x|<2**-54, return x */
t = huge + x; /* return x with inexact flags when x!=0 */
return x - (t - (huge + x));
} else {
@@ -2330,7 +2330,7 @@ double expm1(double x) {
if (k == 0) {
return x - (x * e - hxs); /* c is 0 */
} else {
- INSERT_WORDS(twopk, 0x3ff00000 + (k << 20), 0); /* 2^k */
+ INSERT_WORDS(twopk, 0x3FF00000 + (k << 20), 0); /* 2^k */
e = (x * (e - c) - c);
e -= hxs;
if (k == -1) return 0.5 * (x - e) - 0.5;
@@ -2353,11 +2353,11 @@ double expm1(double x) {
}
t = one;
if (k < 20) {
- SET_HIGH_WORD(t, 0x3ff00000 - (0x200000 >> k)); /* t=1-2^-k */
+ SET_HIGH_WORD(t, 0x3FF00000 - (0x200000 >> k)); /* t=1-2^-k */
y = t - (e - x);
y = y * twopk;
} else {
- SET_HIGH_WORD(t, ((0x3ff - k) << 20)); /* 2^-k */
+ SET_HIGH_WORD(t, ((0x3FF - k) << 20)); /* 2^-k */
y = x - (e + t);
y += one;
y = y * twopk;
@@ -2372,11 +2372,11 @@ double cbrt(double x) {
B2 = 696219795; /* B2 = (1023-1023/3-54/3-0.03306235651)*2**20 */
/* |1/cbrt(x) - p(x)| < 2**-23.5 (~[-7.93e-8, 7.929e-8]). */
- static const double P0 = 1.87595182427177009643, /* 0x3ffe03e6, 0x0f61e692 */
- P1 = -1.88497979543377169875, /* 0xbffe28e0, 0x92f02420 */
- P2 = 1.621429720105354466140, /* 0x3ff9f160, 0x4a49d6c2 */
- P3 = -0.758397934778766047437, /* 0xbfe844cb, 0xbee751d9 */
- P4 = 0.145996192886612446982; /* 0x3fc2b000, 0xd4e4edd7 */
+ static const double P0 = 1.87595182427177009643, /* 0x3FFE03E6, 0x0F61E692 */
+ P1 = -1.88497979543377169875, /* 0xBFFE28E0, 0x92F02420 */
+ P2 = 1.621429720105354466140, /* 0x3FF9F160, 0x4A49D6C2 */
+ P3 = -0.758397934778766047437, /* 0xBFE844CB, 0xBEE751D9 */
+ P4 = 0.145996192886612446982; /* 0x3FC2B000, 0xD4E4EDD7 */
int32_t hx;
union {
@@ -2390,7 +2390,7 @@ double cbrt(double x) {
EXTRACT_WORDS(hx, low, x);
sign = hx & 0x80000000; /* sign= sign(x) */
hx ^= sign;
- if (hx >= 0x7ff00000) return (x + x); /* cbrt(NaN,INF) is itself */
+ if (hx >= 0x7FF00000) return (x + x); /* cbrt(NaN,INF) is itself */
/*
* Rough cbrt to 5 bits:
@@ -2412,7 +2412,7 @@ double cbrt(double x) {
SET_HIGH_WORD(t, 0x43500000); /* set t= 2**54 */
t *= x;
GET_HIGH_WORD(high, t);
- INSERT_WORDS(t, sign | ((high & 0x7fffffff) / 3 + B2), 0);
+ INSERT_WORDS(t, sign | ((high & 0x7FFFFFFF) / 3 + B2), 0);
} else {
INSERT_WORDS(t, sign | (hx / 3 + B1), 0);
}
@@ -2441,7 +2441,7 @@ double cbrt(double x) {
* before the final error is larger than 0.667 ulps.
*/
u.value = t;
- u.bits = (u.bits + 0x80000000) & 0xffffffffc0000000ULL;
+ u.bits = (u.bits + 0x80000000) & 0xFFFFFFFFC0000000ULL;
t = u.value;
/* one step Newton iteration to 53 bits with error < 0.667 ulps */
@@ -2492,10 +2492,10 @@ double sin(double x) {
GET_HIGH_WORD(ix, x);
/* |x| ~< pi/4 */
- ix &= 0x7fffffff;
- if (ix <= 0x3fe921fb) {
+ ix &= 0x7FFFFFFF;
+ if (ix <= 0x3FE921FB) {
return __kernel_sin(x, z, 0);
- } else if (ix >= 0x7ff00000) {
+ } else if (ix >= 0x7FF00000) {
/* sin(Inf or NaN) is NaN */
return x - x;
} else {
@@ -2551,10 +2551,10 @@ double tan(double x) {
GET_HIGH_WORD(ix, x);
/* |x| ~< pi/4 */
- ix &= 0x7fffffff;
- if (ix <= 0x3fe921fb) {
+ ix &= 0x7FFFFFFF;
+ if (ix <= 0x3FE921FB) {
return __kernel_tan(x, z, 1);
- } else if (ix >= 0x7ff00000) {
+ } else if (ix >= 0x7FF00000) {
/* tan(Inf or NaN) is NaN */
return x - x; /* NaN */
} else {
@@ -2596,14 +2596,14 @@ double cosh(double x) {
/* High word of |x|. */
GET_HIGH_WORD(ix, x);
- ix &= 0x7fffffff;
+ ix &= 0x7FFFFFFF;
// |x| in [0,0.5*log2], return 1+expm1(|x|)^2/(2*exp(|x|))
- if (ix < 0x3fd62e43) {
+ if (ix < 0x3FD62E43) {
double t = expm1(fabs(x));
double w = one + t;
// For |x| < 2^-55, cosh(x) = 1
- if (ix < 0x3c800000) return w;
+ if (ix < 0x3C800000) return w;
return one + (t * t) / (w + w);
}
@@ -2614,7 +2614,7 @@ double cosh(double x) {
}
// |x| in [22, log(maxdouble)], return half*exp(|x|)
- if (ix < 0x40862e42) return half * exp(fabs(x));
+ if (ix < 0x40862E42) return half * exp(fabs(x));
// |x| in [log(maxdouble), overflowthreshold]
if (fabs(x) <= KCOSH_OVERFLOW) {
@@ -2624,7 +2624,7 @@ double cosh(double x) {
}
/* x is INF or NaN */
- if (ix >= 0x7ff00000) return x * x;
+ if (ix >= 0x7FF00000) return x * x;
// |x| > overflowthreshold.
return huge * huge;
@@ -2653,7 +2653,7 @@ double sinh(double x) {
static const double KSINH_OVERFLOW = 710.4758600739439,
TWO_M28 =
3.725290298461914e-9, // 2^-28, empty lower half
- LOG_MAXD = 709.7822265625; // 0x40862e42 00000000, empty lower half
+ LOG_MAXD = 709.7822265625; // 0x40862E42 00000000, empty lower half
static const double shuge = 1.0e307;
double h = (x < 0) ? -0.5 : 0.5;
@@ -2712,10 +2712,10 @@ double tanh(double x) {
int32_t jx, ix;
GET_HIGH_WORD(jx, x);
- ix = jx & 0x7fffffff;
+ ix = jx & 0x7FFFFFFF;
/* x is INF or NaN */
- if (ix >= 0x7ff00000) {
+ if (ix >= 0x7FF00000) {
if (jx >= 0)
return one / x + one; /* tanh(+-inf)=+-1 */
else
@@ -2724,10 +2724,10 @@ double tanh(double x) {
/* |x| < 22 */
if (ix < 0x40360000) { /* |x|<22 */
- if (ix < 0x3e300000) { /* |x|<2**-28 */
+ if (ix < 0x3E300000) { /* |x|<2**-28 */
if (huge + x > one) return x; /* tanh(tiny) = tiny with inexact */
}
- if (ix >= 0x3ff00000) { /* |x|>=1 */
+ if (ix >= 0x3FF00000) { /* |x|>=1 */
t = expm1(two * fabs(x));
z = one - two / (t + two);
} else {
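
All of the comparisons renamed in this file operate on the high 32 bits of an IEEE-754 double: bit 31 is the sign, bits 30-20 the exponent, bits 19-0 the top of the mantissa. Masking with 0x7FFFFFFF strips the sign, and 0x7FF00000 is the all-ones exponent marking inf/NaN, so magnitude classes can be compared as plain integers. A sketch of the extraction GET_HIGH_WORD performs, with memcpy standing in for the macro's word access:

// Sketch of the high-word extraction behind GET_HIGH_WORD.
#include <cstdint>
#include <cstdio>
#include <cstring>

static uint32_t HighWord(double x) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof(bits));  // load all 64 bits, take the top word
  return static_cast<uint32_t>(bits >> 32);
}

int main() {
  uint32_t hx = HighWord(1.0);
  std::printf("high word of 1.0: 0x%08X\n", hx);              // 0x3FF00000
  std::printf("|x| high word:    0x%08X\n", hx & 0x7FFFFFFF);
  std::printf("is finite: %d\n", (hx & 0x7FFFFFFF) < 0x7FF00000);  // 1
}
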
diff --git a/deps/v8/src/base/lazy-instance.h b/deps/v8/src/base/lazy-instance.h
index 92f9b309a7..e965382b8d 100644
--- a/deps/v8/src/base/lazy-instance.h
+++ b/deps/v8/src/base/lazy-instance.h
@@ -168,17 +168,13 @@ struct LazyInstanceImpl {
typedef typename AllocationTrait::StorageType StorageType;
private:
- static void InitInstance(StorageType* storage) {
- AllocationTrait::template InitStorageUsingTrait<CreateTrait>(storage);
+ static void InitInstance(void* storage) {
+ AllocationTrait::template InitStorageUsingTrait<CreateTrait>(
+ static_cast<StorageType*>(storage));
}
void Init() const {
- InitOnceTrait::Init(
- &once_,
- // Casts to void* are needed here to avoid breaking strict aliasing
- // rules.
- reinterpret_cast<void(*)(void*)>(&InitInstance), // NOLINT
- reinterpret_cast<void*>(&storage_));
+ InitOnceTrait::Init(&once_, &InitInstance, static_cast<void*>(&storage_));
}
public:
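
The old code called a void(StorageType*) function through a reinterpret_cast'ed void(void*) pointer, which is undefined behavior when invoked through the wrong type; the rewrite keeps the pointer type exact and moves the cast inside the callee. A minimal illustration of the corrected shape (names here are illustrative):

// Take void*, cast inside: the function pointer handed to the
// once-initializer then has exactly the type it is called through.
#include <cstdio>

struct Storage { int value; };

static void InitInstance(void* storage) {
  static_cast<Storage*>(storage)->value = 42;  // cast inside, not at the call
}

static void CallWithVoidPtr(void (*fn)(void*), void* arg) { fn(arg); }

int main() {
  Storage s{0};
  CallWithVoidPtr(&InitInstance, &s);
  std::printf("%d\n", s.value);  // 42
}
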
diff --git a/deps/v8/src/base/logging.cc b/deps/v8/src/base/logging.cc
index 13fbec0e90..ad5349ac7e 100644
--- a/deps/v8/src/base/logging.cc
+++ b/deps/v8/src/base/logging.cc
@@ -119,8 +119,6 @@ DEFINE_CHECK_OP_IMPL(GT)
} // namespace base
} // namespace v8
-
-// Contains protection against recursive calls (faults while handling faults).
void V8_Fatal(const char* file, int line, const char* format, ...) {
fflush(stdout);
fflush(stderr);
diff --git a/deps/v8/src/base/logging.h b/deps/v8/src/base/logging.h
index 9f3a1e6991..5275fdc6a6 100644
--- a/deps/v8/src/base/logging.h
+++ b/deps/v8/src/base/logging.h
@@ -20,23 +20,13 @@
V8_BASE_EXPORT V8_NOINLINE void V8_Dcheck(const char* file, int line,
const char* message);
-// The FATAL, UNREACHABLE and UNIMPLEMENTED macros are useful during
-// development, but they should not be relied on in the final product.
#ifdef DEBUG
-#define FATAL(msg) \
- V8_Fatal(__FILE__, __LINE__, "%s", (msg))
-#define UNIMPLEMENTED() \
- V8_Fatal(__FILE__, __LINE__, "unimplemented code")
-#define UNREACHABLE() \
- V8_Fatal(__FILE__, __LINE__, "unreachable code")
+#define FATAL(...) V8_Fatal(__FILE__, __LINE__, __VA_ARGS__)
#else
-#define FATAL(msg) \
- V8_Fatal("", 0, "%s", (msg))
-#define UNIMPLEMENTED() \
- V8_Fatal("", 0, "unimplemented code")
-#define UNREACHABLE() V8_Fatal("", 0, "unreachable code")
+#define FATAL(...) V8_Fatal("", 0, __VA_ARGS__)
#endif
-
+#define UNIMPLEMENTED() FATAL("unimplemented code")
+#define UNREACHABLE() FATAL("unreachable code")
namespace v8 {
namespace base {
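
FATAL is now printf-style variadic, so call sites can format details inline instead of pre-building a message, and UNIMPLEMENTED/UNREACHABLE collapse into one-line wrappers over it. A usage sketch with a stand-in for V8_Fatal:

// Usage sketch for a printf-style FATAL macro; MyFatal stands in for
// V8_Fatal and simply prints the location and message, then aborts.
#include <cstdarg>
#include <cstdio>
#include <cstdlib>

[[noreturn]] static void MyFatal(const char* file, int line,
                                 const char* format, ...) {
  std::va_list args;
  va_start(args, format);
  std::fprintf(stderr, "%s:%d: ", file, line);
  std::vfprintf(stderr, format, args);
  std::fputc('\n', stderr);
  va_end(args);
  std::abort();
}

#define MY_FATAL(...) MyFatal(__FILE__, __LINE__, __VA_ARGS__)

int main(int argc, char**) {
  if (argc > 64) MY_FATAL("too many arguments: %d", argc);
  return 0;
}
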
diff --git a/deps/v8/src/base/macros.h b/deps/v8/src/base/macros.h
index 5aa8eff68d..a265408d91 100644
--- a/deps/v8/src/base/macros.h
+++ b/deps/v8/src/base/macros.h
@@ -5,6 +5,8 @@
#ifndef V8_BASE_MACROS_H_
#define V8_BASE_MACROS_H_
+#include <limits>
+
#include "src/base/compiler-specific.h"
#include "src/base/format-macros.h"
#include "src/base/logging.h"
@@ -167,18 +169,23 @@ V8_INLINE Dest bit_cast(Source const& source) {
#define DISABLE_ASAN
#endif
-// DISABLE_CFI_PERF -- Disable Control Flow Integrity checks for Perf reasons.
-#if !defined(DISABLE_CFI_PERF)
+// Helper macro to define no_sanitize attributes only with clang.
#if defined(__clang__) && defined(__has_attribute)
#if __has_attribute(no_sanitize)
-#define DISABLE_CFI_PERF __attribute__((no_sanitize("cfi")))
-#endif
+#define CLANG_NO_SANITIZE(what) __attribute__((no_sanitize(what)))
#endif
#endif
-#if !defined(DISABLE_CFI_PERF)
-#define DISABLE_CFI_PERF
+#if !defined(CLANG_NO_SANITIZE)
+#define CLANG_NO_SANITIZE(what)
#endif
+// DISABLE_CFI_PERF -- Disable Control Flow Integrity checks for Perf reasons.
+#define DISABLE_CFI_PERF CLANG_NO_SANITIZE("cfi")
+
+// DISABLE_CFI_ICALL -- Disable Control Flow Integrity indirect call checks,
+// useful because calls into JITed code cannot be CFI verified.
+#define DISABLE_CFI_ICALL CLANG_NO_SANITIZE("cfi-icall")
+
#if V8_CC_GNU
#define V8_IMMEDIATE_CRASH() __builtin_trap()
#else
@@ -214,34 +221,16 @@ struct Use {
// than defining __STDC_CONSTANT_MACROS before including <stdint.h>, and it
// works on compilers that don't have it (like MSVC).
#if V8_CC_MSVC
-# define V8_UINT64_C(x) (x ## UI64)
-# define V8_INT64_C(x) (x ## I64)
# if V8_HOST_ARCH_64_BIT
-# define V8_INTPTR_C(x) (x ## I64)
# define V8_PTR_PREFIX "ll"
# else
-# define V8_INTPTR_C(x) (x)
# define V8_PTR_PREFIX ""
# endif // V8_HOST_ARCH_64_BIT
#elif V8_CC_MINGW64
-# define V8_UINT64_C(x) (x ## ULL)
-# define V8_INT64_C(x) (x ## LL)
-# define V8_INTPTR_C(x) (x ## LL)
# define V8_PTR_PREFIX "I64"
#elif V8_HOST_ARCH_64_BIT
-# if V8_OS_MACOSX || V8_OS_OPENBSD
-# define V8_UINT64_C(x) (x ## ULL)
-# define V8_INT64_C(x) (x ## LL)
-# else
-# define V8_UINT64_C(x) (x ## UL)
-# define V8_INT64_C(x) (x ## L)
-# endif
-# define V8_INTPTR_C(x) (x ## L)
# define V8_PTR_PREFIX "l"
#else
-# define V8_UINT64_C(x) (x ## ULL)
-# define V8_INT64_C(x) (x ## LL)
-# define V8_INTPTR_C(x) (x)
#if V8_OS_AIX
#define V8_PTR_PREFIX "l"
#else
@@ -329,4 +318,24 @@ inline void* AlignedAddress(void* address, size_t alignment) {
~static_cast<uintptr_t>(alignment - 1));
}
+// Bounds checks for float to integer conversions, which truncate. Hence,
+// the range of legal values is (min - 1, max + 1).
+template <typename int_t, typename float_t, typename biggest_int_t = int64_t>
+bool is_inbounds(float_t v) {
+ static_assert(sizeof(int_t) < sizeof(biggest_int_t),
+ "int_t can't be bounds checked by the compiler");
+ constexpr float_t kLowerBound =
+ static_cast<float_t>(std::numeric_limits<int_t>::min()) - 1;
+ constexpr float_t kUpperBound =
+ static_cast<float_t>(std::numeric_limits<int_t>::max()) + 1;
+ constexpr bool kLowerBoundIsMin =
+ static_cast<biggest_int_t>(kLowerBound) ==
+ static_cast<biggest_int_t>(std::numeric_limits<int_t>::min());
+ constexpr bool kUpperBoundIsMax =
+ static_cast<biggest_int_t>(kUpperBound) ==
+ static_cast<biggest_int_t>(std::numeric_limits<int_t>::max());
+ return (kLowerBoundIsMin ? (kLowerBound <= v) : (kLowerBound < v)) &&
+ (kUpperBoundIsMax ? (v <= kUpperBound) : (v < kUpperBound));
+}
+
#endif // V8_BASE_MACROS_H_
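
is_inbounds guards a truncating float-to-int conversion: truncation rounds toward zero, so any float strictly inside (min - 1, max + 1) truncates into range, and the kLowerBoundIsMin/kUpperBoundIsMax constants switch to a non-strict comparison exactly when float_t happens to represent the bound itself. A usage sketch; the template body is restated from above so the example is self-contained:

// Usage sketch for the is_inbounds check above.
#include <cstdint>
#include <cstdio>
#include <limits>

template <typename int_t, typename float_t, typename biggest_int_t = int64_t>
bool is_inbounds(float_t v) {
  static_assert(sizeof(int_t) < sizeof(biggest_int_t),
                "int_t can't be bounds checked by the compiler");
  constexpr float_t kLowerBound =
      static_cast<float_t>(std::numeric_limits<int_t>::min()) - 1;
  constexpr float_t kUpperBound =
      static_cast<float_t>(std::numeric_limits<int_t>::max()) + 1;
  constexpr bool kLowerBoundIsMin =
      static_cast<biggest_int_t>(kLowerBound) ==
      static_cast<biggest_int_t>(std::numeric_limits<int_t>::min());
  constexpr bool kUpperBoundIsMax =
      static_cast<biggest_int_t>(kUpperBound) ==
      static_cast<biggest_int_t>(std::numeric_limits<int_t>::max());
  return (kLowerBoundIsMin ? (kLowerBound <= v) : (kLowerBound < v)) &&
         (kUpperBoundIsMax ? (v <= kUpperBound) : (v < kUpperBound));
}

int main() {
  std::printf("%d\n", is_inbounds<int32_t>(2147483647.9));   // 1: truncates to max
  std::printf("%d\n", is_inbounds<int32_t>(2147483648.0));   // 0: out of range
  std::printf("%d\n", is_inbounds<int32_t>(-2147483648.5));  // 1: truncates to min
}
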
diff --git a/deps/v8/src/base/once.cc b/deps/v8/src/base/once.cc
index 818a9f2e84..3e5e21925d 100644
--- a/deps/v8/src/base/once.cc
+++ b/deps/v8/src/base/once.cc
@@ -15,7 +15,7 @@
namespace v8 {
namespace base {
-void CallOnceImpl(OnceType* once, PointerArgFunction init_func, void* arg) {
+void CallOnceImpl(OnceType* once, std::function<void()> init_func) {
AtomicWord state = Acquire_Load(once);
// Fast path. The provided function was already executed.
if (state == ONCE_STATE_DONE) {
@@ -34,7 +34,7 @@ void CallOnceImpl(OnceType* once, PointerArgFunction init_func, void* arg) {
if (state == ONCE_STATE_UNINITIALIZED) {
// We are the first thread to call this function, so we have to call the
// function.
- init_func(arg);
+ init_func();
Release_Store(once, ONCE_STATE_DONE);
} else {
// Another thread has already started executing the function. We need to
diff --git a/deps/v8/src/base/once.h b/deps/v8/src/base/once.h
index ea9c2fa88d..f355ef52ae 100644
--- a/deps/v8/src/base/once.h
+++ b/deps/v8/src/base/once.h
@@ -53,6 +53,7 @@
#define V8_BASE_ONCE_H_
#include <stddef.h>
+#include <functional>
#include "src/base/atomicops.h"
#include "src/base/base-export.h"
@@ -80,13 +81,12 @@ struct OneArgFunction {
typedef void (*type)(T);
};
-V8_BASE_EXPORT void CallOnceImpl(OnceType* once, PointerArgFunction init_func,
- void* arg);
+V8_BASE_EXPORT void CallOnceImpl(OnceType* once,
+ std::function<void()> init_func);
inline void CallOnce(OnceType* once, NoArgFunction init_func) {
if (Acquire_Load(once) != ONCE_STATE_DONE) {
- CallOnceImpl(once, reinterpret_cast<PointerArgFunction>(init_func),
- nullptr);
+ CallOnceImpl(once, init_func);
}
}
@@ -95,8 +95,7 @@ template <typename Arg>
inline void CallOnce(OnceType* once,
typename OneArgFunction<Arg*>::type init_func, Arg* arg) {
if (Acquire_Load(once) != ONCE_STATE_DONE) {
- CallOnceImpl(once, reinterpret_cast<PointerArgFunction>(init_func),
- static_cast<void*>(arg));
+ CallOnceImpl(once, [=]() { init_func(arg); });
}
}
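
With std::function, the one-argument overload can wrap init_func and arg in a capturing lambda instead of laundering them through reinterpret_cast. A usage sketch of the same shape, with std::call_once standing in for V8's lock-free CallOnceImpl:

// Sketch of the call-once-with-argument pattern now expressible via
// std::function; std::call_once stands in for V8's CallOnceImpl.
#include <cstdio>
#include <functional>
#include <mutex>

static void InitCounter(int* counter) { *counter += 1; }

static void CallOnceImpl(std::once_flag& once, std::function<void()> init) {
  std::call_once(once, init);
}

int main() {
  std::once_flag flag;
  int counter = 0;
  for (int i = 0; i < 3; ++i) {
    CallOnceImpl(flag, [&]() { InitCounter(&counter); });  // runs once
  }
  std::printf("%d\n", counter);  // 1
}
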
diff --git a/deps/v8/src/base/page-allocator.cc b/deps/v8/src/base/page-allocator.cc
new file mode 100644
index 0000000000..25ee2e4721
--- /dev/null
+++ b/deps/v8/src/base/page-allocator.cc
@@ -0,0 +1,64 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/page-allocator.h"
+
+#include "src/base/platform/platform.h"
+
+namespace v8 {
+namespace base {
+
+#define STATIC_ASSERT_ENUM(a, b) \
+ static_assert(static_cast<int>(a) == static_cast<int>(b), \
+ "mismatching enum: " #a)
+
+STATIC_ASSERT_ENUM(PageAllocator::kNoAccess,
+ base::OS::MemoryPermission::kNoAccess);
+STATIC_ASSERT_ENUM(PageAllocator::kReadWrite,
+ base::OS::MemoryPermission::kReadWrite);
+STATIC_ASSERT_ENUM(PageAllocator::kReadWriteExecute,
+ base::OS::MemoryPermission::kReadWriteExecute);
+STATIC_ASSERT_ENUM(PageAllocator::kReadExecute,
+ base::OS::MemoryPermission::kReadExecute);
+
+#undef STATIC_ASSERT_ENUM
+
+size_t PageAllocator::AllocatePageSize() {
+ return base::OS::AllocatePageSize();
+}
+
+size_t PageAllocator::CommitPageSize() { return base::OS::CommitPageSize(); }
+
+void PageAllocator::SetRandomMmapSeed(int64_t seed) {
+ base::OS::SetRandomMmapSeed(seed);
+}
+
+void* PageAllocator::GetRandomMmapAddr() {
+ return base::OS::GetRandomMmapAddr();
+}
+
+void* PageAllocator::AllocatePages(void* address, size_t size, size_t alignment,
+ PageAllocator::Permission access) {
+ return base::OS::Allocate(address, size, alignment,
+ static_cast<base::OS::MemoryPermission>(access));
+}
+
+bool PageAllocator::FreePages(void* address, size_t size) {
+ return base::OS::Free(address, size);
+}
+
+bool PageAllocator::ReleasePages(void* address, size_t size, size_t new_size) {
+ DCHECK_LT(new_size, size);
+ return base::OS::Release(reinterpret_cast<uint8_t*>(address) + new_size,
+ size - new_size);
+}
+
+bool PageAllocator::SetPermissions(void* address, size_t size,
+ PageAllocator::Permission access) {
+ return base::OS::SetPermissions(
+ address, size, static_cast<base::OS::MemoryPermission>(access));
+}
+
+} // namespace base
+} // namespace v8
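
The STATIC_ASSERT_ENUM block is what justifies the static_cast between v8::PageAllocator::Permission and base::OS::MemoryPermission in AllocatePages and SetPermissions: reordering either enum becomes a compile error rather than a silent permission remap. A minimal sketch of the pattern, with illustrative enums:

// Minimal sketch of mirroring two enums and locking them together
// with static_asserts, as done above. The enums here are illustrative.
#include <cstdio>

enum class PublicMode { kOff, kRead, kReadWrite };
enum class InternalMode { kOff, kRead, kReadWrite };

#define STATIC_ASSERT_ENUM(a, b)                            \
  static_assert(static_cast<int>(a) == static_cast<int>(b), \
                "mismatching enum: " #a)

STATIC_ASSERT_ENUM(PublicMode::kOff, InternalMode::kOff);
STATIC_ASSERT_ENUM(PublicMode::kRead, InternalMode::kRead);
STATIC_ASSERT_ENUM(PublicMode::kReadWrite, InternalMode::kReadWrite);

#undef STATIC_ASSERT_ENUM

// With the asserts in place, the cast cannot silently remap values.
InternalMode ToInternal(PublicMode m) { return static_cast<InternalMode>(m); }

int main() {
  std::printf("%d\n", static_cast<int>(ToInternal(PublicMode::kRead)));  // 1
}
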
diff --git a/deps/v8/src/base/page-allocator.h b/deps/v8/src/base/page-allocator.h
new file mode 100644
index 0000000000..ff817cdba2
--- /dev/null
+++ b/deps/v8/src/base/page-allocator.h
@@ -0,0 +1,41 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_PAGE_ALLOCATOR_H_
+#define V8_BASE_PAGE_ALLOCATOR_H_
+
+#include "include/v8-platform.h"
+#include "src/base/base-export.h"
+#include "src/base/compiler-specific.h"
+
+namespace v8 {
+namespace base {
+
+class V8_BASE_EXPORT PageAllocator
+ : public NON_EXPORTED_BASE(::v8::PageAllocator) {
+ public:
+ virtual ~PageAllocator() = default;
+
+ size_t AllocatePageSize() override;
+
+ size_t CommitPageSize() override;
+
+ void SetRandomMmapSeed(int64_t seed) override;
+
+ void* GetRandomMmapAddr() override;
+
+ void* AllocatePages(void* address, size_t size, size_t alignment,
+ PageAllocator::Permission access) override;
+
+ bool FreePages(void* address, size_t size) override;
+
+ bool ReleasePages(void* address, size_t size, size_t new_size) override;
+
+ bool SetPermissions(void* address, size_t size,
+ PageAllocator::Permission access) override;
+};
+
+} // namespace base
+} // namespace v8
+#endif // V8_BASE_PAGE_ALLOCATOR_H_
diff --git a/deps/v8/src/base/platform/platform-fuchsia.cc b/deps/v8/src/base/platform/platform-fuchsia.cc
index 83a8a23c48..38a7070e85 100644
--- a/deps/v8/src/base/platform/platform-fuchsia.cc
+++ b/deps/v8/src/base/platform/platform-fuchsia.cc
@@ -124,12 +124,11 @@ bool OS::HasLazyCommits() {
}
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
- CHECK(false); // TODO(scottmg): Port, https://crbug.com/731217.
- return std::vector<SharedLibraryAddress>();
+ UNREACHABLE(); // TODO(scottmg): Port, https://crbug.com/731217.
}
void OS::SignalCodeMovingGC() {
- CHECK(false); // TODO(scottmg): Port, https://crbug.com/731217.
+ UNREACHABLE(); // TODO(scottmg): Port, https://crbug.com/731217.
}
} // namespace base
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index b873197d3b..5edbd7648b 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -89,6 +89,7 @@ const char* g_gc_fake_mmap = nullptr;
static LazyInstance<RandomNumberGenerator>::type
platform_random_number_generator = LAZY_INSTANCE_INITIALIZER;
+static LazyMutex rng_mutex = LAZY_MUTEX_INITIALIZER;
#if !V8_OS_FUCHSIA
#if V8_OS_MACOSX
@@ -130,11 +131,9 @@ int GetFlagsForMemoryPermission(OS::MemoryPermission access) {
}
void* Allocate(void* address, size_t size, OS::MemoryPermission access) {
- const size_t actual_size = RoundUp(size, OS::AllocatePageSize());
int prot = GetProtectionFromMemoryPermission(access);
int flags = GetFlagsForMemoryPermission(access);
- void* result =
- mmap(address, actual_size, prot, flags, kMmapFd, kMmapFdOffset);
+ void* result = mmap(address, size, prot, flags, kMmapFd, kMmapFdOffset);
if (result == MAP_FAILED) return nullptr;
return result;
}
@@ -167,11 +166,7 @@ int ReclaimInaccessibleMemory(void* address, size_t size) {
} // namespace
-void OS::Initialize(int64_t random_seed, bool hard_abort,
- const char* const gc_fake_mmap) {
- if (random_seed) {
- platform_random_number_generator.Pointer()->SetSeed(random_seed);
- }
+void OS::Initialize(bool hard_abort, const char* const gc_fake_mmap) {
g_hard_abort = hard_abort;
g_gc_fake_mmap = gc_fake_mmap;
}
@@ -207,45 +202,60 @@ size_t OS::CommitPageSize() {
}
// static
+void OS::SetRandomMmapSeed(int64_t seed) {
+ if (seed) {
+ LockGuard<Mutex> guard(rng_mutex.Pointer());
+ platform_random_number_generator.Pointer()->SetSeed(seed);
+ }
+}
+
+// static
void* OS::GetRandomMmapAddr() {
-#if defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
- defined(THREAD_SANITIZER)
- // Dynamic tools do not support custom mmap addresses.
- return nullptr;
-#endif
uintptr_t raw_addr;
- platform_random_number_generator.Pointer()->NextBytes(&raw_addr,
- sizeof(raw_addr));
+ {
+ LockGuard<Mutex> guard(rng_mutex.Pointer());
+ platform_random_number_generator.Pointer()->NextBytes(&raw_addr,
+ sizeof(raw_addr));
+ }
+#if defined(V8_USE_ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
+ defined(THREAD_SANITIZER) || defined(LEAK_SANITIZER)
+ // If random hint addresses interfere with address ranges hard coded in
+ // sanitizers, bad things happen. This address range is copied from TSAN
+ // source but works with all tools.
+ // See crbug.com/539863.
+ raw_addr &= 0x007fffff0000ULL;
+ raw_addr += 0x7e8000000000ULL;
+#else
#if V8_TARGET_ARCH_X64
// Currently available CPUs have 48 bits of virtual addressing. Truncate
// the hint address to 46 bits to give the kernel a fighting chance of
// fulfilling our placement request.
- raw_addr &= V8_UINT64_C(0x3ffffffff000);
+ raw_addr &= uint64_t{0x3FFFFFFFF000};
#elif V8_TARGET_ARCH_PPC64
#if V8_OS_AIX
// AIX: 64 bits of virtual addressing, but we limit address range to:
// a) minimize Segment Lookaside Buffer (SLB) misses and
- raw_addr &= V8_UINT64_C(0x3ffff000);
+ raw_addr &= uint64_t{0x3FFFF000};
// Use extra address space to isolate the mmap regions.
- raw_addr += V8_UINT64_C(0x400000000000);
+ raw_addr += uint64_t{0x400000000000};
#elif V8_TARGET_BIG_ENDIAN
// Big-endian Linux: 44 bits of virtual addressing.
- raw_addr &= V8_UINT64_C(0x03fffffff000);
+ raw_addr &= uint64_t{0x03FFFFFFF000};
#else
// Little-endian Linux: 48 bits of virtual addressing.
- raw_addr &= V8_UINT64_C(0x3ffffffff000);
+ raw_addr &= uint64_t{0x3FFFFFFFF000};
#endif
#elif V8_TARGET_ARCH_S390X
// Linux on Z uses bits 22-32 for Region Indexing, which translates to 42 bits
// of virtual addressing. Truncate to 40 bits to allow kernel chance to
// fulfill request.
- raw_addr &= V8_UINT64_C(0xfffffff000);
+ raw_addr &= uint64_t{0xFFFFFFF000};
#elif V8_TARGET_ARCH_S390
// 31 bits of virtual addressing. Truncate to 29 bits to allow kernel chance
// to fulfill request.
- raw_addr &= 0x1ffff000;
+ raw_addr &= 0x1FFFF000;
#else
- raw_addr &= 0x3ffff000;
+ raw_addr &= 0x3FFFF000;
#ifdef __sun
// For our Solaris/illumos mmap hint, we pick a random address in the bottom
@@ -269,6 +279,7 @@ void* OS::GetRandomMmapAddr() {
raw_addr += 0x20000000;
#endif
#endif
+#endif
return reinterpret_cast<void*>(raw_addr);
}
@@ -283,6 +294,7 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
address = AlignedAddress(address, alignment);
// Add the maximum misalignment so we are guaranteed an aligned base address.
size_t request_size = size + (alignment - page_size);
+ request_size = RoundUp(request_size, OS::AllocatePageSize());
void* result = base::Allocate(address, request_size, access);
if (result == nullptr) return nullptr;
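
OS::Allocate over-allocates by (alignment - page_size) so that some page boundary inside the mapping satisfies the stronger alignment; the new RoundUp keeps the request itself a multiple of AllocatePageSize(), since base::Allocate above no longer rounds before calling mmap. A sketch of the arithmetic with stand-in sizes:

// Sketch of the over-allocation arithmetic in OS::Allocate; the page
// size and the mmap result below are stand-in values, not OS queries.
#include <cstddef>
#include <cstdio>

static size_t RoundUp(size_t x, size_t multiple) {  // multiple: power of two
  return (x + multiple - 1) & ~(multiple - 1);
}

int main() {
  const size_t page_size = 4096;        // stand-in for OS::AllocatePageSize()
  const size_t size = 64 * 1024;        // caller's request, page-multiple
  const size_t alignment = 256 * 1024;  // stricter than page alignment

  size_t request_size = size + (alignment - page_size);
  request_size = RoundUp(request_size, page_size);

  // Whatever page-aligned base mmap returns, the first alignment-multiple
  // at or above it still leaves `size` usable bytes inside the mapping.
  size_t base = 0x20001000;  // hypothetical page-aligned mmap result
  size_t aligned_base = RoundUp(base, alignment);
  std::printf("request %zu, base %#zx -> aligned %#zx\n", request_size, base,
              aligned_base);
}
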
diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc
index e026d7edae..22580cc407 100644
--- a/deps/v8/src/base/platform/platform-win32.cc
+++ b/deps/v8/src/base/platform/platform-win32.cc
@@ -674,8 +674,15 @@ void OS::StrNCpy(char* dest, int length, const char* src, size_t n) {
#undef _TRUNCATE
#undef STRUNCATE
-// The allocation alignment is the guaranteed alignment for
-// VirtualAlloc'ed blocks of memory.
+static LazyInstance<RandomNumberGenerator>::type
+ platform_random_number_generator = LAZY_INSTANCE_INITIALIZER;
+static LazyMutex rng_mutex = LAZY_MUTEX_INITIALIZER;
+
+void OS::Initialize(bool hard_abort, const char* const gc_fake_mmap) {
+ g_hard_abort = hard_abort;
+}
+
+// static
size_t OS::AllocatePageSize() {
static size_t allocate_alignment = 0;
if (allocate_alignment == 0) {
@@ -686,6 +693,7 @@ size_t OS::AllocatePageSize() {
return allocate_alignment;
}
+// static
size_t OS::CommitPageSize() {
static size_t page_size = 0;
if (page_size == 0) {
@@ -697,17 +705,15 @@ size_t OS::CommitPageSize() {
return page_size;
}
-static LazyInstance<RandomNumberGenerator>::type
- platform_random_number_generator = LAZY_INSTANCE_INITIALIZER;
-
-void OS::Initialize(int64_t random_seed, bool hard_abort,
- const char* const gc_fake_mmap) {
- if (random_seed) {
- platform_random_number_generator.Pointer()->SetSeed(random_seed);
+// static
+void OS::SetRandomMmapSeed(int64_t seed) {
+ if (seed) {
+ LockGuard<Mutex> guard(rng_mutex.Pointer());
+ platform_random_number_generator.Pointer()->SetSeed(seed);
}
- g_hard_abort = hard_abort;
}
+// static
void* OS::GetRandomMmapAddr() {
// The address range used to randomize RWX allocations in OS::Allocate
// Try not to map pages into the default range that windows loads DLLs
@@ -722,8 +728,11 @@ void* OS::GetRandomMmapAddr() {
static const uintptr_t kAllocationRandomAddressMax = 0x3FFF0000;
#endif
uintptr_t address;
- platform_random_number_generator.Pointer()->NextBytes(&address,
- sizeof(address));
+ {
+ LockGuard<Mutex> guard(rng_mutex.Pointer());
+ platform_random_number_generator.Pointer()->NextBytes(&address,
+ sizeof(address));
+ }
address <<= kPageSizeBits;
address += kAllocationRandomAddressMin;
address &= kAllocationRandomAddressMax;
diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h
index dd454ecd43..8a4545c607 100644
--- a/deps/v8/src/base/platform/platform.h
+++ b/deps/v8/src/base/platform/platform.h
@@ -36,6 +36,7 @@
#endif
namespace v8 {
+
namespace base {
// ----------------------------------------------------------------------------
@@ -93,10 +94,9 @@ inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
#endif // V8_NO_FAST_TLS
-
+class PageAllocator;
class TimezoneCache;
-
// ----------------------------------------------------------------------------
// OS
//
@@ -107,11 +107,9 @@ class TimezoneCache;
class V8_BASE_EXPORT OS {
public:
// Initialize the OS class.
- // - random_seed: Used for the GetRandomMmapAddress() if non-zero.
// - hard_abort: If true, OS::Abort() will crash instead of aborting.
// - gc_fake_mmap: Name of the file for fake gc mmap used in ll_prof.
- static void Initialize(int64_t random_seed, bool hard_abort,
- const char* const gc_fake_mmap);
+ static void Initialize(bool hard_abort, const char* const gc_fake_mmap);
// Returns the accumulated user time for thread. This routine
// can be used for profiling. The implementation should
@@ -157,6 +155,8 @@ class V8_BASE_EXPORT OS {
static PRINTF_FORMAT(1, 2) void PrintError(const char* format, ...);
static PRINTF_FORMAT(1, 0) void VPrintError(const char* format, va_list args);
+ // Memory permissions. These should be kept in sync with the ones in
+ // v8::PageAllocator.
enum class MemoryPermission {
kNoAccess,
kReadWrite,
@@ -165,40 +165,6 @@ class V8_BASE_EXPORT OS {
kReadExecute
};
- // Gets the page granularity for Allocate. Addresses returned by Allocate are
- // aligned to this size.
- static size_t AllocatePageSize();
-
- // Gets the granularity at which the permissions and commit calls can be made.
- static size_t CommitPageSize();
-
- // Generate a random address to be used for hinting allocation calls.
- static void* GetRandomMmapAddr();
-
- // Allocates memory. Permissions are set according to the access argument.
- // The address parameter is a hint. The size and alignment parameters must be
- // multiples of AllocatePageSize(). Returns the address of the allocated
- // memory, with the specified size and alignment, or nullptr on failure.
- V8_WARN_UNUSED_RESULT static void* Allocate(void* address, size_t size,
- size_t alignment,
- MemoryPermission access);
-
- // Frees memory allocated by a call to Allocate. address and size must be
- // multiples of AllocatePageSize(). Returns true on success, otherwise false.
- V8_WARN_UNUSED_RESULT static bool Free(void* address, const size_t size);
-
- // Releases memory that is no longer needed. The range specified by address
- // and size must be part of an allocated memory region, and must be multiples
- // of CommitPageSize(). Released memory is left in an undefined state, so it
- // should not be accessed. Returns true on success, otherwise false.
- V8_WARN_UNUSED_RESULT static bool Release(void* address, size_t size);
-
- // Sets permissions according to the access argument. address and size must be
- // multiples of CommitPageSize(). Setting permission to kNoAccess may cause
- // the memory contents to be lost. Returns true on success, otherwise false.
- V8_WARN_UNUSED_RESULT static bool SetPermissions(void* address, size_t size,
- MemoryPermission access);
-
static bool HasLazyCommits();
// Sleep for a specified time interval.
@@ -280,6 +246,30 @@ class V8_BASE_EXPORT OS {
static int GetCurrentThreadId();
private:
+ // These classes use the private memory management API below.
+ friend class MemoryMappedFile;
+ friend class PosixMemoryMappedFile;
+ friend class v8::base::PageAllocator;
+
+ static size_t AllocatePageSize();
+
+ static size_t CommitPageSize();
+
+ static void SetRandomMmapSeed(int64_t seed);
+
+ static void* GetRandomMmapAddr();
+
+ V8_WARN_UNUSED_RESULT static void* Allocate(void* address, size_t size,
+ size_t alignment,
+ MemoryPermission access);
+
+ V8_WARN_UNUSED_RESULT static bool Free(void* address, const size_t size);
+
+ V8_WARN_UNUSED_RESULT static bool Release(void* address, size_t size);
+
+ V8_WARN_UNUSED_RESULT static bool SetPermissions(void* address, size_t size,
+ MemoryPermission access);
+
static const int msPerSecond = 1000;
#if V8_OS_POSIX
diff --git a/deps/v8/src/base/platform/semaphore.cc b/deps/v8/src/base/platform/semaphore.cc
index 9a7ef7a8f4..5950664523 100644
--- a/deps/v8/src/base/platform/semaphore.cc
+++ b/deps/v8/src/base/platform/semaphore.cc
@@ -136,7 +136,7 @@ bool Semaphore::WaitFor(const TimeDelta& rel_time) {
Semaphore::Semaphore(int count) {
DCHECK_GE(count, 0);
- native_handle_ = ::CreateSemaphoreA(nullptr, count, 0x7fffffff, nullptr);
+ native_handle_ = ::CreateSemaphoreA(nullptr, count, 0x7FFFFFFF, nullptr);
DCHECK_NOT_NULL(native_handle_);
}
diff --git a/deps/v8/src/base/platform/time.cc b/deps/v8/src/base/platform/time.cc
index 3529d55875..1fcd7aecce 100644
--- a/deps/v8/src/base/platform/time.cc
+++ b/deps/v8/src/base/platform/time.cc
@@ -298,8 +298,7 @@ Time Time::NowFromSystemTime() {
// Time between windows epoch and standard epoch.
-static const int64_t kTimeToEpochInMicroseconds = V8_INT64_C(11644473600000000);
-
+static const int64_t kTimeToEpochInMicroseconds = int64_t{11644473600000000};
Time Time::FromFiletime(FILETIME ft) {
if (ft.dwLowDateTime == 0 && ft.dwHighDateTime == 0) {
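The V8_INT64_C macro is being replaced throughout with C++11 braced initialization, which the compiler checks for narrowing. The constant itself is the offset between the Windows epoch (1601-01-01) and the Unix epoch (1970-01-01): 11,644,473,600 seconds, i.e. 11644473600000000 microseconds. A small sketch of the narrowing check, assuming nothing beyond <cstdint>:

#include <cstdint>

static const int64_t kOk = int64_t{11644473600000000};  // fits in int64_t
// The same value braced-initialized into a narrower type is ill-formed,
// so a mis-typed constant fails at compile time instead of truncating:
// static const int32_t kBad = int32_t{11644473600000000};  // error: narrowing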
diff --git a/deps/v8/src/base/safe_conversions.h b/deps/v8/src/base/safe_conversions.h
index c16fa36682..f63f1ad99e 100644
--- a/deps/v8/src/base/safe_conversions.h
+++ b/deps/v8/src/base/safe_conversions.h
@@ -53,8 +53,7 @@ inline Dst saturated_cast(Src value) {
// Should fail only on attempting to assign NaN to a saturated integer.
case internal::RANGE_INVALID:
- CHECK(false);
- return std::numeric_limits<Dst>::max();
+ UNREACHABLE();
}
UNREACHABLE();
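With RANGE_INVALID now falling through to UNREACHABLE(), a saturated cast fed NaN aborts in all builds instead of returning max() after a failed CHECK. A behavioral sketch of the saturation contract (illustrative only, not V8's template machinery):

#include <cstdlib>
#include <limits>

// Out-of-range values clamp to the nearest representable bound; NaN has
// no meaningful clamp, so it is treated as fatal, mirroring UNREACHABLE().
static signed char SaturateToInt8(double v) {
  if (v != v) std::abort();  // NaN
  if (v >= 127.0) return std::numeric_limits<signed char>::max();
  if (v <= -128.0) return std::numeric_limits<signed char>::min();
  return static_cast<signed char>(v);
}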
diff --git a/deps/v8/src/base/utils/random-number-generator.cc b/deps/v8/src/base/utils/random-number-generator.cc
index 86c3694feb..afe5a1f098 100644
--- a/deps/v8/src/base/utils/random-number-generator.cc
+++ b/deps/v8/src/base/utils/random-number-generator.cc
@@ -213,9 +213,9 @@ void RandomNumberGenerator::SetSeed(int64_t seed) {
uint64_t RandomNumberGenerator::MurmurHash3(uint64_t h) {
h ^= h >> 33;
- h *= V8_UINT64_C(0xFF51AFD7ED558CCD);
+ h *= uint64_t{0xFF51AFD7ED558CCD};
h ^= h >> 33;
- h *= V8_UINT64_C(0xC4CEB9FE1A85EC53);
+ h *= uint64_t{0xC4CEB9FE1A85EC53};
h ^= h >> 33;
return h;
}
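For reference, this is the 64-bit finalizer ("fmix64") from Austin Appleby's MurmurHash3, used here to decorrelate user-provided seeds; a standalone copy with the same constants:

#include <cstdint>

uint64_t Fmix64(uint64_t h) {
  h ^= h >> 33;
  h *= uint64_t{0xFF51AFD7ED558CCD};
  h ^= h >> 33;
  h *= uint64_t{0xC4CEB9FE1A85EC53};
  h ^= h >> 33;
  return h;
}
// Avalanche in action: adjacent seeds such as Fmix64(1) and Fmix64(2)
// map to outputs that differ in roughly half of their 64 bits.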
diff --git a/deps/v8/src/base/utils/random-number-generator.h b/deps/v8/src/base/utils/random-number-generator.h
index 285c5972e0..321ce861fb 100644
--- a/deps/v8/src/base/utils/random-number-generator.h
+++ b/deps/v8/src/base/utils/random-number-generator.h
@@ -113,8 +113,8 @@ class V8_BASE_EXPORT RandomNumberGenerator final {
// Static and exposed for external use.
static inline double ToDouble(uint64_t state0, uint64_t state1) {
// Exponent for double values for [1.0 .. 2.0)
- static const uint64_t kExponentBits = V8_UINT64_C(0x3FF0000000000000);
- static const uint64_t kMantissaMask = V8_UINT64_C(0x000FFFFFFFFFFFFF);
+ static const uint64_t kExponentBits = uint64_t{0x3FF0000000000000};
+ static const uint64_t kMantissaMask = uint64_t{0x000FFFFFFFFFFFFF};
uint64_t random = ((state0 + state1) & kMantissaMask) | kExponentBits;
return bit_cast<double>(random) - 1;
}
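The trick here is to pin the IEEE-754 exponent: 0x3FF0000000000000 is the bit pattern of 1.0, so OR-ing 52 random mantissa bits under it yields a double uniformly distributed in [1.0, 2.0), and subtracting 1 gives [0.0, 1.0). A standalone version using memcpy in place of bit_cast:

#include <cstdint>
#include <cstring>

double ToDouble(uint64_t state0, uint64_t state1) {
  static const uint64_t kExponentBits = uint64_t{0x3FF0000000000000};
  static const uint64_t kMantissaMask = uint64_t{0x000FFFFFFFFFFFFF};
  uint64_t random = ((state0 + state1) & kMantissaMask) | kExponentBits;
  double result;
  std::memcpy(&result, &random, sizeof result);  // portable bit_cast
  return result - 1.0;                           // [1.0, 2.0) -> [0.0, 1.0)
}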
diff --git a/deps/v8/src/bignum.cc b/deps/v8/src/bignum.cc
index 087ec45323..a0a398b7aa 100644
--- a/deps/v8/src/bignum.cc
+++ b/deps/v8/src/bignum.cc
@@ -278,7 +278,7 @@ void Bignum::MultiplyByUInt64(uint64_t factor) {
void Bignum::MultiplyByPowerOfTen(int exponent) {
- const uint64_t kFive27 = V8_2PART_UINT64_C(0x6765c793, fa10079d);
+ const uint64_t kFive27 = V8_2PART_UINT64_C(0x6765C793, fa10079d);
const uint16_t kFive1 = 5;
const uint16_t kFive2 = kFive1 * 5;
const uint16_t kFive3 = kFive2 * 5;
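The two-part macro splits the 64-bit constant because it predates reliable 64-bit literals; the value itself is

  5^27 = 7450580596923828125 = 0x6765C793FA10079D,

the largest power of five that fits in a uint64_t (5^28 already exceeds 2^64).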
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index 695200172d..399b705f00 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -214,10 +214,12 @@ class Genesis BASE_EMBEDDED {
HARMONY_SHIPPING(DECLARE_FEATURE_INITIALIZATION)
#undef DECLARE_FEATURE_INITIALIZATION
+ enum ArrayBufferKind {
+ ARRAY_BUFFER,
+ SHARED_ARRAY_BUFFER,
+ };
Handle<JSFunction> CreateArrayBuffer(Handle<String> name,
- Builtins::Name call_byteLength,
- BuiltinFunctionId byteLength_id,
- Builtins::Name call_slice);
+ ArrayBufferKind array_buffer_kind);
Handle<JSFunction> InstallInternalArray(Handle<JSObject> target,
const char* name,
ElementsKind elements_kind);
@@ -780,7 +782,7 @@ void Genesis::CreateObjectFunction(Handle<JSFunction> empty_function) {
"EmptyObjectPrototype");
map->set_is_prototype_map(true);
// Ban re-setting Object.prototype.__proto__ to prevent Proxy security bug
- map->set_immutable_proto(true);
+ map->set_is_immutable_proto(true);
object_function_prototype->set_map(*map);
// Complete setting up empty function.
@@ -1073,12 +1075,12 @@ void Genesis::CreateJSProxyMaps() {
// constructable proxies.
Handle<Map> proxy_map = factory()->NewMap(JS_PROXY_TYPE, JSProxy::kSize,
TERMINAL_FAST_ELEMENTS_KIND);
- proxy_map->set_dictionary_map(true);
+ proxy_map->set_is_dictionary_map(true);
proxy_map->set_may_have_interesting_symbols(true);
native_context()->set_proxy_map(*proxy_map);
Handle<Map> proxy_callable_map = Map::Copy(proxy_map, "callable Proxy");
- proxy_callable_map->set_is_callable();
+ proxy_callable_map->set_is_callable(true);
native_context()->set_proxy_callable_map(*proxy_callable_map);
proxy_callable_map->SetConstructor(native_context()->function_function());
@@ -1086,6 +1088,31 @@ void Genesis::CreateJSProxyMaps() {
Map::Copy(proxy_callable_map, "constructor Proxy");
proxy_constructor_map->set_is_constructor(true);
native_context()->set_proxy_constructor_map(*proxy_constructor_map);
+
+ {
+ Handle<Map> map =
+ factory()->NewMap(JS_OBJECT_TYPE, JSProxyRevocableResult::kSize,
+ TERMINAL_FAST_ELEMENTS_KIND, 2);
+ Map::EnsureDescriptorSlack(map, 2);
+
+ { // proxy
+ Descriptor d = Descriptor::DataField(factory()->proxy_string(),
+ JSProxyRevocableResult::kProxyIndex,
+ NONE, Representation::Tagged());
+ map->AppendDescriptor(&d);
+ }
+ { // revoke
+ Descriptor d = Descriptor::DataField(factory()->revoke_string(),
+ JSProxyRevocableResult::kRevokeIndex,
+ NONE, Representation::Tagged());
+ map->AppendDescriptor(&d);
+ }
+
+ Map::SetPrototype(map, isolate()->initial_object_prototype());
+ map->SetConstructor(native_context()->object_function());
+
+ native_context()->set_proxy_revocable_result_map(*map);
+ }
}
namespace {
@@ -1227,7 +1254,7 @@ Handle<JSGlobalObject> Genesis::CreateNewGlobals(
}
js_global_object_function->initial_map()->set_is_prototype_map(true);
- js_global_object_function->initial_map()->set_dictionary_map(true);
+ js_global_object_function->initial_map()->set_is_dictionary_map(true);
js_global_object_function->initial_map()->set_may_have_interesting_symbols(
true);
Handle<JSGlobalObject> global_object =
@@ -1481,9 +1508,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
object_function, "keys", Builtins::kObjectKeys, 1, true);
native_context()->set_object_keys(*object_keys);
SimpleInstallFunction(object_function, factory->entries_string(),
- Builtins::kObjectEntries, 1, false);
+ Builtins::kObjectEntries, 1, true);
SimpleInstallFunction(object_function, factory->values_string(),
- Builtins::kObjectValues, 1, false);
+ Builtins::kObjectValues, 1, true);
SimpleInstallFunction(isolate->initial_object_prototype(),
"__defineGetter__", Builtins::kObjectDefineGetter, 2,
@@ -1517,6 +1544,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
factory->proto_string(),
Builtins::kObjectPrototypeGetProto,
Builtins::kObjectPrototypeSetProto, DONT_ENUM);
+
+ SimpleInstallFunction(isolate->initial_object_prototype(), "toLocaleString",
+ Builtins::kObjectPrototypeToLocaleString, 0, true);
}
Handle<JSObject> global(native_context()->global_object());
@@ -1679,12 +1709,18 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
DONT_ENUM);
SimpleInstallFunction(proto, "concat", Builtins::kArrayConcat, 1, false);
- SimpleInstallFunction(proto, "pop", Builtins::kFastArrayPop, 0, false);
- SimpleInstallFunction(proto, "push", Builtins::kFastArrayPush, 1, false);
- SimpleInstallFunction(proto, "shift", Builtins::kFastArrayShift, 0, false);
+ SimpleInstallFunction(proto, "find", Builtins::kArrayPrototypeFind, 1,
+ false);
+ SimpleInstallFunction(proto, "findIndex",
+ Builtins::kArrayPrototypeFindIndex, 1, false);
+ SimpleInstallFunction(proto, "pop", Builtins::kArrayPrototypePop, 0, false);
+ SimpleInstallFunction(proto, "push", Builtins::kArrayPrototypePush, 1,
+ false);
+ SimpleInstallFunction(proto, "shift", Builtins::kArrayPrototypeShift, 0,
+ false);
SimpleInstallFunction(proto, "unshift", Builtins::kArrayUnshift, 1, false);
if (FLAG_enable_experimental_builtins) {
- SimpleInstallFunction(proto, "slice", Builtins::kFastArraySlice, 2,
+ SimpleInstallFunction(proto, "slice", Builtins::kArrayPrototypeSlice, 2,
false);
} else {
SimpleInstallFunction(proto, "slice", Builtins::kArraySlice, 2, false);
@@ -1798,6 +1834,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> number_fun = InstallFunction(
global, "Number", JS_VALUE_TYPE, JSValue::kSize, 0,
isolate->initial_object_prototype(), Builtins::kNumberConstructor);
+ number_fun->shared()->set_builtin_function_id(kNumberConstructor);
number_fun->shared()->DontAdaptArguments();
number_fun->shared()->SetConstructStub(
*BUILTIN_CODE(isolate, NumberConstructor_ConstructStub));
@@ -1942,6 +1979,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> string_fun = InstallFunction(
global, "String", JS_VALUE_TYPE, JSValue::kSize, 0,
isolate->initial_object_prototype(), Builtins::kStringConstructor);
+ string_fun->shared()->set_builtin_function_id(kStringConstructor);
string_fun->shared()->SetConstructStub(
*BUILTIN_CODE(isolate, StringConstructor_ConstructStub));
string_fun->shared()->DontAdaptArguments();
@@ -2123,6 +2161,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> symbol_fun = InstallFunction(
global, "Symbol", JS_VALUE_TYPE, JSValue::kSize, 0,
factory->the_hole_value(), Builtins::kSymbolConstructor);
+ symbol_fun->shared()->set_builtin_function_id(kSymbolConstructor);
symbol_fun->shared()->SetConstructStub(
*BUILTIN_CODE(isolate, SymbolConstructor_ConstructStub));
symbol_fun->shared()->set_length(0);
@@ -2135,6 +2174,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
false);
// Install well-known symbols.
+ InstallConstant(isolate, symbol_fun, "asyncIterator",
+ factory->async_iterator_symbol());
InstallConstant(isolate, symbol_fun, "hasInstance",
factory->has_instance_symbol());
InstallConstant(isolate, symbol_fun, "isConcatSpreadable",
@@ -2362,11 +2403,11 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> promise_then =
SimpleInstallFunction(prototype, isolate->factory()->then_string(),
- Builtins::kPromiseThen, 2, true);
+ Builtins::kPromisePrototypeThen, 2, true);
native_context()->set_promise_then(*promise_then);
Handle<JSFunction> promise_catch = SimpleInstallFunction(
- prototype, "catch", Builtins::kPromiseCatch, 1, true);
+ prototype, "catch", Builtins::kPromisePrototypeCatch, 1, true);
native_context()->set_promise_catch(*promise_catch);
// Force the Promise constructor to fast properties, so that we can use the
@@ -2410,8 +2451,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
{ // Internal: PromiseHandle
- Handle<JSFunction> function = SimpleCreateFunction(
- isolate, factory->empty_string(), Builtins::kPromiseHandle, 5, false);
+ Handle<JSFunction> function =
+ SimpleCreateFunction(isolate, factory->empty_string(),
+ Builtins::kPromiseHandleJS, 5, false);
native_context()->set_promise_handle(*function);
}
@@ -2921,10 +2963,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- A r r a y B u f f e r
Handle<String> name = factory->InternalizeUtf8String("ArrayBuffer");
- Handle<JSFunction> array_buffer_fun =
- CreateArrayBuffer(name, Builtins::kArrayBufferPrototypeGetByteLength,
- BuiltinFunctionId::kArrayBufferByteLength,
- Builtins::kArrayBufferPrototypeSlice);
+ Handle<JSFunction> array_buffer_fun = CreateArrayBuffer(name, ARRAY_BUFFER);
JSObject::AddProperty(global, name, array_buffer_fun, DONT_ENUM);
InstallWithIntrinsicDefaultProto(isolate, array_buffer_fun,
Context::ARRAY_BUFFER_FUN_INDEX);
@@ -2940,10 +2979,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- S h a r e d A r r a y B u f f e r
Handle<String> name = factory->InternalizeUtf8String("SharedArrayBuffer");
- Handle<JSFunction> shared_array_buffer_fun = CreateArrayBuffer(
- name, Builtins::kSharedArrayBufferPrototypeGetByteLength,
- BuiltinFunctionId::kSharedArrayBufferByteLength,
- Builtins::kSharedArrayBufferPrototypeSlice);
+ Handle<JSFunction> shared_array_buffer_fun =
+ CreateArrayBuffer(name, SHARED_ARRAY_BUFFER);
InstallWithIntrinsicDefaultProto(isolate, shared_array_buffer_fun,
Context::SHARED_ARRAY_BUFFER_FUN_INDEX);
InstallSpeciesGetter(shared_array_buffer_fun);
@@ -3415,6 +3452,15 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
native_context()->set_proxy_function(*proxy_function);
InstallFunction(global, name, proxy_function, factory->Object_string());
+
+ SimpleInstallFunction(proxy_function, "revocable",
+ Builtins::kProxyRevocable, 2, true);
+
+ { // Internal: ProxyRevoke
+ Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
+ isolate, Builtins::kProxyRevoke, factory->empty_string(), 0);
+ native_context()->set_proxy_revoke_shared_fun(*info);
+ }
}
{ // -- R e f l e c t
@@ -3467,7 +3513,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
factory->NewMap(JS_BOUND_FUNCTION_TYPE, JSBoundFunction::kSize,
TERMINAL_FAST_ELEMENTS_KIND, 0);
map->SetConstructor(native_context()->object_function());
- map->set_is_callable();
+ map->set_is_callable(true);
Map::SetPrototype(map, empty_function);
PropertyAttributes roc_attribs =
@@ -4301,10 +4347,13 @@ EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_property)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_function_sent)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_function_tostring)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_public_fields)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_private_fields)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_static_fields)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_class_fields)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_dynamic_import)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_import_meta)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_restrict_constructor_return)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_optional_catch_binding)
void InstallPublicSymbol(Factory* factory, Handle<Context> native_context,
const char* name, Handle<Symbol> value) {
@@ -4362,20 +4411,13 @@ void Genesis::InitializeGlobal_harmony_array_prototype_values() {
NONE);
}
-void Genesis::InitializeGlobal_harmony_async_iteration() {
- if (!FLAG_harmony_async_iteration) return;
- Handle<JSFunction> symbol_fun(native_context()->symbol_function());
- InstallConstant(isolate(), symbol_fun, "asyncIterator",
- factory()->async_iterator_symbol());
-}
-
void Genesis::InitializeGlobal_harmony_promise_finally() {
if (!FLAG_harmony_promise_finally) return;
Handle<JSFunction> constructor(native_context()->promise_function());
Handle<JSObject> prototype(JSObject::cast(constructor->instance_prototype()));
- SimpleInstallFunction(prototype, "finally", Builtins::kPromiseFinally, 1,
- true, DONT_ENUM);
+ SimpleInstallFunction(prototype, "finally",
+ Builtins::kPromisePrototypeFinally, 1, true, DONT_ENUM);
// The promise prototype map has changed because we added a property
// to prototype, so we update the saved map.
@@ -4421,6 +4463,7 @@ void Genesis::InitializeGlobal_harmony_bigint() {
Handle<JSFunction> bigint_fun =
InstallFunction(global, "BigInt", JS_VALUE_TYPE, JSValue::kSize, 0,
factory->the_hole_value(), Builtins::kBigIntConstructor);
+ bigint_fun->shared()->set_builtin_function_id(kBigIntConstructor);
bigint_fun->shared()->DontAdaptArguments();
bigint_fun->shared()->SetConstructStub(
*BUILTIN_CODE(isolate(), BigIntConstructor_ConstructStub));
@@ -4492,10 +4535,8 @@ void Genesis::InitializeGlobal_harmony_plural_rules() {
#endif // V8_INTL_SUPPORT
-Handle<JSFunction> Genesis::CreateArrayBuffer(Handle<String> name,
- Builtins::Name call_byteLength,
- BuiltinFunctionId byteLength_id,
- Builtins::Name call_slice) {
+Handle<JSFunction> Genesis::CreateArrayBuffer(
+ Handle<String> name, ArrayBufferKind array_buffer_kind) {
// Create the %ArrayBufferPrototype%
// Setup the {prototype} with the given {name} for @@toStringTag.
Handle<JSObject> prototype =
@@ -4519,15 +4560,33 @@ Handle<JSFunction> Genesis::CreateArrayBuffer(Handle<String> name,
JSObject::AddProperty(prototype, factory()->constructor_string(),
array_buffer_fun, DONT_ENUM);
- SimpleInstallFunction(array_buffer_fun, factory()->isView_string(),
- Builtins::kArrayBufferIsView, 1, true, DONT_ENUM,
- kArrayBufferIsView);
+ switch (array_buffer_kind) {
+ case ARRAY_BUFFER:
+ SimpleInstallFunction(array_buffer_fun, factory()->isView_string(),
+ Builtins::kArrayBufferIsView, 1, true, DONT_ENUM,
+ kArrayBufferIsView);
+
+ // Install the "byteLength" getter on the {prototype}.
+ SimpleInstallGetter(prototype, factory()->byte_length_string(),
+ Builtins::kArrayBufferPrototypeGetByteLength, false,
+ BuiltinFunctionId::kArrayBufferByteLength);
+
+ SimpleInstallFunction(prototype, "slice",
+ Builtins::kArrayBufferPrototypeSlice, 2, true);
+ break;
- // Install the "byteLength" getter on the {prototype}.
- SimpleInstallGetter(prototype, factory()->byte_length_string(),
- call_byteLength, false, byteLength_id);
+ case SHARED_ARRAY_BUFFER:
+ // Install the "byteLength" getter on the {prototype}.
+ SimpleInstallGetter(prototype, factory()->byte_length_string(),
+ Builtins::kSharedArrayBufferPrototypeGetByteLength,
+ false,
+ BuiltinFunctionId::kSharedArrayBufferByteLength);
- SimpleInstallFunction(prototype, "slice", call_slice, 2, true);
+ SimpleInstallFunction(prototype, "slice",
+ Builtins::kSharedArrayBufferPrototypeSlice, 2,
+ true);
+ break;
+ }
return array_buffer_fun;
}
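The refactoring collapses three loosely coupled builtin identifiers into one two-valued enum, so the per-kind wiring lives in a single switch and a new buffer kind becomes a compiler-checked change. A condensed sketch of the pattern, with illustrative names rather than V8's:

enum class BufferKind { kOrdinary, kShared };

void InstallBufferMembers(BufferKind kind) {
  switch (kind) {
    case BufferKind::kOrdinary:
      // isView, byteLength getter, ArrayBuffer.prototype.slice
      break;
    case BufferKind::kShared:
      // byteLength getter, SharedArrayBuffer.prototype.slice
      break;
  }
  // No default case: with -Wswitch, adding an enumerator warns here
  // until every installer handles it.
}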
@@ -4823,9 +4882,9 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
}
// Create a constructor for RegExp results (a variant of Array that
- // predefines the two properties index and match).
+ // predefines the properties index, input, and groups).
{
- // RegExpResult initial map.
+ // JSRegExpResult initial map.
// Find global.Array.prototype to inherit from.
Handle<JSFunction> array_constructor(native_context()->array_function());
@@ -4834,16 +4893,20 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
// Add initial map.
Handle<Map> initial_map = factory()->NewMap(
- JS_ARRAY_TYPE, JSRegExpResult::kSize, TERMINAL_FAST_ELEMENTS_KIND, 2);
+ JS_ARRAY_TYPE, JSRegExpResult::kSize, TERMINAL_FAST_ELEMENTS_KIND,
+ JSRegExpResult::kInObjectPropertyCount);
initial_map->SetConstructor(*array_constructor);
// Set prototype on map.
- initial_map->set_non_instance_prototype(false);
+ initial_map->set_has_non_instance_prototype(false);
Map::SetPrototype(initial_map, array_prototype);
- // Update map with length accessor from Array and add "index" and "input".
- Map::EnsureDescriptorSlack(initial_map, 3);
+ // Update map with length accessor from Array and add "index", "input" and
+ // "groups".
+ Map::EnsureDescriptorSlack(initial_map,
+ JSRegExpResult::kInObjectPropertyCount + 1);
+ // length descriptor.
{
JSFunction* array_function = native_context()->array_function();
Handle<DescriptorArray> array_descriptors(
@@ -4857,6 +4920,8 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
array_descriptors->GetDetails(old).attributes());
initial_map->AppendDescriptor(&d);
}
+
+ // index descriptor.
{
Descriptor d = Descriptor::DataField(factory()->index_string(),
JSRegExpResult::kIndexIndex, NONE,
@@ -4864,6 +4929,7 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
initial_map->AppendDescriptor(&d);
}
+ // input descriptor.
{
Descriptor d = Descriptor::DataField(factory()->input_string(),
JSRegExpResult::kInputIndex, NONE,
@@ -4871,6 +4937,14 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
initial_map->AppendDescriptor(&d);
}
+ // groups descriptor.
+ {
+ Descriptor d = Descriptor::DataField(factory()->groups_string(),
+ JSRegExpResult::kGroupsIndex, NONE,
+ Representation::Tagged());
+ initial_map->AppendDescriptor(&d);
+ }
+
native_context()->set_regexp_result_map(*initial_map);
}
@@ -5482,10 +5556,6 @@ Genesis::Genesis(
ConfigureUtilsObject(context_type);
- // Check that the script context table is empty except for the 'this' binding.
- // We do not need script contexts for native scripts.
- DCHECK_EQ(1, native_context()->script_context_table()->used());
-
native_context()->ResetErrorsThrown();
result_ = native_context();
}
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index 1c31009d93..2b2b9c2b34 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -116,9 +116,9 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// Initial map for the builtin InternalArray functions should be maps.
__ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
__ SmiTst(r2);
- __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction);
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForInternalArrayFunction);
__ CompareObjectType(r2, r3, r4, MAP_TYPE);
- __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction);
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForInternalArrayFunction);
}
// Run the native code for the InternalArray function called as a normal
@@ -143,9 +143,9 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
// Initial map for the builtin Array functions should be maps.
__ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
__ SmiTst(r2);
- __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction);
__ CompareObjectType(r2, r3, r4, MAP_TYPE);
- __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
}
__ mov(r3, r1);
@@ -283,14 +283,16 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
Label post_instantiation_deopt_entry, not_create_implicit_receiver;
// Preserve the incoming parameters on the stack.
+ __ LoadRoot(r4, Heap::kTheHoleValueRootIndex);
__ SmiTag(r0);
- __ Push(cp, r0, r1, r3);
+ __ Push(cp, r0, r1, r4, r3);
// ----------- S t a t e -------------
// -- sp[0*kPointerSize]: new target
- // -- r1 and sp[1*kPointerSize]: constructor function
- // -- sp[2*kPointerSize]: number of arguments (tagged)
- // -- sp[3*kPointerSize]: context
+ // -- sp[1*kPointerSize]: padding
+ // -- r1 and sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments (tagged)
+ // -- sp[4*kPointerSize]: context
// -----------------------------------
__ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
@@ -332,9 +334,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -- r3: new target
// -- sp[0*kPointerSize]: implicit receiver
// -- sp[1*kPointerSize]: implicit receiver
- // -- sp[2*kPointerSize]: constructor function
- // -- sp[3*kPointerSize]: number of arguments (tagged)
- // -- sp[4*kPointerSize]: context
+ // -- sp[2*kPointerSize]: padding
+ // -- sp[3*kPointerSize]: constructor function
+ // -- sp[4*kPointerSize]: number of arguments (tagged)
+ // -- sp[5*kPointerSize]: context
// -----------------------------------
// Restore constructor function and argument count.
@@ -355,9 +358,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -- r5: counter
// -- sp[0*kPointerSize]: implicit receiver
// -- sp[1*kPointerSize]: implicit receiver
- // -- r1 and sp[2*kPointerSize]: constructor function
- // -- sp[3*kPointerSize]: number of arguments (tagged)
- // -- sp[4*kPointerSize]: context
+ // -- sp[2*kPointerSize]: padding
+ // -- r1 and sp[3*kPointerSize]: constructor function
+ // -- sp[4*kPointerSize]: number of arguments (tagged)
+ // -- sp[5*kPointerSize]: context
// -----------------------------------
__ b(&entry);
@@ -375,9 +379,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// ----------- S t a t e -------------
// -- r0: constructor result
// -- sp[0*kPointerSize]: implicit receiver
- // -- sp[1*kPointerSize]: constructor function
- // -- sp[2*kPointerSize]: number of arguments
- // -- sp[3*kPointerSize]: context
+ // -- sp[1*kPointerSize]: padding
+ // -- sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments
+ // -- sp[4*kPointerSize]: context
// -----------------------------------
// Store offset of return address for deoptimizer.
@@ -541,7 +546,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset));
__ CompareObjectType(r3, r3, r3, BYTECODE_ARRAY_TYPE);
- __ Assert(eq, kMissingBytecodeArray);
+ __ Assert(eq, AbortReason::kMissingBytecodeArray);
}
// Resume (Ignition/TurboFan) generator object.
@@ -629,8 +634,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ mov(cp, Operand(context_address));
__ ldr(cp, MemOperand(cp));
- __ InitializeRootRegister();
-
// Push the function and the receiver onto the stack.
__ Push(r1, r2);
@@ -777,6 +780,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
__ b(eq, &fallthrough);
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
+ OptimizationMarker::kLogFirstExecution,
+ Runtime::kFunctionFirstExecution);
+ TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent);
TailCallRuntimeIfMarkerEquals(
@@ -791,7 +797,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
__ cmp(
optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
- __ Assert(eq, kExpectedOptimizationSentinel);
+ __ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
}
__ jmp(&fallthrough);
}
@@ -871,7 +877,6 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset));
__ add(bytecode_size_table, bytecode_size_table,
Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
- __ jmp(&load_size);
// Load the size of the current bytecode.
__ bind(&load_size);
@@ -935,10 +940,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
__ SmiTst(kInterpreterBytecodeArrayRegister);
- __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ Assert(
+ ne, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
__ CompareObjectType(kInterpreterBytecodeArrayRegister, r0, no_reg,
BYTECODE_ARRAY_TYPE);
- __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ Assert(
+ eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
// Reset code age.
@@ -1194,10 +1201,12 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
__ SmiTst(kInterpreterBytecodeArrayRegister);
- __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ Assert(
+ ne, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
__ CompareObjectType(kInterpreterBytecodeArrayRegister, r1, no_reg,
BYTECODE_ARRAY_TYPE);
- __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ Assert(
+ eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
// Get the target bytecode offset from the frame.
@@ -1259,7 +1268,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// The feedback vector must be defined.
if (FLAG_debug_code) {
__ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
- __ Assert(ne, BailoutReason::kExpectedFeedbackVector);
+ __ Assert(ne, AbortReason::kExpectedFeedbackVector);
}
// Is there an optimization marker or optimized code in the feedback vector?
@@ -1799,8 +1808,9 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ mov(r4, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ stm(db_w, sp, r0.bit() | r1.bit() | r4.bit() |
fp.bit() | lr.bit());
+ __ Push(Smi::kZero); // Padding.
__ add(fp, sp,
- Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
+ Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp));
}
static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
@@ -1809,8 +1819,7 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// -----------------------------------
// Get the number of arguments passed (as a smi), tear down the frame and
// then tear down the parameters.
- __ ldr(r1, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
- kPointerSize)));
+ __ ldr(r1, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ LeaveFrame(StackFrame::ARGUMENTS_ADAPTOR);
__ add(sp, sp, Operand::PointerOffsetFromSmiKey(r1));
@@ -1889,7 +1898,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ JumpIfSmi(r3, &new_target_not_constructor);
__ ldr(scratch, FieldMemOperand(r3, HeapObject::kMapOffset));
__ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ tst(scratch, Operand(1 << Map::kIsConstructor));
+ __ tst(scratch, Operand(Map::IsConstructorBit::kMask));
__ b(ne, &new_target_constructor);
__ bind(&new_target_not_constructor);
{
@@ -2178,7 +2187,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// Check if target has a [[Call]] internal method.
__ ldrb(r4, FieldMemOperand(r4, Map::kBitFieldOffset));
- __ tst(r4, Operand(1 << Map::kIsCallable));
+ __ tst(r4, Operand(Map::IsCallableBit::kMask));
__ b(eq, &non_callable);
// Check if target is a proxy and call CallProxy external builtin
@@ -2268,7 +2277,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Check if target has a [[Construct]] internal method.
__ ldrb(r2, FieldMemOperand(r4, Map::kBitFieldOffset));
- __ tst(r2, Operand(1 << Map::kIsConstructor));
+ __ tst(r2, Operand(Map::IsConstructorBit::kMask));
__ b(eq, &non_constructor);
// Only dispatch to bound functions after checking whether they are
@@ -2337,17 +2346,6 @@ void Builtins::Generate_Abort(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kAbort);
}
-// static
-void Builtins::Generate_AbortJS(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r1 : message as String object
- // -- lr : return address
- // -----------------------------------
- __ Push(r1);
- __ Move(cp, Smi::kZero);
- __ TailCallRuntime(Runtime::kAbortJS);
-}
-
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : actual number of arguments
@@ -2434,8 +2432,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
__ sub(r4, fp, Operand(r2, LSL, kPointerSizeLog2));
// Adjust for frame.
- __ sub(r4, r4, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
- 2 * kPointerSize));
+ __ sub(r4, r4,
+ Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
+ kPointerSize));
Label fill;
__ bind(&fill);
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index 875f261835..dd92af89bb 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -111,9 +111,9 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// Initial map for the builtin InternalArray functions should be maps.
__ Ldr(x10, FieldMemOperand(x1, JSFunction::kPrototypeOrInitialMapOffset));
__ Tst(x10, kSmiTagMask);
- __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction);
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForInternalArrayFunction);
__ CompareObjectType(x10, x11, x12, MAP_TYPE);
- __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction);
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForInternalArrayFunction);
}
// Run the native code for the InternalArray function called as a normal
@@ -138,9 +138,9 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
// Initial map for the builtin Array functions should be maps.
__ Ldr(x10, FieldMemOperand(x1, JSFunction::kPrototypeOrInitialMapOffset));
__ Tst(x10, kSmiTagMask);
- __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction);
__ CompareObjectType(x10, x11, x12, MAP_TYPE);
- __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
}
// Run the native code for the Array function called as a normal function.
@@ -210,7 +210,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Check that FrameScope pushed the context on to the stack already.
__ Peek(x2, 0);
__ Cmp(x2, cp);
- __ Check(eq, kUnexpectedValue);
+ __ Check(eq, AbortReason::kUnexpectedValue);
}
// Push number of arguments.
@@ -315,7 +315,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// Check that FrameScope pushed the context on to the stack already.
__ Peek(x2, 0);
__ Cmp(x2, cp);
- __ Check(eq, kUnexpectedValue);
+ __ Check(eq, AbortReason::kUnexpectedValue);
}
// Preserve the incoming parameters on the stack.
@@ -348,10 +348,11 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// ----------- S t a t e -------------
// -- x0: receiver
- // -- Slot 3 / sp[0*kPointerSize]: new target
- // -- Slot 2 / sp[1*kPointerSize]: constructor function
- // -- Slot 1 / sp[2*kPointerSize]: number of arguments (tagged)
- // -- Slot 0 / sp[3*kPointerSize]: context
+ // -- Slot 4 / sp[0*kPointerSize]: new target
+ // -- Slot 3 / sp[1*kPointerSize]: padding
+ // -- Slot 2 / sp[2*kPointerSize]: constructor function
+ // -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged)
+ // -- Slot 0 / sp[4*kPointerSize]: context
// -----------------------------------
// Deoptimizer enters here.
masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
@@ -388,9 +389,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -- sp[0*kPointerSize]: implicit receiver (overwrite if argc odd)
// -- sp[1*kPointerSize]: implicit receiver
// -- sp[2*kPointerSize]: implicit receiver
- // -- x1 and sp[3*kPointerSize]: constructor function
- // -- sp[4*kPointerSize]: number of arguments (tagged)
- // -- sp[5*kPointerSize]: context
+ // -- sp[3*kPointerSize]: padding
+ // -- x1 and sp[4*kPointerSize]: constructor function
+ // -- sp[5*kPointerSize]: number of arguments (tagged)
+ // -- sp[6*kPointerSize]: context
// -----------------------------------
// Round the number of arguments down to the next even number, and claim
@@ -416,14 +418,8 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ InvokeFunction(x1, x3, actual, CALL_FUNCTION);
// ----------- S t a t e -------------
- // If argc is odd:
- // -- sp[0*kPointerSize]: implicit receiver
- // -- sp[1*kPointerSize]: constructor function
- // -- sp[2*kPointerSize]: number of arguments
- // -- sp[3*kPointerSize]: context
- // If argc is even:
// -- sp[0*kPointerSize]: implicit receiver
- // -- sp[1*kPointerSize]: implicit receiver
+ // -- sp[1*kPointerSize]: padding
// -- sp[2*kPointerSize]: constructor function
// -- sp[3*kPointerSize]: number of arguments
// -- sp[4*kPointerSize]: context
@@ -556,7 +552,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- __ CompareRoot(jssp, Heap::kRealStackLimitRootIndex);
+ __ CompareRoot(__ StackPointer(), Heap::kRealStackLimitRootIndex);
__ B(lo, &stack_overflow);
// Get number of arguments for generator function.
@@ -564,10 +560,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Ldr(w10,
FieldMemOperand(x10, SharedFunctionInfo::kFormalParameterCountOffset));
- // Claim slots for arguments and receiver.
- __ Add(x11, x10, 1);
+ // Claim slots for arguments and receiver (rounded up to a multiple of two).
+ __ Add(x11, x10, 2);
+ __ Bic(x11, x11, 1);
__ Claim(x11);
+ // Store padding (which might be replaced by the receiver).
+ __ Sub(x11, x11, 1);
+ __ Poke(padreg, Operand(x11, LSL, kPointerSizeLog2));
+
// Poke receiver into highest claimed slot.
__ Ldr(x5, FieldMemOperand(x1, JSGeneratorObject::kReceiverOffset));
__ Poke(x5, Operand(x10, LSL, kPointerSizeLog2));
@@ -578,8 +579,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -- x10 : argument count
// -- cp : generator context
// -- lr : return address
- // -- jssp[arg count] : generator receiver
- // -- jssp[0 .. arg count - 1] : claimed for args
+ // -- sp[arg count] : generator receiver
+ // -- sp[0 .. arg count - 1] : claimed for args
// -----------------------------------
// Push holes for arguments to generator function. Since the parser forced
@@ -603,7 +604,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Ldr(x3, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(x3, FieldMemOperand(x3, SharedFunctionInfo::kFunctionDataOffset));
__ CompareObjectType(x3, x3, x3, BYTECODE_ARRAY_TYPE);
- __ Assert(eq, kMissingBytecodeArray);
+ __ Assert(eq, AbortReason::kMissingBytecodeArray);
}
// Resume (Ignition/TurboFan) generator object.
@@ -624,10 +625,10 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Bind(&prepare_step_in_if_stepping);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(x1);
+ __ Push(x1, padreg);
__ PushArgument(x4);
__ CallRuntime(Runtime::kDebugOnFunctionCall);
- __ Pop(x1);
+ __ Pop(padreg, x1);
__ Ldr(x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
}
__ B(&stepping_prepared);
@@ -635,9 +636,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Bind(&prepare_step_in_suspended_generator);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(x1);
+ __ Push(x1, padreg);
__ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
- __ Pop(x1);
+ __ Pop(padreg, x1);
__ Ldr(x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
}
__ B(&stepping_prepared);
@@ -652,8 +653,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
Label* stack_overflow) {
- DCHECK(masm->StackPointer().Is(jssp));
-
UseScratchRegisterScope temps(masm);
Register scratch = temps.AcquireX();
@@ -767,10 +766,10 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ Mov(x23, x19);
__ Mov(x24, x19);
__ Mov(x25, x19);
+ __ Mov(x28, x19);
// Don't initialize the reserved registers.
// x26 : root register (root).
// x27 : context pointer (cp).
- // x28 : JS stack pointer (jssp).
// x29 : frame pointer (fp).
Handle<Code> builtin = is_construct
@@ -820,7 +819,7 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
// Drop receiver + arguments.
if (__ emit_debug_code()) {
__ Tst(args_size, kPointerSize - 1);
- __ Check(eq, kUnexpectedValue);
+ __ Check(eq, AbortReason::kUnexpectedValue);
}
__ Lsr(args_size, args_size, kPointerSizeLog2);
__ DropArguments(args_size);
@@ -873,6 +872,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
&fallthrough);
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
+ OptimizationMarker::kLogFirstExecution,
+ Runtime::kFunctionFirstExecution);
+ TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent);
TailCallRuntimeIfMarkerEquals(
@@ -887,7 +889,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
__ Cmp(
optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
- __ Assert(eq, kExpectedOptimizationSentinel);
+ __ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
}
__ B(&fallthrough);
}
@@ -967,7 +969,6 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ Ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset));
__ Add(bytecode_size_table, bytecode_size_table,
Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
- __ B(&load_size);
// Load the size of the current bytecode.
__ Bind(&load_size);
@@ -985,7 +986,6 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
// - x3: the incoming new target or generator object
// - cp: our context.
// - fp: our caller's frame pointer.
-// - jssp: stack pointer.
// - lr: return address.
//
// The function builds an interpreter frame. See InterpreterFrameConstants in
@@ -1009,7 +1009,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ Push(lr, fp, cp, closure);
- __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
+ __ Add(fp, __ StackPointer(), StandardFrameConstants::kFixedFrameSizeFromFp);
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
@@ -1030,11 +1030,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
- __ AssertNotSmi(kInterpreterBytecodeArrayRegister,
- kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ AssertNotSmi(
+ kInterpreterBytecodeArrayRegister,
+ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
__ CompareObjectType(kInterpreterBytecodeArrayRegister, x0, x0,
BYTECODE_ARRAY_TYPE);
- __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ Assert(
+ eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
// Reset code age.
@@ -1058,8 +1060,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Do a stack check to ensure we don't go over the limit.
Label ok;
- DCHECK(jssp.Is(__ StackPointer()));
- __ Sub(x10, jssp, Operand(x11));
+ __ Sub(x10, __ StackPointer(), Operand(x11));
__ CompareRoot(x10, Heap::kRealStackLimitRootIndex);
__ B(hs, &ok);
__ CallRuntime(Runtime::kThrowStackOverflow);
@@ -1181,10 +1182,19 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
__ Unreachable();
__ Bind(&done);
- // TODO(arm64): Claim one extra slot for padding and store padreg to the
- // padding slot.
+ // Round up to an even number of slots and claim them.
+ __ Add(slots_to_claim, slots_to_claim, 1);
+ __ Bic(slots_to_claim, slots_to_claim, 1);
__ Claim(slots_to_claim);
+ {
+ // Store padding, which may be overwritten.
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.AcquireX();
+ __ Sub(scratch, slots_to_claim, 1);
+ __ Poke(padreg, Operand(scratch, LSL, kPointerSizeLog2));
+ }
+
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
// Store "undefined" as the receiver arg if we need to.
Register receiver = x14;
@@ -1311,11 +1321,13 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
- __ AssertNotSmi(kInterpreterBytecodeArrayRegister,
- kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ AssertNotSmi(
+ kInterpreterBytecodeArrayRegister,
+ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
__ CompareObjectType(kInterpreterBytecodeArrayRegister, x1, x1,
BYTECODE_ARRAY_TYPE);
- __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ Assert(
+ eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
// Get the target bytecode offset from the frame.
@@ -1375,7 +1387,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// The feedback vector must be defined.
if (FLAG_debug_code) {
__ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
- __ Assert(ne, BailoutReason::kExpectedFeedbackVector);
+ __ Assert(ne, AbortReason::kExpectedFeedbackVector);
}
// Is there an optimization marker or optimized code in the feedback vector?
@@ -1634,7 +1646,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
kPointerSize;
// Set up frame pointer.
- __ Add(fp, jssp, frame_size);
+ __ Add(fp, __ StackPointer(), frame_size);
if (with_result) {
// Overwrite the hole inserted by the deoptimizer with the return value from
@@ -1770,9 +1782,9 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : argc
- // -- jssp[0] : argArray (if argc == 2)
- // -- jssp[8] : thisArg (if argc >= 1)
- // -- jssp[16] : receiver
+ // -- sp[0] : argArray (if argc == 2)
+ // -- sp[8] : thisArg (if argc >= 1)
+ // -- sp[16] : receiver
// -----------------------------------
ASM_LOCATION("Builtins::Generate_FunctionPrototypeApply");
@@ -1824,7 +1836,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x2 : argArray
// -- x1 : receiver
- // -- jssp[0] : thisArg
+ // -- sp[0] : thisArg
// -----------------------------------
// 2. We don't need to check explicitly for callable receiver here,
@@ -1855,55 +1867,65 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
Register argc = x0;
Register function = x1;
- Register scratch1 = x10;
- Register scratch2 = x11;
ASM_LOCATION("Builtins::Generate_FunctionPrototypeCall");
- // 1. Make sure we have at least one argument.
+ // 1. Get the callable to call (passed as receiver) from the stack.
+ __ Peek(function, Operand(argc, LSL, kXRegSizeLog2));
+
+ // 2. Handle case with no arguments.
{
- Label done;
- __ Cbnz(argc, &done);
- __ LoadRoot(scratch1, Heap::kUndefinedValueRootIndex);
- __ Push(scratch1);
- __ Mov(argc, 1);
- __ Bind(&done);
+ Label non_zero;
+ Register scratch = x10;
+ __ Cbnz(argc, &non_zero);
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ // Overwrite receiver with undefined, which will be the new receiver.
+ // We do not need to overwrite the padding slot above it with anything.
+ __ Poke(scratch, 0);
+ // Call function. The argument count is already zero.
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ Bind(&non_zero);
}
- // 2. Get the callable to call (passed as receiver) from the stack.
- __ Peek(function, Operand(argc, LSL, kXRegSizeLog2));
+ // 3. Overwrite the receiver with padding. If argc is odd, this is all we
+ // need to do.
+ Label arguments_ready;
+ __ Poke(padreg, Operand(argc, LSL, kXRegSizeLog2));
+ __ Tbnz(argc, 0, &arguments_ready);
- // 3. Shift arguments and return address one slot down on the stack
- // (overwriting the original receiver). Adjust argument count to make
- // the original first argument the new receiver.
+ // 4. If argc is even:
+ // Copy arguments two slots higher in memory, overwriting the original
+ // receiver and padding.
{
Label loop;
- // Calculate the copy start address (destination). Copy end address is jssp.
- __ SlotAddress(scratch2, argc);
- __ Sub(scratch1, scratch2, kPointerSize);
-
- __ Bind(&loop);
- __ Ldr(x12, MemOperand(scratch1, -kPointerSize, PostIndex));
- __ Str(x12, MemOperand(scratch2, -kPointerSize, PostIndex));
- __ Cmp(scratch1, jssp);
- __ B(ge, &loop);
- // Adjust the actual number of arguments and remove the top element
- // (which is a copy of the last argument).
- __ Sub(argc, argc, 1);
- __ Drop(1);
+ Register copy_from = x10;
+ Register copy_to = x11;
+ Register count = x12;
+ Register last_arg_slot = x13;
+ __ Mov(count, argc);
+ __ Sub(last_arg_slot, argc, 1);
+ __ SlotAddress(copy_from, last_arg_slot);
+ __ Add(copy_to, copy_from, 2 * kPointerSize);
+ __ CopyDoubleWords(copy_to, copy_from, count,
+ TurboAssembler::kSrcLessThanDst);
+ // Drop two slots. These are copies of the last two arguments.
+ __ Drop(2);
}
- // 4. Call the callable.
+ // 5. Adjust argument count to make the original first argument the new
+ // receiver and call the callable.
+ __ Bind(&arguments_ready);
+ __ Sub(argc, argc, 1);
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
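The rewrite removes the receiver without ever leaving the stack misaligned: the receiver slot is overwritten with padding, and only when argc is even (so the live slot count would turn odd) are the arguments copied two slots down-stack so a full pair can be dropped. A host-side simulation of the even case, assuming nothing from V8; index 0 models the top of the stack:

#include <cassert>
#include <vector>

int main() {
  const int pad = -1;
  // argc == 2 (even): top -> [last arg, first arg, receiver, padding]
  std::vector<int> s = {20, 10, 99 /* receiver */, pad};
  s[2] = pad;                          // Poke(padreg, argc): kill receiver
  for (int i = 1; i >= 0; --i)         // CopyDoubleWords, two slots deeper
    s[i + 2] = s[i];
  s.erase(s.begin(), s.begin() + 2);   // Drop(2): stale copies at the top
  assert((s == std::vector<int>{20, 10}));  // args intact, alignment kept
  return 0;
}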
void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : argc
- // -- jssp[0] : argumentsList (if argc == 3)
- // -- jssp[8] : thisArgument (if argc >= 2)
- // -- jssp[16] : target (if argc >= 1)
- // -- jssp[24] : receiver
+ // -- sp[0] : argumentsList (if argc == 3)
+ // -- sp[8] : thisArgument (if argc >= 2)
+ // -- sp[16] : target (if argc >= 1)
+ // -- sp[24] : receiver
// -----------------------------------
ASM_LOCATION("Builtins::Generate_ReflectApply");
@@ -1962,7 +1984,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x2 : argumentsList
// -- x1 : target
- // -- jssp[0] : thisArgument
+ // -- sp[0] : thisArgument
// -----------------------------------
// 2. We don't need to check explicitly for callable target here,
@@ -1977,10 +1999,10 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : argc
- // -- jssp[0] : new.target (optional)
- // -- jssp[8] : argumentsList
- // -- jssp[16] : target
- // -- jssp[24] : receiver
+ // -- sp[0] : new.target (optional)
+ // -- sp[8] : argumentsList
+ // -- sp[16] : target
+ // -- sp[24] : receiver
// -----------------------------------
ASM_LOCATION("Builtins::Generate_ReflectConstruct");
@@ -2044,7 +2066,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// -- x2 : argumentsList
// -- x1 : target
// -- x3 : new.target
- // -- jssp[0] : receiver (undefined)
+ // -- sp[0] : receiver (undefined)
// -----------------------------------
// 2. We don't need to check explicitly for constructor target here,
@@ -2060,25 +2082,26 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
-static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
+namespace {
+
+void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ Push(lr, fp);
__ Mov(x11, StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR));
__ Push(x11, x1); // x1: function
- // We do not yet push the number of arguments, to maintain a 16-byte aligned
- // stack pointer. This is done in step (3) in
- // Generate_ArgumentsAdaptorTrampoline.
- __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
+ __ SmiTag(x11, x0); // x0: number of arguments.
+ __ Push(x11, padreg);
+ __ Add(fp, __ StackPointer(),
+ ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp);
}
-static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
+void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : result being passed through
// -----------------------------------
// Get the number of arguments passed (as a smi), tear down the frame and
// then drop the parameters and the receiver.
- __ Ldr(x10, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
- kPointerSize)));
- __ Mov(jssp, fp);
+ __ Ldr(x10, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ Mov(__ StackPointer(), fp);
__ Pop(fp, lr);
// Drop actual parameters and receiver.
@@ -2086,6 +2109,67 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
__ DropArguments(x10, TurboAssembler::kCountExcludesReceiver);
}
+// Prepares the stack for copying the varargs. First we claim the necessary
+// slots, taking care of potential padding. Then we copy the existing arguments
+// one slot up or one slot down, as needed.
+void Generate_PrepareForCopyingVarargs(MacroAssembler* masm, Register argc,
+ Register len) {
+ Label len_odd, exit;
+ Register slots_to_copy = x10; // If needed.
+ __ Add(slots_to_copy, argc, 1);
+ __ Add(argc, argc, len);
+ __ Tbnz(len, 0, &len_odd);
+ __ Claim(len);
+ __ B(&exit);
+
+ __ Bind(&len_odd);
+ // Claim space we need. If argc is even, slots_to_claim = len + 1, as we need
+ // one extra padding slot. If argc is odd, we know that the original arguments
+ // will have a padding slot we can reuse (since len is odd), so
+ // slots_to_claim = len - 1.
+ {
+ Register scratch = x11;
+ Register slots_to_claim = x12;
+ __ Add(slots_to_claim, len, 1);
+ __ And(scratch, argc, 1);
+ __ Sub(slots_to_claim, slots_to_claim, Operand(scratch, LSL, 1));
+ __ Claim(slots_to_claim);
+ }
+
+ Label copy_down;
+ __ Tbz(slots_to_copy, 0, &copy_down);
+
+ // Copy existing arguments one slot up.
+ {
+ Register src = x11;
+ Register dst = x12;
+ Register scratch = x13;
+ __ Sub(scratch, argc, 1);
+ __ SlotAddress(src, scratch);
+ __ SlotAddress(dst, argc);
+ __ CopyDoubleWords(dst, src, slots_to_copy,
+ TurboAssembler::kSrcLessThanDst);
+ }
+ __ B(&exit);
+
+ // Copy existing arguments one slot down and add padding.
+ __ Bind(&copy_down);
+ {
+ Register src = x11;
+ Register dst = x12;
+ Register scratch = x13;
+ __ Add(src, len, 1);
+ __ Mov(dst, len); // CopySlots will corrupt dst.
+ __ CopySlots(dst, src, slots_to_copy);
+ __ Add(scratch, argc, 1);
+ __ Poke(padreg, Operand(scratch, LSL, kPointerSizeLog2)); // Store padding.
+ }
+
+ __ Bind(&exit);
+}
+
+} // namespace
+
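Both vararg pushers funnel through this helper, whose claim arithmetic keeps the total slot count even: an even len leaves the frame parity unchanged, while an odd len either reuses the caller's existing padding slot or claims one extra. A host-side check of that arithmetic, assuming nothing from V8 (argc_plus_len is already argc + len, matching the register contents at the And() above):

#include <cassert>
#include <cstdint>

int64_t SlotsToClaim(int64_t argc_plus_len, int64_t len) {
  if ((len & 1) == 0) return len;            // parity unchanged
  return len + 1 - 2 * (argc_plus_len & 1);  // reuse or add a padding slot
}

int main() {
  assert(SlotsToClaim(2 + 3, 3) == 2);  // argc 2, len 3: reuse old padding
  assert(SlotsToClaim(3 + 3, 3) == 4);  // argc 3, len 3: claim an extra slot
  assert(SlotsToClaim(4 + 2, 2) == 2);  // even len: claim exactly len
  return 0;
}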
// static
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<Code> code) {
@@ -2118,30 +2202,34 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ Bind(&done);
}
- // Push arguments onto the stack (thisArgument is already on the stack).
- {
- Label done, push, loop;
- Register src = x5;
+ // Skip argument setup if we don't need to push any varargs.
+ Label done;
+ __ Cbz(len, &done);
- __ Add(src, arguments_list, FixedArray::kHeaderSize - kHeapObjectTag);
- __ Add(argc, argc, len); // The 'len' argument for Call() or Construct().
- __ Cbz(len, &done);
+ Generate_PrepareForCopyingVarargs(masm, argc, len);
+
+ // Push varargs.
+ {
+ Label loop;
+ Register src = x10;
Register the_hole_value = x11;
Register undefined_value = x12;
- // We do not use the CompareRoot macro as it would do a LoadRoot behind the
- // scenes and we want to avoid that in a loop.
+ Register scratch = x13;
+ __ Add(src, arguments_list, FixedArray::kHeaderSize - kHeapObjectTag);
__ LoadRoot(the_hole_value, Heap::kTheHoleValueRootIndex);
__ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
- __ Claim(len);
+ // We do not use the CompareRoot macro as it would do a LoadRoot behind the
+ // scenes and we want to avoid that in a loop.
+ // TODO(all): Consider using Ldp and Stp.
__ Bind(&loop);
__ Sub(len, len, 1);
- __ Ldr(x10, MemOperand(src, kPointerSize, PostIndex));
- __ Cmp(x10, the_hole_value);
- __ Csel(x10, x10, undefined_value, ne);
- __ Poke(x10, Operand(len, LSL, kPointerSizeLog2));
+ __ Ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
+ __ Cmp(scratch, the_hole_value);
+ __ Csel(scratch, scratch, undefined_value, ne);
+ __ Poke(scratch, Operand(len, LSL, kPointerSizeLog2));
__ Cbnz(len, &loop);
- __ Bind(&done);
}
+ __ Bind(&done);
// Tail-call to the actual Call or Construct builtin.
__ Jump(code, RelocInfo::CODE_TARGET);
@@ -2158,13 +2246,16 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// -- x2 : start index (to support rest parameters)
// -----------------------------------
+ Register argc = x0;
+ Register start_index = x2;
+
// Check if new.target has a [[Construct]] internal method.
if (mode == CallOrConstructMode::kConstruct) {
Label new_target_constructor, new_target_not_constructor;
__ JumpIfSmi(x3, &new_target_not_constructor);
__ Ldr(x5, FieldMemOperand(x3, HeapObject::kMapOffset));
__ Ldrb(x5, FieldMemOperand(x5, Map::kBitFieldOffset));
- __ TestAndBranchIfAnySet(x5, 1 << Map::kIsConstructor,
+ __ TestAndBranchIfAnySet(x5, Map::IsConstructorBit::kMask,
&new_target_constructor);
__ Bind(&new_target_not_constructor);
{
@@ -2177,49 +2268,57 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
}
// Check if we have an arguments adaptor frame below the function frame.
- Label arguments_adaptor, arguments_done;
- __ Ldr(x5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ Ldr(x4, MemOperand(x5, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ Cmp(x4, StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR));
- __ B(eq, &arguments_adaptor);
- {
- __ Ldr(x6, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ Ldr(x6, FieldMemOperand(x6, JSFunction::kSharedFunctionInfoOffset));
- __ Ldrsw(x6, FieldMemOperand(
- x6, SharedFunctionInfo::kFormalParameterCountOffset));
- __ Mov(x5, fp);
- }
- __ B(&arguments_done);
- __ Bind(&arguments_adaptor);
+ // args_fp will point to the frame that contains the actual arguments, which
+ // will be the current frame unless we have an arguments adaptor frame, in
+ // which case args_fp points to the arguments adaptor frame.
+ Register args_fp = x5;
+ Register len = x6;
{
- // Just load the length from ArgumentsAdaptorFrame.
- __ Ldrsw(x6, UntagSmiMemOperand(
- x5, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ Label arguments_adaptor, arguments_done;
+ Register scratch = x10;
+ __ Ldr(args_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(x4, MemOperand(args_fp,
+ CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ Cmp(x4, StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR));
+ __ B(eq, &arguments_adaptor);
+ {
+ __ Ldr(scratch,
+ MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Ldr(scratch,
+ FieldMemOperand(scratch, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldrsw(len,
+ FieldMemOperand(
+ scratch, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ Mov(args_fp, fp);
+ }
+ __ B(&arguments_done);
+ __ Bind(&arguments_adaptor);
+ {
+ // Just load the length from ArgumentsAdaptorFrame.
+ __ Ldrsw(len,
+ UntagSmiMemOperand(
+ args_fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ }
+ __ Bind(&arguments_done);
}
- __ Bind(&arguments_done);
Label stack_done, stack_overflow;
- __ Subs(x6, x6, x2);
+ __ Subs(len, len, start_index);
__ B(le, &stack_done);
- {
- // Check for stack overflow.
- Generate_StackOverflowCheck(masm, x6, &stack_overflow);
+ // Check for stack overflow.
+ Generate_StackOverflowCheck(masm, x6, &stack_overflow);
- // Forward the arguments from the caller frame.
- {
- Label loop;
- __ Add(x5, x5, kPointerSize);
- __ Add(x0, x0, x6);
- __ Bind(&loop);
- {
- __ Ldr(x4, MemOperand(x5, x6, LSL, kPointerSizeLog2));
- __ Push(x4);
- __ Subs(x6, x6, 1);
- __ B(ne, &loop);
- }
- }
+ Generate_PrepareForCopyingVarargs(masm, argc, len);
+
+ // Push varargs.
+ {
+ Register dst = x13;
+ __ Add(args_fp, args_fp, 2 * kPointerSize);
+ __ SlotAddress(dst, 0);
+ __ CopyDoubleWords(dst, args_fp, len);
}
__ B(&stack_done);
+
__ Bind(&stack_overflow);
__ TailCallRuntime(Runtime::kThrowStackOverflow);
__ Bind(&stack_done);
@@ -2338,12 +2437,16 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// -- x3 : new.target (only in case of [[Construct]])
// -----------------------------------
+ Register bound_argc = x4;
+ Register bound_argv = x2;
+
// Load [[BoundArguments]] into x2 and length of that into x4.
Label no_bound_arguments;
- __ Ldr(x2, FieldMemOperand(x1, JSBoundFunction::kBoundArgumentsOffset));
- __ Ldrsw(x4, UntagSmiFieldMemOperand(x2, FixedArray::kLengthOffset));
- __ Cmp(x4, 0);
- __ B(eq, &no_bound_arguments);
+ __ Ldr(bound_argv,
+ FieldMemOperand(x1, JSBoundFunction::kBoundArgumentsOffset));
+ __ Ldrsw(bound_argc,
+ UntagSmiFieldMemOperand(bound_argv, FixedArray::kLengthOffset));
+ __ Cbz(bound_argc, &no_bound_arguments);
{
// ----------- S t a t e -------------
// -- x0 : the number of arguments (not including the receiver)
@@ -2353,44 +2456,97 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// -- x4 : the number of [[BoundArguments]]
// -----------------------------------
+ Register argc = x0;
+
+ // Check for stack overflow.
{
- Label done;
- __ Claim(x4);
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack
// limit".
- __ CompareRoot(jssp, Heap::kRealStackLimitRootIndex);
+ Label done;
+ __ LoadRoot(x10, Heap::kRealStackLimitRootIndex);
+ // Make x10 the space we have left. The stack might already have overflowed
+ // here, which will cause x10 to become negative.
+ __ Sub(x10, masm->StackPointer(), x10);
+ // Check if the arguments will overflow the stack.
+ __ Cmp(x10, Operand(bound_argc, LSL, kPointerSizeLog2));
__ B(gt, &done); // Signed comparison.
- // Restore the stack pointer.
- __ Drop(x4);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterFrame(StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kThrowStackOverflow);
- }
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
__ Bind(&done);
}
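In C-like terms the rewritten check evaluates the predicate below, with a signed comparison because the stack pointer may already sit below the limit. The helper is hypothetical and assumes kPointerSize == 8 on arm64:

    bool WouldOverflowStack(intptr_t sp, intptr_t real_stack_limit,
                            intptr_t bound_argc) {
      intptr_t remaining_bytes = sp - real_stack_limit;  // may be negative
      return remaining_bytes <= bound_argc * 8;          // 8 == kPointerSize
    }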
- UseScratchRegisterScope temps(masm);
- Register argc = temps.AcquireX();
- // Relocate arguments down the stack.
- __ Mov(argc, x0);
- __ CopySlots(0, x4, argc);
+ // Check if we need padding.
+ Label copy_args, copy_bound_args;
+ Register total_argc = x15;
+ Register slots_to_claim = x12;
+ __ Add(total_argc, argc, bound_argc);
+ __ Mov(slots_to_claim, bound_argc);
+ __ Tbz(bound_argc, 0, &copy_args);
+
+ // Load the receiver before we start moving the arguments. We only need
+ // it on this path because the number of bound arguments is odd.
+ Register receiver = x14;
+ __ Peek(receiver, Operand(argc, LSL, kPointerSizeLog2));
- // Copy [[BoundArguments]] to the stack (below the arguments). The first
- // element of the array is copied to the highest address.
+ // Claim the space we need. If argc is odd, slots_to_claim = bound_argc + 1,
+ // as we need one extra padding slot. If argc is even, we know the original
+ // arguments already include a padding slot we can reuse (since bound_argc
+ // is odd), so slots_to_claim = bound_argc - 1.
{
- Label loop;
- __ Ldrsw(x4, UntagSmiFieldMemOperand(x2, FixedArray::kLengthOffset));
- __ Add(x2, x2, FixedArray::kHeaderSize - kHeapObjectTag);
- __ SlotAddress(x11, x0);
- __ Add(x0, x0, x4);
- __ Bind(&loop);
- __ Sub(x4, x4, 1);
- __ Ldr(x10, MemOperand(x2, x4, LSL, kPointerSizeLog2));
- // Poke into claimed area of stack.
- __ Str(x10, MemOperand(x11, kPointerSize, PostIndex));
- __ Cbnz(x4, &loop);
+ Register scratch = x11;
+ __ Add(slots_to_claim, bound_argc, 1);
+ __ And(scratch, total_argc, 1);
+ __ Sub(slots_to_claim, slots_to_claim, Operand(scratch, LSL, 1));
+ }
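The block above evaluates slots_to_claim = bound_argc + 1 - 2 * (total_argc & 1), and is only reached when bound_argc is odd. Two worked cases:

    argc = 3 (odd),  bound_argc = 3  =>  total_argc = 6 (even)
      slots_to_claim = 3 + 1 - 2 * 0 = 4   (claim one new padding slot)
    argc = 2 (even), bound_argc = 3  =>  total_argc = 5 (odd)
      slots_to_claim = 3 + 1 - 2 * 1 = 2   (reuse the existing padding slot)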
+
+ // Copy bound arguments.
+ __ Bind(&copy_args);
+ // Skip claim and copy of existing arguments in the special case where we
+ // do not need to claim any slots (this will be the case when
+ // bound_argc == 1 and the existing arguments have padding we can reuse).
+ __ Cbz(slots_to_claim, &copy_bound_args);
+ __ Claim(slots_to_claim);
+ {
+ Register count = x10;
+ // Relocate arguments to a lower address.
+ __ Mov(count, argc);
+ __ CopySlots(0, slots_to_claim, count);
+
+ __ Bind(&copy_bound_args);
+ // Copy [[BoundArguments]] to the stack (below the arguments). The first
+ // element of the array is copied to the highest address.
+ {
+ Label loop;
+ Register counter = x10;
+ Register scratch = x11;
+ Register copy_to = x12;
+ __ Add(bound_argv, bound_argv,
+ FixedArray::kHeaderSize - kHeapObjectTag);
+ __ SlotAddress(copy_to, argc);
+ __ Add(argc, argc,
+ bound_argc); // Update argc to include bound arguments.
+ __ Lsl(counter, bound_argc, kPointerSizeLog2);
+ __ Bind(&loop);
+ __ Sub(counter, counter, kPointerSize);
+ __ Ldr(scratch, MemOperand(bound_argv, counter));
+ // Poke into claimed area of stack.
+ __ Str(scratch, MemOperand(copy_to, kPointerSize, PostIndex));
+ __ Cbnz(counter, &loop);
+ }
+
+ {
+ Label done;
+ Register scratch = x10;
+ __ Tbz(bound_argc, 0, &done);
+ // Store receiver.
+ __ Add(scratch, __ StackPointer(),
+ Operand(total_argc, LSL, kPointerSizeLog2));
+ __ Str(receiver, MemOperand(scratch, kPointerSize, PostIndex));
+ __ Tbnz(total_argc, 0, &done);
+ // Store padding.
+ __ Str(padreg, MemOperand(scratch));
+ __ Bind(&done);
+ }
}
}
__ Bind(&no_bound_arguments);
@@ -2438,7 +2594,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// Check if target has a [[Call]] internal method.
__ Ldrb(x4, FieldMemOperand(x4, Map::kBitFieldOffset));
- __ TestAndBranchIfAllClear(x4, 1 << Map::kIsCallable, &non_callable);
+ __ TestAndBranchIfAllClear(x4, Map::IsCallableBit::kMask, &non_callable);
// Check if target is a proxy and call CallProxy external builtin
__ Cmp(x5, JS_PROXY_TYPE);
@@ -2533,7 +2689,8 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Check if target has a [[Construct]] internal method.
__ Ldrb(x2, FieldMemOperand(x4, Map::kBitFieldOffset));
- __ TestAndBranchIfAllClear(x2, 1 << Map::kIsConstructor, &non_constructor);
+ __ TestAndBranchIfAllClear(x2, Map::IsConstructorBit::kMask,
+ &non_constructor);
// Only dispatch to bound functions after checking whether they are
// constructors.
@@ -2605,19 +2762,6 @@ void Builtins::Generate_Abort(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kAbort);
}
-// static
-void Builtins::Generate_AbortJS(MacroAssembler* masm) {
- ASM_LOCATION("Builtins::Generate_AbortJS");
- // ----------- S t a t e -------------
- // -- x1 : message as String object
- // -- lr : return address
- // -----------------------------------
- MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
- __ PushArgument(x1);
- __ Move(cp, Smi::kZero);
- __ TailCallRuntime(Runtime::kAbortJS);
-}
-
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_ArgumentsAdaptorTrampoline");
// ----------- S t a t e -------------
@@ -2651,14 +2795,16 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// 4 | num of | |
// | actual args | |
// |- - - - - - - - -| |
- // [5] | [padding] | |
+ // 5 | padding | |
// |-----------------+---- |
- // 5+pad | receiver | ^ |
+ // [6] | [padding] | ^ |
+ // |- - - - - - - - -| | |
+ // 6+pad | receiver | | |
// | (parameter 0) | | |
// |- - - - - - - - -| | |
- // 6+pad | parameter 1 | | |
+ // 7+pad | parameter 1 | | |
// |- - - - - - - - -| Frame slots ----> expected args
- // 7+pad | parameter 2 | | |
+ // 8+pad | parameter 2 | | |
// |- - - - - - - - -| | |
// | | | |
// ... | ... | | |
@@ -2671,7 +2817,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// | [undefined] | v <-- stack ptr v
// -----+-----------------+---------------------------------
//
- // There is an optional slot of padding to ensure stack alignment.
+ // There is an optional slot of padding above the receiver to ensure stack
+ // alignment of the arguments.
// If the number of expected arguments is larger than the number of actual
// arguments, the remaining expected slots will be filled with undefined.
@@ -2695,10 +2842,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Register argc_unused_actual = x14;
Register scratch1 = x15, scratch2 = x16;
- // We need slots for the expected arguments, with two extra slots for the
- // number of actual arguments and the receiver.
+ // We need slots for the expected arguments, with one extra slot for the
+ // receiver.
__ RecordComment("-- Stack check --");
- __ Add(scratch1, argc_expected, 2);
+ __ Add(scratch1, argc_expected, 1);
Generate_StackOverflowCheck(masm, scratch1, &stack_overflow);
// Round up number of slots to be even, to maintain stack alignment.
@@ -2707,7 +2854,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ Bic(scratch1, scratch1, 1);
__ Claim(scratch1, kPointerSize);
- __ Mov(copy_to, jssp);
+ __ Mov(copy_to, __ StackPointer());
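Bic(scratch1, scratch1, 1) computes scratch1 & ~1; combined with the increment elided by the hunk boundary above, this is the usual round-up-to-even idiom that keeps the claimed slot count 16-byte aligned. As a one-line sketch (ours, for illustration):

    // Round n up to the nearest even value, e.g. 3 -> 4, 4 -> 4.
    inline int RoundUpToEven(int n) { return (n + 1) & ~1; }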
// Preparing the expected arguments is done in four steps, the order of
// which is chosen so we can use LDP/STP and avoid conditional branches as
@@ -2738,7 +2885,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ Bind(&enough_arguments);
// (2) Copy all of the actual arguments, or as many as we need.
+ Label skip_copy;
__ RecordComment("-- Copy actual arguments --");
+ __ Cbz(argc_to_copy, &skip_copy);
__ Add(copy_end, copy_to, Operand(argc_to_copy, LSL, kPointerSizeLog2));
__ Add(copy_from, fp, 2 * kPointerSize);
// Adjust for difference between actual and expected arguments.
@@ -2755,21 +2904,22 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ Stp(scratch1, scratch2, MemOperand(copy_to, 2 * kPointerSize, PostIndex));
__ Cmp(copy_end, copy_to);
__ B(hi, &copy_2_by_2);
+ __ Bind(&skip_copy);
- // (3) Store number of actual arguments and padding. The padding might be
- // unnecessary, in which case it will be overwritten by the receiver.
- __ RecordComment("-- Store number of args and padding --");
- __ SmiTag(scratch1, argc_actual);
- __ Stp(xzr, scratch1, MemOperand(fp, -4 * kPointerSize));
+ // (3) Store padding; it will be overwritten by the receiver if the padding
+ // turns out to be unnecessary.
+ __ RecordComment("-- Store padding --");
+ __ Str(padreg, MemOperand(fp, -5 * kPointerSize));
- // (4) Store receiver. Calculate target address from jssp to avoid checking
+ // (4) Store receiver. Calculate target address from the sp to avoid checking
// for padding. Storing the receiver will overwrite either the extra slot
// we copied with the actual arguments, if we did copy one, or the padding we
// stored above.
__ RecordComment("-- Store receiver --");
__ Add(copy_from, fp, 2 * kPointerSize);
__ Ldr(scratch1, MemOperand(copy_from, argc_actual, LSL, kPointerSizeLog2));
- __ Str(scratch1, MemOperand(jssp, argc_expected, LSL, kPointerSizeLog2));
+ __ Str(scratch1,
+ MemOperand(__ StackPointer(), argc_expected, LSL, kPointerSizeLog2));
// Arguments have been adapted. Now call the entry point.
__ RecordComment("-- Call entry point --");
@@ -2805,10 +2955,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
}
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
- // Wasm code uses the csp. This builtin expects to use the jssp.
- // Thus, move csp to jssp when entering this builtin (called from wasm).
- DCHECK(masm->StackPointer().is(jssp));
- __ Move(jssp, csp);
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -2833,9 +2979,6 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ PopDRegList(fp_regs);
__ PopXRegList(gp_regs);
}
- // Move back to csp land. jssp now has the same value as when entering this
- // function, but csp might have changed in the runtime call.
- __ Move(csp, jssp);
// Now jump to the instructions of the returned code object.
__ Jump(x8);
}
diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc
index 5fec0abfa5..027baa2873 100644
--- a/deps/v8/src/builtins/builtins-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-array-gen.cc
@@ -31,6 +31,8 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
typedef std::function<void(ArrayBuiltinCodeStubAssembler* masm)>
PostLoopAction;
+ enum class MissingPropertyMode { kSkip, kUseUndefined };
+
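The new mode captures the two hole-handling behaviours the iterating builtins need. A hypothetical helper spelling out the contract, based on the call sites later in this file (find()/findIndex() pass kUseUndefined, the other iterating builtins pass kSkip):

    // Should the callback still run for an absent element?
    bool VisitsMissingElements(MissingPropertyMode mode) {
      // kUseUndefined: find()/findIndex() see undefined for holes.
      // kSkip: forEach/some/every/map/filter/reduce skip absent indices.
      return mode == MissingPropertyMode::kUseUndefined;
    }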
void FindResultGenerator() { a_.Bind(UndefinedConstant()); }
Node* FindProcessor(Node* k_value, Node* k) {
@@ -383,6 +385,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
const char* name, const BuiltinResultGenerator& generator,
const CallResultProcessor& processor, const PostLoopAction& action,
const Callable& slow_case_continuation,
+ MissingPropertyMode missing_property_mode,
ForEachDirection direction = ForEachDirection::kForward) {
Label non_array(this), array_changes(this, {&k_, &a_, &to_});
@@ -439,7 +442,8 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
generator(this);
- HandleFastElements(processor, action, &fully_spec_compliant_, direction);
+ HandleFastElements(processor, action, &fully_spec_compliant_, direction,
+ missing_property_mode);
BIND(&fully_spec_compliant_);
@@ -550,6 +554,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
void GenerateIteratingArrayBuiltinLoopContinuation(
const CallResultProcessor& processor, const PostLoopAction& action,
+ MissingPropertyMode missing_property_mode,
ForEachDirection direction = ForEachDirection::kForward) {
Label loop(this, {&k_, &a_, &to_});
Label after_loop(this);
@@ -558,11 +563,11 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
{
if (direction == ForEachDirection::kForward) {
// 8. Repeat, while k < len
- GotoIfNumericGreaterThanOrEqual(k(), len_, &after_loop);
+ GotoIfNumberGreaterThanOrEqual(k(), len_, &after_loop);
} else {
// OR
// 10. Repeat, while k >= 0
- GotoIfNumericGreaterThanOrEqual(SmiConstant(-1), k(), &after_loop);
+ GotoIfNumberGreaterThanOrEqual(SmiConstant(-1), k(), &after_loop);
}
Label done_element(this, &to_);
@@ -572,12 +577,15 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
// index in the range [0, 2^32-1).
CSA_ASSERT(this, IsNumberArrayIndex(k()));
- // b. Let kPresent be HasProperty(O, Pk).
- // c. ReturnIfAbrupt(kPresent).
- Node* k_present = HasProperty(o(), k(), context(), kHasProperty);
+ if (missing_property_mode == MissingPropertyMode::kSkip) {
+ // b. Let kPresent be HasProperty(O, Pk).
+ // c. ReturnIfAbrupt(kPresent).
+ TNode<Oddball> k_present =
+ HasProperty(o(), k(), context(), kHasProperty);
- // d. If kPresent is true, then
- GotoIf(WordNotEqual(k_present, TrueConstant()), &done_element);
+ // d. If kPresent is true, then
+ GotoIf(IsFalse(k_present), &done_element);
+ }
// i. Let kValue be Get(O, Pk).
// ii. ReturnIfAbrupt(kValue).
@@ -655,7 +663,8 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
void VisitAllFastElementsOneKind(ElementsKind kind,
const CallResultProcessor& processor,
Label* array_changed, ParameterMode mode,
- ForEachDirection direction) {
+ ForEachDirection direction,
+ MissingPropertyMode missing_property_mode) {
Comment("begin VisitAllFastElementsOneKind");
VARIABLE(original_map, MachineRepresentation::kTagged);
original_map.Bind(LoadMap(o()));
@@ -670,7 +679,8 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
list, start, end,
[=, &original_map](Node* index) {
k_.Bind(ParameterToTagged(index, mode));
- Label one_element_done(this), hole_element(this);
+ Label one_element_done(this), hole_element(this),
+ process_element(this);
// Check if o's map has changed during the callback. If so, we have to
// fall back to the slower spec implementation for the rest of the
@@ -693,24 +703,32 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
? FixedArray::kHeaderSize
: (FixedArray::kHeaderSize - kHeapObjectTag);
Node* offset = ElementOffsetFromIndex(index, kind, mode, base_size);
- Node* value = nullptr;
+ VARIABLE(value, MachineRepresentation::kTagged);
if (kind == PACKED_ELEMENTS) {
- value = LoadObjectField(elements, offset);
- GotoIf(WordEqual(value, TheHoleConstant()), &hole_element);
+ value.Bind(LoadObjectField(elements, offset));
+ GotoIf(WordEqual(value.value(), TheHoleConstant()), &hole_element);
} else {
Node* double_value =
LoadDoubleWithHoleCheck(elements, offset, &hole_element);
- value = AllocateHeapNumberWithValue(double_value);
+ value.Bind(AllocateHeapNumberWithValue(double_value));
}
- a_.Bind(processor(this, value, k()));
- Goto(&one_element_done);
+ Goto(&process_element);
BIND(&hole_element);
- // Check if o's prototype change unexpectedly has elements after the
- // callback in the case of a hole.
- BranchIfPrototypesHaveNoElements(o_map, &one_element_done,
- array_changed);
-
+ if (missing_property_mode == MissingPropertyMode::kSkip) {
+ // Check if o's prototype chain unexpectedly has elements after
+ // the callback in the case of a hole.
+ BranchIfPrototypesHaveNoElements(o_map, &one_element_done,
+ array_changed);
+ } else {
+ value.Bind(UndefinedConstant());
+ Goto(&process_element);
+ }
+ BIND(&process_element);
+ {
+ a_.Bind(processor(this, value.value(), k()));
+ Goto(&one_element_done);
+ }
BIND(&one_element_done);
},
1, mode, advance_mode);
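A scalar rendering of the per-element body above; every name here is a stand-in for illustration, not V8 API:

    Value VisitOneFastElement(Value raw, MissingPropertyMode mode) {
      if (IsTheHole(raw)) {
        if (mode == MissingPropertyMode::kSkip) {
          // Skipping is only safe if no prototype can supply the element;
          // otherwise the caller falls back to the generic loop.
          return SkipOrFallBack();
        }
        raw = Undefined();  // kUseUndefined: visit the hole as undefined
      }
      return Process(raw);  // a_ = processor(value, k)
    }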
@@ -719,7 +737,8 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
void HandleFastElements(const CallResultProcessor& processor,
const PostLoopAction& action, Label* slow,
- ForEachDirection direction) {
+ ForEachDirection direction,
+ MissingPropertyMode missing_property_mode) {
Label switch_on_elements_kind(this), fast_elements(this),
maybe_double_elements(this), fast_double_elements(this);
@@ -742,7 +761,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
BIND(&fast_elements);
{
VisitAllFastElementsOneKind(PACKED_ELEMENTS, processor, slow, mode,
- direction);
+ direction, missing_property_mode);
action(this);
@@ -757,7 +776,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
BIND(&fast_double_elements);
{
VisitAllFastElementsOneKind(PACKED_DOUBLE_ELEMENTS, processor, slow, mode,
- direction);
+ direction, missing_property_mode);
action(this);
@@ -879,7 +898,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
ElementsKind source_elements_kind_ = ElementsKind::NO_ELEMENTS;
};
-TF_BUILTIN(FastArrayPop, CodeStubAssembler) {
+TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) {
Node* argc = Parameter(BuiltinDescriptor::kArgumentsCount);
Node* context = Parameter(BuiltinDescriptor::kContext);
CSA_ASSERT(this, IsUndefined(Parameter(BuiltinDescriptor::kNewTarget)));
@@ -977,7 +996,7 @@ TF_BUILTIN(FastArrayPop, CodeStubAssembler) {
}
}
-TF_BUILTIN(FastArrayPush, CodeStubAssembler) {
+TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
TVARIABLE(IntPtrT, arg_index);
Label default_label(this, &arg_index);
Label smi_transition(this);
@@ -1106,9 +1125,10 @@ TF_BUILTIN(FastArrayPush, CodeStubAssembler) {
}
}
-class FastArraySliceCodeStubAssembler : public CodeStubAssembler {
+class ArrayPrototypeSliceCodeStubAssembler : public CodeStubAssembler {
public:
- explicit FastArraySliceCodeStubAssembler(compiler::CodeAssemblerState* state)
+ explicit ArrayPrototypeSliceCodeStubAssembler(
+ compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}
Node* HandleFastSlice(Node* context, Node* array, Node* from, Node* count,
@@ -1245,11 +1265,11 @@ class FastArraySliceCodeStubAssembler : public CodeStubAssembler {
void CopyOneElement(Node* context, Node* o, Node* a, Node* p_k, Variable& n) {
// b. Let kPresent be HasProperty(O, Pk).
// c. ReturnIfAbrupt(kPresent).
- Node* k_present = HasProperty(o, p_k, context, kHasProperty);
+ TNode<Oddball> k_present = HasProperty(o, p_k, context, kHasProperty);
// d. If kPresent is true, then
Label done_element(this);
- GotoIf(WordNotEqual(k_present, TrueConstant()), &done_element);
+ GotoIf(IsFalse(k_present), &done_element);
// i. Let kValue be Get(O, Pk).
// ii. ReturnIfAbrupt(kValue).
@@ -1264,10 +1284,10 @@ class FastArraySliceCodeStubAssembler : public CodeStubAssembler {
}
};
-TF_BUILTIN(FastArraySlice, FastArraySliceCodeStubAssembler) {
+TF_BUILTIN(ArrayPrototypeSlice, ArrayPrototypeSliceCodeStubAssembler) {
Node* const argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
- Node* const context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Label slow(this, Label::kDeferred), fast_elements_kind(this);
CodeStubArguments args(this, argc);
@@ -1339,15 +1359,15 @@ TF_BUILTIN(FastArraySlice, FastArraySliceCodeStubAssembler) {
// 5. Let relativeStart be ToInteger(start).
// 6. ReturnIfAbrupt(relativeStart).
- Node* arg0 = args.GetOptionalArgumentValue(0, SmiConstant(0));
- Node* relative_start = ToInteger(context, arg0);
+ TNode<Object> arg0 = CAST(args.GetOptionalArgumentValue(0, SmiConstant(0)));
+ Node* relative_start = ToInteger_Inline(context, arg0);
// 7. If relativeStart < 0, let k be max((len + relativeStart),0);
// else let k be min(relativeStart, len.value()).
VARIABLE(k, MachineRepresentation::kTagged);
Label relative_start_positive(this), relative_start_done(this);
- GotoIfNumericGreaterThanOrEqual(relative_start, SmiConstant(0),
- &relative_start_positive);
+ GotoIfNumberGreaterThanOrEqual(relative_start, SmiConstant(0),
+ &relative_start_positive);
k.Bind(NumberMax(NumberAdd(len.value(), relative_start), NumberConstant(0)));
Goto(&relative_start_done);
BIND(&relative_start_positive);
@@ -1358,11 +1378,12 @@ TF_BUILTIN(FastArraySlice, FastArraySliceCodeStubAssembler) {
// 8. If end is undefined, let relativeEnd be len;
// else let relativeEnd be ToInteger(end).
// 9. ReturnIfAbrupt(relativeEnd).
- Node* end = args.GetOptionalArgumentValue(1, UndefinedConstant());
+ TNode<Object> end =
+ CAST(args.GetOptionalArgumentValue(1, UndefinedConstant()));
Label end_undefined(this), end_done(this);
VARIABLE(relative_end, MachineRepresentation::kTagged);
GotoIf(WordEqual(end, UndefinedConstant()), &end_undefined);
- relative_end.Bind(ToInteger(context, end));
+ relative_end.Bind(ToInteger_Inline(context, end));
Goto(&end_done);
BIND(&end_undefined);
relative_end.Bind(len.value());
@@ -1373,8 +1394,8 @@ TF_BUILTIN(FastArraySlice, FastArraySliceCodeStubAssembler) {
// else let final be min(relativeEnd, len).
VARIABLE(final, MachineRepresentation::kTagged);
Label relative_end_positive(this), relative_end_done(this);
- GotoIfNumericGreaterThanOrEqual(relative_end.value(), NumberConstant(0),
- &relative_end_positive);
+ GotoIfNumberGreaterThanOrEqual(relative_end.value(), NumberConstant(0),
+ &relative_end_positive);
final.Bind(NumberMax(NumberAdd(len.value(), relative_end.value()),
NumberConstant(0)));
Goto(&relative_end_done);
@@ -1412,7 +1433,7 @@ TF_BUILTIN(FastArraySlice, FastArraySliceCodeStubAssembler) {
BIND(&loop);
{
// 15. Repeat, while k < final
- GotoIfNumericGreaterThanOrEqual(k.value(), final.value(), &after_loop);
+ GotoIfNumberGreaterThanOrEqual(k.value(), final.value(), &after_loop);
Node* p_k = k.value(); // ToString(context, k.value()) is a no-op
@@ -1438,7 +1459,7 @@ TF_BUILTIN(FastArraySlice, FastArraySliceCodeStubAssembler) {
args.PopAndReturn(a);
}
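Steps 5-13 above clamp the optional start/end arguments into [0, len], counting negative values from the end. In plain C++ (helper name ours):

    #include <algorithm>

    // relative < 0 counts from the end; the result is clamped to [0, len].
    double ClampRelativeIndex(double relative, double len) {
      return relative < 0 ? std::max(len + relative, 0.0)
                          : std::min(relative, len);
    }

For example, with len == 5 a call like slice(-2) yields k == 3 and final == 5, so two elements are copied.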
-TF_BUILTIN(FastArrayShift, CodeStubAssembler) {
+TF_BUILTIN(ArrayPrototypeShift, CodeStubAssembler) {
Node* argc = Parameter(BuiltinDescriptor::kArgumentsCount);
Node* context = Parameter(BuiltinDescriptor::kContext);
CSA_ASSERT(this, IsUndefined(Parameter(BuiltinDescriptor::kNewTarget)));
@@ -1619,6 +1640,206 @@ TF_BUILTIN(CloneFastJSArray, ArrayBuiltinCodeStubAssembler) {
Return(CloneFastJSArray(context, array, mode));
}
+TF_BUILTIN(ArrayFindLoopContinuation, ArrayBuiltinCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* this_arg = Parameter(Descriptor::kThisArg);
+ Node* array = Parameter(Descriptor::kArray);
+ Node* object = Parameter(Descriptor::kObject);
+ Node* initial_k = Parameter(Descriptor::kInitialK);
+ Node* len = Parameter(Descriptor::kLength);
+ Node* to = Parameter(Descriptor::kTo);
+
+ InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
+ this_arg, array, object, initial_k,
+ len, to);
+
+ GenerateIteratingArrayBuiltinLoopContinuation(
+ &ArrayBuiltinCodeStubAssembler::FindProcessor,
+ &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
+ MissingPropertyMode::kUseUndefined, ForEachDirection::kForward);
+}
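The loop continuation carries the complete iteration state, which is what lets the deopt continuations below resume mid-loop by reconstructing this tuple. A purely illustrative grouping, with names mirroring the Descriptor::k* parameters above:

    struct FindLoopState {   // hypothetical; V8 passes these as flat args
      Node* receiver;        // original receiver, also passed as |object|
      Node* callbackfn;      // predicate given to Array.prototype.find
      Node* this_arg;        // |thisArg| for the callback
      Node* array;           // accumulator; unused by find(), so undefined
      Node* object;          // O, the object being iterated
      Node* initial_k;       // index at which to resume
      Node* len;             // cached length
      Node* to;              // unused by find(), passed as undefined
    };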
+
+// Continuation that is called after an eager deoptimization from TF (e.g.
+// when the array changes during iteration).
+TF_BUILTIN(ArrayFindLoopEagerDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* this_arg = Parameter(Descriptor::kThisArg);
+ Node* initial_k = Parameter(Descriptor::kInitialK);
+ Node* len = Parameter(Descriptor::kLength);
+
+ Return(CallBuiltin(Builtins::kArrayFindLoopContinuation, context, receiver,
+ callbackfn, this_arg, UndefinedConstant(), receiver,
+ initial_k, len, UndefinedConstant()));
+}
+
+// Continuation that is called after a lazy deoptimization from TF (e.g.
+// when the callback function is no longer callable).
+TF_BUILTIN(ArrayFindLoopLazyDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* this_arg = Parameter(Descriptor::kThisArg);
+ Node* initial_k = Parameter(Descriptor::kInitialK);
+ Node* len = Parameter(Descriptor::kLength);
+
+ Return(CallBuiltin(Builtins::kArrayFindLoopContinuation, context, receiver,
+ callbackfn, this_arg, UndefinedConstant(), receiver,
+ initial_k, len, UndefinedConstant()));
+}
+
+// Continuation that is called after a lazy deoptimization from TF that
+// happens right after the callback, whose return value must be handled
+// before iteration continues.
+TF_BUILTIN(ArrayFindLoopAfterCallbackLazyDeoptContinuation,
+ ArrayBuiltinCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* this_arg = Parameter(Descriptor::kThisArg);
+ Node* initial_k = Parameter(Descriptor::kInitialK);
+ Node* len = Parameter(Descriptor::kLength);
+ Node* found_value = Parameter(Descriptor::kFoundValue);
+ Node* is_found = Parameter(Descriptor::kIsFound);
+
+ // This custom lazy deopt point is right after the callback. find() needs
+ // to pick up at the next step, which is returning the element if the callback
+ // value is truthy. Otherwise, continue the search by calling the
+ // continuation.
+ Label if_true(this), if_false(this);
+ BranchIfToBooleanIsTrue(is_found, &if_true, &if_false);
+ BIND(&if_true);
+ Return(found_value);
+ BIND(&if_false);
+ Return(CallBuiltin(Builtins::kArrayFindLoopContinuation, context, receiver,
+ callbackfn, this_arg, UndefinedConstant(), receiver,
+ initial_k, len, UndefinedConstant()));
+}
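In scalar form the resume logic of this continuation is simply the following (helper names are ours):

    // After the callback: return the element if the callback result was
    // truthy, otherwise re-enter the generic loop at initial_k.
    Object* ResumeFindAfterCallback(Object* found_value, Object* is_found) {
      if (ToBoolean(is_found)) return found_value;
      return CallArrayFindLoopContinuation(/* state tuple as above */);
    }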
+
+// ES #sec-array.prototype.find
+TF_BUILTIN(ArrayPrototypeFind, ArrayBuiltinCodeStubAssembler) {
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+ Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
+ Node* receiver = args.GetReceiver();
+ Node* callbackfn = args.GetOptionalArgumentValue(0);
+ Node* this_arg = args.GetOptionalArgumentValue(1);
+
+ InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg,
+ new_target, argc);
+
+ GenerateIteratingArrayBuiltinBody(
+ "Array.prototype.find",
+ &ArrayBuiltinCodeStubAssembler::FindResultGenerator,
+ &ArrayBuiltinCodeStubAssembler::FindProcessor,
+ &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
+ Builtins::CallableFor(isolate(), Builtins::kArrayFindLoopContinuation),
+ MissingPropertyMode::kUseUndefined, ForEachDirection::kForward);
+}
+
+TF_BUILTIN(ArrayFindIndexLoopContinuation, ArrayBuiltinCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* this_arg = Parameter(Descriptor::kThisArg);
+ Node* array = Parameter(Descriptor::kArray);
+ Node* object = Parameter(Descriptor::kObject);
+ Node* initial_k = Parameter(Descriptor::kInitialK);
+ Node* len = Parameter(Descriptor::kLength);
+ Node* to = Parameter(Descriptor::kTo);
+
+ InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
+ this_arg, array, object, initial_k,
+ len, to);
+
+ GenerateIteratingArrayBuiltinLoopContinuation(
+ &ArrayBuiltinCodeStubAssembler::FindIndexProcessor,
+ &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
+ MissingPropertyMode::kUseUndefined, ForEachDirection::kForward);
+}
+
+TF_BUILTIN(ArrayFindIndexLoopEagerDeoptContinuation,
+ ArrayBuiltinCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* this_arg = Parameter(Descriptor::kThisArg);
+ Node* initial_k = Parameter(Descriptor::kInitialK);
+ Node* len = Parameter(Descriptor::kLength);
+
+ Return(CallBuiltin(Builtins::kArrayFindIndexLoopContinuation, context,
+ receiver, callbackfn, this_arg, SmiConstant(-1), receiver,
+ initial_k, len, UndefinedConstant()));
+}
+
+TF_BUILTIN(ArrayFindIndexLoopLazyDeoptContinuation,
+ ArrayBuiltinCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* this_arg = Parameter(Descriptor::kThisArg);
+ Node* initial_k = Parameter(Descriptor::kInitialK);
+ Node* len = Parameter(Descriptor::kLength);
+
+ Return(CallBuiltin(Builtins::kArrayFindIndexLoopContinuation, context,
+ receiver, callbackfn, this_arg, SmiConstant(-1), receiver,
+ initial_k, len, UndefinedConstant()));
+}
+
+TF_BUILTIN(ArrayFindIndexLoopAfterCallbackLazyDeoptContinuation,
+ ArrayBuiltinCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* this_arg = Parameter(Descriptor::kThisArg);
+ Node* initial_k = Parameter(Descriptor::kInitialK);
+ Node* len = Parameter(Descriptor::kLength);
+ Node* found_value = Parameter(Descriptor::kFoundValue);
+ Node* is_found = Parameter(Descriptor::kIsFound);
+
+ // This custom lazy deopt point is right after the callback. findIndex()
+ // needs to pick up at the next step, which is returning the index if the
+ // callback value is truthy. Otherwise, continue the search by calling the
+ // continuation.
+ Label if_true(this), if_false(this);
+ BranchIfToBooleanIsTrue(is_found, &if_true, &if_false);
+ BIND(&if_true);
+ Return(found_value);
+ BIND(&if_false);
+ Return(CallBuiltin(Builtins::kArrayFindIndexLoopContinuation, context,
+ receiver, callbackfn, this_arg, SmiConstant(-1), receiver,
+ initial_k, len, UndefinedConstant()));
+}
+
+// ES #sec-array.prototype.findindex
+TF_BUILTIN(ArrayPrototypeFindIndex, ArrayBuiltinCodeStubAssembler) {
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+ Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
+ Node* receiver = args.GetReceiver();
+ Node* callbackfn = args.GetOptionalArgumentValue(0);
+ Node* this_arg = args.GetOptionalArgumentValue(1);
+
+ InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg,
+ new_target, argc);
+
+ GenerateIteratingArrayBuiltinBody(
+ "Array.prototype.findIndex",
+ &ArrayBuiltinCodeStubAssembler::FindIndexResultGenerator,
+ &ArrayBuiltinCodeStubAssembler::FindIndexProcessor,
+ &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
+ Builtins::CallableFor(isolate(),
+ Builtins::kArrayFindIndexLoopContinuation),
+ MissingPropertyMode::kUseUndefined, ForEachDirection::kForward);
+}
+
// ES #sec-get-%typedarray%.prototype.find
TF_BUILTIN(TypedArrayPrototypeFind, ArrayBuiltinCodeStubAssembler) {
Node* argc =
@@ -1678,7 +1899,8 @@ TF_BUILTIN(ArrayForEachLoopContinuation, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingArrayBuiltinLoopContinuation(
&ArrayBuiltinCodeStubAssembler::ForEachProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
+ &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
+ MissingPropertyMode::kSkip);
}
TF_BUILTIN(ArrayForEachLoopEagerDeoptContinuation,
@@ -1690,11 +1912,9 @@ TF_BUILTIN(ArrayForEachLoopEagerDeoptContinuation,
Node* initial_k = Parameter(Descriptor::kInitialK);
Node* len = Parameter(Descriptor::kLength);
- Callable stub(Builtins::CallableFor(isolate(),
- Builtins::kArrayForEachLoopContinuation));
- Return(CallStub(stub, context, receiver, callbackfn, this_arg,
- UndefinedConstant(), receiver, initial_k, len,
- UndefinedConstant()));
+ Return(CallBuiltin(Builtins::kArrayForEachLoopContinuation, context, receiver,
+ callbackfn, this_arg, UndefinedConstant(), receiver,
+ initial_k, len, UndefinedConstant()));
}
TF_BUILTIN(ArrayForEachLoopLazyDeoptContinuation,
@@ -1706,11 +1926,9 @@ TF_BUILTIN(ArrayForEachLoopLazyDeoptContinuation,
Node* initial_k = Parameter(Descriptor::kInitialK);
Node* len = Parameter(Descriptor::kLength);
- Callable stub(Builtins::CallableFor(isolate(),
- Builtins::kArrayForEachLoopContinuation));
- Return(CallStub(stub, context, receiver, callbackfn, this_arg,
- UndefinedConstant(), receiver, initial_k, len,
- UndefinedConstant()));
+ Return(CallBuiltin(Builtins::kArrayForEachLoopContinuation, context, receiver,
+ callbackfn, this_arg, UndefinedConstant(), receiver,
+ initial_k, len, UndefinedConstant()));
}
TF_BUILTIN(ArrayForEach, ArrayBuiltinCodeStubAssembler) {
@@ -1731,8 +1949,8 @@ TF_BUILTIN(ArrayForEach, ArrayBuiltinCodeStubAssembler) {
&ArrayBuiltinCodeStubAssembler::ForEachResultGenerator,
&ArrayBuiltinCodeStubAssembler::ForEachProcessor,
&ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
- Builtins::CallableFor(isolate(),
- Builtins::kArrayForEachLoopContinuation));
+ Builtins::CallableFor(isolate(), Builtins::kArrayForEachLoopContinuation),
+ MissingPropertyMode::kSkip);
}
TF_BUILTIN(TypedArrayPrototypeForEach, ArrayBuiltinCodeStubAssembler) {
@@ -1755,6 +1973,48 @@ TF_BUILTIN(TypedArrayPrototypeForEach, ArrayBuiltinCodeStubAssembler) {
&ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
}
+TF_BUILTIN(ArraySomeLoopLazyDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* this_arg = Parameter(Descriptor::kThisArg);
+ Node* initial_k = Parameter(Descriptor::kInitialK);
+ Node* len = Parameter(Descriptor::kLength);
+ Node* result = Parameter(Descriptor::kResult);
+
+ // This custom lazy deopt point is right after the callback. some() needs
+ // to pick up at the next step, which is either returning true if {result}
+ // is true or continuing to the next array element.
+ Label true_continue(this), false_continue(this);
+
+ // iii. If testResult is true, return true.
+ BranchIfToBooleanIsTrue(result, &true_continue, &false_continue);
+ BIND(&true_continue);
+ { Return(TrueConstant()); }
+ BIND(&false_continue);
+ {
+ // Increment k.
+ initial_k = NumberInc(initial_k);
+
+ Return(CallBuiltin(Builtins::kArraySomeLoopContinuation, context, receiver,
+ callbackfn, this_arg, FalseConstant(), receiver,
+ initial_k, len, UndefinedConstant()));
+ }
+}
+
+TF_BUILTIN(ArraySomeLoopEagerDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* this_arg = Parameter(Descriptor::kThisArg);
+ Node* initial_k = Parameter(Descriptor::kInitialK);
+ Node* len = Parameter(Descriptor::kLength);
+
+ Return(CallBuiltin(Builtins::kArraySomeLoopContinuation, context, receiver,
+ callbackfn, this_arg, FalseConstant(), receiver, initial_k,
+ len, UndefinedConstant()));
+}
+
TF_BUILTIN(ArraySomeLoopContinuation, ArrayBuiltinCodeStubAssembler) {
Node* context = Parameter(Descriptor::kContext);
Node* receiver = Parameter(Descriptor::kReceiver);
@@ -1772,7 +2032,8 @@ TF_BUILTIN(ArraySomeLoopContinuation, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingArrayBuiltinLoopContinuation(
&ArrayBuiltinCodeStubAssembler::SomeProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
+ &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
+ MissingPropertyMode::kSkip);
}
TF_BUILTIN(ArraySome, ArrayBuiltinCodeStubAssembler) {
@@ -1793,7 +2054,8 @@ TF_BUILTIN(ArraySome, ArrayBuiltinCodeStubAssembler) {
&ArrayBuiltinCodeStubAssembler::SomeResultGenerator,
&ArrayBuiltinCodeStubAssembler::SomeProcessor,
&ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
- Builtins::CallableFor(isolate(), Builtins::kArraySomeLoopContinuation));
+ Builtins::CallableFor(isolate(), Builtins::kArraySomeLoopContinuation),
+ MissingPropertyMode::kSkip);
}
TF_BUILTIN(TypedArrayPrototypeSome, ArrayBuiltinCodeStubAssembler) {
@@ -1816,6 +2078,49 @@ TF_BUILTIN(TypedArrayPrototypeSome, ArrayBuiltinCodeStubAssembler) {
&ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
}
+TF_BUILTIN(ArrayEveryLoopLazyDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* this_arg = Parameter(Descriptor::kThisArg);
+ Node* initial_k = Parameter(Descriptor::kInitialK);
+ Node* len = Parameter(Descriptor::kLength);
+ Node* result = Parameter(Descriptor::kResult);
+
+ // This custom lazy deopt point is right after the callback. every() needs
+ // to pick up at the next step, which is either continuing to the next
+ // array element or returning false if {result} is false.
+ Label true_continue(this), false_continue(this);
+
+ // iii. If testResult is false, return false.
+ BranchIfToBooleanIsTrue(result, &true_continue, &false_continue);
+ BIND(&true_continue);
+ {
+ // Increment k.
+ initial_k = NumberInc(initial_k);
+
+ Return(CallBuiltin(Builtins::kArrayEveryLoopContinuation, context, receiver,
+ callbackfn, this_arg, TrueConstant(), receiver,
+ initial_k, len, UndefinedConstant()));
+ }
+ BIND(&false_continue);
+ { Return(FalseConstant()); }
+}
+
+TF_BUILTIN(ArrayEveryLoopEagerDeoptContinuation,
+ ArrayBuiltinCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* this_arg = Parameter(Descriptor::kThisArg);
+ Node* initial_k = Parameter(Descriptor::kInitialK);
+ Node* len = Parameter(Descriptor::kLength);
+
+ Return(CallBuiltin(Builtins::kArrayEveryLoopContinuation, context, receiver,
+ callbackfn, this_arg, TrueConstant(), receiver, initial_k,
+ len, UndefinedConstant()));
+}
+
TF_BUILTIN(ArrayEveryLoopContinuation, ArrayBuiltinCodeStubAssembler) {
Node* context = Parameter(Descriptor::kContext);
Node* receiver = Parameter(Descriptor::kReceiver);
@@ -1833,7 +2138,8 @@ TF_BUILTIN(ArrayEveryLoopContinuation, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingArrayBuiltinLoopContinuation(
&ArrayBuiltinCodeStubAssembler::EveryProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
+ &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
+ MissingPropertyMode::kSkip);
}
TF_BUILTIN(ArrayEvery, ArrayBuiltinCodeStubAssembler) {
@@ -1854,7 +2160,8 @@ TF_BUILTIN(ArrayEvery, ArrayBuiltinCodeStubAssembler) {
&ArrayBuiltinCodeStubAssembler::EveryResultGenerator,
&ArrayBuiltinCodeStubAssembler::EveryProcessor,
&ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
- Builtins::CallableFor(isolate(), Builtins::kArrayEveryLoopContinuation));
+ Builtins::CallableFor(isolate(), Builtins::kArrayEveryLoopContinuation),
+ MissingPropertyMode::kSkip);
}
TF_BUILTIN(TypedArrayPrototypeEvery, ArrayBuiltinCodeStubAssembler) {
@@ -1894,7 +2201,38 @@ TF_BUILTIN(ArrayReduceLoopContinuation, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingArrayBuiltinLoopContinuation(
&ArrayBuiltinCodeStubAssembler::ReduceProcessor,
- &ArrayBuiltinCodeStubAssembler::ReducePostLoopAction);
+ &ArrayBuiltinCodeStubAssembler::ReducePostLoopAction,
+ MissingPropertyMode::kSkip);
+}
+
+TF_BUILTIN(ArrayReduceLoopEagerDeoptContinuation,
+ ArrayBuiltinCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* accumulator = Parameter(Descriptor::kAccumulator);
+ Node* initial_k = Parameter(Descriptor::kInitialK);
+ Node* len = Parameter(Descriptor::kLength);
+
+ Callable stub(
+ Builtins::CallableFor(isolate(), Builtins::kArrayReduceLoopContinuation));
+ Return(CallStub(stub, context, receiver, callbackfn, UndefinedConstant(),
+ accumulator, receiver, initial_k, len, UndefinedConstant()));
+}
+
+TF_BUILTIN(ArrayReduceLoopLazyDeoptContinuation,
+ ArrayBuiltinCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* initial_k = Parameter(Descriptor::kInitialK);
+ Node* len = Parameter(Descriptor::kLength);
+ Node* result = Parameter(Descriptor::kResult);
+
+ Callable stub(
+ Builtins::CallableFor(isolate(), Builtins::kArrayReduceLoopContinuation));
+ Return(CallStub(stub, context, receiver, callbackfn, UndefinedConstant(),
+ result, receiver, initial_k, len, UndefinedConstant()));
}
TF_BUILTIN(ArrayReduce, ArrayBuiltinCodeStubAssembler) {
@@ -1915,7 +2253,8 @@ TF_BUILTIN(ArrayReduce, ArrayBuiltinCodeStubAssembler) {
&ArrayBuiltinCodeStubAssembler::ReduceResultGenerator,
&ArrayBuiltinCodeStubAssembler::ReduceProcessor,
&ArrayBuiltinCodeStubAssembler::ReducePostLoopAction,
- Builtins::CallableFor(isolate(), Builtins::kArrayReduceLoopContinuation));
+ Builtins::CallableFor(isolate(), Builtins::kArrayReduceLoopContinuation),
+ MissingPropertyMode::kSkip);
}
TF_BUILTIN(TypedArrayPrototypeReduce, ArrayBuiltinCodeStubAssembler) {
@@ -1956,7 +2295,37 @@ TF_BUILTIN(ArrayReduceRightLoopContinuation, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingArrayBuiltinLoopContinuation(
&ArrayBuiltinCodeStubAssembler::ReduceProcessor,
&ArrayBuiltinCodeStubAssembler::ReducePostLoopAction,
- ForEachDirection::kReverse);
+ MissingPropertyMode::kSkip, ForEachDirection::kReverse);
+}
+
+TF_BUILTIN(ArrayReduceRightLoopEagerDeoptContinuation,
+ ArrayBuiltinCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* accumulator = Parameter(Descriptor::kAccumulator);
+ Node* initial_k = Parameter(Descriptor::kInitialK);
+ Node* len = Parameter(Descriptor::kLength);
+
+ Callable stub(Builtins::CallableFor(
+ isolate(), Builtins::kArrayReduceRightLoopContinuation));
+ Return(CallStub(stub, context, receiver, callbackfn, UndefinedConstant(),
+ accumulator, receiver, initial_k, len, UndefinedConstant()));
+}
+
+TF_BUILTIN(ArrayReduceRightLoopLazyDeoptContinuation,
+ ArrayBuiltinCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* initial_k = Parameter(Descriptor::kInitialK);
+ Node* len = Parameter(Descriptor::kLength);
+ Node* result = Parameter(Descriptor::kResult);
+
+ Callable stub(Builtins::CallableFor(
+ isolate(), Builtins::kArrayReduceRightLoopContinuation));
+ Return(CallStub(stub, context, receiver, callbackfn, UndefinedConstant(),
+ result, receiver, initial_k, len, UndefinedConstant()));
}
TF_BUILTIN(ArrayReduceRight, ArrayBuiltinCodeStubAssembler) {
@@ -1979,7 +2348,7 @@ TF_BUILTIN(ArrayReduceRight, ArrayBuiltinCodeStubAssembler) {
&ArrayBuiltinCodeStubAssembler::ReducePostLoopAction,
Builtins::CallableFor(isolate(),
Builtins::kArrayReduceRightLoopContinuation),
- ForEachDirection::kReverse);
+ MissingPropertyMode::kSkip, ForEachDirection::kReverse);
}
TF_BUILTIN(TypedArrayPrototypeReduceRight, ArrayBuiltinCodeStubAssembler) {
@@ -2020,7 +2389,8 @@ TF_BUILTIN(ArrayFilterLoopContinuation, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingArrayBuiltinLoopContinuation(
&ArrayBuiltinCodeStubAssembler::FilterProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
+ &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
+ MissingPropertyMode::kSkip);
}
TF_BUILTIN(ArrayFilterLoopEagerDeoptContinuation,
@@ -2034,10 +2404,9 @@ TF_BUILTIN(ArrayFilterLoopEagerDeoptContinuation,
Node* len = Parameter(Descriptor::kLength);
Node* to = Parameter(Descriptor::kTo);
- Callable stub(
- Builtins::CallableFor(isolate(), Builtins::kArrayFilterLoopContinuation));
- Return(CallStub(stub, context, receiver, callbackfn, this_arg, array,
- receiver, initial_k, len, to));
+ Return(CallBuiltin(Builtins::kArrayFilterLoopContinuation, context, receiver,
+ callbackfn, this_arg, array, receiver, initial_k, len,
+ to));
}
TF_BUILTIN(ArrayFilterLoopLazyDeoptContinuation,
@@ -2077,10 +2446,9 @@ TF_BUILTIN(ArrayFilterLoopLazyDeoptContinuation,
// Increment k.
initial_k = NumberInc(initial_k);
- Callable stub(
- Builtins::CallableFor(isolate(), Builtins::kArrayFilterLoopContinuation));
- Return(CallStub(stub, context, receiver, callbackfn, this_arg, array,
- receiver, initial_k, len, to.value()));
+ Return(CallBuiltin(Builtins::kArrayFilterLoopContinuation, context, receiver,
+ callbackfn, this_arg, array, receiver, initial_k, len,
+ to.value()));
}
TF_BUILTIN(ArrayFilter, ArrayBuiltinCodeStubAssembler) {
@@ -2101,7 +2469,8 @@ TF_BUILTIN(ArrayFilter, ArrayBuiltinCodeStubAssembler) {
&ArrayBuiltinCodeStubAssembler::FilterResultGenerator,
&ArrayBuiltinCodeStubAssembler::FilterProcessor,
&ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
- Builtins::CallableFor(isolate(), Builtins::kArrayFilterLoopContinuation));
+ Builtins::CallableFor(isolate(), Builtins::kArrayFilterLoopContinuation),
+ MissingPropertyMode::kSkip);
}
TF_BUILTIN(ArrayMapLoopContinuation, ArrayBuiltinCodeStubAssembler) {
@@ -2121,7 +2490,8 @@ TF_BUILTIN(ArrayMapLoopContinuation, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingArrayBuiltinLoopContinuation(
&ArrayBuiltinCodeStubAssembler::SpecCompliantMapProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
+ &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
+ MissingPropertyMode::kSkip);
}
TF_BUILTIN(ArrayMapLoopEagerDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
@@ -2133,10 +2503,9 @@ TF_BUILTIN(ArrayMapLoopEagerDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
Node* initial_k = Parameter(Descriptor::kInitialK);
Node* len = Parameter(Descriptor::kLength);
- Callable stub(
- Builtins::CallableFor(isolate(), Builtins::kArrayMapLoopContinuation));
- Return(CallStub(stub, context, receiver, callbackfn, this_arg, array,
- receiver, initial_k, len, UndefinedConstant()));
+ Return(CallBuiltin(Builtins::kArrayMapLoopContinuation, context, receiver,
+ callbackfn, this_arg, array, receiver, initial_k, len,
+ UndefinedConstant()));
}
TF_BUILTIN(ArrayMapLoopLazyDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
@@ -2159,10 +2528,9 @@ TF_BUILTIN(ArrayMapLoopLazyDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
// Then we have to increment k before going on.
initial_k = NumberInc(initial_k);
- Callable stub(
- Builtins::CallableFor(isolate(), Builtins::kArrayMapLoopContinuation));
- Return(CallStub(stub, context, receiver, callbackfn, this_arg, array,
- receiver, initial_k, len, UndefinedConstant()));
+ Return(CallBuiltin(Builtins::kArrayMapLoopContinuation, context, receiver,
+ callbackfn, this_arg, array, receiver, initial_k, len,
+ UndefinedConstant()));
}
TF_BUILTIN(ArrayMap, ArrayBuiltinCodeStubAssembler) {
@@ -2182,7 +2550,8 @@ TF_BUILTIN(ArrayMap, ArrayBuiltinCodeStubAssembler) {
"Array.prototype.map", &ArrayBuiltinCodeStubAssembler::MapResultGenerator,
&ArrayBuiltinCodeStubAssembler::FastMapProcessor,
&ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
- Builtins::CallableFor(isolate(), Builtins::kArrayMapLoopContinuation));
+ Builtins::CallableFor(isolate(), Builtins::kArrayMapLoopContinuation),
+ MissingPropertyMode::kSkip);
}
TF_BUILTIN(TypedArrayPrototypeMap, ArrayBuiltinCodeStubAssembler) {
@@ -2848,7 +3217,7 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
{
Label if_invalid(this, Label::kDeferred);
// A fast array iterator transitioned to a slow iterator during
- // iteration. Invalidate fast_array_iteration_prtoector cell to
+ // iteration. Invalidate fast_array_iteration_protector cell to
// prevent potential deopt loops.
StoreObjectFieldNoWriteBarrier(
iterator, JSArrayIterator::kIteratedObjectMapOffset,
@@ -2877,7 +3246,7 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
length = var_length.value();
}
- GotoIfNumericGreaterThanOrEqual(index, length, &set_done);
+ GotoIfNumberGreaterThanOrEqual(index, length, &set_done);
StoreObjectField(iterator, JSArrayIterator::kNextIndexOffset,
NumberInc(index));
diff --git a/deps/v8/src/builtins/builtins-async-gen.cc b/deps/v8/src/builtins/builtins-async-gen.cc
index 060696ee5d..0cdcb57a3f 100644
--- a/deps/v8/src/builtins/builtins-async-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-gen.cc
@@ -161,6 +161,7 @@ void AsyncBuiltinsAssembler::InitializeNativeClosure(Node* context,
CSA_ASSERT(this, WordEqual(LoadMapInstanceSizeInWords(function_map),
IntPtrConstant(JSFunction::kSizeWithoutPrototype /
kPointerSize)));
+ STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kPointerSize);
StoreMapNoWriteBarrier(function, function_map);
StoreObjectFieldRoot(function, JSObject::kPropertiesOrHashOffset,
Heap::kEmptyFixedArrayRootIndex);
diff --git a/deps/v8/src/builtins/builtins-collections-gen.cc b/deps/v8/src/builtins/builtins-collections-gen.cc
index aec265dc35..392040c995 100644
--- a/deps/v8/src/builtins/builtins-collections-gen.cc
+++ b/deps/v8/src/builtins/builtins-collections-gen.cc
@@ -232,10 +232,9 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromIterable(
TNode<Object> add_func = GetAddFunction(variant, context, collection);
IteratorBuiltinsAssembler iterator_assembler(this->state());
- TNode<Object> iterator =
- CAST(iterator_assembler.GetIterator(context, iterable));
+ IteratorRecord iterator = iterator_assembler.GetIterator(context, iterable);
- CSA_ASSERT(this, Word32BinaryNot(IsUndefined(iterator)));
+ CSA_ASSERT(this, Word32BinaryNot(IsUndefined(iterator.object)));
TNode<Object> fast_iterator_result_map =
LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc
index 2722f7b7a7..5c3883a870 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.cc
+++ b/deps/v8/src/builtins/builtins-constructor-gen.cc
@@ -134,6 +134,7 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewClosure(Node* shared_info,
BIND(&cell_done);
}
+ STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kPointerSize);
StoreObjectFieldNoWriteBarrier(result, JSFunction::kFeedbackVectorOffset,
literals_cell);
StoreObjectFieldNoWriteBarrier(result, JSFunction::kSharedFunctionInfoOffset,
@@ -457,10 +458,10 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
VARIABLE(var_properties, MachineRepresentation::kTagged);
{
Node* bit_field_3 = LoadMapBitField3(boilerplate_map);
- GotoIf(IsSetWord32<Map::Deprecated>(bit_field_3), call_runtime);
+ GotoIf(IsSetWord32<Map::IsDeprecatedBit>(bit_field_3), call_runtime);
// Directly copy over the property store for dict-mode boilerplates.
Label if_dictionary(this), if_fast(this), done(this);
- Branch(IsSetWord32<Map::DictionaryMap>(bit_field_3), &if_dictionary,
+ Branch(IsSetWord32<Map::IsDictionaryMapBit>(bit_field_3), &if_dictionary,
&if_fast);
BIND(&if_dictionary);
{
@@ -636,8 +637,8 @@ Node* ConstructorBuiltinsAssembler::EmitCreateEmptyObjectLiteral(
CSA_ASSERT(this, IsMap(map));
// Ensure that slack tracking is disabled for the map.
STATIC_ASSERT(Map::kNoSlackTracking == 0);
- CSA_ASSERT(this,
- IsClearWord32<Map::ConstructionCounter>(LoadMapBitField3(map)));
+ CSA_ASSERT(
+ this, IsClearWord32<Map::ConstructionCounterBits>(LoadMapBitField3(map)));
Node* empty_fixed_array = EmptyFixedArrayConstant();
Node* result =
AllocateJSObjectFromMap(map, empty_fixed_array, empty_fixed_array);
diff --git a/deps/v8/src/builtins/builtins-conversion-gen.cc b/deps/v8/src/builtins/builtins-conversion-gen.cc
index 823e6ca937..98e0f2c8b2 100644
--- a/deps/v8/src/builtins/builtins-conversion-gen.cc
+++ b/deps/v8/src/builtins/builtins-conversion-gen.cc
@@ -99,10 +99,9 @@ TF_BUILTIN(NonPrimitiveToPrimitive_String, ConversionBuiltinsAssembler) {
}
TF_BUILTIN(StringToNumber, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
Node* input = Parameter(Descriptor::kArgument);
- Return(StringToNumber(context, input));
+ Return(StringToNumber(input));
}
TF_BUILTIN(ToName, CodeStubAssembler) {
@@ -145,10 +144,9 @@ TF_BUILTIN(ToNumber, CodeStubAssembler) {
// ES section #sec-tostring-applied-to-the-number-type
TF_BUILTIN(NumberToString, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
Node* input = Parameter(Descriptor::kArgument);
- Return(NumberToString(context, input));
+ Return(NumberToString(input));
}
// ES section #sec-tostring
@@ -330,7 +328,14 @@ TF_BUILTIN(ToInteger, CodeStubAssembler) {
Node* context = Parameter(Descriptor::kContext);
Node* input = Parameter(Descriptor::kArgument);
- Return(ToInteger(context, input));
+ Return(ToInteger(context, input, kNoTruncation));
+}
+
+TF_BUILTIN(ToInteger_TruncateMinusZero, CodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* input = Parameter(Descriptor::kArgument);
+
+ Return(ToInteger(context, input, kTruncateMinusZero));
}
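The two modes referenced here (kNoTruncation vs. kTruncateMinusZero) differ only in how a result of -0 is reported. A minimal sketch of the double-valued core under standard ES ToInteger semantics, with a function name of our own:

    #include <cmath>

    double ToIntegerDouble(double x, bool truncate_minus_zero) {
      if (std::isnan(x)) return 0.0;
      double result = std::trunc(x);  // toward zero; infinities pass through
      // ToInteger(-0.3) is -0; kTruncateMinusZero canonicalizes it to +0.
      if (truncate_minus_zero && result == 0.0) return 0.0;
      return result;
    }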
// ES6 section 7.1.13 ToObject (argument)
diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h
index 2b2cc407b5..0ffd15df7c 100644
--- a/deps/v8/src/builtins/builtins-definitions.h
+++ b/deps/v8/src/builtins/builtins-definitions.h
@@ -91,8 +91,9 @@ namespace internal {
ASM(StackCheck) \
\
/* String helpers */ \
- TFC(StringCharAt, StringCharAt, 1) \
- TFC(StringCharCodeAt, StringCharCodeAt, 1) \
+ TFC(StringCharAt, StringAt, 1) \
+ TFC(StringCharCodeAt, StringAt, 1) \
+ TFC(StringCodePointAt, StringAt, 1) \
TFC(StringEqual, Compare, 1) \
TFC(StringGreaterThan, Compare, 1) \
TFC(StringGreaterThanOrEqual, Compare, 1) \
@@ -190,6 +191,7 @@ namespace internal {
TFC(NumberToString, TypeConversion, 1) \
TFC(ToString, TypeConversion, 1) \
TFC(ToInteger, TypeConversion, 1) \
+ TFC(ToInteger_TruncateMinusZero, TypeConversion, 1) \
TFC(ToLength, TypeConversion, 1) \
TFC(ClassOf, Typeof, 1) \
TFC(Typeof, Typeof, 1) \
@@ -199,26 +201,19 @@ namespace internal {
TFC(ToBooleanLazyDeoptContinuation, TypeConversionStackParameter, 1) \
\
/* Handlers */ \
- TFH(LoadICProtoArray, LoadICProtoArray) \
- TFH(LoadICProtoArrayThrowIfNonexistent, LoadICProtoArray) \
TFH(KeyedLoadIC_Megamorphic, LoadWithVector) \
- TFH(KeyedLoadIC_Miss, LoadWithVector) \
TFH(KeyedLoadIC_PolymorphicName, LoadWithVector) \
TFH(KeyedLoadIC_Slow, LoadWithVector) \
TFH(KeyedStoreIC_Megamorphic, StoreWithVector) \
- TFH(KeyedStoreIC_Miss, StoreWithVector) \
TFH(KeyedStoreIC_Slow, StoreWithVector) \
- TFH(LoadGlobalIC_Miss, LoadGlobalWithVector) \
- TFH(LoadGlobalIC_Slow, LoadGlobalWithVector) \
+ TFH(LoadGlobalIC_Slow, LoadWithVector) \
TFH(LoadField, LoadField) \
TFH(LoadIC_FunctionPrototype, LoadWithVector) \
- TFH(LoadIC_Miss, LoadWithVector) \
TFH(LoadIC_Slow, LoadWithVector) \
TFH(LoadIC_StringLength, LoadWithVector) \
TFH(LoadIC_StringWrapperLength, LoadWithVector) \
TFH(LoadIC_Uninitialized, LoadWithVector) \
TFH(StoreGlobalIC_Slow, StoreWithVector) \
- TFH(StoreIC_Miss, StoreWithVector) \
TFH(StoreIC_Uninitialized, StoreWithVector) \
\
/* Promise helpers */ \
@@ -226,6 +221,9 @@ namespace internal {
TFS(RejectNativePromise, kPromise, kValue, kDebugEvent) \
TFS(PerformNativePromiseThen, kPromise, kResolveReaction, kRejectReaction, \
kResultPromise) \
+ TFS(EnqueueMicrotask, kMicrotask) \
+ TFC(RunMicrotasks, RunMicrotasks, 1) \
+ TFS(PromiseResolveThenableJob, kMicrotask) \
\
/* Object property helpers */ \
TFS(HasProperty, kKey, kObject) \
@@ -233,7 +231,7 @@ namespace internal {
\
/* Abort */ \
ASM(Abort) \
- ASM(AbortJS) \
+ TFC(AbortJS, AbortJS, 1) \
\
  /* Built-in functions for JavaScript */ \
/* Special internal builtins */ \
@@ -255,16 +253,16 @@ namespace internal {
TFJ(ArrayIndexOf, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.pop */ \
CPP(ArrayPop) \
- TFJ(FastArrayPop, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(ArrayPrototypePop, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.push */ \
CPP(ArrayPush) \
- TFJ(FastArrayPush, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(ArrayPrototypePush, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.shift */ \
CPP(ArrayShift) \
- TFJ(FastArrayShift, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(ArrayPrototypeShift, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.slice */ \
CPP(ArraySlice) \
- TFJ(FastArraySlice, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(ArrayPrototypeSlice, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.splice */ \
CPP(ArraySplice) \
/* ES6 #sec-array.prototype.unshift */ \
@@ -283,10 +281,18 @@ namespace internal {
/* ES6 #sec-array.prototype.every */ \
TFS(ArrayEveryLoopContinuation, kReceiver, kCallbackFn, kThisArg, kArray, \
kObject, kInitialK, kLength, kTo) \
+ TFJ(ArrayEveryLoopEagerDeoptContinuation, 4, kCallbackFn, kThisArg, \
+ kInitialK, kLength) \
+ TFJ(ArrayEveryLoopLazyDeoptContinuation, 5, kCallbackFn, kThisArg, \
+ kInitialK, kLength, kResult) \
TFJ(ArrayEvery, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.some */ \
TFS(ArraySomeLoopContinuation, kReceiver, kCallbackFn, kThisArg, kArray, \
kObject, kInitialK, kLength, kTo) \
+ TFJ(ArraySomeLoopEagerDeoptContinuation, 4, kCallbackFn, kThisArg, \
+ kInitialK, kLength) \
+ TFJ(ArraySomeLoopLazyDeoptContinuation, 5, kCallbackFn, kThisArg, kInitialK, \
+ kLength, kResult) \
TFJ(ArraySome, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.filter */ \
TFS(ArrayFilterLoopContinuation, kReceiver, kCallbackFn, kThisArg, kArray, \
@@ -307,13 +313,42 @@ namespace internal {
/* ES6 #sec-array.prototype.reduce */ \
TFS(ArrayReduceLoopContinuation, kReceiver, kCallbackFn, kThisArg, \
kAccumulator, kObject, kInitialK, kLength, kTo) \
+ TFJ(ArrayReduceLoopEagerDeoptContinuation, 4, kCallbackFn, kInitialK, \
+ kLength, kAccumulator) \
+ TFJ(ArrayReduceLoopLazyDeoptContinuation, 4, kCallbackFn, kInitialK, \
+ kLength, kResult) \
TFJ(ArrayReduce, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.reduceRight */ \
TFS(ArrayReduceRightLoopContinuation, kReceiver, kCallbackFn, kThisArg, \
kAccumulator, kObject, kInitialK, kLength, kTo) \
+ TFJ(ArrayReduceRightLoopEagerDeoptContinuation, 4, kCallbackFn, kInitialK, \
+ kLength, kAccumulator) \
+ TFJ(ArrayReduceRightLoopLazyDeoptContinuation, 4, kCallbackFn, kInitialK, \
+ kLength, kResult) \
TFJ(ArrayReduceRight, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.entries */ \
TFJ(ArrayPrototypeEntries, 0) \
+ /* ES6 #sec-array.prototype.find */ \
+ TFS(ArrayFindLoopContinuation, kReceiver, kCallbackFn, kThisArg, kArray, \
+ kObject, kInitialK, kLength, kTo) \
+ TFJ(ArrayFindLoopEagerDeoptContinuation, 4, kCallbackFn, kThisArg, \
+ kInitialK, kLength) \
+ TFJ(ArrayFindLoopLazyDeoptContinuation, 5, kCallbackFn, kThisArg, kInitialK, \
+ kLength, kResult) \
+ TFJ(ArrayFindLoopAfterCallbackLazyDeoptContinuation, 6, kCallbackFn, \
+ kThisArg, kInitialK, kLength, kFoundValue, kIsFound) \
+ TFJ(ArrayPrototypeFind, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ /* ES6 #sec-array.prototype.findIndex */ \
+ TFS(ArrayFindIndexLoopContinuation, kReceiver, kCallbackFn, kThisArg, \
+ kArray, kObject, kInitialK, kLength, kTo) \
+ TFJ(ArrayFindIndexLoopEagerDeoptContinuation, 4, kCallbackFn, kThisArg, \
+ kInitialK, kLength) \
+ TFJ(ArrayFindIndexLoopLazyDeoptContinuation, 5, kCallbackFn, kThisArg, \
+ kInitialK, kLength, kResult) \
+ TFJ(ArrayFindIndexLoopAfterCallbackLazyDeoptContinuation, 6, kCallbackFn, \
+ kThisArg, kInitialK, kLength, kFoundValue, kIsFound) \
+ TFJ(ArrayPrototypeFindIndex, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.keys */ \
TFJ(ArrayPrototypeKeys, 0) \
/* ES6 #sec-array.prototype.values */ \
@@ -555,6 +590,8 @@ namespace internal {
TFH(LoadICTrampoline, Load) \
TFH(KeyedLoadIC, LoadWithVector) \
TFH(KeyedLoadICTrampoline, Load) \
+ TFH(StoreGlobalIC, StoreGlobalWithVector) \
+ TFH(StoreGlobalICTrampoline, StoreGlobal) \
TFH(StoreIC, StoreWithVector) \
TFH(StoreICTrampoline, Store) \
TFH(KeyedStoreIC, StoreWithVector) \
@@ -718,7 +755,7 @@ namespace internal {
CPP(ObjectDefineProperties) \
CPP(ObjectDefineProperty) \
CPP(ObjectDefineSetter) \
- CPP(ObjectEntries) \
+ TFJ(ObjectEntries, 1, kObject) \
CPP(ObjectFreeze) \
TFJ(ObjectGetOwnPropertyDescriptor, \
SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
@@ -745,8 +782,10 @@ namespace internal {
CPP(ObjectPrototypePropertyIsEnumerable) \
CPP(ObjectPrototypeGetProto) \
CPP(ObjectPrototypeSetProto) \
+ /* ES #sec-object.prototype.tolocalestring */ \
+ TFJ(ObjectPrototypeToLocaleString, 0) \
CPP(ObjectSeal) \
- CPP(ObjectValues) \
+ TFJ(ObjectValues, 1, kObject) \
\
/* instanceof */ \
TFC(OrdinaryHasInstance, Compare, 1) \
@@ -771,13 +810,15 @@ namespace internal {
TFJ(PromiseRejectClosure, 1, kValue) \
TFJ(PromiseAllResolveElementClosure, 1, kValue) \
/* ES #sec-promise.prototype.then */ \
- TFJ(PromiseThen, 2, kOnFullfilled, kOnRejected) \
+ TFJ(PromisePrototypeThen, 2, kOnFullfilled, kOnRejected) \
/* ES #sec-promise.prototype.catch */ \
- TFJ(PromiseCatch, 1, kOnRejected) \
+ TFJ(PromisePrototypeCatch, 1, kOnRejected) \
/* ES #sec-fulfillpromise */ \
TFJ(ResolvePromise, 2, kPromise, kValue) \
TFS(PromiseHandleReject, kPromise, kOnReject, kException) \
- TFJ(PromiseHandle, 5, kValue, kHandler, kDeferredPromise, \
+ TFS(PromiseHandle, kValue, kHandler, kDeferredPromise, kDeferredOnResolve, \
+ kDeferredOnReject) \
+ TFJ(PromiseHandleJS, 5, kValue, kHandler, kDeferredPromise, \
kDeferredOnResolve, kDeferredOnReject) \
/* ES #sec-promise.resolve */ \
TFJ(PromiseResolveWrapper, 1, kValue) \
@@ -785,7 +826,7 @@ namespace internal {
/* ES #sec-promise.reject */ \
TFJ(PromiseReject, 1, kReason) \
TFJ(InternalPromiseReject, 3, kPromise, kReason, kDebugEvent) \
- TFJ(PromiseFinally, 1, kOnFinally) \
+ TFJ(PromisePrototypeFinally, 1, kOnFinally) \
TFJ(PromiseThenFinally, 1, kValue) \
TFJ(PromiseCatchFinally, 1, kReason) \
TFJ(PromiseValueThunkFinally, 0) \
@@ -799,6 +840,8 @@ namespace internal {
TFJ(ProxyConstructor, 0) \
TFJ(ProxyConstructor_ConstructStub, \
SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(ProxyRevocable, 2, kTarget, kHandler) \
+ TFJ(ProxyRevoke, 0) \
TFS(ProxyGetProperty, kProxy, kName, kReceiverValue) \
TFS(ProxyHasProperty, kProxy, kName) \
TFS(ProxySetProperty, kProxy, kName, kValue, kReceiverValue, kLanguageMode) \
diff --git a/deps/v8/src/builtins/builtins-function-gen.cc b/deps/v8/src/builtins/builtins-function-gen.cc
index 0b98a7169b..7c1db5093d 100644
--- a/deps/v8/src/builtins/builtins-function-gen.cc
+++ b/deps/v8/src/builtins/builtins-function-gen.cc
@@ -6,7 +6,6 @@
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
#include "src/frame-constants.h"
-#include "src/zone/zone-list-inl.h" // TODO(mstarzinger): Temporary cycle breaker.
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-handler-gen.cc b/deps/v8/src/builtins/builtins-handler-gen.cc
index 4d85be9f91..48c28ab730 100644
--- a/deps/v8/src/builtins/builtins-handler-gen.cc
+++ b/deps/v8/src/builtins/builtins-handler-gen.cc
@@ -23,17 +23,6 @@ TF_BUILTIN(LoadIC_StringWrapperLength, CodeStubAssembler) {
Return(LoadStringLengthAsSmi(string));
}
-TF_BUILTIN(KeyedLoadIC_Miss, CodeStubAssembler) {
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* name = Parameter(Descriptor::kName);
- Node* slot = Parameter(Descriptor::kSlot);
- Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
-
- TailCallRuntime(Runtime::kKeyedLoadIC_Miss, context, receiver, name, slot,
- vector);
-}
-
TF_BUILTIN(KeyedLoadIC_Slow, CodeStubAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
Node* name = Parameter(Descriptor::kName);
@@ -52,18 +41,6 @@ void Builtins::Generate_StoreIC_Uninitialized(
StoreICUninitializedGenerator::Generate(state);
}
-TF_BUILTIN(KeyedStoreIC_Miss, CodeStubAssembler) {
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* name = Parameter(Descriptor::kName);
- Node* value = Parameter(Descriptor::kValue);
- Node* slot = Parameter(Descriptor::kSlot);
- Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
-
- TailCallRuntime(Runtime::kKeyedStoreIC_Miss, context, value, slot, vector,
- receiver, name);
-}
-
TF_BUILTIN(KeyedStoreIC_Slow, CodeStubAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
Node* name = Parameter(Descriptor::kName);
@@ -78,15 +55,6 @@ TF_BUILTIN(KeyedStoreIC_Slow, CodeStubAssembler) {
receiver, name);
}
-TF_BUILTIN(LoadGlobalIC_Miss, CodeStubAssembler) {
- Node* name = Parameter(Descriptor::kName);
- Node* slot = Parameter(Descriptor::kSlot);
- Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
-
- TailCallRuntime(Runtime::kLoadGlobalIC_Miss, context, name, slot, vector);
-}
-
TF_BUILTIN(LoadGlobalIC_Slow, CodeStubAssembler) {
Node* name = Parameter(Descriptor::kName);
Node* slot = Parameter(Descriptor::kSlot);
@@ -110,16 +78,6 @@ TF_BUILTIN(LoadIC_FunctionPrototype, CodeStubAssembler) {
TailCallRuntime(Runtime::kLoadIC_Miss, context, receiver, name, slot, vector);
}
-TF_BUILTIN(LoadIC_Miss, CodeStubAssembler) {
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* name = Parameter(Descriptor::kName);
- Node* slot = Parameter(Descriptor::kSlot);
- Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
-
- TailCallRuntime(Runtime::kLoadIC_Miss, context, receiver, name, slot, vector);
-}
-
TF_BUILTIN(LoadIC_Slow, CodeStubAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
Node* name = Parameter(Descriptor::kName);
@@ -128,18 +86,6 @@ TF_BUILTIN(LoadIC_Slow, CodeStubAssembler) {
TailCallRuntime(Runtime::kGetProperty, context, receiver, name);
}
-TF_BUILTIN(StoreIC_Miss, CodeStubAssembler) {
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* name = Parameter(Descriptor::kName);
- Node* value = Parameter(Descriptor::kValue);
- Node* slot = Parameter(Descriptor::kSlot);
- Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
-
- TailCallRuntime(Runtime::kStoreIC_Miss, context, value, slot, vector,
- receiver, name);
-}
-
TF_BUILTIN(StoreGlobalIC_Slow, CodeStubAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
Node* name = Parameter(Descriptor::kName);
diff --git a/deps/v8/src/builtins/builtins-ic-gen.cc b/deps/v8/src/builtins/builtins-ic-gen.cc
index 536a7f31ed..94613a6a32 100644
--- a/deps/v8/src/builtins/builtins-ic-gen.cc
+++ b/deps/v8/src/builtins/builtins-ic-gen.cc
@@ -29,6 +29,8 @@ IC_BUILTIN(LoadField)
IC_BUILTIN(KeyedLoadICTrampoline)
IC_BUILTIN(KeyedLoadIC_Megamorphic)
IC_BUILTIN(KeyedLoadIC_PolymorphicName)
+IC_BUILTIN(StoreGlobalIC)
+IC_BUILTIN(StoreGlobalICTrampoline)
IC_BUILTIN(StoreIC)
IC_BUILTIN(StoreICTrampoline)
IC_BUILTIN(KeyedStoreIC)
@@ -40,8 +42,6 @@ IC_BUILTIN_PARAM(LoadGlobalICTrampoline, LoadGlobalICTrampoline,
NOT_INSIDE_TYPEOF)
IC_BUILTIN_PARAM(LoadGlobalICInsideTypeofTrampoline, LoadGlobalICTrampoline,
INSIDE_TYPEOF)
-IC_BUILTIN_PARAM(LoadICProtoArray, LoadICProtoArray, false)
-IC_BUILTIN_PARAM(LoadICProtoArrayThrowIfNonexistent, LoadICProtoArray, true)
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc
index bc9723700c..bb4b66e3a4 100644
--- a/deps/v8/src/builtins/builtins-internal-gen.cc
+++ b/deps/v8/src/builtins/builtins-internal-gen.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/api.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
@@ -13,6 +14,9 @@
namespace v8 {
namespace internal {
+template <typename T>
+using TNode = compiler::TNode<T>;
+
// -----------------------------------------------------------------------------
// Interrupt and stack checks.
@@ -583,7 +587,7 @@ TF_BUILTIN(ForInFilter, CodeStubAssembler) {
CSA_ASSERT(this, IsString(key));
Label if_true(this), if_false(this);
- Node* result = HasProperty(object, key, context, kForInHasProperty);
+ TNode<Oddball> result = HasProperty(object, key, context, kForInHasProperty);
Branch(IsTrue(result), &if_true, &if_false);
BIND(&if_true);
@@ -607,5 +611,448 @@ TF_BUILTIN(SameValue, CodeStubAssembler) {
Return(FalseConstant());
}
+class InternalBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ explicit InternalBuiltinsAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ TNode<IntPtrT> GetPendingMicrotaskCount();
+ void SetPendingMicrotaskCount(TNode<IntPtrT> count);
+
+ TNode<FixedArray> GetMicrotaskQueue();
+ void SetMicrotaskQueue(TNode<FixedArray> queue);
+
+ TNode<Context> GetCurrentContext();
+ void SetCurrentContext(TNode<Context> context);
+
+ void EnterMicrotaskContext(TNode<Context> context);
+ void LeaveMicrotaskContext();
+
+ TNode<Object> GetPendingException() {
+ auto ref = ExternalReference(kPendingExceptionAddress, isolate());
+ return TNode<Object>::UncheckedCast(
+ Load(MachineType::AnyTagged(), ExternalConstant(ref)));
+ }
+ void ClearPendingException() {
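+    // The-hole is the sentinel value meaning "no pending exception".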
+ auto ref = ExternalReference(kPendingExceptionAddress, isolate());
+ StoreNoWriteBarrier(MachineRepresentation::kTagged, ExternalConstant(ref),
+ TheHoleConstant());
+ }
+
+ TNode<Object> GetScheduledException() {
+ auto ref = ExternalReference::scheduled_exception_address(isolate());
+ return TNode<Object>::UncheckedCast(
+ Load(MachineType::AnyTagged(), ExternalConstant(ref)));
+ }
+ void ClearScheduledException() {
+ auto ref = ExternalReference::scheduled_exception_address(isolate());
+ StoreNoWriteBarrier(MachineRepresentation::kTagged, ExternalConstant(ref),
+ TheHoleConstant());
+ }
+};
+
+TNode<IntPtrT> InternalBuiltinsAssembler::GetPendingMicrotaskCount() {
+ auto ref = ExternalReference::pending_microtask_count_address(isolate());
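+  // The pending count is a C++ int; load it at the matching width and
+  // widen to word size.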
+ if (kIntSize == 8) {
+ return TNode<IntPtrT>::UncheckedCast(
+ Load(MachineType::Int64(), ExternalConstant(ref)));
+ } else {
+ Node* const value = Load(MachineType::Int32(), ExternalConstant(ref));
+ return ChangeInt32ToIntPtr(value);
+ }
+}
+
+void InternalBuiltinsAssembler::SetPendingMicrotaskCount(TNode<IntPtrT> count) {
+ auto ref = ExternalReference::pending_microtask_count_address(isolate());
+ auto rep = kIntSize == 8 ? MachineRepresentation::kWord64
+ : MachineRepresentation::kWord32;
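+  // With a 32-bit int on a 64-bit target, truncate the intptr count
+  // before the 32-bit store.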
+ if (kIntSize == 4 && kPointerSize == 8) {
+ Node* const truncated_count =
+ TruncateInt64ToInt32(TNode<Int64T>::UncheckedCast(count));
+ StoreNoWriteBarrier(rep, ExternalConstant(ref), truncated_count);
+ } else {
+ StoreNoWriteBarrier(rep, ExternalConstant(ref), count);
+ }
+}
+
+TNode<FixedArray> InternalBuiltinsAssembler::GetMicrotaskQueue() {
+ return TNode<FixedArray>::UncheckedCast(
+ LoadRoot(Heap::kMicrotaskQueueRootIndex));
+}
+
+void InternalBuiltinsAssembler::SetMicrotaskQueue(TNode<FixedArray> queue) {
+ StoreRoot(Heap::kMicrotaskQueueRootIndex, queue);
+}
+
+TNode<Context> InternalBuiltinsAssembler::GetCurrentContext() {
+ auto ref = ExternalReference(kContextAddress, isolate());
+ return TNode<Context>::UncheckedCast(
+ Load(MachineType::AnyTagged(), ExternalConstant(ref)));
+}
+
+void InternalBuiltinsAssembler::SetCurrentContext(TNode<Context> context) {
+ auto ref = ExternalReference(kContextAddress, isolate());
+ StoreNoWriteBarrier(MachineRepresentation::kTagged, ExternalConstant(ref),
+ context);
+}
+
+void InternalBuiltinsAssembler::EnterMicrotaskContext(
+ TNode<Context> microtask_context) {
+ auto ref = ExternalReference::handle_scope_implementer_address(isolate());
+ Node* const hsi = Load(MachineType::Pointer(), ExternalConstant(ref));
+ StoreNoWriteBarrier(
+ MachineType::PointerRepresentation(), hsi,
+ IntPtrConstant(HandleScopeImplementerOffsets::kMicrotaskContext),
+ BitcastTaggedToWord(microtask_context));
+
+ // Load mirrored std::vector length from
+ // HandleScopeImplementer::entered_contexts_count_
+ auto type = kSizetSize == 8 ? MachineType::Uint64() : MachineType::Uint32();
+ Node* entered_contexts_length = Load(
+ type, hsi,
+ IntPtrConstant(HandleScopeImplementerOffsets::kEnteredContextsCount));
+
+ auto rep = kSizetSize == 8 ? MachineRepresentation::kWord64
+ : MachineRepresentation::kWord32;
+
+ StoreNoWriteBarrier(
+ rep, hsi,
+ IntPtrConstant(
+ HandleScopeImplementerOffsets::kEnteredContextCountDuringMicrotasks),
+ entered_contexts_length);
+}
+
+void InternalBuiltinsAssembler::LeaveMicrotaskContext() {
+ auto ref = ExternalReference::handle_scope_implementer_address(isolate());
+
+ Node* const hsi = Load(MachineType::Pointer(), ExternalConstant(ref));
+ StoreNoWriteBarrier(
+ MachineType::PointerRepresentation(), hsi,
+ IntPtrConstant(HandleScopeImplementerOffsets::kMicrotaskContext),
+ IntPtrConstant(0));
+ if (kSizetSize == 4) {
+ StoreNoWriteBarrier(
+ MachineRepresentation::kWord32, hsi,
+ IntPtrConstant(HandleScopeImplementerOffsets::
+ kEnteredContextCountDuringMicrotasks),
+ Int32Constant(0));
+ } else {
+ StoreNoWriteBarrier(
+ MachineRepresentation::kWord64, hsi,
+ IntPtrConstant(HandleScopeImplementerOffsets::
+ kEnteredContextCountDuringMicrotasks),
+ Int64Constant(0));
+ }
+}
+
+TF_BUILTIN(EnqueueMicrotask, InternalBuiltinsAssembler) {
+ Node* microtask = Parameter(Descriptor::kMicrotask);
+
+ TNode<IntPtrT> num_tasks = GetPendingMicrotaskCount();
+ TNode<IntPtrT> new_num_tasks = IntPtrAdd(num_tasks, IntPtrConstant(1));
+ TNode<FixedArray> queue = GetMicrotaskQueue();
+ TNode<IntPtrT> queue_length = LoadAndUntagFixedArrayBaseLength(queue);
+
+ Label if_append(this), if_grow(this), done(this);
+ Branch(WordEqual(num_tasks, queue_length), &if_grow, &if_append);
+
+ BIND(&if_grow);
+ {
+ // Determine the new queue length and check if we need to allocate
+ // in large object space (instead of just going to new space, where
+ // we also know that we don't need any write barriers for setting
+ // up the new queue object).
+ Label if_newspace(this), if_lospace(this, Label::kDeferred);
+ TNode<IntPtrT> new_queue_length =
+ IntPtrMax(IntPtrConstant(8), IntPtrAdd(num_tasks, num_tasks));
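+    // Grow by doubling, with a minimum length of 8, so that repeated
+    // enqueues amortize the copying.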
+ Branch(IntPtrLessThanOrEqual(new_queue_length,
+ IntPtrConstant(FixedArray::kMaxRegularLength)),
+ &if_newspace, &if_lospace);
+
+ BIND(&if_newspace);
+ {
+ // This is the likely case where the new queue fits into new space,
+ // and thus we don't need any write barriers for initializing it.
+ TNode<FixedArray> new_queue =
+ CAST(AllocateFixedArray(PACKED_ELEMENTS, new_queue_length));
+ CopyFixedArrayElements(PACKED_ELEMENTS, queue, new_queue, num_tasks,
+ SKIP_WRITE_BARRIER);
+ StoreFixedArrayElement(new_queue, num_tasks, microtask,
+ SKIP_WRITE_BARRIER);
+ FillFixedArrayWithValue(PACKED_ELEMENTS, new_queue, new_num_tasks,
+ new_queue_length, Heap::kUndefinedValueRootIndex);
+ SetMicrotaskQueue(new_queue);
+ Goto(&done);
+ }
+
+ BIND(&if_lospace);
+ {
+ // The fallback case where the new queue ends up in large object space.
+ TNode<FixedArray> new_queue = CAST(AllocateFixedArray(
+ PACKED_ELEMENTS, new_queue_length, INTPTR_PARAMETERS,
+ AllocationFlag::kAllowLargeObjectAllocation));
+ CopyFixedArrayElements(PACKED_ELEMENTS, queue, new_queue, num_tasks);
+ StoreFixedArrayElement(new_queue, num_tasks, microtask);
+ FillFixedArrayWithValue(PACKED_ELEMENTS, new_queue, new_num_tasks,
+ new_queue_length, Heap::kUndefinedValueRootIndex);
+ SetMicrotaskQueue(new_queue);
+ Goto(&done);
+ }
+ }
+
+ BIND(&if_append);
+ {
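+    // Fast path: the current queue still has spare capacity.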
+ StoreFixedArrayElement(queue, num_tasks, microtask);
+ Goto(&done);
+ }
+
+ BIND(&done);
+ SetPendingMicrotaskCount(new_num_tasks);
+ Return(UndefinedConstant());
+}
+
+TF_BUILTIN(RunMicrotasks, InternalBuiltinsAssembler) {
+ Label init_queue_loop(this);
+
+ Goto(&init_queue_loop);
+ BIND(&init_queue_loop);
+ {
+ TVARIABLE(IntPtrT, index, IntPtrConstant(0));
+ Label loop(this, &index);
+
+ TNode<IntPtrT> num_tasks = GetPendingMicrotaskCount();
+ ReturnIf(IntPtrEqual(num_tasks, IntPtrConstant(0)), UndefinedConstant());
+
+ TNode<FixedArray> queue = GetMicrotaskQueue();
+
+ CSA_ASSERT(this, IntPtrGreaterThanOrEqual(
+ LoadAndUntagFixedArrayBaseLength(queue), num_tasks));
+ CSA_ASSERT(this, IntPtrGreaterThan(num_tasks, IntPtrConstant(0)));
+
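+    // Detach the queue before running: microtasks enqueued while these
+    // tasks run go into a fresh queue, which the outer loop picks up.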
+ SetPendingMicrotaskCount(IntPtrConstant(0));
+ SetMicrotaskQueue(
+ TNode<FixedArray>::UncheckedCast(EmptyFixedArrayConstant()));
+
+ Goto(&loop);
+ BIND(&loop);
+ {
+ TNode<HeapObject> microtask =
+ TNode<HeapObject>::UncheckedCast(LoadFixedArrayElement(queue, index));
+ index = IntPtrAdd(index, IntPtrConstant(1));
+
+ CSA_ASSERT(this, TaggedIsNotSmi(microtask));
+
+ TNode<Map> microtask_map = LoadMap(microtask);
+ TNode<Int32T> microtask_type = LoadMapInstanceType(microtask_map);
+
+ Label is_call_handler_info(this);
+ Label is_function(this);
+ Label is_promise_resolve_thenable_job(this);
+ Label is_promise_reaction_job(this);
+ Label is_unreachable(this);
+
+ int32_t case_values[] = {TUPLE3_TYPE, // CallHandlerInfo
+ JS_FUNCTION_TYPE,
+ PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE,
+ PROMISE_REACTION_JOB_INFO_TYPE};
+
+ Label* case_labels[] = {&is_call_handler_info, &is_function,
+ &is_promise_resolve_thenable_job,
+ &is_promise_reaction_job};
+
+ static_assert(arraysize(case_values) == arraysize(case_labels), "");
+ Switch(microtask_type, &is_unreachable, case_values, case_labels,
+ arraysize(case_labels));
+
+ BIND(&is_call_handler_info);
+ {
+ // Bailout to C++ slow path for the remainder of the loop.
+ auto index_ref =
+ ExternalReference(kMicrotaskQueueBailoutIndexAddress, isolate());
+ auto count_ref =
+ ExternalReference(kMicrotaskQueueBailoutCountAddress, isolate());
+ auto rep = kIntSize == 4 ? MachineRepresentation::kWord32
+ : MachineRepresentation::kWord64;
+
+ // index was pre-incremented, decrement for bailout to C++.
+ Node* value = IntPtrSub(index, IntPtrConstant(1));
+
+ if (kPointerSize == 4) {
+ DCHECK_EQ(kIntSize, 4);
+ StoreNoWriteBarrier(rep, ExternalConstant(index_ref), value);
+ StoreNoWriteBarrier(rep, ExternalConstant(count_ref), num_tasks);
+ } else {
+ Node* count = num_tasks;
+ if (kIntSize == 4) {
+ value = TruncateInt64ToInt32(value);
+ count = TruncateInt64ToInt32(count);
+ }
+ StoreNoWriteBarrier(rep, ExternalConstant(index_ref), value);
+ StoreNoWriteBarrier(rep, ExternalConstant(count_ref), count);
+ }
+
+ Return(queue);
+ }
+
+ BIND(&is_function);
+ {
+ Label cont(this);
+ VARIABLE(exception, MachineRepresentation::kTagged, TheHoleConstant());
+ TNode<Context> old_context = GetCurrentContext();
+ TNode<Context> fn_context = TNode<Context>::UncheckedCast(
+ LoadObjectField(microtask, JSFunction::kContextOffset));
+ TNode<Context> native_context =
+ TNode<Context>::UncheckedCast(LoadNativeContext(fn_context));
+ SetCurrentContext(native_context);
+ EnterMicrotaskContext(fn_context);
+ Node* const call = CallJS(CodeFactory::Call(isolate()), native_context,
+ microtask, UndefinedConstant());
+ GotoIfException(call, &cont);
+ Goto(&cont);
+ BIND(&cont);
+ LeaveMicrotaskContext();
+ SetCurrentContext(old_context);
+ Branch(IntPtrLessThan(index, num_tasks), &loop, &init_queue_loop);
+ }
+
+ BIND(&is_promise_resolve_thenable_job);
+ {
+ VARIABLE(exception, MachineRepresentation::kTagged, TheHoleConstant());
+ TNode<Context> old_context = GetCurrentContext();
+ TNode<Context> microtask_context =
+ TNode<Context>::UncheckedCast(LoadObjectField(
+ microtask, PromiseResolveThenableJobInfo::kContextOffset));
+ TNode<Context> native_context =
+ TNode<Context>::UncheckedCast(LoadNativeContext(microtask_context));
+ SetCurrentContext(native_context);
+ EnterMicrotaskContext(microtask_context);
+
+ Label if_unhandled_exception(this), done(this);
+ Node* const ret = CallBuiltin(Builtins::kPromiseResolveThenableJob,
+ native_context, microtask);
+ GotoIfException(ret, &if_unhandled_exception, &exception);
+ Goto(&done);
+
+ BIND(&if_unhandled_exception);
+ CallRuntime(Runtime::kReportMessage, native_context, exception.value());
+ Goto(&done);
+
+ BIND(&done);
+ LeaveMicrotaskContext();
+ SetCurrentContext(old_context);
+
+ Branch(IntPtrLessThan(index, num_tasks), &loop, &init_queue_loop);
+ }
+
+ BIND(&is_promise_reaction_job);
+ {
+ Label if_multiple(this);
+ Label if_single(this);
+
+ Node* const value =
+ LoadObjectField(microtask, PromiseReactionJobInfo::kValueOffset);
+ Node* const tasks =
+ LoadObjectField(microtask, PromiseReactionJobInfo::kTasksOffset);
+ Node* const deferred_promises = LoadObjectField(
+ microtask, PromiseReactionJobInfo::kDeferredPromiseOffset);
+ Node* const deferred_on_resolves = LoadObjectField(
+ microtask, PromiseReactionJobInfo::kDeferredOnResolveOffset);
+ Node* const deferred_on_rejects = LoadObjectField(
+ microtask, PromiseReactionJobInfo::kDeferredOnRejectOffset);
+
+ TNode<Context> old_context = GetCurrentContext();
+ TNode<Context> microtask_context = TNode<Context>::UncheckedCast(
+ LoadObjectField(microtask, PromiseReactionJobInfo::kContextOffset));
+ TNode<Context> native_context =
+ TNode<Context>::UncheckedCast(LoadNativeContext(microtask_context));
+ SetCurrentContext(native_context);
+ EnterMicrotaskContext(microtask_context);
+
+ Branch(IsFixedArray(deferred_promises), &if_multiple, &if_single);
+
+ BIND(&if_single);
+ {
+ CallBuiltin(Builtins::kPromiseHandle, native_context, value, tasks,
+ deferred_promises, deferred_on_resolves,
+ deferred_on_rejects);
+ LeaveMicrotaskContext();
+ SetCurrentContext(old_context);
+ Branch(IntPtrLessThan(index, num_tasks), &loop, &init_queue_loop);
+ }
+
+ BIND(&if_multiple);
+ {
+ TVARIABLE(IntPtrT, inner_index, IntPtrConstant(0));
+ TNode<IntPtrT> inner_length =
+ LoadAndUntagFixedArrayBaseLength(deferred_promises);
+ Label inner_loop(this, &inner_index), done(this);
+
+ CSA_ASSERT(this, IntPtrGreaterThan(inner_length, IntPtrConstant(0)));
+ Goto(&inner_loop);
+ BIND(&inner_loop);
+ {
+ Node* const task = LoadFixedArrayElement(tasks, inner_index);
+ Node* const deferred_promise =
+ LoadFixedArrayElement(deferred_promises, inner_index);
+ Node* const deferred_on_resolve =
+ LoadFixedArrayElement(deferred_on_resolves, inner_index);
+ Node* const deferred_on_reject =
+ LoadFixedArrayElement(deferred_on_rejects, inner_index);
+ CallBuiltin(Builtins::kPromiseHandle, native_context, value, task,
+ deferred_promise, deferred_on_resolve,
+ deferred_on_reject);
+ inner_index = IntPtrAdd(inner_index, IntPtrConstant(1));
+ Branch(IntPtrLessThan(inner_index, inner_length), &inner_loop,
+ &done);
+ }
+ BIND(&done);
+
+ LeaveMicrotaskContext();
+ SetCurrentContext(old_context);
+
+ Branch(IntPtrLessThan(index, num_tasks), &loop, &init_queue_loop);
+ }
+ }
+
+ BIND(&is_unreachable);
+ Unreachable();
+ }
+ }
+}
+
+TF_BUILTIN(PromiseResolveThenableJob, InternalBuiltinsAssembler) {
+ VARIABLE(exception, MachineRepresentation::kTagged, TheHoleConstant());
+ Callable call = CodeFactory::Call(isolate());
+ Label reject_promise(this, Label::kDeferred);
+ TNode<PromiseResolveThenableJobInfo> microtask =
+ TNode<PromiseResolveThenableJobInfo>::UncheckedCast(
+ Parameter(Descriptor::kMicrotask));
+ TNode<Context> context =
+ TNode<Context>::UncheckedCast(Parameter(Descriptor::kContext));
+
+ TNode<JSReceiver> thenable = TNode<JSReceiver>::UncheckedCast(LoadObjectField(
+ microtask, PromiseResolveThenableJobInfo::kThenableOffset));
+ TNode<JSReceiver> then = TNode<JSReceiver>::UncheckedCast(
+ LoadObjectField(microtask, PromiseResolveThenableJobInfo::kThenOffset));
+ TNode<JSFunction> resolve = TNode<JSFunction>::UncheckedCast(LoadObjectField(
+ microtask, PromiseResolveThenableJobInfo::kResolveOffset));
+ TNode<JSFunction> reject = TNode<JSFunction>::UncheckedCast(
+ LoadObjectField(microtask, PromiseResolveThenableJobInfo::kRejectOffset));
+
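+  // Call then.call(thenable, resolve, reject); if it throws, reject the
+  // promise with the exception below.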
+ Node* const result = CallJS(call, context, then, thenable, resolve, reject);
+ GotoIfException(result, &reject_promise, &exception);
+ Return(UndefinedConstant());
+
+ BIND(&reject_promise);
+ CallJS(call, context, reject, UndefinedConstant(), exception.value());
+ Return(UndefinedConstant());
+}
+
+TF_BUILTIN(AbortJS, CodeStubAssembler) {
+ Node* message = Parameter(Descriptor::kObject);
+ Node* reason = SmiConstant(0);
+ TailCallRuntime(Runtime::kAbortJS, reason, message);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-intl-gen.cc b/deps/v8/src/builtins/builtins-intl-gen.cc
index 3c7956246b..88641b04e2 100644
--- a/deps/v8/src/builtins/builtins-intl-gen.cc
+++ b/deps/v8/src/builtins/builtins-intl-gen.cc
@@ -8,7 +8,6 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/code-stub-assembler.h"
-#include "src/zone/zone-list-inl.h" // TODO(mstarzinger): Temporary cycle breaker.
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.cc b/deps/v8/src/builtins/builtins-iterator-gen.cc
index f186cf2d76..f6a6d85880 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.cc
+++ b/deps/v8/src/builtins/builtins-iterator-gen.cc
@@ -11,9 +11,10 @@ namespace internal {
using compiler::Node;
-Node* IteratorBuiltinsAssembler::GetIterator(Node* context, Node* object,
- Label* if_exception,
- Variable* exception) {
+IteratorRecord IteratorBuiltinsAssembler::GetIterator(Node* context,
+ Node* object,
+ Label* if_exception,
+ Variable* exception) {
Node* method = GetProperty(context, object, factory()->iterator_symbol());
GotoIfException(method, if_exception, exception);
@@ -21,9 +22,9 @@ Node* IteratorBuiltinsAssembler::GetIterator(Node* context, Node* object,
Node* iterator = CallJS(callable, context, method, object);
GotoIfException(iterator, if_exception, exception);
- Label done(this), if_notobject(this, Label::kDeferred);
+ Label get_next(this), if_notobject(this, Label::kDeferred);
GotoIf(TaggedIsSmi(iterator), &if_notobject);
- Branch(IsJSReceiver(iterator), &done, &if_notobject);
+ Branch(IsJSReceiver(iterator), &get_next, &if_notobject);
BIND(&if_notobject);
{
@@ -34,24 +35,21 @@ Node* IteratorBuiltinsAssembler::GetIterator(Node* context, Node* object,
Unreachable();
}
- BIND(&done);
- return iterator;
+ BIND(&get_next);
+ Node* const next = GetProperty(context, iterator, factory()->next_string());
+ GotoIfException(next, if_exception, exception);
+
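+  // Per the spec's GetIterator, the "next" method is loaded once here and
+  // reused by each subsequent IteratorStep.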
+ return IteratorRecord{TNode<JSReceiver>::UncheckedCast(iterator),
+ TNode<Object>::UncheckedCast(next)};
}
-Node* IteratorBuiltinsAssembler::IteratorStep(Node* context, Node* iterator,
- Label* if_done,
- Node* fast_iterator_result_map,
- Label* if_exception,
- Variable* exception) {
+Node* IteratorBuiltinsAssembler::IteratorStep(
+ Node* context, const IteratorRecord& iterator, Label* if_done,
+ Node* fast_iterator_result_map, Label* if_exception, Variable* exception) {
DCHECK_NOT_NULL(if_done);
-
- // IteratorNext
- Node* next_method = GetProperty(context, iterator, factory()->next_string());
- GotoIfException(next_method, if_exception, exception);
-
// 1. a. Let result be ? Invoke(iterator, "next", « »).
Callable callable = CodeFactory::Call(isolate());
- Node* result = CallJS(callable, context, next_method, iterator);
+ Node* result = CallJS(callable, context, iterator.next, iterator.object);
GotoIfException(result, if_exception, exception);
// 3. If Type(result) is not Object, throw a TypeError exception.
@@ -129,20 +127,20 @@ Node* IteratorBuiltinsAssembler::IteratorValue(Node* context, Node* result,
return var_value.value();
}
-void IteratorBuiltinsAssembler::IteratorCloseOnException(Node* context,
- Node* iterator,
- Label* if_exception,
- Variable* exception) {
+void IteratorBuiltinsAssembler::IteratorCloseOnException(
+ Node* context, const IteratorRecord& iterator, Label* if_exception,
+ Variable* exception) {
// Perform ES #sec-iteratorclose when an exception occurs. This simpler
// algorithm does not include redundant steps which are never reachable from
// the spec IteratorClose algorithm.
DCHECK_NOT_NULL(if_exception);
DCHECK_NOT_NULL(exception);
CSA_ASSERT(this, IsNotTheHole(exception->value()));
- CSA_ASSERT(this, IsJSReceiver(iterator));
+ CSA_ASSERT(this, IsJSReceiver(iterator.object));
// Let return be ? GetMethod(iterator, "return").
- Node* method = GetProperty(context, iterator, factory()->return_string());
+ Node* method =
+ GetProperty(context, iterator.object, factory()->return_string());
GotoIfException(method, if_exception, exception);
// If return is undefined, return Completion(completion).
@@ -152,7 +150,7 @@ void IteratorBuiltinsAssembler::IteratorCloseOnException(Node* context,
// Let innerResult be Call(return, iterator, « »).
// If an exception occurs, the original exception remains bound
Node* inner_result =
- CallJS(CodeFactory::Call(isolate()), context, method, iterator);
+ CallJS(CodeFactory::Call(isolate()), context, method, iterator.object);
GotoIfException(inner_result, if_exception, nullptr);
// (If completion.[[Type]] is throw) return Completion(completion).
@@ -160,9 +158,8 @@ void IteratorBuiltinsAssembler::IteratorCloseOnException(Node* context,
}
}
-void IteratorBuiltinsAssembler::IteratorCloseOnException(Node* context,
- Node* iterator,
- Variable* exception) {
+void IteratorBuiltinsAssembler::IteratorCloseOnException(
+ Node* context, const IteratorRecord& iterator, Variable* exception) {
Label rethrow(this, Label::kDeferred);
IteratorCloseOnException(context, iterator, &rethrow, exception);
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.h b/deps/v8/src/builtins/builtins-iterator-gen.h
index 9eb332e926..42627b8437 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.h
+++ b/deps/v8/src/builtins/builtins-iterator-gen.h
@@ -19,16 +19,17 @@ class IteratorBuiltinsAssembler : public CodeStubAssembler {
// https://tc39.github.io/ecma262/#sec-getiterator --- never used for
// @@asyncIterator.
- Node* GetIterator(Node* context, Node* object, Label* if_exception = nullptr,
- Variable* exception = nullptr);
+ IteratorRecord GetIterator(Node* context, Node* object,
+ Label* if_exception = nullptr,
+ Variable* exception = nullptr);
// https://tc39.github.io/ecma262/#sec-iteratorstep
// Returns `false` if the iterator is done, otherwise returns an
// iterator result.
// `fast_iterator_result_map` refers to the map for the JSIteratorResult
// object, loaded from the native context.
- Node* IteratorStep(Node* context, Node* iterator, Label* if_done,
- Node* fast_iterator_result_map = nullptr,
+ Node* IteratorStep(Node* context, const IteratorRecord& iterator,
+ Label* if_done, Node* fast_iterator_result_map = nullptr,
Label* if_exception = nullptr,
Variable* exception = nullptr);
@@ -42,9 +43,9 @@ class IteratorBuiltinsAssembler : public CodeStubAssembler {
Variable* exception = nullptr);
// https://tc39.github.io/ecma262/#sec-iteratorclose
- void IteratorCloseOnException(Node* context, Node* iterator,
+ void IteratorCloseOnException(Node* context, const IteratorRecord& iterator,
Label* if_exception, Variable* exception);
- void IteratorCloseOnException(Node* context, Node* iterator,
+ void IteratorCloseOnException(Node* context, const IteratorRecord& iterator,
Variable* exception);
};
diff --git a/deps/v8/src/builtins/builtins-math-gen.cc b/deps/v8/src/builtins/builtins-math-gen.cc
index 706fa4f3a8..d588113cdd 100644
--- a/deps/v8/src/builtins/builtins-math-gen.cc
+++ b/deps/v8/src/builtins/builtins-math-gen.cc
@@ -8,7 +8,6 @@
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
#include "src/code-stub-assembler.h"
-#include "src/zone/zone-list-inl.h" // TODO(mstarzinger): Temporary cycle breaker.
namespace v8 {
namespace internal {
@@ -162,7 +161,7 @@ void MathBuiltinsAssembler::MathMaxMin(
SloppyTNode<Float64T>),
double default_val) {
CodeStubArguments arguments(this, ChangeInt32ToIntPtr(argc));
- argc = arguments.GetLength();
+ argc = arguments.GetLength(INTPTR_PARAMETERS);
VARIABLE(result, MachineRepresentation::kFloat64);
result.Bind(Float64Constant(default_val));
diff --git a/deps/v8/src/builtins/builtins-object-gen.cc b/deps/v8/src/builtins/builtins-object-gen.cc
index 65170d321d..9e344820dc 100644
--- a/deps/v8/src/builtins/builtins-object-gen.cc
+++ b/deps/v8/src/builtins/builtins-object-gen.cc
@@ -16,6 +16,8 @@ namespace internal {
// ES6 section 19.1 Object Objects
typedef compiler::Node Node;
+template <class T>
+using TNode = CodeStubAssembler::TNode<T>;
class ObjectBuiltinsAssembler : public CodeStubAssembler {
public:
@@ -34,6 +36,46 @@ class ObjectBuiltinsAssembler : public CodeStubAssembler {
Node* ConstructDataDescriptor(Node* context, Node* value, Node* writable,
Node* enumerable, Node* configurable);
Node* GetAccessorOrUndefined(Node* accessor, Label* if_bailout);
+
+ Node* IsSpecialReceiverMap(SloppyTNode<Map> map);
+};
+
+class ObjectEntriesValuesBuiltinsAssembler : public ObjectBuiltinsAssembler {
+ public:
+ explicit ObjectEntriesValuesBuiltinsAssembler(
+ compiler::CodeAssemblerState* state)
+ : ObjectBuiltinsAssembler(state) {}
+
+ protected:
+ enum CollectType { kEntries, kValues };
+
+ TNode<Word32T> IsStringWrapperElementsKind(TNode<Map> map);
+
+ TNode<BoolT> IsPropertyEnumerable(TNode<Uint32T> details);
+
+ TNode<BoolT> IsPropertyKindAccessor(TNode<Uint32T> kind);
+
+ TNode<BoolT> IsPropertyKindData(TNode<Uint32T> kind);
+
+ TNode<Uint32T> HasHiddenPrototype(TNode<Map> map);
+
+ TNode<Uint32T> LoadPropertyKind(TNode<Uint32T> details) {
+ return DecodeWord32<PropertyDetails::KindField>(details);
+ }
+
+ void GetOwnValuesOrEntries(TNode<Context> context, TNode<Object> maybe_object,
+ CollectType collect_type);
+
+ void GotoIfMapHasSlowProperties(TNode<Map> map, Label* if_slow);
+
+ TNode<JSArray> FastGetOwnValuesOrEntries(
+ TNode<Context> context, TNode<JSObject> object,
+ Label* if_call_runtime_with_fast_path, Label* if_no_properties,
+ CollectType collect_type);
+
+ TNode<JSArray> FinalizeValuesOrEntriesJSArray(
+ TNode<Context> context, TNode<FixedArray> values_or_entries,
+ TNode<IntPtrT> size, TNode<Map> array_map, Label* if_empty);
};
void ObjectBuiltinsAssembler::ReturnToStringFormat(Node* context,
@@ -97,6 +139,265 @@ Node* ObjectBuiltinsAssembler::ConstructDataDescriptor(Node* context,
return js_desc;
}
+Node* ObjectBuiltinsAssembler::IsSpecialReceiverMap(SloppyTNode<Map> map) {
+ CSA_SLOW_ASSERT(this, IsMap(map));
+ Node* is_special = IsSpecialReceiverInstanceType(LoadMapInstanceType(map));
+ uint32_t mask =
+ Map::HasNamedInterceptorBit::kMask | Map::IsAccessCheckNeededBit::kMask;
+ USE(mask);
+ // Interceptors or access checks imply special receiver.
+ CSA_ASSERT(this,
+ SelectConstant(IsSetWord32(LoadMapBitField(map), mask), is_special,
+ Int32Constant(1), MachineRepresentation::kWord32));
+ return is_special;
+}
+
+TNode<Word32T>
+ObjectEntriesValuesBuiltinsAssembler::IsStringWrapperElementsKind(
+ TNode<Map> map) {
+ Node* kind = LoadMapElementsKind(map);
+ return Word32Or(
+ Word32Equal(kind, Int32Constant(FAST_STRING_WRAPPER_ELEMENTS)),
+ Word32Equal(kind, Int32Constant(SLOW_STRING_WRAPPER_ELEMENTS)));
+}
+
+TNode<BoolT> ObjectEntriesValuesBuiltinsAssembler::IsPropertyEnumerable(
+ TNode<Uint32T> details) {
+ TNode<Uint32T> attributes =
+ DecodeWord32<PropertyDetails::AttributesField>(details);
+ return IsNotSetWord32(attributes, PropertyAttributes::DONT_ENUM);
+}
+
+TNode<BoolT> ObjectEntriesValuesBuiltinsAssembler::IsPropertyKindAccessor(
+ TNode<Uint32T> kind) {
+ return Word32Equal(kind, Int32Constant(PropertyKind::kAccessor));
+}
+
+TNode<BoolT> ObjectEntriesValuesBuiltinsAssembler::IsPropertyKindData(
+ TNode<Uint32T> kind) {
+ return Word32Equal(kind, Int32Constant(PropertyKind::kData));
+}
+
+TNode<Uint32T> ObjectEntriesValuesBuiltinsAssembler::HasHiddenPrototype(
+ TNode<Map> map) {
+ TNode<Uint32T> bit_field3 = LoadMapBitField3(map);
+ return DecodeWord32<Map::HasHiddenPrototypeBit>(bit_field3);
+}
+
+void ObjectEntriesValuesBuiltinsAssembler::GetOwnValuesOrEntries(
+ TNode<Context> context, TNode<Object> maybe_object,
+ CollectType collect_type) {
+ TNode<JSObject> object = TNode<JSObject>::UncheckedCast(
+ CallBuiltin(Builtins::kToObject, context, maybe_object));
+
+ Label if_call_runtime_with_fast_path(this, Label::kDeferred),
+ if_call_runtime(this, Label::kDeferred),
+ if_no_properties(this, Label::kDeferred);
+
+ TNode<Map> map = LoadMap(object);
+ GotoIfNot(IsJSObjectMap(map), &if_call_runtime);
+ GotoIfMapHasSlowProperties(map, &if_call_runtime);
+
+ TNode<FixedArrayBase> elements = LoadElements(object);
+  // If the object has elements, treat it as the slow case and call into
+  // the runtime.
+ GotoIfNot(IsEmptyFixedArray(elements), &if_call_runtime_with_fast_path);
+
+ TNode<JSArray> result = FastGetOwnValuesOrEntries(
+ context, object, &if_call_runtime_with_fast_path, &if_no_properties,
+ collect_type);
+ Return(result);
+
+ BIND(&if_no_properties);
+ {
+ Node* native_context = LoadNativeContext(context);
+ Node* array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
+ Node* empty_array = AllocateJSArray(PACKED_ELEMENTS, array_map,
+ IntPtrConstant(0), SmiConstant(0));
+ Return(empty_array);
+ }
+
+ BIND(&if_call_runtime_with_fast_path);
+ {
+    // Bail out to the runtime implementation.
+ if (collect_type == CollectType::kEntries) {
+ Return(CallRuntime(Runtime::kObjectEntries, context, object));
+ } else {
+ DCHECK(collect_type == CollectType::kValues);
+ Return(CallRuntime(Runtime::kObjectValues, context, object));
+ }
+ }
+
+ BIND(&if_call_runtime);
+ {
+    // Bail out to the runtime, skipping the fast path entirely.
+ if (collect_type == CollectType::kEntries) {
+ Return(CallRuntime(Runtime::kObjectEntriesSkipFastPath, context, object));
+ } else {
+ DCHECK(collect_type == CollectType::kValues);
+ Return(CallRuntime(Runtime::kObjectValuesSkipFastPath, context, object));
+ }
+ }
+}
+
+void ObjectEntriesValuesBuiltinsAssembler::GotoIfMapHasSlowProperties(
+ TNode<Map> map, Label* if_slow) {
+ GotoIf(IsStringWrapperElementsKind(map), if_slow);
+ GotoIf(IsSpecialReceiverMap(map), if_slow);
+ GotoIf(HasHiddenPrototype(map), if_slow);
+ GotoIf(IsDictionaryMap(map), if_slow);
+}
+
+TNode<JSArray> ObjectEntriesValuesBuiltinsAssembler::FastGetOwnValuesOrEntries(
+ TNode<Context> context, TNode<JSObject> object,
+ Label* if_call_runtime_with_fast_path, Label* if_no_properties,
+ CollectType collect_type) {
+ Node* native_context = LoadNativeContext(context);
+ TNode<Map> array_map =
+ LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
+ TNode<Map> map = LoadMap(object);
+ TNode<Uint32T> bit_field3 = LoadMapBitField3(map);
+
+ Label if_has_enum_cache(this), if_not_has_enum_cache(this),
+ collect_entries(this);
+ Node* object_enum_length =
+ DecodeWordFromWord32<Map::EnumLengthBits>(bit_field3);
+ Node* has_enum_cache = WordNotEqual(
+ object_enum_length, IntPtrConstant(kInvalidEnumCacheSentinel));
+
+  // If the object has an enum cache, use its length as the length of the
+  // result array: it matches the number of entries/values the result will
+  // hold and is cheaper to read than the NumberOfOwnDescriptorsBits value.
+  // If there is no enum cache yet, call into the runtime, which
+  // initializes the enum cache so that subsequent calls can take this
+  // CSA fast path.
+ Branch(has_enum_cache, &if_has_enum_cache, if_call_runtime_with_fast_path);
+
+ BIND(&if_has_enum_cache);
+ {
+ GotoIf(WordEqual(object_enum_length, IntPtrConstant(0)), if_no_properties);
+ TNode<FixedArray> values_or_entries = TNode<FixedArray>::UncheckedCast(
+ AllocateFixedArray(PACKED_ELEMENTS, object_enum_length,
+ INTPTR_PARAMETERS, kAllowLargeObjectAllocation));
+
+    // Even with an enum cache we cannot tell whether the object has an
+    // accessor property until we have looped over the descriptors. If we
+    // do hit an accessor we bail out to the runtime, which would leave
+    // part of this FixedArray uninitialized, so pre-fill it with the-hole.
+ FillFixedArrayWithValue(PACKED_ELEMENTS, values_or_entries,
+ IntPtrConstant(0), object_enum_length,
+ Heap::kTheHoleValueRootIndex);
+
+ TVARIABLE(IntPtrT, var_result_index, IntPtrConstant(0));
+ TVARIABLE(IntPtrT, var_descriptor_index, IntPtrConstant(0));
+ Variable* vars[] = {&var_descriptor_index, &var_result_index};
+ // Let desc be ? O.[[GetOwnProperty]](key).
+ TNode<DescriptorArray> descriptors = LoadMapDescriptors(map);
+ Label loop(this, 2, vars), after_loop(this), loop_condition(this);
+ Branch(IntPtrEqual(var_descriptor_index, object_enum_length), &after_loop,
+ &loop);
+
+    // We don't use BuildFastLoop here; the loop is hand-written because
+    // we need 'continue'-style control flow.
+ BIND(&loop);
+ {
+      // Getters are never invoked here, so the map cannot change.
+ CSA_ASSERT(this, WordEqual(map, LoadMap(object)));
+ TNode<Uint32T> descriptor_index = TNode<Uint32T>::UncheckedCast(
+ TruncateWordToWord32(var_descriptor_index));
+ Node* next_key = DescriptorArrayGetKey(descriptors, descriptor_index);
+
+ // Skip Symbols.
+ GotoIf(IsSymbol(next_key), &loop_condition);
+
+ TNode<Uint32T> details = TNode<Uint32T>::UncheckedCast(
+ DescriptorArrayGetDetails(descriptors, descriptor_index));
+ TNode<Uint32T> kind = LoadPropertyKind(details);
+
+ // If property is accessor, we escape fast path and call runtime.
+ GotoIf(IsPropertyKindAccessor(kind), if_call_runtime_with_fast_path);
+ CSA_ASSERT(this, IsPropertyKindData(kind));
+
+ // If desc is not undefined and desc.[[Enumerable]] is true, then
+ GotoIfNot(IsPropertyEnumerable(details), &loop_condition);
+
+ VARIABLE(var_property_value, MachineRepresentation::kTagged,
+ UndefinedConstant());
+ Node* descriptor_name_index = DescriptorNumberToIndex(descriptor_index);
+
+ // Let value be ? Get(O, key).
+ LoadPropertyFromFastObject(object, map, descriptors,
+ descriptor_name_index, details,
+ &var_property_value);
+
+ // If kind is "value", append value to properties.
+ Node* value = var_property_value.value();
+
+ if (collect_type == CollectType::kEntries) {
+ // Let entry be CreateArrayFromList(« key, value »).
+ Node* array = nullptr;
+ Node* elements = nullptr;
+ std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
+ PACKED_ELEMENTS, array_map, SmiConstant(2), nullptr,
+ IntPtrConstant(2));
+ StoreFixedArrayElement(elements, 0, next_key, SKIP_WRITE_BARRIER);
+ StoreFixedArrayElement(elements, 1, value, SKIP_WRITE_BARRIER);
+ value = array;
+ }
+
+ StoreFixedArrayElement(values_or_entries, var_result_index, value);
+ Increment(&var_result_index, 1);
+ Goto(&loop_condition);
+
+ BIND(&loop_condition);
+ {
+ Increment(&var_descriptor_index, 1);
+ Branch(IntPtrEqual(var_descriptor_index, object_enum_length),
+ &after_loop, &loop);
+ }
+ }
+ BIND(&after_loop);
+ return FinalizeValuesOrEntriesJSArray(context, values_or_entries,
+ var_result_index, array_map,
+ if_no_properties);
+ }
+}
+
+TNode<JSArray>
+ObjectEntriesValuesBuiltinsAssembler::FinalizeValuesOrEntriesJSArray(
+ TNode<Context> context, TNode<FixedArray> result, TNode<IntPtrT> size,
+ TNode<Map> array_map, Label* if_empty) {
+ CSA_ASSERT(this, IsJSArrayMap(array_map));
+
+ GotoIf(IntPtrEqual(size, IntPtrConstant(0)), if_empty);
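+  // The backing store may be longer than |size|; its unused tail was
+  // pre-filled with the-hole by the caller.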
+ Node* array = AllocateUninitializedJSArrayWithoutElements(
+ array_map, SmiTag(size), nullptr);
+ StoreObjectField(array, JSArray::kElementsOffset, result);
+ return TNode<JSArray>::UncheckedCast(array);
+}
+
+TF_BUILTIN(ObjectPrototypeToLocaleString, CodeStubAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+
+ Label if_null_or_undefined(this, Label::kDeferred);
+ GotoIf(IsNullOrUndefined(receiver), &if_null_or_undefined);
+
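+  // Per ES #sec-object.prototype.tolocalestring, delegate to
+  // Invoke(receiver, "toString").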
+ TNode<Object> method =
+ CAST(GetProperty(context, receiver, factory()->toString_string()));
+ Return(CallJS(CodeFactory::Call(isolate()), context, method, receiver));
+
+ BIND(&if_null_or_undefined);
+ ThrowTypeError(context, MessageTemplate::kCalledOnNullOrUndefined,
+ "Object.prototype.toLocaleString");
+}
+
TF_BUILTIN(ObjectPrototypeHasOwnProperty, ObjectBuiltinsAssembler) {
Node* object = Parameter(Descriptor::kReceiver);
Node* key = Parameter(Descriptor::kKey);
@@ -250,6 +551,22 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) {
}
}
+TF_BUILTIN(ObjectValues, ObjectEntriesValuesBuiltinsAssembler) {
+ TNode<JSObject> object =
+ TNode<JSObject>::UncheckedCast(Parameter(Descriptor::kObject));
+ TNode<Context> context =
+ TNode<Context>::UncheckedCast(Parameter(Descriptor::kContext));
+ GetOwnValuesOrEntries(context, object, CollectType::kValues);
+}
+
+TF_BUILTIN(ObjectEntries, ObjectEntriesValuesBuiltinsAssembler) {
+ TNode<JSObject> object =
+ TNode<JSObject>::UncheckedCast(Parameter(Descriptor::kObject));
+ TNode<Context> context =
+ TNode<Context>::UncheckedCast(Parameter(Descriptor::kContext));
+ GetOwnValuesOrEntries(context, object, CollectType::kEntries);
+}
+
// ES #sec-object.prototype.isprototypeof
TF_BUILTIN(ObjectPrototypeIsPrototypeOf, ObjectBuiltinsAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
@@ -550,7 +867,7 @@ TF_BUILTIN(ObjectPrototypeToString, ObjectBuiltinsAssembler) {
GotoIf(IsNull(holder), &return_default);
Node* holder_map = LoadMap(holder);
Node* holder_bit_field3 = LoadMapBitField3(holder_map);
- GotoIf(IsSetWord32<Map::MayHaveInterestingSymbols>(holder_bit_field3),
+ GotoIf(IsSetWord32<Map::MayHaveInterestingSymbolsBit>(holder_bit_field3),
&return_generic);
var_holder.Bind(LoadMapPrototype(holder_map));
Goto(&loop);
@@ -615,7 +932,7 @@ TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) {
&call_runtime);
// Handle dictionary objects or fast objects with properties in runtime.
Node* bit_field3 = LoadMapBitField3(properties_map);
- GotoIf(IsSetWord32<Map::DictionaryMap>(bit_field3), &call_runtime);
+ GotoIf(IsSetWord32<Map::IsDictionaryMapBit>(bit_field3), &call_runtime);
Branch(IsSetWord32<Map::NumberOfOwnDescriptorsBits>(bit_field3),
&call_runtime, &no_properties);
}
diff --git a/deps/v8/src/builtins/builtins-object.cc b/deps/v8/src/builtins/builtins-object.cc
index 36f7ebfc0a..4e353b9260 100644
--- a/deps/v8/src/builtins/builtins-object.cc
+++ b/deps/v8/src/builtins/builtins-object.cc
@@ -395,31 +395,6 @@ BUILTIN(ObjectIsSealed) {
return isolate->heap()->ToBoolean(result.FromJust());
}
-BUILTIN(ObjectValues) {
- HandleScope scope(isolate);
- Handle<Object> object = args.atOrUndefined(isolate, 1);
- Handle<JSReceiver> receiver;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
- Object::ToObject(isolate, object));
- Handle<FixedArray> values;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, values, JSReceiver::GetOwnValues(receiver, ENUMERABLE_STRINGS));
- return *isolate->factory()->NewJSArrayWithElements(values);
-}
-
-BUILTIN(ObjectEntries) {
- HandleScope scope(isolate);
- Handle<Object> object = args.atOrUndefined(isolate, 1);
- Handle<JSReceiver> receiver;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
- Object::ToObject(isolate, object));
- Handle<FixedArray> entries;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, entries,
- JSReceiver::GetOwnEntries(receiver, ENUMERABLE_STRINGS));
- return *isolate->factory()->NewJSArrayWithElements(entries);
-}
-
BUILTIN(ObjectGetOwnPropertyDescriptors) {
HandleScope scope(isolate);
Handle<Object> object = args.atOrUndefined(isolate, 1);
diff --git a/deps/v8/src/builtins/builtins-promise-gen.cc b/deps/v8/src/builtins/builtins-promise-gen.cc
index 67ebc85ba4..1a3ebcd892 100644
--- a/deps/v8/src/builtins/builtins-promise-gen.cc
+++ b/deps/v8/src/builtins/builtins-promise-gen.cc
@@ -161,12 +161,12 @@ Node* PromiseBuiltinsAssembler::NewPromiseCapability(Node* context,
Node* resolve =
LoadObjectField(capability, PromiseCapability::kResolveOffset);
GotoIf(TaggedIsSmi(resolve), &if_notcallable);
- GotoIfNot(IsCallableMap(LoadMap(resolve)), &if_notcallable);
+ GotoIfNot(IsCallable(resolve), &if_notcallable);
Node* reject =
LoadObjectField(capability, PromiseCapability::kRejectOffset);
GotoIf(TaggedIsSmi(reject), &if_notcallable);
- GotoIfNot(IsCallableMap(LoadMap(reject)), &if_notcallable);
+ GotoIfNot(IsCallable(reject), &if_notcallable);
StoreObjectField(capability, PromiseCapability::kPromiseOffset, promise);
@@ -189,25 +189,6 @@ Node* PromiseBuiltinsAssembler::NewPromiseCapability(Node* context,
return var_result.value();
}
-void PromiseBuiltinsAssembler::InitializeFunctionContext(Node* native_context,
- Node* context,
- int slots) {
- DCHECK_GE(slots, Context::MIN_CONTEXT_SLOTS);
- StoreMapNoWriteBarrier(context, Heap::kFunctionContextMapRootIndex);
- StoreObjectFieldNoWriteBarrier(context, FixedArray::kLengthOffset,
- SmiConstant(slots));
-
- Node* const empty_fn =
- LoadContextElement(native_context, Context::CLOSURE_INDEX);
- StoreContextElementNoWriteBarrier(context, Context::CLOSURE_INDEX, empty_fn);
- StoreContextElementNoWriteBarrier(context, Context::PREVIOUS_INDEX,
- UndefinedConstant());
- StoreContextElementNoWriteBarrier(context, Context::EXTENSION_INDEX,
- TheHoleConstant());
- StoreContextElementNoWriteBarrier(context, Context::NATIVE_CONTEXT_INDEX,
- native_context);
-}
-
Node* PromiseBuiltinsAssembler::CreatePromiseContext(Node* native_context,
int slots) {
DCHECK_GE(slots, Context::MIN_CONTEXT_SLOTS);
@@ -366,8 +347,6 @@ Node* PromiseBuiltinsAssembler::InternalPromiseThen(Node* context,
VARIABLE(var_deferred_on_resolve, MachineRepresentation::kTagged);
VARIABLE(var_deferred_on_reject, MachineRepresentation::kTagged);
- GotoIfForceSlowPath(&promise_capability);
-
Branch(WordEqual(promise_fun, constructor), &fast_promise_capability,
&promise_capability);
@@ -415,16 +394,11 @@ Node* PromiseBuiltinsAssembler::InternalPerformPromiseThen(
append_callbacks(this);
GotoIf(TaggedIsSmi(on_resolve), &if_onresolvenotcallable);
- Isolate* isolate = this->isolate();
- Node* const on_resolve_map = LoadMap(on_resolve);
- Branch(IsCallableMap(on_resolve_map), &onrejectcheck,
- &if_onresolvenotcallable);
+ Branch(IsCallable(on_resolve), &onrejectcheck, &if_onresolvenotcallable);
BIND(&if_onresolvenotcallable);
{
- Node* const default_resolve_handler_symbol = HeapConstant(
- isolate->factory()->promise_default_resolve_handler_symbol());
- var_on_resolve.Bind(default_resolve_handler_symbol);
+ var_on_resolve.Bind(PromiseDefaultResolveHandlerSymbolConstant());
Goto(&onrejectcheck);
}
@@ -433,15 +407,11 @@ Node* PromiseBuiltinsAssembler::InternalPerformPromiseThen(
Label if_onrejectnotcallable(this);
GotoIf(TaggedIsSmi(on_reject), &if_onrejectnotcallable);
- Node* const on_reject_map = LoadMap(on_reject);
- Branch(IsCallableMap(on_reject_map), &append_callbacks,
- &if_onrejectnotcallable);
+ Branch(IsCallable(on_reject), &append_callbacks, &if_onrejectnotcallable);
BIND(&if_onrejectnotcallable);
{
- Node* const default_reject_handler_symbol = HeapConstant(
- isolate->factory()->promise_default_reject_handler_symbol());
- var_on_reject.Bind(default_reject_handler_symbol);
+ var_on_reject.Bind(PromiseDefaultRejectHandlerSymbolConstant());
Goto(&append_callbacks);
}
}
@@ -558,8 +528,7 @@ Node* PromiseBuiltinsAssembler::InternalPerformPromiseThen(
Node* info = AllocatePromiseReactionJobInfo(
result, var_on_resolve.value(), deferred_promise, deferred_on_resolve,
deferred_on_reject, context);
- // TODO(gsathya): Move this to TF
- CallRuntime(Runtime::kEnqueuePromiseReactionJob, context, info);
+ CallBuiltin(Builtins::kEnqueueMicrotask, NoContextConstant(), info);
Goto(&out);
BIND(&reject);
@@ -578,8 +547,7 @@ Node* PromiseBuiltinsAssembler::InternalPerformPromiseThen(
Node* info = AllocatePromiseReactionJobInfo(
result, var_on_reject.value(), deferred_promise,
deferred_on_resolve, deferred_on_reject, context);
- // TODO(gsathya): Move this to TF
- CallRuntime(Runtime::kEnqueuePromiseReactionJob, context, info);
+ CallBuiltin(Builtins::kEnqueueMicrotask, NoContextConstant(), info);
Goto(&out);
}
}
@@ -787,8 +755,7 @@ void PromiseBuiltinsAssembler::InternalResolvePromise(Node* context,
// 12. Perform EnqueueJob("PromiseJobs",
// PromiseResolveThenableJob, « promise, resolution, thenAction»).
BIND(&enqueue);
- // TODO(gsathya): Move this to TF
- CallRuntime(Runtime::kEnqueuePromiseResolveThenableJob, context, info);
+ CallBuiltin(Builtins::kEnqueueMicrotask, NoContextConstant(), info);
Goto(&out);
}
@@ -846,7 +813,7 @@ void PromiseBuiltinsAssembler::PromiseFulfill(
result, tasks, deferred_promise, deferred_on_resolve, deferred_on_reject,
context);
- CallRuntime(Runtime::kEnqueuePromiseReactionJob, context, info);
+ CallBuiltin(Builtins::kEnqueueMicrotask, NoContextConstant(), info);
Goto(&do_promisereset);
BIND(&do_promisereset);
@@ -1080,19 +1047,18 @@ TF_BUILTIN(PromiseConstructor, PromiseBuiltinsAssembler) {
Node *resolve, *reject;
std::tie(resolve, reject) = CreatePromiseResolvingFunctions(
var_result.value(), TrueConstant(), native_context);
- Callable call_callable = CodeFactory::Call(isolate);
- Node* const maybe_exception = CallJS(call_callable, context, executor,
- UndefinedConstant(), resolve, reject);
+ Node* const maybe_exception = CallJS(
+ CodeFactory::Call(isolate, ConvertReceiverMode::kNullOrUndefined),
+ context, executor, UndefinedConstant(), resolve, reject);
GotoIfException(maybe_exception, &if_rejectpromise, &var_reason);
Branch(is_debug_active, &debug_pop, &out);
BIND(&if_rejectpromise);
{
- Callable call_callable = CodeFactory::Call(isolate);
- CallJS(call_callable, context, reject, UndefinedConstant(),
- var_reason.value());
+ CallJS(CodeFactory::Call(isolate, ConvertReceiverMode::kNullOrUndefined),
+ context, reject, UndefinedConstant(), var_reason.value());
Branch(is_debug_active, &debug_pop, &out);
}
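(ConvertReceiverMode::kNullOrUndefined records that the executor, and the reject fallback, are invoked with an undefined receiver, so the call sequence can skip the receiver check. A quick JS illustration of that calling convention:)

new Promise(function (resolve) {
  'use strict';
  console.log(this); // undefined -- the executor is called with an undefined receiver
  resolve();
});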
@@ -1130,8 +1096,8 @@ TF_BUILTIN(PromiseInternalConstructor, PromiseBuiltinsAssembler) {
}
// ES#sec-promise.prototype.then
-// Promise.prototype.catch ( onFulfilled, onRejected )
-TF_BUILTIN(PromiseThen, PromiseBuiltinsAssembler) {
+// Promise.prototype.then ( onFulfilled, onRejected )
+TF_BUILTIN(PromisePrototypeThen, PromiseBuiltinsAssembler) {
// 1. Let promise be the this value.
Node* const promise = Parameter(Descriptor::kReceiver);
Node* const on_resolve = Parameter(Descriptor::kOnFullfilled);
@@ -1169,7 +1135,6 @@ TF_BUILTIN(PromiseHandleReject, PromiseBuiltinsAssembler) {
Node* const exception = Parameter(Descriptor::kException);
Node* const context = Parameter(Descriptor::kContext);
- Callable call_callable = CodeFactory::Call(isolate());
VARIABLE(var_unused, MachineRepresentation::kTagged);
Label if_internalhandler(this), if_customhandler(this, Label::kDeferred);
@@ -1183,7 +1148,15 @@ TF_BUILTIN(PromiseHandleReject, PromiseBuiltinsAssembler) {
BIND(&if_customhandler);
{
- CallJS(call_callable, context, on_reject, UndefinedConstant(), exception);
+ VARIABLE(var_exception, MachineRepresentation::kTagged, TheHoleConstant());
+ Label if_exception(this);
+ Node* const ret = CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
+ context, on_reject, UndefinedConstant(), exception);
+ GotoIfException(ret, &if_exception, &var_exception);
+ Return(UndefinedConstant());
+ BIND(&if_exception);
+ CallRuntime(Runtime::kReportMessage, context, var_exception.value());
Return(UndefinedConstant());
}
}
@@ -1225,9 +1198,7 @@ TF_BUILTIN(PromiseHandle, PromiseBuiltinsAssembler) {
BIND(&if_defaulthandler);
{
Label if_resolve(this), if_reject(this);
- Node* const default_resolve_handler_symbol = HeapConstant(
- isolate->factory()->promise_default_resolve_handler_symbol());
- Branch(WordEqual(default_resolve_handler_symbol, handler), &if_resolve,
+ Branch(IsPromiseDefaultResolveHandlerSymbol(handler), &if_resolve,
&if_reject);
BIND(&if_resolve);
@@ -1246,9 +1217,9 @@ TF_BUILTIN(PromiseHandle, PromiseBuiltinsAssembler) {
BIND(&if_callablehandler);
{
- Callable call_callable = CodeFactory::Call(isolate);
- Node* const result =
- CallJS(call_callable, context, handler, UndefinedConstant(), value);
+ Node* const result = CallJS(
+ CodeFactory::Call(isolate, ConvertReceiverMode::kNullOrUndefined),
+ context, handler, UndefinedConstant(), value);
var_result.Bind(result);
GotoIfException(result, &if_rejectpromise, &var_reason);
Branch(IsUndefined(deferred_on_resolve), &if_internalhandler,
@@ -1261,10 +1232,10 @@ TF_BUILTIN(PromiseHandle, PromiseBuiltinsAssembler) {
BIND(&if_customhandler);
{
- Callable call_callable = CodeFactory::Call(isolate);
- Node* const maybe_exception =
- CallJS(call_callable, context, deferred_on_resolve,
- UndefinedConstant(), var_result.value());
+ Node* const maybe_exception = CallJS(
+ CodeFactory::Call(isolate, ConvertReceiverMode::kNullOrUndefined),
+ context, deferred_on_resolve, UndefinedConstant(),
+ var_result.value());
GotoIfException(maybe_exception, &if_rejectpromise, &var_reason);
Goto(&promisehook_after);
}
@@ -1297,9 +1268,23 @@ TF_BUILTIN(PromiseHandle, PromiseBuiltinsAssembler) {
}
}
+TF_BUILTIN(PromiseHandleJS, PromiseBuiltinsAssembler) {
+ Node* const value = Parameter(Descriptor::kValue);
+ Node* const handler = Parameter(Descriptor::kHandler);
+ Node* const deferred_promise = Parameter(Descriptor::kDeferredPromise);
+ Node* const deferred_on_resolve = Parameter(Descriptor::kDeferredOnResolve);
+ Node* const deferred_on_reject = Parameter(Descriptor::kDeferredOnReject);
+ Node* const context = Parameter(Descriptor::kContext);
+
+ Node* const result =
+ CallBuiltin(Builtins::kPromiseHandle, context, value, handler,
+ deferred_promise, deferred_on_resolve, deferred_on_reject);
+ Return(result);
+}
+
// ES#sec-promise.prototype.catch
// Promise.prototype.catch ( onRejected )
-TF_BUILTIN(PromiseCatch, PromiseBuiltinsAssembler) {
+TF_BUILTIN(PromisePrototypeCatch, PromiseBuiltinsAssembler) {
// 1. Let promise be the this value.
Node* const promise = Parameter(Descriptor::kReceiver);
Node* const on_resolve = UndefinedConstant();
@@ -1321,9 +1306,9 @@ TF_BUILTIN(PromiseCatch, PromiseBuiltinsAssembler) {
{
Node* const then =
GetProperty(context, promise, isolate()->factory()->then_string());
- Callable call_callable = CodeFactory::Call(isolate());
- Node* const result =
- CallJS(call_callable, context, then, promise, on_resolve, on_reject);
+ Node* const result = CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
+ context, then, promise, on_resolve, on_reject);
Return(result);
}
}
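(The GetProperty(then) + CallJS pair above makes Promise.prototype.catch equivalent to Invoke(promise, "then", « undefined, onRejected »), so an overridden then is observable from catch. A short sketch:)

class LoggingPromise extends Promise {
  then(onFulfilled, onRejected) {
    console.log('then:', typeof onFulfilled, typeof onRejected);
    return super.then(onFulfilled, onRejected);
  }
}
LoggingPromise.reject(new Error('boom')).catch(() => {});
// logs "then: undefined function"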
@@ -1407,10 +1392,10 @@ TF_BUILTIN(PromiseResolve, PromiseBuiltinsAssembler) {
{
Node* const capability = NewPromiseCapability(context, constructor);
- Callable call_callable = CodeFactory::Call(isolate);
Node* const resolve =
LoadObjectField(capability, PromiseCapability::kResolveOffset);
- CallJS(call_callable, context, resolve, UndefinedConstant(), value);
+ CallJS(CodeFactory::Call(isolate, ConvertReceiverMode::kNullOrUndefined),
+ context, resolve, UndefinedConstant(), value);
Node* const result =
LoadObjectField(capability, PromiseCapability::kPromiseOffset);
@@ -1468,8 +1453,6 @@ TF_BUILTIN(PromiseReject, PromiseBuiltinsAssembler) {
Label if_nativepromise(this), if_custompromise(this, Label::kDeferred);
Node* const native_context = LoadNativeContext(context);
- GotoIfForceSlowPath(&if_custompromise);
-
Node* const promise_fun =
LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
Branch(WordEqual(promise_fun, receiver), &if_nativepromise,
@@ -1492,8 +1475,8 @@ TF_BUILTIN(PromiseReject, PromiseBuiltinsAssembler) {
// 4. Perform ? Call(promiseCapability.[[Reject]], undefined, « r »).
Node* const reject =
LoadObjectField(capability, PromiseCapability::kRejectOffset);
- Callable call_callable = CodeFactory::Call(isolate());
- CallJS(call_callable, context, reject, UndefinedConstant(), reason);
+ CallJS(CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
+ context, reject, UndefinedConstant(), reason);
// 5. Return promiseCapability.[[Promise]].
Node* const promise =
@@ -1567,9 +1550,9 @@ TF_BUILTIN(PromiseThenFinally, PromiseBuiltinsAssembler) {
CSA_ASSERT(this, IsCallable(on_finally));
// 3. Let result be ? Call(onFinally).
- Callable call_callable = CodeFactory::Call(isolate());
- Node* const result =
- CallJS(call_callable, context, on_finally, UndefinedConstant());
+ Node* const result = CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
+ context, on_finally, UndefinedConstant());
// 4. Let C be F.[[Constructor]].
Node* const constructor = LoadContextElement(context, kConstructorSlot);
@@ -1588,8 +1571,9 @@ TF_BUILTIN(PromiseThenFinally, PromiseBuiltinsAssembler) {
// 8. Return ? Invoke(promise, "then", « valueThunk »).
Node* const promise_then =
GetProperty(context, promise, factory()->then_string());
- Node* const result_promise = CallJS(call_callable, context,
- promise_then, promise, value_thunk);
+ Node* const result_promise = CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
+ context, promise_then, promise, value_thunk);
Return(result_promise);
}
@@ -1628,9 +1612,9 @@ TF_BUILTIN(PromiseCatchFinally, PromiseBuiltinsAssembler) {
CSA_ASSERT(this, IsCallable(on_finally));
// 3. Let result be ? Call(onFinally).
- Callable call_callable = CodeFactory::Call(isolate());
- Node* result =
- CallJS(call_callable, context, on_finally, UndefinedConstant());
+ Node* result = CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
+ context, on_finally, UndefinedConstant());
// 4. Let C be F.[[Constructor]].
Node* const constructor = LoadContextElement(context, kConstructorSlot);
@@ -1649,12 +1633,13 @@ TF_BUILTIN(PromiseCatchFinally, PromiseBuiltinsAssembler) {
// 8. Return ? Invoke(promise, "then", « thrower »).
Node* const promise_then =
GetProperty(context, promise, factory()->then_string());
- Node* const result_promise = CallJS(call_callable, context,
- promise_then, promise, thrower);
+ Node* const result_promise = CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
+ context, promise_then, promise, thrower);
Return(result_promise);
}
-TF_BUILTIN(PromiseFinally, PromiseBuiltinsAssembler) {
+TF_BUILTIN(PromisePrototypeFinally, PromiseBuiltinsAssembler) {
CSA_ASSERT_JS_ARGC_EQ(this, 1);
// 1. Let promise be the this value.
@@ -1662,9 +1647,9 @@ TF_BUILTIN(PromiseFinally, PromiseBuiltinsAssembler) {
Node* const on_finally = Parameter(Descriptor::kOnFinally);
Node* const context = Parameter(Descriptor::kContext);
- // 2. If IsPromise(promise) is false, throw a TypeError exception.
- ThrowIfNotInstanceType(context, promise, JS_PROMISE_TYPE,
- "Promise.prototype.finally");
+ // 2. If Type(promise) is not Object, throw a TypeError exception.
+ ThrowIfNotJSReceiver(context, promise, MessageTemplate::kCalledOnNonObject,
+ "Promise.prototype.finally");
// 3. Let C be ? SpeciesConstructor(promise, %Promise%).
Node* const native_context = LoadNativeContext(context);
@@ -1714,9 +1699,10 @@ TF_BUILTIN(PromiseFinally, PromiseBuiltinsAssembler) {
BIND(&perform_finally);
Node* const promise_then =
GetProperty(context, promise, factory()->then_string());
- Node* const result_promise =
- CallJS(CodeFactory::Call(isolate()), context, promise_then, promise,
- var_then_finally.value(), var_catch_finally.value());
+ Node* const result_promise = CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
+ context, promise_then, promise, var_then_finally.value(),
+ var_catch_finally.value());
Return(result_promise);
}
@@ -1758,8 +1744,9 @@ TF_BUILTIN(PerformNativePromiseThen, PromiseBuiltinsAssembler) {
}
Node* PromiseBuiltinsAssembler::PerformPromiseAll(
- Node* context, Node* constructor, Node* capability, Node* iterator,
- Label* if_exception, Variable* var_exception) {
+ Node* context, Node* constructor, Node* capability,
+ const IteratorRecord& iterator, Label* if_exception,
+ Variable* var_exception) {
IteratorBuiltinsAssembler iter_assembler(state());
Label close_iterator(this);
@@ -1805,8 +1792,9 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
GetProperty(context, constructor, factory()->resolve_string());
GotoIfException(promise_resolve, &close_iterator, var_exception);
- Node* const next_promise = CallJS(CodeFactory::Call(isolate()), context,
- promise_resolve, constructor, next_value);
+ Node* const next_promise = CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
+ context, promise_resolve, constructor, next_value);
GotoIfException(next_promise, &close_iterator, var_exception);
// Let resolveElement be a new built-in function object as defined in
@@ -1844,7 +1832,7 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
BIND(&if_outofrange);
{
// If the incremented value is out of Smi range, crash.
- Abort(kOffsetOutOfRange);
+ Abort(AbortReason::kOffsetOutOfRange);
}
BIND(&done);
@@ -1857,7 +1845,8 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
GotoIfException(then, &close_iterator, var_exception);
Node* const then_call = CallJS(
- CodeFactory::Call(isolate()), context, then, next_promise, resolve,
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
+ context, then, next_promise, resolve,
LoadObjectField(capability, PromiseCapability::kRejectOffset));
GotoIfException(then_call, &close_iterator, var_exception);
@@ -1899,9 +1888,9 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
Node* const resolve =
LoadObjectField(capability, PromiseCapability::kResolveOffset);
- Node* const resolve_call =
- CallJS(CodeFactory::Call(isolate()), context, resolve,
- UndefinedConstant(), values_array);
+ Node* const resolve_call = CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
+ context, resolve, UndefinedConstant(), values_array);
GotoIfException(resolve_call, if_exception, var_exception);
Goto(&return_promise);
@@ -1963,7 +1952,7 @@ TF_BUILTIN(PromiseAll, PromiseBuiltinsAssembler) {
// Let iterator be GetIterator(iterable).
// IfAbruptRejectPromise(iterator, promiseCapability).
Node* const iterable = Parameter(Descriptor::kIterable);
- Node* const iterator = iter_assembler.GetIterator(
+ IteratorRecord iterator = iter_assembler.GetIterator(
context, iterable, &reject_promise, &var_exception);
// Let result be PerformPromiseAll(iteratorRecord, C, promiseCapability).
@@ -1982,9 +1971,8 @@ TF_BUILTIN(PromiseAll, PromiseBuiltinsAssembler) {
CSA_SLOW_ASSERT(this, IsNotTheHole(var_exception.value()));
Node* const reject =
LoadObjectField(capability, PromiseCapability::kRejectOffset);
- Callable callable = CodeFactory::Call(isolate());
- CallJS(callable, context, reject, UndefinedConstant(),
- var_exception.value());
+ CallJS(CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
+ context, reject, UndefinedConstant(), var_exception.value());
Node* const promise =
LoadObjectField(capability, PromiseCapability::kPromiseOffset);
@@ -2059,8 +2047,8 @@ TF_BUILTIN(PromiseAllResolveElementClosure, PromiseBuiltinsAssembler) {
LoadContextElement(context, kPromiseAllResolveElementCapabilitySlot);
Node* const resolve =
LoadObjectField(capability, PromiseCapability::kResolveOffset);
- CallJS(CodeFactory::Call(isolate()), context, resolve, UndefinedConstant(),
- values_array);
+ CallJS(CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
+ context, resolve, UndefinedConstant(), values_array);
Return(UndefinedConstant());
BIND(&already_called);
@@ -2101,7 +2089,7 @@ TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) {
// Let iterator be GetIterator(iterable).
// IfAbruptRejectPromise(iterator, promiseCapability).
Node* const iterable = Parameter(Descriptor::kIterable);
- Node* const iterator = iter_assembler.GetIterator(
+ IteratorRecord iterator = iter_assembler.GetIterator(
context, iterable, &reject_promise, &var_exception);
// Let result be PerformPromiseRace(iteratorRecord, C, promiseCapability).
@@ -2134,8 +2122,10 @@ TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) {
GetProperty(context, receiver, factory()->resolve_string());
GotoIfException(promise_resolve, &close_iterator, &var_exception);
- Node* const next_promise = CallJS(CodeFactory::Call(isolate()), context,
- promise_resolve, receiver, next_value);
+ Node* const next_promise =
+ CallJS(CodeFactory::Call(isolate(),
+ ConvertReceiverMode::kNotNullOrUndefined),
+ context, promise_resolve, receiver, next_value);
GotoIfException(next_promise, &close_iterator, &var_exception);
// Perform ? Invoke(nextPromise, "then", « resolveElement,
@@ -2144,8 +2134,10 @@ TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) {
GetProperty(context, next_promise, factory()->then_string());
GotoIfException(then, &close_iterator, &var_exception);
- Node* const then_call = CallJS(CodeFactory::Call(isolate()), context,
- then, next_promise, resolve, reject);
+ Node* const then_call =
+ CallJS(CodeFactory::Call(isolate(),
+ ConvertReceiverMode::kNotNullOrUndefined),
+ context, then, next_promise, resolve, reject);
GotoIfException(then_call, &close_iterator, &var_exception);
// For catch prediction, mark that rejections here are semantically
@@ -2172,9 +2164,8 @@ TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) {
{
Node* const reject =
LoadObjectField(capability, PromiseCapability::kRejectOffset);
- Callable callable = CodeFactory::Call(isolate());
- CallJS(callable, context, reject, UndefinedConstant(),
- var_exception.value());
+ CallJS(CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
+ context, reject, UndefinedConstant(), var_exception.value());
Node* const promise =
LoadObjectField(capability, PromiseCapability::kPromiseOffset);
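(Per the ThrowIfNotJSReceiver change above, Promise.prototype.finally now only requires its receiver to be an Object, so it composes with arbitrary thenables. A rough sketch; the thenable is illustrative:)

const thenable = {
  then(onFulfilled) { onFulfilled('done'); },
};
Promise.prototype.finally.call(thenable, () => console.log('cleanup'));
// logs "cleanup"; under the previous JS_PROMISE_TYPE check this threw a TypeError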
diff --git a/deps/v8/src/builtins/builtins-promise-gen.h b/deps/v8/src/builtins/builtins-promise-gen.h
index 759176757f..366c7c22cd 100644
--- a/deps/v8/src/builtins/builtins-promise-gen.h
+++ b/deps/v8/src/builtins/builtins-promise-gen.h
@@ -137,7 +137,6 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
void BranchIfFastPath(Node* native_context, Node* promise_fun, Node* promise,
Label* if_isunmodified, Label* if_ismodified);
- void InitializeFunctionContext(Node* native_context, Node* context, int len);
Node* CreatePromiseContext(Node* native_context, int slots);
void PromiseFulfill(Node* context, Node* promise, Node* result,
v8::Promise::PromiseState status);
@@ -158,7 +157,7 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
Node* CreateThrowerFunction(Node* reason, Node* native_context);
Node* PerformPromiseAll(Node* context, Node* constructor, Node* capability,
- Node* iterator, Label* if_exception,
+ const IteratorRecord& record, Label* if_exception,
Variable* var_exception);
Node* IncrementSmiCell(Node* cell, Label* if_overflow = nullptr);
diff --git a/deps/v8/src/builtins/builtins-proxy-gen.cc b/deps/v8/src/builtins/builtins-proxy-gen.cc
index 2d81867d51..64e838d53a 100644
--- a/deps/v8/src/builtins/builtins-proxy-gen.cc
+++ b/deps/v8/src/builtins/builtins-proxy-gen.cc
@@ -126,6 +126,106 @@ TF_BUILTIN(ProxyConstructor_ConstructStub, ProxiesCodeStubAssembler) {
ThrowTypeError(context, MessageTemplate::kProxyHandlerOrTargetRevoked);
}
+Node* ProxiesCodeStubAssembler::CreateProxyRevokeFunctionContext(
+ Node* proxy, Node* native_context) {
+ Node* const context = Allocate(FixedArray::SizeFor(kProxyContextLength));
+ StoreMapNoWriteBarrier(context, Heap::kFunctionContextMapRootIndex);
+ InitializeFunctionContext(native_context, context, kProxyContextLength);
+ StoreContextElementNoWriteBarrier(context, kProxySlot, proxy);
+ return context;
+}
+
+Node* ProxiesCodeStubAssembler::AllocateProxyRevokeFunction(Node* proxy,
+ Node* context) {
+ Node* const native_context = LoadNativeContext(context);
+
+ Node* const proxy_context =
+ CreateProxyRevokeFunctionContext(proxy, native_context);
+ Node* const revoke_map = LoadContextElement(
+ native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
+ Node* const revoke_info =
+ LoadContextElement(native_context, Context::PROXY_REVOKE_SHARED_FUN);
+
+ return AllocateFunctionWithMapAndContext(revoke_map, revoke_info,
+ proxy_context);
+}
+
+TF_BUILTIN(ProxyRevocable, ProxiesCodeStubAssembler) {
+ Node* const target = Parameter(Descriptor::kTarget);
+ Node* const handler = Parameter(Descriptor::kHandler);
+ Node* const context = Parameter(Descriptor::kContext);
+ Node* const native_context = LoadNativeContext(context);
+
+ Label throw_proxy_non_object(this, Label::kDeferred),
+ throw_proxy_handler_or_target_revoked(this, Label::kDeferred),
+ return_create_proxy(this);
+
+ GotoIf(TaggedIsSmi(target), &throw_proxy_non_object);
+ GotoIfNot(IsJSReceiver(target), &throw_proxy_non_object);
+ GotoIfRevokedProxy(target, &throw_proxy_handler_or_target_revoked);
+
+ GotoIf(TaggedIsSmi(handler), &throw_proxy_non_object);
+ GotoIfNot(IsJSReceiver(handler), &throw_proxy_non_object);
+ GotoIfRevokedProxy(handler, &throw_proxy_handler_or_target_revoked);
+
+ Node* const proxy = AllocateProxy(target, handler, context);
+ Node* const revoke = AllocateProxyRevokeFunction(proxy, context);
+
+ Node* const result = Allocate(JSProxyRevocableResult::kSize);
+ Node* const result_map = LoadContextElement(
+ native_context, Context::PROXY_REVOCABLE_RESULT_MAP_INDEX);
+ StoreMapNoWriteBarrier(result, result_map);
+ StoreObjectFieldRoot(result, JSProxyRevocableResult::kPropertiesOrHashOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ StoreObjectFieldRoot(result, JSProxyRevocableResult::kElementsOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ StoreObjectFieldNoWriteBarrier(result, JSProxyRevocableResult::kProxyOffset,
+ proxy);
+ StoreObjectFieldNoWriteBarrier(result, JSProxyRevocableResult::kRevokeOffset,
+ revoke);
+ Return(result);
+
+ BIND(&throw_proxy_non_object);
+ ThrowTypeError(context, MessageTemplate::kProxyNonObject);
+
+ BIND(&throw_proxy_handler_or_target_revoked);
+ ThrowTypeError(context, MessageTemplate::kProxyHandlerOrTargetRevoked);
+}
+
+// Proxy Revocation Functions
+// https://tc39.github.io/ecma262/#sec-proxy-revocation-functions
+TF_BUILTIN(ProxyRevoke, ProxiesCodeStubAssembler) {
+ Node* const context = Parameter(Descriptor::kContext);
+
+ // 1. Let p be F.[[RevocableProxy]].
+ Node* const proxy_slot = IntPtrConstant(kProxySlot);
+ Node* const proxy = LoadContextElement(context, proxy_slot);
+
+ Label revoke_called(this);
+
+ // 2. If p is null, ...
+ GotoIf(IsNull(proxy), &revoke_called);
+
+ // 3. Set F.[[RevocableProxy]] to null.
+ StoreContextElement(context, proxy_slot, NullConstant());
+
+ // 4. Assert: p is a Proxy object.
+ CSA_ASSERT(this, IsJSProxy(proxy));
+
+ // 5. Set p.[[ProxyTarget]] to null.
+ StoreObjectField(proxy, JSProxy::kTargetOffset, NullConstant());
+
+ // 6. Set p.[[ProxyHandler]] to null.
+ StoreObjectField(proxy, JSProxy::kHandlerOffset, NullConstant());
+
+ // 7. Return undefined.
+ Return(UndefinedConstant());
+
+ BIND(&revoke_called);
+ // 2. ... return undefined.
+ Return(UndefinedConstant());
+}
+
TF_BUILTIN(CallProxy, ProxiesCodeStubAssembler) {
Node* argc = Parameter(Descriptor::kActualArgumentsCount);
Node* argc_ptr = ChangeInt32ToIntPtr(argc);
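(The ProxyRevocable/ProxyRevoke pair above implements Proxy.revocable. A brief JS sketch of the behavior the numbered spec steps in the comments describe:)

const { proxy, revoke } = Proxy.revocable({}, {});
proxy.x = 1;
console.log(proxy.x);   // 1 -- the proxy is live
console.log(revoke());  // undefined -- sets [[ProxyTarget]] and [[ProxyHandler]] to null
console.log(revoke());  // undefined -- a second call finds p === null and is a no-op
try {
  proxy.x;              // any operation on a revoked proxy throws
} catch (e) {
  console.log(e instanceof TypeError); // true
}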
diff --git a/deps/v8/src/builtins/builtins-proxy-gen.h b/deps/v8/src/builtins/builtins-proxy-gen.h
index 2b2ac54ebe..92b175bfde 100644
--- a/deps/v8/src/builtins/builtins-proxy-gen.h
+++ b/deps/v8/src/builtins/builtins-proxy-gen.h
@@ -27,17 +27,26 @@ class ProxiesCodeStubAssembler : public CodeStubAssembler {
Node* receiver);
protected:
+ enum ProxyRevokeFunctionContextSlot {
+ kProxySlot = Context::MIN_CONTEXT_SLOTS,
+ kProxyContextLength,
+ };
+
void GotoIfRevokedProxy(Node* object, Label* if_proxy_revoked);
Node* AllocateProxy(Node* target, Node* handler, Node* context);
Node* AllocateJSArrayForCodeStubArguments(Node* context,
CodeStubArguments& args, Node* argc,
ParameterMode mode);
+ Node* AllocateProxyRevokeFunction(Node* proxy, Node* context);
void CheckHasTrapResult(Node* context, Node* target, Node* proxy, Node* name,
Label* check_passed, Label* if_bailout);
void CheckGetSetTrapResult(Node* context, Node* target, Node* proxy,
Node* name, Node* trap_result, Label* if_not_found,
JSProxy::AccessKind access_kind);
+
+ private:
+ Node* CreateProxyRevokeFunctionContext(Node* proxy, Node* native_context);
};
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc
index 5ce4abd557..4227c628d1 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.cc
+++ b/deps/v8/src/builtins/builtins-regexp-gen.cc
@@ -62,15 +62,15 @@ Node* RegExpBuiltinsAssembler::AllocateRegExpResult(Node* context, Node* length,
LoadContextElement(native_context, Context::REGEXP_RESULT_MAP_INDEX);
StoreMapNoWriteBarrier(result, map);
- Node* const empty_array = EmptyFixedArrayConstant();
- DCHECK(Heap::RootIsImmortalImmovable(Heap::kEmptyFixedArrayRootIndex));
StoreObjectFieldNoWriteBarrier(result, JSArray::kPropertiesOrHashOffset,
- empty_array);
+ EmptyFixedArrayConstant());
StoreObjectFieldNoWriteBarrier(result, JSArray::kElementsOffset, elements);
StoreObjectFieldNoWriteBarrier(result, JSArray::kLengthOffset, length);
StoreObjectFieldNoWriteBarrier(result, JSRegExpResult::kIndexOffset, index);
- StoreObjectField(result, JSRegExpResult::kInputOffset, input);
+ StoreObjectFieldNoWriteBarrier(result, JSRegExpResult::kInputOffset, input);
+ StoreObjectFieldNoWriteBarrier(result, JSRegExpResult::kGroupsOffset,
+ UndefinedConstant());
// Initialize the elements.
@@ -223,8 +223,6 @@ Node* RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
// Allocate a new object to store the named capture properties.
// TODO(jgruber): Could be optimized by adding the object map to the heap
// root list.
- // TODO(jgruber): Replace CreateDataProperty runtime calls once we have
- // equivalent functionality in CSA.
Node* const native_context = LoadNativeContext(context);
Node* const map = LoadContextElement(
@@ -233,14 +231,7 @@ Node* RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
AllocateNameDictionary(NameDictionary::kInitialCapacity);
Node* const group_object = AllocateJSObjectFromMap(map, properties);
-
- // Store it on the result as a 'group' property.
-
- {
- Node* const name = HeapConstant(isolate()->factory()->groups_string());
- CallRuntime(Runtime::kCreateDataProperty, context, result, name,
- group_object);
- }
+ StoreObjectField(result, JSRegExpResult::kGroupsOffset, group_object);
// One or more named captures exist, add a property for each one.
@@ -267,6 +258,9 @@ Node* RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
Node* const capture =
LoadFixedArrayElement(result_elements, SmiUntag(index));
+ // TODO(jgruber): Calling into runtime to create each property is slow.
+ // Either we should create properties entirely in CSA (should be doable),
+ // or only call runtime once and loop there.
CallRuntime(Runtime::kCreateDataProperty, context, group_object, name,
capture);
@@ -834,7 +828,7 @@ Node* RegExpBuiltinsAssembler::IsFastRegExpNoPrototype(Node* const context,
Label out(this);
VARIABLE(var_result, MachineRepresentation::kWord32);
-#if defined(DEBUG) || defined(ENABLE_FASTSLOW_SWITCH)
+#ifdef V8_ENABLE_FORCE_SLOW_PATH
var_result.Bind(Int32Constant(0));
GotoIfForceSlowPath(&out);
#endif
@@ -1225,8 +1219,7 @@ TF_BUILTIN(RegExpPrototypeFlagsGetter, RegExpBuiltinsAssembler) {
Node* const receiver = maybe_receiver;
Label if_isfastpath(this), if_isslowpath(this, Label::kDeferred);
- Branch(IsFastRegExpNoPrototype(context, receiver, map), &if_isfastpath,
- &if_isslowpath);
+ BranchIfFastRegExp(context, receiver, map, &if_isfastpath, &if_isslowpath);
BIND(&if_isfastpath);
Return(FlagsGetter(context, receiver, true));
@@ -2543,7 +2536,7 @@ TF_BUILTIN(RegExpSplit, RegExpBuiltinsAssembler) {
// to verify the constructor property and jump to the slow path if it has
// been changed.
- // Convert {maybe_limit} to a uint32, capping at the maximal smi value.
+ // Verify {maybe_limit}.
VARIABLE(var_limit, MachineRepresentation::kTagged, maybe_limit);
Label if_limitissmimax(this), runtime(this, Label::kDeferred);
@@ -2552,21 +2545,12 @@ TF_BUILTIN(RegExpSplit, RegExpBuiltinsAssembler) {
Label next(this);
GotoIf(IsUndefined(maybe_limit), &if_limitissmimax);
- GotoIf(TaggedIsPositiveSmi(maybe_limit), &next);
-
- var_limit.Bind(ToUint32(context, maybe_limit));
- {
- // ToUint32(limit) could potentially change the shape of the RegExp
- // object. Recheck that we are still on the fast path and bail to runtime
- // otherwise.
- {
- Label next(this);
- BranchIfFastRegExp(context, regexp, &next, &runtime);
- BIND(&next);
- }
+ Branch(TaggedIsPositiveSmi(maybe_limit), &next, &runtime);
- Branch(TaggedIsPositiveSmi(var_limit.value()), &next, &if_limitissmimax);
- }
+ // We need to be extra-strict and require the given limit to be either
+ // undefined or a positive smi. We can't call ToUint32(maybe_limit) since
+ // that might move us onto the slow path, resulting in ordering spec
+ // violations (see https://crbug.com/801171).
BIND(&if_limitissmimax);
{
@@ -2590,13 +2574,8 @@ TF_BUILTIN(RegExpSplit, RegExpBuiltinsAssembler) {
RegExpPrototypeSplitBody(context, regexp, string, var_limit.value());
BIND(&runtime);
- {
- // The runtime call passes in limit to ensure the second ToUint32(limit)
- // call is not observable.
- CSA_ASSERT(this, IsNumber(var_limit.value()));
- Return(CallRuntime(Runtime::kRegExpSplit, context, regexp, string,
- var_limit.value()));
- }
+ Return(CallRuntime(Runtime::kRegExpSplit, context, regexp, string,
+ var_limit.value()));
}
// ES#sec-regexp.prototype-@@split
@@ -2740,7 +2719,7 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
TNode<IntPtrT> int_elem = SmiUntag(elem);
TNode<IntPtrT> new_match_start =
Signed(IntPtrAdd(WordShr(int_elem, IntPtrConstant(11)),
- WordAnd(int_elem, IntPtrConstant(0x7ff))));
+ WordAnd(int_elem, IntPtrConstant(0x7FF))));
var_match_start = SmiTag(new_match_start);
Goto(&loop_epilogue);
}
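(The comment in the RegExpSplit hunk above is about re-entrancy: ToUint32(limit) can run user code, and the spec orders that conversion after the splitter is constructed. A rough JS sketch of the kind of side effect the fast path must not reorder; the patched exec is illustrative, and the results follow spec semantics:)

const calls = [];
const limit = {
  valueOf() {
    // user code running inside ToUint32(limit)
    RegExp.prototype.exec = function () { calls.push('exec'); return null; };
    return 10;
  },
};
console.log('a,b'.split(/,/, limit)); // [ 'a,b' ] -- the patched exec finds no matches
console.log(calls.length > 0);        // true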
diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
index 6122ff85da..278a48c68e 100644
--- a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
+++ b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
@@ -213,7 +213,7 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
ValidateAtomicIndex(array, index_word32, context);
Node* index_word = ChangeUint32ToWord(index_word32);
- Node* value_integer = ToInteger(context, value);
+ Node* value_integer = ToInteger_Inline(CAST(context), CAST(value));
Node* value_word32 = TruncateTaggedToWord32(context, value_integer);
#if DEBUG
@@ -266,7 +266,7 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
ConvertTaggedAtomicIndexToWord32(index, context, &index_integer);
ValidateAtomicIndex(array, index_word32, context);
- Node* value_integer = ToInteger(context, value);
+ Node* value_integer = ToInteger_Inline(CAST(context), CAST(value));
#if DEBUG
DebugSanityCheckAtomicIndex(array, index_word32, context);
@@ -340,8 +340,8 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) {
ConvertTaggedAtomicIndexToWord32(index, context, &index_integer);
ValidateAtomicIndex(array, index_word32, context);
- Node* old_value_integer = ToInteger(context, old_value);
- Node* new_value_integer = ToInteger(context, new_value);
+ Node* old_value_integer = ToInteger_Inline(CAST(context), CAST(old_value));
+ Node* new_value_integer = ToInteger_Inline(CAST(context), CAST(new_value));
#if DEBUG
DebugSanityCheckAtomicIndex(array, index_word32, context);
@@ -436,7 +436,7 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon(
ConvertTaggedAtomicIndexToWord32(index, context, &index_integer);
ValidateAtomicIndex(array, index_word32, context);
- Node* value_integer = ToInteger(context, value);
+ Node* value_integer = ToInteger_Inline(CAST(context), CAST(value));
#if DEBUG
// In Debug mode, we re-validate the index as a sanity check because
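(ToInteger_Inline keeps the user-visible ToInteger conversion, which can invoke valueOf and truncates toward zero, while avoiding the builtin call. A quick JS sketch of that conversion:)

const ta = new Int32Array(new SharedArrayBuffer(8));
const v = { valueOf() { return 41.9; } };
console.log(Atomics.store(ta, 0, v)); // 41 -- store returns the converted value
console.log(Atomics.load(ta, 0));     // 41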
diff --git a/deps/v8/src/builtins/builtins-string-gen.cc b/deps/v8/src/builtins/builtins-string-gen.cc
index 9d86f3105b..195572de8e 100644
--- a/deps/v8/src/builtins/builtins-string-gen.cc
+++ b/deps/v8/src/builtins/builtins-string-gen.cc
@@ -126,8 +126,8 @@ Node* StringBuiltinsAssembler::PointerToStringDataAtIndex(
void StringBuiltinsAssembler::ConvertAndBoundsCheckStartArgument(
Node* context, Variable* var_start, Node* start, Node* string_length) {
- TNode<Object> const start_int =
- ToInteger(context, start, CodeStubAssembler::kTruncateMinusZero);
+ TNode<Object> const start_int = ToInteger_Inline(
+ CAST(context), CAST(start), CodeStubAssembler::kTruncateMinusZero);
TNode<Smi> const zero = SmiConstant(0);
Label done(this);
@@ -319,6 +319,31 @@ void StringBuiltinsAssembler::StringEqual_Loop(
}
}
+void StringBuiltinsAssembler::GenerateStringAt(char const* method_name,
+ TNode<Context> context,
+ Node* receiver,
+ TNode<Object> maybe_position,
+ TNode<Object> default_return,
+ StringAtAccessor accessor) {
+ // Check that {receiver} is coercible to Object and convert it to a String.
+ TNode<String> string = ToThisString(context, receiver, method_name);
+
+ // Convert the {position} to a Smi and check that it's in bounds of the
+ // {string}.
+ Label if_outofbounds(this, Label::kDeferred);
+ TNode<Number> position = ToInteger_Inline(
+ context, maybe_position, CodeStubAssembler::kTruncateMinusZero);
+ GotoIfNot(TaggedIsSmi(position), &if_outofbounds);
+ TNode<IntPtrT> index = SmiUntag(CAST(position));
+ TNode<IntPtrT> length = LoadStringLengthAsWord(string);
+ GotoIfNot(UintPtrLessThan(index, length), &if_outofbounds);
+ TNode<Object> result = accessor(string, length, index);
+ Return(result);
+
+ BIND(&if_outofbounds);
+ Return(default_return);
+}
+
void StringBuiltinsAssembler::GenerateStringRelationalComparison(Node* context,
Node* left,
Node* right,
@@ -526,28 +551,43 @@ TF_BUILTIN(StringGreaterThanOrEqual, StringBuiltinsAssembler) {
Operation::kGreaterThanOrEqual);
}
-TF_BUILTIN(StringCharAt, CodeStubAssembler) {
+TF_BUILTIN(StringCharAt, StringBuiltinsAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
Node* position = Parameter(Descriptor::kPosition);
// Load the character code at the {position} from the {receiver}.
- Node* code = StringCharCodeAt(receiver, position);
+ TNode<Int32T> code = StringCharCodeAt(receiver, position);
// And return the single character string with only that {code}
- Node* result = StringFromCharCode(code);
+ TNode<String> result = StringFromCharCode(code);
Return(result);
}
-TF_BUILTIN(StringCharCodeAt, CodeStubAssembler) {
+TF_BUILTIN(StringCharCodeAt, StringBuiltinsAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
Node* position = Parameter(Descriptor::kPosition);
// Load the character code at the {position} from the {receiver}.
- Node* code = StringCharCodeAt(receiver, position);
+ TNode<Int32T> code = StringCharCodeAt(receiver, position);
+
+ // And return it as TaggedSigned value.
+ // TODO(turbofan): Allow builtins to return values untagged.
+ TNode<Smi> result = SmiFromWord32(code);
+ Return(result);
+}
+
+TF_BUILTIN(StringCodePointAt, StringBuiltinsAssembler) {
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* position = Parameter(Descriptor::kPosition);
+ // TODO(sigurds) Figure out if passing length as argument pays off.
+ TNode<IntPtrT> length = LoadStringLengthAsWord(receiver);
+ // Load the character code at the {position} from the {receiver}.
+ TNode<Int32T> code =
+ LoadSurrogatePairAt(receiver, length, position, UnicodeEncoding::UTF32);
// And return it as TaggedSigned value.
// TODO(turbofan): Allow builtins to return values untagged.
- Node* result = SmiFromWord32(code);
+ TNode<Smi> result = SmiFromWord32(code);
Return(result);
}
@@ -563,7 +603,7 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
Node* context = Parameter(BuiltinDescriptor::kContext);
CodeStubArguments arguments(this, ChangeInt32ToIntPtr(argc));
- TNode<Smi> smi_argc = SmiTag(arguments.GetLength());
+ TNode<Smi> smi_argc = SmiTag(arguments.GetLength(INTPTR_PARAMETERS));
// Check if we have exactly one argument (plus the implicit receiver), i.e.
// if the parent frame is not an arguments adaptor frame.
Label if_oneargument(this), if_notoneargument(this);
@@ -577,7 +617,8 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
// string on the fly otherwise.
Node* code = arguments.AtIndex(0);
Node* code32 = TruncateTaggedToWord32(context, code);
- Node* code16 = Word32And(code32, Int32Constant(String::kMaxUtf16CodeUnit));
+ TNode<Int32T> code16 =
+ Signed(Word32And(code32, Int32Constant(String::kMaxUtf16CodeUnit)));
Node* result = StringFromCharCode(code16);
arguments.PopAndReturn(result);
}
@@ -662,115 +703,49 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
}
// ES6 #sec-string.prototype.charat
-TF_BUILTIN(StringPrototypeCharAt, CodeStubAssembler) {
+TF_BUILTIN(StringPrototypeCharAt, StringBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Node* receiver = Parameter(Descriptor::kReceiver);
- Node* position = Parameter(Descriptor::kPosition);
- Node* context = Parameter(Descriptor::kContext);
-
- // Check that {receiver} is coercible to Object and convert it to a String.
- receiver = ToThisString(context, receiver, "String.prototype.charAt");
-
- // Convert the {position} to a Smi and check that it's in bounds of the
- // {receiver}.
- {
- Label return_emptystring(this, Label::kDeferred);
- position =
- ToInteger(context, position, CodeStubAssembler::kTruncateMinusZero);
- GotoIfNot(TaggedIsSmi(position), &return_emptystring);
-
- // Determine the actual length of the {receiver} String.
- TNode<Smi> receiver_length = LoadStringLengthAsSmi(receiver);
-
- // Return "" if the Smi {position} is outside the bounds of the {receiver}.
- Label if_positioninbounds(this);
- Branch(SmiAboveOrEqual(position, receiver_length), &return_emptystring,
- &if_positioninbounds);
-
- BIND(&return_emptystring);
- Return(EmptyStringConstant());
-
- BIND(&if_positioninbounds);
- }
-
- // Load the character code at the {position} from the {receiver}.
- CSA_ASSERT(this, IntPtrLessThan(SmiUntag(position),
- LoadStringLengthAsWord(receiver)));
- CSA_ASSERT(this,
- IntPtrGreaterThanOrEqual(SmiUntag(position), IntPtrConstant(0)));
- Node* code = StringCharCodeAt(receiver, SmiUntag(position));
+ TNode<Object> maybe_position = CAST(Parameter(Descriptor::kPosition));
- // And return the single character string with only that {code}.
- Node* result = StringFromCharCode(code);
- Return(result);
+ GenerateStringAt("String.prototype.charAt", context, receiver, maybe_position,
+ EmptyStringConstant(),
+ [this](TNode<String> string, TNode<IntPtrT> length,
+ TNode<IntPtrT> index) {
+ TNode<Int32T> code = StringCharCodeAt(string, index);
+ return StringFromCharCode(code);
+ });
}
// ES6 #sec-string.prototype.charcodeat
-TF_BUILTIN(StringPrototypeCharCodeAt, CodeStubAssembler) {
+TF_BUILTIN(StringPrototypeCharCodeAt, StringBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Node* receiver = Parameter(Descriptor::kReceiver);
- Node* position = Parameter(Descriptor::kPosition);
- Node* context = Parameter(Descriptor::kContext);
-
- // Check that {receiver} is coercible to Object and convert it to a String.
- receiver = ToThisString(context, receiver, "String.prototype.charCodeAt");
-
- // Convert the {position} to a Smi and check that it's in bounds of the
- // {receiver}.
- {
- Label return_nan(this, Label::kDeferred);
- position =
- ToInteger(context, position, CodeStubAssembler::kTruncateMinusZero);
- GotoIfNot(TaggedIsSmi(position), &return_nan);
-
- // Determine the actual length of the {receiver} String.
- TNode<Smi> receiver_length = LoadStringLengthAsSmi(receiver);
-
- // Return NaN if the Smi {position} is outside the bounds of the {receiver}.
- Label if_positioninbounds(this);
- Branch(SmiAboveOrEqual(position, receiver_length), &return_nan,
- &if_positioninbounds);
+ TNode<Object> maybe_position = CAST(Parameter(Descriptor::kPosition));
- BIND(&return_nan);
- Return(NaNConstant());
-
- BIND(&if_positioninbounds);
- }
-
- // Load the character at the {position} from the {receiver}.
- Node* value = StringCharCodeAt(receiver, SmiUntag(position));
- Node* result = SmiFromWord32(value);
- Return(result);
+ GenerateStringAt("String.prototype.charCodeAt", context, receiver,
+ maybe_position, NanConstant(),
+ [this](TNode<String> receiver, TNode<IntPtrT> length,
+ TNode<IntPtrT> index) {
+ Node* value = StringCharCodeAt(receiver, index);
+ return SmiFromWord32(value);
+ });
}
// ES6 #sec-string.prototype.codepointat
TF_BUILTIN(StringPrototypeCodePointAt, StringBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Node* receiver = Parameter(Descriptor::kReceiver);
- Node* position = Parameter(Descriptor::kPosition);
-
- // Check that {receiver} is coercible to Object and convert it to a String.
- receiver = ToThisString(context, receiver, "String.prototype.codePointAt");
-
- // Convert the {position} to a Smi and check that it's in bounds of the
- // {receiver}.
- Label if_inbounds(this), if_outofbounds(this, Label::kDeferred);
- position =
- ToInteger(context, position, CodeStubAssembler::kTruncateMinusZero);
- GotoIfNot(TaggedIsSmi(position), &if_outofbounds);
- TNode<IntPtrT> untagged_position = SmiUntag(position);
- TNode<IntPtrT> receiver_length = LoadStringLengthAsWord(receiver);
- Branch(UintPtrLessThan(untagged_position, receiver_length), &if_inbounds,
- &if_outofbounds);
-
- BIND(&if_inbounds);
- {
- Node* value = LoadSurrogatePairAt(
- receiver, receiver_length, untagged_position, UnicodeEncoding::UTF32);
- Node* result = SmiFromWord32(value);
- Return(result);
- }
+ TNode<Object> maybe_position = CAST(Parameter(Descriptor::kPosition));
- BIND(&if_outofbounds);
- Return(UndefinedConstant());
+ GenerateStringAt("String.prototype.codePointAt", context, receiver,
+ maybe_position, UndefinedConstant(),
+ [this](TNode<String> receiver, TNode<IntPtrT> length,
+ TNode<IntPtrT> index) {
+ Node* value = LoadSurrogatePairAt(receiver, length, index,
+ UnicodeEncoding::UTF32);
+ return SmiFromWord32(value);
+ });
}
// ES6 String.prototype.concat(...args)
@@ -999,7 +974,7 @@ void StringIncludesIndexOfAssembler::Generate(SearchVariant variant) {
CodeStubArguments arguments(this, ChangeInt32ToIntPtr(argc));
Node* const receiver = arguments.GetReceiver();
// From now on use word-size argc value.
- argc = arguments.GetLength();
+ argc = arguments.GetLength(INTPTR_PARAMETERS);
VARIABLE(var_search_string, MachineRepresentation::kTagged);
VARIABLE(var_position, MachineRepresentation::kTagged);
@@ -1217,16 +1192,17 @@ TF_BUILTIN(StringPrototypeRepeat, StringBuiltinsAssembler) {
Label invalid_count(this), invalid_string_length(this),
return_emptystring(this);
- Node* const context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Node* const receiver = Parameter(Descriptor::kReceiver);
- Node* const count = Parameter(Descriptor::kCount);
+ TNode<Object> count = CAST(Parameter(Descriptor::kCount));
Node* const string =
ToThisString(context, receiver, "String.prototype.repeat");
Node* const is_stringempty =
SmiEqual(LoadStringLengthAsSmi(string), SmiConstant(0));
- VARIABLE(var_count, MachineRepresentation::kTagged,
- ToInteger(context, count, CodeStubAssembler::kTruncateMinusZero));
+ VARIABLE(
+ var_count, MachineRepresentation::kTagged,
+ ToInteger_Inline(context, count, CodeStubAssembler::kTruncateMinusZero));
// Verifies a valid count and takes a fast path when the result will be an
// empty string.
@@ -1713,8 +1689,8 @@ TF_BUILTIN(StringPrototypeSlice, StringBuiltinsAssembler) {
CodeStubArguments args(this, argc);
Node* const receiver = args.GetReceiver();
Node* const start = args.GetOptionalArgumentValue(kStart);
- Node* const end = args.GetOptionalArgumentValue(kEnd);
- Node* const context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Object> end = CAST(args.GetOptionalArgumentValue(kEnd));
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
TNode<Smi> const smi_zero = SmiConstant(0);
@@ -1737,7 +1713,7 @@ TF_BUILTIN(StringPrototypeSlice, StringBuiltinsAssembler) {
// else let intEnd be ? ToInteger(end).
Node* const end_int =
- ToInteger(context, end, CodeStubAssembler::kTruncateMinusZero);
+ ToInteger_Inline(context, end, CodeStubAssembler::kTruncateMinusZero);
// 7. If intEnd < 0, let to be max(len + intEnd, 0);
// otherwise let to be min(intEnd, len).
@@ -1893,8 +1869,8 @@ TF_BUILTIN(StringPrototypeSubstr, StringBuiltinsAssembler) {
Node* const receiver = args.GetReceiver();
Node* const start = args.GetOptionalArgumentValue(kStartArg);
- Node* const length = args.GetOptionalArgumentValue(kLengthArg);
- Node* const context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Object> length = CAST(args.GetOptionalArgumentValue(kLengthArg));
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Label out(this);
@@ -1925,8 +1901,8 @@ TF_BUILTIN(StringPrototypeSubstr, StringBuiltinsAssembler) {
Goto(&if_issmi);
BIND(&if_isnotundefined);
- var_length =
- ToInteger(context, length, CodeStubAssembler::kTruncateMinusZero);
+ var_length = ToInteger_Inline(context, length,
+ CodeStubAssembler::kTruncateMinusZero);
}
TVARIABLE(Smi, var_result_length);
@@ -1984,7 +1960,7 @@ TNode<Smi> StringBuiltinsAssembler::ToSmiBetweenZeroAnd(
TVARIABLE(Smi, var_result);
TNode<Object> const value_int =
- this->ToInteger(context, value, CodeStubAssembler::kTruncateMinusZero);
+ ToInteger_Inline(context, value, CodeStubAssembler::kTruncateMinusZero);
Label if_issmi(this), if_isnotsmi(this, Label::kDeferred);
Branch(TaggedIsSmi(value_int), &if_issmi, &if_isnotsmi);
@@ -2296,14 +2272,14 @@ TF_BUILTIN(StringPrototypeIterator, CodeStubAssembler) {
// Return the |word32| codepoint at {index}. Supports SeqStrings and
// ExternalStrings.
-TNode<Uint32T> StringBuiltinsAssembler::LoadSurrogatePairAt(
+TNode<Int32T> StringBuiltinsAssembler::LoadSurrogatePairAt(
SloppyTNode<String> string, SloppyTNode<IntPtrT> length,
SloppyTNode<IntPtrT> index, UnicodeEncoding encoding) {
Label handle_surrogate_pair(this), return_result(this);
- TVARIABLE(Uint32T, var_result);
- TVARIABLE(Uint32T, var_trail);
+ TVARIABLE(Int32T, var_result);
+ TVARIABLE(Int32T, var_trail);
var_result = StringCharCodeAt(string, index);
- var_trail = Unsigned(Int32Constant(0));
+ var_trail = Int32Constant(0);
GotoIf(Word32NotEqual(Word32And(var_result, Int32Constant(0xFC00)),
Int32Constant(0xD800)),
@@ -2318,8 +2294,8 @@ TNode<Uint32T> StringBuiltinsAssembler::LoadSurrogatePairAt(
BIND(&handle_surrogate_pair);
{
- TNode<Uint32T> lead = var_result;
- TNode<Uint32T> trail = var_trail;
+ TNode<Int32T> lead = var_result;
+ TNode<Int32T> trail = var_trail;
// Check that this path is only taken if a surrogate pair is found
CSA_SLOW_ASSERT(this,
@@ -2331,7 +2307,7 @@ TNode<Uint32T> StringBuiltinsAssembler::LoadSurrogatePairAt(
switch (encoding) {
case UnicodeEncoding::UTF16:
- var_result = Unsigned(Word32Or(
+ var_result = Signed(Word32Or(
// Need to swap the order for big-endian platforms
#if V8_TARGET_BIG_ENDIAN
Word32Shl(lead, Int32Constant(16)), trail));
@@ -2347,8 +2323,8 @@ TNode<Uint32T> StringBuiltinsAssembler::LoadSurrogatePairAt(
Int32Constant(0x10000 - (0xD800 << 10) - 0xDC00);
// (lead << 10) + trail + SURROGATE_OFFSET
- var_result = Unsigned(Int32Add(Word32Shl(lead, Int32Constant(10)),
- Int32Add(trail, surrogate_offset)));
+ var_result = Signed(Int32Add(Word32Shl(lead, Int32Constant(10)),
+ Int32Add(trail, surrogate_offset)));
break;
}
}
@@ -2387,8 +2363,8 @@ TF_BUILTIN(StringIteratorPrototypeNext, StringBuiltinsAssembler) {
BIND(&next_codepoint);
{
UnicodeEncoding encoding = UnicodeEncoding::UTF16;
- Node* ch = LoadSurrogatePairAt(string, length, position, encoding);
- Node* value = StringFromCodePoint(ch, encoding);
+ TNode<Int32T> ch = LoadSurrogatePairAt(string, length, position, encoding);
+ TNode<String> value = StringFromCodePoint(ch, encoding);
var_value.Bind(value);
TNode<IntPtrT> length = LoadStringLengthAsWord(value);
StoreObjectFieldNoWriteBarrier(iterator, JSStringIterator::kNextIndexOffset,
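(GenerateStringAt factors the three String.prototype accessors down to an accessor callback plus an out-of-bounds default, which is exactly what differs between them. A compact JS sketch:)

const s = 'a\u{1F600}';          // length 3: 'a' plus a surrogate pair
console.log(s.charAt(5));        // ''        -- default_return for charAt
console.log(s.charCodeAt(5));    // NaN       -- default_return for charCodeAt
console.log(s.codePointAt(5));   // undefined -- default_return for codePointAt
console.log(s.codePointAt(1));   // 128512    -- LoadSurrogatePairAt combines the pair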
diff --git a/deps/v8/src/builtins/builtins-string-gen.h b/deps/v8/src/builtins/builtins-string-gen.h
index f1111b3465..1bd5429fdb 100644
--- a/deps/v8/src/builtins/builtins-string-gen.h
+++ b/deps/v8/src/builtins/builtins-string-gen.h
@@ -57,10 +57,19 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
SloppyTNode<Object> value,
SloppyTNode<Smi> limit);
- TNode<Uint32T> LoadSurrogatePairAt(SloppyTNode<String> string,
- SloppyTNode<IntPtrT> length,
- SloppyTNode<IntPtrT> index,
- UnicodeEncoding encoding);
+ typedef std::function<TNode<Object>(
+ TNode<String> receiver, TNode<IntPtrT> length, TNode<IntPtrT> index)>
+ StringAtAccessor;
+
+ void GenerateStringAt(const char* method_name, TNode<Context> context,
+ Node* receiver, TNode<Object> maybe_position,
+ TNode<Object> default_return,
+ StringAtAccessor accessor);
+
+ TNode<Int32T> LoadSurrogatePairAt(SloppyTNode<String> string,
+ SloppyTNode<IntPtrT> length,
+ SloppyTNode<IntPtrT> index,
+ UnicodeEncoding encoding);
void StringIndexOf(Node* const subject_string, Node* const search_string,
Node* const position, std::function<void(Node*)> f_return);
diff --git a/deps/v8/src/builtins/builtins-string.cc b/deps/v8/src/builtins/builtins-string.cc
index 14a74afb6d..d2e447538d 100644
--- a/deps/v8/src/builtins/builtins-string.cc
+++ b/deps/v8/src/builtins/builtins-string.cc
@@ -324,8 +324,8 @@ namespace {
inline bool ToUpperOverflows(uc32 character) {
// y with umlauts and the micro sign are the only characters that stop
// fitting into one-byte when converting to uppercase.
- static const uc32 yuml_code = 0xff;
- static const uc32 micro_code = 0xb5;
+ static const uc32 yuml_code = 0xFF;
+ static const uc32 micro_code = 0xB5;
return (character == yuml_code || character == micro_code);
}
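(For reference, the two code points named in ToUpperOverflows are the Latin-1 characters whose uppercase forms leave the one-byte range:)

console.log('\u00FF'.toUpperCase()); // 'Ÿ' (U+0178)
console.log('\u00B5'.toUpperCase()); // 'Μ' (U+039C, Greek capital Mu)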
diff --git a/deps/v8/src/builtins/builtins-typedarray-gen.cc b/deps/v8/src/builtins/builtins-typedarray-gen.cc
index df89d1ced3..b830a8597d 100644
--- a/deps/v8/src/builtins/builtins-typedarray-gen.cc
+++ b/deps/v8/src/builtins/builtins-typedarray-gen.cc
@@ -36,15 +36,17 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
const char* method_name,
IterationKind iteration_kind);
- void SetupTypedArray(Node* holder, Node* length, Node* byte_offset,
- Node* byte_length);
- void AttachBuffer(Node* holder, Node* buffer, Node* map, Node* length,
- Node* byte_offset);
-
- Node* LoadMapForType(Node* array);
- Node* CalculateExternalPointer(Node* backing_store, Node* byte_offset);
+ void SetupTypedArray(TNode<JSTypedArray> holder, TNode<Smi> length,
+ TNode<Number> byte_offset, TNode<Number> byte_length);
+ void AttachBuffer(TNode<JSTypedArray> holder, TNode<JSArrayBuffer> buffer,
+ TNode<Map> map, TNode<Smi> length,
+ TNode<Number> byte_offset);
+
+ TNode<Map> LoadMapForType(TNode<JSTypedArray> array);
+ TNode<UintPtrT> CalculateExternalPointer(TNode<UintPtrT> backing_store,
+ TNode<Number> byte_offset);
Node* LoadDataPtr(Node* typed_array);
- Node* ByteLengthIsValid(Node* byte_length);
+ TNode<BoolT> ByteLengthIsValid(TNode<Number> byte_length);
// Returns true if kind is either UINT8_ELEMENTS or UINT8_CLAMPED_ELEMENTS.
TNode<Word32T> IsUint8ElementsKind(TNode<Word32T> kind);
@@ -78,9 +80,8 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
TNode<IntPtrT> offset);
};
-Node* TypedArrayBuiltinsAssembler::LoadMapForType(Node* array) {
- CSA_ASSERT(this, IsJSTypedArray(array));
-
+TNode<Map> TypedArrayBuiltinsAssembler::LoadMapForType(
+ TNode<JSTypedArray> array) {
Label unreachable(this), done(this);
Label uint8_elements(this), uint8_clamped_elements(this), int8_elements(this),
uint16_elements(this), int16_elements(this), uint32_elements(this),
@@ -99,10 +100,10 @@ Node* TypedArrayBuiltinsAssembler::LoadMapForType(Node* array) {
DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kinds));
DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kind_labels));
- VARIABLE(var_typed_map, MachineRepresentation::kTagged);
+ TVARIABLE(Map, var_typed_map);
- Node* array_map = LoadMap(array);
- Node* elements_kind = LoadMapElementsKind(array_map);
+ TNode<Map> array_map = LoadMap(array);
+ TNode<Int32T> elements_kind = LoadMapElementsKind(array_map);
Switch(elements_kind, &unreachable, elements_kinds, elements_kind_labels,
kTypedElementsKindCount);
@@ -113,7 +114,7 @@ Node* TypedArrayBuiltinsAssembler::LoadMapForType(Node* array) {
ExternalArrayType type =
isolate()->factory()->GetArrayTypeFromElementsKind(kind);
Handle<Map> map(isolate()->heap()->MapForFixedTypedArray(type));
- var_typed_map.Bind(HeapConstant(map));
+ var_typed_map = HeapConstant(map);
Goto(&done);
}
}
@@ -121,7 +122,7 @@ Node* TypedArrayBuiltinsAssembler::LoadMapForType(Node* array) {
BIND(&unreachable);
{ Unreachable(); }
BIND(&done);
- return var_typed_map.value();
+ return var_typed_map;
}
// The byte_offset can be higher than Smi range, in which case to perform the
@@ -131,10 +132,10 @@ Node* TypedArrayBuiltinsAssembler::LoadMapForType(Node* array) {
// can't allocate an array bigger than our 32-bit arithmetic range anyway. 64
// bit platforms could theoretically have an offset up to 2^35 - 1, so we may
// need to convert the float heap number to an intptr.
-Node* TypedArrayBuiltinsAssembler::CalculateExternalPointer(Node* backing_store,
- Node* byte_offset) {
- return IntPtrAdd(backing_store,
- ChangeNonnegativeNumberToUintPtr(byte_offset));
+TNode<UintPtrT> TypedArrayBuiltinsAssembler::CalculateExternalPointer(
+ TNode<UintPtrT> backing_store, TNode<Number> byte_offset) {
+ return Unsigned(
+ IntPtrAdd(backing_store, ChangeNonnegativeNumberToUintPtr(byte_offset)));
}
// Setup the TypedArray which is under construction.
@@ -142,14 +143,10 @@ Node* TypedArrayBuiltinsAssembler::CalculateExternalPointer(Node* backing_store,
// - Set the byte_offset.
// - Set the byte_length.
// - Set EmbedderFields to 0.
-void TypedArrayBuiltinsAssembler::SetupTypedArray(Node* holder, Node* length,
- Node* byte_offset,
- Node* byte_length) {
- CSA_ASSERT(this, IsJSTypedArray(holder));
- CSA_ASSERT(this, TaggedIsSmi(length));
- CSA_ASSERT(this, IsNumber(byte_offset));
- CSA_ASSERT(this, IsNumber(byte_length));
-
+void TypedArrayBuiltinsAssembler::SetupTypedArray(TNode<JSTypedArray> holder,
+ TNode<Smi> length,
+ TNode<Number> byte_offset,
+ TNode<Number> byte_length) {
StoreObjectField(holder, JSTypedArray::kLengthOffset, length);
StoreObjectField(holder, JSArrayBufferView::kByteOffsetOffset, byte_offset);
StoreObjectField(holder, JSArrayBufferView::kByteLengthOffset, byte_length);
@@ -160,15 +157,11 @@ void TypedArrayBuiltinsAssembler::SetupTypedArray(Node* holder, Node* length,
}
// Attach an off-heap buffer to a TypedArray.
-void TypedArrayBuiltinsAssembler::AttachBuffer(Node* holder, Node* buffer,
- Node* map, Node* length,
- Node* byte_offset) {
- CSA_ASSERT(this, IsJSTypedArray(holder));
- CSA_ASSERT(this, IsJSArrayBuffer(buffer));
- CSA_ASSERT(this, IsMap(map));
- CSA_ASSERT(this, TaggedIsSmi(length));
- CSA_ASSERT(this, IsNumber(byte_offset));
-
+void TypedArrayBuiltinsAssembler::AttachBuffer(TNode<JSTypedArray> holder,
+ TNode<JSArrayBuffer> buffer,
+ TNode<Map> map,
+ TNode<Smi> length,
+ TNode<Number> byte_offset) {
StoreObjectField(holder, JSArrayBufferView::kBufferOffset, buffer);
Node* elements = Allocate(FixedTypedArrayBase::kHeaderSize);
@@ -177,10 +170,11 @@ void TypedArrayBuiltinsAssembler::AttachBuffer(Node* holder, Node* buffer,
StoreObjectFieldNoWriteBarrier(
elements, FixedTypedArrayBase::kBasePointerOffset, SmiConstant(0));
- Node* backing_store = LoadObjectField(
- buffer, JSArrayBuffer::kBackingStoreOffset, MachineType::Pointer());
+ TNode<UintPtrT> backing_store =
+ LoadObjectField<UintPtrT>(buffer, JSArrayBuffer::kBackingStoreOffset);
- Node* external_pointer = CalculateExternalPointer(backing_store, byte_offset);
+ TNode<UintPtrT> external_pointer =
+ CalculateExternalPointer(backing_store, byte_offset);
StoreObjectFieldNoWriteBarrier(
elements, FixedTypedArrayBase::kExternalPointerOffset, external_pointer,
MachineType::PointerRepresentation());
@@ -189,23 +183,16 @@ void TypedArrayBuiltinsAssembler::AttachBuffer(Node* holder, Node* buffer,
}
TF_BUILTIN(TypedArrayInitializeWithBuffer, TypedArrayBuiltinsAssembler) {
- Node* holder = Parameter(Descriptor::kHolder);
- Node* length = Parameter(Descriptor::kLength);
- Node* buffer = Parameter(Descriptor::kBuffer);
- Node* element_size = Parameter(Descriptor::kElementSize);
- Node* byte_offset = Parameter(Descriptor::kByteOffset);
-
- CSA_ASSERT(this, IsJSTypedArray(holder));
- CSA_ASSERT(this, TaggedIsSmi(length));
- CSA_ASSERT(this, IsJSArrayBuffer(buffer));
- CSA_ASSERT(this, TaggedIsSmi(element_size));
- CSA_ASSERT(this, IsNumber(byte_offset));
+ TNode<JSTypedArray> holder = CAST(Parameter(Descriptor::kHolder));
+ TNode<Smi> length = CAST(Parameter(Descriptor::kLength));
+ TNode<JSArrayBuffer> buffer = CAST(Parameter(Descriptor::kBuffer));
+ TNode<Smi> element_size = CAST(Parameter(Descriptor::kElementSize));
+ TNode<Number> byte_offset = CAST(Parameter(Descriptor::kByteOffset));
- Node* fixed_typed_map = LoadMapForType(holder);
+ TNode<Map> fixed_typed_map = LoadMapForType(holder);
// SmiMul returns a heap number in case of Smi overflow.
- Node* byte_length = SmiMul(length, element_size);
- CSA_ASSERT(this, IsNumber(byte_length));
+ TNode<Number> byte_length = SmiMul(length, element_size);
SetupTypedArray(holder, length, byte_offset, byte_length);
AttachBuffer(holder, buffer, fixed_typed_map, length, byte_offset);
@@ -213,18 +200,17 @@ TF_BUILTIN(TypedArrayInitializeWithBuffer, TypedArrayBuiltinsAssembler) {
}
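// Why byte_length above is TNode<Number> rather than TNode<Smi> (worked
// example with hypothetical values): with a 32-bit Smi maximum of 2^30 - 1,
//   length = 2^28, element_size = 8  ->  2^28 * 8 = 2^31,
// which no longer fits in a Smi, so SmiMul materializes the HeapNumber
// 2147483648 instead of a tagged small integer.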
TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
- Node* holder = Parameter(Descriptor::kHolder);
- Node* length = Parameter(Descriptor::kLength);
- Node* element_size = Parameter(Descriptor::kElementSize);
+ TNode<JSTypedArray> holder = CAST(Parameter(Descriptor::kHolder));
+ TNode<Smi> length = CAST(Parameter(Descriptor::kLength));
+ TNode<Smi> element_size = CAST(Parameter(Descriptor::kElementSize));
Node* initialize = Parameter(Descriptor::kInitialize);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- CSA_ASSERT(this, IsJSTypedArray(holder));
CSA_ASSERT(this, TaggedIsPositiveSmi(length));
CSA_ASSERT(this, TaggedIsPositiveSmi(element_size));
CSA_ASSERT(this, IsBoolean(initialize));
- Node* byte_offset = SmiConstant(0);
+ TNode<Smi> byte_offset = SmiConstant(0);
static const int32_t fta_base_data_offset =
FixedTypedArrayBase::kDataOffset - kHeapObjectTag;
@@ -235,16 +221,16 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
VARIABLE(var_total_size, MachineType::PointerRepresentation());
// SmiMul returns a heap number in case of Smi overflow.
- Node* byte_length = SmiMul(length, element_size);
- CSA_ASSERT(this, IsNumber(byte_length));
+ TNode<Number> byte_length = SmiMul(length, element_size);
SetupTypedArray(holder, length, byte_offset, byte_length);
- Node* fixed_typed_map = LoadMapForType(holder);
+ TNode<Map> fixed_typed_map = LoadMapForType(holder);
GotoIf(TaggedIsNotSmi(byte_length), &allocate_off_heap);
GotoIf(
SmiGreaterThan(byte_length, SmiConstant(V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP)),
&allocate_off_heap);
+ TNode<IntPtrT> word_byte_length = SmiToWord(CAST(byte_length));
Goto(&allocate_on_heap);
BIND(&allocate_on_heap);
@@ -297,7 +283,7 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
DCHECK_EQ(0, FixedTypedArrayBase::kHeaderSize & kObjectAlignmentMask);
Node* aligned_header_size =
IntPtrConstant(FixedTypedArrayBase::kHeaderSize + kObjectAlignmentMask);
- Node* size = IntPtrAdd(SmiToWord(byte_length), aligned_header_size);
+ Node* size = IntPtrAdd(word_byte_length, aligned_header_size);
var_total_size.Bind(WordAnd(size, IntPtrConstant(~kObjectAlignmentMask)));
Goto(&allocate_elements);
}
@@ -305,7 +291,7 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
BIND(&aligned);
{
Node* header_size = IntPtrConstant(FixedTypedArrayBase::kHeaderSize);
- var_total_size.Bind(IntPtrAdd(SmiToWord(byte_length), header_size));
+ var_total_size.Bind(IntPtrAdd(word_byte_length, header_size));
Goto(&allocate_elements);
}
@@ -344,11 +330,11 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
ExternalConstant(ExternalReference::libc_memset_function(isolate()));
CallCFunction3(MachineType::AnyTagged(), MachineType::Pointer(),
MachineType::IntPtr(), MachineType::UintPtr(), memset,
- backing_store, IntPtrConstant(0), SmiToWord(byte_length));
+ backing_store, IntPtrConstant(0), word_byte_length);
Goto(&done);
}
- VARIABLE(var_buffer, MachineRepresentation::kTagged);
+ TVARIABLE(JSArrayBuffer, var_buffer);
BIND(&allocate_off_heap);
{
@@ -356,8 +342,8 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
Node* buffer_constructor = LoadContextElement(
LoadNativeContext(context), Context::ARRAY_BUFFER_FUN_INDEX);
- var_buffer.Bind(ConstructJS(CodeFactory::Construct(isolate()), context,
- buffer_constructor, byte_length));
+ var_buffer = CAST(ConstructJS(CodeFactory::Construct(isolate()), context,
+ buffer_constructor, byte_length));
Goto(&attach_buffer);
}
@@ -365,16 +351,15 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
{
Node* buffer_constructor_noinit = LoadContextElement(
LoadNativeContext(context), Context::ARRAY_BUFFER_NOINIT_FUN_INDEX);
- var_buffer.Bind(CallJS(CodeFactory::Call(isolate()), context,
- buffer_constructor_noinit, UndefinedConstant(),
- byte_length));
+ var_buffer = CAST(CallJS(CodeFactory::Call(isolate()), context,
+ buffer_constructor_noinit, UndefinedConstant(),
+ byte_length));
Goto(&attach_buffer);
}
BIND(&attach_buffer);
{
- AttachBuffer(holder, var_buffer.value(), fixed_typed_map, length,
- byte_offset);
+ AttachBuffer(holder, var_buffer, fixed_typed_map, length, byte_offset);
Goto(&done);
}
@@ -385,18 +370,18 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
// ES6 #sec-typedarray-length
TF_BUILTIN(TypedArrayConstructByLength, TypedArrayBuiltinsAssembler) {
Node* holder = Parameter(Descriptor::kHolder);
- Node* length = Parameter(Descriptor::kLength);
- Node* element_size = Parameter(Descriptor::kElementSize);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Object> maybe_length = CAST(Parameter(Descriptor::kLength));
+ TNode<Object> element_size = CAST(Parameter(Descriptor::kElementSize));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
CSA_ASSERT(this, IsJSTypedArray(holder));
CSA_ASSERT(this, TaggedIsPositiveSmi(element_size));
- Node* initialize = TrueConstant();
-
Label invalid_length(this);
- length = ToInteger(context, length, CodeStubAssembler::kTruncateMinusZero);
+ TNode<Number> length = ToInteger_Inline(
+ context, maybe_length, CodeStubAssembler::kTruncateMinusZero);
+
// The maximum length of a TypedArray is MaxSmi().
// Note: this is not per spec, but rather a constraint of our current
// representation (which uses smi's).
@@ -404,7 +389,7 @@ TF_BUILTIN(TypedArrayConstructByLength, TypedArrayBuiltinsAssembler) {
GotoIf(SmiLessThan(length, SmiConstant(0)), &invalid_length);
CallBuiltin(Builtins::kTypedArrayInitialize, context, holder, length,
- element_size, initialize);
+ element_size, TrueConstant());
Return(UndefinedConstant());
BIND(&invalid_length);
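// Concretely (illustrative note, not part of the patch): MaxSmi() is
// 2^30 - 1 on 32-bit targets and 2^31 - 1 on 64-bit targets, both far below
// the spec's 2^53 - 1 index limit, so a request such as 2^32 elements takes
// the invalid_length path here.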
@@ -419,10 +404,10 @@ TF_BUILTIN(TypedArrayConstructByLength, TypedArrayBuiltinsAssembler) {
TF_BUILTIN(TypedArrayConstructByArrayBuffer, TypedArrayBuiltinsAssembler) {
Node* holder = Parameter(Descriptor::kHolder);
Node* buffer = Parameter(Descriptor::kBuffer);
- Node* byte_offset = Parameter(Descriptor::kByteOffset);
+ TNode<Object> byte_offset = CAST(Parameter(Descriptor::kByteOffset));
Node* length = Parameter(Descriptor::kLength);
Node* element_size = Parameter(Descriptor::kElementSize);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
CSA_ASSERT(this, IsJSTypedArray(holder));
CSA_ASSERT(this, IsJSArrayBuffer(buffer));
@@ -440,8 +425,8 @@ TF_BUILTIN(TypedArrayConstructByArrayBuffer, TypedArrayBuiltinsAssembler) {
GotoIf(IsUndefined(byte_offset), &check_length);
- offset.Bind(
- ToInteger(context, byte_offset, CodeStubAssembler::kTruncateMinusZero));
+ offset.Bind(ToInteger_Inline(context, byte_offset,
+ CodeStubAssembler::kTruncateMinusZero));
Branch(TaggedIsSmi(offset.value()), &offset_is_smi, &offset_not_smi);
// Check that the offset is a multiple of the element size.
@@ -569,25 +554,27 @@ Node* TypedArrayBuiltinsAssembler::LoadDataPtr(Node* typed_array) {
return IntPtrAdd(base_pointer, external_pointer);
}
-Node* TypedArrayBuiltinsAssembler::ByteLengthIsValid(Node* byte_length) {
+TNode<BoolT> TypedArrayBuiltinsAssembler::ByteLengthIsValid(
+ TNode<Number> byte_length) {
Label smi(this), done(this);
- VARIABLE(is_valid, MachineRepresentation::kWord32);
+ TVARIABLE(BoolT, is_valid);
GotoIf(TaggedIsSmi(byte_length), &smi);
- CSA_ASSERT(this, IsHeapNumber(byte_length));
- Node* float_value = LoadHeapNumberValue(byte_length);
- Node* max_byte_length_double =
+ TNode<Float64T> float_value = LoadHeapNumberValue(CAST(byte_length));
+ TNode<Float64T> max_byte_length_double =
Float64Constant(FixedTypedArrayBase::kMaxByteLength);
- is_valid.Bind(Float64LessThanOrEqual(float_value, max_byte_length_double));
+ is_valid = Float64LessThanOrEqual(float_value, max_byte_length_double);
Goto(&done);
BIND(&smi);
- Node* max_byte_length = IntPtrConstant(FixedTypedArrayBase::kMaxByteLength);
- is_valid.Bind(UintPtrLessThanOrEqual(SmiUntag(byte_length), max_byte_length));
+ TNode<IntPtrT> max_byte_length =
+ IntPtrConstant(FixedTypedArrayBase::kMaxByteLength);
+ is_valid =
+ UintPtrLessThanOrEqual(SmiUntag(CAST(byte_length)), max_byte_length);
Goto(&done);
BIND(&done);
- return is_valid.value();
+ return is_valid;
}
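// Semantically this is a single unsigned bound check; the Smi/HeapNumber
// split above exists only because byte_length arrives as a tagged Number.
// A plain C++ sketch of the same predicate (names are illustrative):
#include <cstdint>
bool ByteLengthIsValidSketch(uint64_t byte_length, uint64_t max_byte_length) {
  // max_byte_length stands in for FixedTypedArrayBase::kMaxByteLength.
  return byte_length <= max_byte_length;
}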
TF_BUILTIN(TypedArrayConstructByArrayLike, TypedArrayBuiltinsAssembler) {
@@ -611,8 +598,8 @@ TF_BUILTIN(TypedArrayConstructByArrayLike, TypedArrayBuiltinsAssembler) {
Return(UndefinedConstant());
BIND(&fill);
- Node* holder_kind = LoadMapElementsKind(LoadMap(holder));
- Node* source_kind = LoadMapElementsKind(LoadMap(array_like));
+ TNode<Int32T> holder_kind = LoadMapElementsKind(LoadMap(holder));
+ TNode<Int32T> source_kind = LoadMapElementsKind(LoadMap(array_like));
GotoIf(Word32Equal(holder_kind, source_kind), &fast_copy);
// Copy using the elements accessor.
@@ -632,9 +619,10 @@ TF_BUILTIN(TypedArrayConstructByArrayLike, TypedArrayBuiltinsAssembler) {
array_like, JSTypedArray::kBufferOffset)),
Int32Constant(0)));
- Node* byte_length = SmiMul(length, element_size);
+ TNode<Number> byte_length = SmiMul(length, element_size);
CSA_ASSERT(this, ByteLengthIsValid(byte_length));
- Node* byte_length_intptr = ChangeNonnegativeNumberToUintPtr(byte_length);
+ TNode<UintPtrT> byte_length_intptr =
+ ChangeNonnegativeNumberToUintPtr(byte_length);
CSA_ASSERT(this, UintPtrLessThanOrEqual(
byte_length_intptr,
IntPtrConstant(FixedTypedArrayBase::kMaxByteLength)));
@@ -831,24 +819,9 @@ void TypedArrayBuiltinsAssembler::SetTypedArraySource(
BIND(&fast_c_call);
{
- // Overlapping backing stores of different element kinds are handled in
- // runtime. We're a bit conservative here and bail to runtime if ranges
- // overlap and element kinds differ.
-
- TNode<IntPtrT> target_byte_length =
- IntPtrMul(target_length, target_el_size);
CSA_ASSERT(
- this, UintPtrGreaterThanOrEqual(target_byte_length, IntPtrConstant(0)));
-
- TNode<IntPtrT> target_data_end_ptr =
- IntPtrAdd(target_data_ptr, target_byte_length);
- TNode<IntPtrT> source_data_end_ptr =
- IntPtrAdd(source_data_ptr, source_byte_length);
-
- GotoIfNot(
- Word32Or(UintPtrLessThanOrEqual(target_data_end_ptr, source_data_ptr),
- UintPtrLessThanOrEqual(source_data_end_ptr, target_data_ptr)),
- call_runtime);
+ this, UintPtrGreaterThanOrEqual(
+ IntPtrMul(target_length, target_el_size), IntPtrConstant(0)));
TNode<IntPtrT> source_length =
LoadAndUntagObjectField(source, JSTypedArray::kLengthOffset);
@@ -959,8 +932,8 @@ TF_BUILTIN(TypedArrayPrototypeSet, TypedArrayBuiltinsAssembler) {
// Normalize offset argument (using ToInteger) and handle heap number cases.
TNode<Object> offset = args.GetOptionalArgumentValue(1, SmiConstant(0));
- TNode<Number> offset_num = ToInteger(context, offset, kTruncateMinusZero);
- CSA_ASSERT(this, IsNumberNormalized(offset_num));
+ TNode<Number> offset_num =
+ ToInteger_Inline(context, offset, kTruncateMinusZero);
// Since ToInteger always returns a Smi if the given value is within Smi
// range, and the only corner case of -0.0 has already been truncated to 0.0,
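// The normalization contract relied on here, by example (illustrative
// only):
//   ToInteger(-0.0, kTruncateMinusZero)  ->  Smi 0, never HeapNumber -0.0
//   ToInteger(2.9)                       ->  Smi 2 (truncation toward zero)
//   ToInteger(2^31)                      ->  HeapNumber (outside Smi range)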
diff --git a/deps/v8/src/builtins/builtins-utils-gen.h b/deps/v8/src/builtins/builtins-utils-gen.h
index f328268288..6af5eff357 100644
--- a/deps/v8/src/builtins/builtins-utils-gen.h
+++ b/deps/v8/src/builtins/builtins-utils-gen.h
@@ -47,6 +47,9 @@ class CodeAssemblerState;
void Builtins::Generate_##Name(compiler::CodeAssemblerState* state) { \
Name##Assembler assembler(state); \
state->SetInitialDebugInformation(#Name, __FILE__, __LINE__); \
+ if (Builtins::KindOf(Builtins::k##Name) == Builtins::TFJ) { \
+ assembler.PerformStackCheck(assembler.GetJSContextParameter()); \
+ } \
assembler.Generate##Name##Impl(); \
} \
void Name##Assembler::Generate##Name##Impl()
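// Roughly what the injected check amounts to (sketch, not the literal CSA
// expansion; PerformStackCheck is assumed to probe the stack limit and call
// into the runtime on overflow):
//   if (sp < stack_limit) CallRuntime(Runtime::kStackGuard, context);
// With this, JS-linkage (TFJ) builtins raise a catchable stack-overflow
// RangeError under deep recursion instead of overflowing the native stack.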
diff --git a/deps/v8/src/builtins/builtins-utils.h b/deps/v8/src/builtins/builtins-utils.h
index 27199c8462..3493e776b6 100644
--- a/deps/v8/src/builtins/builtins-utils.h
+++ b/deps/v8/src/builtins/builtins-utils.h
@@ -85,7 +85,8 @@ class BuiltinArguments : public Arguments {
V8_NOINLINE static Object* Builtin_Impl_Stats_##name( \
int args_length, Object** args_object, Isolate* isolate) { \
BuiltinArguments args(args_length, args_object); \
- RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Builtin_##name); \
+ RuntimeCallTimerScope timer(isolate, \
+ RuntimeCallCounterId::kBuiltin_##name); \
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"), \
"V8.Builtin_" #name); \
return Builtin_Impl_##name(args, isolate); \
diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc
index 55fc1c8cd8..dc175e50b7 100644
--- a/deps/v8/src/builtins/builtins.cc
+++ b/deps/v8/src/builtins/builtins.cc
@@ -172,11 +172,25 @@ Callable Builtins::CallableFor(Isolate* isolate, Name name) {
#undef CASE_OTHER
case kArrayFilterLoopEagerDeoptContinuation:
case kArrayFilterLoopLazyDeoptContinuation:
+ case kArrayEveryLoopEagerDeoptContinuation:
+ case kArrayEveryLoopLazyDeoptContinuation:
+ case kArrayFindIndexLoopAfterCallbackLazyDeoptContinuation:
+ case kArrayFindIndexLoopEagerDeoptContinuation:
+ case kArrayFindIndexLoopLazyDeoptContinuation:
+ case kArrayFindLoopAfterCallbackLazyDeoptContinuation:
+ case kArrayFindLoopEagerDeoptContinuation:
+ case kArrayFindLoopLazyDeoptContinuation:
case kArrayForEach:
case kArrayForEachLoopEagerDeoptContinuation:
case kArrayForEachLoopLazyDeoptContinuation:
case kArrayMapLoopEagerDeoptContinuation:
case kArrayMapLoopLazyDeoptContinuation:
+ case kArrayReduceLoopEagerDeoptContinuation:
+ case kArrayReduceLoopLazyDeoptContinuation:
+ case kArrayReduceRightLoopEagerDeoptContinuation:
+ case kArrayReduceRightLoopLazyDeoptContinuation:
+ case kArraySomeLoopEagerDeoptContinuation:
+ case kArraySomeLoopLazyDeoptContinuation:
case kConsoleAssert:
return Callable(code, BuiltinDescriptor(isolate));
default:
@@ -213,12 +227,30 @@ bool Builtins::IsLazy(int index) {
// TODO(wasm): Remove wasm builtins once immovability is no longer required.
switch (index) {
case kAbort: // Required by wasm.
+ case kArrayFindLoopEagerDeoptContinuation: // https://crbug.com/v8/6786.
+ case kArrayFindLoopLazyDeoptContinuation: // https://crbug.com/v8/6786.
+ // https://crbug.com/v8/6786.
+ case kArrayFindLoopAfterCallbackLazyDeoptContinuation:
+ // https://crbug.com/v8/6786.
+ case kArrayFindIndexLoopEagerDeoptContinuation:
+ // https://crbug.com/v8/6786.
+ case kArrayFindIndexLoopLazyDeoptContinuation:
+ // https://crbug.com/v8/6786.
+ case kArrayFindIndexLoopAfterCallbackLazyDeoptContinuation:
case kArrayForEachLoopEagerDeoptContinuation: // https://crbug.com/v8/6786.
case kArrayForEachLoopLazyDeoptContinuation: // https://crbug.com/v8/6786.
case kArrayMapLoopEagerDeoptContinuation: // https://crbug.com/v8/6786.
case kArrayMapLoopLazyDeoptContinuation: // https://crbug.com/v8/6786.
+ case kArrayEveryLoopEagerDeoptContinuation: // https://crbug.com/v8/6786.
+ case kArrayEveryLoopLazyDeoptContinuation: // https://crbug.com/v8/6786.
case kArrayFilterLoopEagerDeoptContinuation: // https://crbug.com/v8/6786.
case kArrayFilterLoopLazyDeoptContinuation: // https://crbug.com/v8/6786.
+ case kArrayReduceLoopEagerDeoptContinuation: // https://crbug.com/v8/6786.
+ case kArrayReduceLoopLazyDeoptContinuation: // https://crbug.com/v8/6786.
+ case kArrayReduceRightLoopEagerDeoptContinuation:
+ case kArrayReduceRightLoopLazyDeoptContinuation:
+ case kArraySomeLoopEagerDeoptContinuation: // https://crbug.com/v8/6786.
+ case kArraySomeLoopLazyDeoptContinuation: // https://crbug.com/v8/6786.
case kCheckOptimizationMarker:
case kCompileLazy:
case kDeserializeLazy:
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index 7635bada49..368e6670c1 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -211,13 +211,15 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ Push(esi);
__ Push(ecx);
__ Push(edi);
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
__ Push(edx);
// ----------- S t a t e -------------
// -- sp[0*kPointerSize]: new target
- // -- edi and sp[1*kPointerSize]: constructor function
- // -- sp[2*kPointerSize]: argument count
- // -- sp[3*kPointerSize]: context
+ // -- sp[1*kPointerSize]: padding
+ // -- edi and sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: argument count
+ // -- sp[4*kPointerSize]: context
// -----------------------------------
__ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
@@ -237,10 +239,11 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// ----------- S t a t e -------------
// -- eax: implicit receiver
- // -- Slot 3 / sp[0*kPointerSize]: new target
- // -- Slot 2 / sp[1*kPointerSize]: constructor function
- // -- Slot 1 / sp[2*kPointerSize]: number of arguments (tagged)
- // -- Slot 0 / sp[3*kPointerSize]: context
+ // -- Slot 4 / sp[0*kPointerSize]: new target
+ // -- Slot 3 / sp[1*kPointerSize]: padding
+ // -- Slot 2 / sp[2*kPointerSize]: constructor function
+ // -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged)
+ // -- Slot 0 / sp[4*kPointerSize]: context
// -----------------------------------
// Deoptimizer enters here.
masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
@@ -260,9 +263,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -- edx: new target
// -- sp[0*kPointerSize]: implicit receiver
// -- sp[1*kPointerSize]: implicit receiver
- // -- sp[2*kPointerSize]: constructor function
- // -- sp[3*kPointerSize]: number of arguments (tagged)
- // -- sp[4*kPointerSize]: context
+ // -- sp[2*kPointerSize]: padding
+ // -- sp[3*kPointerSize]: constructor function
+ // -- sp[4*kPointerSize]: number of arguments (tagged)
+ // -- sp[5*kPointerSize]: context
// -----------------------------------
// Restore constructor function and argument count.
@@ -283,9 +287,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -- ecx: counter (tagged)
// -- sp[0*kPointerSize]: implicit receiver
// -- sp[1*kPointerSize]: implicit receiver
- // -- edi and sp[2*kPointerSize]: constructor function
- // -- sp[3*kPointerSize]: number of arguments (tagged)
- // -- sp[4*kPointerSize]: context
+ // -- sp[2*kPointerSize]: padding
+ // -- edi and sp[3*kPointerSize]: constructor function
+ // -- sp[4*kPointerSize]: number of arguments (tagged)
+ // -- sp[5*kPointerSize]: context
// -----------------------------------
__ jmp(&entry, Label::kNear);
__ bind(&loop);
@@ -301,9 +306,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// ----------- S t a t e -------------
// -- eax: constructor result
// -- sp[0*kPointerSize]: implicit receiver
- // -- sp[1*kPointerSize]: constructor function
- // -- sp[2*kPointerSize]: number of arguments
- // -- sp[3*kPointerSize]: context
+ // -- sp[1*kPointerSize]: padding
+ // -- sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments
+ // -- sp[4*kPointerSize]: context
// -----------------------------------
// Store offset of return address for deoptimizer.
@@ -572,7 +578,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kFunctionDataOffset));
__ CmpObjectType(ecx, BYTECODE_ARRAY_TYPE, ecx);
- __ Assert(equal, kMissingBytecodeArray);
+ __ Assert(equal, AbortReason::kMissingBytecodeArray);
}
// Resume (Ignition/TurboFan) generator object.
@@ -694,6 +700,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
__ j(equal, &fallthrough);
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
+ OptimizationMarker::kLogFirstExecution,
+ Runtime::kFunctionFirstExecution);
+ TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent);
TailCallRuntimeIfMarkerEquals(
@@ -708,7 +717,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
__ cmp(
optimized_code_entry,
Immediate(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
- __ Assert(equal, kExpectedOptimizationSentinel);
+ __ Assert(equal, AbortReason::kExpectedOptimizationSentinel);
}
__ jmp(&fallthrough);
}
@@ -791,7 +800,6 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ movzx_b(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0));
__ add(bytecode_size_table,
Immediate(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
- __ jmp(&load_size, Label::kNear);
// Load the size of the current bytecode.
__ bind(&load_size);
@@ -852,7 +860,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ AssertNotSmi(kInterpreterBytecodeArrayRegister);
__ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
eax);
- __ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ Assert(
+ equal,
+ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
// Reset code age.
@@ -1239,7 +1249,9 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ AssertNotSmi(kInterpreterBytecodeArrayRegister);
__ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
ebx);
- __ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ Assert(
+ equal,
+ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
// Get the target bytecode offset from the frame.
@@ -1300,7 +1312,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// The feedback vector must be defined.
if (FLAG_debug_code) {
__ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
- __ Assert(not_equal, BailoutReason::kExpectedFeedbackVector);
+ __ Assert(not_equal, AbortReason::kExpectedFeedbackVector);
}
// Is there an optimization marker or optimized code in the feedback vector?
@@ -1818,9 +1830,11 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
__ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ test(ebx, Immediate(kSmiTagMask));
- __ Assert(not_zero, kUnexpectedInitialMapForInternalArrayFunction);
+ __ Assert(not_zero,
+ AbortReason::kUnexpectedInitialMapForInternalArrayFunction);
__ CmpObjectType(ebx, MAP_TYPE, ecx);
- __ Assert(equal, kUnexpectedInitialMapForInternalArrayFunction);
+ __ Assert(equal,
+ AbortReason::kUnexpectedInitialMapForInternalArrayFunction);
}
// Run the native code for the InternalArray function called as a normal
@@ -1847,9 +1861,9 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
__ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ test(ebx, Immediate(kSmiTagMask));
- __ Assert(not_zero, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(not_zero, AbortReason::kUnexpectedInitialMapForArrayFunction);
__ CmpObjectType(ebx, MAP_TYPE, ecx);
- __ Assert(equal, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(equal, AbortReason::kUnexpectedInitialMapForArrayFunction);
}
// Run the native code for the Array function called as a normal function.
@@ -1875,6 +1889,8 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTagSize == 1);
__ lea(edi, Operand(eax, eax, times_1, kSmiTag));
__ push(edi);
+
+ __ Push(Immediate(0)); // Padding.
}
static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
@@ -1980,7 +1996,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ JumpIfSmi(edx, &new_target_not_constructor, Label::kNear);
__ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
__ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsConstructor));
+ Immediate(Map::IsConstructorBit::kMask));
__ j(not_zero, &new_target_constructor, Label::kNear);
__ bind(&new_target_not_constructor);
{
@@ -2294,7 +2310,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// Check if target is a proxy and call CallProxy external builtin
__ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsCallable));
+ Immediate(Map::IsCallableBit::kMask));
__ j(zero, &non_callable);
// Call CallProxy external builtin
@@ -2389,7 +2405,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Check if target has a [[Construct]] internal method.
__ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsConstructor));
+ Immediate(Map::IsConstructorBit::kMask));
__ j(zero, &non_constructor, Label::kNear);
// Only dispatch to bound functions after checking whether they are
@@ -2464,19 +2480,6 @@ void Builtins::Generate_Abort(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kAbort);
}
-// static
-void Builtins::Generate_AbortJS(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- edx : message as String object
- // -- esp[0] : return address
- // -----------------------------------
- __ PopReturnAddressTo(ecx);
- __ Push(edx);
- __ PushReturnAddressFrom(ecx);
- __ Move(esi, Smi::kZero);
- __ TailCallRuntime(Runtime::kAbortJS);
-}
-
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : actual number of arguments
diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index 167bc1b829..7af02bb32e 100644
--- a/deps/v8/src/builtins/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -110,11 +110,11 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// Initial map for the builtin InternalArray functions should be maps.
__ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
__ SmiTst(a2, t0);
- __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction, t0,
- Operand(zero_reg));
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForInternalArrayFunction,
+ t0, Operand(zero_reg));
__ GetObjectType(a2, a3, t0);
- __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction, t0,
- Operand(MAP_TYPE));
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForInternalArrayFunction,
+ t0, Operand(MAP_TYPE));
}
// Run the native code for the InternalArray function called as a normal
@@ -139,10 +139,10 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
// Initial map for the builtin Array functions should be maps.
__ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
__ SmiTst(a2, t0);
- __ Assert(ne, kUnexpectedInitialMapForArrayFunction1, t0,
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction1, t0,
Operand(zero_reg));
__ GetObjectType(a2, a3, t0);
- __ Assert(eq, kUnexpectedInitialMapForArrayFunction2, t0,
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction2, t0,
Operand(MAP_TYPE));
}
@@ -271,13 +271,16 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// Preserve the incoming parameters on the stack.
__ SmiTag(a0);
- __ Push(cp, a0, a1, a3);
+ __ Push(cp, a0, a1);
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
+ __ Push(a3);
// ----------- S t a t e -------------
// -- sp[0*kPointerSize]: new target
- // -- a1 and sp[1*kPointerSize]: constructor function
- // -- sp[2*kPointerSize]: number of arguments (tagged)
- // -- sp[3*kPointerSize]: context
+ // -- sp[1*kPointerSize]: padding
+ // -- a1 and sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments (tagged)
+ // -- sp[4*kPointerSize]: context
// -----------------------------------
__ lw(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
@@ -298,10 +301,11 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// ----------- S t a t e -------------
// -- v0: receiver
- // -- Slot 3 / sp[0*kPointerSize]: new target
- // -- Slot 2 / sp[1*kPointerSize]: constructor function
- // -- Slot 1 / sp[2*kPointerSize]: number of arguments (tagged)
- // -- Slot 0 / sp[3*kPointerSize]: context
+ // -- Slot 4 / sp[0*kPointerSize]: new target
+ // -- Slot 3 / sp[1*kPointerSize]: padding
+ // -- Slot 2 / sp[2*kPointerSize]: constructor function
+ // -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged)
+ // -- Slot 0 / sp[4*kPointerSize]: context
// -----------------------------------
// Deoptimizer enters here.
masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
@@ -319,9 +323,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -- r3: new target
// -- sp[0*kPointerSize]: implicit receiver
// -- sp[1*kPointerSize]: implicit receiver
- // -- sp[2*kPointerSize]: constructor function
- // -- sp[3*kPointerSize]: number of arguments (tagged)
- // -- sp[4*kPointerSize]: context
+ // -- sp[2*kPointerSize]: padding
+ // -- sp[3*kPointerSize]: constructor function
+ // -- sp[4*kPointerSize]: number of arguments (tagged)
+ // -- sp[5*kPointerSize]: context
// -----------------------------------
// Restore constructor function and argument count.
@@ -342,9 +347,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -- t3: counter
// -- sp[0*kPointerSize]: implicit receiver
// -- sp[1*kPointerSize]: implicit receiver
- // -- a1 and sp[2*kPointerSize]: constructor function
- // -- sp[3*kPointerSize]: number of arguments (tagged)
- // -- sp[4*kPointerSize]: context
+ // -- sp[2*kPointerSize]: padding
+ // -- a1 and sp[3*kPointerSize]: constructor function
+ // -- sp[4*kPointerSize]: number of arguments (tagged)
+ // -- sp[5*kPointerSize]: context
// -----------------------------------
__ jmp(&entry);
__ bind(&loop);
@@ -362,9 +368,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// ----------- S t a t e -------------
// -- v0: constructor result
// -- sp[0*kPointerSize]: implicit receiver
- // -- sp[1*kPointerSize]: constructor function
- // -- sp[2*kPointerSize]: number of arguments
- // -- sp[3*kPointerSize]: context
+ // -- sp[1*kPointerSize]: padding
+ // -- sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments
+ // -- sp[4*kPointerSize]: context
// -----------------------------------
// Store offset of return address for deoptimizer.
@@ -635,7 +642,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
__ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
__ GetObjectType(a3, a3, a3);
- __ Assert(eq, kMissingBytecodeArray, a3, Operand(BYTECODE_ARRAY_TYPE));
+ __ Assert(eq, AbortReason::kMissingBytecodeArray, a3,
+ Operand(BYTECODE_ARRAY_TYPE));
}
// Resume (Ignition/TurboFan) generator object.
@@ -752,6 +760,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Operand(Smi::FromEnum(OptimizationMarker::kNone)));
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
+ OptimizationMarker::kLogFirstExecution,
+ Runtime::kFunctionFirstExecution);
+ TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent);
TailCallRuntimeIfMarkerEquals(
@@ -764,7 +775,8 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// that an interrupt will eventually update the slot with optimized code.
if (FLAG_debug_code) {
__ Assert(
- eq, kExpectedOptimizationSentinel, optimized_code_entry,
+ eq, AbortReason::kExpectedOptimizationSentinel,
+ optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
}
__ jmp(&fallthrough);
@@ -843,7 +855,6 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ lbu(bytecode, MemOperand(scratch2));
__ Addu(bytecode_size_table, bytecode_size_table,
Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
- __ jmp(&load_size);
// Load the size of the current bytecode.
__ bind(&load_size);
@@ -907,11 +918,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
__ SmiTst(kInterpreterBytecodeArrayRegister, t0);
- __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, t0,
- Operand(zero_reg));
+ __ Assert(ne,
+ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
+ t0, Operand(zero_reg));
__ GetObjectType(kInterpreterBytecodeArrayRegister, t0, t0);
- __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, t0,
- Operand(BYTECODE_ARRAY_TYPE));
+ __ Assert(eq,
+ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
+ t0, Operand(BYTECODE_ARRAY_TYPE));
}
// Reset code age.
@@ -1189,11 +1202,13 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
__ SmiTst(kInterpreterBytecodeArrayRegister, at);
- __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, at,
- Operand(zero_reg));
+ __ Assert(ne,
+ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
+ at, Operand(zero_reg));
__ GetObjectType(kInterpreterBytecodeArrayRegister, a1, a1);
- __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, a1,
- Operand(BYTECODE_ARRAY_TYPE));
+ __ Assert(eq,
+ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
+ a1, Operand(BYTECODE_ARRAY_TYPE));
}
// Get the target bytecode offset from the frame.
@@ -1257,7 +1272,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// The feedback vector must be defined.
if (FLAG_debug_code) {
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Assert(ne, BailoutReason::kExpectedFeedbackVector, feedback_vector,
+ __ Assert(ne, AbortReason::kExpectedFeedbackVector, feedback_vector,
Operand(at));
}
@@ -1804,8 +1819,9 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ sll(a0, a0, kSmiTagSize);
__ li(t0, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ MultiPush(a0.bit() | a1.bit() | t0.bit() | fp.bit() | ra.bit());
- __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
- kPointerSize));
+ __ Push(Smi::kZero); // Padding.
+ __ Addu(fp, sp,
+ Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp));
}
static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
@@ -1814,8 +1830,7 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// -----------------------------------
// Get the number of arguments passed (as a smi), tear down the frame and
// then tear down the parameters.
- __ lw(a1, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
- kPointerSize)));
+ __ lw(a1, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ mov(sp, fp);
__ MultiPop(fp.bit() | ra.bit());
__ Lsa(sp, sp, a1, kPointerSizeLog2 - kSmiTagSize);
@@ -1891,7 +1906,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ JumpIfSmi(a3, &new_target_not_constructor);
__ lw(t1, FieldMemOperand(a3, HeapObject::kMapOffset));
__ lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
- __ And(t1, t1, Operand(1 << Map::kIsConstructor));
+ __ And(t1, t1, Operand(Map::IsConstructorBit::kMask));
__ Branch(&new_target_constructor, ne, t1, Operand(zero_reg));
__ bind(&new_target_not_constructor);
{
@@ -2165,7 +2180,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// Check if target has a [[Call]] internal method.
__ lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
- __ And(t1, t1, Operand(1 << Map::kIsCallable));
+ __ And(t1, t1, Operand(Map::IsCallableBit::kMask));
__ Branch(&non_callable, eq, t1, Operand(zero_reg));
// Check if target is a proxy and call CallProxy external builtin
@@ -2321,7 +2336,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Check if target has a [[Construct]] internal method.
__ lbu(t3, FieldMemOperand(t1, Map::kBitFieldOffset));
- __ And(t3, t3, Operand(1 << Map::kIsConstructor));
+ __ And(t3, t3, Operand(Map::IsConstructorBit::kMask));
__ Branch(&non_constructor, eq, t3, Operand(zero_reg));
// Only dispatch to bound functions after checking whether they are
@@ -2389,17 +2404,6 @@ void Builtins::Generate_Abort(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kAbort);
}
-// static
-void Builtins::Generate_AbortJS(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : message as String object
- // -- ra : return address
- // -----------------------------------
- __ Push(a0);
- __ Move(cp, Smi::kZero);
- __ TailCallRuntime(Runtime::kAbortJS);
-}
-
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// State setup as expected by MacroAssembler::InvokePrologue.
// ----------- S t a t e -------------
@@ -2489,8 +2493,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ sll(t2, a2, kPointerSizeLog2);
__ Subu(t1, fp, Operand(t2));
// Adjust for frame.
- __ Subu(t1, t1, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
- 2 * kPointerSize));
+ __ Subu(t1, t1,
+ Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
+ kPointerSize));
Label fill;
__ bind(&fill);
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index 811ae637ad..266393070c 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -110,11 +110,11 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// Initial map for the builtin InternalArray functions should be maps.
__ Ld(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
__ SmiTst(a2, a4);
- __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction, a4,
- Operand(zero_reg));
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForInternalArrayFunction,
+ a4, Operand(zero_reg));
__ GetObjectType(a2, a3, a4);
- __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction, a4,
- Operand(MAP_TYPE));
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForInternalArrayFunction,
+ a4, Operand(MAP_TYPE));
}
// Run the native code for the InternalArray function called as a normal
@@ -139,10 +139,10 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
// Initial map for the builtin Array functions should be maps.
__ Ld(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
__ SmiTst(a2, a4);
- __ Assert(ne, kUnexpectedInitialMapForArrayFunction1, a4,
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction1, a4,
Operand(zero_reg));
__ GetObjectType(a2, a3, a4);
- __ Assert(eq, kUnexpectedInitialMapForArrayFunction2, a4,
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction2, a4,
Operand(MAP_TYPE));
}
@@ -273,13 +273,16 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// Preserve the incoming parameters on the stack.
__ SmiTag(a0);
- __ Push(cp, a0, a1, a3);
+ __ Push(cp, a0, a1);
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
+ __ Push(a3);
// ----------- S t a t e -------------
// -- sp[0*kPointerSize]: new target
- // -- a1 and sp[1*kPointerSize]: constructor function
- // -- sp[2*kPointerSize]: number of arguments (tagged)
- // -- sp[3*kPointerSize]: context
+ // -- sp[1*kPointerSize]: padding
+ // -- a1 and sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments (tagged)
+ // -- sp[4*kPointerSize]: context
// -----------------------------------
__ Ld(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
@@ -300,10 +303,11 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// ----------- S t a t e -------------
// -- v0: receiver
- // -- Slot 3 / sp[0*kPointerSize]: new target
- // -- Slot 2 / sp[1*kPointerSize]: constructor function
- // -- Slot 1 / sp[2*kPointerSize]: number of arguments (tagged)
- // -- Slot 0 / sp[3*kPointerSize]: context
+ // -- Slot 4 / sp[0*kPointerSize]: new target
+ // -- Slot 3 / sp[1*kPointerSize]: padding
+ // -- Slot 2 / sp[2*kPointerSize]: constructor function
+ // -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged)
+ // -- Slot 0 / sp[4*kPointerSize]: context
// -----------------------------------
// Deoptimizer enters here.
masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
@@ -321,9 +325,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -- r3: new target
// -- sp[0*kPointerSize]: implicit receiver
// -- sp[1*kPointerSize]: implicit receiver
- // -- sp[2*kPointerSize]: constructor function
- // -- sp[3*kPointerSize]: number of arguments (tagged)
- // -- sp[4*kPointerSize]: context
+ // -- sp[2*kPointerSize]: padding
+ // -- sp[3*kPointerSize]: constructor function
+ // -- sp[4*kPointerSize]: number of arguments (tagged)
+ // -- sp[5*kPointerSize]: context
// -----------------------------------
// Restore constructor function and argument count.
@@ -344,9 +349,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -- t3: counter
// -- sp[0*kPointerSize]: implicit receiver
// -- sp[1*kPointerSize]: implicit receiver
- // -- a1 and sp[2*kPointerSize]: constructor function
- // -- sp[3*kPointerSize]: number of arguments (tagged)
- // -- sp[4*kPointerSize]: context
+ // -- sp[2*kPointerSize]: padding
+ // -- a1 and sp[3*kPointerSize]: constructor function
+ // -- sp[4*kPointerSize]: number of arguments (tagged)
+ // -- sp[5*kPointerSize]: context
// -----------------------------------
__ jmp(&entry);
__ bind(&loop);
@@ -364,9 +370,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// ----------- S t a t e -------------
// -- v0: constructor result
// -- sp[0*kPointerSize]: implicit receiver
- // -- sp[1*kPointerSize]: constructor function
- // -- sp[2*kPointerSize]: number of arguments
- // -- sp[3*kPointerSize]: context
+ // -- sp[1*kPointerSize]: padding
+ // -- sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments
+ // -- sp[4*kPointerSize]: context
// -----------------------------------
// Store offset of return address for deoptimizer.
@@ -526,7 +533,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
__ Ld(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
__ GetObjectType(a3, a3, a3);
- __ Assert(eq, kMissingBytecodeArray, a3, Operand(BYTECODE_ARRAY_TYPE));
+ __ Assert(eq, AbortReason::kMissingBytecodeArray, a3,
+ Operand(BYTECODE_ARRAY_TYPE));
}
// Resume (Ignition/TurboFan) generator object.
@@ -752,6 +760,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Operand(Smi::FromEnum(OptimizationMarker::kNone)));
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
+ OptimizationMarker::kLogFirstExecution,
+ Runtime::kFunctionFirstExecution);
+ TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent);
TailCallRuntimeIfMarkerEquals(
@@ -764,7 +775,8 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// that an interrupt will eventually update the slot with optimized code.
if (FLAG_debug_code) {
__ Assert(
- eq, kExpectedOptimizationSentinel, optimized_code_entry,
+ eq, AbortReason::kExpectedOptimizationSentinel,
+ optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
}
__ jmp(&fallthrough);
@@ -843,7 +855,6 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ Lbu(bytecode, MemOperand(scratch2));
__ Daddu(bytecode_size_table, bytecode_size_table,
Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
- __ jmp(&load_size);
// Load the size of the current bytecode.
__ bind(&load_size);
@@ -907,11 +918,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
__ SmiTst(kInterpreterBytecodeArrayRegister, a4);
- __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, a4,
- Operand(zero_reg));
+ __ Assert(ne,
+ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
+ a4, Operand(zero_reg));
__ GetObjectType(kInterpreterBytecodeArrayRegister, a4, a4);
- __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, a4,
- Operand(BYTECODE_ARRAY_TYPE));
+ __ Assert(eq,
+ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
+ a4, Operand(BYTECODE_ARRAY_TYPE));
}
// Reset code age.
@@ -1189,11 +1202,13 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
__ SmiTst(kInterpreterBytecodeArrayRegister, at);
- __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, at,
- Operand(zero_reg));
+ __ Assert(ne,
+ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
+ at, Operand(zero_reg));
__ GetObjectType(kInterpreterBytecodeArrayRegister, a1, a1);
- __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, a1,
- Operand(BYTECODE_ARRAY_TYPE));
+ __ Assert(eq,
+ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
+ a1, Operand(BYTECODE_ARRAY_TYPE));
}
// Get the target bytecode offset from the frame.
@@ -1257,7 +1272,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// The feedback vector must be defined.
if (FLAG_debug_code) {
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Assert(ne, BailoutReason::kExpectedFeedbackVector, feedback_vector,
+ __ Assert(ne, AbortReason::kExpectedFeedbackVector, feedback_vector,
Operand(at));
}
@@ -1820,8 +1835,9 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ dsll32(a0, a0, 0);
__ li(a4, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ MultiPush(a0.bit() | a1.bit() | a4.bit() | fp.bit() | ra.bit());
- __ Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
- kPointerSize));
+ __ Push(Smi::kZero); // Padding.
+ __ Daddu(fp, sp,
+ Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp));
}
static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
@@ -1830,8 +1846,7 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// -----------------------------------
// Get the number of arguments passed (as a smi), tear down the frame and
// then tear down the parameters.
- __ Ld(a1, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
- kPointerSize)));
+ __ Ld(a1, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ mov(sp, fp);
__ MultiPop(fp.bit() | ra.bit());
__ SmiScale(a4, a1, kPointerSizeLog2);
@@ -1915,7 +1930,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ JumpIfSmi(a3, &new_target_not_constructor);
__ ld(t1, FieldMemOperand(a3, HeapObject::kMapOffset));
__ lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
- __ And(t1, t1, Operand(1 << Map::kIsConstructor));
+ __ And(t1, t1, Operand(Map::IsConstructorBit::kMask));
__ Branch(&new_target_constructor, ne, t1, Operand(zero_reg));
__ bind(&new_target_not_constructor);
{
@@ -2187,7 +2202,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// Check if target has a [[Call]] internal method.
__ Lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
- __ And(t1, t1, Operand(1 << Map::kIsCallable));
+ __ And(t1, t1, Operand(Map::IsCallableBit::kMask));
__ Branch(&non_callable, eq, t1, Operand(zero_reg));
__ Branch(&non_function, ne, t2, Operand(JS_PROXY_TYPE));
@@ -2340,7 +2355,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Check if target has a [[Construct]] internal method.
__ Lbu(t3, FieldMemOperand(t1, Map::kBitFieldOffset));
- __ And(t3, t3, Operand(1 << Map::kIsConstructor));
+ __ And(t3, t3, Operand(Map::IsConstructorBit::kMask));
__ Branch(&non_constructor, eq, t3, Operand(zero_reg));
// Only dispatch to bound functions after checking whether they are
@@ -2408,17 +2423,6 @@ void Builtins::Generate_Abort(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kAbort);
}
-// static
-void Builtins::Generate_AbortJS(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : message as String object
- // -- ra : return address
- // -----------------------------------
- __ Push(a0);
- __ Move(cp, Smi::kZero);
- __ TailCallRuntime(Runtime::kAbortJS);
-}
-
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// State setup as expected by MacroAssembler::InvokePrologue.
// ----------- S t a t e -------------
@@ -2510,8 +2514,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ dsll(a6, a2, kPointerSizeLog2);
__ Dsubu(a4, fp, Operand(a6));
// Adjust for frame.
- __ Dsubu(a4, a4, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
- 2 * kPointerSize));
+ __ Dsubu(a4, a4,
+ Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
+ kPointerSize));
Label fill;
__ bind(&fill);
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index e0db87cc0c..34da70ff0f 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -109,9 +109,10 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// Initial map for the builtin InternalArray functions should be maps.
__ LoadP(r5, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
__ TestIfSmi(r5, r0);
- __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction, cr0);
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForInternalArrayFunction,
+ cr0);
__ CompareObjectType(r5, r6, r7, MAP_TYPE);
- __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction);
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForInternalArrayFunction);
}
// Run the native code for the InternalArray function called as a normal
@@ -136,9 +137,9 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
// Initial map for the builtin Array functions should be maps.
__ LoadP(r5, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
__ TestIfSmi(r5, r0);
- __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, cr0);
__ CompareObjectType(r5, r6, r7, MAP_TYPE);
- __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
}
__ mr(r6, r4);
@@ -278,13 +279,16 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// Preserve the incoming parameters on the stack.
__ SmiTag(r3);
- __ Push(cp, r3, r4, r6);
+ __ Push(cp, r3, r4);
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ Push(r6);
// ----------- S t a t e -------------
// -- sp[0*kPointerSize]: new target
- // -- r4 and sp[1*kPointerSize]: constructor function
- // -- sp[2*kPointerSize]: number of arguments (tagged)
- // -- sp[3*kPointerSize]: context
+ // -- sp[1*kPointerSize]: padding
+ // -- r4 and sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments (tagged)
+ // -- sp[4*kPointerSize]: context
// -----------------------------------
__ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
@@ -305,10 +309,11 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// ----------- S t a t e -------------
// -- r3: receiver
- // -- Slot 3 / sp[0*kPointerSize]: new target
- // -- Slot 2 / sp[1*kPointerSize]: constructor function
- // -- Slot 1 / sp[2*kPointerSize]: number of arguments (tagged)
- // -- Slot 0 / sp[3*kPointerSize]: context
+ // -- Slot 4 / sp[0*kPointerSize]: new target
+ // -- Slot 3 / sp[1*kPointerSize]: padding
+ // -- Slot 2 / sp[2*kPointerSize]: constructor function
+ // -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged)
+ // -- Slot 0 / sp[4*kPointerSize]: context
// -----------------------------------
// Deoptimizer enters here.
masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
@@ -326,9 +331,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -- r6: new target
// -- sp[0*kPointerSize]: implicit receiver
// -- sp[1*kPointerSize]: implicit receiver
- // -- sp[2*kPointerSize]: constructor function
- // -- sp[3*kPointerSize]: number of arguments (tagged)
- // -- sp[4*kPointerSize]: context
+ // -- sp[2*kPointerSize]: padding
+ // -- sp[3*kPointerSize]: constructor function
+ // -- sp[4*kPointerSize]: number of arguments (tagged)
+ // -- sp[5*kPointerSize]: context
// -----------------------------------
// Restore constructor function and argument count.
@@ -348,9 +354,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -- cr0: condition indicating whether r3 is zero
// -- sp[0*kPointerSize]: implicit receiver
// -- sp[1*kPointerSize]: implicit receiver
- // -- r4 and sp[2*kPointerSize]: constructor function
- // -- sp[3*kPointerSize]: number of arguments (tagged)
- // -- sp[4*kPointerSize]: context
+ // -- sp[2*kPointerSize]: padding
+ // -- r4 and sp[3*kPointerSize]: constructor function
+ // -- sp[4*kPointerSize]: number of arguments (tagged)
+ // -- sp[5*kPointerSize]: context
// -----------------------------------
__ beq(&no_args, cr0);
__ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2));
@@ -373,9 +380,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// ----------- S t a t e -------------
// -- r0: constructor result
// -- sp[0*kPointerSize]: implicit receiver
- // -- sp[1*kPointerSize]: constructor function
- // -- sp[2*kPointerSize]: number of arguments
- // -- sp[3*kPointerSize]: context
+ // -- sp[1*kPointerSize]: padding
+ // -- sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments
+ // -- sp[4*kPointerSize]: context
// -----------------------------------
// Store offset of return address for deoptimizer.
@@ -545,7 +553,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kFunctionDataOffset));
__ CompareObjectType(r6, r6, r6, BYTECODE_ARRAY_TYPE);
- __ Assert(eq, kMissingBytecodeArray);
+ __ Assert(eq, AbortReason::kMissingBytecodeArray);
}
// Resume (Ignition/TurboFan) generator object.
@@ -636,8 +644,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ mov(cp, Operand(context_address));
__ LoadP(cp, MemOperand(cp));
- __ InitializeRootRegister();
-
// Push the function and the receiver onto the stack.
__ Push(r4, r5);
@@ -773,6 +779,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
__ beq(&fallthrough);
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
+ OptimizationMarker::kLogFirstExecution,
+ Runtime::kFunctionFirstExecution);
+ TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent);
TailCallRuntimeIfMarkerEquals(
@@ -787,7 +796,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
__ CmpSmiLiteral(
optimized_code_entry,
Smi::FromEnum(OptimizationMarker::kInOptimizationQueue), r0);
- __ Assert(eq, kExpectedOptimizationSentinel);
+ __ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
}
__ b(&fallthrough);
}
@@ -868,11 +877,9 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ lbzx(bytecode, MemOperand(bytecode_array, bytecode_offset));
__ addi(bytecode_size_table, bytecode_size_table,
Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
- __ b(&load_size);
// Load the size of the current bytecode.
__ bind(&load_size);
-
__ ShiftLeftImm(scratch2, bytecode, Operand(2));
__ lwzx(scratch2, MemOperand(bytecode_size_table, scratch2));
__ add(bytecode_offset, bytecode_offset, scratch2);
@@ -941,10 +948,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ TestIfSmi(kInterpreterBytecodeArrayRegister, r0);
- __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, cr0);
+ __ Assert(ne,
+ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
+ cr0);
__ CompareObjectType(kInterpreterBytecodeArrayRegister, r3, no_reg,
BYTECODE_ARRAY_TYPE);
- __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ Assert(
+ eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
// Reset code age.
@@ -1226,10 +1236,13 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
__ TestIfSmi(kInterpreterBytecodeArrayRegister, r0);
- __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, cr0);
+ __ Assert(ne,
+ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
+ cr0);
__ CompareObjectType(kInterpreterBytecodeArrayRegister, r4, no_reg,
BYTECODE_ARRAY_TYPE);
- __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ Assert(
+ eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
// Get the target bytecode offset from the frame.
@@ -1291,7 +1304,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// The feedback vector must be defined.
if (FLAG_debug_code) {
__ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
- __ Assert(ne, BailoutReason::kExpectedFeedbackVector);
+ __ Assert(ne, AbortReason::kExpectedFeedbackVector);
}
// Is there an optimization marker or optimized code in the feedback vector?
@@ -1867,8 +1880,9 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
} else {
__ Push(fp, r7, r4, r3);
}
- __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
- kPointerSize));
+ __ Push(Smi::kZero); // Padding.
+ __ addi(fp, sp,
+ Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp));
}
static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
@@ -1877,8 +1891,7 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// -----------------------------------
// Get the number of arguments passed (as a smi), tear down the frame and
// then tear down the parameters.
- __ LoadP(r4, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
- kPointerSize)));
+ __ LoadP(r4, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
int stack_adjustment = kPointerSize; // adjust for receiver
__ LeaveFrame(StackFrame::ARGUMENTS_ADAPTOR, stack_adjustment);
__ SmiToPtrArrayOffset(r0, r4);
@@ -1956,7 +1969,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ JumpIfSmi(r6, &new_target_not_constructor);
__ LoadP(scratch, FieldMemOperand(r6, HeapObject::kMapOffset));
__ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ TestBit(scratch, Map::kIsConstructor, r0);
+ __ TestBit(scratch, Map::IsConstructorBit::kShift, r0);
__ bne(&new_target_constructor, cr0);
__ bind(&new_target_not_constructor);
{
@@ -2253,7 +2266,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// Check if target has a [[Call]] internal method.
__ lbz(r7, FieldMemOperand(r7, Map::kBitFieldOffset));
- __ TestBit(r7, Map::kIsCallable, r0);
+ __ TestBit(r7, Map::IsCallableBit::kShift, r0);
__ beq(&non_callable, cr0);
// Check if target is a proxy and call CallProxy external builtin
@@ -2349,7 +2362,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Check if target has a [[Construct]] internal method.
__ lbz(r5, FieldMemOperand(r7, Map::kBitFieldOffset));
- __ TestBit(r5, Map::kIsConstructor, r0);
+ __ TestBit(r5, Map::IsConstructorBit::kShift, r0);
__ beq(&non_constructor, cr0);
// Only dispatch to bound functions after checking whether they are
@@ -2419,17 +2432,6 @@ void Builtins::Generate_Abort(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kAbort);
}
-// static
-void Builtins::Generate_AbortJS(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r4 : message as String object
- // -- lr : return address
- // -----------------------------------
- __ push(r4);
- __ LoadSmiLiteral(cp, Smi::kZero);
- __ TailCallRuntime(Runtime::kAbortJS);
-}
-
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : actual number of arguments
@@ -2524,8 +2526,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ ShiftLeftImm(r7, r5, Operand(kPointerSizeLog2));
__ sub(r7, fp, r7);
// Adjust for frame.
- __ subi(r7, r7, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
- 2 * kPointerSize));
+ __ subi(r7, r7,
+ Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
+ kPointerSize));
Label fill;
__ bind(&fill);
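
Note on the frame-layout hunks above: the construct-stub and arguments-adaptor frames gain an explicit padding slot so the fixed frame stays a multiple of two pointers, keeping the stack pointer double-word aligned on 64-bit ports. The adaptor frame's offsets are therefore no longer derived by hand from StandardFrameConstants::kFixedFrameSizeFromFp plus a pointer; they come from a dedicated ArgumentsAdaptorFrameConstants class. A minimal sketch of that class's shape, assuming the TypedFrameConstants helpers from src/frame-constants.h (the real definition may differ in detail):

    // Sketch only: FP-relative offsets for the values pushed by
    // EnterArgumentsAdaptorFrame (function, length, padding).
    class ArgumentsAdaptorFrameConstants : public TypedFrameConstants {
     public:
      static const int kFunctionOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
      static const int kLengthOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
      static const int kPaddingOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(2);
      DEFINE_TYPED_FRAME_SIZES(3);
    };

With such a layout, the LoadP(r4, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset)) in LeaveArgumentsAdaptorFrame reads the saved argument count directly instead of recomputing its offset at each call site.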
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index 42c478bd42..020b04b91d 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -109,9 +109,10 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// Initial map for the builtin InternalArray functions should be maps.
__ LoadP(r4, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
__ TestIfSmi(r4);
- __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction, cr0);
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForInternalArrayFunction,
+ cr0);
__ CompareObjectType(r4, r5, r6, MAP_TYPE);
- __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction);
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForInternalArrayFunction);
}
// Run the native code for the InternalArray function called as a normal
@@ -136,9 +137,9 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
// Initial map for the builtin Array functions should be maps.
__ LoadP(r4, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
__ TestIfSmi(r4);
- __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, cr0);
__ CompareObjectType(r4, r5, r6, MAP_TYPE);
- __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
}
__ LoadRR(r5, r3);
@@ -272,13 +273,16 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// Preserve the incoming parameters on the stack.
__ SmiTag(r2);
- __ Push(cp, r2, r3, r5);
+ __ Push(cp, r2, r3);
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ Push(r5);
// ----------- S t a t e -------------
// -- sp[0*kPointerSize]: new target
- // -- r3 and sp[1*kPointerSize]: constructor function
- // -- sp[2*kPointerSize]: number of arguments (tagged)
- // -- sp[3*kPointerSize]: context
+ // -- sp[1*kPointerSize]: padding
+ // -- r3 and sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments (tagged)
+ // -- sp[4*kPointerSize]: context
// -----------------------------------
__ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
@@ -300,10 +304,11 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// ----------- S t a t e -------------
// -- r2: receiver
- // -- Slot 3 / sp[0*kPointerSize]: new target
- // -- Slot 2 / sp[1*kPointerSize]: constructor function
- // -- Slot 1 / sp[2*kPointerSize]: number of arguments (tagged)
- // -- Slot 0 / sp[3*kPointerSize]: context
+ // -- Slot 4 / sp[0*kPointerSize]: new target
+ // -- Slot 3 / sp[1*kPointerSize]: padding
+ // -- Slot 2 / sp[2*kPointerSize]: constructor function
+ // -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged)
+ // -- Slot 0 / sp[4*kPointerSize]: context
// -----------------------------------
// Deoptimizer enters here.
masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
@@ -321,9 +326,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -- r5: new target
// -- sp[0*kPointerSize]: implicit receiver
// -- sp[1*kPointerSize]: implicit receiver
- // -- sp[2*kPointerSize]: constructor function
- // -- sp[3*kPointerSize]: number of arguments (tagged)
- // -- sp[4*kPointerSize]: context
+ // -- sp[2*kPointerSize]: padding
+ // -- sp[3*kPointerSize]: constructor function
+ // -- sp[4*kPointerSize]: number of arguments (tagged)
+ // -- sp[5*kPointerSize]: context
// -----------------------------------
// Restore constructor function and argument count.
@@ -343,9 +349,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -- cr0: condition indicating whether r2 is zero
// -- sp[0*kPointerSize]: implicit receiver
// -- sp[1*kPointerSize]: implicit receiver
- // -- r3 and sp[2*kPointerSize]: constructor function
- // -- sp[3*kPointerSize]: number of arguments (tagged)
- // -- sp[4*kPointerSize]: context
+ // -- sp[2*kPointerSize]: padding
+ // -- r3 and sp[3*kPointerSize]: constructor function
+ // -- sp[4*kPointerSize]: number of arguments (tagged)
+ // -- sp[5*kPointerSize]: context
// -----------------------------------
__ beq(&no_args);
@@ -366,9 +373,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// ----------- S t a t e -------------
// -- r0: constructor result
// -- sp[0*kPointerSize]: implicit receiver
- // -- sp[1*kPointerSize]: constructor function
- // -- sp[2*kPointerSize]: number of arguments
- // -- sp[3*kPointerSize]: context
+ // -- sp[1*kPointerSize]: padding
+ // -- sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments
+ // -- sp[4*kPointerSize]: context
// -----------------------------------
// Store offset of return address for deoptimizer.
@@ -540,7 +548,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset));
__ CompareObjectType(r5, r5, r5, BYTECODE_ARRAY_TYPE);
- __ Assert(eq, kMissingBytecodeArray);
+ __ Assert(eq, AbortReason::kMissingBytecodeArray);
}
// Resume (Ignition/TurboFan) generator object.
@@ -632,8 +640,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ mov(cp, Operand(context_address));
__ LoadP(cp, MemOperand(cp));
- __ InitializeRootRegister();
-
// Push the function and the receiver onto the stack.
__ Push(r3, r4);
@@ -776,6 +782,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
__ beq(&fallthrough);
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
+ OptimizationMarker::kLogFirstExecution,
+ Runtime::kFunctionFirstExecution);
+ TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent);
TailCallRuntimeIfMarkerEquals(
@@ -790,7 +799,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
__ CmpSmiLiteral(
optimized_code_entry,
Smi::FromEnum(OptimizationMarker::kInOptimizationQueue), r0);
- __ Assert(eq, kExpectedOptimizationSentinel);
+ __ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
}
__ b(&fallthrough, Label::kNear);
}
@@ -870,10 +879,9 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ LoadlB(bytecode, MemOperand(bytecode_array, bytecode_offset));
__ AddP(bytecode_size_table, bytecode_size_table,
Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
- __ b(&load_size);
+
// Load the size of the current bytecode.
__ bind(&load_size);
-
__ ShiftLeftP(scratch2, bytecode, Operand(2));
__ LoadlW(scratch2, MemOperand(bytecode_size_table, scratch2));
__ AddP(bytecode_offset, bytecode_offset, scratch2);
@@ -938,10 +946,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
__ TestIfSmi(kInterpreterBytecodeArrayRegister);
- __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ Assert(
+ ne, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
__ CompareObjectType(kInterpreterBytecodeArrayRegister, r2, no_reg,
BYTECODE_ARRAY_TYPE);
- __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ Assert(
+ eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
// Reset code age.
@@ -1224,10 +1234,12 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
__ TestIfSmi(kInterpreterBytecodeArrayRegister);
- __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ Assert(
+ ne, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
__ CompareObjectType(kInterpreterBytecodeArrayRegister, r3, no_reg,
BYTECODE_ARRAY_TYPE);
- __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ Assert(
+ eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
// Get the target bytecode offset from the frame.
@@ -1289,7 +1301,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// The feedback vector must be defined.
if (FLAG_debug_code) {
__ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
- __ Assert(ne, BailoutReason::kExpectedFeedbackVector);
+ __ Assert(ne, AbortReason::kExpectedFeedbackVector);
}
// Is there an optimization marker or optimized code in the feedback vector?
@@ -1854,7 +1866,8 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
// Old FP <--- New FP
// Argument Adapter SMI
// Function
- // ArgC as SMI <--- New SP
+ // ArgC as SMI
+ // Padding <--- New SP
__ lay(sp, MemOperand(sp, -5 * kPointerSize));
// Cleanse the top nibble of 31-bit pointers.
@@ -1864,8 +1877,9 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ StoreP(r6, MemOperand(sp, 2 * kPointerSize));
__ StoreP(r3, MemOperand(sp, 1 * kPointerSize));
__ StoreP(r2, MemOperand(sp, 0 * kPointerSize));
- __ la(fp, MemOperand(sp, StandardFrameConstants::kFixedFrameSizeFromFp +
- kPointerSize));
+ __ Push(Smi::kZero); // Padding.
+ __ la(fp,
+ MemOperand(sp, ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp));
}
static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
@@ -1874,8 +1888,7 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// -----------------------------------
// Get the number of arguments passed (as a smi), tear down the frame and
// then tear down the parameters.
- __ LoadP(r3, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
- kPointerSize)));
+ __ LoadP(r3, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
int stack_adjustment = kPointerSize; // adjust for receiver
__ LeaveFrame(StackFrame::ARGUMENTS_ADAPTOR, stack_adjustment);
__ SmiToPtrArrayOffset(r3, r3);
@@ -1954,7 +1967,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ JumpIfSmi(r5, &new_target_not_constructor);
__ LoadP(scratch, FieldMemOperand(r5, HeapObject::kMapOffset));
__ LoadlB(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ tmll(scratch, Operand(Map::kIsConstructor));
+ __ tmll(scratch, Operand(Map::IsConstructorBit::kShift));
__ bne(&new_target_constructor);
__ bind(&new_target_not_constructor);
{
@@ -2252,7 +2265,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// Check if target has a [[Call]] internal method.
__ LoadlB(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
- __ TestBit(r6, Map::kIsCallable);
+ __ TestBit(r6, Map::IsCallableBit::kShift);
__ beq(&non_callable);
// Check if target is a proxy and call CallProxy external builtin
@@ -2348,7 +2361,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Check if target has a [[Construct]] internal method.
__ LoadlB(r4, FieldMemOperand(r6, Map::kBitFieldOffset));
- __ TestBit(r4, Map::kIsConstructor);
+ __ TestBit(r4, Map::IsConstructorBit::kShift);
__ beq(&non_constructor);
// Only dispatch to bound functions after checking whether they are
@@ -2418,17 +2431,6 @@ void Builtins::Generate_Abort(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kAbort);
}
-// static
-void Builtins::Generate_AbortJS(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r3 : message as String object
- // -- lr : return address
- // -----------------------------------
- __ push(r3);
- __ LoadSmiLiteral(cp, Smi::kZero);
- __ TailCallRuntime(Runtime::kAbortJS);
-}
-
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : actual number of arguments
@@ -2522,8 +2524,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ ShiftLeftP(r6, r4, Operand(kPointerSizeLog2));
__ SubP(r6, fp, r6);
// Adjust for frame.
- __ SubP(r6, r6, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
- 2 * kPointerSize));
+ __ SubP(r6, r6,
+ Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
+ kPointerSize));
Label fill;
__ bind(&fill);
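
The Assert/Check churn in both the PPC and s390 files is one mechanical migration: the old unscoped BailoutReason constants (kMissingBytecodeArray, kExpectedOptimizationSentinel, ...) are replaced by a scoped AbortReason enum, so every call site now names the enum explicitly. A hedged sketch of the new shape, with entries taken only from the call sites above (the full enum lives elsewhere in the tree):

    // Scoped enum: call sites must write AbortReason::k..., which also
    // keeps the identifiers out of the global namespace.
    enum class AbortReason {
      kExpectedFeedbackVector,
      kExpectedOptimizationSentinel,
      kMissingBytecodeArray,
      // ... one entry per abort message
    };
    // e.g.  __ Assert(eq, AbortReason::kMissingBytecodeArray);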
diff --git a/deps/v8/src/builtins/setup-builtins-internal.cc b/deps/v8/src/builtins/setup-builtins-internal.cc
index b9073e1f13..5a09658867 100644
--- a/deps/v8/src/builtins/setup-builtins-internal.cc
+++ b/deps/v8/src/builtins/setup-builtins-internal.cc
@@ -107,7 +107,11 @@ Code* BuildWithCodeStubAssemblerJS(Isolate* isolate, int32_t builtin_index,
// Canonicalize handles, so that we can share constant pool entries pointing
// to code targets without dereferencing their handles.
CanonicalHandleScope canonical(isolate);
- Zone zone(isolate->allocator(), ZONE_NAME);
+
+ SegmentSize segment_size = isolate->serializer_enabled()
+ ? SegmentSize::kLarge
+ : SegmentSize::kDefault;
+ Zone zone(isolate->allocator(), ZONE_NAME, segment_size);
const int argc_with_recv =
(argc == SharedFunctionInfo::kDontAdaptArgumentsSentinel) ? 0 : argc + 1;
compiler::CodeAssemblerState state(isolate, &zone, argc_with_recv,
@@ -127,7 +131,10 @@ Code* BuildWithCodeStubAssemblerCS(Isolate* isolate, int32_t builtin_index,
// Canonicalize handles, so that we can share constant pool entries pointing
// to code targets without dereferencing their handles.
CanonicalHandleScope canonical(isolate);
- Zone zone(isolate->allocator(), ZONE_NAME);
+ SegmentSize segment_size = isolate->serializer_enabled()
+ ? SegmentSize::kLarge
+ : SegmentSize::kDefault;
+ Zone zone(isolate->allocator(), ZONE_NAME, segment_size);
// The interface descriptor with given key must be initialized at this point
// and this construction just queries the details from the descriptors table.
CallInterfaceDescriptor descriptor(isolate, interface_descriptor);
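
The setup-builtins-internal.cc change is a memory-layout tweak rather than a behavioral one: when the serializer is enabled (i.e. while building the snapshot), the compilation Zone is created with large segments, trading peak memory for far fewer segment allocations as every builtin is compiled in a single process. Restated from the hunks above, the selection is simply:

    // Pick bigger zone segments only for snapshot builds, where all
    // builtins are generated back to back in one run.
    SegmentSize segment_size = isolate->serializer_enabled()
                                   ? SegmentSize::kLarge
                                   : SegmentSize::kDefault;
    Zone zone(isolate->allocator(), ZONE_NAME, segment_size);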
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index f2820fa410..cd35abb362 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -170,7 +170,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ Push(Operand(rbx, rcx, times_pointer_size, 0));
__ bind(&entry);
__ decp(rcx);
- __ j(greater_equal, &loop);
+ __ j(greater_equal, &loop, Label::kNear);
// Call the function.
// rax: number of arguments (untagged)
@@ -217,19 +217,21 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ Push(rsi);
__ Push(rcx);
__ Push(rdi);
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
__ Push(rdx);
// ----------- S t a t e -------------
// -- sp[0*kPointerSize]: new target
- // -- rdi and sp[1*kPointerSize]: constructor function
- // -- sp[2*kPointerSize]: argument count
- // -- sp[3*kPointerSize]: context
+ // -- sp[1*kPointerSize]: padding
+ // -- rdi and sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: argument count
+ // -- sp[4*kPointerSize]: context
// -----------------------------------
__ movp(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ testl(FieldOperand(rbx, SharedFunctionInfo::kCompilerHintsOffset),
Immediate(SharedFunctionInfo::kDerivedConstructorMask));
- __ j(not_zero, &not_create_implicit_receiver);
+ __ j(not_zero, &not_create_implicit_receiver, Label::kNear);
// If not derived class constructor: Allocate the new receiver object.
__ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1);
@@ -243,10 +245,11 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// ----------- S t a t e -------------
// -- rax implicit receiver
- // -- Slot 3 / sp[0*kPointerSize] new target
- // -- Slot 2 / sp[1*kPointerSize] constructor function
- // -- Slot 1 / sp[2*kPointerSize] number of arguments (tagged)
- // -- Slot 0 / sp[3*kPointerSize] context
+ // -- Slot 4 / sp[0*kPointerSize] new target
+ // -- Slot 3 / sp[1*kPointerSize] padding
+ // -- Slot 2 / sp[2*kPointerSize] constructor function
+ // -- Slot 1 / sp[3*kPointerSize] number of arguments (tagged)
+ // -- Slot 0 / sp[4*kPointerSize] context
// -----------------------------------
// Deoptimizer enters here.
masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
@@ -265,9 +268,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// ----------- S t a t e -------------
// -- sp[0*kPointerSize] implicit receiver
// -- sp[1*kPointerSize] implicit receiver
- // -- sp[2*kPointerSize] constructor function
- // -- sp[3*kPointerSize] number of arguments (tagged)
- // -- sp[4*kPointerSize] context
+ // -- sp[2*kPointerSize] padding
+ // -- sp[3*kPointerSize] constructor function
+ // -- sp[4*kPointerSize] number of arguments (tagged)
+ // -- sp[5*kPointerSize] context
// -----------------------------------
// Restore constructor function and argument count.
@@ -288,16 +292,17 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -- rcx: counter (tagged)
// -- sp[0*kPointerSize]: implicit receiver
// -- sp[1*kPointerSize]: implicit receiver
- // -- rdi and sp[2*kPointerSize]: constructor function
- // -- sp[3*kPointerSize]: number of arguments (tagged)
- // -- sp[4*kPointerSize]: context
+ // -- sp[2*kPointerSize]: padding
+ // -- rdi and sp[3*kPointerSize]: constructor function
+ // -- sp[4*kPointerSize]: number of arguments (tagged)
+ // -- sp[5*kPointerSize]: context
// -----------------------------------
__ jmp(&entry, Label::kNear);
__ bind(&loop);
__ Push(Operand(rbx, rcx, times_pointer_size, 0));
__ bind(&entry);
__ decp(rcx);
- __ j(greater_equal, &loop);
+ __ j(greater_equal, &loop, Label::kNear);
// Call the function.
ParameterCount actual(rax);
@@ -306,9 +311,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// ----------- S t a t e -------------
// -- rax constructor result
// -- sp[0*kPointerSize] implicit receiver
- // -- sp[1*kPointerSize] constructor function
- // -- sp[2*kPointerSize] number of arguments
- // -- sp[3*kPointerSize] context
+ // -- sp[1*kPointerSize] padding
+ // -- sp[2*kPointerSize] constructor function
+ // -- sp[3*kPointerSize] number of arguments
+ // -- sp[4*kPointerSize] context
// -----------------------------------
// Store offset of return address for deoptimizer.
@@ -363,7 +369,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// on-stack receiver as the result.
__ bind(&use_receiver);
__ movp(rax, Operand(rsp, 0 * kPointerSize));
- __ JumpIfRoot(rax, Heap::kTheHoleValueRootIndex, &do_throw);
+ __ JumpIfRoot(rax, Heap::kTheHoleValueRootIndex, &do_throw, Label::kNear);
__ bind(&leave_frame);
// Restore the arguments count.
@@ -519,7 +525,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Argument count in rax. Clobbers rcx.
Label enough_stack_space, stack_overflow;
Generate_StackOverflowCheck(masm, rax, rcx, &stack_overflow, Label::kNear);
- __ jmp(&enough_stack_space);
+ __ jmp(&enough_stack_space, Label::kNear);
__ bind(&stack_overflow);
__ CallRuntime(Runtime::kThrowStackOverflow);
@@ -540,7 +546,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ addp(rcx, Immediate(1));
__ bind(&entry);
__ cmpp(rcx, rax);
- __ j(not_equal, &loop);
+ __ j(not_equal, &loop, Label::kNear);
// Invoke the builtin code.
Handle<Code> builtin = is_construct
@@ -642,7 +648,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movp(rcx, FieldOperand(rcx, SharedFunctionInfo::kFunctionDataOffset));
__ CmpObjectType(rcx, BYTECODE_ARRAY_TYPE, rcx);
- __ Assert(equal, kMissingBytecodeArray);
+ __ Assert(equal, AbortReason::kMissingBytecodeArray);
}
// Resume (Ignition/TurboFan) generator object.
@@ -768,6 +774,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
__ j(equal, &fallthrough);
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
+ OptimizationMarker::kLogFirstExecution,
+ Runtime::kFunctionFirstExecution);
+ TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent);
TailCallRuntimeIfMarkerEquals(
@@ -781,7 +790,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
if (FLAG_debug_code) {
__ SmiCompare(optimized_code_entry,
Smi::FromEnum(OptimizationMarker::kInOptimizationQueue));
- __ Assert(equal, kExpectedOptimizationSentinel);
+ __ Assert(equal, AbortReason::kExpectedOptimizationSentinel);
}
__ jmp(&fallthrough);
}
@@ -859,7 +868,6 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ movzxbp(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0));
__ addp(bytecode_size_table,
Immediate(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
- __ jmp(&load_size, Label::kNear);
// Load the size of the current bytecode.
__ bind(&load_size);
@@ -922,7 +930,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ AssertNotSmi(kInterpreterBytecodeArrayRegister);
__ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
rax);
- __ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ Assert(
+ equal,
+ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
// Reset code age.
@@ -958,7 +968,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Label loop_header;
Label loop_check;
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- __ j(always, &loop_check);
+ __ j(always, &loop_check, Label::kNear);
__ bind(&loop_header);
// TODO(rmcilroy): Consider doing more than one push per loop iteration.
__ Push(rax);
@@ -1051,7 +1061,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
// Push the arguments.
Label loop_header, loop_check;
- __ j(always, &loop_check);
+ __ j(always, &loop_check, Label::kNear);
__ bind(&loop_header);
__ Push(Operand(start_address, 0));
__ subp(start_address, Immediate(kPointerSize));
@@ -1212,7 +1222,9 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ AssertNotSmi(kInterpreterBytecodeArrayRegister);
__ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
rbx);
- __ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ Assert(
+ equal,
+ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
// Get the target bytecode offset from the frame.
@@ -1274,7 +1286,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// The feedback vector must be defined.
if (FLAG_debug_code) {
__ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
- __ Assert(not_equal, BailoutReason::kExpectedFeedbackVector);
+ __ Assert(not_equal, AbortReason::kExpectedFeedbackVector);
}
// Is there an optimization marker or optimized code in the feedback vector?
@@ -1806,9 +1818,10 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// Will both indicate a nullptr and a Smi.
STATIC_ASSERT(kSmiTag == 0);
Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
- __ Check(not_smi, kUnexpectedInitialMapForInternalArrayFunction);
+ __ Check(not_smi,
+ AbortReason::kUnexpectedInitialMapForInternalArrayFunction);
__ CmpObjectType(rbx, MAP_TYPE, rcx);
- __ Check(equal, kUnexpectedInitialMapForInternalArrayFunction);
+ __ Check(equal, AbortReason::kUnexpectedInitialMapForInternalArrayFunction);
}
// Run the native code for the InternalArray function called as a normal
@@ -1835,9 +1848,9 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
// Will both indicate a nullptr and a Smi.
STATIC_ASSERT(kSmiTag == 0);
Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
- __ Check(not_smi, kUnexpectedInitialMapForArrayFunction);
+ __ Check(not_smi, AbortReason::kUnexpectedInitialMapForArrayFunction);
__ CmpObjectType(rbx, MAP_TYPE, rcx);
- __ Check(equal, kUnexpectedInitialMapForArrayFunction);
+ __ Check(equal, AbortReason::kUnexpectedInitialMapForArrayFunction);
}
__ movp(rdx, rdi);
@@ -1863,6 +1876,8 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
// arguments and the receiver.
__ Integer32ToSmi(r8, rax);
__ Push(r8);
+
+ __ Push(Immediate(0)); // Padding.
}
static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
@@ -1922,19 +1937,6 @@ void Builtins::Generate_Abort(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kAbort);
}
-// static
-void Builtins::Generate_AbortJS(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rdx : message as String object
- // -- rsp[0] : return address
- // -----------------------------------
- __ PopReturnAddressTo(rcx);
- __ Push(rdx);
- __ PushReturnAddressFrom(rcx);
- __ Move(rsi, Smi::kZero);
- __ TailCallRuntime(Runtime::kAbortJS);
-}
-
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : actual number of arguments
@@ -2115,7 +2117,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ JumpIfSmi(rdx, &new_target_not_constructor, Label::kNear);
__ movp(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
__ testb(FieldOperand(rbx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsConstructor));
+ Immediate(Map::IsConstructorBit::kMask));
__ j(not_zero, &new_target_constructor, Label::kNear);
__ bind(&new_target_not_constructor);
{
@@ -2242,7 +2244,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Patch receiver to global proxy.
__ LoadGlobalProxy(rcx);
}
- __ jmp(&convert_receiver);
+ __ jmp(&convert_receiver, Label::kNear);
}
__ bind(&convert_to_object);
{
@@ -2419,12 +2421,12 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// Check if target has a [[Call]] internal method.
__ testb(FieldOperand(rcx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsCallable));
- __ j(zero, &non_callable);
+ Immediate(Map::IsCallableBit::kMask));
+ __ j(zero, &non_callable, Label::kNear);
// Check if target is a proxy and call CallProxy external builtin
__ CmpInstanceType(rcx, JS_PROXY_TYPE);
- __ j(not_equal, &non_function);
+ __ j(not_equal, &non_function, Label::kNear);
__ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET);
// 2. Call to something else, which might have a [[Call]] internal method (if
@@ -2516,7 +2518,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Check if target has a [[Construct]] internal method.
__ testb(FieldOperand(rcx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsConstructor));
+ Immediate(Map::IsConstructorBit::kMask));
__ j(zero, &non_constructor, Label::kNear);
// Only dispatch to bound functions after checking whether they are
@@ -2527,7 +2529,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Only dispatch to proxies after checking whether they are constructors.
__ CmpInstanceType(rcx, JS_PROXY_TYPE);
- __ j(not_equal, &non_proxy);
+ __ j(not_equal, &non_proxy, Label::kNear);
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy),
RelocInfo::CODE_TARGET);
@@ -2568,7 +2570,7 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
Label skip;
// If the code object is null, just return to the caller.
- __ cmpp(rax, Immediate(0));
+ __ testp(rax, rax);
__ j(not_equal, &skip, Label::kNear);
__ ret(0);
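
A recurring change in the x64 file is adding Label::kNear to jumps whose targets are known to be close. A near jump is encoded with an 8-bit displacement (2-byte jcc rel8) instead of a 32-bit one (6-byte jcc rel32), shrinking the generated builtins; it is only legal when the bound label lands within roughly +/-127 bytes of the jump site. In the document's own masm style:

    __ j(greater_equal, &loop, Label::kNear);  // rel8: target must be close
    __ j(greater_equal, &loop);                // rel32: default, always safe

Similarly, the testp(rax, rax) substitution at the end of that file is the classic zero test: it sets the same flags as cmpp(rax, Immediate(0)) but encodes shorter, since no immediate operand is materialized.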
diff --git a/deps/v8/src/cached-powers.cc b/deps/v8/src/cached-powers.cc
index b160c11bed..08c9781414 100644
--- a/deps/v8/src/cached-powers.cc
+++ b/deps/v8/src/cached-powers.cc
@@ -22,93 +22,93 @@ struct CachedPower {
};
static const CachedPower kCachedPowers[] = {
- {V8_2PART_UINT64_C(0xfa8fd5a0, 081c0288), -1220, -348},
- {V8_2PART_UINT64_C(0xbaaee17f, a23ebf76), -1193, -340},
- {V8_2PART_UINT64_C(0x8b16fb20, 3055ac76), -1166, -332},
- {V8_2PART_UINT64_C(0xcf42894a, 5dce35ea), -1140, -324},
- {V8_2PART_UINT64_C(0x9a6bb0aa, 55653b2d), -1113, -316},
- {V8_2PART_UINT64_C(0xe61acf03, 3d1a45df), -1087, -308},
- {V8_2PART_UINT64_C(0xab70fe17, c79ac6ca), -1060, -300},
- {V8_2PART_UINT64_C(0xff77b1fc, bebcdc4f), -1034, -292},
- {V8_2PART_UINT64_C(0xbe5691ef, 416bd60c), -1007, -284},
- {V8_2PART_UINT64_C(0x8dd01fad, 907ffc3c), -980, -276},
- {V8_2PART_UINT64_C(0xd3515c28, 31559a83), -954, -268},
- {V8_2PART_UINT64_C(0x9d71ac8f, ada6c9b5), -927, -260},
- {V8_2PART_UINT64_C(0xea9c2277, 23ee8bcb), -901, -252},
- {V8_2PART_UINT64_C(0xaecc4991, 4078536d), -874, -244},
- {V8_2PART_UINT64_C(0x823c1279, 5db6ce57), -847, -236},
- {V8_2PART_UINT64_C(0xc2109436, 4dfb5637), -821, -228},
- {V8_2PART_UINT64_C(0x9096ea6f, 3848984f), -794, -220},
- {V8_2PART_UINT64_C(0xd77485cb, 25823ac7), -768, -212},
- {V8_2PART_UINT64_C(0xa086cfcd, 97bf97f4), -741, -204},
- {V8_2PART_UINT64_C(0xef340a98, 172aace5), -715, -196},
- {V8_2PART_UINT64_C(0xb23867fb, 2a35b28e), -688, -188},
- {V8_2PART_UINT64_C(0x84c8d4df, d2c63f3b), -661, -180},
- {V8_2PART_UINT64_C(0xc5dd4427, 1ad3cdba), -635, -172},
- {V8_2PART_UINT64_C(0x936b9fce, bb25c996), -608, -164},
- {V8_2PART_UINT64_C(0xdbac6c24, 7d62a584), -582, -156},
- {V8_2PART_UINT64_C(0xa3ab6658, 0d5fdaf6), -555, -148},
- {V8_2PART_UINT64_C(0xf3e2f893, dec3f126), -529, -140},
- {V8_2PART_UINT64_C(0xb5b5ada8, aaff80b8), -502, -132},
- {V8_2PART_UINT64_C(0x87625f05, 6c7c4a8b), -475, -124},
- {V8_2PART_UINT64_C(0xc9bcff60, 34c13053), -449, -116},
- {V8_2PART_UINT64_C(0x964e858c, 91ba2655), -422, -108},
- {V8_2PART_UINT64_C(0xdff97724, 70297ebd), -396, -100},
- {V8_2PART_UINT64_C(0xa6dfbd9f, b8e5b88f), -369, -92},
- {V8_2PART_UINT64_C(0xf8a95fcf, 88747d94), -343, -84},
- {V8_2PART_UINT64_C(0xb9447093, 8fa89bcf), -316, -76},
- {V8_2PART_UINT64_C(0x8a08f0f8, bf0f156b), -289, -68},
- {V8_2PART_UINT64_C(0xcdb02555, 653131b6), -263, -60},
- {V8_2PART_UINT64_C(0x993fe2c6, d07b7fac), -236, -52},
- {V8_2PART_UINT64_C(0xe45c10c4, 2a2b3b06), -210, -44},
- {V8_2PART_UINT64_C(0xaa242499, 697392d3), -183, -36},
- {V8_2PART_UINT64_C(0xfd87b5f2, 8300ca0e), -157, -28},
- {V8_2PART_UINT64_C(0xbce50864, 92111aeb), -130, -20},
- {V8_2PART_UINT64_C(0x8cbccc09, 6f5088cc), -103, -12},
- {V8_2PART_UINT64_C(0xd1b71758, e219652c), -77, -4},
- {V8_2PART_UINT64_C(0x9c400000, 00000000), -50, 4},
- {V8_2PART_UINT64_C(0xe8d4a510, 00000000), -24, 12},
- {V8_2PART_UINT64_C(0xad78ebc5, ac620000), 3, 20},
- {V8_2PART_UINT64_C(0x813f3978, f8940984), 30, 28},
- {V8_2PART_UINT64_C(0xc097ce7b, c90715b3), 56, 36},
- {V8_2PART_UINT64_C(0x8f7e32ce, 7bea5c70), 83, 44},
- {V8_2PART_UINT64_C(0xd5d238a4, abe98068), 109, 52},
- {V8_2PART_UINT64_C(0x9f4f2726, 179a2245), 136, 60},
- {V8_2PART_UINT64_C(0xed63a231, d4c4fb27), 162, 68},
- {V8_2PART_UINT64_C(0xb0de6538, 8cc8ada8), 189, 76},
- {V8_2PART_UINT64_C(0x83c7088e, 1aab65db), 216, 84},
- {V8_2PART_UINT64_C(0xc45d1df9, 42711d9a), 242, 92},
- {V8_2PART_UINT64_C(0x924d692c, a61be758), 269, 100},
- {V8_2PART_UINT64_C(0xda01ee64, 1a708dea), 295, 108},
- {V8_2PART_UINT64_C(0xa26da399, 9aef774a), 322, 116},
- {V8_2PART_UINT64_C(0xf209787b, b47d6b85), 348, 124},
- {V8_2PART_UINT64_C(0xb454e4a1, 79dd1877), 375, 132},
- {V8_2PART_UINT64_C(0x865b8692, 5b9bc5c2), 402, 140},
- {V8_2PART_UINT64_C(0xc83553c5, c8965d3d), 428, 148},
- {V8_2PART_UINT64_C(0x952ab45c, fa97a0b3), 455, 156},
- {V8_2PART_UINT64_C(0xde469fbd, 99a05fe3), 481, 164},
- {V8_2PART_UINT64_C(0xa59bc234, db398c25), 508, 172},
- {V8_2PART_UINT64_C(0xf6c69a72, a3989f5c), 534, 180},
- {V8_2PART_UINT64_C(0xb7dcbf53, 54e9bece), 561, 188},
- {V8_2PART_UINT64_C(0x88fcf317, f22241e2), 588, 196},
- {V8_2PART_UINT64_C(0xcc20ce9b, d35c78a5), 614, 204},
- {V8_2PART_UINT64_C(0x98165af3, 7b2153df), 641, 212},
- {V8_2PART_UINT64_C(0xe2a0b5dc, 971f303a), 667, 220},
- {V8_2PART_UINT64_C(0xa8d9d153, 5ce3b396), 694, 228},
- {V8_2PART_UINT64_C(0xfb9b7cd9, a4a7443c), 720, 236},
- {V8_2PART_UINT64_C(0xbb764c4c, a7a44410), 747, 244},
- {V8_2PART_UINT64_C(0x8bab8eef, b6409c1a), 774, 252},
- {V8_2PART_UINT64_C(0xd01fef10, a657842c), 800, 260},
- {V8_2PART_UINT64_C(0x9b10a4e5, e9913129), 827, 268},
- {V8_2PART_UINT64_C(0xe7109bfb, a19c0c9d), 853, 276},
- {V8_2PART_UINT64_C(0xac2820d9, 623bf429), 880, 284},
- {V8_2PART_UINT64_C(0x80444b5e, 7aa7cf85), 907, 292},
- {V8_2PART_UINT64_C(0xbf21e440, 03acdd2d), 933, 300},
- {V8_2PART_UINT64_C(0x8e679c2f, 5e44ff8f), 960, 308},
- {V8_2PART_UINT64_C(0xd433179d, 9c8cb841), 986, 316},
- {V8_2PART_UINT64_C(0x9e19db92, b4e31ba9), 1013, 324},
- {V8_2PART_UINT64_C(0xeb96bf6e, badf77d9), 1039, 332},
- {V8_2PART_UINT64_C(0xaf87023b, 9bf0ee6b), 1066, 340},
+ {V8_2PART_UINT64_C(0xFA8FD5A0, 081C0288), -1220, -348},
+ {V8_2PART_UINT64_C(0xBAAEE17F, A23EBF76), -1193, -340},
+ {V8_2PART_UINT64_C(0x8B16FB20, 3055AC76), -1166, -332},
+ {V8_2PART_UINT64_C(0xCF42894A, 5DCE35EA), -1140, -324},
+ {V8_2PART_UINT64_C(0x9A6BB0AA, 55653B2D), -1113, -316},
+ {V8_2PART_UINT64_C(0xE61ACF03, 3D1A45DF), -1087, -308},
+ {V8_2PART_UINT64_C(0xAB70FE17, C79AC6CA), -1060, -300},
+ {V8_2PART_UINT64_C(0xFF77B1FC, BEBCDC4F), -1034, -292},
+ {V8_2PART_UINT64_C(0xBE5691EF, 416BD60C), -1007, -284},
+ {V8_2PART_UINT64_C(0x8DD01FAD, 907FFC3C), -980, -276},
+ {V8_2PART_UINT64_C(0xD3515C28, 31559A83), -954, -268},
+ {V8_2PART_UINT64_C(0x9D71AC8F, ADA6C9B5), -927, -260},
+ {V8_2PART_UINT64_C(0xEA9C2277, 23EE8BCB), -901, -252},
+ {V8_2PART_UINT64_C(0xAECC4991, 4078536D), -874, -244},
+ {V8_2PART_UINT64_C(0x823C1279, 5DB6CE57), -847, -236},
+ {V8_2PART_UINT64_C(0xC2109436, 4DFB5637), -821, -228},
+ {V8_2PART_UINT64_C(0x9096EA6F, 3848984F), -794, -220},
+ {V8_2PART_UINT64_C(0xD77485CB, 25823AC7), -768, -212},
+ {V8_2PART_UINT64_C(0xA086CFCD, 97BF97F4), -741, -204},
+ {V8_2PART_UINT64_C(0xEF340A98, 172AACE5), -715, -196},
+ {V8_2PART_UINT64_C(0xB23867FB, 2A35B28E), -688, -188},
+ {V8_2PART_UINT64_C(0x84C8D4DF, D2C63F3B), -661, -180},
+ {V8_2PART_UINT64_C(0xC5DD4427, 1AD3CDBA), -635, -172},
+ {V8_2PART_UINT64_C(0x936B9FCE, BB25C996), -608, -164},
+ {V8_2PART_UINT64_C(0xDBAC6C24, 7D62A584), -582, -156},
+ {V8_2PART_UINT64_C(0xA3AB6658, 0D5FDAF6), -555, -148},
+ {V8_2PART_UINT64_C(0xF3E2F893, DEC3F126), -529, -140},
+ {V8_2PART_UINT64_C(0xB5B5ADA8, AAFF80B8), -502, -132},
+ {V8_2PART_UINT64_C(0x87625F05, 6C7C4A8B), -475, -124},
+ {V8_2PART_UINT64_C(0xC9BCFF60, 34C13053), -449, -116},
+ {V8_2PART_UINT64_C(0x964E858C, 91BA2655), -422, -108},
+ {V8_2PART_UINT64_C(0xDFF97724, 70297EBD), -396, -100},
+ {V8_2PART_UINT64_C(0xA6DFBD9F, B8E5B88F), -369, -92},
+ {V8_2PART_UINT64_C(0xF8A95FCF, 88747D94), -343, -84},
+ {V8_2PART_UINT64_C(0xB9447093, 8FA89BCF), -316, -76},
+ {V8_2PART_UINT64_C(0x8A08F0F8, BF0F156B), -289, -68},
+ {V8_2PART_UINT64_C(0xCDB02555, 653131B6), -263, -60},
+ {V8_2PART_UINT64_C(0x993FE2C6, D07B7FAC), -236, -52},
+ {V8_2PART_UINT64_C(0xE45C10C4, 2A2B3B06), -210, -44},
+ {V8_2PART_UINT64_C(0xAA242499, 697392D3), -183, -36},
+ {V8_2PART_UINT64_C(0xFD87B5F2, 8300CA0E), -157, -28},
+ {V8_2PART_UINT64_C(0xBCE50864, 92111AEB), -130, -20},
+ {V8_2PART_UINT64_C(0x8CBCCC09, 6F5088CC), -103, -12},
+ {V8_2PART_UINT64_C(0xD1B71758, E219652C), -77, -4},
+ {V8_2PART_UINT64_C(0x9C400000, 00000000), -50, 4},
+ {V8_2PART_UINT64_C(0xE8D4A510, 00000000), -24, 12},
+ {V8_2PART_UINT64_C(0xAD78EBC5, AC620000), 3, 20},
+ {V8_2PART_UINT64_C(0x813F3978, F8940984), 30, 28},
+ {V8_2PART_UINT64_C(0xC097CE7B, C90715B3), 56, 36},
+ {V8_2PART_UINT64_C(0x8F7E32CE, 7BEA5C70), 83, 44},
+ {V8_2PART_UINT64_C(0xD5D238A4, ABE98068), 109, 52},
+ {V8_2PART_UINT64_C(0x9F4F2726, 179A2245), 136, 60},
+ {V8_2PART_UINT64_C(0xED63A231, D4C4FB27), 162, 68},
+ {V8_2PART_UINT64_C(0xB0DE6538, 8CC8ADA8), 189, 76},
+ {V8_2PART_UINT64_C(0x83C7088E, 1AAB65DB), 216, 84},
+ {V8_2PART_UINT64_C(0xC45D1DF9, 42711D9A), 242, 92},
+ {V8_2PART_UINT64_C(0x924D692C, A61BE758), 269, 100},
+ {V8_2PART_UINT64_C(0xDA01EE64, 1A708DEA), 295, 108},
+ {V8_2PART_UINT64_C(0xA26DA399, 9AEF774A), 322, 116},
+ {V8_2PART_UINT64_C(0xF209787B, B47D6B85), 348, 124},
+ {V8_2PART_UINT64_C(0xB454E4A1, 79DD1877), 375, 132},
+ {V8_2PART_UINT64_C(0x865B8692, 5B9BC5C2), 402, 140},
+ {V8_2PART_UINT64_C(0xC83553C5, C8965D3D), 428, 148},
+ {V8_2PART_UINT64_C(0x952AB45C, FA97A0B3), 455, 156},
+ {V8_2PART_UINT64_C(0xDE469FBD, 99A05FE3), 481, 164},
+ {V8_2PART_UINT64_C(0xA59BC234, DB398C25), 508, 172},
+ {V8_2PART_UINT64_C(0xF6C69A72, A3989F5C), 534, 180},
+ {V8_2PART_UINT64_C(0xB7DCBF53, 54E9BECE), 561, 188},
+ {V8_2PART_UINT64_C(0x88FCF317, F22241E2), 588, 196},
+ {V8_2PART_UINT64_C(0xCC20CE9B, D35C78A5), 614, 204},
+ {V8_2PART_UINT64_C(0x98165AF3, 7B2153DF), 641, 212},
+ {V8_2PART_UINT64_C(0xE2A0B5DC, 971F303A), 667, 220},
+ {V8_2PART_UINT64_C(0xA8D9D153, 5CE3B396), 694, 228},
+ {V8_2PART_UINT64_C(0xFB9B7CD9, A4A7443C), 720, 236},
+ {V8_2PART_UINT64_C(0xBB764C4C, A7A44410), 747, 244},
+ {V8_2PART_UINT64_C(0x8BAB8EEF, B6409C1A), 774, 252},
+ {V8_2PART_UINT64_C(0xD01FEF10, A657842C), 800, 260},
+ {V8_2PART_UINT64_C(0x9B10A4E5, E9913129), 827, 268},
+ {V8_2PART_UINT64_C(0xE7109BFB, A19C0C9D), 853, 276},
+ {V8_2PART_UINT64_C(0xAC2820D9, 623BF429), 880, 284},
+ {V8_2PART_UINT64_C(0x80444B5E, 7AA7CF85), 907, 292},
+ {V8_2PART_UINT64_C(0xBF21E440, 03ACDD2D), 933, 300},
+ {V8_2PART_UINT64_C(0x8E679C2F, 5E44FF8F), 960, 308},
+ {V8_2PART_UINT64_C(0xD433179D, 9C8CB841), 986, 316},
+ {V8_2PART_UINT64_C(0x9E19DB92, B4E31BA9), 1013, 324},
+ {V8_2PART_UINT64_C(0xEB96BF6E, BADF77D9), 1039, 332},
+ {V8_2PART_UINT64_C(0xAF87023B, 9BF0EE6B), 1066, 340},
};
#ifdef DEBUG
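
The cached-powers.cc hunk is purely cosmetic: every hex literal in the kCachedPowers table is re-cased to upper-case digits with the values untouched; for example, V8_2PART_UINT64_C(0xfa8fd5a0, 081c0288) and V8_2PART_UINT64_C(0xFA8FD5A0, 081C0288) denote the same 64-bit significand 0xFA8FD5A0081C0288.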
diff --git a/deps/v8/src/code-factory.cc b/deps/v8/src/code-factory.cc
index 245f2334f6..e5b72b6fab 100644
--- a/deps/v8/src/code-factory.cc
+++ b/deps/v8/src/code-factory.cc
@@ -30,16 +30,6 @@ Handle<Code> CodeFactory::RuntimeCEntry(Isolate* isolate, int result_size) {
}
// static
-Callable CodeFactory::LoadICProtoArray(Isolate* isolate,
- bool throw_if_nonexistent) {
- return Callable(
- throw_if_nonexistent
- ? BUILTIN_CODE(isolate, LoadICProtoArrayThrowIfNonexistent)
- : BUILTIN_CODE(isolate, LoadICProtoArray),
- LoadICProtoArrayDescriptor(isolate));
-}
-
-// static
Callable CodeFactory::ApiGetter(Isolate* isolate) {
CallApiGetterStub stub(isolate);
return make_callable(stub);
@@ -84,22 +74,6 @@ Callable CodeFactory::StoreOwnICInOptimizedCode(Isolate* isolate) {
}
// static
-Callable CodeFactory::StoreGlobalIC(Isolate* isolate,
- LanguageMode language_mode) {
- // TODO(ishell): Use StoreGlobalIC[Strict]Trampoline when it's ready.
- return Callable(BUILTIN_CODE(isolate, StoreICTrampoline),
- StoreDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::StoreGlobalICInOptimizedCode(Isolate* isolate,
- LanguageMode language_mode) {
- // TODO(ishell): Use StoreGlobalIC[Strict] when it's ready.
- return Callable(BUILTIN_CODE(isolate, StoreIC),
- StoreWithVectorDescriptor(isolate));
-}
-
-// static
Callable CodeFactory::BinaryOperation(Isolate* isolate, Operation op) {
switch (op) {
case Operation::kShiftRight:
diff --git a/deps/v8/src/code-factory.h b/deps/v8/src/code-factory.h
index d85ca5f073..079f16899a 100644
--- a/deps/v8/src/code-factory.h
+++ b/deps/v8/src/code-factory.h
@@ -24,13 +24,9 @@ class V8_EXPORT_PRIVATE CodeFactory final {
static Handle<Code> RuntimeCEntry(Isolate* isolate, int result_size = 1);
// Initial states for ICs.
- static Callable LoadICProtoArray(Isolate* isolate, bool throw_if_nonexistent);
static Callable LoadGlobalIC(Isolate* isolate, TypeofMode typeof_mode);
static Callable LoadGlobalICInOptimizedCode(Isolate* isolate,
TypeofMode typeof_mode);
- static Callable StoreGlobalIC(Isolate* isolate, LanguageMode mode);
- static Callable StoreGlobalICInOptimizedCode(Isolate* isolate,
- LanguageMode mode);
static Callable StoreOwnIC(Isolate* isolate);
static Callable StoreOwnICInOptimizedCode(Isolate* isolate);
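
The CodeFactory changes delete three helpers together with their declarations; each was only a thin binding of a builtin to its call descriptor, and the TODO comments in the removed bodies suggest they were placeholders obsoleted by the IC refactoring in this V8 roll. For reference, the removed signatures, copied from the hunks above:

    Callable LoadICProtoArray(Isolate* isolate, bool throw_if_nonexistent);
    Callable StoreGlobalIC(Isolate* isolate, LanguageMode mode);
    Callable StoreGlobalICInOptimizedCode(Isolate* isolate, LanguageMode mode);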
diff --git a/deps/v8/src/code-stub-assembler.cc b/deps/v8/src/code-stub-assembler.cc
index e36a5cc796..f98e7fe519 100644
--- a/deps/v8/src/code-stub-assembler.cc
+++ b/deps/v8/src/code-stub-assembler.cc
@@ -175,7 +175,9 @@ Node* CodeStubAssembler::SelectSmiConstant(Node* condition, Smi* true_value,
MachineRepresentation::kTaggedSigned);
}
-Node* CodeStubAssembler::NoContextConstant() { return SmiConstant(0); }
+Node* CodeStubAssembler::NoContextConstant() {
+ return SmiConstant(Context::kNoContext);
+}
#define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \
compiler::TNode<std::remove_reference<decltype( \
@@ -546,8 +548,8 @@ TNode<Object> CodeStubAssembler::NumberMax(SloppyTNode<Object> a,
// TODO(danno): This could be optimized by specifically handling smi cases.
VARIABLE(result, MachineRepresentation::kTagged);
Label done(this), greater_than_equal_a(this), greater_than_equal_b(this);
- GotoIfNumericGreaterThanOrEqual(a, b, &greater_than_equal_a);
- GotoIfNumericGreaterThanOrEqual(b, a, &greater_than_equal_b);
+ GotoIfNumberGreaterThanOrEqual(a, b, &greater_than_equal_a);
+ GotoIfNumberGreaterThanOrEqual(b, a, &greater_than_equal_b);
result.Bind(NanConstant());
Goto(&done);
BIND(&greater_than_equal_a);
@@ -565,8 +567,8 @@ TNode<Object> CodeStubAssembler::NumberMin(SloppyTNode<Object> a,
// TODO(danno): This could be optimized by specifically handling smi cases.
VARIABLE(result, MachineRepresentation::kTagged);
Label done(this), greater_than_equal_a(this), greater_than_equal_b(this);
- GotoIfNumericGreaterThanOrEqual(a, b, &greater_than_equal_a);
- GotoIfNumericGreaterThanOrEqual(b, a, &greater_than_equal_b);
+ GotoIfNumberGreaterThanOrEqual(a, b, &greater_than_equal_a);
+ GotoIfNumberGreaterThanOrEqual(b, a, &greater_than_equal_b);
result.Bind(NanConstant());
Goto(&done);
BIND(&greater_than_equal_a);
@@ -642,8 +644,9 @@ Node* CodeStubAssembler::SmiMod(Node* a, Node* b) {
return TNode<Object>::UncheckedCast(var_result.value());
}
-Node* CodeStubAssembler::SmiMul(Node* a, Node* b) {
- VARIABLE(var_result, MachineRepresentation::kTagged);
+TNode<Number> CodeStubAssembler::SmiMul(SloppyTNode<Smi> a,
+ SloppyTNode<Smi> b) {
+ TVARIABLE(Number, var_result);
VARIABLE(var_lhs_float64, MachineRepresentation::kFloat64);
VARIABLE(var_rhs_float64, MachineRepresentation::kFloat64);
Label return_result(this, &var_result);
@@ -668,7 +671,7 @@ Node* CodeStubAssembler::SmiMul(Node* a, Node* b) {
Branch(Word32Equal(answer, zero), &answer_zero, &answer_not_zero);
BIND(&answer_not_zero);
{
- var_result.Bind(ChangeInt32ToTagged(answer));
+ var_result = ChangeInt32ToTagged(answer);
Goto(&return_result);
}
BIND(&answer_zero);
@@ -679,12 +682,12 @@ Node* CodeStubAssembler::SmiMul(Node* a, Node* b) {
&if_should_be_zero);
BIND(&if_should_be_negative_zero);
{
- var_result.Bind(MinusZeroConstant());
+ var_result = MinusZeroConstant();
Goto(&return_result);
}
BIND(&if_should_be_zero);
{
- var_result.Bind(SmiConstant(0));
+ var_result = SmiConstant(0);
Goto(&return_result);
}
}
@@ -694,13 +697,12 @@ Node* CodeStubAssembler::SmiMul(Node* a, Node* b) {
var_lhs_float64.Bind(SmiToFloat64(a));
var_rhs_float64.Bind(SmiToFloat64(b));
Node* value = Float64Mul(var_lhs_float64.value(), var_rhs_float64.value());
- Node* result = AllocateHeapNumberWithValue(value);
- var_result.Bind(result);
+ var_result = AllocateHeapNumberWithValue(value);
Goto(&return_result);
}
BIND(&return_result);
- return var_result.value();
+ return var_result;
}
Node* CodeStubAssembler::TrySmiDiv(Node* dividend, Node* divisor,
@@ -898,7 +900,7 @@ void CodeStubAssembler::BranchIfFastJSArrayForCopy(Node* object, Node* context,
}
void CodeStubAssembler::GotoIfForceSlowPath(Label* if_true) {
-#if defined(DEBUG) || defined(ENABLE_FASTSLOW_SWITCH)
+#ifdef V8_ENABLE_FORCE_SLOW_PATH
Node* const force_slow_path_addr =
ExternalConstant(ExternalReference::force_slow_path(isolate()));
Node* const force_slow = Load(MachineType::Uint8(), force_slow_path_addr);
@@ -1534,14 +1536,15 @@ Node* CodeStubAssembler::LoadJSValueValue(Node* object) {
return LoadObjectField(object, JSValue::kValueOffset);
}
-Node* CodeStubAssembler::LoadWeakCellValueUnchecked(Node* weak_cell) {
+TNode<Object> CodeStubAssembler::LoadWeakCellValueUnchecked(Node* weak_cell) {
// TODO(ishell): fix callers.
return LoadObjectField(weak_cell, WeakCell::kValueOffset);
}
-Node* CodeStubAssembler::LoadWeakCellValue(Node* weak_cell, Label* if_cleared) {
+TNode<Object> CodeStubAssembler::LoadWeakCellValue(
+ SloppyTNode<WeakCell> weak_cell, Label* if_cleared) {
CSA_ASSERT(this, IsWeakCell(weak_cell));
- Node* value = LoadWeakCellValueUnchecked(weak_cell);
+ TNode<Object> value = LoadWeakCellValueUnchecked(weak_cell);
if (if_cleared != nullptr) {
GotoIf(WordEqual(value, IntPtrConstant(0)), if_cleared);
}
@@ -1624,17 +1627,16 @@ Node* CodeStubAssembler::LoadFixedTypedArrayElementAsTagged(
}
}
-Node* CodeStubAssembler::LoadFeedbackVectorSlot(Node* object,
- Node* slot_index_node,
- int additional_offset,
- ParameterMode parameter_mode) {
+TNode<Object> CodeStubAssembler::LoadFeedbackVectorSlot(
+ Node* object, Node* slot_index_node, int additional_offset,
+ ParameterMode parameter_mode) {
CSA_SLOW_ASSERT(this, IsFeedbackVector(object));
CSA_SLOW_ASSERT(this, MatchesParameterMode(slot_index_node, parameter_mode));
int32_t header_size =
FeedbackVector::kFeedbackSlotsOffset + additional_offset - kHeapObjectTag;
Node* offset = ElementOffsetFromIndex(slot_index_node, HOLEY_ELEMENTS,
parameter_mode, header_size);
- return Load(MachineType::AnyTagged(), object, offset);
+ return UncheckedCast<Object>(Load(MachineType::AnyTagged(), object, offset));
}
Node* CodeStubAssembler::LoadAndUntagToWord32FixedArrayElement(
@@ -1788,8 +1790,8 @@ Node* CodeStubAssembler::LoadJSFunctionPrototype(Node* function,
CSA_ASSERT(this, TaggedIsNotSmi(function));
CSA_ASSERT(this, IsJSFunction(function));
CSA_ASSERT(this, IsFunctionWithPrototypeSlotMap(LoadMap(function)));
- CSA_ASSERT(this, IsClearWord32(LoadMapBitField(LoadMap(function)),
- 1 << Map::kHasNonInstancePrototype));
+ CSA_ASSERT(this, IsClearWord32<Map::HasNonInstancePrototypeBit>(
+ LoadMapBitField(LoadMap(function))));
Node* proto_or_map =
LoadObjectField(function, JSFunction::kPrototypeOrInitialMapOffset);
GotoIf(IsTheHole(proto_or_map), if_bailout);
@@ -1943,10 +1945,10 @@ Node* CodeStubAssembler::EnsureArrayPushable(Node* receiver, Label* bailout) {
Comment("Disallow pushing onto prototypes");
Node* map = LoadMap(receiver);
Node* bit_field2 = LoadMapBitField2(map);
- int mask = static_cast<int>(Map::IsPrototypeMapBits::kMask) |
- (1 << Map::kIsExtensible);
+ int mask = Map::IsPrototypeMapBit::kMask | Map::IsExtensibleBit::kMask;
Node* test = Word32And(bit_field2, Int32Constant(mask));
- GotoIf(Word32NotEqual(test, Int32Constant(1 << Map::kIsExtensible)), bailout);
+ GotoIf(Word32NotEqual(test, Int32Constant(Map::IsExtensibleBit::kMask)),
+ bailout);
// Disallow pushing onto arrays in dictionary named property mode. We need
// to figure out whether the length property is still writable.
@@ -1994,7 +1996,10 @@ TNode<Smi> CodeStubAssembler::BuildAppendJSArray(ElementsKind kind,
// Resize the capacity of the fixed array if it doesn't fit.
TNode<IntPtrT> first = *arg_index;
- Node* growth = WordToParameter(IntPtrSub(args->GetLength(), first), mode);
+ Node* growth = WordToParameter(
+ IntPtrSub(UncheckedCast<IntPtrT>(args->GetLength(INTPTR_PARAMETERS)),
+ first),
+ mode);
PossiblyGrowElementsCapacity(mode, kind, array, var_length.value(),
&var_elements, growth, &pre_bailout);
@@ -2548,8 +2553,8 @@ void CodeStubAssembler::InitializeJSObjectFromMap(
void CodeStubAssembler::InitializeJSObjectBodyNoSlackTracking(
Node* object, Node* map, Node* instance_size, int start_offset) {
STATIC_ASSERT(Map::kNoSlackTracking == 0);
- CSA_ASSERT(this,
- IsClearWord32<Map::ConstructionCounter>(LoadMapBitField3(map)));
+ CSA_ASSERT(
+ this, IsClearWord32<Map::ConstructionCounterBits>(LoadMapBitField3(map)));
InitializeFieldsWithRoot(object, IntPtrConstant(start_offset), instance_size,
Heap::kUndefinedValueRootIndex);
}
@@ -2564,7 +2569,8 @@ void CodeStubAssembler::InitializeJSObjectBodyWithSlackTracking(
Node* bit_field3 = LoadMapBitField3(map);
Label end(this), slack_tracking(this), complete(this, Label::kDeferred);
STATIC_ASSERT(Map::kNoSlackTracking == 0);
- GotoIf(IsSetWord32<Map::ConstructionCounter>(bit_field3), &slack_tracking);
+ GotoIf(IsSetWord32<Map::ConstructionCounterBits>(bit_field3),
+ &slack_tracking);
Comment("No slack tracking");
InitializeJSObjectBodyNoSlackTracking(object, map, instance_size);
Goto(&end);
@@ -2574,9 +2580,9 @@ void CodeStubAssembler::InitializeJSObjectBodyWithSlackTracking(
Comment("Decrease construction counter");
// Slack tracking is only done on initial maps.
CSA_ASSERT(this, IsUndefined(LoadMapBackPointer(map)));
- STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
+ STATIC_ASSERT(Map::ConstructionCounterBits::kNext == 32);
Node* new_bit_field3 = Int32Sub(
- bit_field3, Int32Constant(1 << Map::ConstructionCounter::kShift));
+ bit_field3, Int32Constant(1 << Map::ConstructionCounterBits::kShift));
StoreObjectFieldNoWriteBarrier(map, Map::kBitField3Offset, new_bit_field3,
MachineRepresentation::kWord32);
STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
@@ -2595,7 +2601,9 @@ void CodeStubAssembler::InitializeJSObjectBodyWithSlackTracking(
InitializeFieldsWithRoot(object, IntPtrConstant(start_offset), used_size,
Heap::kUndefinedValueRootIndex);
- GotoIf(IsClearWord32<Map::ConstructionCounter>(new_bit_field3), &complete);
+ STATIC_ASSERT(Map::kNoSlackTracking == 0);
+ GotoIf(IsClearWord32<Map::ConstructionCounterBits>(new_bit_field3),
+ &complete);
Goto(&end);
}
@@ -3346,7 +3354,8 @@ Node* CodeStubAssembler::CalculateNewElementsCapacity(Node* old_capacity,
CSA_SLOW_ASSERT(this, MatchesParameterMode(old_capacity, mode));
Node* half_old_capacity = WordOrSmiShr(old_capacity, 1, mode);
Node* new_capacity = IntPtrOrSmiAdd(half_old_capacity, old_capacity, mode);
- Node* padding = IntPtrOrSmiConstant(16, mode);
+ Node* padding =
+ IntPtrOrSmiConstant(JSObject::kMinAddedElementsCapacity, mode);
return IntPtrOrSmiAdd(new_capacity, padding, mode);
}
@@ -3505,8 +3514,8 @@ Node* CodeStubAssembler::TruncateTaggedToFloat64(Node* context, Node* value) {
Node* CodeStubAssembler::TruncateTaggedToWord32(Node* context, Node* value) {
VARIABLE(var_result, MachineRepresentation::kWord32);
Label done(this);
- TaggedToWord32OrBigIntImpl<Feedback::kNone, Object::Conversion::kToNumber>(
- context, value, &done, &var_result);
+ TaggedToWord32OrBigIntImpl<Object::Conversion::kToNumber>(context, value,
+ &done, &var_result);
BIND(&done);
return var_result.value();
}
@@ -3518,7 +3527,7 @@ void CodeStubAssembler::TaggedToWord32OrBigInt(Node* context, Node* value,
Variable* var_word32,
Label* if_bigint,
Variable* var_bigint) {
- TaggedToWord32OrBigIntImpl<Feedback::kNone, Object::Conversion::kToNumeric>(
+ TaggedToWord32OrBigIntImpl<Object::Conversion::kToNumeric>(
context, value, if_number, var_word32, if_bigint, var_bigint);
}
@@ -3528,13 +3537,12 @@ void CodeStubAssembler::TaggedToWord32OrBigInt(Node* context, Node* value,
void CodeStubAssembler::TaggedToWord32OrBigIntWithFeedback(
Node* context, Node* value, Label* if_number, Variable* var_word32,
Label* if_bigint, Variable* var_bigint, Variable* var_feedback) {
- TaggedToWord32OrBigIntImpl<Feedback::kCollect,
- Object::Conversion::kToNumeric>(
+ TaggedToWord32OrBigIntImpl<Object::Conversion::kToNumeric>(
context, value, if_number, var_word32, if_bigint, var_bigint,
var_feedback);
}
-template <CodeStubAssembler::Feedback feedback, Object::Conversion conversion>
+template <Object::Conversion conversion>
void CodeStubAssembler::TaggedToWord32OrBigIntImpl(
Node* context, Node* value, Label* if_number, Variable* var_word32,
Label* if_bigint, Variable* var_bigint, Variable* var_feedback) {
@@ -3546,14 +3554,10 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl(
// We might need to loop after conversion.
VARIABLE(var_value, MachineRepresentation::kTagged, value);
- if (feedback == Feedback::kCollect) {
- var_feedback->Bind(SmiConstant(BinaryOperationFeedback::kNone));
- } else {
- DCHECK(var_feedback == nullptr);
- }
+ OverwriteFeedback(var_feedback, BinaryOperationFeedback::kNone);
Variable* loop_vars[] = {&var_value, var_feedback};
- int num_vars = feedback == Feedback::kCollect ? arraysize(loop_vars)
- : arraysize(loop_vars) - 1;
+ int num_vars =
+ var_feedback != nullptr ? arraysize(loop_vars) : arraysize(loop_vars) - 1;
Label loop(this, num_vars, loop_vars);
Goto(&loop);
BIND(&loop);
@@ -3565,11 +3569,7 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl(
// {value} is a Smi.
var_word32->Bind(SmiToWord32(value));
- if (feedback == Feedback::kCollect) {
- var_feedback->Bind(
- SmiOr(var_feedback->value(),
- SmiConstant(BinaryOperationFeedback::kSignedSmall)));
- }
+ CombineFeedback(var_feedback, BinaryOperationFeedback::kSignedSmall);
Goto(if_number);
BIND(&not_smi);
@@ -3582,7 +3582,7 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl(
// Not HeapNumber (or BigInt if conversion == kToNumeric).
{
- if (feedback == Feedback::kCollect) {
+ if (var_feedback != nullptr) {
// We do not require an Or with earlier feedback here because once we
// convert the value to a Numeric, we cannot reach this path. We can
// only reach this path on the first pass when the feedback is kNone.
@@ -3595,36 +3595,25 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl(
? Builtins::kNonNumberToNumeric
: Builtins::kNonNumberToNumber;
var_value.Bind(CallBuiltin(builtin, context, value));
- if (feedback == Feedback::kCollect) {
- var_feedback->Bind(SmiConstant(BinaryOperationFeedback::kAny));
- }
+ OverwriteFeedback(var_feedback, BinaryOperationFeedback::kAny);
Goto(&loop);
BIND(&is_oddball);
var_value.Bind(LoadObjectField(value, Oddball::kToNumberOffset));
- if (feedback == Feedback::kCollect) {
- var_feedback->Bind(
- SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
- }
+ OverwriteFeedback(var_feedback,
+ BinaryOperationFeedback::kNumberOrOddball);
Goto(&loop);
}
BIND(&is_heap_number);
var_word32->Bind(TruncateHeapNumberValueToWord32(value));
- if (feedback == Feedback::kCollect) {
- var_feedback->Bind(SmiOr(var_feedback->value(),
- SmiConstant(BinaryOperationFeedback::kNumber)));
- }
+ CombineFeedback(var_feedback, BinaryOperationFeedback::kNumber);
Goto(if_number);
if (conversion == Object::Conversion::kToNumeric) {
BIND(&is_bigint);
var_bigint->Bind(value);
- if (feedback == Feedback::kCollect) {
- var_feedback->Bind(
- SmiOr(var_feedback->value(),
- SmiConstant(BinaryOperationFeedback::kBigInt)));
- }
+ CombineFeedback(var_feedback, BinaryOperationFeedback::kBigInt);
Goto(if_bigint);
}
}
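
The pattern running through this hunk: the compile-time Feedback::kCollect/kNone template parameter is dropped in favor of a runtime null check on var_feedback, so a single instantiation serves callers with and without feedback collection. A plain-C++ sketch of that refactoring (Variable here is a stand-in, not the CSA type):

#include <cstdio>

struct Variable { int value = 0; };  // stand-in for a CSA Variable holding a Smi

void OverwriteFeedback(Variable* existing_feedback, int new_feedback) {
  if (existing_feedback == nullptr) return;  // caller collects no feedback
  existing_feedback->value = new_feedback;
}

void CombineFeedback(Variable* existing_feedback, int feedback) {
  if (existing_feedback == nullptr) return;
  existing_feedback->value |= feedback;  // or-in the newly observed kind
}

int main() {
  Variable fb;
  OverwriteFeedback(&fb, 1);    // e.g. kSignedSmall on the first pass
  CombineFeedback(&fb, 4);      // e.g. kNumber observed later
  CombineFeedback(nullptr, 4);  // no-op for feedback-free callers
  printf("%d\n", fb.value);     // 5
  return 0;
}

The cost is one runtime branch per site instead of a dead-code-eliminated template instantiation; the benefit is half as many generated stub bodies and no duplicated control flow.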
@@ -3822,9 +3811,7 @@ TNode<Float64T> CodeStubAssembler::ChangeNumberToFloat64(
}
TNode<UintPtrT> CodeStubAssembler::ChangeNonnegativeNumberToUintPtr(
- SloppyTNode<Number> value) {
- // TODO(tebbi): Remove assert once argument is TNode instead of SloppyTNode.
- CSA_SLOW_ASSERT(this, IsNumber(value));
+ TNode<Number> value) {
TVARIABLE(UintPtrT, result);
Label smi(this), done(this, &result);
GotoIf(TaggedIsSmi(value), &smi);
@@ -4032,43 +4019,30 @@ Node* CodeStubAssembler::InstanceTypeEqual(Node* instance_type, int type) {
return Word32Equal(instance_type, Int32Constant(type));
}
-Node* CodeStubAssembler::IsSpecialReceiverMap(Node* map) {
- CSA_SLOW_ASSERT(this, IsMap(map));
- Node* is_special = IsSpecialReceiverInstanceType(LoadMapInstanceType(map));
- uint32_t mask =
- 1 << Map::kHasNamedInterceptor | 1 << Map::kIsAccessCheckNeeded;
- USE(mask);
- // Interceptors or access checks imply special receiver.
- CSA_ASSERT(this,
- SelectConstant(IsSetWord32(LoadMapBitField(map), mask), is_special,
- Int32Constant(1), MachineRepresentation::kWord32));
- return is_special;
-}
-
TNode<BoolT> CodeStubAssembler::IsDictionaryMap(SloppyTNode<Map> map) {
CSA_SLOW_ASSERT(this, IsMap(map));
Node* bit_field3 = LoadMapBitField3(map);
- return IsSetWord32<Map::DictionaryMap>(bit_field3);
+ return IsSetWord32<Map::IsDictionaryMapBit>(bit_field3);
}
Node* CodeStubAssembler::IsExtensibleMap(Node* map) {
CSA_ASSERT(this, IsMap(map));
- return IsSetWord32(LoadMapBitField2(map), 1 << Map::kIsExtensible);
+ return IsSetWord32<Map::IsExtensibleBit>(LoadMapBitField2(map));
}
Node* CodeStubAssembler::IsCallableMap(Node* map) {
CSA_ASSERT(this, IsMap(map));
- return IsSetWord32(LoadMapBitField(map), 1 << Map::kIsCallable);
+ return IsSetWord32<Map::IsCallableBit>(LoadMapBitField(map));
}
Node* CodeStubAssembler::IsDeprecatedMap(Node* map) {
CSA_ASSERT(this, IsMap(map));
- return IsSetWord32<Map::Deprecated>(LoadMapBitField3(map));
+ return IsSetWord32<Map::IsDeprecatedBit>(LoadMapBitField3(map));
}
Node* CodeStubAssembler::IsUndetectableMap(Node* map) {
CSA_ASSERT(this, IsMap(map));
- return IsSetWord32(LoadMapBitField(map), 1 << Map::kIsUndetectable);
+ return IsSetWord32<Map::IsUndetectableBit>(LoadMapBitField(map));
}
Node* CodeStubAssembler::IsNoElementsProtectorCellInvalid() {
@@ -4104,7 +4078,7 @@ Node* CodeStubAssembler::IsCell(Node* object) {
Node* CodeStubAssembler::IsConstructorMap(Node* map) {
CSA_ASSERT(this, IsMap(map));
- return IsSetWord32(LoadMapBitField(map), 1 << Map::kIsConstructor);
+ return IsSetWord32<Map::IsConstructorBit>(LoadMapBitField(map));
}
Node* CodeStubAssembler::IsConstructor(Node* object) {
@@ -4113,7 +4087,7 @@ Node* CodeStubAssembler::IsConstructor(Node* object) {
Node* CodeStubAssembler::IsFunctionWithPrototypeSlotMap(Node* map) {
CSA_ASSERT(this, IsMap(map));
- return IsSetWord32(LoadMapBitField(map), 1 << Map::kHasPrototypeSlot);
+ return IsSetWord32<Map::HasPrototypeSlotBit>(LoadMapBitField(map));
}
Node* CodeStubAssembler::IsSpecialReceiverInstanceType(Node* instance_type) {
@@ -4500,13 +4474,12 @@ Node* CodeStubAssembler::IsNumberArrayIndex(Node* number) {
Label check_upper_bound(this), check_is_integer(this), out(this),
return_false(this);
- GotoIfNumericGreaterThanOrEqual(number, NumberConstant(0),
- &check_upper_bound);
+ GotoIfNumberGreaterThanOrEqual(number, NumberConstant(0), &check_upper_bound);
Goto(&return_false);
BIND(&check_upper_bound);
- GotoIfNumericGreaterThanOrEqual(number, NumberConstant(kMaxUInt32),
- &return_false);
+ GotoIfNumberGreaterThanOrEqual(number, NumberConstant(kMaxUInt32),
+ &return_false);
Goto(&check_is_integer);
BIND(&check_is_integer);
@@ -4525,14 +4498,14 @@ Node* CodeStubAssembler::IsNumberArrayIndex(Node* number) {
return var_result.value();
}
-TNode<Uint32T> CodeStubAssembler::StringCharCodeAt(SloppyTNode<String> string,
- SloppyTNode<IntPtrT> index) {
+TNode<Int32T> CodeStubAssembler::StringCharCodeAt(SloppyTNode<String> string,
+ SloppyTNode<IntPtrT> index) {
CSA_ASSERT(this, IsString(string));
CSA_ASSERT(this, IntPtrGreaterThanOrEqual(index, IntPtrConstant(0)));
CSA_ASSERT(this, IntPtrLessThan(index, LoadStringLengthAsWord(string)));
- VARIABLE(var_result, MachineRepresentation::kWord32);
+ TVARIABLE(Int32T, var_result);
Label return_result(this), if_runtime(this, Label::kDeferred),
if_stringistwobyte(this), if_stringisonebyte(this);
@@ -4550,14 +4523,16 @@ TNode<Uint32T> CodeStubAssembler::StringCharCodeAt(SloppyTNode<String> string,
BIND(&if_stringisonebyte);
{
- var_result.Bind(Load(MachineType::Uint8(), string_data, offset));
+ var_result =
+ UncheckedCast<Int32T>(Load(MachineType::Uint8(), string_data, offset));
Goto(&return_result);
}
BIND(&if_stringistwobyte);
{
- var_result.Bind(Load(MachineType::Uint16(), string_data,
- WordShl(offset, IntPtrConstant(1))));
+ var_result =
+ UncheckedCast<Int32T>(Load(MachineType::Uint16(), string_data,
+ WordShl(offset, IntPtrConstant(1))));
Goto(&return_result);
}
@@ -4565,15 +4540,15 @@ TNode<Uint32T> CodeStubAssembler::StringCharCodeAt(SloppyTNode<String> string,
{
Node* result = CallRuntime(Runtime::kStringCharCodeAt, NoContextConstant(),
string, SmiTag(index));
- var_result.Bind(SmiToWord32(result));
+ var_result = SmiToWord32(result);
Goto(&return_result);
}
BIND(&return_result);
- return UncheckedCast<Uint32T>(var_result.value());
+ return var_result;
}
-Node* CodeStubAssembler::StringFromCharCode(Node* code) {
+TNode<String> CodeStubAssembler::StringFromCharCode(TNode<Int32T> code) {
VARIABLE(var_result, MachineRepresentation::kTagged);
// Check if the {code} is a one-byte char code.
@@ -4627,7 +4602,7 @@ Node* CodeStubAssembler::StringFromCharCode(Node* code) {
BIND(&if_done);
CSA_ASSERT(this, IsString(var_result.value()));
- return var_result.value();
+ return CAST(var_result.value());
}
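
For reference, the one-byte/two-byte split in StringCharCodeAt above reduces to an index scaled by the character width: Latin-1 strings load a uint8 at offset i, two-byte strings load a uint16 at byte offset i << 1. A plain-C++ analogue:

#include <cstddef>
#include <cstdint>
#include <cstdio>

// data points at the string payload; one_byte selects the width.
int CharCodeAt(const void* data, std::size_t index, bool one_byte) {
  if (one_byte) {
    return static_cast<const uint8_t*>(data)[index];  // MachineType::Uint8
  }
  return static_cast<const uint16_t*>(data)[index];   // byte offset = index << 1
}

int main() {
  const uint8_t latin1[] = {'a', 'b'};
  const uint16_t two_byte[] = {0x2603, 'x'};  // U+2603 SNOWMAN
  printf("%d %d\n", CharCodeAt(latin1, 1, true), CharCodeAt(two_byte, 0, false));
  return 0;
}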
// A wrapper around CopyStringCharacters which determines the correct string
@@ -4787,7 +4762,7 @@ Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
// Substrings of length 1 are generated through CharCodeAt and FromCharCode.
BIND(&single_char);
{
- Node* char_code = StringCharCodeAt(string, SmiUntag(from));
+ TNode<Int32T> char_code = StringCharCodeAt(string, SmiUntag(from));
var_result.Bind(StringFromCharCode(char_code));
Goto(&end);
}
@@ -5161,8 +5136,8 @@ Node* CodeStubAssembler::StringAdd(Node* context, Node* left, Node* right,
return result.value();
}
-Node* CodeStubAssembler::StringFromCodePoint(Node* codepoint,
- UnicodeEncoding encoding) {
+TNode<String> CodeStubAssembler::StringFromCodePoint(TNode<Int32T> codepoint,
+ UnicodeEncoding encoding) {
VARIABLE(var_result, MachineRepresentation::kTagged, EmptyStringConstant());
Label if_isword16(this), if_isword32(this), return_result(this);
@@ -5194,7 +5169,7 @@ Node* CodeStubAssembler::StringFromCodePoint(Node* codepoint,
Int32Constant(0xDC00));
// codepoint = (trail << 16) | lead;
- codepoint = Word32Or(Word32Shl(trail, Int32Constant(16)), lead);
+ codepoint = Signed(Word32Or(Word32Shl(trail, Int32Constant(16)), lead));
break;
}
}
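
The Signed() wrap above is presumably needed because the packed surrogate pair can have its top bit set, so the Word32 must be reinterpreted as Int32T explicitly. A sketch of the packing itself, assuming standard UTF-16 surrogate derivation:

#include <cassert>
#include <cstdint>

// Derive UTF-16 surrogates for a supplementary-plane code point and
// pack them as (trail << 16) | lead, matching the store order used by
// the two-byte writer on a little-endian machine.
uint32_t PackSurrogatePair(uint32_t codepoint) {
  assert(codepoint > 0xFFFF && codepoint <= 0x10FFFF);
  uint32_t v = codepoint - 0x10000;
  uint32_t lead = 0xD800 + (v >> 10);     // high surrogate
  uint32_t trail = 0xDC00 + (v & 0x3FF);  // low surrogate
  return (trail << 16) | lead;            // top bit may end up set
}

int main() {
  assert(PackSurrogatePair(0x1F600) == 0xDE00D83Du);  // U+1F600
  return 0;
}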
@@ -5209,12 +5184,10 @@ Node* CodeStubAssembler::StringFromCodePoint(Node* codepoint,
}
BIND(&return_result);
- CSA_ASSERT(this, IsString(var_result.value()));
- return var_result.value();
+ return CAST(var_result.value());
}
-TNode<Number> CodeStubAssembler::StringToNumber(SloppyTNode<Context> context,
- SloppyTNode<String> input) {
+TNode<Number> CodeStubAssembler::StringToNumber(SloppyTNode<String> input) {
CSA_SLOW_ASSERT(this, IsString(input));
Label runtime(this, Label::kDeferred);
Label end(this);
@@ -5226,12 +5199,14 @@ TNode<Number> CodeStubAssembler::StringToNumber(SloppyTNode<Context> context,
GotoIf(IsSetWord32(hash, Name::kDoesNotContainCachedArrayIndexMask),
&runtime);
- var_result = SmiTag(DecodeWordFromWord32<String::ArrayIndexValueBits>(hash));
+ var_result =
+ SmiTag(Signed(DecodeWordFromWord32<String::ArrayIndexValueBits>(hash)));
Goto(&end);
BIND(&runtime);
{
- var_result = CAST(CallRuntime(Runtime::kStringToNumber, context, input));
+ var_result =
+ CAST(CallRuntime(Runtime::kStringToNumber, NoContextConstant(), input));
Goto(&end);
}
@@ -5239,7 +5214,7 @@ TNode<Number> CodeStubAssembler::StringToNumber(SloppyTNode<Context> context,
return var_result;
}
-Node* CodeStubAssembler::NumberToString(Node* context, Node* argument) {
+Node* CodeStubAssembler::NumberToString(Node* argument) {
VARIABLE(result, MachineRepresentation::kTagged);
Label runtime(this, Label::kDeferred), smi(this), done(this, &result);
@@ -5290,7 +5265,8 @@ Node* CodeStubAssembler::NumberToString(Node* context, Node* argument) {
BIND(&runtime);
{
// No cache entry, go to the runtime.
- result.Bind(CallRuntime(Runtime::kNumberToString, context, argument));
+ result.Bind(CallRuntime(Runtime::kNumberToStringSkipCache,
+ NoContextConstant(), argument));
}
Goto(&done);
@@ -5393,7 +5369,7 @@ Node* CodeStubAssembler::NonNumberToNumberOrNumeric(
BIND(&if_inputisstring);
{
// The {input} is a String, use the fast stub to convert it to a Number.
- var_result.Bind(StringToNumber(context, input));
+ var_result.Bind(StringToNumber(input));
Goto(&end);
}
@@ -5525,18 +5501,17 @@ TNode<Number> CodeStubAssembler::ToNumber(SloppyTNode<Context> context,
void CodeStubAssembler::TaggedToNumeric(Node* context, Node* value, Label* done,
Variable* var_numeric) {
- TaggedToNumeric<Feedback::kNone>(context, value, done, var_numeric);
+ TaggedToNumeric(context, value, done, var_numeric, nullptr);
}
void CodeStubAssembler::TaggedToNumericWithFeedback(Node* context, Node* value,
Label* done,
Variable* var_numeric,
Variable* var_feedback) {
- TaggedToNumeric<Feedback::kCollect>(context, value, done, var_numeric,
- var_feedback);
+ DCHECK_NOT_NULL(var_feedback);
+ TaggedToNumeric(context, value, done, var_numeric, var_feedback);
}
-template <CodeStubAssembler::Feedback feedback>
void CodeStubAssembler::TaggedToNumeric(Node* context, Node* value, Label* done,
Variable* var_numeric,
Variable* var_feedback) {
@@ -5551,34 +5526,24 @@ void CodeStubAssembler::TaggedToNumeric(Node* context, Node* value, Label* done,
// {value} is not a Numeric yet.
GotoIf(Word32Equal(instance_type, Int32Constant(ODDBALL_TYPE)), &if_oddball);
var_numeric->Bind(CallBuiltin(Builtins::kNonNumberToNumeric, context, value));
- if (feedback == Feedback::kCollect) {
- var_feedback->Bind(SmiConstant(BinaryOperationFeedback::kAny));
- }
+ OverwriteFeedback(var_feedback, BinaryOperationFeedback::kAny);
Goto(done);
BIND(&if_smi);
- if (feedback == Feedback::kCollect) {
- var_feedback->Bind(SmiConstant(BinaryOperationFeedback::kSignedSmall));
- }
+ OverwriteFeedback(var_feedback, BinaryOperationFeedback::kSignedSmall);
Goto(done);
BIND(&if_heapnumber);
- if (feedback == Feedback::kCollect) {
- var_feedback->Bind(SmiConstant(BinaryOperationFeedback::kNumber));
- }
+ OverwriteFeedback(var_feedback, BinaryOperationFeedback::kNumber);
Goto(done);
BIND(&if_bigint);
- if (feedback == Feedback::kCollect) {
- var_feedback->Bind(SmiConstant(BinaryOperationFeedback::kBigInt));
- }
+ OverwriteFeedback(var_feedback, BinaryOperationFeedback::kBigInt);
Goto(done);
BIND(&if_oddball);
+ OverwriteFeedback(var_feedback, BinaryOperationFeedback::kNumberOrOddball);
var_numeric->Bind(LoadObjectField(value, Oddball::kToNumberOffset));
- if (feedback == Feedback::kCollect) {
- var_feedback->Bind(SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
- }
Goto(done);
}
@@ -5702,7 +5667,7 @@ TNode<String> CodeStubAssembler::ToString(SloppyTNode<Context> context,
Branch(IsHeapNumberMap(input_map), &is_number, &not_heap_number);
BIND(&is_number);
- result.Bind(NumberToString(context, input));
+ result.Bind(NumberToString(input));
Goto(&done);
BIND(&not_heap_number);
@@ -5774,8 +5739,8 @@ Node* CodeStubAssembler::ToSmiIndex(Node* const input, Node* const context,
Branch(IsUndefined(result.value()), &return_zero, &defined);
BIND(&defined);
- result.Bind(ToInteger(context, result.value(),
- CodeStubAssembler::kTruncateMinusZero));
+ result.Bind(ToInteger_Inline(CAST(context), CAST(result.value()),
+ CodeStubAssembler::kTruncateMinusZero));
GotoIfNot(TaggedIsSmi(result.value()), range_error);
CSA_ASSERT(this, TaggedIsSmi(result.value()));
Goto(&negative_check);
@@ -5799,8 +5764,8 @@ Node* CodeStubAssembler::ToSmiLength(Node* input, Node* const context,
Branch(TaggedIsSmi(result.value()), &negative_check, &to_integer);
BIND(&to_integer);
- result.Bind(ToInteger(context, result.value(),
- CodeStubAssembler::kTruncateMinusZero));
+ result.Bind(ToInteger_Inline(CAST(context), CAST(result.value()),
+ CodeStubAssembler::kTruncateMinusZero));
GotoIf(TaggedIsSmi(result.value()), &negative_check);
// result.value() can still be a negative HeapNumber here.
Branch(IsTrue(CallBuiltin(Builtins::kLessThan, context, result.value(),
@@ -5828,6 +5793,16 @@ Node* CodeStubAssembler::ToLength_Inline(Node* const context,
MachineRepresentation::kTagged);
}
+TNode<Number> CodeStubAssembler::ToInteger_Inline(
+ TNode<Context> context, TNode<Object> input, ToIntegerTruncationMode mode) {
+ Builtins::Name builtin = (mode == kNoTruncation)
+ ? Builtins::kToInteger
+ : Builtins::kToInteger_TruncateMinusZero;
+ return CAST(Select(TaggedIsSmi(input), [=] { return input; },
+ [=] { return CallBuiltin(builtin, context, input); },
+ MachineRepresentation::kTagged));
+}
+
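
The new ToInteger_Inline is a Select: a Smi is already an integer, so only non-Smi inputs pay for the builtin call, with the truncation mode choosing between the two ToInteger builtins. A rough stand-in for that control flow, with deliberately simplified types:

#include <cstdio>

struct Value { bool is_smi; double num; };  // crude stand-in for a tagged value

Value ToIntegerBuiltin(Value v) {  // stand-in for CallBuiltin(kToInteger*, ...)
  return {true, static_cast<double>(static_cast<long long>(v.num))};
}

Value ToInteger_Inline(Value input) {
  // Select(TaggedIsSmi(input), input, CallBuiltin(...)): Smis are
  // already integers, so only heap values pay for the call.
  return input.is_smi ? input : ToIntegerBuiltin(input);
}

int main() {
  printf("%.0f\n", ToInteger_Inline({false, 3.7}).num);  // 3
  printf("%.0f\n", ToInteger_Inline({true, 42.0}).num);  // 42
  return 0;
}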
TNode<Number> CodeStubAssembler::ToInteger(SloppyTNode<Context> context,
SloppyTNode<Object> input,
ToIntegerTruncationMode mode) {
@@ -5886,6 +5861,7 @@ TNode<Number> CodeStubAssembler::ToInteger(SloppyTNode<Context> context,
}
BIND(&out);
+ if (mode == kTruncateMinusZero) CSA_ASSERT(this, IsNumberNormalized(var_arg));
return CAST(var_arg);
}
@@ -5895,8 +5871,10 @@ TNode<Uint32T> CodeStubAssembler::DecodeWord32(SloppyTNode<Word32T> word32,
Word32And(word32, Int32Constant(mask)), static_cast<int>(shift)));
}
-Node* CodeStubAssembler::DecodeWord(Node* word, uint32_t shift, uint32_t mask) {
- return WordShr(WordAnd(word, IntPtrConstant(mask)), static_cast<int>(shift));
+TNode<UintPtrT> CodeStubAssembler::DecodeWord(SloppyTNode<WordT> word,
+ uint32_t shift, uint32_t mask) {
+ return Unsigned(
+ WordShr(WordAnd(word, IntPtrConstant(mask)), static_cast<int>(shift)));
}
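
DecodeWord now returns TNode<UintPtrT>, making explicit that the result of the mask-then-shift is unsigned. In plain C++:

#include <cassert>
#include <cstdint>

uintptr_t DecodeWord(uintptr_t word, uint32_t shift, uint32_t mask) {
  return (word & static_cast<uintptr_t>(mask)) >> shift;  // mask, then shift
}

int main() {
  assert(DecodeWord(0xABCD, 4, 0xF0) == 0xC);  // field in bits 4..7
  return 0;
}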
Node* CodeStubAssembler::UpdateWord(Node* word, Node* value, uint32_t shift,
@@ -6187,14 +6165,14 @@ Node* CodeStubAssembler::ComputeIntegerHash(Node* key, Node* seed) {
// See v8::internal::ComputeIntegerHash()
Node* hash = TruncateWordToWord32(key);
hash = Word32Xor(hash, seed);
- hash = Int32Add(Word32Xor(hash, Int32Constant(0xffffffff)),
+ hash = Int32Add(Word32Xor(hash, Int32Constant(0xFFFFFFFF)),
Word32Shl(hash, Int32Constant(15)));
hash = Word32Xor(hash, Word32Shr(hash, Int32Constant(12)));
hash = Int32Add(hash, Word32Shl(hash, Int32Constant(2)));
hash = Word32Xor(hash, Word32Shr(hash, Int32Constant(4)));
hash = Int32Mul(hash, Int32Constant(2057));
hash = Word32Xor(hash, Word32Shr(hash, Int32Constant(16)));
- return Word32And(hash, Int32Constant(0x3fffffff));
+ return Word32And(hash, Int32Constant(0x3FFFFFFF));
}
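
Aside from the hex-literal capitalization (0xffffffff to 0xFFFFFFFF), the hash itself is unchanged in this hunk; written as ordinary C++ it reads:

#include <cstdint>
#include <cstdio>

uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;
  hash = (hash ^ 0xFFFFFFFF) + (hash << 15);  // i.e. ~hash + (hash << 15)
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;
  hash = hash ^ (hash >> 16);
  return hash & 0x3FFFFFFF;  // keep it a non-negative Smi-sized value
}

int main() {
  printf("0x%x\n", ComputeIntegerHash(42, 0));
  return 0;
}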
void CodeStubAssembler::NumberDictionaryLookup(Node* dictionary,
@@ -6391,36 +6369,38 @@ Node* CodeStubAssembler::DescriptorArrayNumberOfEntries(Node* descriptors) {
descriptors, IntPtrConstant(DescriptorArray::kDescriptorLengthIndex));
}
-namespace {
-
-Node* DescriptorNumberToIndex(CodeStubAssembler* a, Node* descriptor_number) {
- Node* descriptor_size = a->Int32Constant(DescriptorArray::kEntrySize);
- Node* index = a->Int32Mul(descriptor_number, descriptor_size);
- return a->ChangeInt32ToIntPtr(index);
+Node* CodeStubAssembler::DescriptorNumberToIndex(
+ SloppyTNode<Uint32T> descriptor_number) {
+ Node* descriptor_size = Int32Constant(DescriptorArray::kEntrySize);
+ Node* index = Int32Mul(descriptor_number, descriptor_size);
+ return ChangeInt32ToIntPtr(index);
}
-} // namespace
-
Node* CodeStubAssembler::DescriptorArrayToKeyIndex(Node* descriptor_number) {
return IntPtrAdd(IntPtrConstant(DescriptorArray::ToKeyIndex(0)),
- DescriptorNumberToIndex(this, descriptor_number));
+ DescriptorNumberToIndex(descriptor_number));
}
Node* CodeStubAssembler::DescriptorArrayGetSortedKeyIndex(
Node* descriptors, Node* descriptor_number) {
- const int details_offset = DescriptorArray::ToDetailsIndex(0) * kPointerSize;
- Node* details = LoadAndUntagToWord32FixedArrayElement(
- descriptors, DescriptorNumberToIndex(this, descriptor_number),
- details_offset);
+ Node* details = DescriptorArrayGetDetails(
+ TNode<DescriptorArray>::UncheckedCast(descriptors),
+ TNode<Uint32T>::UncheckedCast(descriptor_number));
return DecodeWord32<PropertyDetails::DescriptorPointer>(details);
}
Node* CodeStubAssembler::DescriptorArrayGetKey(Node* descriptors,
Node* descriptor_number) {
const int key_offset = DescriptorArray::ToKeyIndex(0) * kPointerSize;
- return LoadFixedArrayElement(descriptors,
- DescriptorNumberToIndex(this, descriptor_number),
- key_offset);
+ return LoadFixedArrayElement(
+ descriptors, DescriptorNumberToIndex(descriptor_number), key_offset);
+}
+
+TNode<Uint32T> CodeStubAssembler::DescriptorArrayGetDetails(
+ TNode<DescriptorArray> descriptors, TNode<Uint32T> descriptor_number) {
+ const int details_offset = DescriptorArray::ToDetailsIndex(0) * kPointerSize;
+ return TNode<Uint32T>::UncheckedCast(LoadAndUntagToWord32FixedArrayElement(
+ descriptors, DescriptorNumberToIndex(descriptor_number), details_offset));
}
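
DescriptorNumberToIndex, moved from the anonymous namespace onto the class above, is just a multiply by the entry size; key and details then sit at fixed offsets inside each entry. A sketch with hypothetical constants (the real values live in DescriptorArray):

#include <cstdio>

// Hypothetical layout, for illustration only: a header followed by
// fixed-size entries of (key, details, value).
constexpr int kEntrySize = 3;
constexpr int kFirstIndex = 2;

int DescriptorNumberToIndex(int descriptor_number) {
  return descriptor_number * kEntrySize;
}
int ToKeyIndex(int d)     { return kFirstIndex + DescriptorNumberToIndex(d) + 0; }
int ToDetailsIndex(int d) { return kFirstIndex + DescriptorNumberToIndex(d) + 1; }

int main() {
  printf("key@%d details@%d\n", ToKeyIndex(2), ToDetailsIndex(2));  // key@8 details@9
  return 0;
}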
void CodeStubAssembler::DescriptorLookupBinary(Node* unique_name,
@@ -6531,13 +6511,13 @@ void CodeStubAssembler::TryLookupProperty(
&if_objectisspecial);
uint32_t mask =
- 1 << Map::kHasNamedInterceptor | 1 << Map::kIsAccessCheckNeeded;
+ Map::HasNamedInterceptorBit::kMask | Map::IsAccessCheckNeededBit::kMask;
CSA_ASSERT(this, Word32BinaryNot(IsSetWord32(LoadMapBitField(map), mask)));
USE(mask);
Node* bit_field3 = LoadMapBitField3(map);
Label if_isfastmap(this), if_isslowmap(this);
- Branch(IsSetWord32<Map::DictionaryMap>(bit_field3), &if_isslowmap,
+ Branch(IsSetWord32<Map::IsDictionaryMapBit>(bit_field3), &if_isslowmap,
&if_isfastmap);
BIND(&if_isfastmap);
{
@@ -6563,7 +6543,8 @@ void CodeStubAssembler::TryLookupProperty(
// Handle interceptors and access checks in runtime.
Node* bit_field = LoadMapBitField(map);
- int mask = 1 << Map::kHasNamedInterceptor | 1 << Map::kIsAccessCheckNeeded;
+ int mask =
+ Map::HasNamedInterceptorBit::kMask | Map::IsAccessCheckNeededBit::kMask;
GotoIf(IsSetWord32(bit_field, mask), if_bailout);
Node* dictionary = LoadSlowProperties(object);
@@ -6618,12 +6599,22 @@ void CodeStubAssembler::LoadPropertyFromFastObject(Node* object, Node* map,
Variable* var_value) {
DCHECK_EQ(MachineRepresentation::kWord32, var_details->rep());
DCHECK_EQ(MachineRepresentation::kTagged, var_value->rep());
- Comment("[ LoadPropertyFromFastObject");
Node* details =
LoadDetailsByKeyIndex<DescriptorArray>(descriptors, name_index);
var_details->Bind(details);
+ LoadPropertyFromFastObject(object, map, descriptors, name_index, details,
+ var_value);
+}
+
+void CodeStubAssembler::LoadPropertyFromFastObject(Node* object, Node* map,
+ Node* descriptors,
+ Node* name_index,
+ Node* details,
+ Variable* var_value) {
+ Comment("[ LoadPropertyFromFastObject");
+
Node* location = DecodeWord32<PropertyDetails::LocationField>(details);
Label if_in_field(this), if_in_descriptor(this), done(this);
@@ -6826,13 +6817,12 @@ Node* CodeStubAssembler::CallGetterIfAccessor(Node* value, Node* details,
// if (!(has_prototype_slot() && !has_non_instance_prototype())) use
// generic property loading mechanism.
- int has_prototype_slot_mask = 1 << Map::kHasPrototypeSlot;
- int has_non_instance_prototype_mask = 1 << Map::kHasNonInstancePrototype;
GotoIfNot(
- Word32Equal(Word32And(LoadMapBitField(receiver_map),
- Int32Constant(has_prototype_slot_mask |
- has_non_instance_prototype_mask)),
- Int32Constant(has_prototype_slot_mask)),
+ Word32Equal(
+ Word32And(LoadMapBitField(receiver_map),
+ Int32Constant(Map::HasPrototypeSlotBit::kMask |
+ Map::HasNonInstancePrototypeBit::kMask)),
+ Int32Constant(Map::HasPrototypeSlotBit::kMask)),
if_bailout);
var_value.Bind(LoadJSFunctionPrototype(receiver, if_bailout));
Goto(&done);
@@ -7061,6 +7051,35 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
}
}
+void CodeStubAssembler::BranchIfMaybeSpecialIndex(TNode<String> name_string,
+ Label* if_maybe_special_index,
+ Label* if_not_special_index) {
+ // TODO(cwhan.tunz): Implement more fast cases.
+
+ // If a name is empty or too long, it's not a special index.
+ // Max length of a canonical double: -X.XXXXXXXXXXXXXXXXXe-XXX
+ const int kBufferSize = 24;
+ TNode<Smi> string_length = LoadStringLengthAsSmi(name_string);
+ GotoIf(SmiEqual(string_length, SmiConstant(0)), if_not_special_index);
+ GotoIf(SmiGreaterThan(string_length, SmiConstant(kBufferSize)),
+ if_not_special_index);
+
+ // If the first character of the name is not a digit or '-', or we can't
+ // match it to Infinity or NaN, then this is not a special index.
+ TNode<Int32T> first_char = StringCharCodeAt(name_string, IntPtrConstant(0));
+ // If the name starts with '-', it can be a negative index.
+ GotoIf(Word32Equal(first_char, Int32Constant('-')), if_maybe_special_index);
+ // If the name starts with 'I', it can be "Infinity".
+ GotoIf(Word32Equal(first_char, Int32Constant('I')), if_maybe_special_index);
+ // If the name starts with 'N', it can be "NaN".
+ GotoIf(Word32Equal(first_char, Int32Constant('N')), if_maybe_special_index);
+ // Finally, if the first character is not a digit either, then we are sure
+ // that the name is not a special index.
+ GotoIf(Uint32LessThan(first_char, Int32Constant('0')), if_not_special_index);
+ GotoIf(Uint32LessThan(Int32Constant('9'), first_char), if_not_special_index);
+ Goto(if_maybe_special_index);
+}
+
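
BranchIfMaybeSpecialIndex is a cheap pre-filter, not a parser: it can only rule names out, never confirm them. The same heuristic as a plain C++ predicate, using the kBufferSize == 24 bound from this hunk:

#include <cstring>

// Pre-filter only: a false return is definitive, a true return means
// "might be a special index, do the full check".
bool MaybeSpecialIndex(const char* name) {
  std::size_t len = std::strlen(name);
  if (len == 0 || len > 24) return false;  // longer than any canonical double
  char c = name[0];
  return c == '-' || c == 'I' || c == 'N' || (c >= '0' && c <= '9');
}

int main() {
  return (MaybeSpecialIndex("NaN") && MaybeSpecialIndex("-0") &&
          !MaybeSpecialIndex("foo"))
             ? 0
             : 1;
}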
void CodeStubAssembler::TryPrototypeChainLookup(
Node* receiver, Node* key, const LookupInHolder& lookup_property_in_holder,
const LookupInHolder& lookup_element_in_holder, Label* if_end,
@@ -7108,15 +7127,22 @@ void CodeStubAssembler::TryPrototypeChainLookup(
Node* holder_map = var_holder_map.value();
Node* holder_instance_type = var_holder_instance_type.value();
- Label next_proto(this);
+ Label next_proto(this), check_integer_indexed_exotic(this);
lookup_property_in_holder(receiver, var_holder.value(), holder_map,
holder_instance_type, var_unique.value(),
- &next_proto, if_bailout);
- BIND(&next_proto);
+ &check_integer_indexed_exotic, if_bailout);
- // Bailout if it can be an integer indexed exotic case.
- GotoIf(InstanceTypeEqual(holder_instance_type, JS_TYPED_ARRAY_TYPE),
- if_bailout);
+ BIND(&check_integer_indexed_exotic);
+ {
+ // Bailout if it can be an integer indexed exotic case.
+ GotoIfNot(InstanceTypeEqual(holder_instance_type, JS_TYPED_ARRAY_TYPE),
+ &next_proto);
+ GotoIfNot(IsString(var_unique.value()), &next_proto);
+ BranchIfMaybeSpecialIndex(CAST(var_unique.value()), if_bailout,
+ &next_proto);
+ }
+
+ BIND(&next_proto);
Node* proto = LoadMapPrototype(holder_map);
@@ -7192,8 +7218,8 @@ Node* CodeStubAssembler::HasInPrototypeChain(Node* context, Node* object,
GotoIf(InstanceTypeEqual(object_instance_type, JS_PROXY_TYPE),
&return_runtime);
Node* object_bitfield = LoadMapBitField(object_map);
- int mask =
- 1 << Map::kHasNamedInterceptor | 1 << Map::kIsAccessCheckNeeded;
+ int mask = Map::HasNamedInterceptorBit::kMask |
+ Map::IsAccessCheckNeededBit::kMask;
Branch(IsSetWord32(object_bitfield, mask), &return_runtime,
&if_objectisdirect);
}
@@ -7252,12 +7278,12 @@ Node* CodeStubAssembler::OrdinaryHasInstance(Node* context, Node* callable,
// Goto runtime if {callable} is not a constructor or has
// a non-instance "prototype".
Node* callable_bitfield = LoadMapBitField(callable_map);
- GotoIfNot(
- Word32Equal(Word32And(callable_bitfield,
- Int32Constant((1 << Map::kHasNonInstancePrototype) |
- (1 << Map::kIsConstructor))),
- Int32Constant(1 << Map::kIsConstructor)),
- &return_runtime);
+ GotoIfNot(Word32Equal(
+ Word32And(callable_bitfield,
+ Int32Constant(Map::HasNonInstancePrototypeBit::kMask |
+ Map::IsConstructorBit::kMask)),
+ Int32Constant(Map::IsConstructorBit::kMask)),
+ &return_runtime);
// Get the "prototype" (or initial map) of the {callable}.
Node* callable_prototype =
@@ -7326,7 +7352,7 @@ Node* CodeStubAssembler::ElementOffsetFromIndex(Node* index_node,
? index_node
: ((element_size_shift > 0)
? WordShl(index_node, IntPtrConstant(element_size_shift))
- : WordShr(index_node, IntPtrConstant(-element_size_shift)));
+ : WordSar(index_node, IntPtrConstant(-element_size_shift)));
return IntPtrAdd(IntPtrConstant(base_size), shifted_index);
}
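
The one-character change from WordShr to WordSar in this hunk matters for Smi-mode indices: a negative element_size_shift means the pre-shifted index must be shifted right, and a logical shift would turn a negative index into a huge positive offset. A sketch (right-shifting a negative signed value is arithmetic on all mainstream compilers, which is what WordSar guarantees):

#include <cassert>
#include <cstdint>

intptr_t ShiftedIndex(intptr_t smi_index, int element_size_shift) {
  // element_size_shift < 0: shift right, preserving the sign.
  return smi_index >> (-element_size_shift);
}

int main() {
  assert(ShiftedIndex(-32, -2) == -8);
  // The old logical shift (WordShr) would have produced a huge
  // positive offset instead:
  assert(static_cast<uintptr_t>(-32) >> 2 != static_cast<uintptr_t>(-8));
  return 0;
}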
@@ -7377,8 +7403,22 @@ void CodeStubAssembler::ReportFeedbackUpdate(
#endif // V8_TRACE_FEEDBACK_UPDATES
}
+void CodeStubAssembler::OverwriteFeedback(Variable* existing_feedback,
+ int new_feedback) {
+ if (existing_feedback == nullptr) return;
+ existing_feedback->Bind(SmiConstant(new_feedback));
+}
+
+void CodeStubAssembler::CombineFeedback(Variable* existing_feedback,
+ int feedback) {
+ if (existing_feedback == nullptr) return;
+ existing_feedback->Bind(
+ SmiOr(existing_feedback->value(), SmiConstant(feedback)));
+}
+
void CodeStubAssembler::CombineFeedback(Variable* existing_feedback,
Node* feedback) {
+ if (existing_feedback == nullptr) return;
existing_feedback->Bind(SmiOr(existing_feedback->value(), feedback));
}
@@ -7524,15 +7564,16 @@ Node* CodeStubAssembler::EmitKeyedSloppyArguments(Node* receiver, Node* key,
return var_result.value();
}
-Node* CodeStubAssembler::LoadScriptContext(Node* context, int context_index) {
- Node* native_context = LoadNativeContext(context);
- Node* script_context_table =
- LoadContextElement(native_context, Context::SCRIPT_CONTEXT_TABLE_INDEX);
+TNode<Context> CodeStubAssembler::LoadScriptContext(
+ TNode<Context> context, TNode<IntPtrT> context_index) {
+ TNode<Context> native_context = LoadNativeContext(context);
+ TNode<ScriptContextTable> script_context_table = CAST(
+ LoadContextElement(native_context, Context::SCRIPT_CONTEXT_TABLE_INDEX));
- int offset =
- ScriptContextTable::GetContextOffset(context_index) - kHeapObjectTag;
- return Load(MachineType::AnyTagged(), script_context_table,
- IntPtrConstant(offset));
+ Node* script_context = LoadFixedArrayElement(
+ script_context_table, context_index,
+ ScriptContextTable::kFirstContextSlotIndex * kPointerSize);
+ return CAST(script_context);
}
namespace {
@@ -7567,7 +7608,7 @@ void CodeStubAssembler::StoreElement(Node* elements, ElementsKind kind,
if (IsFixedTypedArrayElementsKind(kind)) {
if (kind == UINT8_CLAMPED_ELEMENTS) {
CSA_ASSERT(this,
- Word32Equal(value, Word32And(Int32Constant(0xff), value)));
+ Word32Equal(value, Word32And(Int32Constant(0xFF), value)));
}
Node* offset = ElementOffsetFromIndex(index, kind, mode, 0);
MachineRepresentation rep = ElementsKindToMachineRepresentation(kind);
@@ -8029,8 +8070,8 @@ Node* CodeStubAssembler::BuildFastLoop(
? MachineType::PointerRepresentation()
: MachineRepresentation::kTaggedSigned;
VARIABLE(var, index_rep, start_index);
- VariableList vars_copy(vars, zone());
- vars_copy.Add(&var, zone());
+ VariableList vars_copy(vars.begin(), vars.end(), zone());
+ vars_copy.push_back(&var);
Label loop(this, vars_copy);
Label after_loop(this);
// Introduce an explicit second check of the termination condition before the
@@ -8135,109 +8176,86 @@ void CodeStubAssembler::InitializeFieldsWithRoot(
CodeStubAssembler::IndexAdvanceMode::kPre);
}
-void CodeStubAssembler::BranchIfNumericRelationalComparison(
- Operation op, Node* lhs, Node* rhs, Label* if_true, Label* if_false) {
- CSA_SLOW_ASSERT(this, IsNumber(lhs));
- CSA_SLOW_ASSERT(this, IsNumber(rhs));
+void CodeStubAssembler::BranchIfNumberRelationalComparison(
+ Operation op, Node* left, Node* right, Label* if_true, Label* if_false) {
+ CSA_SLOW_ASSERT(this, IsNumber(left));
+ CSA_SLOW_ASSERT(this, IsNumber(right));
- Label end(this);
- VARIABLE(result, MachineRepresentation::kTagged);
-
- // Shared entry for floating point comparison.
- Label do_fcmp(this);
- VARIABLE(var_fcmp_lhs, MachineRepresentation::kFloat64);
- VARIABLE(var_fcmp_rhs, MachineRepresentation::kFloat64);
+ Label do_float_comparison(this);
+ TVARIABLE(Float64T, var_left_float);
+ TVARIABLE(Float64T, var_right_float);
- // Check if the {lhs} is a Smi or a HeapObject.
- Label if_lhsissmi(this), if_lhsisnotsmi(this);
- Branch(TaggedIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
+ Label if_left_smi(this), if_left_not_smi(this);
+ Branch(TaggedIsSmi(left), &if_left_smi, &if_left_not_smi);
- BIND(&if_lhsissmi);
+ BIND(&if_left_smi);
{
- // Check if {rhs} is a Smi or a HeapObject.
- Label if_rhsissmi(this), if_rhsisnotsmi(this);
- Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+ Label if_right_not_smi(this);
+ GotoIfNot(TaggedIsSmi(right), &if_right_not_smi);
- BIND(&if_rhsissmi);
- {
- // Both {lhs} and {rhs} are Smi, so just perform a fast Smi comparison.
- switch (op) {
- case Operation::kLessThan:
- BranchIfSmiLessThan(lhs, rhs, if_true, if_false);
- break;
- case Operation::kLessThanOrEqual:
- BranchIfSmiLessThanOrEqual(lhs, rhs, if_true, if_false);
- break;
- case Operation::kGreaterThan:
- BranchIfSmiLessThan(rhs, lhs, if_true, if_false);
- break;
- case Operation::kGreaterThanOrEqual:
- BranchIfSmiLessThanOrEqual(rhs, lhs, if_true, if_false);
- break;
- default:
- UNREACHABLE();
- }
+ // Both {left} and {right} are Smi, so just perform a fast Smi comparison.
+ switch (op) {
+ case Operation::kLessThan:
+ BranchIfSmiLessThan(left, right, if_true, if_false);
+ break;
+ case Operation::kLessThanOrEqual:
+ BranchIfSmiLessThanOrEqual(left, right, if_true, if_false);
+ break;
+ case Operation::kGreaterThan:
+ BranchIfSmiLessThan(right, left, if_true, if_false);
+ break;
+ case Operation::kGreaterThanOrEqual:
+ BranchIfSmiLessThanOrEqual(right, left, if_true, if_false);
+ break;
+ default:
+ UNREACHABLE();
}
- BIND(&if_rhsisnotsmi);
+ BIND(&if_right_not_smi);
{
- CSA_ASSERT(this, IsHeapNumber(rhs));
- // Convert the {lhs} and {rhs} to floating point values, and
- // perform a floating point comparison.
- var_fcmp_lhs.Bind(SmiToFloat64(lhs));
- var_fcmp_rhs.Bind(LoadHeapNumberValue(rhs));
- Goto(&do_fcmp);
+ CSA_ASSERT(this, IsHeapNumber(right));
+ var_left_float = SmiToFloat64(left);
+ var_right_float = LoadHeapNumberValue(right);
+ Goto(&do_float_comparison);
}
}
- BIND(&if_lhsisnotsmi);
+ BIND(&if_left_not_smi);
{
- CSA_ASSERT(this, IsHeapNumber(lhs));
+ CSA_ASSERT(this, IsHeapNumber(left));
+ var_left_float = LoadHeapNumberValue(left);
- // Check if {rhs} is a Smi or a HeapObject.
- Label if_rhsissmi(this), if_rhsisnotsmi(this);
- Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+ Label if_right_not_smi(this);
+ GotoIfNot(TaggedIsSmi(right), &if_right_not_smi);
+ var_right_float = SmiToFloat64(right);
+ Goto(&do_float_comparison);
- BIND(&if_rhsissmi);
+ BIND(&if_right_not_smi);
{
- // Convert the {lhs} and {rhs} to floating point values, and
- // perform a floating point comparison.
- var_fcmp_lhs.Bind(LoadHeapNumberValue(lhs));
- var_fcmp_rhs.Bind(SmiToFloat64(rhs));
- Goto(&do_fcmp);
- }
-
- BIND(&if_rhsisnotsmi);
- {
- CSA_ASSERT(this, IsHeapNumber(rhs));
-
- // Convert the {lhs} and {rhs} to floating point values, and
- // perform a floating point comparison.
- var_fcmp_lhs.Bind(LoadHeapNumberValue(lhs));
- var_fcmp_rhs.Bind(LoadHeapNumberValue(rhs));
- Goto(&do_fcmp);
+ CSA_ASSERT(this, IsHeapNumber(right));
+ var_right_float = LoadHeapNumberValue(right);
+ Goto(&do_float_comparison);
}
}
- BIND(&do_fcmp);
+ BIND(&do_float_comparison);
{
- // Load the {lhs} and {rhs} floating point values.
- Node* lhs = var_fcmp_lhs.value();
- Node* rhs = var_fcmp_rhs.value();
-
- // Perform a fast floating point comparison.
switch (op) {
case Operation::kLessThan:
- Branch(Float64LessThan(lhs, rhs), if_true, if_false);
+ Branch(Float64LessThan(var_left_float, var_right_float), if_true,
+ if_false);
break;
case Operation::kLessThanOrEqual:
- Branch(Float64LessThanOrEqual(lhs, rhs), if_true, if_false);
+ Branch(Float64LessThanOrEqual(var_left_float, var_right_float), if_true,
+ if_false);
break;
case Operation::kGreaterThan:
- Branch(Float64GreaterThan(lhs, rhs), if_true, if_false);
+ Branch(Float64GreaterThan(var_left_float, var_right_float), if_true,
+ if_false);
break;
case Operation::kGreaterThanOrEqual:
- Branch(Float64GreaterThanOrEqual(lhs, rhs), if_true, if_false);
+ Branch(Float64GreaterThanOrEqual(var_left_float, var_right_float),
+ if_true, if_false);
break;
default:
UNREACHABLE();
@@ -8245,11 +8263,11 @@ void CodeStubAssembler::BranchIfNumericRelationalComparison(
}
}
-void CodeStubAssembler::GotoIfNumericGreaterThanOrEqual(Node* lhs, Node* rhs,
- Label* if_true) {
+void CodeStubAssembler::GotoIfNumberGreaterThanOrEqual(Node* left, Node* right,
+ Label* if_true) {
Label if_false(this);
- BranchIfNumericRelationalComparison(Operation::kGreaterThanOrEqual, lhs, rhs,
- if_true, &if_false);
+ BranchIfNumberRelationalComparison(Operation::kGreaterThanOrEqual, left,
+ right, if_true, &if_false);
BIND(&if_false);
}
@@ -8271,423 +8289,354 @@ Operation Reverse(Operation op) {
}
} // anonymous namespace
-Node* CodeStubAssembler::RelationalComparison(Operation op, Node* lhs,
- Node* rhs, Node* context,
+Node* CodeStubAssembler::RelationalComparison(Operation op, Node* left,
+ Node* right, Node* context,
Variable* var_type_feedback) {
- Label return_true(this), return_false(this), end(this);
- VARIABLE(result, MachineRepresentation::kTagged);
-
- // Shared entry for floating point comparison.
- Label do_fcmp(this);
- VARIABLE(var_fcmp_lhs, MachineRepresentation::kFloat64);
- VARIABLE(var_fcmp_rhs, MachineRepresentation::kFloat64);
+ Label return_true(this), return_false(this), do_float_comparison(this),
+ end(this);
+ TVARIABLE(Oddball, var_result); // Actually only "true" or "false".
+ TVARIABLE(Float64T, var_left_float);
+ TVARIABLE(Float64T, var_right_float);
// We might need to loop several times due to ToPrimitive and/or ToNumeric
// conversions.
- VARIABLE(var_lhs, MachineRepresentation::kTagged, lhs);
- VARIABLE(var_rhs, MachineRepresentation::kTagged, rhs);
- VariableList loop_variable_list({&var_lhs, &var_rhs}, zone());
+ VARIABLE(var_left, MachineRepresentation::kTagged, left);
+ VARIABLE(var_right, MachineRepresentation::kTagged, right);
+ VariableList loop_variable_list({&var_left, &var_right}, zone());
if (var_type_feedback != nullptr) {
// Initialize the type feedback to None. The current feedback is combined
// with the previous feedback.
var_type_feedback->Bind(SmiConstant(CompareOperationFeedback::kNone));
- loop_variable_list.Add(var_type_feedback, zone());
+ loop_variable_list.push_back(var_type_feedback);
}
Label loop(this, loop_variable_list);
Goto(&loop);
BIND(&loop);
{
- // Load the current {lhs} and {rhs} values.
- lhs = var_lhs.value();
- rhs = var_rhs.value();
+ left = var_left.value();
+ right = var_right.value();
- // Check if the {lhs} is a Smi or a HeapObject.
- Label if_lhsissmi(this), if_lhsisnotsmi(this);
- Branch(TaggedIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
+ Label if_left_smi(this), if_left_not_smi(this);
+ Branch(TaggedIsSmi(left), &if_left_smi, &if_left_not_smi);
- BIND(&if_lhsissmi);
+ BIND(&if_left_smi);
{
- Label if_rhsissmi(this), if_rhsisheapnumber(this),
- if_rhsisbigint(this, Label::kDeferred),
- if_rhsisnotnumeric(this, Label::kDeferred);
- GotoIf(TaggedIsSmi(rhs), &if_rhsissmi);
- Node* rhs_map = LoadMap(rhs);
- GotoIf(IsHeapNumberMap(rhs_map), &if_rhsisheapnumber);
- Node* rhs_instance_type = LoadMapInstanceType(rhs_map);
- Branch(IsBigIntInstanceType(rhs_instance_type), &if_rhsisbigint,
- &if_rhsisnotnumeric);
+ Label if_right_smi(this), if_right_heapnumber(this),
+ if_right_bigint(this, Label::kDeferred),
+ if_right_not_numeric(this, Label::kDeferred);
+ GotoIf(TaggedIsSmi(right), &if_right_smi);
+ Node* right_map = LoadMap(right);
+ GotoIf(IsHeapNumberMap(right_map), &if_right_heapnumber);
+ Node* right_instance_type = LoadMapInstanceType(right_map);
+ Branch(IsBigIntInstanceType(right_instance_type), &if_right_bigint,
+ &if_right_not_numeric);
- BIND(&if_rhsissmi);
+ BIND(&if_right_smi);
{
- // Both {lhs} and {rhs} are Smi, so just perform a fast Smi comparison.
- if (var_type_feedback != nullptr) {
- CombineFeedback(var_type_feedback,
- SmiConstant(CompareOperationFeedback::kSignedSmall));
- }
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kSignedSmall);
switch (op) {
case Operation::kLessThan:
- BranchIfSmiLessThan(lhs, rhs, &return_true, &return_false);
+ BranchIfSmiLessThan(left, right, &return_true, &return_false);
break;
case Operation::kLessThanOrEqual:
- BranchIfSmiLessThanOrEqual(lhs, rhs, &return_true, &return_false);
+ BranchIfSmiLessThanOrEqual(left, right, &return_true,
+ &return_false);
break;
case Operation::kGreaterThan:
- BranchIfSmiLessThan(rhs, lhs, &return_true, &return_false);
+ BranchIfSmiLessThan(right, left, &return_true, &return_false);
break;
case Operation::kGreaterThanOrEqual:
- BranchIfSmiLessThanOrEqual(rhs, lhs, &return_true, &return_false);
+ BranchIfSmiLessThanOrEqual(right, left, &return_true,
+ &return_false);
break;
default:
UNREACHABLE();
}
}
- BIND(&if_rhsisheapnumber);
+ BIND(&if_right_heapnumber);
{
- // Convert the {lhs} and {rhs} to floating point values, and
- // perform a floating point comparison.
- if (var_type_feedback != nullptr) {
- CombineFeedback(var_type_feedback,
- SmiConstant(CompareOperationFeedback::kNumber));
- }
- var_fcmp_lhs.Bind(SmiToFloat64(lhs));
- var_fcmp_rhs.Bind(LoadHeapNumberValue(rhs));
- Goto(&do_fcmp);
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber);
+ var_left_float = SmiToFloat64(left);
+ var_right_float = LoadHeapNumberValue(right);
+ Goto(&do_float_comparison);
}
- BIND(&if_rhsisbigint);
+ BIND(&if_right_bigint);
{
- // The {lhs} is a Smi and {rhs} is a BigInt.
- if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(SmiConstant(CompareOperationFeedback::kAny));
- }
- result.Bind(CallRuntime(Runtime::kBigIntCompareToNumber,
- NoContextConstant(), SmiConstant(Reverse(op)),
- rhs, lhs));
+ OverwriteFeedback(var_type_feedback, CompareOperationFeedback::kAny);
+ var_result = CAST(CallRuntime(Runtime::kBigIntCompareToNumber,
+ NoContextConstant(),
+ SmiConstant(Reverse(op)), right, left));
Goto(&end);
}
- BIND(&if_rhsisnotnumeric);
+ BIND(&if_right_not_numeric);
{
- // The {lhs} is a Smi and {rhs} is not a Numeric.
- if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(SmiConstant(CompareOperationFeedback::kAny));
- }
- // Convert the {rhs} to a Numeric; we don't need to perform the
- // dedicated ToPrimitive(rhs, hint Number) operation, as the
- // ToNumeric(rhs) will by itself already invoke ToPrimitive with
+ OverwriteFeedback(var_type_feedback, CompareOperationFeedback::kAny);
+ // Convert {right} to a Numeric; we don't need to perform the
+ // dedicated ToPrimitive(right, hint Number) operation, as the
+ // ToNumeric(right) will by itself already invoke ToPrimitive with
// a Number hint.
- var_rhs.Bind(CallBuiltin(Builtins::kNonNumberToNumeric, context, rhs));
+ var_right.Bind(
+ CallBuiltin(Builtins::kNonNumberToNumeric, context, right));
Goto(&loop);
}
}
- BIND(&if_lhsisnotsmi);
+ BIND(&if_left_not_smi);
{
- Node* lhs_map = LoadMap(lhs);
+ Node* left_map = LoadMap(left);
- // Check if {rhs} is a Smi or a HeapObject.
- Label if_rhsissmi(this), if_rhsisnotsmi(this);
- Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+ Label if_right_smi(this), if_right_not_smi(this);
+ Branch(TaggedIsSmi(right), &if_right_smi, &if_right_not_smi);
- BIND(&if_rhsissmi);
+ BIND(&if_right_smi);
{
- Label if_lhsisheapnumber(this), if_lhsisbigint(this, Label::kDeferred),
- if_lhsisnotnumeric(this, Label::kDeferred);
- GotoIf(IsHeapNumberMap(lhs_map), &if_lhsisheapnumber);
- Node* lhs_instance_type = LoadMapInstanceType(lhs_map);
- Branch(IsBigIntInstanceType(lhs_instance_type), &if_lhsisbigint,
- &if_lhsisnotnumeric);
-
- BIND(&if_lhsisheapnumber);
+ Label if_left_heapnumber(this), if_left_bigint(this, Label::kDeferred),
+ if_left_not_numeric(this, Label::kDeferred);
+ GotoIf(IsHeapNumberMap(left_map), &if_left_heapnumber);
+ Node* left_instance_type = LoadMapInstanceType(left_map);
+ Branch(IsBigIntInstanceType(left_instance_type), &if_left_bigint,
+ &if_left_not_numeric);
+
+ BIND(&if_left_heapnumber);
{
- // Convert the {lhs} and {rhs} to floating point values, and
- // perform a floating point comparison.
- if (var_type_feedback != nullptr) {
- CombineFeedback(var_type_feedback,
- SmiConstant(CompareOperationFeedback::kNumber));
- }
- var_fcmp_lhs.Bind(LoadHeapNumberValue(lhs));
- var_fcmp_rhs.Bind(SmiToFloat64(rhs));
- Goto(&do_fcmp);
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber);
+ var_left_float = LoadHeapNumberValue(left);
+ var_right_float = SmiToFloat64(right);
+ Goto(&do_float_comparison);
}
- BIND(&if_lhsisbigint);
+ BIND(&if_left_bigint);
{
- if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kAny));
- }
- result.Bind(CallRuntime(Runtime::kBigIntCompareToNumber,
- NoContextConstant(), SmiConstant(op), lhs,
- rhs));
+ OverwriteFeedback(var_type_feedback, CompareOperationFeedback::kAny);
+ var_result = CAST(CallRuntime(Runtime::kBigIntCompareToNumber,
+ NoContextConstant(), SmiConstant(op),
+ left, right));
Goto(&end);
}
- BIND(&if_lhsisnotnumeric);
+ BIND(&if_left_not_numeric);
{
- // The {lhs} is not a Numeric and {rhs} is an Smi.
- if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kAny));
- }
- // Convert the {lhs} to a Numeric; we don't need to perform the
- // dedicated ToPrimitive(lhs, hint Number) operation, as the
- // ToNumeric(lhs) will by itself already invoke ToPrimitive with
+ OverwriteFeedback(var_type_feedback, CompareOperationFeedback::kAny);
+ // Convert {left} to a Numeric; we don't need to perform the
+ // dedicated ToPrimitive(left, hint Number) operation, as the
+ // ToNumeric(left) will by itself already invoke ToPrimitive with
// a Number hint.
- var_lhs.Bind(
- CallBuiltin(Builtins::kNonNumberToNumeric, context, lhs));
+ var_left.Bind(
+ CallBuiltin(Builtins::kNonNumberToNumeric, context, left));
Goto(&loop);
}
}
- BIND(&if_rhsisnotsmi);
+ BIND(&if_right_not_smi);
{
- // Load the map of {rhs}.
- Node* rhs_map = LoadMap(rhs);
+ Node* right_map = LoadMap(right);
- // Further analyze {lhs}.
- Label if_lhsisheapnumber(this), if_lhsisbigint(this, Label::kDeferred),
- if_lhsisstring(this), if_lhsisother(this, Label::kDeferred);
- GotoIf(IsHeapNumberMap(lhs_map), &if_lhsisheapnumber);
- Node* lhs_instance_type = LoadMapInstanceType(lhs_map);
- GotoIf(IsBigIntInstanceType(lhs_instance_type), &if_lhsisbigint);
- Branch(IsStringInstanceType(lhs_instance_type), &if_lhsisstring,
- &if_lhsisother);
+ Label if_left_heapnumber(this), if_left_bigint(this, Label::kDeferred),
+ if_left_string(this), if_left_other(this, Label::kDeferred);
+ GotoIf(IsHeapNumberMap(left_map), &if_left_heapnumber);
+ Node* left_instance_type = LoadMapInstanceType(left_map);
+ GotoIf(IsBigIntInstanceType(left_instance_type), &if_left_bigint);
+ Branch(IsStringInstanceType(left_instance_type), &if_left_string,
+ &if_left_other);
- BIND(&if_lhsisheapnumber);
+ BIND(&if_left_heapnumber);
{
- // Further inspect {rhs}.
- Label if_rhsisheapnumber(this),
- if_rhsisbigint(this, Label::kDeferred),
- if_rhsisnotnumeric(this, Label::kDeferred);
- GotoIf(WordEqual(rhs_map, lhs_map), &if_rhsisheapnumber);
- Node* rhs_instance_type = LoadMapInstanceType(rhs_map);
- Branch(IsBigIntInstanceType(rhs_instance_type), &if_rhsisbigint,
- &if_rhsisnotnumeric);
-
- BIND(&if_rhsisheapnumber);
+ Label if_right_heapnumber(this),
+ if_right_bigint(this, Label::kDeferred),
+ if_right_not_numeric(this, Label::kDeferred);
+ GotoIf(WordEqual(right_map, left_map), &if_right_heapnumber);
+ Node* right_instance_type = LoadMapInstanceType(right_map);
+ Branch(IsBigIntInstanceType(right_instance_type), &if_right_bigint,
+ &if_right_not_numeric);
+
+ BIND(&if_right_heapnumber);
{
- // Convert the {lhs} and {rhs} to floating point values, and
- // perform a floating point comparison.
- if (var_type_feedback != nullptr) {
- CombineFeedback(var_type_feedback,
- SmiConstant(CompareOperationFeedback::kNumber));
- }
- var_fcmp_lhs.Bind(LoadHeapNumberValue(lhs));
- var_fcmp_rhs.Bind(LoadHeapNumberValue(rhs));
- Goto(&do_fcmp);
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kNumber);
+ var_left_float = LoadHeapNumberValue(left);
+ var_right_float = LoadHeapNumberValue(right);
+ Goto(&do_float_comparison);
}
- BIND(&if_rhsisbigint);
+ BIND(&if_right_bigint);
{
- if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kAny));
- }
- result.Bind(CallRuntime(Runtime::kBigIntCompareToNumber,
- NoContextConstant(),
- SmiConstant(Reverse(op)), rhs, lhs));
+ OverwriteFeedback(var_type_feedback,
+ CompareOperationFeedback::kAny);
+ var_result = CAST(CallRuntime(
+ Runtime::kBigIntCompareToNumber, NoContextConstant(),
+ SmiConstant(Reverse(op)), right, left));
Goto(&end);
}
- BIND(&if_rhsisnotnumeric);
+ BIND(&if_right_not_numeric);
{
- // The {lhs} is a HeapNumber and {rhs} is not a Numeric.
- if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kAny));
- }
- // Convert the {rhs} to a Numeric; we don't need to perform
- // dedicated ToPrimitive(rhs, hint Number) operation, as the
- // ToNumeric(rhs) will by itself already invoke ToPrimitive with
+ OverwriteFeedback(var_type_feedback,
+ CompareOperationFeedback::kAny);
+ // Convert {right} to a Numeric; we don't need to perform
+ // dedicated ToPrimitive(right, hint Number) operation, as the
+ // ToNumeric(right) will by itself already invoke ToPrimitive with
// a Number hint.
- var_rhs.Bind(
- CallBuiltin(Builtins::kNonNumberToNumeric, context, rhs));
+ var_right.Bind(
+ CallBuiltin(Builtins::kNonNumberToNumeric, context, right));
Goto(&loop);
}
}
- BIND(&if_lhsisbigint);
+ BIND(&if_left_bigint);
{
- if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kAny));
- }
-
- Label if_rhsisheapnumber(this), if_rhsisbigint(this),
- if_rhsisnotnumeric(this);
- GotoIf(IsHeapNumberMap(rhs_map), &if_rhsisheapnumber);
- Node* rhs_instance_type = LoadMapInstanceType(rhs_map);
- Branch(IsBigIntInstanceType(rhs_instance_type), &if_rhsisbigint,
- &if_rhsisnotnumeric);
-
- BIND(&if_rhsisheapnumber);
+ Label if_right_heapnumber(this), if_right_bigint(this),
+ if_right_not_numeric(this);
+ GotoIf(IsHeapNumberMap(right_map), &if_right_heapnumber);
+ Node* right_instance_type = LoadMapInstanceType(right_map);
+ Branch(IsBigIntInstanceType(right_instance_type), &if_right_bigint,
+ &if_right_not_numeric);
+
+ BIND(&if_right_heapnumber);
{
- result.Bind(CallRuntime(Runtime::kBigIntCompareToNumber,
- NoContextConstant(), SmiConstant(op), lhs,
- rhs));
+ OverwriteFeedback(var_type_feedback,
+ CompareOperationFeedback::kAny);
+ var_result = CAST(CallRuntime(Runtime::kBigIntCompareToNumber,
+ NoContextConstant(), SmiConstant(op),
+ left, right));
Goto(&end);
}
- BIND(&if_rhsisbigint);
+ BIND(&if_right_bigint);
{
- result.Bind(CallRuntime(Runtime::kBigIntCompareToBigInt,
- NoContextConstant(), SmiConstant(op), lhs,
- rhs));
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kBigInt);
+ var_result = CAST(CallRuntime(Runtime::kBigIntCompareToBigInt,
+ NoContextConstant(), SmiConstant(op),
+ left, right));
Goto(&end);
}
- BIND(&if_rhsisnotnumeric);
+ BIND(&if_right_not_numeric);
{
- // Convert the {rhs} to a Numeric; we don't need to perform
- // dedicated ToPrimitive(rhs, hint Number) operation, as the
- // ToNumeric(rhs) will by itself already invoke ToPrimitive with
+ OverwriteFeedback(var_type_feedback,
+ CompareOperationFeedback::kAny);
+ // Convert {right} to a Numeric; we don't need to perform
+ // dedicated ToPrimitive(right, hint Number) operation, as the
+ // ToNumeric(right) will by itself already invoke ToPrimitive with
// a Number hint.
- var_rhs.Bind(
- CallBuiltin(Builtins::kNonNumberToNumeric, context, rhs));
+ var_right.Bind(
+ CallBuiltin(Builtins::kNonNumberToNumeric, context, right));
Goto(&loop);
}
}
- BIND(&if_lhsisstring);
+ BIND(&if_left_string);
{
- // Load the instance type of {rhs}.
- Node* rhs_instance_type = LoadMapInstanceType(rhs_map);
-
- // Check if {rhs} is also a String.
- Label if_rhsisstring(this, Label::kDeferred),
- if_rhsisnotstring(this, Label::kDeferred);
- Branch(IsStringInstanceType(rhs_instance_type), &if_rhsisstring,
- &if_rhsisnotstring);
-
- BIND(&if_rhsisstring);
- {
- // Both {lhs} and {rhs} are strings.
- if (var_type_feedback != nullptr) {
- CombineFeedback(var_type_feedback,
- SmiConstant(CompareOperationFeedback::kString));
- }
- switch (op) {
- case Operation::kLessThan:
- result.Bind(
- CallBuiltin(Builtins::kStringLessThan, context, lhs, rhs));
- Goto(&end);
- break;
- case Operation::kLessThanOrEqual:
- result.Bind(CallBuiltin(Builtins::kStringLessThanOrEqual,
- context, lhs, rhs));
- Goto(&end);
- break;
- case Operation::kGreaterThan:
- result.Bind(CallBuiltin(Builtins::kStringGreaterThan, context,
- lhs, rhs));
- Goto(&end);
- break;
- case Operation::kGreaterThanOrEqual:
- result.Bind(CallBuiltin(Builtins::kStringGreaterThanOrEqual,
- context, lhs, rhs));
- Goto(&end);
- break;
- default:
- UNREACHABLE();
- }
+ Node* right_instance_type = LoadMapInstanceType(right_map);
+
+ Label if_right_not_string(this, Label::kDeferred);
+ GotoIfNot(IsStringInstanceType(right_instance_type),
+ &if_right_not_string);
+
+ // Both {left} and {right} are strings.
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kString);
+ Builtins::Name builtin;
+ switch (op) {
+ case Operation::kLessThan:
+ builtin = Builtins::kStringLessThan;
+ break;
+ case Operation::kLessThanOrEqual:
+ builtin = Builtins::kStringLessThanOrEqual;
+ break;
+ case Operation::kGreaterThan:
+ builtin = Builtins::kStringGreaterThan;
+ break;
+ case Operation::kGreaterThanOrEqual:
+ builtin = Builtins::kStringGreaterThanOrEqual;
+ break;
+ default:
+ UNREACHABLE();
}
+ var_result = CAST(CallBuiltin(builtin, context, left, right));
+ Goto(&end);
- BIND(&if_rhsisnotstring);
+ BIND(&if_right_not_string);
{
- // The {lhs} is a String and {rhs} is not a String.
- if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kAny));
- }
- // The {lhs} is a String, while {rhs} isn't. So we call
- // ToPrimitive(rhs, hint Number) if {rhs} is a receiver, or
- // ToNumeric(lhs) and then ToNumeric(rhs) in the other cases.
+ OverwriteFeedback(var_type_feedback,
+ CompareOperationFeedback::kAny);
+ // {left} is a String, while {right} isn't. So we call
+ // ToPrimitive(right, hint Number) if {right} is a receiver, or
+ // ToNumeric(left) and then ToNumeric(right) in the other cases.
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- Label if_rhsisreceiver(this, Label::kDeferred),
- if_rhsisnotreceiver(this, Label::kDeferred);
- Branch(IsJSReceiverInstanceType(rhs_instance_type),
- &if_rhsisreceiver, &if_rhsisnotreceiver);
+ Label if_right_receiver(this, Label::kDeferred);
+ GotoIf(IsJSReceiverInstanceType(right_instance_type),
+ &if_right_receiver);
+
+ var_left.Bind(
+ CallBuiltin(Builtins::kNonNumberToNumeric, context, left));
+ var_right.Bind(CallBuiltin(Builtins::kToNumeric, context, right));
+ Goto(&loop);
- BIND(&if_rhsisreceiver);
+ BIND(&if_right_receiver);
{
- // Convert {rhs} to a primitive first passing Number hint.
Callable callable = CodeFactory::NonPrimitiveToPrimitive(
isolate(), ToPrimitiveHint::kNumber);
- var_rhs.Bind(CallStub(callable, context, rhs));
- Goto(&loop);
- }
-
- BIND(&if_rhsisnotreceiver);
- {
- // Convert both {lhs} and {rhs} to Numeric.
- var_lhs.Bind(
- CallBuiltin(Builtins::kNonNumberToNumeric, context, lhs));
- var_rhs.Bind(CallBuiltin(Builtins::kToNumeric, context, rhs));
+ var_right.Bind(CallStub(callable, context, right));
Goto(&loop);
}
}
}
- BIND(&if_lhsisother);
+ BIND(&if_left_other);
{
- // The {lhs} is neither a Numeric nor a String, and {rhs} is not
- // an Smi.
+ // {left} is neither a Numeric nor a String, and {right} is not a Smi.
if (var_type_feedback != nullptr) {
- // Collect NumberOrOddball feedback if {lhs} is an Oddball
- // and {rhs} is either a HeapNumber or Oddball. Otherwise collect
+ // Collect NumberOrOddball feedback if {left} is an Oddball
+ // and {right} is either a HeapNumber or Oddball. Otherwise collect
// Any feedback.
Label collect_any_feedback(this), collect_oddball_feedback(this),
collect_feedback_done(this);
- GotoIfNot(InstanceTypeEqual(lhs_instance_type, ODDBALL_TYPE),
+ GotoIfNot(InstanceTypeEqual(left_instance_type, ODDBALL_TYPE),
&collect_any_feedback);
- Node* rhs_instance_type = LoadMapInstanceType(rhs_map);
- GotoIf(InstanceTypeEqual(rhs_instance_type, HEAP_NUMBER_TYPE),
- &collect_oddball_feedback);
- Branch(InstanceTypeEqual(rhs_instance_type, ODDBALL_TYPE),
+ GotoIf(IsHeapNumberMap(right_map), &collect_oddball_feedback);
+ Node* right_instance_type = LoadMapInstanceType(right_map);
+ Branch(InstanceTypeEqual(right_instance_type, ODDBALL_TYPE),
&collect_oddball_feedback, &collect_any_feedback);
BIND(&collect_oddball_feedback);
{
- CombineFeedback(
- var_type_feedback,
- SmiConstant(CompareOperationFeedback::kNumberOrOddball));
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kNumberOrOddball);
Goto(&collect_feedback_done);
}
BIND(&collect_any_feedback);
{
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kAny));
+ OverwriteFeedback(var_type_feedback,
+ CompareOperationFeedback::kAny);
Goto(&collect_feedback_done);
}
BIND(&collect_feedback_done);
}
- // If {lhs} is a receiver, we must call ToPrimitive(lhs, hint Number).
- // Otherwise we must call ToNumeric(lhs) and then ToNumeric(rhs).
+ // If {left} is a receiver, call ToPrimitive(left, hint Number).
+ // Otherwise call ToNumeric(left) and then ToNumeric(right).
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- Label if_lhsisreceiver(this, Label::kDeferred),
- if_lhsisnotreceiver(this, Label::kDeferred);
- Branch(IsJSReceiverInstanceType(lhs_instance_type), &if_lhsisreceiver,
- &if_lhsisnotreceiver);
+ Label if_left_receiver(this, Label::kDeferred);
+ GotoIf(IsJSReceiverInstanceType(left_instance_type),
+ &if_left_receiver);
+
+ var_left.Bind(
+ CallBuiltin(Builtins::kNonNumberToNumeric, context, left));
+ var_right.Bind(CallBuiltin(Builtins::kToNumeric, context, right));
+ Goto(&loop);
- BIND(&if_lhsisreceiver);
+ BIND(&if_left_receiver);
{
Callable callable = CodeFactory::NonPrimitiveToPrimitive(
isolate(), ToPrimitiveHint::kNumber);
- var_lhs.Bind(CallStub(callable, context, lhs));
- Goto(&loop);
- }
-
- BIND(&if_lhsisnotreceiver);
- {
- var_lhs.Bind(
- CallBuiltin(Builtins::kNonNumberToNumeric, context, lhs));
- var_rhs.Bind(CallBuiltin(Builtins::kToNumeric, context, rhs));
+ var_left.Bind(CallStub(callable, context, left));
Goto(&loop);
}
}
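
For orientation, the control flow of the rewritten comparison boils down to the loop below. This is a minimal C++ sketch rather than V8 code; Value and ToNumeric are invented stand-ins for the CSA variables and builtin calls, and string-to-number coercion is simplified.

    #include <cstdlib>
    #include <string>

    // Toy model of the coercion loop: keep re-binding left/right and
    // restarting, the same way var_left/var_right are re-bound before
    // each Goto(&loop) above.
    struct Value {
      bool is_number;
      double number;
      std::string text;  // used when !is_number
    };

    double ToNumeric(const Value& v) {
      // JS ToNumber would yield NaN for unparseable strings; strtod's
      // 0.0 fallback is close enough for this sketch.
      return v.is_number ? v.number : std::strtod(v.text.c_str(), nullptr);
    }

    bool LessThan(Value left, Value right) {
      for (;;) {  // the Goto(&loop) backedge
        if (left.is_number && right.is_number) {
          return left.number < right.number;  // do_float_comparison
        }
        // Coerce whichever side is not yet numeric, then retry.
        left = Value{true, ToNumeric(left), {}};
        right = Value{true, ToNumeric(right), {}};
      }
    }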
@@ -8695,26 +8644,24 @@ Node* CodeStubAssembler::RelationalComparison(Operation op, Node* lhs,
}
}
- BIND(&do_fcmp);
+ BIND(&do_float_comparison);
{
- // Load the {lhs} and {rhs} floating point values.
- Node* lhs = var_fcmp_lhs.value();
- Node* rhs = var_fcmp_rhs.value();
-
- // Perform a fast floating point comparison.
switch (op) {
case Operation::kLessThan:
- Branch(Float64LessThan(lhs, rhs), &return_true, &return_false);
+ Branch(Float64LessThan(var_left_float, var_right_float), &return_true,
+ &return_false);
break;
case Operation::kLessThanOrEqual:
- Branch(Float64LessThanOrEqual(lhs, rhs), &return_true, &return_false);
+ Branch(Float64LessThanOrEqual(var_left_float, var_right_float),
+ &return_true, &return_false);
break;
case Operation::kGreaterThan:
- Branch(Float64GreaterThan(lhs, rhs), &return_true, &return_false);
+ Branch(Float64GreaterThan(var_left_float, var_right_float),
+ &return_true, &return_false);
break;
case Operation::kGreaterThanOrEqual:
- Branch(Float64GreaterThanOrEqual(lhs, rhs), &return_true,
- &return_false);
+ Branch(Float64GreaterThanOrEqual(var_left_float, var_right_float),
+ &return_true, &return_false);
break;
default:
UNREACHABLE();
@@ -8723,18 +8670,18 @@ Node* CodeStubAssembler::RelationalComparison(Operation op, Node* lhs,
BIND(&return_true);
{
- result.Bind(TrueConstant());
+ var_result = TrueConstant();
Goto(&end);
}
BIND(&return_false);
{
- result.Bind(FalseConstant());
+ var_result = FalseConstant();
Goto(&end);
}
BIND(&end);
- return result.value();
+ return var_result;
}
Node* CodeStubAssembler::CollectFeedbackForString(Node* instance_type) {
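
The CombineFeedback/OverwriteFeedback calls used throughout these functions assume the feedback kinds form a bit lattice, so merging two observations is a plain bitwise OR and feedback only ever becomes more general. A minimal sketch; the bit assignments here are hypothetical, not the actual CompareOperationFeedback values:

    #include <cstdint>

    enum Feedback : uint8_t {
      kNone = 0,
      kSignedSmall = 1 << 0,            // hypothetical bit layout
      kNumber = kSignedSmall | 1 << 1,  // Number subsumes SignedSmall
      kNumberOrOddball = kNumber | 1 << 2,
      kAny = 0xFF,
    };

    // CombineFeedback: monotone, bits are only ever added.
    uint8_t Combine(uint8_t existing, uint8_t feedback) {
      return existing | feedback;
    }

    // OverwriteFeedback: discards whatever was collected before.
    uint8_t Overwrite(uint8_t /*existing*/, uint8_t feedback) {
      return feedback;
    }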
@@ -8764,10 +8711,11 @@ void CodeStubAssembler::GenerateEqual_Same(Node* value, Label* if_equal,
if (var_type_feedback != nullptr) {
Node* instance_type = LoadMapInstanceType(value_map);
- Label if_string(this), if_receiver(this), if_symbol(this),
+ Label if_string(this), if_receiver(this), if_symbol(this), if_bigint(this),
if_other(this, Label::kDeferred);
GotoIf(IsStringInstanceType(instance_type), &if_string);
GotoIf(IsJSReceiverInstanceType(instance_type), &if_receiver);
+ GotoIf(IsBigIntInstanceType(instance_type), &if_bigint);
Branch(IsSymbolInstanceType(instance_type), &if_symbol, &if_other);
BIND(&if_string);
@@ -8779,25 +8727,25 @@ void CodeStubAssembler::GenerateEqual_Same(Node* value, Label* if_equal,
BIND(&if_symbol);
{
- CombineFeedback(var_type_feedback,
- SmiConstant(CompareOperationFeedback::kSymbol));
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kSymbol);
Goto(if_equal);
}
BIND(&if_receiver);
{
- CombineFeedback(var_type_feedback,
- SmiConstant(CompareOperationFeedback::kReceiver));
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kReceiver);
Goto(if_equal);
}
- // TODO(neis): Introduce BigInt CompareOperationFeedback and collect here
- // and elsewhere?
+ BIND(&if_bigint);
+ {
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kBigInt);
+ Goto(if_equal);
+ }
BIND(&if_other);
{
- CombineFeedback(var_type_feedback,
- SmiConstant(CompareOperationFeedback::kAny));
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kAny);
Goto(if_equal);
}
} else {
@@ -8806,20 +8754,14 @@ void CodeStubAssembler::GenerateEqual_Same(Node* value, Label* if_equal,
BIND(&if_heapnumber);
{
- if (var_type_feedback != nullptr) {
- CombineFeedback(var_type_feedback,
- SmiConstant(CompareOperationFeedback::kNumber));
- }
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber);
Node* number_value = LoadHeapNumberValue(value);
BranchIfFloat64IsNaN(number_value, if_notequal, if_equal);
}
BIND(&if_smi);
{
- if (var_type_feedback != nullptr) {
- CombineFeedback(var_type_feedback,
- SmiConstant(CompareOperationFeedback::kSignedSmall));
- }
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kSignedSmall);
Goto(if_equal);
}
}
@@ -8847,10 +8789,10 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
VARIABLE(var_right, MachineRepresentation::kTagged, right);
VariableList loop_variable_list({&var_left, &var_right}, zone());
if (var_type_feedback != nullptr) {
- // Initialize the type feedback to None. The current feedback is combined
- // with the previous feedback.
- var_type_feedback->Bind(SmiConstant(CompareOperationFeedback::kNone));
- loop_variable_list.Add(var_type_feedback, zone());
+ // Initialize the type feedback to None. The current feedback will be
+ // combined with the previous feedback.
+ OverwriteFeedback(var_type_feedback, CompareOperationFeedback::kNone);
+ loop_variable_list.push_back(var_type_feedback);
}
Label loop(this, loop_variable_list);
Goto(&loop);
@@ -8880,10 +8822,8 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
{
// We have already checked for {left} and {right} being the same value,
// so when we get here they must be different Smis.
- if (var_type_feedback != nullptr) {
- CombineFeedback(var_type_feedback,
- SmiConstant(CompareOperationFeedback::kSignedSmall));
- }
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kSignedSmall);
Goto(&if_notequal);
}
@@ -8908,10 +8848,7 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
{
var_left_float = SmiToFloat64(left);
var_right_float = LoadHeapNumberValue(right);
- if (var_type_feedback != nullptr) {
- CombineFeedback(var_type_feedback,
- SmiConstant(CompareOperationFeedback::kNumber));
- }
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber);
Goto(&do_float_comparison);
}
@@ -8961,11 +8898,9 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
{
GotoIfNot(IsStringInstanceType(right_type), &use_symmetry);
result.Bind(CallBuiltin(Builtins::kStringEqual, context, left, right));
- if (var_type_feedback != nullptr) {
- CombineFeedback(var_type_feedback,
- SmiOr(CollectFeedbackForString(left_type),
- CollectFeedbackForString(right_type)));
- }
+ CombineFeedback(var_type_feedback,
+ SmiOr(CollectFeedbackForString(left_type),
+ CollectFeedbackForString(right_type)));
Goto(&end);
}
@@ -8976,10 +8911,7 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
var_left_float = LoadHeapNumberValue(left);
var_right_float = LoadHeapNumberValue(right);
- if (var_type_feedback != nullptr) {
- CombineFeedback(var_type_feedback,
- SmiConstant(CompareOperationFeedback::kNumber));
- }
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber);
Goto(&do_float_comparison);
BIND(&if_right_not_number);
@@ -9005,10 +8937,6 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
BIND(&if_left_bigint);
{
- if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(SmiConstant(CompareOperationFeedback::kAny));
- }
-
Label if_right_heapnumber(this), if_right_bigint(this),
if_right_string(this), if_right_boolean(this);
GotoIf(IsHeapNumberMap(right_map), &if_right_heapnumber);
@@ -9020,6 +8948,10 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
BIND(&if_right_heapnumber);
{
+ if (var_type_feedback != nullptr) {
+ var_type_feedback->Bind(
+ SmiConstant(CompareOperationFeedback::kAny));
+ }
result.Bind(CallRuntime(Runtime::kBigIntEqualToNumber,
NoContextConstant(), left, right));
Goto(&end);
@@ -9027,6 +8959,7 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
BIND(&if_right_bigint);
{
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kBigInt);
result.Bind(CallRuntime(Runtime::kBigIntEqualToBigInt,
NoContextConstant(), left, right));
Goto(&end);
@@ -9034,6 +8967,10 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
BIND(&if_right_string);
{
+ if (var_type_feedback != nullptr) {
+ var_type_feedback->Bind(
+ SmiConstant(CompareOperationFeedback::kAny));
+ }
result.Bind(CallRuntime(Runtime::kBigIntEqualToString,
NoContextConstant(), left, right));
Goto(&end);
@@ -9041,6 +8978,10 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
BIND(&if_right_boolean);
{
+ if (var_type_feedback != nullptr) {
+ var_type_feedback->Bind(
+ SmiConstant(CompareOperationFeedback::kAny));
+ }
var_right.Bind(LoadObjectField(right, Oddball::kToNumberOffset));
Goto(&loop);
}
@@ -9083,7 +9024,7 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
BIND(&if_right_symbol);
{
CombineFeedback(var_type_feedback,
- SmiConstant(CompareOperationFeedback::kSymbol));
+ CompareOperationFeedback::kSymbol);
Goto(&if_notequal);
}
} else {
@@ -9109,10 +9050,7 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
GotoIfNot(IsJSReceiverInstanceType(right_type), &if_right_not_receiver);
// {left} and {right} are different JSReceiver references.
- if (var_type_feedback != nullptr) {
- CombineFeedback(var_type_feedback,
- SmiConstant(CompareOperationFeedback::kReceiver));
- }
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kReceiver);
Goto(&if_notequal);
BIND(&if_right_not_receiver);
@@ -9380,10 +9318,8 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
BIND(&if_rhsisbigint);
{
if (var_type_feedback != nullptr) {
- CSA_ASSERT(
- this,
- WordEqual(var_type_feedback->value(),
- SmiConstant(CompareOperationFeedback::kAny)));
+ var_type_feedback->Bind(
+ SmiConstant(CompareOperationFeedback::kBigInt));
}
result.Bind(CallRuntime(Runtime::kBigIntEqualToBigInt,
NoContextConstant(), lhs, rhs));
@@ -9607,8 +9543,10 @@ void CodeStubAssembler::BranchIfSameValue(Node* lhs, Node* rhs, Label* if_true,
}
}
-Node* CodeStubAssembler::HasProperty(Node* object, Node* key, Node* context,
- HasPropertyLookupMode mode) {
+TNode<Oddball> CodeStubAssembler::HasProperty(SloppyTNode<HeapObject> object,
+ SloppyTNode<Name> key,
+ SloppyTNode<Context> context,
+ HasPropertyLookupMode mode) {
Label call_runtime(this, Label::kDeferred), return_true(this),
return_false(this), end(this), if_proxy(this, Label::kDeferred);
@@ -9633,16 +9571,16 @@ Node* CodeStubAssembler::HasProperty(Node* object, Node* key, Node* context,
lookup_element_in_holder, &return_false,
&call_runtime, &if_proxy);
- VARIABLE(result, MachineRepresentation::kTagged);
+ TVARIABLE(Oddball, result);
BIND(&if_proxy);
{
- Node* name = ToName(context, key);
+ TNode<Name> name = CAST(ToName(context, key));
switch (mode) {
case kHasProperty:
GotoIf(IsPrivateSymbol(name), &return_false);
- result.Bind(
+ result = CAST(
CallBuiltin(Builtins::kProxyHasProperty, context, object, name));
Goto(&end);
break;
@@ -9654,13 +9592,13 @@ Node* CodeStubAssembler::HasProperty(Node* object, Node* key, Node* context,
BIND(&return_true);
{
- result.Bind(TrueConstant());
+ result = TrueConstant();
Goto(&end);
}
BIND(&return_false);
{
- result.Bind(FalseConstant());
+ result = FalseConstant();
Goto(&end);
}
@@ -9676,13 +9614,14 @@ Node* CodeStubAssembler::HasProperty(Node* object, Node* key, Node* context,
break;
}
- result.Bind(
- CallRuntime(fallback_runtime_function_id, context, object, key));
+ result =
+ CAST(CallRuntime(fallback_runtime_function_id, context, object, key));
Goto(&end);
}
BIND(&end);
- return result.value();
+ CSA_ASSERT(this, IsBoolean(result));
+ return result;
}
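
Stripped of the CSA plumbing, the fast path of HasProperty is a prototype-chain walk. The sketch below shows that shape under simplifying assumptions, with no elements, proxies, or interceptors, which are exactly the cases the if_proxy branch and runtime fallback above exist for.

    #include <map>
    #include <string>

    struct Object {
      std::map<std::string, int> own_properties;
      const Object* prototype = nullptr;  // nullptr plays the role of null
    };

    bool HasProperty(const Object* object, const std::string& key) {
      for (const Object* holder = object; holder != nullptr;
           holder = holder->prototype) {
        if (holder->own_properties.count(key)) return true;  // return_true
      }
      return false;  // reached null prototype: return_false
    }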
Node* CodeStubAssembler::ClassOf(Node* value) {
@@ -9769,10 +9708,10 @@ Node* CodeStubAssembler::Typeof(Node* value) {
Node* callable_or_undetectable_mask = Word32And(
LoadMapBitField(map),
- Int32Constant(1 << Map::kIsCallable | 1 << Map::kIsUndetectable));
+ Int32Constant(Map::IsCallableBit::kMask | Map::IsUndetectableBit::kMask));
GotoIf(Word32Equal(callable_or_undetectable_mask,
- Int32Constant(1 << Map::kIsCallable)),
+ Int32Constant(Map::IsCallableBit::kMask)),
&return_function);
GotoIfNot(Word32Equal(callable_or_undetectable_mask, Int32Constant(0)),
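
The two bit tests above drive typeof's function/undefined distinction: a heap object is "function" exactly when it is callable and not undetectable, while undetectable objects (document.all being the classic case) report "undefined". A sketch with hypothetical bit positions standing in for the Map::*Bit masks:

    #include <cstdint>

    constexpr uint32_t kIsCallableMask = 1 << 0;      // hypothetical positions;
    constexpr uint32_t kIsUndetectableMask = 1 << 1;  // the real masks come from Map

    const char* TypeofTail(uint32_t bit_field) {
      uint32_t masked = bit_field & (kIsCallableMask | kIsUndetectableMask);
      if (masked == kIsCallableMask) return "function";  // callable, detectable
      if (masked != 0) return "undefined";               // undetectable
      return "object";                                   // simplified tail
    }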
@@ -10139,17 +10078,17 @@ Node* CodeStubAssembler::BitwiseOp(Node* left32, Node* right32,
return ChangeInt32ToTagged(Signed(Word32Xor(left32, right32)));
case Operation::kShiftLeft:
if (!Word32ShiftIsSafe()) {
- right32 = Word32And(right32, Int32Constant(0x1f));
+ right32 = Word32And(right32, Int32Constant(0x1F));
}
return ChangeInt32ToTagged(Signed(Word32Shl(left32, right32)));
case Operation::kShiftRight:
if (!Word32ShiftIsSafe()) {
- right32 = Word32And(right32, Int32Constant(0x1f));
+ right32 = Word32And(right32, Int32Constant(0x1F));
}
return ChangeInt32ToTagged(Signed(Word32Sar(left32, right32)));
case Operation::kShiftRightLogical:
if (!Word32ShiftIsSafe()) {
- right32 = Word32And(right32, Int32Constant(0x1f));
+ right32 = Word32And(right32, Int32Constant(0x1F));
}
return ChangeUint32ToTagged(Unsigned(Word32Shr(left32, right32)));
default:
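
The 0x1F mask encodes the ECMAScript rule that shift counts are taken modulo 32; it only needs to be emitted on targets where the native shift instruction is not already defined for counts of 32 and above, which is what the Word32ShiftIsSafe() check captures. In plain C++ terms:

    #include <cstdint>

    int32_t JsShiftLeft(int32_t left, uint32_t right) {
      // In JS, 1 << 33 is 2: only the low five bits of the count are used.
      return static_cast<int32_t>(static_cast<uint32_t>(left)
                                  << (right & 0x1F));
    }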
@@ -10426,7 +10365,7 @@ Node* CodeStubAssembler::IsDetachedBuffer(Node* buffer) {
}
CodeStubArguments::CodeStubArguments(
- CodeStubAssembler* assembler, SloppyTNode<IntPtrT> argc, Node* fp,
+ CodeStubAssembler* assembler, Node* argc, Node* fp,
CodeStubAssembler::ParameterMode param_mode, ReceiverMode receiver_mode)
: assembler_(assembler),
argc_mode_(param_mode),
@@ -10463,7 +10402,7 @@ TNode<Object> CodeStubArguments::AtIndex(
Node* index, CodeStubAssembler::ParameterMode mode) const {
DCHECK_EQ(argc_mode_, mode);
CSA_ASSERT(assembler_,
- assembler_->UintPtrOrSmiLessThan(index, GetLength(), mode));
+ assembler_->UintPtrOrSmiLessThan(index, GetLength(mode), mode));
return assembler_->UncheckedCast<Object>(
assembler_->Load(MachineType::AnyTagged(), AtIndexPtr(index, mode)));
}
@@ -10529,7 +10468,9 @@ void CodeStubArguments::PopAndReturn(Node* value) {
} else {
pop_count = argc_;
}
- assembler_->PopAndReturn(pop_count, value);
+
+ assembler_->PopAndReturn(assembler_->ParameterToWord(pop_count, argc_mode_),
+ value);
}
Node* CodeStubAssembler::IsFastElementsKind(Node* elements_kind) {
@@ -10586,6 +10527,7 @@ Node* CodeStubAssembler::AllocateFunctionWithMapAndContext(Node* map,
CSA_ASSERT(this, Word32BinaryNot(IsConstructorMap(map)));
CSA_ASSERT(this, Word32BinaryNot(IsFunctionWithPrototypeSlotMap(map)));
Node* const fun = Allocate(JSFunction::kSizeWithoutPrototype);
+ STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kPointerSize);
StoreMapNoWriteBarrier(fun, map);
StoreObjectFieldRoot(fun, JSObject::kPropertiesOrHashOffset,
Heap::kEmptyFixedArrayRootIndex);
@@ -10752,5 +10694,23 @@ void CodeStubAssembler::PerformStackCheck(Node* context) {
BIND(&ok);
}
+void CodeStubAssembler::InitializeFunctionContext(Node* native_context,
+ Node* context, int slots) {
+ DCHECK_GE(slots, Context::MIN_CONTEXT_SLOTS);
+ StoreMapNoWriteBarrier(context, Heap::kFunctionContextMapRootIndex);
+ StoreObjectFieldNoWriteBarrier(context, FixedArray::kLengthOffset,
+ SmiConstant(slots));
+
+ Node* const empty_fn =
+ LoadContextElement(native_context, Context::CLOSURE_INDEX);
+ StoreContextElementNoWriteBarrier(context, Context::CLOSURE_INDEX, empty_fn);
+ StoreContextElementNoWriteBarrier(context, Context::PREVIOUS_INDEX,
+ UndefinedConstant());
+ StoreContextElementNoWriteBarrier(context, Context::EXTENSION_INDEX,
+ TheHoleConstant());
+ StoreContextElementNoWriteBarrier(context, Context::NATIVE_CONTEXT_INDEX,
+ native_context);
+}
+
} // namespace internal
} // namespace v8
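
The new InitializeFunctionContext helper above leaves a freshly allocated function context with a fixed slot shape. Below is a toy model of that layout; the enum mirrors the Context::* indices only in spirit, not in their actual values:

    #include <string>
    #include <vector>

    enum Slot { kClosure, kPrevious, kExtension, kNativeContext, kMinSlots };

    std::vector<std::string> MakeFunctionContext(std::string native_context) {
      std::vector<std::string> ctx(kMinSlots);
      ctx[kClosure] = "closure from native context";  // CLOSURE_INDEX
      ctx[kPrevious] = "undefined";                   // PREVIOUS_INDEX
      ctx[kExtension] = "the hole";                   // EXTENSION_INDEX
      ctx[kNativeContext] = native_context;           // NATIVE_CONTEXT_INDEX
      return ctx;
    }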
diff --git a/deps/v8/src/code-stub-assembler.h b/deps/v8/src/code-stub-assembler.h
index 44becb3981..4a72b203a7 100644
--- a/deps/v8/src/code-stub-assembler.h
+++ b/deps/v8/src/code-stub-assembler.h
@@ -22,48 +22,65 @@ class StubCache;
enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
-#define HEAP_CONSTANT_LIST(V) \
- V(AccessorInfoMap, accessor_info_map, AccessorInfoMap) \
- V(AccessorPairMap, accessor_pair_map, AccessorPairMap) \
- V(AllocationSiteMap, allocation_site_map, AllocationSiteMap) \
- V(BooleanMap, boolean_map, BooleanMap) \
- V(CodeMap, code_map, CodeMap) \
- V(EmptyPropertyDictionary, empty_property_dictionary, \
- EmptyPropertyDictionary) \
- V(EmptyFixedArray, empty_fixed_array, EmptyFixedArray) \
- V(EmptySlowElementDictionary, empty_slow_element_dictionary, \
- EmptySlowElementDictionary) \
- V(empty_string, empty_string, EmptyString) \
- V(EmptyWeakCell, empty_weak_cell, EmptyWeakCell) \
- V(FalseValue, false_value, False) \
- V(FeedbackVectorMap, feedback_vector_map, FeedbackVectorMap) \
- V(FixedArrayMap, fixed_array_map, FixedArrayMap) \
- V(FixedCOWArrayMap, fixed_cow_array_map, FixedCOWArrayMap) \
- V(FixedDoubleArrayMap, fixed_double_array_map, FixedDoubleArrayMap) \
- V(FunctionTemplateInfoMap, function_template_info_map, \
- FunctionTemplateInfoMap) \
- V(GlobalPropertyCellMap, global_property_cell_map, PropertyCellMap) \
- V(has_instance_symbol, has_instance_symbol, HasInstanceSymbol) \
- V(HeapNumberMap, heap_number_map, HeapNumberMap) \
- V(length_string, length_string, LengthString) \
- V(ManyClosuresCellMap, many_closures_cell_map, ManyClosuresCellMap) \
- V(MetaMap, meta_map, MetaMap) \
- V(MinusZeroValue, minus_zero_value, MinusZero) \
- V(MutableHeapNumberMap, mutable_heap_number_map, MutableHeapNumberMap) \
- V(NanValue, nan_value, Nan) \
- V(NoClosuresCellMap, no_closures_cell_map, NoClosuresCellMap) \
- V(NullValue, null_value, Null) \
- V(OneClosureCellMap, one_closure_cell_map, OneClosureCellMap) \
- V(prototype_string, prototype_string, PrototypeString) \
- V(SpeciesProtector, species_protector, SpeciesProtector) \
- V(SymbolMap, symbol_map, SymbolMap) \
- V(TheHoleValue, the_hole_value, TheHole) \
- V(TrueValue, true_value, True) \
- V(Tuple2Map, tuple2_map, Tuple2Map) \
- V(Tuple3Map, tuple3_map, Tuple3Map) \
- V(UndefinedValue, undefined_value, Undefined) \
- V(WeakCellMap, weak_cell_map, WeakCellMap) \
- V(SharedFunctionInfoMap, shared_function_info_map, SharedFunctionInfoMap)
+#define HEAP_CONSTANT_LIST(V) \
+ V(AccessorInfoMap, accessor_info_map, AccessorInfoMap) \
+ V(AccessorPairMap, accessor_pair_map, AccessorPairMap) \
+ V(AllocationSiteMap, allocation_site_map, AllocationSiteMap) \
+ V(BooleanMap, boolean_map, BooleanMap) \
+ V(CodeMap, code_map, CodeMap) \
+ V(EmptyPropertyDictionary, empty_property_dictionary, \
+ EmptyPropertyDictionary) \
+ V(EmptyFixedArray, empty_fixed_array, EmptyFixedArray) \
+ V(EmptySlowElementDictionary, empty_slow_element_dictionary, \
+ EmptySlowElementDictionary) \
+ V(empty_string, empty_string, EmptyString) \
+ V(EmptyWeakCell, empty_weak_cell, EmptyWeakCell) \
+ V(FalseValue, false_value, False) \
+ V(FeedbackVectorMap, feedback_vector_map, FeedbackVectorMap) \
+ V(FixedArrayMap, fixed_array_map, FixedArrayMap) \
+ V(FixedCOWArrayMap, fixed_cow_array_map, FixedCOWArrayMap) \
+ V(FixedDoubleArrayMap, fixed_double_array_map, FixedDoubleArrayMap) \
+ V(FunctionTemplateInfoMap, function_template_info_map, \
+ FunctionTemplateInfoMap) \
+ V(GlobalPropertyCellMap, global_property_cell_map, PropertyCellMap) \
+ V(has_instance_symbol, has_instance_symbol, HasInstanceSymbol) \
+ V(HeapNumberMap, heap_number_map, HeapNumberMap) \
+ V(length_string, length_string, LengthString) \
+ V(ManyClosuresCellMap, many_closures_cell_map, ManyClosuresCellMap) \
+ V(MetaMap, meta_map, MetaMap) \
+ V(MinusZeroValue, minus_zero_value, MinusZero) \
+ V(MutableHeapNumberMap, mutable_heap_number_map, MutableHeapNumberMap) \
+ V(NanValue, nan_value, Nan) \
+ V(NoClosuresCellMap, no_closures_cell_map, NoClosuresCellMap) \
+ V(NullValue, null_value, Null) \
+ V(OneClosureCellMap, one_closure_cell_map, OneClosureCellMap) \
+ V(prototype_string, prototype_string, PrototypeString) \
+ V(SpeciesProtector, species_protector, SpeciesProtector) \
+ V(StoreHandler0Map, store_handler0_map, StoreHandler0Map) \
+ V(SymbolMap, symbol_map, SymbolMap) \
+ V(TheHoleValue, the_hole_value, TheHole) \
+ V(TrueValue, true_value, True) \
+ V(Tuple2Map, tuple2_map, Tuple2Map) \
+ V(Tuple3Map, tuple3_map, Tuple3Map) \
+ V(UndefinedValue, undefined_value, Undefined) \
+ V(WeakCellMap, weak_cell_map, WeakCellMap) \
+ V(SharedFunctionInfoMap, shared_function_info_map, SharedFunctionInfoMap) \
+ V(promise_default_reject_handler_symbol, \
+ promise_default_reject_handler_symbol, PromiseDefaultRejectHandlerSymbol) \
+ V(promise_default_resolve_handler_symbol, \
+ promise_default_resolve_handler_symbol, \
+ PromiseDefaultResolveHandlerSymbol)
+
+// Returned from IteratorBuiltinsAssembler::GetIterator(). Struct is declared
+// here to simplify use in other generated builtins.
+struct IteratorRecord {
+ public:
+ // iteratorRecord.[[Iterator]]
+ compiler::TNode<JSReceiver> object;
+
+ // iteratorRecord.[[NextMethod]]
+ compiler::TNode<Object> next;
+};
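
The two fields mirror the spec's iterator record as produced by GetIterator: the iterator object itself, and its next method looked up exactly once, so a later reassignment of iterator.next is not observed mid-iteration. A toy analogue of that caching, with invented types:

    #include <functional>
    #include <utility>

    struct ToyIterator {
      int i = 0;
      std::function<std::pair<bool, int>(ToyIterator&)> next =
          [](ToyIterator& self) {
            bool has_more = self.i < 3;
            return std::make_pair(has_more, self.i++);
          };
    };

    struct ToyIteratorRecord {
      ToyIterator* object;                                     // [[Iterator]]
      std::function<std::pair<bool, int>(ToyIterator&)> next;  // [[NextMethod]]
    };

    ToyIteratorRecord GetIterator(ToyIterator& it) {
      return {&it, it.next};  // `next` is captured once, up front
    }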
// Provides JavaScript-specific "macro-assembler" functionality on top of the
// CodeAssembler. By factoring the JavaScript-isms out of the CodeAssembler,
@@ -78,6 +95,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
template <class T>
using SloppyTNode = compiler::SloppyTNode<T>;
+ template <typename T>
+ using LazyNode = std::function<TNode<T>()>;
+
CodeStubAssembler(compiler::CodeAssemblerState* state);
enum AllocationFlag : uint8_t {
@@ -274,7 +294,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Computes a % b for Smi inputs a and b; result is not necessarily a Smi.
Node* SmiMod(Node* a, Node* b);
// Computes a * b for Smi inputs a and b; result is not necessarily a Smi.
- Node* SmiMul(Node* a, Node* b);
+ TNode<Number> SmiMul(SloppyTNode<Smi> a, SloppyTNode<Smi> b);
  // Tries to compute dividend / divisor for Smi inputs, branching to bailout
// if the division needs to be performed as a floating point operation.
Node* TrySmiDiv(Node* dividend, Node* divisor, Label* bailout);
@@ -417,8 +437,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
  // It's used for testing to ensure that slow path implementations behave
  // equivalently to corresponding fast paths (where applicable).
//
- // Works only in DEBUG mode or with ENABLE_FASTSLOW_SWITCH compile time flag.
- // Nop otherwise.
+ // Works only with V8_ENABLE_FORCE_SLOW_PATH compile time flag. Nop otherwise.
void GotoIfForceSlowPath(Label* if_true);
// Load value from current frame by given offset in bytes.
@@ -553,8 +572,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Load value field of a JSValue object.
Node* LoadJSValueValue(Node* object);
// Load value field of a WeakCell object.
- Node* LoadWeakCellValueUnchecked(Node* weak_cell);
- Node* LoadWeakCellValue(Node* weak_cell, Label* if_cleared = nullptr);
+ TNode<Object> LoadWeakCellValueUnchecked(Node* weak_cell);
+ TNode<Object> LoadWeakCellValue(SloppyTNode<WeakCell> weak_cell,
+ Label* if_cleared = nullptr);
// Load an array element from a FixedArray.
Node* LoadFixedArrayElement(Node* object, Node* index,
@@ -577,7 +597,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Label* if_hole = nullptr);
// Load a feedback slot from a FeedbackVector.
- Node* LoadFeedbackVectorSlot(
+ TNode<Object> LoadFeedbackVectorSlot(
Node* object, Node* index, int additional_offset = 0,
ParameterMode parameter_mode = INTPTR_PARAMETERS);
@@ -988,7 +1008,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<Number> ChangeInt32ToTagged(SloppyTNode<Int32T> value);
TNode<Number> ChangeUint32ToTagged(SloppyTNode<Uint32T> value);
TNode<Float64T> ChangeNumberToFloat64(SloppyTNode<Number> value);
- TNode<UintPtrT> ChangeNonnegativeNumberToUintPtr(SloppyTNode<Number> value);
+ TNode<UintPtrT> ChangeNonnegativeNumberToUintPtr(TNode<Number> value);
void TaggedToNumeric(Node* context, Node* value, Label* done,
Variable* var_numeric);
@@ -1105,7 +1125,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* IsSequentialStringInstanceType(Node* instance_type);
Node* IsShortExternalStringInstanceType(Node* instance_type);
Node* IsSpecialReceiverInstanceType(Node* instance_type);
- Node* IsSpecialReceiverMap(Node* map);
Node* IsSpeciesProtectorCellInvalid();
Node* IsStringInstanceType(Node* instance_type);
Node* IsString(Node* object);
@@ -1145,10 +1164,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// String helpers.
// Load a character from a String (might flatten a ConsString).
- TNode<Uint32T> StringCharCodeAt(SloppyTNode<String> string,
- SloppyTNode<IntPtrT> index);
+ TNode<Int32T> StringCharCodeAt(SloppyTNode<String> string,
+ SloppyTNode<IntPtrT> index);
// Return the single character string with only {code}.
- Node* StringFromCharCode(Node* code);
+ TNode<String> StringFromCharCode(TNode<Int32T> code);
enum class SubStringFlags { NONE, FROM_TO_ARE_BOUNDED };
@@ -1179,14 +1198,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Variable* var_right, Node* right_instance_type,
Label* did_something);
- Node* StringFromCodePoint(Node* codepoint, UnicodeEncoding encoding);
+ TNode<String> StringFromCodePoint(TNode<Int32T> codepoint,
+ UnicodeEncoding encoding);
// Type conversion helpers.
enum class BigIntHandling { kConvertToNumber, kThrow };
// Convert a String to a Number.
- TNode<Number> StringToNumber(SloppyTNode<Context> context,
- SloppyTNode<String> input);
- Node* NumberToString(Node* context, Node* input);
+ TNode<Number> StringToNumber(SloppyTNode<String> input);
+ // Convert a Number to a String.
+ Node* NumberToString(Node* input);
// Convert an object to a name.
Node* ToName(Node* context, Node* input);
// Convert a Non-Number object to a Number.
@@ -1233,7 +1253,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// ES6 7.1.15 ToLength, but with inlined fast path.
Node* ToLength_Inline(Node* const context, Node* const input);
- // Convert any object to an Integer.
+ // ES6 7.1.4 ToInteger ( argument )
+ TNode<Number> ToInteger_Inline(TNode<Context> context, TNode<Object> input,
+ ToIntegerTruncationMode mode = kNoTruncation);
TNode<Number> ToInteger(SloppyTNode<Context> context,
SloppyTNode<Object> input,
ToIntegerTruncationMode mode = kNoTruncation);
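
ES6 7.1.4 defines ToInteger(argument) as sign(x) * floor(abs(x)) applied to ToNumber(argument), with NaN mapping to +0. On an already-converted double that is plain truncation; a sketch, leaving out the ToNumber step and the ToIntegerTruncationMode handling the real helper performs:

    #include <cmath>

    double ToIntegerOfDouble(double number) {
      if (std::isnan(number)) return 0.0;  // NaN maps to +0
      // sign(x) * floor(abs(x)); +/-0 and +/-Infinity pass through unchanged.
      return std::trunc(number);
    }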
@@ -1248,22 +1270,23 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Returns a node that contains a decoded (unsigned!) value of a bit
// field |BitField| in |word|. Returns result as a word-size node.
template <typename BitField>
- Node* DecodeWord(Node* word) {
+ TNode<UintPtrT> DecodeWord(SloppyTNode<WordT> word) {
return DecodeWord(word, BitField::kShift, BitField::kMask);
}
// Returns a node that contains a decoded (unsigned!) value of a bit
// field |BitField| in |word32|. Returns result as a word-size node.
template <typename BitField>
- Node* DecodeWordFromWord32(Node* word32) {
+ TNode<UintPtrT> DecodeWordFromWord32(SloppyTNode<Word32T> word32) {
return DecodeWord<BitField>(ChangeUint32ToWord(word32));
}
// Returns a node that contains a decoded (unsigned!) value of a bit
// field |BitField| in |word|. Returns result as an uint32 node.
template <typename BitField>
- Node* DecodeWord32FromWord(Node* word) {
- return TruncateWordToWord32(DecodeWord<BitField>(word));
+ TNode<Uint32T> DecodeWord32FromWord(SloppyTNode<WordT> word) {
+ return UncheckedCast<Uint32T>(
+ TruncateWordToWord32(Signed(DecodeWord<BitField>(word))));
}
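
These templates follow the usual BitField convention in which kMask is pre-shifted into position, so decoding is (word & kMask) >> kShift. A self-contained sketch of that convention; the example field's position is made up:

    #include <cstdint>

    template <typename T, int kShift, int kSize>
    struct BitField {
      static constexpr uintptr_t kMask =
          ((uintptr_t{1} << kSize) - 1) << kShift;
      static constexpr uintptr_t encode(T value) {
        return (static_cast<uintptr_t>(value) << kShift) & kMask;
      }
      static constexpr uintptr_t decode(uintptr_t word) {
        return (word & kMask) >> kShift;
      }
    };

    using ExampleBit = BitField<bool, 3, 1>;  // hypothetical position
    static_assert(ExampleBit::decode(ExampleBit::encode(true)) == 1,
                  "round-trips through the field");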
// Decodes an unsigned (!) value from |word32| to an uint32 node.
@@ -1271,7 +1294,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
uint32_t mask);
// Decodes an unsigned (!) value from |word| to a word-size node.
- Node* DecodeWord(Node* word, uint32_t shift, uint32_t mask);
+ TNode<UintPtrT> DecodeWord(SloppyTNode<WordT> word, uint32_t shift,
+ uint32_t mask);
// Returns a node that contains the updated values of a |BitField|.
template <typename BitField>
@@ -1559,6 +1583,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* name_index, Variable* var_details,
Variable* var_value);
+ void LoadPropertyFromFastObject(Node* object, Node* map, Node* descriptors,
+ Node* name_index, Node* details,
+ Variable* var_value);
+
void LoadPropertyFromNameDictionary(Node* dictionary, Node* entry,
Variable* var_details,
Variable* var_value);
@@ -1600,6 +1628,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Label* if_bailout)>
LookupInHolder;
+  // For integer indexed exotic cases, check if the given string cannot be a
+  // special index. Branches to if_not_special_index only when a simple check
+  // proves that the string cannot be a special index; taking the
+  // if_maybe_special_index branch therefore does not mean that name_string is
+  // a special index.
+ void BranchIfMaybeSpecialIndex(TNode<String> name_string,
+ Label* if_maybe_special_index,
+ Label* if_not_special_index);
+
// Generic property prototype chain lookup generator.
// For properties it generates lookup using given {lookup_property_in_holder}
// and for elements it uses {lookup_element_in_holder}.
@@ -1635,9 +1672,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
void ReportFeedbackUpdate(SloppyTNode<FeedbackVector> feedback_vector,
SloppyTNode<IntPtrT> slot_id, const char* reason);
- // Combine the new feedback with the existing_feedback.
+ // Combine the new feedback with the existing_feedback. Do nothing if
+ // existing_feedback is nullptr.
+ void CombineFeedback(Variable* existing_feedback, int feedback);
void CombineFeedback(Variable* existing_feedback, Node* feedback);
+ // Overwrite the existing feedback with new_feedback. Do nothing if
+ // existing_feedback is nullptr.
+ void OverwriteFeedback(Variable* existing_feedback, int new_feedback);
+
// Check if a property name might require protector invalidation when it is
// used for a property store or deletion.
void CheckForAssociatedProtector(Node* name, Label* if_protector);
@@ -1657,7 +1700,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
}
// Loads script context from the script context table.
- Node* LoadScriptContext(Node* context, int context_index);
+ TNode<Context> LoadScriptContext(TNode<Context> context,
+ TNode<IntPtrT> context_index);
Node* Int32ToUint8Clamped(Node* int32_value);
Node* Float64ToUint8Clamped(Node* float64_value);
@@ -1759,11 +1803,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
void InitializeFieldsWithRoot(Node* object, Node* start_offset,
Node* end_offset, Heap::RootListIndex root);
- Node* RelationalComparison(Operation op, Node* lhs, Node* rhs, Node* context,
+ Node* RelationalComparison(Operation op, Node* left, Node* right,
+ Node* context,
Variable* var_type_feedback = nullptr);
- void BranchIfNumericRelationalComparison(Operation op, Node* lhs, Node* rhs,
- Label* if_true, Label* if_false);
+ void BranchIfNumberRelationalComparison(Operation op, Node* left, Node* right,
+ Label* if_true, Label* if_false);
void BranchIfAccessorPair(Node* value, Label* if_accessor_pair,
Label* if_not_accessor_pair) {
@@ -1771,7 +1816,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Branch(IsAccessorPair(value), if_accessor_pair, if_not_accessor_pair);
}
- void GotoIfNumericGreaterThanOrEqual(Node* lhs, Node* rhs, Label* if_false);
+ void GotoIfNumberGreaterThanOrEqual(Node* left, Node* right, Label* if_false);
Node* Equal(Node* lhs, Node* rhs, Node* context,
Variable* var_type_feedback = nullptr);
@@ -1786,8 +1831,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
enum HasPropertyLookupMode { kHasProperty, kForInHasProperty };
- Node* HasProperty(Node* object, Node* key, Node* context,
- HasPropertyLookupMode mode);
+ TNode<Oddball> HasProperty(SloppyTNode<HeapObject> object,
+ SloppyTNode<Name> key,
+ SloppyTNode<Context> context,
+ HasPropertyLookupMode mode);
Node* ClassOf(Node* object);
@@ -1845,7 +1892,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
UndefinedConstant(), SmiConstant(message), args...);
}
- void Abort(BailoutReason reason) {
+ void Abort(AbortReason reason) {
CallRuntime(Runtime::kAbort, NoContextConstant(), SmiConstant(reason));
Unreachable();
}
@@ -1862,11 +1909,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
void DescriptorLookupBinary(Node* unique_name, Node* descriptors, Node* nof,
Label* if_found, Variable* var_name_index,
Label* if_not_found);
+ Node* DescriptorNumberToIndex(SloppyTNode<Uint32T> descriptor_number);
// Implements DescriptorArray::ToKeyIndex.
// Returns an untagged IntPtr.
Node* DescriptorArrayToKeyIndex(Node* descriptor_number);
// Implements DescriptorArray::GetKey.
Node* DescriptorArrayGetKey(Node* descriptors, Node* descriptor_number);
+  // Implements DescriptorArray::GetDetails.
+ TNode<Uint32T> DescriptorArrayGetDetails(TNode<DescriptorArray> descriptors,
+ TNode<Uint32T> descriptor_number);
Node* CallGetterIfAccessor(Node* value, Node* details, Node* context,
Node* receiver, Label* if_bailout,
@@ -1878,6 +1929,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Label* definitely_no_elements,
Label* possibly_elements);
+ void InitializeFunctionContext(Node* native_context, Node* context,
+ int slots);
+
private:
friend class CodeStubArguments;
@@ -1932,12 +1986,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* context, Node* input, Object::Conversion mode,
BigIntHandling bigint_handling = BigIntHandling::kThrow);
- enum class Feedback { kCollect, kNone };
- template <Feedback feedback>
void TaggedToNumeric(Node* context, Node* value, Label* done,
- Variable* var_numeric, Variable* var_feedback = nullptr);
+ Variable* var_numeric, Variable* var_feedback);
- template <Feedback feedback, Object::Conversion conversion>
+ template <Object::Conversion conversion>
void TaggedToWord32OrBigIntImpl(Node* context, Node* value, Label* if_number,
Variable* var_word32,
Label* if_bigint = nullptr,
@@ -1957,15 +2009,16 @@ class CodeStubArguments {
// |argc| is an intptr value which specifies the number of arguments passed
// to the builtin excluding the receiver. The arguments will include a
// receiver iff |receiver_mode| is kHasReceiver.
- CodeStubArguments(CodeStubAssembler* assembler, SloppyTNode<IntPtrT> argc,
+ CodeStubArguments(CodeStubAssembler* assembler, Node* argc,
ReceiverMode receiver_mode = ReceiverMode::kHasReceiver)
: CodeStubArguments(assembler, argc, nullptr,
CodeStubAssembler::INTPTR_PARAMETERS, receiver_mode) {
}
+
// |argc| is either a smi or intptr depending on |param_mode|. The arguments
// include a receiver iff |receiver_mode| is kHasReceiver.
- CodeStubArguments(CodeStubAssembler* assembler, SloppyTNode<IntPtrT> argc,
- Node* fp, CodeStubAssembler::ParameterMode param_mode,
+ CodeStubArguments(CodeStubAssembler* assembler, Node* argc, Node* fp,
+ CodeStubAssembler::ParameterMode param_mode,
ReceiverMode receiver_mode = ReceiverMode::kHasReceiver);
TNode<Object> GetReceiver() const;
@@ -1987,7 +2040,10 @@ class CodeStubArguments {
TNode<Object> GetOptionalArgumentValue(int index,
SloppyTNode<Object> default_value);
- TNode<IntPtrT> GetLength() const { return argc_; }
+ Node* GetLength(CodeStubAssembler::ParameterMode mode) const {
+ DCHECK_EQ(mode, argc_mode_);
+ return argc_;
+ }
typedef std::function<void(Node* arg)> ForEachBodyFunction;
@@ -2013,7 +2069,7 @@ class CodeStubArguments {
CodeStubAssembler* assembler_;
CodeStubAssembler::ParameterMode argc_mode_;
ReceiverMode receiver_mode_;
- TNode<IntPtrT> argc_;
+ Node* argc_;
TNode<RawPtr<Object>> arguments_;
Node* fp_;
};
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index 4b2bd1eaf4..2b98a5bfc7 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -404,30 +404,6 @@ TF_STUB(KeyedStoreSloppyArgumentsStub, CodeStubAssembler) {
}
}
-TF_STUB(LoadScriptContextFieldStub, CodeStubAssembler) {
- Comment("LoadScriptContextFieldStub: context_index=%d, slot=%d",
- stub->context_index(), stub->slot_index());
-
- Node* context = Parameter(Descriptor::kContext);
-
- Node* script_context = LoadScriptContext(context, stub->context_index());
- Node* result = LoadFixedArrayElement(script_context, stub->slot_index());
- Return(result);
-}
-
-TF_STUB(StoreScriptContextFieldStub, CodeStubAssembler) {
- Comment("StoreScriptContextFieldStub: context_index=%d, slot=%d",
- stub->context_index(), stub->slot_index());
-
- Node* value = Parameter(Descriptor::kValue);
- Node* context = Parameter(Descriptor::kContext);
-
- Node* script_context = LoadScriptContext(context, stub->context_index());
- StoreFixedArrayElement(script_context, IntPtrConstant(stub->slot_index()),
- value);
- Return(value);
-}
-
// TODO(ishell): move to builtins-handler-gen.
TF_STUB(StoreInterceptorStub, CodeStubAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
@@ -640,7 +616,7 @@ void ArrayConstructorAssembler::GenerateConstructor(
Branch(SmiEqual(array_size, SmiConstant(0)), &small_smi_size, &abort);
BIND(&abort);
- Node* reason = SmiConstant(kAllocatingNonEmptyPackedArray);
+ Node* reason = SmiConstant(AbortReason::kAllocatingNonEmptyPackedArray);
TailCallRuntime(Runtime::kAbort, context, reason);
} else {
int element_size =
@@ -701,23 +677,6 @@ TF_STUB(InternalArraySingleArgumentConstructorStub, ArrayConstructorAssembler) {
stub->elements_kind(), DONT_TRACK_ALLOCATION_SITE);
}
-TF_STUB(GrowArrayElementsStub, CodeStubAssembler) {
- Label runtime(this, CodeStubAssembler::Label::kDeferred);
-
- Node* object = Parameter(Descriptor::kObject);
- Node* key = Parameter(Descriptor::kKey);
- Node* context = Parameter(Descriptor::kContext);
- ElementsKind kind = stub->elements_kind();
-
- Node* elements = LoadElements(object);
- Node* new_elements =
- TryGrowElementsCapacity(object, elements, kind, key, &runtime);
- Return(new_elements);
-
- BIND(&runtime);
- TailCallRuntime(Runtime::kGrowArrayElements, context, object, key);
-}
-
ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate)
: PlatformCodeStub(isolate) {}
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index 76057ffcc2..751a89fdbd 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -26,36 +26,33 @@ class Node;
}
// List of code stubs used on all platforms.
-#define CODE_STUB_LIST_ALL_PLATFORMS(V) \
- /* --- PlatformCodeStubs --- */ \
- V(ArrayConstructor) \
- V(CallApiCallback) \
- V(CallApiGetter) \
- V(CEntry) \
- V(DoubleToI) \
- V(InternalArrayConstructor) \
- V(JSEntry) \
- V(MathPow) \
- V(ProfileEntryHook) \
- V(StoreSlowElement) \
- /* --- TurboFanCodeStubs --- */ \
- V(ArrayNoArgumentConstructor) \
- V(ArraySingleArgumentConstructor) \
- V(ArrayNArgumentsConstructor) \
- V(InternalArrayNoArgumentConstructor) \
- V(InternalArraySingleArgumentConstructor) \
- V(ElementsTransitionAndStore) \
- V(KeyedLoadSloppyArguments) \
- V(KeyedStoreSloppyArguments) \
- V(LoadScriptContextField) \
- V(StoreScriptContextField) \
- V(StringAdd) \
- V(GetProperty) \
- V(StoreFastElement) \
- V(StoreInterceptor) \
- V(TransitionElementsKind) \
- V(LoadIndexedInterceptor) \
- V(GrowArrayElements)
+#define CODE_STUB_LIST_ALL_PLATFORMS(V) \
+ /* --- PlatformCodeStubs --- */ \
+ V(ArrayConstructor) \
+ V(CallApiCallback) \
+ V(CallApiGetter) \
+ V(CEntry) \
+ V(DoubleToI) \
+ V(InternalArrayConstructor) \
+ V(JSEntry) \
+ V(MathPow) \
+ V(ProfileEntryHook) \
+ V(StoreSlowElement) \
+ /* --- TurboFanCodeStubs --- */ \
+ V(ArrayNoArgumentConstructor) \
+ V(ArraySingleArgumentConstructor) \
+ V(ArrayNArgumentsConstructor) \
+ V(InternalArrayNoArgumentConstructor) \
+ V(InternalArraySingleArgumentConstructor) \
+ V(ElementsTransitionAndStore) \
+ V(KeyedLoadSloppyArguments) \
+ V(KeyedStoreSloppyArguments) \
+ V(StringAdd) \
+ V(GetProperty) \
+ V(StoreFastElement) \
+ V(StoreInterceptor) \
+ V(TransitionElementsKind) \
+ V(LoadIndexedInterceptor)
// List of code stubs only used on ARM 32 bits platforms.
#if V8_TARGET_ARCH_ARM
@@ -493,23 +490,6 @@ class GetPropertyStub : public TurboFanCodeStub {
DEFINE_TURBOFAN_CODE_STUB(GetProperty, TurboFanCodeStub);
};
-class GrowArrayElementsStub : public TurboFanCodeStub {
- public:
- GrowArrayElementsStub(Isolate* isolate, ElementsKind kind)
- : TurboFanCodeStub(isolate) {
- minor_key_ = ElementsKindBits::encode(GetHoleyElementsKind(kind));
- }
-
- ElementsKind elements_kind() const {
- return ElementsKindBits::decode(minor_key_);
- }
-
- private:
- class ElementsKindBits : public BitField<ElementsKind, 0, 8> {};
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(GrowArrayElements);
- DEFINE_TURBOFAN_CODE_STUB(GrowArrayElements, TurboFanCodeStub);
-};
enum AllocationSiteOverrideMode {
DONT_OVERRIDE,
@@ -602,7 +582,7 @@ class KeyedStoreSloppyArgumentsStub : public TurboFanCodeStub {
class CallApiCallbackStub : public PlatformCodeStub {
public:
- static const int kArgBits = 3;
+ static const int kArgBits = 7;
static const int kArgMax = (1 << kArgBits) - 1;
CallApiCallbackStub(Isolate* isolate, int argc)
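
Widening kArgBits from 3 to 7 raises the largest argument count a CallApiCallbackStub can encode from 7 to 127, since kArgMax is derived directly from the bit width:

    static_assert((1 << 3) - 1 == 7, "former kArgMax");
    static_assert((1 << 7) - 1 == 127, "kArgMax after this change");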
@@ -697,10 +677,18 @@ class CEntryStub : public PlatformCodeStub {
class JSEntryStub : public PlatformCodeStub {
public:
+ enum class SpecialTarget { kNone, kRunMicrotasks };
JSEntryStub(Isolate* isolate, StackFrame::Type type)
: PlatformCodeStub(isolate) {
DCHECK(type == StackFrame::ENTRY || type == StackFrame::CONSTRUCT_ENTRY);
- minor_key_ = StackFrameTypeBits::encode(type);
+ minor_key_ = StackFrameTypeBits::encode(type) |
+ SpecialTargetBits::encode(SpecialTarget::kNone);
+ }
+
+ JSEntryStub(Isolate* isolate, SpecialTarget target)
+ : PlatformCodeStub(isolate) {
+ minor_key_ = StackFrameTypeBits::encode(StackFrame::ENTRY) |
+ SpecialTargetBits::encode(target);
}
private:
@@ -715,7 +703,26 @@ class JSEntryStub : public PlatformCodeStub {
return StackFrameTypeBits::decode(minor_key_);
}
+ SpecialTarget special_target() const {
+ return SpecialTargetBits::decode(minor_key_);
+ }
+
+ Handle<Code> EntryTrampoline() {
+ switch (special_target()) {
+ case SpecialTarget::kNone:
+ return (type() == StackFrame::CONSTRUCT_ENTRY)
+ ? BUILTIN_CODE(isolate(), JSConstructEntryTrampoline)
+ : BUILTIN_CODE(isolate(), JSEntryTrampoline);
+ case SpecialTarget::kRunMicrotasks:
+ return BUILTIN_CODE(isolate(), RunMicrotasks);
+ }
+ UNREACHABLE();
+ return Handle<Code>();
+ }
+
class StackFrameTypeBits : public BitField<StackFrame::Type, 0, 5> {};
+ class SpecialTargetBits
+ : public BitField<SpecialTarget, StackFrameTypeBits::kNext, 1> {};
int handler_offset_;
@@ -766,59 +773,6 @@ class DoubleToIStub : public PlatformCodeStub {
DEFINE_PLATFORM_CODE_STUB(DoubleToI, PlatformCodeStub);
};
-class ScriptContextFieldStub : public TurboFanCodeStub {
- public:
- ScriptContextFieldStub(Isolate* isolate,
- const ScriptContextTable::LookupResult* lookup_result)
- : TurboFanCodeStub(isolate) {
- DCHECK(Accepted(lookup_result));
- minor_key_ = ContextIndexBits::encode(lookup_result->context_index) |
- SlotIndexBits::encode(lookup_result->slot_index);
- }
-
- int context_index() const { return ContextIndexBits::decode(minor_key_); }
-
- int slot_index() const { return SlotIndexBits::decode(minor_key_); }
-
- static bool Accepted(const ScriptContextTable::LookupResult* lookup_result) {
- return ContextIndexBits::is_valid(lookup_result->context_index) &&
- SlotIndexBits::is_valid(lookup_result->slot_index);
- }
-
- private:
- static const int kContextIndexBits = 9;
- static const int kSlotIndexBits = 12;
- class ContextIndexBits : public BitField<int, 0, kContextIndexBits> {};
- class SlotIndexBits
- : public BitField<int, kContextIndexBits, kSlotIndexBits> {};
-
- DEFINE_CODE_STUB_BASE(ScriptContextFieldStub, TurboFanCodeStub);
-};
-
-
-class LoadScriptContextFieldStub : public ScriptContextFieldStub {
- public:
- LoadScriptContextFieldStub(
- Isolate* isolate, const ScriptContextTable::LookupResult* lookup_result)
- : ScriptContextFieldStub(isolate, lookup_result) {}
-
- private:
- DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
- DEFINE_TURBOFAN_CODE_STUB(LoadScriptContextField, ScriptContextFieldStub);
-};
-
-
-class StoreScriptContextFieldStub : public ScriptContextFieldStub {
- public:
- StoreScriptContextFieldStub(
- Isolate* isolate, const ScriptContextTable::LookupResult* lookup_result)
- : ScriptContextFieldStub(isolate, lookup_result) {}
-
- private:
- DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
- DEFINE_TURBOFAN_CODE_STUB(StoreScriptContextField, ScriptContextFieldStub);
-};
-
class StoreFastElementStub : public TurboFanCodeStub {
public:
StoreFastElementStub(Isolate* isolate, bool is_js_array,
diff --git a/deps/v8/src/compilation-info.cc b/deps/v8/src/compilation-info.cc
index b722cc4e5c..27e6dbb9da 100644
--- a/deps/v8/src/compilation-info.cc
+++ b/deps/v8/src/compilation-info.cc
@@ -74,7 +74,7 @@ CompilationInfo::CompilationInfo(Vector<const char> debug_name,
zone_(zone),
deferred_handles_(nullptr),
dependencies_(nullptr),
- bailout_reason_(kNoReason),
+ bailout_reason_(BailoutReason::kNoReason),
parameter_count_(0),
optimization_id_(-1),
debug_name_(debug_name) {}
diff --git a/deps/v8/src/compilation-info.h b/deps/v8/src/compilation-info.h
index e0f5c73a9c..bb5812002e 100644
--- a/deps/v8/src/compilation-info.h
+++ b/deps/v8/src/compilation-info.h
@@ -211,13 +211,13 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
void ReopenHandlesInNewHandleScope();
void AbortOptimization(BailoutReason reason) {
- DCHECK_NE(reason, kNoReason);
- if (bailout_reason_ == kNoReason) bailout_reason_ = reason;
+ DCHECK_NE(reason, BailoutReason::kNoReason);
+ if (bailout_reason_ == BailoutReason::kNoReason) bailout_reason_ = reason;
SetFlag(kDisableFutureOptimization);
}
void RetryOptimization(BailoutReason reason) {
- DCHECK_NE(reason, kNoReason);
+ DCHECK_NE(reason, BailoutReason::kNoReason);
if (GetFlag(kDisableFutureOptimization)) return;
bailout_reason_ = reason;
}
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
index e365e301d1..1adfd090cd 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
@@ -305,7 +305,7 @@ void CompilerDispatcher::WaitForJobIfRunningOnBackground(
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompilerDispatcherWaitForBackgroundJob");
RuntimeCallTimerScope runtimeTimer(
- isolate_, &RuntimeCallStats::CompileWaitForDispatcher);
+ isolate_, RuntimeCallCounterId::kCompileWaitForDispatcher);
base::LockGuard<base::Mutex> lock(&mutex_);
if (running_background_jobs_.find(job) == running_background_jobs_.end()) {
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index e508f5a5a7..e2f8ee0f39 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -114,8 +114,8 @@ CompilationJob::Status CompilationJob::FinalizeJob(Isolate* isolate) {
DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
DisallowCodeDependencyChange no_dependency_change;
DisallowJavascriptExecution no_js(isolate);
- DCHECK(!compilation_info()->dependencies() ||
- !compilation_info()->dependencies()->HasAborted());
+ CHECK(!compilation_info()->dependencies() ||
+ !compilation_info()->dependencies()->HasAborted());
// Delegate to the underlying implementation.
DCHECK_EQ(state(), State::kReadyToFinalize);
@@ -340,7 +340,7 @@ void SetSharedFunctionFlagsFromLiteral(FunctionLiteral* literal,
shared_info->set_has_duplicate_parameters(
literal->has_duplicate_parameters());
shared_info->SetExpectedNofPropertiesFromEstimate(literal);
- if (literal->dont_optimize_reason() != kNoReason) {
+ if (literal->dont_optimize_reason() != BailoutReason::kNoReason) {
shared_info->DisableOptimization(literal->dont_optimize_reason());
}
}
@@ -375,8 +375,8 @@ bool Renumber(ParseInfo* parse_info,
RuntimeCallTimerScope runtimeTimer(
parse_info->runtime_call_stats(),
parse_info->on_background_thread()
- ? &RuntimeCallStats::CompileBackgroundRenumber
- : &RuntimeCallStats::CompileRenumber);
+ ? RuntimeCallCounterId::kCompileBackgroundRenumber
+ : RuntimeCallCounterId::kCompileRenumber);
return AstNumbering::Renumber(parse_info->stack_limit(), parse_info->zone(),
parse_info->literal(), eager_literals);
}
@@ -487,7 +487,7 @@ MUST_USE_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeCache(
Handle<JSFunction> function, BailoutId osr_offset) {
RuntimeCallTimerScope runtimeTimer(
function->GetIsolate(),
- &RuntimeCallStats::CompileGetFromOptimizedCodeMap);
+ RuntimeCallCounterId::kCompileGetFromOptimizedCodeMap);
Handle<SharedFunctionInfo> shared(function->shared());
DisallowHeapAllocation no_gc;
if (osr_offset.IsNone()) {
@@ -513,7 +513,7 @@ void ClearOptimizedCodeCache(CompilationInfo* compilation_info) {
if (compilation_info->osr_offset().IsNone()) {
Handle<FeedbackVector> vector =
handle(function->feedback_vector(), function->GetIsolate());
- vector->ClearOptimizedCode();
+ vector->ClearOptimizationMarker();
}
}
@@ -543,8 +543,8 @@ void InsertCodeIntoOptimizedCodeCache(CompilationInfo* compilation_info) {
bool GetOptimizedCodeNow(CompilationJob* job, Isolate* isolate) {
TimerEventScope<TimerEventRecompileSynchronous> timer(isolate);
- RuntimeCallTimerScope runtimeTimer(isolate,
- &RuntimeCallStats::RecompileSynchronous);
+ RuntimeCallTimerScope runtimeTimer(
+ isolate, RuntimeCallCounterId::kRecompileSynchronous);
CompilationInfo* compilation_info = job->compilation_info();
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.RecompileSynchronous");
@@ -590,8 +590,8 @@ bool GetOptimizedCodeLater(CompilationJob* job, Isolate* isolate) {
}
TimerEventScope<TimerEventRecompileSynchronous> timer(isolate);
- RuntimeCallTimerScope runtimeTimer(isolate,
- &RuntimeCallStats::RecompileSynchronous);
+ RuntimeCallTimerScope runtimeTimer(
+ isolate, RuntimeCallCounterId::kRecompileSynchronous);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.RecompileSynchronous");
@@ -653,26 +653,29 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
// Do not use TurboFan if we need to be able to set break points.
if (compilation_info->shared_info()->HasBreakInfo()) {
- compilation_info->AbortOptimization(kFunctionBeingDebugged);
+ compilation_info->AbortOptimization(BailoutReason::kFunctionBeingDebugged);
return MaybeHandle<Code>();
}
// Do not use TurboFan when %NeverOptimizeFunction was applied.
if (shared->optimization_disabled() &&
- shared->disable_optimization_reason() == kOptimizationDisabledForTest) {
- compilation_info->AbortOptimization(kOptimizationDisabledForTest);
+ shared->disable_optimization_reason() ==
+ BailoutReason::kOptimizationDisabledForTest) {
+ compilation_info->AbortOptimization(
+ BailoutReason::kOptimizationDisabledForTest);
return MaybeHandle<Code>();
}
// Do not use TurboFan if optimization is disabled or function doesn't pass
// turbo_filter.
if (!FLAG_opt || !shared->PassesFilter(FLAG_turbo_filter)) {
- compilation_info->AbortOptimization(kOptimizationDisabled);
+ compilation_info->AbortOptimization(BailoutReason::kOptimizationDisabled);
return MaybeHandle<Code>();
}
TimerEventScope<TimerEventOptimizeCode> optimize_code_timer(isolate);
- RuntimeCallTimerScope runtimeTimer(isolate, &RuntimeCallStats::OptimizeCode);
+ RuntimeCallTimerScope runtimeTimer(isolate,
+ RuntimeCallCounterId::kOptimizeCode);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.OptimizeCode");
// In case of concurrent recompilation, all handles below this point will be
@@ -716,8 +719,8 @@ CompilationJob::Status FinalizeOptimizedCompilationJob(CompilationJob* job,
CompilationInfo* compilation_info = job->compilation_info();
TimerEventScope<TimerEventRecompileSynchronous> timer(isolate);
- RuntimeCallTimerScope runtimeTimer(isolate,
- &RuntimeCallStats::RecompileSynchronous);
+ RuntimeCallTimerScope runtimeTimer(
+ isolate, RuntimeCallCounterId::kRecompileSynchronous);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.RecompileSynchronous");
@@ -735,9 +738,9 @@ CompilationJob::Status FinalizeOptimizedCompilationJob(CompilationJob* job,
// 4) Code generation may have failed.
if (job->state() == CompilationJob::State::kReadyToFinalize) {
if (shared->optimization_disabled()) {
- job->RetryOptimization(kOptimizationDisabled);
+ job->RetryOptimization(BailoutReason::kOptimizationDisabled);
} else if (compilation_info->dependencies()->HasAborted()) {
- job->RetryOptimization(kBailedOutDueToDependencyChange);
+ job->RetryOptimization(BailoutReason::kBailedOutDueToDependencyChange);
} else if (job->FinalizeJob(isolate) == CompilationJob::SUCCEEDED) {
job->RecordOptimizedCompilationStats();
job->RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG,
@@ -809,8 +812,8 @@ MaybeHandle<SharedFunctionInfo> CompileToplevel(ParseInfo* parse_info,
PostponeInterruptsScope postpone(isolate);
DCHECK(!isolate->native_context().is_null());
RuntimeCallTimerScope runtimeTimer(
- isolate, parse_info->is_eval() ? &RuntimeCallStats::CompileEval
- : &RuntimeCallStats::CompileScript);
+ isolate, parse_info->is_eval() ? RuntimeCallCounterId::kCompileEval
+ : RuntimeCallCounterId::kCompileScript);
VMState<BYTECODE_COMPILER> state(isolate);
if (parse_info->literal() == nullptr &&
!parsing::ParseProgram(parse_info, isolate)) {
@@ -860,8 +863,8 @@ bool Compiler::Analyze(ParseInfo* parse_info,
RuntimeCallTimerScope runtimeTimer(
parse_info->runtime_call_stats(),
parse_info->on_background_thread()
- ? &RuntimeCallStats::CompileBackgroundAnalyse
- : &RuntimeCallStats::CompileAnalyse);
+ ? RuntimeCallCounterId::kCompileBackgroundAnalyse
+ : RuntimeCallCounterId::kCompileAnalyse);
if (!Rewriter::Rewrite(parse_info)) return false;
DeclarationScope::Analyze(parse_info);
if (!Renumber(parse_info, eager_literals)) return false;
@@ -890,7 +893,7 @@ bool Compiler::Compile(Handle<SharedFunctionInfo> shared_info,
PostponeInterruptsScope postpone(isolate);
TimerEventScope<TimerEventCompileCode> compile_timer(isolate);
RuntimeCallTimerScope runtimeTimer(isolate,
- &RuntimeCallStats::CompileFunction);
+ RuntimeCallCounterId::kCompileFunction);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileCode");
AggregatedHistogramTimerScope timer(isolate->counters()->compile_lazy());
@@ -1174,17 +1177,59 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
return result;
}
-namespace {
+MaybeHandle<JSFunction> Compiler::GetWrappedFunction(
+ Handle<String> source, Handle<FixedArray> arguments,
+ Handle<Context> context, int line_offset, int column_offset,
+ Handle<Object> script_name, ScriptOriginOptions options) {
+ Isolate* isolate = source->GetIsolate();
+ int source_length = source->length();
+ isolate->counters()->total_compile_size()->Increment(source_length);
-bool ContainsAsmModule(Handle<Script> script) {
- DisallowHeapAllocation no_gc;
- SharedFunctionInfo::ScriptIterator iter(script);
- while (SharedFunctionInfo* info = iter.Next()) {
- if (info->HasAsmWasmData()) return true;
+ Handle<Script> script = isolate->factory()->NewScript(source);
+ if (isolate->NeedsSourcePositionsForProfiling()) {
+ Script::InitLineEnds(script);
}
- return false;
+ if (!script_name.is_null()) {
+ script->set_name(*script_name);
+ script->set_line_offset(line_offset);
+ script->set_column_offset(column_offset);
+ }
+ script->set_wrapped_arguments(*arguments);
+ script->set_origin_options(options);
+
+ ParseInfo parse_info(script);
+ parse_info.set_eval(); // Use an eval scope as declaration scope.
+ parse_info.set_wrapped_as_function();
+ if (!context->IsNativeContext()) {
+ parse_info.set_outer_scope_info(handle(context->scope_info()));
+ }
+
+ Handle<SharedFunctionInfo> top_level;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, top_level,
+ CompileToplevel(&parse_info, isolate), JSFunction);
+
+ Handle<JSFunction> top_level_fun =
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(top_level, context,
+ NOT_TENURED);
+
+ // TODO(yangguo): consider not having to call the top-level function, and
+ // instead instantiate the wrapper function directly.
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ Execution::Call(isolate, top_level_fun, isolate->global_proxy(), 0,
+ nullptr),
+ JSFunction);
+
+ // OnAfterCompile has to be called after we create the JSFunction, because
+ // we may need to recompile the eval for debugging if we find a function
+ // that contains break points in the eval script.
+ isolate->debug()->OnAfterCompile(script);
+ return Handle<JSFunction>::cast(result);
}
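// GetWrappedFunction parses |source| as the body of a function whose formal
// parameters are |arguments|, then runs the compiled top-level script once so
// that it evaluates to the wrapper function itself. A hedged sketch of the
// embedder-facing shape this enables (argument values are assumptions):
//
//   // Compiling "return a + b" with arguments {"a", "b"} behaves like
//   //   function anonymous(a, b) { return a + b; }
//   v8::Local<v8::String> args[] = {arg_a, arg_b};  // e.g. "a" and "b"
//   v8::ScriptCompiler::Source source(body);        // e.g. "return a + b"
//   v8::MaybeLocal<v8::Function> fn =
//       v8::ScriptCompiler::CompileFunctionInContext(context, &source,
//                                                    2, args, 0, nullptr);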
+namespace {
+
bool ShouldProduceCodeCache(ScriptCompiler::CompileOptions options) {
return options == ScriptCompiler::kProduceCodeCache ||
options == ScriptCompiler::kProduceFullCodeCache;
@@ -1369,6 +1414,13 @@ struct ScriptCompileTimerScope {
return CacheBehaviour::kNoCacheBecauseInDocumentWrite;
case ScriptCompiler::kNoCacheBecauseResourceWithNoCacheHandler:
return CacheBehaviour::kNoCacheBecauseResourceWithNoCacheHandler;
+ case ScriptCompiler::kNoCacheBecauseDeferredProduceCodeCache: {
+ if (hit_isolate_cache_) {
+ return CacheBehaviour::kHitIsolateCacheWhenProduceCodeCache;
+ } else {
+ return CacheBehaviour::kProduceCodeCache;
+ }
+ }
}
UNREACHABLE();
}
@@ -1438,7 +1490,8 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
Isolate* isolate = source->GetIsolate();
ScriptCompileTimerScope compile_timer(isolate, no_cache_reason);
- if (compile_options == ScriptCompiler::kNoCompileOptions) {
+ if (compile_options == ScriptCompiler::kNoCompileOptions ||
+ compile_options == ScriptCompiler::kEagerCompile) {
cached_data = nullptr;
} else if (compile_options == ScriptCompiler::kProduceParserCache ||
ShouldProduceCodeCache(compile_options)) {
@@ -1477,8 +1530,8 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
compile_timer.set_consuming_code_cache();
// Then check cached code provided by embedder.
HistogramTimerScope timer(isolate->counters()->compile_deserialize());
- RuntimeCallTimerScope runtimeTimer(isolate,
- &RuntimeCallStats::CompileDeserialize);
+ RuntimeCallTimerScope runtimeTimer(
+ isolate, RuntimeCallCounterId::kCompileDeserialize);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompileDeserialize");
Handle<SharedFunctionInfo> inner_result;
@@ -1493,6 +1546,9 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
inner_result, vector);
Handle<Script> script(Script::cast(inner_result->script()), isolate);
isolate->debug()->OnAfterCompile(script);
+ if (isolate->NeedsSourcePositionsForProfiling()) {
+ Script::InitLineEnds(script);
+ }
return inner_result;
}
// Deserializer failed. Fall through to compile.
@@ -1556,8 +1612,9 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
if (!context->IsNativeContext()) {
parse_info.set_outer_scope_info(handle(context->scope_info()));
}
- parse_info.set_eager(compile_options ==
- ScriptCompiler::kProduceFullCodeCache);
+ parse_info.set_eager(
+ (compile_options == ScriptCompiler::kProduceFullCodeCache) ||
+ (compile_options == ScriptCompiler::kEagerCompile));
parse_info.set_language_mode(
stricter_language_mode(parse_info.language_mode(), language_mode));
@@ -1572,13 +1629,13 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
compilation_cache->PutScript(source, context, language_mode, result,
vector);
if (ShouldProduceCodeCache(compile_options) &&
- !ContainsAsmModule(script)) {
+ !script->ContainsAsmModule()) {
compile_timer.set_producing_code_cache();
HistogramTimerScope histogram_timer(
isolate->counters()->compile_serialize());
- RuntimeCallTimerScope runtimeTimer(isolate,
- &RuntimeCallStats::CompileSerialize);
+ RuntimeCallTimerScope runtimeTimer(
+ isolate, RuntimeCallCounterId::kCompileSerialize);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompileSerialize");
*cached_data = CodeSerializer::Serialize(isolate, result, source);
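// The serialize path above is one half of the embedder-visible code cache
// round trip. A hedged sketch of the driving sequence (error handling elided,
// details illustrative):
//
//   v8::ScriptCompiler::Source source(source_string);
//   v8::ScriptCompiler::Compile(context, &source,
//                               v8::ScriptCompiler::kProduceCodeCache);
//   const v8::ScriptCompiler::CachedData* cache = source.GetCachedData();
//   // Persist cache->data / cache->length; later, construct a Source with a
//   // CachedData copy and compile with kConsumeCodeCache to hit the
//   // deserialize path earlier in this function.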
@@ -1610,8 +1667,8 @@ std::unique_ptr<CompilationJob> Compiler::CompileTopLevelOnBackgroundThread(
"V8.CompileCodeBackground");
RuntimeCallTimerScope runtimeTimer(
parse_info->runtime_call_stats(),
- parse_info->is_eval() ? &RuntimeCallStats::CompileBackgroundEval
- : &RuntimeCallStats::CompileBackgroundScript);
+ parse_info->is_eval() ? RuntimeCallCounterId::kCompileBackgroundEval
+ : RuntimeCallCounterId::kCompileBackgroundScript);
LanguageMode language_mode = construct_language_mode(FLAG_use_strict);
parse_info->set_language_mode(
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index cc63697221..b84134c14e 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -101,6 +101,14 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
int column_offset = 0, Handle<Object> script_name = Handle<Object>(),
ScriptOriginOptions options = ScriptOriginOptions());
+ // Create a function that results from wrapping |source| in a function,
+ // with |arguments| being a list of parameters for that function.
+ MUST_USE_RESULT static MaybeHandle<JSFunction> GetWrappedFunction(
+ Handle<String> source, Handle<FixedArray> arguments,
+ Handle<Context> context, int line_offset = 0, int column_offset = 0,
+ Handle<Object> script_name = Handle<Object>(),
+ ScriptOriginOptions options = ScriptOriginOptions());
+
// Returns true if the embedder permits compiling the given source string in
// the given context.
static bool CodeGenerationFromStringsAllowed(Isolate* isolate,
diff --git a/deps/v8/src/compiler/OWNERS b/deps/v8/src/compiler/OWNERS
index b63f5431e2..2e9052e0c3 100644
--- a/deps/v8/src/compiler/OWNERS
+++ b/deps/v8/src/compiler/OWNERS
@@ -3,13 +3,16 @@ set noparent
bmeurer@chromium.org
jarin@chromium.org
mstarzinger@chromium.org
-mtrofin@chromium.org
titzer@chromium.org
danno@chromium.org
tebbi@chromium.org
neis@chromium.org
mvstanton@chromium.org
+# For backend
+bbudge@chromium.org
+mtrofin@chromium.org
+
per-file wasm-*=ahaas@chromium.org
per-file wasm-*=bbudge@chromium.org
per-file wasm-*=bradnelson@chromium.org
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index ac4fc4363b..13d6801c32 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -52,6 +52,14 @@ FieldAccess AccessBuilder::ForHeapNumberValue() {
return access;
}
+// static
+FieldAccess AccessBuilder::ForBigIntBitfield() {
+ FieldAccess access = {
+ kTaggedBase, BigInt::kBitfieldOffset, MaybeHandle<Name>(),
+ MaybeHandle<Map>(), TypeCache::Get().kInt32, MachineType::IntPtr(),
+ kNoWriteBarrier};
+ return access;
+}
// static
FieldAccess AccessBuilder::ForJSObjectPropertiesOrHash() {
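// Reading the ForBigIntBitfield initializer above, field by field; the
// comments are descriptive glosses of the shown code, not new behavior:
//
//   kTaggedBase               // base address is a tagged heap-object pointer
//   BigInt::kBitfieldOffset   // byte offset of the field within the object
//   MaybeHandle<Name>()       // no property name attached to the access
//   MaybeHandle<Map>()        // no map associated with the access
//   TypeCache::Get().kInt32   // value type as seen by the typer
//   MachineType::IntPtr()     // machine representation for the load/store
//   kNoWriteBarrier           // untagged payload, so no GC write barrier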
diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h
index e348c0f71b..a2ce1f800b 100644
--- a/deps/v8/src/compiler/access-builder.h
+++ b/deps/v8/src/compiler/access-builder.h
@@ -38,6 +38,9 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to HeapNumber::value() field.
static FieldAccess ForHeapNumberValue();
+ // Provides access to BigInt's bit field.
+ static FieldAccess ForBigIntBitfield();
+
// Provides access to JSObject::properties() field.
static FieldAccess ForJSObjectPropertiesOrHash();
diff --git a/deps/v8/src/compiler/arm/code-generator-arm.cc b/deps/v8/src/compiler/arm/code-generator-arm.cc
index 1a66e5b7d4..a238cf29d4 100644
--- a/deps/v8/src/compiler/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/arm/code-generator-arm.cc
@@ -152,49 +152,6 @@ class ArmOperandConverter final : public InstructionOperandConverter {
namespace {
-class OutOfLineLoadFloat final : public OutOfLineCode {
- public:
- OutOfLineLoadFloat(CodeGenerator* gen, SwVfpRegister result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final {
- // Compute sqrtf(-1.0f), which results in a quiet single-precision NaN.
- __ vmov(result_, Float32(-1.0f));
- __ vsqrt(result_, result_);
- }
-
- private:
- SwVfpRegister const result_;
-};
-
-class OutOfLineLoadDouble final : public OutOfLineCode {
- public:
- OutOfLineLoadDouble(CodeGenerator* gen, DwVfpRegister result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final {
- // Compute sqrt(-1.0), which results in a quiet double-precision NaN.
- __ vmov(result_, Double(-1.0));
- __ vsqrt(result_, result_);
- }
-
- private:
- DwVfpRegister const result_;
-};
-
-
-class OutOfLineLoadInteger final : public OutOfLineCode {
- public:
- OutOfLineLoadInteger(CodeGenerator* gen, Register result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final { __ mov(result_, Operand::Zero()); }
-
- private:
- Register const result_;
-};
-
-
class OutOfLineRecordWrite final : public OutOfLineCode {
public:
OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index,
@@ -359,64 +316,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
} // namespace
-#define ASSEMBLE_CHECKED_LOAD_FP(Type) \
- do { \
- auto result = i.Output##Type##Register(); \
- auto offset = i.InputRegister(0); \
- if (instr->InputAt(1)->IsRegister()) { \
- __ cmp(offset, i.InputRegister(1)); \
- } else { \
- __ cmp(offset, i.InputImmediate(1)); \
- } \
- auto ool = new (zone()) OutOfLineLoad##Type(this, result); \
- __ b(hs, ool->entry()); \
- __ vldr(result, i.InputOffset(2)); \
- __ bind(ool->exit()); \
- DCHECK_EQ(LeaveCC, i.OutputSBit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
- do { \
- auto result = i.OutputRegister(); \
- auto offset = i.InputRegister(0); \
- if (instr->InputAt(1)->IsRegister()) { \
- __ cmp(offset, i.InputRegister(1)); \
- } else { \
- __ cmp(offset, i.InputImmediate(1)); \
- } \
- auto ool = new (zone()) OutOfLineLoadInteger(this, result); \
- __ b(hs, ool->entry()); \
- __ asm_instr(result, i.InputOffset(2)); \
- __ bind(ool->exit()); \
- DCHECK_EQ(LeaveCC, i.OutputSBit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_STORE_FP(Type) \
- do { \
- auto offset = i.InputRegister(0); \
- if (instr->InputAt(1)->IsRegister()) { \
- __ cmp(offset, i.InputRegister(1)); \
- } else { \
- __ cmp(offset, i.InputImmediate(1)); \
- } \
- auto value = i.Input##Type##Register(2); \
- __ vstr(value, i.InputOffset(3), lo); \
- DCHECK_EQ(LeaveCC, i.OutputSBit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
- do { \
- auto offset = i.InputRegister(0); \
- if (instr->InputAt(1)->IsRegister()) { \
- __ cmp(offset, i.InputRegister(1)); \
- } else { \
- __ cmp(offset, i.InputImmediate(1)); \
- } \
- auto value = i.InputRegister(2); \
- __ asm_instr(value, i.InputOffset(3), lo); \
- DCHECK_EQ(LeaveCC, i.OutputSBit()); \
- } while (0)
-
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
do { \
__ asm_instr(i.OutputRegister(), \
@@ -432,51 +331,51 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
__ dmb(ISH); \
} while (0)
-#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_instr, store_instr) \
- do { \
- Label exchange; \
- __ add(i.InputRegister(0), i.InputRegister(0), i.InputRegister(1)); \
- __ dmb(ISH); \
- __ bind(&exchange); \
- __ load_instr(i.OutputRegister(0), i.InputRegister(0)); \
- __ store_instr(i.TempRegister(0), i.InputRegister(2), i.InputRegister(0)); \
- __ teq(i.TempRegister(0), Operand(0)); \
- __ b(ne, &exchange); \
- __ dmb(ISH); \
- } while (0)
-
-#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_instr, store_instr) \
- do { \
- Label compareExchange; \
- Label exit; \
- __ add(i.InputRegister(0), i.InputRegister(0), i.InputRegister(1)); \
- __ dmb(ISH); \
- __ bind(&compareExchange); \
- __ load_instr(i.OutputRegister(0), i.InputRegister(0)); \
- __ teq(i.InputRegister(2), Operand(i.OutputRegister(0))); \
- __ b(ne, &exit); \
- __ store_instr(i.TempRegister(0), i.InputRegister(3), i.InputRegister(0)); \
- __ teq(i.TempRegister(0), Operand(0)); \
- __ b(ne, &compareExchange); \
- __ bind(&exit); \
- __ dmb(ISH); \
+#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_instr, store_instr) \
+ do { \
+ Label exchange; \
+ __ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1)); \
+ __ dmb(ISH); \
+ __ bind(&exchange); \
+ __ load_instr(i.OutputRegister(0), i.TempRegister(1)); \
+ __ store_instr(i.TempRegister(0), i.InputRegister(2), i.TempRegister(1)); \
+ __ teq(i.TempRegister(0), Operand(0)); \
+ __ b(ne, &exchange); \
+ __ dmb(ISH); \
} while (0)
-#define ASSEMBLE_ATOMIC_BINOP(load_instr, store_instr, bin_instr) \
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_instr, store_instr, \
+ cmp_reg) \
do { \
- Label binop; \
- __ add(i.InputRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ Label compareExchange; \
+ Label exit; \
__ dmb(ISH); \
- __ bind(&binop); \
- __ load_instr(i.OutputRegister(0), i.InputRegister(0)); \
- __ bin_instr(i.TempRegister(0), i.OutputRegister(0), \
- Operand(i.InputRegister(2))); \
- __ store_instr(i.TempRegister(1), i.TempRegister(0), i.InputRegister(0)); \
- __ teq(i.TempRegister(1), Operand(0)); \
- __ b(ne, &binop); \
+ __ bind(&compareExchange); \
+ __ load_instr(i.OutputRegister(0), i.TempRegister(1)); \
+ __ teq(cmp_reg, Operand(i.OutputRegister(0))); \
+ __ b(ne, &exit); \
+ __ store_instr(i.TempRegister(0), i.InputRegister(3), i.TempRegister(1)); \
+ __ teq(i.TempRegister(0), Operand(0)); \
+ __ b(ne, &compareExchange); \
+ __ bind(&exit); \
__ dmb(ISH); \
} while (0)
+#define ASSEMBLE_ATOMIC_BINOP(load_instr, store_instr, bin_instr) \
+ do { \
+ Label binop; \
+ __ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1)); \
+ __ dmb(ISH); \
+ __ bind(&binop); \
+ __ load_instr(i.OutputRegister(0), i.TempRegister(1)); \
+ __ bin_instr(i.TempRegister(0), i.OutputRegister(0), \
+ Operand(i.InputRegister(2))); \
+ __ store_instr(i.TempRegister(2), i.TempRegister(0), i.TempRegister(1)); \
+ __ teq(i.TempRegister(2), Operand(0)); \
+ __ b(ne, &binop); \
+ __ dmb(ISH); \
+ } while (0)
+
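// The macros above all emit the classic ARM load-exclusive / store-exclusive
// retry loop; the rewrite moves the effective address into a temp register so
// the inputs survive. Semantics of the compare-exchange variant in C-like
// pseudocode (a sketch of the behavior, not the emitted encoding):
//
//   addr = base + index;                 // now written to a temp register
//   dmb ish;                             // barrier before the loop
//   do {
//     old = load_exclusive(addr);        // ldrex*: opens an exclusive monitor
//     if (old != expected) break;        // teq cmp_reg + b ne &exit
//     failed = store_exclusive(addr, new_value);  // strex*: 0 on success
//   } while (failed);                    // retry if another core intervened
//   dmb ish;                             // barrier after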
#define ASSEMBLE_IEEE754_BINOP(name) \
do { \
/* TODO(bmeurer): We should really get rid of this special instruction, */ \
@@ -675,17 +574,18 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
// Check if the code object is marked for deoptimization. If it is, then it
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
// to:
-// 1. load the address of the current instruction;
+// 1. compute the offset of the {CodeDataContainer} from our current location
+// and load it.
// 2. read from memory the word that contains that bit, which can be found in
// the flags in the referenced {CodeDataContainer} object;
// 3. test kMarkedForDeoptimizationBit in those flags; and
// 4. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
int pc_offset = __ pc_offset();
- int offset =
- Code::kCodeDataContainerOffset - (Code::kHeaderSize + pc_offset + 8);
+ int offset = Code::kCodeDataContainerOffset -
+ (Code::kHeaderSize + pc_offset + TurboAssembler::kPcLoadDelta);
// We can use the register pc - 8 for the address of the current instruction.
- __ ldr(ip, MemOperand(pc, offset));
+ __ ldr_pcrel(ip, offset);
__ ldr(ip, FieldMemOperand(ip, CodeDataContainer::kKindSpecificFlagsOffset));
__ tst(ip, Operand(1 << Code::kMarkedForDeoptimizationBit));
Handle<Code> code = isolate()->builtins()->builtin_handle(
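// Offset arithmetic above, spelled out: when the ldr executes, the hardware pc
// reads as the address of the current instruction plus 8 (kPcLoadDelta on
// ARM), i.e. code_start + Code::kHeaderSize + pc_offset + 8. The target is
// code_start + Code::kCodeDataContainerOffset, so the pc-relative delta is
//   kCodeDataContainerOffset - (kHeaderSize + pc_offset + kPcLoadDelta)
// which is exactly the |offset| handed to ldr_pcrel.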
@@ -804,7 +704,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// Check the function's context matches the context argument.
__ ldr(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
__ cmp(cp, kScratchReg);
- __ Assert(eq, kWrongFunctionContext);
+ __ Assert(eq, AbortReason::kWrongFunctionContext);
}
__ ldr(ip, FieldMemOperand(func, JSFunction::kCodeOffset));
__ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -1681,13 +1581,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmPush:
if (instr->InputAt(0)->IsFPRegister()) {
LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
- if (op->representation() == MachineRepresentation::kFloat64) {
- __ vpush(i.InputDoubleRegister(0));
- frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
- } else {
- DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
- __ vpush(i.InputFloatRegister(0));
- frame_access_state()->IncreaseSPDelta(1);
+ switch (op->representation()) {
+ case MachineRepresentation::kFloat32:
+ __ vpush(i.InputFloatRegister(0));
+ frame_access_state()->IncreaseSPDelta(1);
+ break;
+ case MachineRepresentation::kFloat64:
+ __ vpush(i.InputDoubleRegister(0));
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ break;
+ case MachineRepresentation::kSimd128: {
+ __ vpush(i.InputSimd128Register(0));
+ frame_access_state()->IncreaseSPDelta(kSimd128Size / kPointerSize);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
}
} else {
__ push(i.InputRegister(0));
@@ -1701,6 +1611,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
+ case kArmPeek: {
+ // The incoming value is 0-based, but we need a 1-based value.
+ int reverse_slot = i.InputInt32(0) + 1;
+ int offset =
+ FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
+ if (instr->OutputAt(0)->IsFPRegister()) {
+ LocationOperand* op = LocationOperand::cast(instr->OutputAt(0));
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ vldr(i.OutputDoubleRegister(), MemOperand(fp, offset));
+ } else {
+ DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
+ __ vldr(i.OutputFloatRegister(), MemOperand(fp, offset));
+ }
+ } else {
+ __ ldr(i.OutputRegister(), MemOperand(fp, offset));
+ }
+ break;
+ }
case kArmF32x4Splat: {
int src_code = i.InputFloatRegister(0).code();
__ vdup(Neon32, i.OutputSimd128Register(),
@@ -2558,47 +2486,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ExtractLane(i.OutputRegister(), kScratchDoubleReg, NeonS8, 0);
break;
}
- case kCheckedLoadInt8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(ldrsb);
- break;
- case kCheckedLoadUint8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(ldrb);
- break;
- case kCheckedLoadInt16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(ldrsh);
- break;
- case kCheckedLoadUint16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(ldrh);
- break;
- case kCheckedLoadWord32:
- ASSEMBLE_CHECKED_LOAD_INTEGER(ldr);
- break;
- case kCheckedLoadFloat32:
- ASSEMBLE_CHECKED_LOAD_FP(Float);
- break;
- case kCheckedLoadFloat64:
- ASSEMBLE_CHECKED_LOAD_FP(Double);
- break;
- case kCheckedStoreWord8:
- ASSEMBLE_CHECKED_STORE_INTEGER(strb);
- break;
- case kCheckedStoreWord16:
- ASSEMBLE_CHECKED_STORE_INTEGER(strh);
- break;
- case kCheckedStoreWord32:
- ASSEMBLE_CHECKED_STORE_INTEGER(str);
- break;
- case kCheckedStoreFloat32:
- ASSEMBLE_CHECKED_STORE_FP(Float);
- break;
- case kCheckedStoreFloat64:
- ASSEMBLE_CHECKED_STORE_FP(Double);
- break;
- case kCheckedLoadWord64:
- case kCheckedStoreWord64:
- UNREACHABLE(); // currently unsupported checked int64 load/store.
- break;
-
case kAtomicLoadInt8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrsb);
break;
@@ -2642,25 +2529,35 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrex, strex);
break;
case kAtomicCompareExchangeInt8:
- __ uxtb(i.InputRegister(2), i.InputRegister(2));
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexb, strexb);
+ __ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
+ __ uxtb(i.TempRegister(2), i.InputRegister(2));
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexb, strexb,
+ i.TempRegister(2));
__ sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
case kAtomicCompareExchangeUint8:
- __ uxtb(i.InputRegister(2), i.InputRegister(2));
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexb, strexb);
+ __ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
+ __ uxtb(i.TempRegister(2), i.InputRegister(2));
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexb, strexb,
+ i.TempRegister(2));
break;
case kAtomicCompareExchangeInt16:
- __ uxth(i.InputRegister(2), i.InputRegister(2));
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexh, strexh);
+ __ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
+ __ uxth(i.TempRegister(2), i.InputRegister(2));
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexh, strexh,
+ i.TempRegister(2));
__ sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
case kAtomicCompareExchangeUint16:
- __ uxth(i.InputRegister(2), i.InputRegister(2));
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexh, strexh);
+ __ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
+ __ uxth(i.TempRegister(2), i.InputRegister(2));
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexh, strexh,
+ i.TempRegister(2));
break;
case kAtomicCompareExchangeWord32:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrex, strex);
+ __ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrex, strex,
+ i.InputRegister(2));
break;
#define ATOMIC_BINOP_CASE(op, inst) \
case kAtomic##op##Int8: \
@@ -2686,10 +2583,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Or, orr)
ATOMIC_BINOP_CASE(Xor, eor)
#undef ATOMIC_BINOP_CASE
-#undef ASSEMBLE_CHECKED_LOAD_FP
-#undef ASSEMBLE_CHECKED_LOAD_INTEGER
-#undef ASSEMBLE_CHECKED_STORE_FP
-#undef ASSEMBLE_CHECKED_STORE_INTEGER
#undef ASSEMBLE_ATOMIC_LOAD_INTEGER
#undef ASSEMBLE_ATOMIC_STORE_INTEGER
#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER
@@ -2774,7 +2667,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
- __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
+ __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
}
}
}
@@ -2878,7 +2771,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
- __ Abort(kShouldNotDirectlyEnterOsrFunction);
+ __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
// Unoptimized code jumps directly to this entrypoint while the unoptimized
// frame is still on the stack. Optimized code uses OSR values directly from
@@ -2929,15 +2822,16 @@ void CodeGenerator::AssembleConstructFrame() {
RecordSafepoint(reference_map, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
- __ stop(GetBailoutReason(kUnexpectedReturnFromThrow));
+ __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromThrow));
}
__ bind(&done);
}
}
- // Skip callee-saved slots, which are pushed below.
+ // Skip callee-saved and return slots, which are allocated below.
shrink_slots -= base::bits::CountPopulation(saves);
+ shrink_slots -= frame()->GetReturnSlotCount();
shrink_slots -= 2 * base::bits::CountPopulation(saves_fp);
if (shrink_slots > 0) {
__ sub(sp, sp, Operand(shrink_slots * kPointerSize));
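// Slot accounting above, worked with assumed numbers: for a 12-slot frame with
// 3 callee-saved core registers, 2 callee-saved double registers (two slots
// each on 32-bit ARM), and 2 return slots,
//   shrink_slots = 12 - 3 - 2 - 2*2 = 3
// plain spill slots are claimed by the sub; the register stores and the
// return-slot allocation below account for the remainder.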
@@ -2953,16 +2847,29 @@ void CodeGenerator::AssembleConstructFrame() {
__ vstm(db_w, sp, DwVfpRegister::from_code(first),
DwVfpRegister::from_code(last));
}
+
if (saves != 0) {
// Save callee-saved registers.
__ stm(db_w, sp, saves);
}
+
+ const int returns = frame()->GetReturnSlotCount();
+ if (returns != 0) {
+ // Create space for returns.
+ __ sub(sp, sp, Operand(returns * kPointerSize));
+ }
}
void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
int pop_count = static_cast<int>(descriptor->StackParameterCount());
+ const int returns = frame()->GetReturnSlotCount();
+ if (returns != 0) {
+ // Free up the space reserved for return slots.
+ __ add(sp, sp, Operand(returns * kPointerSize));
+ }
+
// Restore registers.
const RegList saves = descriptor->CalleeSavedRegisters();
if (saves != 0) {
diff --git a/deps/v8/src/compiler/arm/instruction-codes-arm.h b/deps/v8/src/compiler/arm/instruction-codes-arm.h
index c839d25cab..a7cf80450a 100644
--- a/deps/v8/src/compiler/arm/instruction-codes-arm.h
+++ b/deps/v8/src/compiler/arm/instruction-codes-arm.h
@@ -124,6 +124,7 @@ namespace compiler {
V(ArmStr) \
V(ArmPush) \
V(ArmPoke) \
+ V(ArmPeek) \
V(ArmF32x4Splat) \
V(ArmF32x4ExtractLane) \
V(ArmF32x4ReplaceLane) \
diff --git a/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc b/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
index 0092a9dbe5..a592515179 100644
--- a/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
@@ -262,6 +262,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmLdrh:
case kArmLdrsh:
case kArmLdr:
+ case kArmPeek:
return kIsLoadOperation;
case kArmVstrF32:
diff --git a/deps/v8/src/compiler/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
index 4ded82fa5b..f94d114d07 100644
--- a/deps/v8/src/compiler/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
@@ -300,7 +300,8 @@ void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsTrap()) {
inputs[input_count++] = g.UseImmediate(cont->trap_id());
selector->Emit(opcode, output_count, outputs, input_count, inputs);
@@ -721,93 +722,6 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
}
}
-void InstructionSelector::VisitCheckedLoad(Node* node) {
- CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
- ArmOperandGenerator g(this);
- Node* const buffer = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- ArchOpcode opcode = kArchNop;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedLoadWord32;
- break;
- case MachineRepresentation::kFloat32:
- opcode = kCheckedLoadFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedLoadFloat64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- InstructionOperand offset_operand = g.UseRegister(offset);
- InstructionOperand length_operand = g.CanBeImmediate(length, kArmCmp)
- ? g.UseImmediate(length)
- : g.UseRegister(length);
- Emit(opcode | AddressingModeField::encode(kMode_Offset_RR),
- g.DefineAsRegister(node), offset_operand, length_operand,
- g.UseRegister(buffer), offset_operand);
-}
-
-
-void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
- ArmOperandGenerator g(this);
- Node* const buffer = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- Node* const value = node->InputAt(3);
- ArchOpcode opcode = kArchNop;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kCheckedStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kCheckedStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedStoreWord32;
- break;
- case MachineRepresentation::kFloat32:
- opcode = kCheckedStoreFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedStoreFloat64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- InstructionOperand offset_operand = g.UseRegister(offset);
- InstructionOperand length_operand = g.CanBeImmediate(length, kArmCmp)
- ? g.UseImmediate(length)
- : g.UseRegister(length);
- Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), g.NoOutput(),
- offset_operand, length_operand, g.UseRegister(value),
- g.UseRegister(buffer), offset_operand);
-}
-
-
namespace {
void EmitBic(InstructionSelector* selector, Node* node, Node* left,
@@ -868,14 +782,14 @@ void InstructionSelector::VisitWord32And(Node* node) {
uint32_t const shift = mshr.right().Value();
if (((shift == 8) || (shift == 16) || (shift == 24)) &&
- (value == 0xff)) {
+ (value == 0xFF)) {
// Merge SHR into AND by emitting a UXTB instruction with a
// bytewise rotation.
Emit(kArmUxtb, g.DefineAsRegister(m.node()),
g.UseRegister(mshr.left().node()),
g.TempImmediate(mshr.right().Value()));
return;
- } else if (((shift == 8) || (shift == 16)) && (value == 0xffff)) {
+ } else if (((shift == 8) || (shift == 16)) && (value == 0xFFFF)) {
// Merge SHR into AND by emitting a UXTH instruction with a
// bytewise rotation.
Emit(kArmUxth, g.DefineAsRegister(m.node()),
@@ -897,9 +811,9 @@ void InstructionSelector::VisitWord32And(Node* node) {
}
}
}
- } else if (value == 0xffff) {
+ } else if (value == 0xFFFF) {
// Emit UXTH for this AND. We don't bother testing for UXTB, as it's no
- // better than AND 0xff for this operation.
+ // better than AND 0xFF for this operation.
Emit(kArmUxth, g.DefineAsRegister(m.node()),
g.UseRegister(m.left().node()), g.TempImmediate(0));
return;
@@ -995,7 +909,8 @@ void VisitShift(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsTrap()) {
inputs[input_count++] = g.UseImmediate(cont->trap_id());
selector->Emit(opcode, output_count, outputs, input_count, inputs);
@@ -1206,6 +1121,7 @@ void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitSpeculationFence(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitInt32Add(Node* node) {
ArmOperandGenerator g(this);
@@ -1230,12 +1146,12 @@ void InstructionSelector::VisitInt32Add(Node* node) {
}
case IrOpcode::kWord32And: {
Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().Is(0xff)) {
+ if (mleft.right().Is(0xFF)) {
Emit(kArmUxtab, g.DefineAsRegister(node),
g.UseRegister(m.right().node()),
g.UseRegister(mleft.left().node()), g.TempImmediate(0));
return;
- } else if (mleft.right().Is(0xffff)) {
+ } else if (mleft.right().Is(0xFFFF)) {
Emit(kArmUxtah, g.DefineAsRegister(node),
g.UseRegister(m.right().node()),
g.UseRegister(mleft.left().node()), g.TempImmediate(0));
@@ -1284,12 +1200,12 @@ void InstructionSelector::VisitInt32Add(Node* node) {
}
case IrOpcode::kWord32And: {
Int32BinopMatcher mright(m.right().node());
- if (mright.right().Is(0xff)) {
+ if (mright.right().Is(0xFF)) {
Emit(kArmUxtab, g.DefineAsRegister(node),
g.UseRegister(m.left().node()),
g.UseRegister(mright.left().node()), g.TempImmediate(0));
return;
- } else if (mright.right().Is(0xffff)) {
+ } else if (mright.right().Is(0xFFFF)) {
Emit(kArmUxtah, g.DefineAsRegister(node),
g.UseRegister(m.left().node()),
g.UseRegister(mright.left().node()), g.TempImmediate(0));
@@ -1358,7 +1274,8 @@ void EmitInt32MulWithOverflow(InstructionSelector* selector, Node* node,
} else if (cont->IsDeoptimize()) {
InstructionOperand in[] = {temp_operand, result_operand, shift_31};
selector->EmitDeoptimize(opcode, 0, nullptr, 3, in, cont->kind(),
- cont->reason(), cont->frame_state());
+ cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), temp_operand,
result_operand, shift_31);
@@ -1596,22 +1513,44 @@ void InstructionSelector::EmitPrepareArguments(
// Poke any stack arguments.
for (size_t n = 0; n < arguments->size(); ++n) {
PushParameter input = (*arguments)[n];
- if (input.node()) {
+ if (input.node) {
int slot = static_cast<int>(n);
Emit(kArmPoke | MiscField::encode(slot), g.NoOutput(),
- g.UseRegister(input.node()));
+ g.UseRegister(input.node));
}
}
} else {
// Push any stack arguments.
for (PushParameter input : base::Reversed(*arguments)) {
// Skip any alignment holes in pushed nodes.
- if (input.node() == nullptr) continue;
- Emit(kArmPush, g.NoOutput(), g.UseRegister(input.node()));
+ if (input.node == nullptr) continue;
+ Emit(kArmPush, g.NoOutput(), g.UseRegister(input.node));
}
}
}
+void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
+ const CallDescriptor* descriptor,
+ Node* node) {
+ ArmOperandGenerator g(this);
+
+ int reverse_slot = 0;
+ for (PushParameter output : *results) {
+ if (!output.location.IsCallerFrameSlot()) continue;
+ // Skip any alignment holes in nodes.
+ if (output.node != nullptr) {
+ DCHECK(!descriptor->IsCFunctionCall());
+ if (output.location.GetType() == MachineType::Float32()) {
+ MarkAsFloat32(output.node);
+ } else if (output.location.GetType() == MachineType::Float64()) {
+ MarkAsFloat64(output.node);
+ }
+ Emit(kArmPeek, g.DefineAsRegister(output.node),
+ g.UseImmediate(reverse_slot));
+ }
+ reverse_slot += output.location.GetSizeInPointers();
+ }
+}
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
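// EmitPrepareResults above is the selection-side companion of kArmPeek:
// results that stay in registers are skipped outright, while caller-frame-slot
// results emit a peek at the running reverse_slot. A sketch for a call
// returning (int32, float64) entirely on the stack, on 32-bit ARM:
//
//   result 0: int32   -> kArmPeek at reverse_slot 0, then reverse_slot += 1
//   result 1: float64 -> MarkAsFloat64, kArmPeek at reverse_slot 1,
//                        then reverse_slot += 2 (doubles span two pointers)
//   alignment holes (null nodes) advance reverse_slot without emitting.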
@@ -1630,7 +1569,8 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
- cont->reason(), cont->frame_state());
+ cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
} else {
@@ -1825,7 +1765,8 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsTrap()) {
inputs[input_count++] = g.UseImmediate(cont->trap_id());
selector->Emit(opcode, output_count, outputs, input_count, inputs);
@@ -1984,7 +1925,8 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand, value_operand,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
value_operand);
@@ -2006,14 +1948,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), node->InputAt(1));
+ kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), node->InputAt(1));
+ kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
@@ -2276,15 +2218,14 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
AddressingMode addressing_mode = kMode_Offset_RR;
InstructionOperand inputs[3];
size_t input_count = 0;
- inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseRegister(base);
inputs[input_count++] = g.UseRegister(index);
inputs[input_count++] = g.UseUniqueRegister(value);
InstructionOperand outputs[1];
outputs[0] = g.DefineAsRegister(node);
- InstructionOperand temp[1];
- temp[0] = g.TempRegister();
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- Emit(code, 1, outputs, input_count, inputs, 1, temp);
+ Emit(code, 1, outputs, input_count, inputs, arraysize(temps), temps);
}
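// Why base dropped from UseUniqueRegister to UseRegister: the matching macro
// in code-generator-arm.cc now computes the effective address into a fresh
// temp instead of overwriting the base input, roughly
//   before: add(input0, input0, input1)   // clobbered base -> unique register
//   after:  add(temp1,  input0, input1)   // inputs intact -> plain register
// which is also why temps[] gains an extra register here.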
void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
@@ -2313,16 +2254,16 @@ void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
AddressingMode addressing_mode = kMode_Offset_RR;
InstructionOperand inputs[4];
size_t input_count = 0;
- inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseRegister(base);
inputs[input_count++] = g.UseRegister(index);
inputs[input_count++] = g.UseUniqueRegister(old_value);
inputs[input_count++] = g.UseUniqueRegister(new_value);
InstructionOperand outputs[1];
outputs[0] = g.DefineAsRegister(node);
- InstructionOperand temp[1];
- temp[0] = g.TempRegister();
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(),
+ g.TempRegister()};
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- Emit(code, 1, outputs, input_count, inputs, 1, temp);
+ Emit(code, 1, outputs, input_count, inputs, arraysize(temps), temps);
}
void InstructionSelector::VisitAtomicBinaryOperation(
@@ -2352,17 +2293,15 @@ void InstructionSelector::VisitAtomicBinaryOperation(
AddressingMode addressing_mode = kMode_Offset_RR;
InstructionOperand inputs[3];
size_t input_count = 0;
- inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseRegister(base);
inputs[input_count++] = g.UseRegister(index);
inputs[input_count++] = g.UseUniqueRegister(value);
InstructionOperand outputs[1];
outputs[0] = g.DefineAsRegister(node);
- InstructionOperand temps[2];
- size_t temp_count = 0;
- temps[temp_count++] = g.TempRegister();
- temps[temp_count++] = g.TempRegister();
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(),
+ g.TempRegister()};
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- Emit(code, 1, outputs, input_count, inputs, temp_count, temps);
+ Emit(code, 1, outputs, input_count, inputs, arraysize(temps), temps);
}
#define VISIT_ATOMIC_BINOP(op) \
diff --git a/deps/v8/src/compiler/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
index 3673ee2426..147d85a171 100644
--- a/deps/v8/src/compiler/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
@@ -264,46 +264,6 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
namespace {
-class OutOfLineLoadNaN32 final : public OutOfLineCode {
- public:
- OutOfLineLoadNaN32(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final {
- __ Fmov(result_, std::numeric_limits<float>::quiet_NaN());
- }
-
- private:
- DoubleRegister const result_;
-};
-
-
-class OutOfLineLoadNaN64 final : public OutOfLineCode {
- public:
- OutOfLineLoadNaN64(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final {
- __ Fmov(result_, std::numeric_limits<double>::quiet_NaN());
- }
-
- private:
- DoubleRegister const result_;
-};
-
-
-class OutOfLineLoadZero final : public OutOfLineCode {
- public:
- OutOfLineLoadZero(CodeGenerator* gen, Register result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final { __ Mov(result_, 0); }
-
- private:
- Register const result_;
-};
-
-
class OutOfLineRecordWrite final : public OutOfLineCode {
public:
OutOfLineRecordWrite(CodeGenerator* gen, Register object, Operand index,
@@ -336,14 +296,14 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
if (must_save_lr_) {
// We need to save and restore lr if the frame was elided.
- __ Push(lr);
+ __ Push(lr, padreg);
unwinding_info_writer_->MarkLinkRegisterOnTopOfStack(__ pc_offset(),
__ StackPointer());
}
__ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
save_fp_mode);
if (must_save_lr_) {
- __ Pop(lr);
+ __ Pop(padreg, lr);
unwinding_info_writer_->MarkPopLinkRegisterFromTopOfStack(__ pc_offset());
}
}
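// Pairing lr with padreg keeps csp 16-byte aligned, which AArch64 requires
// whenever csp is the base of a memory access; a lone 8-byte push would break
// that invariant. Roughly:
//   __ Push(lr);          // 8 bytes: leaves csp misaligned
//   __ Push(lr, padreg);  // 16 bytes: lr plus a padding register to discard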
@@ -416,90 +376,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
} // namespace
-#define ASSEMBLE_BOUNDS_CHECK(offset, length, out_of_bounds) \
- do { \
- if (length.IsImmediate() && \
- base::bits::IsPowerOfTwo(length.ImmediateValue())) { \
- __ Tst(offset, ~(length.ImmediateValue() - 1)); \
- __ B(ne, out_of_bounds); \
- } else { \
- __ Cmp(offset, length); \
- __ B(hs, out_of_bounds); \
- } \
- } while (0)
-
-#define ASSEMBLE_CHECKED_LOAD_FLOAT(width) \
- do { \
- auto result = i.OutputFloat##width##Register(); \
- auto buffer = i.InputRegister(0); \
- auto offset = i.InputRegister32(1); \
- auto length = i.InputOperand32(2); \
- auto ool = new (zone()) OutOfLineLoadNaN##width(this, result); \
- ASSEMBLE_BOUNDS_CHECK(offset, length, ool->entry()); \
- __ Ldr(result, MemOperand(buffer, offset, UXTW)); \
- __ Bind(ool->exit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
- do { \
- auto result = i.OutputRegister32(); \
- auto buffer = i.InputRegister(0); \
- auto offset = i.InputRegister32(1); \
- auto length = i.InputOperand32(2); \
- auto ool = new (zone()) OutOfLineLoadZero(this, result); \
- ASSEMBLE_BOUNDS_CHECK(offset, length, ool->entry()); \
- __ asm_instr(result, MemOperand(buffer, offset, UXTW)); \
- __ Bind(ool->exit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_LOAD_INTEGER_64(asm_instr) \
- do { \
- auto result = i.OutputRegister(); \
- auto buffer = i.InputRegister(0); \
- auto offset = i.InputRegister32(1); \
- auto length = i.InputOperand32(2); \
- auto ool = new (zone()) OutOfLineLoadZero(this, result); \
- ASSEMBLE_BOUNDS_CHECK(offset, length, ool->entry()); \
- __ asm_instr(result, MemOperand(buffer, offset, UXTW)); \
- __ Bind(ool->exit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_STORE_FLOAT(width) \
- do { \
- auto buffer = i.InputRegister(0); \
- auto offset = i.InputRegister32(1); \
- auto length = i.InputOperand32(2); \
- auto value = i.InputFloat##width##OrZeroRegister(3); \
- Label done; \
- ASSEMBLE_BOUNDS_CHECK(offset, length, &done); \
- __ Str(value, MemOperand(buffer, offset, UXTW)); \
- __ Bind(&done); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
- do { \
- auto buffer = i.InputRegister(0); \
- auto offset = i.InputRegister32(1); \
- auto length = i.InputOperand32(2); \
- auto value = i.InputOrZeroRegister32(3); \
- Label done; \
- ASSEMBLE_BOUNDS_CHECK(offset, length, &done); \
- __ asm_instr(value, MemOperand(buffer, offset, UXTW)); \
- __ Bind(&done); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_STORE_INTEGER_64(asm_instr) \
- do { \
- auto buffer = i.InputRegister(0); \
- auto offset = i.InputRegister32(1); \
- auto length = i.InputOperand32(2); \
- auto value = i.InputOrZeroRegister64(3); \
- Label done; \
- ASSEMBLE_BOUNDS_CHECK(offset, length, &done); \
- __ asm_instr(value, MemOperand(buffer, offset, UXTW)); \
- __ Bind(&done); \
- } while (0)
-
#define ASSEMBLE_SHIFT(asm_instr, width) \
do { \
if (instr->InputAt(1)->IsRegister()) { \
@@ -579,12 +455,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
} while (0)
void CodeGenerator::AssembleDeconstructFrame() {
- const CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- if (descriptor->IsCFunctionCall() || descriptor->UseNativeStack()) {
- __ Mov(csp, fp);
- } else {
- __ Mov(jssp, fp);
- }
+ __ Mov(csp, fp);
__ Pop(fp, lr);
unwinding_info_writer_.MarkFrameDeconstructed(__ pc_offset());
@@ -633,6 +504,7 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm,
int current_sp_offset = state->GetSPToFPSlotCount() +
StandardFrameConstants::kFixedSlotCountAboveFp;
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
+ DCHECK_EQ(stack_slot_delta % 2, 0);
if (stack_slot_delta > 0) {
tasm->Claim(stack_slot_delta);
state->IncreaseSPDelta(stack_slot_delta);
@@ -652,31 +524,48 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
int first_unused_stack_slot) {
+ DCHECK_EQ(first_unused_stack_slot % 2, 0);
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
first_unused_stack_slot);
+ DCHECK(instr->IsTailCall());
+ InstructionOperandConverter g(this, instr);
+ int optional_padding_slot = g.InputInt32(instr->InputCount() - 2);
+ if (optional_padding_slot % 2) {
+ __ Poke(padreg, optional_padding_slot * kPointerSize);
+ }
}
// Check if the code object is marked for deoptimization. If it is, then it
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
// to:
-// 1. load the address of the current instruction;
+// 1. compute the offset of the {CodeDataContainer} from our current location
+// and load it.
// 2. read from memory the word that contains that bit, which can be found in
// the flags in the referenced {CodeDataContainer} object;
// 3. test kMarkedForDeoptimizationBit in those flags; and
// 4. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
- Label current;
- // The Adr instruction gets the address of the current instruction.
- __ Adr(x2, &current);
- __ Bind(&current);
- int pc = __ pc_offset();
- int offset = Code::kCodeDataContainerOffset - (Code::kHeaderSize + pc);
- __ Ldr(x2, MemOperand(x2, offset));
- __ Ldr(x2, FieldMemOperand(x2, CodeDataContainer::kKindSpecificFlagsOffset));
- __ Tst(x2, Immediate(1 << Code::kMarkedForDeoptimizationBit));
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.AcquireX();
+ {
+ // Since we always emit a bailout check at the very beginning, we can be
+ // certain that the distance between here and the {CodeDataContainer} is
+ // fixed and always in range of a load.
+ int data_container_offset =
+ (Code::kCodeDataContainerOffset - Code::kHeaderSize) - __ pc_offset();
+ DCHECK_GE(0, data_container_offset);
+ DCHECK_EQ(0, data_container_offset % 4);
+ InstructionAccurateScope scope(tasm());
+ __ ldr_pcrel(scratch, data_container_offset >> 2);
+ }
+ __ Ldr(scratch,
+ FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
+ Label not_deoptimized;
+ __ Tbz(scratch, Code::kMarkedForDeoptimizationBit, &not_deoptimized);
Handle<Code> code = isolate()->builtins()->builtin_handle(
Builtins::kCompileLazyDeoptimizedCode);
- __ Jump(code, RelocInfo::CODE_TARGET, ne);
+ __ Jump(code, RelocInfo::CODE_TARGET);
+ __ Bind(&not_deoptimized);
}
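// Offset arithmetic above: the check is emitted at a fixed, early pc_offset,
// so the distance back to the CodeDataContainer slot is a compile-time
// constant. The instruction sits at code_start + kHeaderSize + pc_offset and
// the slot at code_start + kCodeDataContainerOffset, giving the byte delta
//   (kCodeDataContainerOffset - kHeaderSize) - pc_offset   // <= 0, 4-aligned
// ldr_pcrel takes a count of 32-bit words, hence the >> 2.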
// Assembles an instruction after register allocation, producing machine code.
@@ -700,18 +589,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Call(target);
}
RecordCallPosition(instr);
- // TODO(titzer): this is ugly. JSSP should be a caller-save register
- // in this case, but it is not possible to express in the register
- // allocator.
- CallDescriptor::Flags flags(MiscField::decode(opcode));
- if (flags & CallDescriptor::kRestoreJSSP) {
- __ Ldr(jssp, MemOperand(csp));
- __ Mov(csp, jssp);
- }
- if (flags & CallDescriptor::kRestoreCSP) {
- __ Mov(csp, jssp);
- __ AssertCspAligned();
- }
frame_access_state()->ClearSPDelta();
break;
}
@@ -734,18 +611,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Call(target);
}
RecordCallPosition(instr);
- // TODO(titzer): this is ugly. JSSP should be a caller-save register
- // in this case, but it is not possible to express in the register
- // allocator.
- CallDescriptor::Flags flags(MiscField::decode(opcode));
- if (flags & CallDescriptor::kRestoreJSSP) {
- __ Ldr(jssp, MemOperand(csp));
- __ Mov(csp, jssp);
- }
- if (flags & CallDescriptor::kRestoreCSP) {
- __ Mov(csp, jssp);
- __ AssertCspAligned();
- }
frame_access_state()->ClearSPDelta();
break;
}
@@ -813,24 +678,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register temp = scope.AcquireX();
__ Ldr(temp, FieldMemOperand(func, JSFunction::kContextOffset));
__ cmp(cp, temp);
- __ Assert(eq, kWrongFunctionContext);
+ __ Assert(eq, AbortReason::kWrongFunctionContext);
}
__ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeOffset));
__ Add(x10, x10, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(x10);
RecordCallPosition(instr);
- // TODO(titzer): this is ugly. JSSP should be a caller-save register
- // in this case, but it is not possible to express in the register
- // allocator.
- CallDescriptor::Flags flags(MiscField::decode(opcode));
- if (flags & CallDescriptor::kRestoreJSSP) {
- __ Ldr(jssp, MemOperand(csp));
- __ Mov(csp, jssp);
- }
- if (flags & CallDescriptor::kRestoreCSP) {
- __ Mov(csp, jssp);
- __ AssertCspAligned();
- }
frame_access_state()->ClearSPDelta();
break;
}
@@ -1339,75 +1192,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64CompareAndBranch:
// Pseudo instruction turned into cbz/cbnz in AssembleArchBranch.
break;
- case kArm64ClaimCSP: {
- int count = RoundUp(i.InputInt32(0), 2);
- Register prev = __ StackPointer();
- if (prev.Is(jssp)) {
- // TODO(titzer): make this a macro-assembler method.
- // Align the CSP and store the previous JSSP on the stack. We do not
- // need to modify the SP delta here, as we will continue to access the
- // frame via JSSP.
- UseScratchRegisterScope scope(tasm());
- Register tmp = scope.AcquireX();
-
- // TODO(arm64): Storing JSSP on the stack is redundant when calling a C
- // function, as JSSP is callee-saved (we still need to do this when
- // calling a code object that uses the CSP as the stack pointer). See
- // the code generation for kArchCallCodeObject vs. kArchCallCFunction
- // (the latter does not restore CSP/JSSP).
- // TurboAssembler::CallCFunction() (safely) drops this extra slot
- // anyway.
- int sp_alignment = __ ActivationFrameAlignment();
- __ Sub(tmp, jssp, kPointerSize);
- __ Bic(csp, tmp, sp_alignment - 1);
- __ Str(jssp, MemOperand(csp));
- if (count > 0) {
- __ SetStackPointer(csp);
- __ Claim(count);
- __ SetStackPointer(prev);
- }
- } else {
- __ AssertCspAligned();
- if (count > 0) {
- __ Claim(count);
- frame_access_state()->IncreaseSPDelta(count);
- }
- }
- break;
- }
- case kArm64ClaimJSSP: {
+ case kArm64Claim: {
int count = i.InputInt32(0);
- if (csp.Is(__ StackPointer())) {
- // No JSSP is set up. Compute it from the CSP.
- __ AssertCspAligned();
- if (count > 0) {
- int even = RoundUp(count, 2);
- __ Sub(jssp, csp, count * kPointerSize);
- // We must also update CSP to maintain stack consistency:
- __ Sub(csp, csp, even * kPointerSize); // Must always be aligned.
- __ AssertStackConsistency();
- frame_access_state()->IncreaseSPDelta(even);
- } else {
- __ Mov(jssp, csp);
- }
- } else {
- // JSSP is the current stack pointer, just use regular Claim().
+ DCHECK_EQ(count % 2, 0);
+ __ AssertCspAligned();
+ if (count > 0) {
__ Claim(count);
frame_access_state()->IncreaseSPDelta(count);
}
break;
}
- case kArm64PokeCSP: // fall through
- case kArm64PokeJSSP: {
- Register prev = __ StackPointer();
- __ SetStackPointer(arch_opcode == kArm64PokeCSP ? csp : jssp);
+ case kArm64Poke: {
Operand operand(i.InputInt32(1) * kPointerSize);
- if (instr->InputAt(0)->IsFPRegister()) {
+ if (instr->InputAt(0)->IsSimd128Register()) {
+ __ Poke(i.InputSimd128Register(0), operand);
+ } else if (instr->InputAt(0)->IsFPRegister()) {
__ Poke(i.InputFloat64Register(0), operand);
} else {
- __ Poke(i.InputRegister(0), operand);
+ __ Poke(i.InputOrZeroRegister64(0), operand);
}
- __ SetStackPointer(prev);
break;
}
case kArm64PokePair: {
@@ -1421,6 +1224,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
+ case kArm64Peek: {
+ int reverse_slot = i.InputInt32(0);
+ int offset =
+ FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
+ if (instr->OutputAt(0)->IsFPRegister()) {
+ LocationOperand* op = LocationOperand::cast(instr->OutputAt(0));
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ Ldr(i.OutputDoubleRegister(), MemOperand(fp, offset));
+ } else {
+ DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
+ __ Ldr(i.OutputFloatRegister(), MemOperand(fp, offset));
+ }
+ } else {
+ __ Ldr(i.OutputRegister(), MemOperand(fp, offset));
+ }
+ break;
+ }
case kArm64Clz:
__ Clz(i.OutputRegister64(), i.InputRegister64(0));
break;
@@ -1652,28 +1472,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Fmov(i.OutputRegister32(), i.InputFloat32Register(0));
break;
case kArm64Float64ExtractHighWord32:
- // TODO(arm64): This should use MOV (to general) when NEON is supported.
- __ Fmov(i.OutputRegister(), i.InputFloat64Register(0));
- __ Lsr(i.OutputRegister(), i.OutputRegister(), 32);
+ __ Umov(i.OutputRegister32(), i.InputFloat64Register(0).V2S(), 1);
break;
- case kArm64Float64InsertLowWord32: {
- // TODO(arm64): This should use MOV (from general) when NEON is supported.
- UseScratchRegisterScope scope(tasm());
- Register tmp = scope.AcquireX();
- __ Fmov(tmp, i.InputFloat64Register(0));
- __ Bfi(tmp, i.InputRegister(1), 0, 32);
- __ Fmov(i.OutputFloat64Register(), tmp);
+ case kArm64Float64InsertLowWord32:
+ DCHECK(i.OutputFloat64Register().Is(i.InputFloat64Register(0)));
+ __ Ins(i.OutputFloat64Register().V2S(), 0, i.InputRegister32(1));
break;
- }
- case kArm64Float64InsertHighWord32: {
- // TODO(arm64): This should use MOV (from general) when NEON is supported.
- UseScratchRegisterScope scope(tasm());
- Register tmp = scope.AcquireX();
- __ Fmov(tmp.W(), i.InputFloat32Register(0));
- __ Bfi(tmp, i.InputRegister(1), 32, 32);
- __ Fmov(i.OutputFloat64Register(), tmp);
+ case kArm64Float64InsertHighWord32:
+ DCHECK(i.OutputFloat64Register().Is(i.InputFloat64Register(0)));
+ __ Ins(i.OutputFloat64Register().V2S(), 1, i.InputRegister32(1));
break;
- }
case kArm64Float64MoveU64:
__ Fmov(i.OutputFloat64Register(), i.InputRegister(0));
break;
@@ -1734,48 +1542,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64StrQ:
__ Str(i.InputSimd128Register(0), i.MemoryOperand(1));
break;
- case kCheckedLoadInt8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrsb);
- break;
- case kCheckedLoadUint8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrb);
- break;
- case kCheckedLoadInt16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrsh);
- break;
- case kCheckedLoadUint16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrh);
- break;
- case kCheckedLoadWord32:
- ASSEMBLE_CHECKED_LOAD_INTEGER(Ldr);
- break;
- case kCheckedLoadWord64:
- ASSEMBLE_CHECKED_LOAD_INTEGER_64(Ldr);
- break;
- case kCheckedLoadFloat32:
- ASSEMBLE_CHECKED_LOAD_FLOAT(32);
- break;
- case kCheckedLoadFloat64:
- ASSEMBLE_CHECKED_LOAD_FLOAT(64);
- break;
- case kCheckedStoreWord8:
- ASSEMBLE_CHECKED_STORE_INTEGER(Strb);
- break;
- case kCheckedStoreWord16:
- ASSEMBLE_CHECKED_STORE_INTEGER(Strh);
- break;
- case kCheckedStoreWord32:
- ASSEMBLE_CHECKED_STORE_INTEGER(Str);
- break;
- case kCheckedStoreWord64:
- ASSEMBLE_CHECKED_STORE_INTEGER_64(Str);
- break;
- case kCheckedStoreFloat32:
- ASSEMBLE_CHECKED_STORE_FLOAT(32);
- break;
- case kCheckedStoreFloat64:
- ASSEMBLE_CHECKED_STORE_FLOAT(64);
- break;
case kAtomicLoadInt8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarb);
__ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
@@ -1860,13 +1626,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Or, Orr)
ATOMIC_BINOP_CASE(Xor, Eor)
#undef ATOMIC_BINOP_CASE
-#undef ASSEMBLE_BOUNDS_CHECK
-#undef ASSEMBLE_CHECKED_LOAD_FLOAT
-#undef ASSEMBLE_CHECKED_LOAD_INTEGER
-#undef ASSEMBLE_CHECKED_LOAD_INTEGER_64
-#undef ASSEMBLE_CHECKED_STORE_FLOAT
-#undef ASSEMBLE_CHECKED_STORE_INTEGER
-#undef ASSEMBLE_CHECKED_STORE_INTEGER_64
#undef ASSEMBLE_SHIFT
#undef ASSEMBLE_ATOMIC_LOAD_INTEGER
#undef ASSEMBLE_ATOMIC_STORE_INTEGER
@@ -2437,8 +2196,6 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ Ret();
} else {
DCHECK(csp.Is(__ StackPointer()));
- // Initialize the jssp because it is required for the runtime call.
- __ Mov(jssp, csp);
gen_->AssembleSourcePosition(instr_);
__ Call(__ isolate()->builtins()->builtin_handle(trap_id),
RelocInfo::CODE_TARGET);
@@ -2512,12 +2269,6 @@ void CodeGenerator::FinishFrame(Frame* frame) {
frame->AlignFrame(16);
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- if (descriptor->UseNativeStack() || descriptor->IsCFunctionCall()) {
- __ SetStackPointer(csp);
- } else {
- __ SetStackPointer(jssp);
- }
-
// Save FP registers.
CPURegList saves_fp = CPURegList(CPURegister::kVRegister, kDRegSizeInBits,
descriptor->CalleeSavedFPRegisters());
@@ -2540,10 +2291,10 @@ void CodeGenerator::FinishFrame(Frame* frame) {
void CodeGenerator::AssembleConstructFrame() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- if (descriptor->UseNativeStack()) {
- __ AssertCspAligned();
- }
+ __ AssertCspAligned();
+ // The frame has been previously padded in CodeGenerator::FinishFrame().
+ DCHECK_EQ(frame()->GetTotalFrameSlotCount() % 2, 0);
int shrink_slots =
frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
@@ -2551,11 +2302,13 @@ void CodeGenerator::AssembleConstructFrame() {
descriptor->CalleeSavedRegisters());
CPURegList saves_fp = CPURegList(CPURegister::kVRegister, kDRegSizeInBits,
descriptor->CalleeSavedFPRegisters());
+ // The number of slots for returns has to be even to ensure the correct stack
+ // alignment.
+ const int returns = RoundUp(frame()->GetReturnSlotCount(), 2);
if (frame_access_state()->has_frame()) {
// Link the frame
if (descriptor->IsJSFunctionCall()) {
- DCHECK(!descriptor->UseNativeStack());
__ Prologue();
} else {
__ Push(lr, fp);
@@ -2566,7 +2319,7 @@ void CodeGenerator::AssembleConstructFrame() {
// Create OSR entry if applicable
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
- __ Abort(kShouldNotDirectlyEnterOsrFunction);
+ __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
// Unoptimized code jumps directly to this entrypoint while the
// unoptimized frame is still on the stack. Optimized code uses OSR values
@@ -2604,10 +2357,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ EnterFrame(StackFrame::WASM_COMPILED);
}
DCHECK(__ StackPointer().Is(csp));
- __ SetStackPointer(jssp);
__ AssertStackConsistency();
- // Initialize the jssp because it is required for the runtime call.
- __ Mov(jssp, csp);
__ Mov(cp, Smi::kZero);
__ CallRuntimeDelayed(zone(), Runtime::kThrowWasmStackOverflow);
// We come from WebAssembly; there are no references for the GC.
@@ -2617,7 +2367,6 @@ void CodeGenerator::AssembleConstructFrame() {
if (FLAG_debug_code) {
__ Brk(0);
}
- __ SetStackPointer(csp);
__ AssertStackConsistency();
__ Bind(&done);
}
@@ -2625,6 +2374,7 @@ void CodeGenerator::AssembleConstructFrame() {
// Skip callee-saved slots, which are pushed below.
shrink_slots -= saves.Count();
shrink_slots -= saves_fp.Count();
+ shrink_slots -= returns;
// Build remainder of frame, including accounting for and filling-in
// frame-specific header information, i.e. claiming the extra slot that
@@ -2667,11 +2417,21 @@ void CodeGenerator::AssembleConstructFrame() {
// CPURegList::GetCalleeSaved(): x30 is missing.
// DCHECK(saves.list() == CPURegList::GetCalleeSaved().list());
__ PushCPURegList(saves);
+
+ if (returns != 0) {
+ __ Claim(returns);
+ }
}
void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ const int returns = RoundUp(frame()->GetReturnSlotCount(), 2);
+
+ if (returns != 0) {
+ __ Drop(returns);
+ }
+
// Restore registers.
CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
descriptor->CalleeSavedRegisters());
@@ -2698,33 +2458,22 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
} else {
__ Bind(&return_label_);
AssembleDeconstructFrame();
- if (descriptor->UseNativeStack()) {
- pop_count += (pop_count & 1); // align
- }
}
} else {
AssembleDeconstructFrame();
- if (descriptor->UseNativeStack()) {
- pop_count += (pop_count & 1); // align
- }
}
- } else if (descriptor->UseNativeStack()) {
- pop_count += (pop_count & 1); // align
}
if (pop->IsImmediate()) {
- DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
pop_count += g.ToConstant(pop).ToInt32();
- __ Drop(pop_count);
+ __ DropArguments(pop_count);
} else {
Register pop_reg = g.ToRegister(pop);
__ Add(pop_reg, pop_reg, pop_count);
- __ Drop(pop_reg);
+ __ DropArguments(pop_reg);
}
- if (descriptor->UseNativeStack()) {
- __ AssertCspAligned();
- }
+ __ AssertCspAligned();
__ Ret();
}
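
The return-slot rounding above is easy to sanity-check with concrete numbers. A minimal stand-alone sketch, where RoundUp only mirrors the shape of V8's helper of the same name (illustration, not V8 code):

    #include <cassert>

    // Stand-in for v8::base::RoundUp, for illustration only.
    constexpr int RoundUp(int value, int multiple) {
      return ((value + multiple - 1) / multiple) * multiple;
    }

    int main() {
      // Each stack slot is 8 bytes and csp must stay 16-byte aligned,
      // so the claimed slot count has to be even.
      assert(RoundUp(3, 2) == 4);  // odd return count -> one padding slot
      assert(RoundUp(4, 2) == 4);  // even count is unchanged
      // Both AssembleConstructFrame and AssembleReturn compute
      // RoundUp(frame()->GetReturnSlotCount(), 2), so the Claim in the
      // prologue and the Drop in the epilogue always agree.
      return 0;
    }
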
diff --git a/deps/v8/src/compiler/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
index 6354dfc4db..820b55a99d 100644
--- a/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
+++ b/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
@@ -79,11 +79,10 @@ namespace compiler {
V(Arm64TestAndBranch) \
V(Arm64CompareAndBranch32) \
V(Arm64CompareAndBranch) \
- V(Arm64ClaimCSP) \
- V(Arm64ClaimJSSP) \
- V(Arm64PokeCSP) \
- V(Arm64PokeJSSP) \
+ V(Arm64Claim) \
+ V(Arm64Poke) \
V(Arm64PokePair) \
+ V(Arm64Peek) \
V(Arm64Float32Cmp) \
V(Arm64Float32Add) \
V(Arm64Float32Sub) \
@@ -326,8 +325,6 @@ namespace compiler {
V(Operand2_R_SXTH) /* %r0 SXTH (signed extend halfword) */ \
V(Operand2_R_SXTW) /* %r0 SXTW (signed extend word) */
-enum ResetJSSPAfterCall { kNoResetJSSP, kResetJSSP };
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc b/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc
index 0294c828da..c2b0a4e386 100644
--- a/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc
@@ -128,6 +128,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Float64ExtractHighWord32:
case kArm64Float64InsertLowWord32:
case kArm64Float64InsertHighWord32:
+ case kArm64Float64Mod:
case kArm64Float64MoveU64:
case kArm64U64MoveFloat64:
case kArm64Float64SilenceNaN:
@@ -292,14 +293,11 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Ldrsw:
case kArm64LdrW:
case kArm64Ldr:
+ case kArm64Peek:
return kIsLoadOperation;
- case kArm64Float64Mod: // This opcode will call a C Function which can
- // alter CSP. TODO(arm64): Remove once JSSP is gone.
- case kArm64ClaimCSP:
- case kArm64ClaimJSSP:
- case kArm64PokeCSP:
- case kArm64PokeJSSP:
+ case kArm64Claim:
+ case kArm64Poke:
case kArm64PokePair:
case kArm64StrS:
case kArm64StrD:
@@ -387,16 +385,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
case kArm64Ldrsw:
return 11;
- case kCheckedLoadInt8:
- case kCheckedLoadUint8:
- case kCheckedLoadInt16:
- case kCheckedLoadUint16:
- case kCheckedLoadWord32:
- case kCheckedLoadWord64:
- case kCheckedLoadFloat32:
- case kCheckedLoadFloat64:
- return 5;
-
case kArm64Str:
case kArm64StrD:
case kArm64StrS:
@@ -405,14 +393,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
case kArm64Strh:
return 1;
- case kCheckedStoreWord8:
- case kCheckedStoreWord16:
- case kCheckedStoreWord32:
- case kCheckedStoreWord64:
- case kCheckedStoreFloat32:
- case kCheckedStoreFloat64:
- return 1;
-
case kArm64Madd32:
case kArm64Mneg32:
case kArm64Msub32:
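
The scheduler reclassification above is small but load-bearing: kArm64Peek reads a caller frame slot, so it must be ordered like a load, while Claim/Poke mutate the stack. A toy model of that rule, with stand-in enums (illustration only, not V8's types):

    #include <cassert>

    enum Flags { kNoOpcodeFlags = 0, kIsLoadOperation = 1, kHasSideEffect = 2 };
    enum class Op { kArm64Peek, kArm64Claim, kArm64Poke };

    // Mirrors the shape of GetTargetInstructionFlags: Peek reads a stack
    // slot (a load); Claim/Poke write the stack (side effects).
    int TargetFlags(Op op) {
      switch (op) {
        case Op::kArm64Peek: return kIsLoadOperation;
        case Op::kArm64Claim:
        case Op::kArm64Poke: return kHasSideEffect;
      }
      return kNoOpcodeFlags;
    }

    int main() {
      assert(TargetFlags(Op::kArm64Peek) == kIsLoadOperation);
      assert(TargetFlags(Op::kArm64Poke) == kHasSideEffect);
      return 0;
    }
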
diff --git a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
index 201c0613c4..d6082c9f0a 100644
--- a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
@@ -43,7 +43,7 @@ class Arm64OperandGenerator final : public OperandGenerator {
InstructionOperand UseRegisterOrImmediateZero(Node* node) {
if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) ||
(IsFloatConstant(node) &&
- (bit_cast<int64_t>(GetFloatConstantValue(node)) == V8_INT64_C(0)))) {
+ (bit_cast<int64_t>(GetFloatConstantValue(node)) == 0))) {
return UseImmediate(node);
}
return UseRegister(node);
@@ -295,12 +295,12 @@ bool TryMatchAnyExtend(Arm64OperandGenerator* g, InstructionSelector* selector,
if (nm.IsWord32And()) {
Int32BinopMatcher mright(right_node);
- if (mright.right().Is(0xff) || mright.right().Is(0xffff)) {
+ if (mright.right().Is(0xFF) || mright.right().Is(0xFFFF)) {
int32_t mask = mright.right().Value();
*left_op = g->UseRegister(left_node);
*right_op = g->UseRegister(mright.left().node());
*opcode |= AddressingModeField::encode(
- (mask == 0xff) ? kMode_Operand2_R_UXTB : kMode_Operand2_R_UXTH);
+ (mask == 0xFF) ? kMode_Operand2_R_UXTB : kMode_Operand2_R_UXTH);
return true;
}
} else if (nm.IsWord32Sar()) {
@@ -488,7 +488,8 @@ void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsTrap()) {
inputs[input_count++] = g.UseImmediate(cont->trap_id());
selector->Emit(opcode, output_count, outputs, input_count, inputs);
@@ -760,110 +761,6 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
// Architecture supports unaligned access, therefore VisitStore is used instead
void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitCheckedLoad(Node* node) {
- CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
- Arm64OperandGenerator g(this);
- Node* const buffer = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- ArchOpcode opcode = kArchNop;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedLoadWord32;
- break;
- case MachineRepresentation::kWord64:
- opcode = kCheckedLoadWord64;
- break;
- case MachineRepresentation::kFloat32:
- opcode = kCheckedLoadFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedLoadFloat64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- // If the length is a constant power of two, allow the code generator to
- // pick a more efficient bounds check sequence by passing the length as an
- // immediate.
- if (length->opcode() == IrOpcode::kInt32Constant) {
- Int32Matcher m(length);
- if (m.IsPowerOf2()) {
- Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
- g.UseRegister(offset), g.UseImmediate(length));
- return;
- }
- }
- Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
- g.UseRegister(offset), g.UseOperand(length, kArithmeticImm));
-}
-
-
-void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
- Arm64OperandGenerator g(this);
- Node* const buffer = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- Node* const value = node->InputAt(3);
- ArchOpcode opcode = kArchNop;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kCheckedStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kCheckedStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedStoreWord32;
- break;
- case MachineRepresentation::kWord64:
- opcode = kCheckedStoreWord64;
- break;
- case MachineRepresentation::kFloat32:
- opcode = kCheckedStoreFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedStoreFloat64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- // If the length is a constant power of two, allow the code generator to
- // pick a more efficient bounds check sequence by passing the length as an
- // immediate.
- if (length->opcode() == IrOpcode::kInt32Constant) {
- Int32Matcher m(length);
- if (m.IsPowerOf2()) {
- Emit(opcode, g.NoOutput(), g.UseRegister(buffer), g.UseRegister(offset),
- g.UseImmediate(length), g.UseRegisterOrImmediateZero(value));
- return;
- }
- }
- Emit(opcode, g.NoOutput(), g.UseRegister(buffer), g.UseRegister(offset),
- g.UseOperand(length, kArithmeticImm),
- g.UseRegisterOrImmediateZero(value));
-}
-
-
template <typename Matcher>
static void VisitLogical(InstructionSelector* selector, Node* node, Matcher* m,
ArchOpcode opcode, bool left_can_cover,
@@ -950,7 +847,7 @@ void InstructionSelector::VisitWord32And(Node* node) {
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue()) {
// Any shift value can match; int32 shifts use `value % 32`.
- uint32_t lsb = mleft.right().Value() & 0x1f;
+ uint32_t lsb = mleft.right().Value() & 0x1F;
// Ubfx cannot extract bits past the register size, however since
// shifting the original value would have introduced some zeros we can
@@ -991,7 +888,7 @@ void InstructionSelector::VisitWord64And(Node* node) {
Int64BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue()) {
// Any shift value can match; int64 shifts use `value % 64`.
- uint32_t lsb = static_cast<uint32_t>(mleft.right().Value() & 0x3f);
+ uint32_t lsb = static_cast<uint32_t>(mleft.right().Value() & 0x3F);
// Ubfx cannot extract bits past the register size, however since
// shifting the original value would have introduced some zeros we can
@@ -1105,16 +1002,16 @@ bool TryEmitBitfieldExtract32(InstructionSelector* selector, Node* node) {
Arm64OperandGenerator g(selector);
Int32BinopMatcher m(node);
if (selector->CanCover(node, m.left().node()) && m.left().IsWord32Shl()) {
- // Select Ubfx or Sbfx for (x << (K & 0x1f)) OP (K & 0x1f), where
- // OP is >>> or >> and (K & 0x1f) != 0.
+ // Select Ubfx or Sbfx for (x << (K & 0x1F)) OP (K & 0x1F), where
+ // OP is >>> or >> and (K & 0x1F) != 0.
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue() && m.right().HasValue() &&
- (mleft.right().Value() & 0x1f) != 0 &&
- (mleft.right().Value() & 0x1f) == (m.right().Value() & 0x1f)) {
+ (mleft.right().Value() & 0x1F) != 0 &&
+ (mleft.right().Value() & 0x1F) == (m.right().Value() & 0x1F)) {
DCHECK(m.IsWord32Shr() || m.IsWord32Sar());
ArchOpcode opcode = m.IsWord32Sar() ? kArm64Sbfx32 : kArm64Ubfx32;
- int right_val = m.right().Value() & 0x1f;
+ int right_val = m.right().Value() & 0x1F;
DCHECK_NE(right_val, 0);
selector->Emit(opcode, g.DefineAsRegister(node),
@@ -1132,7 +1029,7 @@ bool TryEmitBitfieldExtract32(InstructionSelector* selector, Node* node) {
void InstructionSelector::VisitWord32Shr(Node* node) {
Int32BinopMatcher m(node);
if (m.left().IsWord32And() && m.right().HasValue()) {
- uint32_t lsb = m.right().Value() & 0x1f;
+ uint32_t lsb = m.right().Value() & 0x1F;
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue() && mleft.right().Value() != 0) {
// Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
@@ -1160,7 +1057,7 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
// by Uint32MulHigh.
Arm64OperandGenerator g(this);
Node* left = m.left().node();
- int shift = m.right().Value() & 0x1f;
+ int shift = m.right().Value() & 0x1F;
InstructionOperand const smull_operand = g.TempRegister();
Emit(kArm64Umull, smull_operand, g.UseRegister(left->InputAt(0)),
g.UseRegister(left->InputAt(1)));
@@ -1176,7 +1073,7 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
void InstructionSelector::VisitWord64Shr(Node* node) {
Int64BinopMatcher m(node);
if (m.left().IsWord64And() && m.right().HasValue()) {
- uint32_t lsb = m.right().Value() & 0x3f;
+ uint32_t lsb = m.right().Value() & 0x3F;
Int64BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue() && mleft.right().Value() != 0) {
// Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
@@ -1211,7 +1108,7 @@ void InstructionSelector::VisitWord32Sar(Node* node) {
// by Int32MulHigh.
Arm64OperandGenerator g(this);
Node* left = m.left().node();
- int shift = m.right().Value() & 0x1f;
+ int shift = m.right().Value() & 0x1F;
InstructionOperand const smull_operand = g.TempRegister();
Emit(kArm64Smull, smull_operand, g.UseRegister(left->InputAt(0)),
g.UseRegister(left->InputAt(1)));
@@ -1361,6 +1258,8 @@ void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord64Popcnt(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitSpeculationFence(Node* node) { UNREACHABLE(); }
+
void InstructionSelector::VisitInt32Add(Node* node) {
Arm64OperandGenerator g(this);
Int32BinopMatcher m(node);
@@ -1483,7 +1382,8 @@ void EmitInt32MulWithOverflow(InstructionSelector* selector, Node* node,
} else if (cont->IsDeoptimize()) {
InstructionOperand in[] = {result, result};
selector->EmitDeoptimize(opcode, 0, nullptr, 2, in, cont->kind(),
- cont->reason(), cont->frame_state());
+ cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), result, result);
} else {
@@ -1784,29 +1684,33 @@ void InstructionSelector::EmitPrepareArguments(
Node* node) {
Arm64OperandGenerator g(this);
- bool from_native_stack = linkage()->GetIncomingDescriptor()->UseNativeStack();
- bool to_native_stack = descriptor->UseNativeStack();
-
- bool always_claim = to_native_stack != from_native_stack;
-
+ // `arguments` includes alignment "holes". This means that slots bigger than
+ // kPointerSize, e.g. Simd128, will span across multiple arguments.
int claim_count = static_cast<int>(arguments->size());
int slot = claim_count - 1;
+ claim_count = RoundUp(claim_count, 2);
// Bump the stack pointer(s).
- if (claim_count > 0 || always_claim) {
+ if (claim_count > 0) {
// TODO(titzer): claim and poke probably take small immediates.
// TODO(titzer): it would be better to bump the csp here only
- // and emit paired stores with increment for non c frames.
- ArchOpcode claim = to_native_stack ? kArm64ClaimCSP : kArm64ClaimJSSP;
- // ClaimJSSP(0) or ClaimCSP(0) isn't a nop if there is a mismatch between
- // CSP and JSSP.
- Emit(claim, g.NoOutput(), g.TempImmediate(claim_count));
+ // and emit paired stores with increment for non c frames.
+ Emit(kArm64Claim, g.NoOutput(), g.TempImmediate(claim_count));
+ }
+
+ if (claim_count > 0) {
+ // Store padding, which might be overwritten.
+ Emit(kArm64Poke, g.NoOutput(), g.UseImmediate(0),
+ g.TempImmediate(claim_count - 1));
}
// Poke the arguments into the stack.
- ArchOpcode poke = to_native_stack ? kArm64PokeCSP : kArm64PokeJSSP;
while (slot >= 0) {
- Emit(poke, g.NoOutput(), g.UseRegister((*arguments)[slot].node()),
- g.TempImmediate(slot));
+ Node* input_node = (*arguments)[slot].node;
+ // Skip any alignment holes in pushed nodes.
+ if (input_node != nullptr) {
+ Emit(kArm64Poke, g.NoOutput(), g.UseRegister(input_node),
+ g.TempImmediate(slot));
+ }
slot--;
// TODO(ahaas): Poke arguments in pairs if two subsequent arguments have the
// same type.
@@ -1816,6 +1720,29 @@ void InstructionSelector::EmitPrepareArguments(
}
}
+void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
+ const CallDescriptor* descriptor,
+ Node* node) {
+ Arm64OperandGenerator g(this);
+
+ int reverse_slot = 0;
+ for (PushParameter output : *results) {
+ if (!output.location.IsCallerFrameSlot()) continue;
+ reverse_slot += output.location.GetSizeInPointers();
+ // Skip any alignment holes in nodes.
+ if (output.node == nullptr) continue;
+ DCHECK(!descriptor->IsCFunctionCall());
+
+ if (output.location.GetType() == MachineType::Float32()) {
+ MarkAsFloat32(output.node);
+ } else if (output.location.GetType() == MachineType::Float64()) {
+ MarkAsFloat64(output.node);
+ }
+
+ Emit(kArm64Peek, g.DefineAsRegister(output.node),
+ g.UseImmediate(reverse_slot));
+ }
+}
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
@@ -1834,7 +1761,8 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
- cont->reason(), cont->frame_state());
+ cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
} else {
@@ -2002,24 +1930,23 @@ void EmitBranchOrDeoptimize(InstructionSelector* selector,
} else {
DCHECK(cont->IsDeoptimize());
selector->EmitDeoptimize(cont->Encode(opcode), g.NoOutput(), value,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
}
}
// Try to emit TBZ, TBNZ, CBZ or CBNZ for certain comparisons of {node}
-// against zero, depending on the condition.
-bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, Node* user,
- FlagsCondition cond, FlagsContinuation* cont) {
- Int32BinopMatcher m_user(user);
- USE(m_user);
- DCHECK(m_user.right().Is(0) || m_user.left().Is(0));
-
+// against {value}, depending on the condition.
+bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, uint32_t value,
+ Node* user, FlagsCondition cond, FlagsContinuation* cont) {
// Only handle branches and deoptimisations.
if (!cont->IsBranch() && !cont->IsDeoptimize()) return false;
switch (cond) {
case kSignedLessThan:
case kSignedGreaterThanOrEqual: {
+ // Here we handle sign tests, aka. comparisons with zero.
+ if (value != 0) return false;
// We don't generate TBZ/TBNZ for deoptimisations, as they have a
// shorter range than conditional branches and generating them for
// deoptimisations results in more veneers.
@@ -2045,9 +1972,29 @@ bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, Node* user,
return true;
}
case kEqual:
- case kNotEqual:
+ case kNotEqual: {
+ if (node->opcode() == IrOpcode::kWord32And) {
+ // Emit a tbz/tbnz if we are comparing with a single-bit mask:
+ // Branch(Word32Equal(Word32And(x, 1 << N), 1 << N), true, false)
+ Int32BinopMatcher m_and(node);
+ if (cont->IsBranch() && base::bits::IsPowerOfTwo(value) &&
+ m_and.right().Is(value) && selector->CanCover(user, node)) {
+ Arm64OperandGenerator g(selector);
+ // In the code generator, Equal refers to a bit being cleared. We want
+ // the opposite here so negate the condition.
+ cont->Negate();
+ selector->Emit(cont->Encode(kArm64TestAndBranch32), g.NoOutput(),
+ g.UseRegister(m_and.left().node()),
+ g.TempImmediate(base::bits::CountTrailingZeros(value)),
+ g.Label(cont->true_block()),
+ g.Label(cont->false_block()));
+ return true;
+ }
+ }
+ } // Fall through.
case kUnsignedLessThanOrEqual:
case kUnsignedGreaterThan: {
+ if (value != 0) return false;
Arm64OperandGenerator g(selector);
cont->Overwrite(MapForCbz(cond));
EmitBranchOrDeoptimize(selector, kArm64CompareAndBranch32,
@@ -2062,15 +2009,20 @@ bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, Node* user,
void VisitWord32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
Int32BinopMatcher m(node);
- ArchOpcode opcode = kArm64Cmp32;
FlagsCondition cond = cont->condition();
- if (m.right().Is(0)) {
- if (TryEmitCbzOrTbz(selector, m.left().node(), node, cond, cont)) return;
- } else if (m.left().Is(0)) {
+ if (m.right().HasValue()) {
+ if (TryEmitCbzOrTbz(selector, m.left().node(), m.right().Value(), node,
+ cond, cont)) {
+ return;
+ }
+ } else if (m.left().HasValue()) {
FlagsCondition commuted_cond = CommuteFlagsCondition(cond);
- if (TryEmitCbzOrTbz(selector, m.right().node(), node, commuted_cond, cont))
+ if (TryEmitCbzOrTbz(selector, m.right().node(), m.left().Value(), node,
+ commuted_cond, cont)) {
return;
+ }
}
+ ArchOpcode opcode = kArm64Cmp32;
ImmediateMode immediate_mode = kArithmeticImm;
if (m.right().Is(0) && (m.left().IsInt32Add() || m.left().IsWord32And())) {
// Emit flag setting add/and instructions for comparisons against zero.
@@ -2141,7 +2093,7 @@ bool TryEmitTestAndBranch(InstructionSelector* selector, Node* node,
Arm64OperandGenerator g(selector);
Matcher m(node);
if (cont->IsBranch() && m.right().HasValue() &&
- (base::bits::CountPopulation(m.right().Value()) == 1)) {
+ base::bits::IsPowerOfTwo(m.right().Value())) {
// If the mask has only one bit set, we can use tbz/tbnz.
DCHECK((cont->condition() == kEqual) || (cont->condition() == kNotEqual));
selector->Emit(
@@ -2356,7 +2308,8 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(cont->Encode(kArm64Tst32), g.NoOutput(),
g.UseRegister(value), g.UseRegister(value),
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else {
DCHECK(cont->IsTrap());
selector->Emit(cont->Encode(kArm64Tst32), g.NoOutput(),
@@ -2376,14 +2329,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), node->InputAt(1));
+ kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), node->InputAt(1));
+ kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
@@ -2648,7 +2601,7 @@ void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
Emit(kArm64Float64MoveU64, g.DefineAsRegister(node), g.UseRegister(right));
return;
}
- Emit(kArm64Float64InsertLowWord32, g.DefineAsRegister(node),
+ Emit(kArm64Float64InsertLowWord32, g.DefineSameAsFirst(node),
g.UseRegister(left), g.UseRegister(right));
}
@@ -2665,7 +2618,7 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
Emit(kArm64Float64MoveU64, g.DefineAsRegister(node), g.UseRegister(left));
return;
}
- Emit(kArm64Float64InsertHighWord32, g.DefineAsRegister(node),
+ Emit(kArm64Float64InsertHighWord32, g.DefineSameAsFirst(node),
g.UseRegister(left), g.UseRegister(right));
}
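
The single-bit branch match added to TryEmitCbzOrTbz is compact enough to illustrate in isolation. A hedged sketch where IsPowerOfTwo and CountTrailingZeros are stand-ins for the base::bits helpers (a GCC/Clang builtin is assumed); only the tbz/tbnz idea comes from the diff:

    #include <cassert>
    #include <cstdint>

    // Stand-ins for base::bits helpers; illustration only.
    constexpr bool IsPowerOfTwo(uint32_t v) { return v != 0 && (v & (v - 1)) == 0; }
    inline int CountTrailingZeros(uint32_t v) { return __builtin_ctz(v); }

    int main() {
      // Pattern: Branch(Word32Equal(Word32And(x, 1 << N), 1 << N), t, f).
      // When the mask has a single bit set, the bit index becomes the
      // tbz/tbnz immediate, replacing an and+cmp+b.cond sequence.
      uint32_t mask = 1u << 5;
      assert(IsPowerOfTwo(mask));
      assert(CountTrailingZeros(mask) == 5);  // -> tbnz w_x, #5, true_block
      return 0;
    }
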
diff --git a/deps/v8/src/compiler/branch-elimination.cc b/deps/v8/src/compiler/branch-elimination.cc
index 5406ec5766..53c3435b55 100644
--- a/deps/v8/src/compiler/branch-elimination.cc
+++ b/deps/v8/src/compiler/branch-elimination.cc
@@ -103,8 +103,9 @@ Reduction BranchElimination::ReduceDeoptimizeConditional(Node* node) {
// with the {control} node that already contains the right information.
ReplaceWithValue(node, dead(), effect, control);
} else {
- control = graph()->NewNode(common()->Deoptimize(p.kind(), p.reason()),
- frame_state, effect, control);
+ control = graph()->NewNode(
+ common()->Deoptimize(p.kind(), p.reason(), VectorSlotPair()),
+ frame_state, effect, control);
// TODO(bmeurer): This should be on the AdvancedReducer somehow.
NodeProperties::MergeControlToEnd(graph(), common(), control);
Revisit(graph()->end());
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index 7e1fbfddb3..54a924fce4 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -14,6 +14,7 @@
#include "src/interpreter/bytecodes.h"
#include "src/objects-inl.h"
#include "src/objects/literal-objects.h"
+#include "src/vector-slot-pair.h"
namespace v8 {
namespace internal {
@@ -949,7 +950,7 @@ void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeof() {
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
-void BytecodeGraphBuilder::BuildStoreGlobal(LanguageMode language_mode) {
+void BytecodeGraphBuilder::VisitStaGlobal() {
PrepareEagerCheckpoint();
Handle<Name> name =
Handle<Name>::cast(bytecode_iterator().GetConstantForIndexOperand(0));
@@ -957,19 +958,13 @@ void BytecodeGraphBuilder::BuildStoreGlobal(LanguageMode language_mode) {
CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(1));
Node* value = environment()->LookupAccumulator();
+ LanguageMode language_mode =
+ feedback.vector()->GetLanguageMode(feedback.slot());
const Operator* op = javascript()->StoreGlobal(language_mode, name, feedback);
Node* node = NewNode(op, value);
environment()->RecordAfterState(node, Environment::kAttachFrameState);
}
-void BytecodeGraphBuilder::VisitStaGlobalSloppy() {
- BuildStoreGlobal(LanguageMode::kSloppy);
-}
-
-void BytecodeGraphBuilder::VisitStaGlobalStrict() {
- BuildStoreGlobal(LanguageMode::kStrict);
-}
-
void BytecodeGraphBuilder::VisitStaDataPropertyInLiteral() {
PrepareEagerCheckpoint();
@@ -1609,7 +1604,8 @@ void BytecodeGraphBuilder::BuildCall(ConvertReceiverMode receiver_mode,
CallFrequency frequency = ComputeCallFrequency(slot_id);
const Operator* op =
- javascript()->Call(arg_count, frequency, feedback, receiver_mode);
+ javascript()->Call(arg_count, frequency, feedback, receiver_mode,
+ GetSpeculationMode(slot_id));
JSTypeHintLowering::LoweringResult lowering = TryBuildSimplifiedCall(
op, args, static_cast<int>(arg_count), feedback.slot());
if (lowering.IsExit()) return;
@@ -1947,8 +1943,8 @@ void BytecodeGraphBuilder::VisitThrow() {
void BytecodeGraphBuilder::VisitAbort() {
BuildLoopExitsForFunctionExit(bytecode_analysis()->GetInLivenessFor(
bytecode_iterator().current_offset()));
- BailoutReason reason =
- static_cast<BailoutReason>(bytecode_iterator().GetIndexOperand(0));
+ AbortReason reason =
+ static_cast<AbortReason>(bytecode_iterator().GetIndexOperand(0));
NewNode(simplified()->RuntimeAbort(reason));
Node* control = NewNode(common()->Throw());
MergeControlToLeaveFunction(control);
@@ -2104,6 +2100,11 @@ CallFrequency BytecodeGraphBuilder::ComputeCallFrequency(int slot_id) const {
invocation_frequency_.value());
}
+SpeculationMode BytecodeGraphBuilder::GetSpeculationMode(int slot_id) const {
+ CallICNexus nexus(feedback_vector(), feedback_vector()->ToSlot(slot_id));
+ return nexus.GetSpeculationMode();
+}
+
void BytecodeGraphBuilder::VisitBitwiseNot() {
BuildUnaryOp(javascript()->BitwiseNot());
}
@@ -2574,7 +2575,7 @@ void BytecodeGraphBuilder::VisitSwitchOnSmiNoFeedback() {
PrepareEagerCheckpoint();
Node* acc = environment()->LookupAccumulator();
- Node* acc_smi = NewNode(simplified()->CheckSmi(), acc);
+ Node* acc_smi = NewNode(simplified()->CheckSmi(VectorSlotPair()), acc);
BuildSwitchOnSmi(acc_smi);
}
@@ -2670,7 +2671,9 @@ void BytecodeGraphBuilder::VisitForInNext() {
// We need to rename the {index} here, as in the case of OSR we lose the
// information that the {index} is always a valid unsigned Smi value.
index = graph()->NewNode(common()->TypeGuard(Type::UnsignedSmall()), index,
+ environment()->GetEffectDependency(),
environment()->GetControlDependency());
+ environment()->UpdateEffectDependency(index);
FeedbackSlot slot =
feedback_vector()->ToSlot(bytecode_iterator().GetIndexOperand(3));
@@ -2736,14 +2739,16 @@ void BytecodeGraphBuilder::VisitRestoreGeneratorState() {
environment()->BindAccumulator(state, Environment::kAttachFrameState);
}
-void BytecodeGraphBuilder::VisitRestoreGeneratorRegisters() {
+void BytecodeGraphBuilder::VisitResumeGenerator() {
Node* generator =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- interpreter::Register first_reg = bytecode_iterator().GetRegisterOperand(1);
+ interpreter::Register generator_state_reg =
+ bytecode_iterator().GetRegisterOperand(1);
+ interpreter::Register first_reg = bytecode_iterator().GetRegisterOperand(2);
// We assume we are restoring registers starting from index 0.
CHECK_EQ(0, first_reg.index());
int register_count =
- static_cast<int>(bytecode_iterator().GetRegisterCountOperand(2));
+ static_cast<int>(bytecode_iterator().GetRegisterCountOperand(3));
// Bijection between registers and array indices must match that used in
// InterpreterAssembler::ExportRegisterFile.
@@ -2751,6 +2756,16 @@ void BytecodeGraphBuilder::VisitRestoreGeneratorRegisters() {
Node* value = NewNode(javascript()->GeneratorRestoreRegister(i), generator);
environment()->BindRegister(interpreter::Register(i), value);
}
+
+ // We're no longer resuming, so update the state register.
+ environment()->BindRegister(
+ generator_state_reg,
+ jsgraph()->SmiConstant(JSGeneratorObject::kGeneratorExecuting));
+
+ // Update the accumulator with the generator's input_or_debug_pos.
+ Node* input_or_debug_pos =
+ NewNode(javascript()->GeneratorRestoreInputOrDebugPos(), generator);
+ environment()->BindAccumulator(input_or_debug_pos);
}
void BytecodeGraphBuilder::VisitWide() {
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.h b/deps/v8/src/compiler/bytecode-graph-builder.h
index 562c3ddaea..91b857298c 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.h
+++ b/deps/v8/src/compiler/bytecode-graph-builder.h
@@ -16,6 +16,9 @@
namespace v8 {
namespace internal {
+
+class VectorSlotPair;
+
namespace compiler {
class Reduction;
@@ -152,7 +155,6 @@ class BytecodeGraphBuilder {
void BuildCreateArguments(CreateArgumentsType type);
Node* BuildLoadGlobal(Handle<Name> name, uint32_t feedback_slot_index,
TypeofMode typeof_mode);
- void BuildStoreGlobal(LanguageMode language_mode);
enum class StoreMode {
// Check the prototype chain before storing.
@@ -232,6 +234,10 @@ class BytecodeGraphBuilder {
// feedback.
CallFrequency ComputeCallFrequency(int slot_id) const;
+ // Helper function to extract the speculation mode from the recorded type
+ // feedback.
+ SpeculationMode GetSpeculationMode(int slot_id) const;
+
// Control flow plumbing.
void BuildJump();
void BuildJumpIf(Node* condition);
diff --git a/deps/v8/src/compiler/c-linkage.cc b/deps/v8/src/compiler/c-linkage.cc
index dd4197d466..330b19fac3 100644
--- a/deps/v8/src/compiler/c-linkage.cc
+++ b/deps/v8/src/compiler/c-linkage.cc
@@ -224,7 +224,7 @@ CallDescriptor* Linkage::GetSimplifiedCDescriptor(
// The target for C calls is always an address (i.e. machine pointer).
MachineType target_type = MachineType::Pointer();
LinkageLocation target_loc = LinkageLocation::ForAnyRegister(target_type);
- CallDescriptor::Flags flags = CallDescriptor::kUseNativeStack;
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
if (set_initialize_root_flag) {
flags |= CallDescriptor::kInitializeRootRegister;
}
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index f24cec64a7..071f8952db 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -245,7 +245,12 @@ TNode<IntPtrT> CodeAssembler::IntPtrConstant(intptr_t value) {
}
TNode<Number> CodeAssembler::NumberConstant(double value) {
- return UncheckedCast<Number>(raw_assembler()->NumberConstant(value));
+ int smi_value;
+ if (DoubleToSmiInteger(value, &smi_value)) {
+ return UncheckedCast<Number>(SmiConstant(smi_value));
+ } else {
+ return UncheckedCast<Number>(raw_assembler()->NumberConstant(value));
+ }
}
TNode<Smi> CodeAssembler::SmiConstant(Smi* value) {
@@ -1357,13 +1362,13 @@ Node* CodeAssemblerVariable::value() const {
str << "#Use of unbound variable:"
<< "#\n Variable: " << *this << "#\n Current Block: ";
state_->PrintCurrentBlock(str);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
if (!state_->InsideBlock()) {
std::stringstream str;
str << "#Accessing variable value outside a block:"
<< "#\n Variable: " << *this;
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
#endif // DEBUG
return impl_->value_;
@@ -1456,7 +1461,7 @@ void CodeAssemblerLabel::MergeVariables() {
}
str << "\n# Current Block: ";
state_->PrintCurrentBlock(str);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
#endif // DEBUG
}
@@ -1472,7 +1477,7 @@ void CodeAssemblerLabel::Bind(AssemblerDebugInfo debug_info) {
str << "Cannot bind the same label twice:"
<< "\n# current: " << debug_info
<< "\n# previous: " << *label_->block();
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
state_->raw_assembler_->Bind(label_, debug_info);
UpdateVariablesAfterBind();
@@ -1524,7 +1529,7 @@ void CodeAssemblerLabel::UpdateVariablesAfterBind() {
<< " vs. found=" << (not_found ? 0 : i->second.size())
<< "\n# Variable: " << *var_impl
<< "\n# Current Block: " << *label_->block();
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
#endif // DEBUG
Node* phi = state_->raw_assembler_->Phi(
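
The repeated FATAL("%s", ...) change above is standard format-string hardening: the assembled message may itself contain '%', so it must be passed as data rather than as the format. A small sketch of the failure mode, with printf standing in for the formatting path behind FATAL:

    #include <cstdio>

    int main() {
      // If the message reaches the formatter as the format string, any '%'
      // in it is parsed as a conversion specifier and reads a nonexistent
      // vararg:
      const char* msg = "coverage reached 100% of blocks";
      // printf(msg);        // undefined behavior: "% o" consumed as a specifier
      printf("%s\n", msg);   // safe: message passed as data, as in FATAL("%s", ...)
      return 0;
    }
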
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index 90a9d02fce..9f0d463dc1 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -17,6 +17,7 @@
#include "src/globals.h"
#include "src/heap/heap.h"
#include "src/machine-type.h"
+#include "src/objects/data-handler.h"
#include "src/runtime/runtime.h"
#include "src/zone/zone-containers.h"
@@ -26,6 +27,10 @@ namespace internal {
class Callable;
class CallInterfaceDescriptor;
class Isolate;
+class JSCollection;
+class JSWeakCollection;
+class JSWeakMap;
+class JSWeakSet;
class Factory;
class Zone;
@@ -252,7 +257,7 @@ class Node;
class RawMachineAssembler;
class RawMachineLabel;
-typedef ZoneList<CodeAssemblerVariable*> CodeAssemblerVariableList;
+typedef ZoneVector<CodeAssemblerVariable*> CodeAssemblerVariableList;
typedef std::function<void()> CodeAssemblerCallback;
@@ -1062,6 +1067,11 @@ class V8_EXPORT_PRIVATE CodeAssembler {
bool Word32ShiftIsSafe() const;
private:
+ // These two don't have definitions and are here only for catching use cases
+ // where the cast is not necessary.
+ TNode<Int32T> Signed(TNode<Int32T> x);
+ TNode<Uint32T> Unsigned(TNode<Uint32T> x);
+
RawMachineAssembler* raw_assembler() const;
// Calls respective callback registered in the state.
@@ -1157,7 +1167,7 @@ class CodeAssemblerLabel {
CodeAssembler* assembler,
const CodeAssemblerVariableList& merged_variables,
CodeAssemblerLabel::Type type = CodeAssemblerLabel::kNonDeferred)
- : CodeAssemblerLabel(assembler, merged_variables.length(),
+ : CodeAssemblerLabel(assembler, merged_variables.size(),
&(merged_variables[0]), type) {}
CodeAssemblerLabel(
CodeAssembler* assembler, size_t count,
diff --git a/deps/v8/src/compiler/code-generator.cc b/deps/v8/src/compiler/code-generator.cc
index 3d43ab4765..0fb38e5933 100644
--- a/deps/v8/src/compiler/code-generator.cc
+++ b/deps/v8/src/compiler/code-generator.cc
@@ -310,7 +310,10 @@ MaybeHandle<HandlerTable> CodeGenerator::GetHandlerTable() const {
}
Handle<Code> CodeGenerator::FinalizeCode() {
- if (result_ != kSuccess) return Handle<Code>();
+ if (result_ != kSuccess) {
+ tasm()->AbortedCodeGeneration();
+ return Handle<Code>();
+ }
// Allocate exception handler table.
Handle<HandlerTable> table = HandlerTable::Empty(isolate());
@@ -915,9 +918,17 @@ int CodeGenerator::BuildTranslation(Instruction* instr, int pc_offset,
FrameStateDescriptor* const descriptor = entry.descriptor();
frame_state_offset++;
- Translation translation(
- &translations_, static_cast<int>(descriptor->GetFrameCount()),
- static_cast<int>(descriptor->GetJSFrameCount()), zone());
+ int update_feedback_count = entry.feedback().IsValid() ? 1 : 0;
+ Translation translation(&translations_,
+ static_cast<int>(descriptor->GetFrameCount()),
+ static_cast<int>(descriptor->GetJSFrameCount()),
+ update_feedback_count, zone());
+ if (entry.feedback().IsValid()) {
+ DeoptimizationLiteral literal =
+ DeoptimizationLiteral(entry.feedback().vector());
+ int literal_id = DefineDeoptimizationLiteral(literal);
+ translation.AddUpdateFeedback(literal_id, entry.feedback().slot().ToInt());
+ }
InstructionOperandIterator iter(instr, frame_state_offset);
BuildTranslationForFrameStateDescriptor(descriptor, &iter, &translation,
state_combine);
@@ -1000,8 +1011,6 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
literal = DeoptimizationLiteral(isolate()->factory()->true_value());
}
} else {
- // TODO(jarin,bmeurer): We currently pass in raw pointers to the
- // JSFunction::entry here. We should really consider fixing this.
DCHECK(type == MachineType::Int32() ||
type == MachineType::Uint32() ||
type.representation() == MachineRepresentation::kWord32 ||
@@ -1019,8 +1028,6 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
case Constant::kInt64:
// When pointers are 8 bytes, we can use int64 constants to represent
// Smis.
- // TODO(jarin,bmeurer): We currently pass in raw pointers to the
- // JSFunction::entry here. We should really consider fixing this.
DCHECK(type.representation() == MachineRepresentation::kWord64 ||
type.representation() == MachineRepresentation::kTagged);
DCHECK_EQ(8, kPointerSize);
diff --git a/deps/v8/src/compiler/common-operator-reducer.cc b/deps/v8/src/compiler/common-operator-reducer.cc
index 70fdf71578..d9bc5c8173 100644
--- a/deps/v8/src/compiler/common-operator-reducer.cc
+++ b/deps/v8/src/compiler/common-operator-reducer.cc
@@ -138,9 +138,10 @@ Reduction CommonOperatorReducer::ReduceDeoptimizeConditional(Node* node) {
if (condition->opcode() == IrOpcode::kBooleanNot) {
NodeProperties::ReplaceValueInput(node, condition->InputAt(0), 0);
NodeProperties::ChangeOp(
- node, condition_is_true
- ? common()->DeoptimizeIf(p.kind(), p.reason())
- : common()->DeoptimizeUnless(p.kind(), p.reason()));
+ node, condition_is_true ? common()->DeoptimizeIf(p.kind(), p.reason(),
+ VectorSlotPair())
+ : common()->DeoptimizeUnless(
+ p.kind(), p.reason(), VectorSlotPair()));
return Changed(node);
}
Decision const decision = DecideCondition(condition);
@@ -148,8 +149,9 @@ Reduction CommonOperatorReducer::ReduceDeoptimizeConditional(Node* node) {
if (condition_is_true == (decision == Decision::kTrue)) {
ReplaceWithValue(node, dead(), effect, control);
} else {
- control = graph()->NewNode(common()->Deoptimize(p.kind(), p.reason()),
- frame_state, effect, control);
+ control = graph()->NewNode(
+ common()->Deoptimize(p.kind(), p.reason(), VectorSlotPair()),
+ frame_state, effect, control);
// TODO(bmeurer): This should be on the AdvancedReducer somehow.
NodeProperties::MergeControlToEnd(graph(), common(), control);
Revisit(graph()->end());
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index f43ff7e515..54af052d56 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -43,7 +43,8 @@ int ValueInputCountOfReturn(Operator const* const op) {
}
bool operator==(DeoptimizeParameters lhs, DeoptimizeParameters rhs) {
- return lhs.kind() == rhs.kind() && lhs.reason() == rhs.reason();
+ return lhs.kind() == rhs.kind() && lhs.reason() == rhs.reason() &&
+ lhs.feedback() == rhs.feedback();
}
bool operator!=(DeoptimizeParameters lhs, DeoptimizeParameters rhs) {
@@ -51,11 +52,15 @@ bool operator!=(DeoptimizeParameters lhs, DeoptimizeParameters rhs) {
}
size_t hash_value(DeoptimizeParameters p) {
- return base::hash_combine(p.kind(), p.reason());
+ return base::hash_combine(p.kind(), p.reason(), p.feedback());
}
std::ostream& operator<<(std::ostream& os, DeoptimizeParameters p) {
- return os << p.kind() << ":" << p.reason();
+ os << p.kind() << ":" << p.reason();
+ if (p.feedback().IsValid()) {
+ os << "; " << p.feedback();
+ }
+ return os;
}
DeoptimizeParameters const& DeoptimizeParametersOf(Operator const* const op) {
@@ -343,8 +348,7 @@ ZoneVector<MachineType> const* MachineTypesOf(Operator const* op) {
#define COMMON_CACHED_OP_LIST(V) \
V(Dead, Operator::kFoldable, 0, 0, 0, 1, 1, 1) \
- V(DeadValue, Operator::kFoldable, 0, 0, 0, 1, 0, 0) \
- V(Unreachable, Operator::kFoldable, 0, 1, 1, 0, 1, 0) \
+ V(Unreachable, Operator::kFoldable, 0, 1, 1, 1, 1, 0) \
V(IfTrue, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
V(IfFalse, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
V(IfSuccess, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
@@ -409,7 +413,6 @@ ZoneVector<MachineType> const* MachineTypesOf(Operator const* op) {
#define CACHED_DEOPTIMIZE_LIST(V) \
V(Eager, MinusZero) \
- V(Eager, NoReason) \
V(Eager, WrongMap) \
V(Soft, InsufficientTypeFeedbackForGenericKeyedAccess) \
V(Soft, InsufficientTypeFeedbackForGenericNamedAccess)
@@ -424,7 +427,6 @@ ZoneVector<MachineType> const* MachineTypesOf(Operator const* op) {
#define CACHED_DEOPTIMIZE_UNLESS_LIST(V) \
V(Eager, LostPrecision) \
V(Eager, LostPrecisionOrNaN) \
- V(Eager, NoReason) \
V(Eager, NotAHeapNumber) \
V(Eager, NotANumberOrOddball) \
V(Eager, NotASmi) \
@@ -606,7 +608,7 @@ struct CommonOperatorGlobalCache final {
Operator::kFoldable | Operator::kNoThrow, // properties
"Deoptimize", // name
1, 1, 1, 0, 0, 1, // counts
- DeoptimizeParameters(kKind, kReason)) {} // parameter
+ DeoptimizeParameters(kKind, kReason, VectorSlotPair())) {}
};
#define CACHED_DEOPTIMIZE(Kind, Reason) \
DeoptimizeOperator<DeoptimizeKind::k##Kind, DeoptimizeReason::k##Reason> \
@@ -622,7 +624,7 @@ struct CommonOperatorGlobalCache final {
Operator::kFoldable | Operator::kNoThrow, // properties
"DeoptimizeIf", // name
2, 1, 1, 0, 1, 1, // counts
- DeoptimizeParameters(kKind, kReason)) {} // parameter
+ DeoptimizeParameters(kKind, kReason, VectorSlotPair())) {}
};
#define CACHED_DEOPTIMIZE_IF(Kind, Reason) \
DeoptimizeIfOperator<DeoptimizeKind::k##Kind, DeoptimizeReason::k##Reason> \
@@ -639,7 +641,7 @@ struct CommonOperatorGlobalCache final {
Operator::kFoldable | Operator::kNoThrow, // properties
"DeoptimizeUnless", // name
2, 1, 1, 0, 1, 1, // counts
- DeoptimizeParameters(kKind, kReason)) {} // parameter
+ DeoptimizeParameters(kKind, kReason, VectorSlotPair())) {}
};
#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason) \
DeoptimizeUnlessOperator<DeoptimizeKind::k##Kind, \
@@ -817,17 +819,18 @@ const Operator* CommonOperatorBuilder::Branch(BranchHint hint) {
UNREACHABLE();
}
-const Operator* CommonOperatorBuilder::Deoptimize(DeoptimizeKind kind,
- DeoptimizeReason reason) {
-#define CACHED_DEOPTIMIZE(Kind, Reason) \
- if (kind == DeoptimizeKind::k##Kind && \
- reason == DeoptimizeReason::k##Reason) { \
- return &cache_.kDeoptimize##Kind##Reason##Operator; \
+const Operator* CommonOperatorBuilder::Deoptimize(
+ DeoptimizeKind kind, DeoptimizeReason reason,
+ VectorSlotPair const& feedback) {
+#define CACHED_DEOPTIMIZE(Kind, Reason) \
+ if (kind == DeoptimizeKind::k##Kind && \
+ reason == DeoptimizeReason::k##Reason && !feedback.IsValid()) { \
+ return &cache_.kDeoptimize##Kind##Reason##Operator; \
}
CACHED_DEOPTIMIZE_LIST(CACHED_DEOPTIMIZE)
#undef CACHED_DEOPTIMIZE
// Uncached
- DeoptimizeParameters parameter(kind, reason);
+ DeoptimizeParameters parameter(kind, reason, feedback);
return new (zone()) Operator1<DeoptimizeParameters>( // --
IrOpcode::kDeoptimize, // opcode
Operator::kFoldable | Operator::kNoThrow, // properties
@@ -836,17 +839,18 @@ const Operator* CommonOperatorBuilder::Deoptimize(DeoptimizeKind kind,
parameter); // parameter
}
-const Operator* CommonOperatorBuilder::DeoptimizeIf(DeoptimizeKind kind,
- DeoptimizeReason reason) {
-#define CACHED_DEOPTIMIZE_IF(Kind, Reason) \
- if (kind == DeoptimizeKind::k##Kind && \
- reason == DeoptimizeReason::k##Reason) { \
- return &cache_.kDeoptimizeIf##Kind##Reason##Operator; \
+const Operator* CommonOperatorBuilder::DeoptimizeIf(
+ DeoptimizeKind kind, DeoptimizeReason reason,
+ VectorSlotPair const& feedback) {
+#define CACHED_DEOPTIMIZE_IF(Kind, Reason) \
+ if (kind == DeoptimizeKind::k##Kind && \
+ reason == DeoptimizeReason::k##Reason && !feedback.IsValid()) { \
+ return &cache_.kDeoptimizeIf##Kind##Reason##Operator; \
}
CACHED_DEOPTIMIZE_IF_LIST(CACHED_DEOPTIMIZE_IF)
#undef CACHED_DEOPTIMIZE_IF
// Uncached
- DeoptimizeParameters parameter(kind, reason);
+ DeoptimizeParameters parameter(kind, reason, feedback);
return new (zone()) Operator1<DeoptimizeParameters>( // --
IrOpcode::kDeoptimizeIf, // opcode
Operator::kFoldable | Operator::kNoThrow, // properties
@@ -856,16 +860,17 @@ const Operator* CommonOperatorBuilder::DeoptimizeIf(DeoptimizeKind kind,
}
const Operator* CommonOperatorBuilder::DeoptimizeUnless(
- DeoptimizeKind kind, DeoptimizeReason reason) {
-#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason) \
- if (kind == DeoptimizeKind::k##Kind && \
- reason == DeoptimizeReason::k##Reason) { \
- return &cache_.kDeoptimizeUnless##Kind##Reason##Operator; \
+ DeoptimizeKind kind, DeoptimizeReason reason,
+ VectorSlotPair const& feedback) {
+#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason) \
+ if (kind == DeoptimizeKind::k##Kind && \
+ reason == DeoptimizeReason::k##Reason && !feedback.IsValid()) { \
+ return &cache_.kDeoptimizeUnless##Kind##Reason##Operator; \
}
CACHED_DEOPTIMIZE_UNLESS_LIST(CACHED_DEOPTIMIZE_UNLESS)
#undef CACHED_DEOPTIMIZE_UNLESS
// Uncached
- DeoptimizeParameters parameter(kind, reason);
+ DeoptimizeParameters parameter(kind, reason, feedback);
return new (zone()) Operator1<DeoptimizeParameters>( // --
IrOpcode::kDeoptimizeUnless, // opcode
Operator::kFoldable | Operator::kNoThrow, // properties
@@ -1131,7 +1136,7 @@ const Operator* CommonOperatorBuilder::TypeGuard(Type* type) {
return new (zone()) Operator1<Type*>( // --
IrOpcode::kTypeGuard, Operator::kPure, // opcode
"TypeGuard", // name
- 1, 0, 1, 1, 0, 0, // counts
+ 1, 1, 1, 1, 1, 0, // counts
type); // parameter
}
@@ -1278,6 +1283,11 @@ uint32_t ObjectIdOf(Operator const* op) {
}
}
+MachineRepresentation DeadValueRepresentationOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kDeadValue, op->opcode());
+ return OpParameter<MachineRepresentation>(op);
+}
+
const Operator* CommonOperatorBuilder::FrameState(
BailoutId bailout_id, OutputFrameStateCombine state_combine,
const FrameStateFunctionInfo* function_info) {
@@ -1393,6 +1403,31 @@ CommonOperatorBuilder::CreateFrameStateFunctionInfo(
FrameStateFunctionInfo(type, parameter_count, local_count, shared_info);
}
+const Operator* CommonOperatorBuilder::DeadValue(MachineRepresentation rep) {
+ return new (zone()) Operator1<MachineRepresentation>( // --
+ IrOpcode::kDeadValue, Operator::kPure, // opcode
+ "DeadValue", // name
+ 1, 0, 0, 1, 0, 0, // counts
+ rep); // parameter
+}
+
+#undef COMMON_CACHED_OP_LIST
+#undef CACHED_RETURN_LIST
+#undef CACHED_END_LIST
+#undef CACHED_EFFECT_PHI_LIST
+#undef CACHED_INDUCTION_VARIABLE_PHI_LIST
+#undef CACHED_LOOP_LIST
+#undef CACHED_MERGE_LIST
+#undef CACHED_DEOPTIMIZE_LIST
+#undef CACHED_DEOPTIMIZE_IF_LIST
+#undef CACHED_DEOPTIMIZE_UNLESS_LIST
+#undef CACHED_TRAP_IF_LIST
+#undef CACHED_TRAP_UNLESS_LIST
+#undef CACHED_PARAMETER_LIST
+#undef CACHED_PHI_LIST
+#undef CACHED_PROJECTION_LIST
+#undef CACHED_STATE_VALUES_LIST
+
} // namespace compiler
} // namespace internal
} // namespace v8
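
The !feedback.IsValid() guard added to each CACHED_DEOPTIMIZE* macro deserves a note: the statically built operator cache only covers parameter tuples with an empty VectorSlotPair, so any operator carrying real feedback must be zone-allocated. A simplified model of that dispatch (types are stand-ins; only the caching rule is taken from the diff):

    #include <cassert>

    struct VectorSlotPair {           // simplified stand-in
      bool valid = false;
      bool IsValid() const { return valid; }
    };

    enum class Source { kCached, kZoneAllocated };

    // Mirrors the shape of CommonOperatorBuilder::Deoptimize's lookup.
    Source Lookup(bool reason_is_cached, const VectorSlotPair& feedback) {
      if (reason_is_cached && !feedback.IsValid()) return Source::kCached;
      return Source::kZoneAllocated;  // uncached: new Operator1 in the zone
    }

    int main() {
      assert(Lookup(true, VectorSlotPair{}) == Source::kCached);
      assert(Lookup(true, VectorSlotPair{true}) == Source::kZoneAllocated);
      return 0;
    }
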
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index 06541d9a38..0e0614dced 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -11,6 +11,7 @@
#include "src/deoptimize-reason.h"
#include "src/globals.h"
#include "src/machine-type.h"
+#include "src/vector-slot-pair.h"
#include "src/zone/zone-containers.h"
#include "src/zone/zone-handle-set.h"
@@ -52,15 +53,18 @@ int ValueInputCountOfReturn(Operator const* const op);
// Parameters for the {Deoptimize} operator.
class DeoptimizeParameters final {
public:
- DeoptimizeParameters(DeoptimizeKind kind, DeoptimizeReason reason)
- : kind_(kind), reason_(reason) {}
+ DeoptimizeParameters(DeoptimizeKind kind, DeoptimizeReason reason,
+ VectorSlotPair const& feedback)
+ : kind_(kind), reason_(reason), feedback_(feedback) {}
DeoptimizeKind kind() const { return kind_; }
DeoptimizeReason reason() const { return reason_; }
+ const VectorSlotPair& feedback() const { return feedback_; }
private:
DeoptimizeKind const kind_;
DeoptimizeReason const reason_;
+ VectorSlotPair const feedback_;
};
bool operator==(DeoptimizeParameters, DeoptimizeParameters);
@@ -338,6 +342,8 @@ ArgumentsStateType ArgumentsStateTypeOf(Operator const*) WARN_UNUSED_RESULT;
uint32_t ObjectIdOf(Operator const*);
+MachineRepresentation DeadValueRepresentationOf(Operator const*);
+
// Interface for building common operators that can be used at any level of IR,
// including JavaScript, mid-level, and low-level.
class V8_EXPORT_PRIVATE CommonOperatorBuilder final
@@ -346,7 +352,7 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
explicit CommonOperatorBuilder(Zone* zone);
const Operator* Dead();
- const Operator* DeadValue();
+ const Operator* DeadValue(MachineRepresentation rep);
const Operator* Unreachable();
const Operator* End(size_t control_input_count);
const Operator* Branch(BranchHint = BranchHint::kNone);
@@ -358,10 +364,12 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const Operator* IfValue(int32_t value);
const Operator* IfDefault();
const Operator* Throw();
- const Operator* Deoptimize(DeoptimizeKind kind, DeoptimizeReason reason);
- const Operator* DeoptimizeIf(DeoptimizeKind kind, DeoptimizeReason reason);
- const Operator* DeoptimizeUnless(DeoptimizeKind kind,
- DeoptimizeReason reason);
+ const Operator* Deoptimize(DeoptimizeKind kind, DeoptimizeReason reason,
+ VectorSlotPair const& feedback);
+ const Operator* DeoptimizeIf(DeoptimizeKind kind, DeoptimizeReason reason,
+ VectorSlotPair const& feedback);
+ const Operator* DeoptimizeUnless(DeoptimizeKind kind, DeoptimizeReason reason,
+ VectorSlotPair const& feedback);
const Operator* TrapIf(int32_t trap_id);
const Operator* TrapUnless(int32_t trap_id);
const Operator* Return(int value_input_count = 1);
diff --git a/deps/v8/src/compiler/dead-code-elimination.cc b/deps/v8/src/compiler/dead-code-elimination.cc
index d40bc37b6d..523d37fe29 100644
--- a/deps/v8/src/compiler/dead-code-elimination.cc
+++ b/deps/v8/src/compiler/dead-code-elimination.cc
@@ -21,10 +21,8 @@ DeadCodeElimination::DeadCodeElimination(Editor* editor, Graph* graph,
graph_(graph),
common_(common),
dead_(graph->NewNode(common->Dead())),
- dead_value_(graph->NewNode(common->DeadValue())),
zone_(temp_zone) {
NodeProperties::SetType(dead_, Type::None());
- NodeProperties::SetType(dead_value_, Type::None());
}
namespace {
@@ -38,11 +36,11 @@ bool NoReturn(Node* node) {
NodeProperties::GetTypeOrAny(node)->IsNone();
}
-bool HasDeadInput(Node* node) {
+Node* FindDeadInput(Node* node) {
for (Node* input : node->inputs()) {
- if (NoReturn(input)) return true;
+ if (NoReturn(input)) return input;
}
- return false;
+ return nullptr;
}
} // namespace
@@ -209,17 +207,27 @@ Reduction DeadCodeElimination::ReducePhi(Node* node) {
DCHECK_EQ(IrOpcode::kPhi, node->opcode());
Reduction reduction = PropagateDeadControl(node);
if (reduction.Changed()) return reduction;
- if (PhiRepresentationOf(node->op()) == MachineRepresentation::kNone ||
+ MachineRepresentation rep = PhiRepresentationOf(node->op());
+ if (rep == MachineRepresentation::kNone ||
NodeProperties::GetTypeOrAny(node)->IsNone()) {
- return Replace(dead_value());
+ return Replace(DeadValue(node, rep));
+ }
+ int input_count = node->op()->ValueInputCount();
+ for (int i = 0; i < input_count; ++i) {
+ Node* input = NodeProperties::GetValueInput(node, i);
+ if (input->opcode() == IrOpcode::kDeadValue &&
+ DeadValueRepresentationOf(input->op()) != rep) {
+ NodeProperties::ReplaceValueInput(node, DeadValue(input, rep), i);
+ }
}
return NoChange();
}
Reduction DeadCodeElimination::ReducePureNode(Node* node) {
DCHECK_EQ(0, node->op()->EffectInputCount());
- if (HasDeadInput(node)) {
- return Replace(dead_value());
+ if (node->opcode() == IrOpcode::kDeadValue) return NoChange();
+ if (Node* input = FindDeadInput(node)) {
+ return Replace(DeadValue(input));
}
return NoChange();
}
@@ -234,8 +242,7 @@ Reduction DeadCodeElimination::ReduceUnreachableOrIfException(Node* node) {
return Replace(effect);
}
if (effect->opcode() == IrOpcode::kUnreachable) {
- RelaxEffectsAndControls(node);
- return Replace(dead_value());
+ return Replace(effect);
}
return NoChange();
}
@@ -246,10 +253,10 @@ Reduction DeadCodeElimination::ReduceEffectNode(Node* node) {
if (effect->opcode() == IrOpcode::kDead) {
return Replace(effect);
}
- if (HasDeadInput(node)) {
+ if (Node* input = FindDeadInput(node)) {
if (effect->opcode() == IrOpcode::kUnreachable) {
RelaxEffectsAndControls(node);
- return Replace(dead_value());
+ return Replace(DeadValue(input));
}
Node* control = node->op()->ControlInputCount() == 1
@@ -257,7 +264,8 @@ Reduction DeadCodeElimination::ReduceEffectNode(Node* node) {
: graph()->start();
Node* unreachable =
graph()->NewNode(common()->Unreachable(), effect, control);
- ReplaceWithValue(node, dead_value(), node, control);
+ NodeProperties::SetType(unreachable, Type::None());
+ ReplaceWithValue(node, DeadValue(input), node, control);
return Replace(unreachable);
}
@@ -270,11 +278,12 @@ Reduction DeadCodeElimination::ReduceDeoptimizeOrReturnOrTerminate(Node* node) {
node->opcode() == IrOpcode::kTerminate);
Reduction reduction = PropagateDeadControl(node);
if (reduction.Changed()) return reduction;
- if (HasDeadInput(node)) {
+ if (FindDeadInput(node) != nullptr) {
Node* effect = NodeProperties::GetEffectInput(node, 0);
Node* control = NodeProperties::GetControlInput(node, 0);
if (effect->opcode() != IrOpcode::kUnreachable) {
effect = graph()->NewNode(common()->Unreachable(), effect, control);
+ NodeProperties::SetType(effect, Type::None());
}
node->TrimInputCount(2);
node->ReplaceInput(0, effect);
@@ -322,6 +331,16 @@ void DeadCodeElimination::TrimMergeOrPhi(Node* node, int size) {
NodeProperties::ChangeOp(node, op);
}
+Node* DeadCodeElimination::DeadValue(Node* node, MachineRepresentation rep) {
+ if (node->opcode() == IrOpcode::kDeadValue) {
+ if (rep == DeadValueRepresentationOf(node->op())) return node;
+ node = NodeProperties::GetValueInput(node, 0);
+ }
+ Node* dead_value = graph()->NewNode(common()->DeadValue(rep), node);
+ NodeProperties::SetType(dead_value, Type::None());
+ return dead_value;
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
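The HasDeadInput to FindDeadInput change matters because the reducer no longer needs a mere yes/no answer: the freshly created {DeadValue} keeps a value input pointing at the None-typed node that caused the dead code, preserving the dependency on the cause of unreachability. A toy, self-contained model of the pattern (Node here is a stand-in, not V8's class):

    #include <cstdio>
    #include <vector>

    struct Node {
      const char* name;
      bool no_return;              // models Type::None()
      std::vector<Node*> inputs;
    };

    // Return the offending input (the witness), not just a boolean, so the
    // caller can wire it into the replacement DeadValue node.
    Node* FindDeadInput(Node* node) {
      for (Node* input : node->inputs)
        if (input->no_return) return input;
      return nullptr;
    }

    int main() {
      Node dead{"unreachable-add", true, {}};
      Node ok{"const", false, {}};
      Node use{"mul", false, {&ok, &dead}};
      if (Node* cause = FindDeadInput(&use))
        std::printf("replace %s with DeadValue(%s)\n", use.name, cause->name);
    }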
diff --git a/deps/v8/src/compiler/dead-code-elimination.h b/deps/v8/src/compiler/dead-code-elimination.h
index b1e403ca86..217d58ef31 100644
--- a/deps/v8/src/compiler/dead-code-elimination.h
+++ b/deps/v8/src/compiler/dead-code-elimination.h
@@ -8,6 +8,7 @@
#include "src/base/compiler-specific.h"
#include "src/compiler/graph-reducer.h"
#include "src/globals.h"
+#include "src/machine-type.h"
namespace v8 {
namespace internal {
@@ -17,13 +18,23 @@ namespace compiler {
class CommonOperatorBuilder;
// Propagates {Dead} control and {DeadValue} values through the graph and
-// thereby removes dead code. When {DeadValue} hits the effect chain, a crashing
-// {Unreachable} node is inserted and the rest of the effect chain is collapsed.
-// We wait for the {EffectControlLinearizer} to connect {Unreachable} nodes to
-// the graph end, since this is much easier if there is no floating control.
-// We detect dead values based on types, pruning uses of DeadValue except for
-// uses by phi. These remaining uses are eliminated in the
-// {EffectControlLinearizer}, where they are replaced with dummy values.
+// thereby removes dead code.
+// We detect dead values based on types, replacing uses of nodes with
+// {Type::None()} with {DeadValue}. A pure node (other than a phi) using
+// {DeadValue} is replaced by {DeadValue}. When {DeadValue} hits the effect
+// chain, a crashing {Unreachable} node is inserted and the rest of the effect
+// chain is collapsed. We wait for the {EffectControlLinearizer} to connect
+// {Unreachable} nodes to the graph end, since this is much easier if there is
+// no floating control.
+// {DeadValue} has an input, which has to have {Type::None()}. This input is
+// important to maintain the dependency on the cause of the unreachable code.
+// {Unreachable} has a value output and {Type::None()} so it can be used by
+// {DeadValue}.
+// {DeadValue} nodes track a {MachineRepresentation} so they can be lowered to a
+// value-producing node. {DeadValue} has the runtime semantics of crashing and
+// behaves like a constant of its representation so it can be used in gap moves.
+// Since phi nodes are the only remaining use of {DeadValue}, this
+// representation is only adjusted for uses by phi nodes.
// In contrast to {DeadValue}, {Dead} can never remain in the graph.
class V8_EXPORT_PRIVATE DeadCodeElimination final
: public NON_EXPORTED_BASE(AdvancedReducer) {
@@ -53,15 +64,16 @@ class V8_EXPORT_PRIVATE DeadCodeElimination final
void TrimMergeOrPhi(Node* node, int size);
+ Node* DeadValue(Node* none_node,
+ MachineRepresentation rep = MachineRepresentation::kNone);
+
Graph* graph() const { return graph_; }
CommonOperatorBuilder* common() const { return common_; }
Node* dead() const { return dead_; }
- Node* dead_value() const { return dead_value_; }
Graph* const graph_;
CommonOperatorBuilder* const common_;
Node* const dead_;
- Node* const dead_value_;
Zone* zone_;
DISALLOW_COPY_AND_ASSIGN(DeadCodeElimination);
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index 2372a0fe40..a47941e28d 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -145,9 +145,10 @@ bool HasIncomingBackEdges(BasicBlock* block) {
return false;
}
-void RemoveRegionNode(Node* node) {
+void RemoveRenameNode(Node* node) {
DCHECK(IrOpcode::kFinishRegion == node->opcode() ||
- IrOpcode::kBeginRegion == node->opcode());
+ IrOpcode::kBeginRegion == node->opcode() ||
+ IrOpcode::kTypeGuard == node->opcode());
// Update the value/context uses to the value input of the finish node and
// the effect uses to the effect input.
for (Edge edge : node->use_edges()) {
@@ -318,28 +319,6 @@ void TryCloneBranch(Node* node, BasicBlock* block, Zone* temp_zone,
merge->Kill();
}
-Node* DummyValue(JSGraph* jsgraph, MachineRepresentation rep) {
- switch (rep) {
- case MachineRepresentation::kTagged:
- case MachineRepresentation::kTaggedSigned:
- return jsgraph->SmiConstant(0xdead);
- case MachineRepresentation::kTaggedPointer:
- return jsgraph->TheHoleConstant();
- case MachineRepresentation::kWord64:
- return jsgraph->Int64Constant(0xdead);
- case MachineRepresentation::kWord32:
- return jsgraph->Int32Constant(0xdead);
- case MachineRepresentation::kFloat64:
- return jsgraph->Float64Constant(0xdead);
- case MachineRepresentation::kFloat32:
- return jsgraph->Float32Constant(0xdead);
- case MachineRepresentation::kBit:
- return jsgraph->Int32Constant(0);
- default:
- UNREACHABLE();
- }
-}
-
} // namespace
void EffectControlLinearizer::Run() {
@@ -369,7 +348,6 @@ void EffectControlLinearizer::Run() {
// Iterate over the phis and update the effect phis.
Node* effect_phi = nullptr;
Node* terminate = nullptr;
- int predecessor_count = static_cast<int>(block->PredecessorCount());
for (; instr < block->NodeCount(); instr++) {
Node* node = block->NodeAt(instr);
// Only go through the phis and effect phis.
@@ -380,19 +358,7 @@ void EffectControlLinearizer::Run() {
DCHECK_NE(IrOpcode::kIfException, control->opcode());
effect_phi = node;
} else if (node->opcode() == IrOpcode::kPhi) {
- DCHECK_EQ(predecessor_count, node->op()->ValueInputCount());
- for (int i = 0; i < predecessor_count; ++i) {
- if (NodeProperties::GetValueInput(node, i)->opcode() ==
- IrOpcode::kDeadValue) {
- // Phi uses of {DeadValue} must originate from unreachable code. Due
- // to schedule freedom between the effect and the control chain,
- // they might still appear in reachable code. So we replace them
- // with a dummy value.
- NodeProperties::ReplaceValueInput(
- node, DummyValue(jsgraph(), PhiRepresentationOf(node->op())),
- i);
- }
- }
+ // Just skip phis.
} else if (node->opcode() == IrOpcode::kTerminate) {
DCHECK_NULL(terminate);
terminate = node;
@@ -573,7 +539,7 @@ void EffectControlLinearizer::ProcessNode(Node* node, Node** frame_state,
region_observability_ = RegionObservability::kObservable;
// Update the value uses to the value input of the finish node and
// the effect uses to the effect input.
- return RemoveRegionNode(node);
+ return RemoveRenameNode(node);
}
if (node->opcode() == IrOpcode::kBeginRegion) {
// Determine the observability for this region and use that for all
@@ -583,7 +549,10 @@ void EffectControlLinearizer::ProcessNode(Node* node, Node** frame_state,
region_observability_ = RegionObservabilityOf(node->op());
// Update the value uses to the value input of the finish node and
// the effect uses to the effect input.
- return RemoveRegionNode(node);
+ return RemoveRenameNode(node);
+ }
+ if (node->opcode() == IrOpcode::kTypeGuard) {
+ return RemoveRenameNode(node);
}
// Special treatment for checkpoint nodes.
@@ -781,6 +750,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kCheckedTruncateTaggedToWord32:
result = LowerCheckedTruncateTaggedToWord32(node, frame_state);
break;
+ case IrOpcode::kNumberToString:
+ result = LowerNumberToString(node);
+ break;
case IrOpcode::kObjectIsArrayBufferView:
result = LowerObjectIsArrayBufferView(node);
break;
@@ -847,12 +819,17 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kNewArgumentsElements:
result = LowerNewArgumentsElements(node);
break;
+ case IrOpcode::kNewConsString:
+ result = LowerNewConsString(node);
+ break;
case IrOpcode::kArrayBufferWasNeutered:
result = LowerArrayBufferWasNeutered(node);
break;
case IrOpcode::kSameValue:
result = LowerSameValue(node);
break;
+ case IrOpcode::kDeadValue:
+ result = LowerDeadValue(node);
+ break;
case IrOpcode::kStringFromCharCode:
result = LowerStringFromCharCode(node);
break;
@@ -862,6 +839,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kStringIndexOf:
result = LowerStringIndexOf(node);
break;
+ case IrOpcode::kStringLength:
+ result = LowerStringLength(node);
+ break;
case IrOpcode::kStringToNumber:
result = LowerStringToNumber(node);
break;
@@ -874,6 +854,12 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kSeqStringCharCodeAt:
result = LowerSeqStringCharCodeAt(node);
break;
+ case IrOpcode::kStringCodePointAt:
+ result = LowerStringCodePointAt(node);
+ break;
+ case IrOpcode::kSeqStringCodePointAt:
+ result = LowerSeqStringCodePointAt(node, UnicodeEncoding::UTF16);
+ break;
case IrOpcode::kStringToLowerCaseIntl:
result = LowerStringToLowerCaseIntl(node);
break;
@@ -889,6 +875,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kStringLessThanOrEqual:
result = LowerStringLessThanOrEqual(node);
break;
+ case IrOpcode::kNumberIsFloat64Hole:
+ result = LowerNumberIsFloat64Hole(node);
+ break;
case IrOpcode::kCheckFloat64Hole:
result = LowerCheckFloat64Hole(node, frame_state);
break;
@@ -1136,6 +1125,7 @@ void EffectControlLinearizer::TruncateTaggedPointerToBit(
Node* value = node->InputAt(0);
auto if_heapnumber = __ MakeDeferredLabel();
+ auto if_bigint = __ MakeDeferredLabel();
Node* zero = __ Int32Constant(0);
Node* fzero = __ Float64Constant(0.0);
@@ -1154,15 +1144,22 @@ void EffectControlLinearizer::TruncateTaggedPointerToBit(
Node* value_map_bitfield =
__ LoadField(AccessBuilder::ForMapBitField(), value_map);
__ GotoIfNot(
- __ Word32Equal(__ Word32And(value_map_bitfield,
- __ Int32Constant(1 << Map::kIsUndetectable)),
- zero),
+ __ Word32Equal(
+ __ Word32And(value_map_bitfield,
+ __ Int32Constant(Map::IsUndetectableBit::kMask)),
+ zero),
done, zero);
// Check if {value} is a HeapNumber.
__ GotoIf(__ WordEqual(value_map, __ HeapNumberMapConstant()),
&if_heapnumber);
+ // Check if {value} is a BigInt.
+ Node* value_instance_type =
+ __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
+ __ GotoIf(__ Word32Equal(value_instance_type, __ Int32Constant(BIGINT_TYPE)),
+ &if_bigint);
+
// All other values that reach here are true.
__ Goto(done, __ Int32Constant(1));
@@ -1174,6 +1171,15 @@ void EffectControlLinearizer::TruncateTaggedPointerToBit(
__ LoadField(AccessBuilder::ForHeapNumberValue(), value);
__ Goto(done, __ Float64LessThan(fzero, __ Float64Abs(value_value)));
}
+
+ __ Bind(&if_bigint);
+ {
+ Node* bitfield = __ LoadField(AccessBuilder::ForBigIntBitfield(), value);
+ Node* length_is_zero = __ WordEqual(
+ __ WordAnd(bitfield, __ IntPtrConstant(BigInt::LengthBits::kMask)),
+ __ IntPtrConstant(0));
+ __ Goto(done, __ Word32Equal(length_is_zero, zero));
+ }
}
Node* EffectControlLinearizer::LowerTruncateTaggedToBit(Node* node) {
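The new if_bigint branch above implements ToBoolean for BigInts: only 0n, whose digit length is zero, is falsy. A hedged sketch of the bitfield test; the mask width below is made up, the real constant is BigInt::LengthBits::kMask:

    #include <cstdint>
    #include <cstdio>

    constexpr uintptr_t kLengthMask = 0x1F;  // illustrative width only

    // ToBoolean(bigint) is false exactly when the length bits are zero (0n).
    bool BigIntToBoolean(uintptr_t bitfield) {
      return (bitfield & kLengthMask) != 0;
    }

    int main() {
      std::printf("0n -> %d, 42n -> %d\n", BigIntToBoolean(0), BigIntToBoolean(1));
    }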
@@ -1294,9 +1300,11 @@ Node* EffectControlLinearizer::LowerTruncateTaggedToFloat64(Node* node) {
Node* EffectControlLinearizer::LowerCheckBounds(Node* node, Node* frame_state) {
Node* index = node->InputAt(0);
Node* limit = node->InputAt(1);
+ const CheckParameters& params = CheckParametersOf(node->op());
Node* check = __ Uint32LessThan(index, limit);
- __ DeoptimizeIfNot(DeoptimizeReason::kOutOfBounds, check, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kOutOfBounds, params.feedback(), check,
+ frame_state);
return index;
}
@@ -1305,9 +1313,12 @@ Node* EffectControlLinearizer::LowerMaskIndexWithBound(Node* node) {
if (mask_array_index_ == kMaskArrayIndex) {
Node* limit = node->InputAt(1);
- Node* mask = __ Word32Sar(__ Word32Or(__ Int32Sub(limit, index), index),
- __ Int32Constant(31));
- mask = __ Word32Xor(mask, __ Int32Constant(-1));
+ // mask = ((index - limit) & ~index) >> 31
+ // index = index & mask
+ Node* neg_index = __ Word32Xor(index, __ Int32Constant(-1));
+ Node* mask =
+ __ Word32Sar(__ Word32And(__ Int32Sub(index, limit), neg_index),
+ __ Int32Constant(31));
index = __ Word32And(index, mask);
}
return index;
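The rewritten mask is branchless: ((index - limit) & ~index) >> 31, with an arithmetic shift, is all-ones exactly when 0 <= index < limit and zero otherwise, so a mis-speculated index collapses to 0 instead of reading out of bounds. A standalone check of the arithmetic (this assumes an arithmetic right shift for signed values, which Word32Sar guarantees in the graph and which mainstream C++ compilers provide):

    #include <cstdint>
    #include <cstdio>

    uint32_t MaskIndexWithBound(int32_t index, int32_t limit) {
      // All-ones when 0 <= index < limit; zero when index is negative or >= limit.
      int32_t mask = ((index - limit) & ~index) >> 31;
      return static_cast<uint32_t>(index & mask);
    }

    int main() {
      std::printf("%u %u %u\n",
                  MaskIndexWithBound(3, 10),    // in bounds  -> 3
                  MaskIndexWithBound(12, 10),   // too large  -> 0
                  MaskIndexWithBound(-1, 10));  // negative   -> 0
    }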
@@ -1346,10 +1357,11 @@ void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
Node* bitfield3 =
__ LoadField(AccessBuilder::ForMapBitField3(), value_map);
Node* if_not_deprecated = __ WordEqual(
- __ Word32And(bitfield3, __ Int32Constant(Map::Deprecated::kMask)),
+ __ Word32And(bitfield3,
+ __ Int32Constant(Map::IsDeprecatedBit::kMask)),
__ Int32Constant(0));
- __ DeoptimizeIf(DeoptimizeReason::kWrongMap, if_not_deprecated,
- frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kWrongMap, p.feedback(),
+ if_not_deprecated, frame_state);
Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
Runtime::FunctionId id = Runtime::kTryMigrateInstance;
@@ -1360,8 +1372,8 @@ void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
__ ExternalConstant(ExternalReference(id, isolate())),
__ Int32Constant(1), __ NoContextConstant());
Node* check = ObjectIsSmi(result);
- __ DeoptimizeIf(DeoptimizeReason::kInstanceMigrationFailed, check,
- frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kInstanceMigrationFailed, p.feedback(),
+ check, frame_state);
}
// Reload the current map of the {value}.
@@ -1372,7 +1384,8 @@ void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
Node* map = __ HeapConstant(maps[i]);
Node* check = __ WordEqual(value_map, map);
if (i == map_count - 1) {
- __ DeoptimizeIfNot(DeoptimizeReason::kWrongMap, check, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kWrongMap, p.feedback(), check,
+ frame_state);
} else {
__ GotoIf(check, &done);
}
@@ -1390,7 +1403,8 @@ void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
Node* map = __ HeapConstant(maps[i]);
Node* check = __ WordEqual(value_map, map);
if (i == map_count - 1) {
- __ DeoptimizeIfNot(DeoptimizeReason::kWrongMap, check, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kWrongMap, p.feedback(), check,
+ frame_state);
} else {
__ GotoIf(check, &done);
}
@@ -1423,6 +1437,7 @@ Node* EffectControlLinearizer::LowerCompareMaps(Node* node) {
Node* EffectControlLinearizer::LowerCheckNumber(Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
+ const CheckParameters& params = CheckParametersOf(node->op());
auto if_not_smi = __ MakeDeferredLabel();
auto done = __ MakeLabel();
@@ -1434,7 +1449,8 @@ Node* EffectControlLinearizer::LowerCheckNumber(Node* node, Node* frame_state) {
__ Bind(&if_not_smi);
Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
Node* check1 = __ WordEqual(value_map, __ HeapNumberMapConstant());
- __ DeoptimizeIfNot(DeoptimizeReason::kNotAHeapNumber, check1, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kNotAHeapNumber, params.feedback(),
+ check1, frame_state);
__ Goto(&done);
__ Bind(&done);
@@ -1452,8 +1468,8 @@ Node* EffectControlLinearizer::LowerCheckReceiver(Node* node,
STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
Node* check = __ Uint32LessThanOrEqual(
__ Uint32Constant(FIRST_JS_RECEIVER_TYPE), value_instance_type);
- __ DeoptimizeIfNot(DeoptimizeReason::kNotAJavaScriptObject, check,
- frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kNotAJavaScriptObject, VectorSlotPair(),
+ check, frame_state);
return value;
}
@@ -1464,12 +1480,14 @@ Node* EffectControlLinearizer::LowerCheckSymbol(Node* node, Node* frame_state) {
Node* check =
__ WordEqual(value_map, __ HeapConstant(factory()->symbol_map()));
- __ DeoptimizeIfNot(DeoptimizeReason::kNotASymbol, check, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kNotASymbol, VectorSlotPair(), check,
+ frame_state);
return value;
}
Node* EffectControlLinearizer::LowerCheckString(Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
+ const CheckParameters& params = CheckParametersOf(node->op());
Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
Node* value_instance_type =
@@ -1477,7 +1495,8 @@ Node* EffectControlLinearizer::LowerCheckString(Node* node, Node* frame_state) {
Node* check = __ Uint32LessThan(value_instance_type,
__ Uint32Constant(FIRST_NONSTRING_TYPE));
- __ DeoptimizeIfNot(DeoptimizeReason::kWrongInstanceType, check, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kWrongInstanceType, params.feedback(),
+ check, frame_state);
return value;
}
@@ -1494,7 +1513,8 @@ Node* EffectControlLinearizer::LowerCheckSeqString(Node* node,
value_instance_type,
__ Int32Constant(kStringRepresentationMask | kIsNotStringMask)),
__ Int32Constant(kSeqStringTag | kStringTag));
- __ DeoptimizeIfNot(DeoptimizeReason::kWrongInstanceType, check, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kWrongInstanceType, VectorSlotPair(),
+ check, frame_state);
return value;
}
@@ -1510,7 +1530,8 @@ Node* EffectControlLinearizer::LowerCheckInternalizedString(Node* node,
__ Word32And(value_instance_type,
__ Int32Constant(kIsNotStringMask | kIsNotInternalizedMask)),
__ Int32Constant(kInternalizedTag));
- __ DeoptimizeIfNot(DeoptimizeReason::kWrongInstanceType, check, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kWrongInstanceType, VectorSlotPair(),
+ check, frame_state);
return value;
}
@@ -1518,7 +1539,7 @@ Node* EffectControlLinearizer::LowerCheckInternalizedString(Node* node,
void EffectControlLinearizer::LowerCheckIf(Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
__ DeoptimizeIfNot(DeoptimizeKind::kEager, DeoptimizeReasonOf(node->op()),
- value, frame_state);
+ VectorSlotPair(), value, frame_state);
}
Node* EffectControlLinearizer::LowerCheckedInt32Add(Node* node,
@@ -1528,7 +1549,8 @@ Node* EffectControlLinearizer::LowerCheckedInt32Add(Node* node,
Node* value = __ Int32AddWithOverflow(lhs, rhs);
Node* check = __ Projection(1, value);
- __ DeoptimizeIf(DeoptimizeReason::kOverflow, check, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kOverflow, VectorSlotPair(), check,
+ frame_state);
return __ Projection(0, value);
}
@@ -1539,7 +1561,8 @@ Node* EffectControlLinearizer::LowerCheckedInt32Sub(Node* node,
Node* value = __ Int32SubWithOverflow(lhs, rhs);
Node* check = __ Projection(1, value);
- __ DeoptimizeIf(DeoptimizeReason::kOverflow, check, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kOverflow, VectorSlotPair(), check,
+ frame_state);
return __ Projection(0, value);
}
@@ -1567,11 +1590,13 @@ Node* EffectControlLinearizer::LowerCheckedInt32Div(Node* node,
// Check if {rhs} is zero.
Node* check = __ Word32Equal(rhs, zero);
- __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, check, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, VectorSlotPair(), check,
+ frame_state);
// Check if {lhs} is zero, as that would produce minus zero.
check = __ Word32Equal(lhs, zero);
- __ DeoptimizeIf(DeoptimizeReason::kMinusZero, check, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kMinusZero, VectorSlotPair(), check,
+ frame_state);
// Check if {lhs} is kMinInt and {rhs} is -1, in which case we'd have
// to return -kMinInt, which is not representable.
@@ -1584,7 +1609,8 @@ Node* EffectControlLinearizer::LowerCheckedInt32Div(Node* node,
// Check if {rhs} is -1.
Node* minusone = __ Int32Constant(-1);
Node* is_minus_one = __ Word32Equal(rhs, minusone);
- __ DeoptimizeIf(DeoptimizeReason::kOverflow, is_minus_one, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kOverflow, VectorSlotPair(), is_minus_one,
+ frame_state);
__ Goto(&minint_check_done);
__ Bind(&minint_check_done);
@@ -1597,7 +1623,8 @@ Node* EffectControlLinearizer::LowerCheckedInt32Div(Node* node,
// Check if the remainder is non-zero.
Node* check = __ Word32Equal(lhs, __ Int32Mul(rhs, value));
- __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, check, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, VectorSlotPair(), check,
+ frame_state);
return value;
}
@@ -1645,7 +1672,8 @@ Node* EffectControlLinearizer::LowerCheckedInt32Mod(Node* node,
// Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
Node* check = __ Word32Equal(vtrue0, zero);
- __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, check, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, VectorSlotPair(), check,
+ frame_state);
__ Goto(&rhs_checked, vtrue0);
}
@@ -1679,7 +1707,8 @@ Node* EffectControlLinearizer::LowerCheckedInt32Mod(Node* node,
// Check if we would have to return -0.
Node* check = __ Word32Equal(vtrue1, zero);
- __ DeoptimizeIf(DeoptimizeReason::kMinusZero, check, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kMinusZero, VectorSlotPair(), check,
+ frame_state);
__ Goto(&done, vtrue1);
}
@@ -1696,14 +1725,16 @@ Node* EffectControlLinearizer::LowerCheckedUint32Div(Node* node,
// Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
Node* check = __ Word32Equal(rhs, zero);
- __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, check, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, VectorSlotPair(), check,
+ frame_state);
// Perform the actual unsigned integer division.
Node* value = __ Uint32Div(lhs, rhs);
// Check if the remainder is non-zero.
check = __ Word32Equal(lhs, __ Int32Mul(rhs, value));
- __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, check, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, VectorSlotPair(), check,
+ frame_state);
return value;
}
@@ -1716,7 +1747,8 @@ Node* EffectControlLinearizer::LowerCheckedUint32Mod(Node* node,
// Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
Node* check = __ Word32Equal(rhs, zero);
- __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, check, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, VectorSlotPair(), check,
+ frame_state);
// Perform the actual unsigned integer modulus.
return __ Uint32Mod(lhs, rhs);
@@ -1730,7 +1762,8 @@ Node* EffectControlLinearizer::LowerCheckedInt32Mul(Node* node,
Node* projection = __ Int32MulWithOverflow(lhs, rhs);
Node* check = __ Projection(1, projection);
- __ DeoptimizeIf(DeoptimizeReason::kOverflow, check, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kOverflow, VectorSlotPair(), check,
+ frame_state);
Node* value = __ Projection(0, projection);
@@ -1745,7 +1778,8 @@ Node* EffectControlLinearizer::LowerCheckedInt32Mul(Node* node,
__ Bind(&if_zero);
// We may need to return negative zero.
Node* check_or = __ Int32LessThan(__ Word32Or(lhs, rhs), zero);
- __ DeoptimizeIf(DeoptimizeReason::kMinusZero, check_or, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kMinusZero, VectorSlotPair(), check_or,
+ frame_state);
__ Goto(&check_done);
__ Bind(&check_done);
@@ -1758,35 +1792,42 @@ Node* EffectControlLinearizer::LowerCheckedInt32ToTaggedSigned(
Node* node, Node* frame_state) {
DCHECK(SmiValuesAre31Bits());
Node* value = node->InputAt(0);
+ const CheckParameters& params = CheckParametersOf(node->op());
Node* add = __ Int32AddWithOverflow(value, value);
Node* check = __ Projection(1, add);
- __ DeoptimizeIf(DeoptimizeReason::kOverflow, check, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kOverflow, params.feedback(), check,
+ frame_state);
return __ Projection(0, add);
}
Node* EffectControlLinearizer::LowerCheckedUint32ToInt32(Node* node,
Node* frame_state) {
Node* value = node->InputAt(0);
+ const CheckParameters& params = CheckParametersOf(node->op());
Node* unsafe = __ Int32LessThan(value, __ Int32Constant(0));
- __ DeoptimizeIf(DeoptimizeReason::kLostPrecision, unsafe, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kLostPrecision, params.feedback(), unsafe,
+ frame_state);
return value;
}
Node* EffectControlLinearizer::LowerCheckedUint32ToTaggedSigned(
Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
+ const CheckParameters& params = CheckParametersOf(node->op());
Node* check = __ Uint32LessThanOrEqual(value, SmiMaxValueConstant());
- __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, check, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, params.feedback(), check,
+ frame_state);
return ChangeUint32ToSmi(value);
}
Node* EffectControlLinearizer::BuildCheckedFloat64ToInt32(
- CheckForMinusZeroMode mode, Node* value, Node* frame_state) {
+ CheckForMinusZeroMode mode, const VectorSlotPair& feedback, Node* value,
+ Node* frame_state) {
Node* value32 = __ RoundFloat64ToInt32(value);
Node* check_same = __ Float64Equal(value, __ ChangeInt32ToFloat64(value32));
- __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecisionOrNaN, check_same,
- frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecisionOrNaN, feedback,
+ check_same, frame_state);
if (mode == CheckForMinusZeroMode::kCheckForMinusZero) {
// Check if {value} is -0.
@@ -1801,7 +1842,8 @@ Node* EffectControlLinearizer::BuildCheckedFloat64ToInt32(
// In case of 0, we need to check the high bits for the IEEE -0 pattern.
Node* check_negative = __ Int32LessThan(__ Float64ExtractHighWord32(value),
__ Int32Constant(0));
- __ DeoptimizeIf(DeoptimizeReason::kMinusZero, check_negative, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kMinusZero, feedback, check_negative,
+ frame_state);
__ Goto(&check_done);
__ Bind(&check_done);
@@ -1811,22 +1853,27 @@ Node* EffectControlLinearizer::BuildCheckedFloat64ToInt32(
Node* EffectControlLinearizer::LowerCheckedFloat64ToInt32(Node* node,
Node* frame_state) {
- CheckForMinusZeroMode mode = CheckMinusZeroModeOf(node->op());
+ const CheckMinusZeroParameters& params =
+ CheckMinusZeroParametersOf(node->op());
Node* value = node->InputAt(0);
- return BuildCheckedFloat64ToInt32(mode, value, frame_state);
+ return BuildCheckedFloat64ToInt32(params.mode(), params.feedback(), value,
+ frame_state);
}
Node* EffectControlLinearizer::LowerCheckedTaggedSignedToInt32(
Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
+ const CheckParameters& params = CheckParametersOf(node->op());
Node* check = ObjectIsSmi(value);
- __ DeoptimizeIfNot(DeoptimizeReason::kNotASmi, check, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kNotASmi, params.feedback(), check,
+ frame_state);
return ChangeSmiToInt32(value);
}
Node* EffectControlLinearizer::LowerCheckedTaggedToInt32(Node* node,
Node* frame_state) {
- CheckForMinusZeroMode mode = CheckMinusZeroModeOf(node->op());
+ const CheckMinusZeroParameters& params =
+ CheckMinusZeroParametersOf(node->op());
Node* value = node->InputAt(0);
auto if_not_smi = __ MakeDeferredLabel();
@@ -1842,9 +1889,11 @@ Node* EffectControlLinearizer::LowerCheckedTaggedToInt32(Node* node,
__ Bind(&if_not_smi);
Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
Node* check_map = __ WordEqual(value_map, __ HeapNumberMapConstant());
- __ DeoptimizeIfNot(DeoptimizeReason::kNotAHeapNumber, check_map, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kNotAHeapNumber, params.feedback(),
+ check_map, frame_state);
Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
- vfalse = BuildCheckedFloat64ToInt32(mode, vfalse, frame_state);
+ vfalse = BuildCheckedFloat64ToInt32(params.mode(), params.feedback(), vfalse,
+ frame_state);
__ Goto(&done, vfalse);
__ Bind(&done);
@@ -1852,13 +1901,14 @@ Node* EffectControlLinearizer::LowerCheckedTaggedToInt32(Node* node,
}
Node* EffectControlLinearizer::BuildCheckedHeapNumberOrOddballToFloat64(
- CheckTaggedInputMode mode, Node* value, Node* frame_state) {
+ CheckTaggedInputMode mode, const VectorSlotPair& feedback, Node* value,
+ Node* frame_state) {
Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
Node* check_number = __ WordEqual(value_map, __ HeapNumberMapConstant());
switch (mode) {
case CheckTaggedInputMode::kNumber: {
- __ DeoptimizeIfNot(DeoptimizeReason::kNotAHeapNumber, check_number,
- frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kNotAHeapNumber, feedback,
+ check_number, frame_state);
break;
}
case CheckTaggedInputMode::kNumberOrOddball: {
@@ -1871,8 +1921,8 @@ Node* EffectControlLinearizer::BuildCheckedHeapNumberOrOddballToFloat64(
__ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
Node* check_oddball =
__ Word32Equal(instance_type, __ Int32Constant(ODDBALL_TYPE));
- __ DeoptimizeIfNot(DeoptimizeReason::kNotANumberOrOddball, check_oddball,
- frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kNotANumberOrOddball, feedback,
+ check_oddball, frame_state);
STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
__ Goto(&check_done);
@@ -1896,8 +1946,8 @@ Node* EffectControlLinearizer::LowerCheckedTaggedToFloat64(Node* node,
// In the Smi case, just convert to int32 and then float64.
// Otherwise, check heap numberness and load the number.
- Node* number =
- BuildCheckedHeapNumberOrOddballToFloat64(mode, value, frame_state);
+ Node* number = BuildCheckedHeapNumberOrOddballToFloat64(
+ mode, VectorSlotPair(), value, frame_state);
__ Goto(&done, number);
__ Bind(&if_smi);
@@ -1912,9 +1962,11 @@ Node* EffectControlLinearizer::LowerCheckedTaggedToFloat64(Node* node,
Node* EffectControlLinearizer::LowerCheckedTaggedToTaggedSigned(
Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
+ const CheckParameters& params = CheckParametersOf(node->op());
Node* check = ObjectIsSmi(value);
- __ DeoptimizeIfNot(DeoptimizeReason::kNotASmi, check, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kNotASmi, params.feedback(), check,
+ frame_state);
return value;
}
@@ -1922,9 +1974,11 @@ Node* EffectControlLinearizer::LowerCheckedTaggedToTaggedSigned(
Node* EffectControlLinearizer::LowerCheckedTaggedToTaggedPointer(
Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
+ const CheckParameters& params = CheckParametersOf(node->op());
Node* check = ObjectIsSmi(value);
- __ DeoptimizeIf(DeoptimizeReason::kSmi, check, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kSmi, params.feedback(), check,
+ frame_state);
return value;
}
@@ -1950,7 +2004,8 @@ Node* EffectControlLinearizer::LowerTruncateTaggedToWord32(Node* node) {
Node* EffectControlLinearizer::LowerCheckedTruncateTaggedToWord32(
Node* node, Node* frame_state) {
- CheckTaggedInputMode mode = CheckTaggedInputModeOf(node->op());
+ const CheckTaggedInputParameters& params =
+ CheckTaggedInputParametersOf(node->op());
Node* value = node->InputAt(0);
auto if_not_smi = __ MakeLabel();
@@ -1964,8 +2019,8 @@ Node* EffectControlLinearizer::LowerCheckedTruncateTaggedToWord32(
// Otherwise, check that it's a heap number or oddball and truncate the value
// to int32.
__ Bind(&if_not_smi);
- Node* number =
- BuildCheckedHeapNumberOrOddballToFloat64(mode, value, frame_state);
+ Node* number = BuildCheckedHeapNumberOrOddballToFloat64(
+ params.mode(), params.feedback(), value, frame_state);
number = __ TruncateFloat64ToWord32(number);
__ Goto(&done, number);
@@ -1980,6 +2035,19 @@ Node* EffectControlLinearizer::LowerAllocate(Node* node) {
return new_node;
}
+Node* EffectControlLinearizer::LowerNumberToString(Node* node) {
+ Node* argument = node->InputAt(0);
+
+ Callable const callable =
+ Builtins::CallableFor(isolate(), Builtins::kNumberToString);
+ Operator::Properties properties = Operator::kEliminatable;
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+ return __ Call(desc, __ HeapConstant(callable.code()), argument,
+ __ NoContextConstant());
+}
+
Node* EffectControlLinearizer::LowerObjectIsArrayBufferView(Node* node) {
Node* value = node->InputAt(0);
@@ -2039,9 +2107,10 @@ Node* EffectControlLinearizer::LowerObjectIsCallable(Node* node) {
Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
Node* value_bit_field =
__ LoadField(AccessBuilder::ForMapBitField(), value_map);
- Node* vfalse = __ Word32Equal(
- __ Int32Constant(1 << Map::kIsCallable),
- __ Word32And(value_bit_field, __ Int32Constant(1 << Map::kIsCallable)));
+ Node* vfalse =
+ __ Word32Equal(__ Int32Constant(Map::IsCallableBit::kMask),
+ __ Word32And(value_bit_field,
+ __ Int32Constant(Map::IsCallableBit::kMask)));
__ Goto(&done, vfalse);
__ Bind(&if_smi);
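The `1 << Map::kIsCallable` to `Map::IsCallableBit::kMask` rewrites scattered through this file are a mechanical migration to BitField-style constants: the mask is derived from the field's declared position and width instead of a hand-written shift. A simplified model of the pattern (field positions are invented for the example, and V8's real BitField template also takes a value type):

    #include <cstdint>
    #include <cstdio>

    template <int kShift, int kSize>
    struct BitField {
      static constexpr uint32_t kMask = ((1u << kSize) - 1) << kShift;
    };

    struct Map {
      using IsCallableBit = BitField<4, 1>;      // position is illustrative
      using IsUndetectableBit = BitField<5, 1>;  // position is illustrative
    };

    int main() {
      uint32_t bit_field = Map::IsCallableBit::kMask;  // a callable map
      bool callable = (bit_field & Map::IsCallableBit::kMask) != 0;
      std::printf("callable=%d\n", callable);
    }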
@@ -2063,10 +2132,10 @@ Node* EffectControlLinearizer::LowerObjectIsConstructor(Node* node) {
Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
Node* value_bit_field =
__ LoadField(AccessBuilder::ForMapBitField(), value_map);
- Node* vfalse =
- __ Word32Equal(__ Int32Constant(1 << Map::kIsConstructor),
- __ Word32And(value_bit_field,
- __ Int32Constant(1 << Map::kIsConstructor)));
+ Node* vfalse = __ Word32Equal(
+ __ Int32Constant(Map::IsConstructorBit::kMask),
+ __ Word32And(value_bit_field,
+ __ Int32Constant(Map::IsConstructorBit::kMask)));
__ Goto(&done, vfalse);
__ Bind(&if_smi);
@@ -2089,10 +2158,10 @@ Node* EffectControlLinearizer::LowerObjectIsDetectableCallable(Node* node) {
Node* value_bit_field =
__ LoadField(AccessBuilder::ForMapBitField(), value_map);
Node* vfalse = __ Word32Equal(
- __ Int32Constant(1 << Map::kIsCallable),
+ __ Int32Constant(Map::IsCallableBit::kMask),
__ Word32And(value_bit_field,
- __ Int32Constant((1 << Map::kIsCallable) |
- (1 << Map::kIsUndetectable))));
+ __ Int32Constant((Map::IsCallableBit::kMask) |
+ (Map::IsUndetectableBit::kMask))));
__ Goto(&done, vfalse);
__ Bind(&if_smi);
@@ -2102,6 +2171,13 @@ Node* EffectControlLinearizer::LowerObjectIsDetectableCallable(Node* node) {
return done.PhiAt(0);
}
+Node* EffectControlLinearizer::LowerNumberIsFloat64Hole(Node* node) {
+ Node* value = node->InputAt(0);
+ Node* check = __ Word32Equal(__ Float64ExtractHighWord32(value),
+ __ Int32Constant(kHoleNanUpper32));
+ return check;
+}
+
Node* EffectControlLinearizer::LowerObjectIsMinusZero(Node* node) {
Node* value = node->InputAt(0);
Node* zero = __ Int32Constant(0);
@@ -2169,9 +2245,10 @@ Node* EffectControlLinearizer::LowerObjectIsNonCallable(Node* node) {
Node* value_bit_field =
__ LoadField(AccessBuilder::ForMapBitField(), value_map);
- Node* check2 = __ Word32Equal(
- __ Int32Constant(0),
- __ Word32And(value_bit_field, __ Int32Constant(1 << Map::kIsCallable)));
+ Node* check2 =
+ __ Word32Equal(__ Int32Constant(0),
+ __ Word32And(value_bit_field,
+ __ Int32Constant(Map::IsCallableBit::kMask)));
__ Goto(&done, check2);
__ Bind(&if_primitive);
@@ -2283,9 +2360,10 @@ Node* EffectControlLinearizer::LowerObjectIsUndetectable(Node* node) {
Node* value_bit_field =
__ LoadField(AccessBuilder::ForMapBitField(), value_map);
Node* vfalse = __ Word32Equal(
- __ Word32Equal(__ Int32Constant(0),
- __ Word32And(value_bit_field,
- __ Int32Constant(1 << Map::kIsUndetectable))),
+ __ Word32Equal(
+ __ Int32Constant(0),
+ __ Word32And(value_bit_field,
+ __ Int32Constant(Map::IsUndetectableBit::kMask))),
__ Int32Constant(0));
__ Goto(&done, vfalse);
@@ -2511,6 +2589,52 @@ Node* EffectControlLinearizer::LowerNewArgumentsElements(Node* node) {
__ SmiConstant(mapped_count), __ NoContextConstant());
}
+Node* EffectControlLinearizer::LowerNewConsString(Node* node) {
+ Node* length = node->InputAt(0);
+ Node* first = node->InputAt(1);
+ Node* second = node->InputAt(2);
+
+ // Determine the instance types of {first} and {second}.
+ Node* first_map = __ LoadField(AccessBuilder::ForMap(), first);
+ Node* first_instance_type =
+ __ LoadField(AccessBuilder::ForMapInstanceType(), first_map);
+ Node* second_map = __ LoadField(AccessBuilder::ForMap(), second);
+ Node* second_instance_type =
+ __ LoadField(AccessBuilder::ForMapInstanceType(), second_map);
+
+ // Determine the proper map for the resulting ConsString.
+ // If both {first} and {second} are one-byte strings, we
+ // create a new ConsOneByteString, otherwise we create a
+ // new ConsString instead.
+ auto if_onebyte = __ MakeLabel();
+ auto if_twobyte = __ MakeLabel();
+ auto done = __ MakeLabel(MachineRepresentation::kTaggedPointer);
+ STATIC_ASSERT(kOneByteStringTag != 0);
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ Node* instance_type = __ Word32And(first_instance_type, second_instance_type);
+ Node* encoding =
+ __ Word32And(instance_type, __ Int32Constant(kStringEncodingMask));
+ __ Branch(__ Word32Equal(encoding, __ Int32Constant(kTwoByteStringTag)),
+ &if_twobyte, &if_onebyte);
+ __ Bind(&if_onebyte);
+ __ Goto(&done,
+ jsgraph()->HeapConstant(factory()->cons_one_byte_string_map()));
+ __ Bind(&if_twobyte);
+ __ Goto(&done, jsgraph()->HeapConstant(factory()->cons_string_map()));
+ __ Bind(&done);
+ Node* result_map = done.PhiAt(0);
+
+ // Allocate the resulting ConsString.
+ Node* result = __ Allocate(NOT_TENURED, __ Int32Constant(ConsString::kSize));
+ __ StoreField(AccessBuilder::ForMap(), result, result_map);
+ __ StoreField(AccessBuilder::ForNameHashField(), result,
+ jsgraph()->Int32Constant(Name::kEmptyHashField));
+ __ StoreField(AccessBuilder::ForStringLength(), result, length);
+ __ StoreField(AccessBuilder::ForConsStringFirst(), result, first);
+ __ StoreField(AccessBuilder::ForConsStringSecond(), result, second);
+ return result;
+}
+
Node* EffectControlLinearizer::LowerArrayBufferWasNeutered(Node* node) {
Node* value = node->InputAt(0);
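The encoding dispatch in LowerNewConsString above relies on kTwoByteStringTag being 0 and kOneByteStringTag being a set bit (the two STATIC_ASSERTs): AND-ing the two instance types keeps the one-byte bit only if both inputs are one-byte strings, so a single Word32And replaces two separate checks. A sketch with illustrative tag values:

    #include <cstdint>
    #include <cstdio>

    constexpr uint32_t kStringEncodingMask = 0x8;  // values illustrative
    constexpr uint32_t kOneByteStringTag = 0x8;
    constexpr uint32_t kTwoByteStringTag = 0x0;

    // One-byte result map only when *both* inputs carry the one-byte bit.
    const char* ConsStringMapFor(uint32_t first_type, uint32_t second_type) {
      uint32_t encoding = (first_type & second_type) & kStringEncodingMask;
      return encoding == kTwoByteStringTag ? "cons_string_map"
                                           : "cons_one_byte_string_map";
    }

    int main() {
      std::printf("%s\n", ConsStringMapFor(kOneByteStringTag, kOneByteStringTag));
      std::printf("%s\n", ConsStringMapFor(kOneByteStringTag, kTwoByteStringTag));
    }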
@@ -2538,6 +2662,15 @@ Node* EffectControlLinearizer::LowerSameValue(Node* node) {
__ NoContextConstant());
}
+Node* EffectControlLinearizer::LowerDeadValue(Node* node) {
+ Node* input = NodeProperties::GetValueInput(node, 0);
+ if (input->opcode() != IrOpcode::kUnreachable) {
+ Node* unreachable = __ Unreachable();
+ NodeProperties::ReplaceValueInput(node, unreachable, 0);
+ }
+ return node;
+}
+
Node* EffectControlLinearizer::LowerStringToNumber(Node* node) {
Node* string = node->InputAt(0);
@@ -2580,19 +2713,25 @@ Node* EffectControlLinearizer::LowerStringCharCodeAt(Node* node) {
__ NoContextConstant());
}
-Node* EffectControlLinearizer::LowerSeqStringCharCodeAt(Node* node) {
+Node* EffectControlLinearizer::LowerStringCodePointAt(Node* node) {
Node* receiver = node->InputAt(0);
Node* position = node->InputAt(1);
+ Callable const callable =
+ Builtins::CallableFor(isolate(), Builtins::kStringCodePointAt);
+ Operator::Properties properties = Operator::kNoThrow | Operator::kNoWrite;
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties,
+ MachineType::TaggedSigned());
+ return __ Call(desc, __ HeapConstant(callable.code()), receiver, position,
+ __ NoContextConstant());
+}
+
+Node* EffectControlLinearizer::LoadFromString(Node* receiver, Node* position,
+ Node* is_one_byte) {
auto one_byte_load = __ MakeLabel();
auto done = __ MakeLabel(MachineRepresentation::kWord32);
-
- Node* map = __ LoadField(AccessBuilder::ForMap(), receiver);
- Node* instance_type = __ LoadField(AccessBuilder::ForMapInstanceType(), map);
- Node* is_one_byte = __ Word32Equal(
- __ Word32And(instance_type, __ Int32Constant(kStringEncodingMask)),
- __ Int32Constant(kOneByteStringTag));
-
__ GotoIf(is_one_byte, &one_byte_load);
Node* two_byte_result = __ LoadElement(
AccessBuilder::ForSeqTwoByteStringCharacter(), receiver, position);
@@ -2607,6 +2746,85 @@ Node* EffectControlLinearizer::LowerSeqStringCharCodeAt(Node* node) {
return done.PhiAt(0);
}
+Node* EffectControlLinearizer::LowerSeqStringCharCodeAt(Node* node) {
+ Node* receiver = node->InputAt(0);
+ Node* position = node->InputAt(1);
+
+ Node* map = __ LoadField(AccessBuilder::ForMap(), receiver);
+ Node* instance_type = __ LoadField(AccessBuilder::ForMapInstanceType(), map);
+ Node* is_one_byte = __ Word32Equal(
+ __ Word32And(instance_type, __ Int32Constant(kStringEncodingMask)),
+ __ Int32Constant(kOneByteStringTag));
+
+ return LoadFromString(receiver, position, is_one_byte);
+}
+
+Node* EffectControlLinearizer::LowerSeqStringCodePointAt(
+ Node* node, UnicodeEncoding encoding) {
+ Node* receiver = node->InputAt(0);
+ Node* position = node->InputAt(1);
+
+ Node* map = __ LoadField(AccessBuilder::ForMap(), receiver);
+ Node* instance_type = __ LoadField(AccessBuilder::ForMapInstanceType(), map);
+ Node* is_one_byte = __ Word32Equal(
+ __ Word32And(instance_type, __ Int32Constant(kStringEncodingMask)),
+ __ Int32Constant(kOneByteStringTag));
+
+ Node* first_char_code = LoadFromString(receiver, position, is_one_byte);
+
+ auto return_result = __ MakeLabel(MachineRepresentation::kWord32);
+
+ // Check if the first character code is a lead surrogate in [0xD800, 0xDBFF].
+ Node* first_out =
+ __ Word32Equal(__ Word32And(first_char_code, __ Int32Constant(0xFC00)),
+ __ Int32Constant(0xD800));
+ // Return the first character code directly if it is not a lead surrogate.
+ __ GotoIfNot(first_out, &return_result, first_char_code);
+ // Check if position + 1 is still in range.
+ Node* length = __ LoadField(AccessBuilder::ForStringLength(), receiver);
+ Node* next_position = __ Int32Add(position, __ Int32Constant(1));
+ Node* next_position_in_range = __ Int32LessThan(next_position, length);
+ __ GotoIfNot(next_position_in_range, &return_result, first_char_code);
+
+ // Load second character code.
+ Node* second_char_code = LoadFromString(receiver, next_position, is_one_byte);
+ // Check if the second character code is a trail surrogate in [0xDC00, 0xDFFF].
+ Node* second_out =
+ __ Word32Equal(__ Word32And(second_char_code, __ Int32Constant(0xFC00)),
+ __ Int32Constant(0xDC00));
+ __ GotoIfNot(second_out, &return_result, first_char_code);
+
+ Node* result;
+ switch (encoding) {
+ case UnicodeEncoding::UTF16:
+ result = __ Word32Or(
+// Need to swap the order for big-endian platforms
+#if V8_TARGET_BIG_ENDIAN
+ __ Word32Shl(first_char_code, __ Int32Constant(16)),
+ second_char_code);
+#else
+ __ Word32Shl(second_char_code, __ Int32Constant(16)),
+ first_char_code);
+#endif
+ break;
+ case UnicodeEncoding::UTF32: {
+ // Convert UTF16 surrogate pair into |word32| code point, encoded as
+ // UTF32.
+ Node* surrogate_offset =
+ __ Int32Constant(0x10000 - (0xD800 << 10) - 0xDC00);
+
+ // (lead << 10) + trail + SURROGATE_OFFSET
+ result = __ Int32Add(__ Word32Shl(first_char_code, __ Int32Constant(10)),
+ __ Int32Add(second_char_code, surrogate_offset));
+ break;
+ }
+ }
+ __ Goto(&return_result, result);
+
+ __ Bind(&return_result);
+ return return_result.PhiAt(0);
+}
+
Node* EffectControlLinearizer::LowerStringFromCharCode(Node* node) {
Node* value = node->InputAt(0);
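The UTF32 branch above folds surrogate-pair decoding into a single add: SURROGATE_OFFSET = 0x10000 - (0xD800 << 10) - 0xDC00 removes both surrogate tags and applies the 0x10000 plane offset in one constant. A standalone check against a known pair:

    #include <cstdint>
    #include <cstdio>

    uint32_t CombineSurrogatePair(uint32_t lead, uint32_t trail) {
      // (lead << 10) + trail + SURROGATE_OFFSET, exactly as in the lowering.
      constexpr int32_t kSurrogateOffset = 0x10000 - (0xD800 << 10) - 0xDC00;
      return (lead << 10) + trail + kSurrogateOffset;
    }

    int main() {
      // U+1F600 (grinning face) is encoded as the pair 0xD83D 0xDE00.
      std::printf("0x%X\n", CombineSurrogatePair(0xD83D, 0xDE00));  // 0x1F600
    }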
@@ -2836,6 +3054,12 @@ Node* EffectControlLinearizer::LowerStringIndexOf(Node* node) {
position, __ NoContextConstant());
}
+Node* EffectControlLinearizer::LowerStringLength(Node* node) {
+ Node* subject = node->InputAt(0);
+
+ return __ LoadField(AccessBuilder::ForStringLength(), subject);
+}
+
Node* EffectControlLinearizer::LowerStringComparison(Callable const& callable,
Node* node) {
Node* lhs = node->InputAt(0);
@@ -2872,7 +3096,8 @@ Node* EffectControlLinearizer::LowerCheckFloat64Hole(Node* node,
Node* value = node->InputAt(0);
Node* check = __ Word32Equal(__ Float64ExtractHighWord32(value),
__ Int32Constant(kHoleNanUpper32));
- __ DeoptimizeIf(DeoptimizeReason::kHole, check, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kHole, VectorSlotPair(), check,
+ frame_state);
return value;
}
@@ -2881,7 +3106,8 @@ Node* EffectControlLinearizer::LowerCheckNotTaggedHole(Node* node,
Node* frame_state) {
Node* value = node->InputAt(0);
Node* check = __ WordEqual(value, __ TheHoleConstant());
- __ DeoptimizeIf(DeoptimizeReason::kHole, check, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kHole, VectorSlotPair(), check,
+ frame_state);
return value;
}
@@ -2918,8 +3144,8 @@ void EffectControlLinearizer::LowerCheckEqualsInternalizedString(
__ Bind(&if_notsame);
{
// Now {val} could still be a non-internalized String that matches {exp}.
- __ DeoptimizeIf(DeoptimizeReason::kWrongName, ObjectIsSmi(val),
- frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kWrongName, VectorSlotPair(),
+ ObjectIsSmi(val), frame_state);
Node* val_map = __ LoadField(AccessBuilder::ForMap(), val);
Node* val_instance_type =
__ LoadField(AccessBuilder::ForMapInstanceType(), val_map);
@@ -2937,7 +3163,7 @@ void EffectControlLinearizer::LowerCheckEqualsInternalizedString(
// Check that the {val} is a non-internalized String, if it's anything
// else it cannot match the recorded feedback {exp} anyways.
__ DeoptimizeIfNot(
- DeoptimizeReason::kWrongName,
+ DeoptimizeReason::kWrongName, VectorSlotPair(),
__ Word32Equal(__ Word32And(val_instance_type,
__ Int32Constant(kIsNotStringMask |
kIsNotInternalizedMask)),
@@ -2956,7 +3182,7 @@ void EffectControlLinearizer::LowerCheckEqualsInternalizedString(
__ Call(common()->Call(desc), try_internalize_string_function, val);
// Now see if the results match.
- __ DeoptimizeIfNot(DeoptimizeReason::kWrongName,
+ __ DeoptimizeIfNot(DeoptimizeReason::kWrongName, VectorSlotPair(),
__ WordEqual(exp, val_internalized), frame_state);
__ Goto(&if_same);
}
@@ -2966,7 +3192,7 @@ void EffectControlLinearizer::LowerCheckEqualsInternalizedString(
// The {val} is a ThinString, let's check the actual value.
Node* val_actual =
__ LoadField(AccessBuilder::ForThinStringActual(), val);
- __ DeoptimizeIfNot(DeoptimizeReason::kWrongName,
+ __ DeoptimizeIfNot(DeoptimizeReason::kWrongName, VectorSlotPair(),
__ WordEqual(exp, val_actual), frame_state);
__ Goto(&if_same);
}
@@ -2980,7 +3206,8 @@ void EffectControlLinearizer::LowerCheckEqualsSymbol(Node* node,
Node* exp = node->InputAt(0);
Node* val = node->InputAt(1);
Node* check = __ WordEqual(exp, val);
- __ DeoptimizeIfNot(DeoptimizeReason::kWrongName, check, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kWrongName, VectorSlotPair(), check,
+ frame_state);
}
Node* EffectControlLinearizer::AllocateHeapNumberWithValue(Node* value) {
@@ -3135,7 +3362,7 @@ Node* EffectControlLinearizer::LowerEnsureWritableFastElements(Node* node) {
Node* EffectControlLinearizer::LowerMaybeGrowFastElements(Node* node,
Node* frame_state) {
- GrowFastElementsMode mode = GrowFastElementsModeOf(node->op());
+ GrowFastElementsParameters params = GrowFastElementsParametersOf(node->op());
Node* object = node->InputAt(0);
Node* elements = node->InputAt(1);
Node* index = node->InputAt(2);
@@ -3154,7 +3381,7 @@ Node* EffectControlLinearizer::LowerMaybeGrowFastElements(Node* node,
// We need to grow the {elements} for {object}.
Operator::Properties properties = Operator::kEliminatable;
Callable callable =
- (mode == GrowFastElementsMode::kDoubleElements)
+ (params.mode() == GrowFastElementsMode::kDoubleElements)
? Builtins::CallableFor(isolate(), Builtins::kGrowFastDoubleElements)
: Builtins::CallableFor(isolate(),
Builtins::kGrowFastSmiOrObjectElements);
@@ -3166,10 +3393,8 @@ Node* EffectControlLinearizer::LowerMaybeGrowFastElements(Node* node,
ChangeInt32ToSmi(index), __ NoContextConstant());
// Ensure that we were able to grow the {elements}.
- // TODO(turbofan): We use kSmi as reason here similar to Crankshaft,
- // but maybe we should just introduce a reason that makes sense.
- __ DeoptimizeIf(DeoptimizeReason::kSmi, ObjectIsSmi(new_elements),
- frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kCouldNotGrowElements, params.feedback(),
+ ObjectIsSmi(new_elements), frame_state);
__ Goto(&done, new_elements);
__ Bind(&done);
@@ -3723,12 +3948,13 @@ void EffectControlLinearizer::LowerStoreSignedSmallElement(Node* node) {
}
void EffectControlLinearizer::LowerRuntimeAbort(Node* node) {
- BailoutReason reason = BailoutReasonOf(node->op());
+ AbortReason reason = AbortReasonOf(node->op());
Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
Runtime::FunctionId id = Runtime::kAbort;
CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
graph()->zone(), id, 1, properties, CallDescriptor::kNoFlags);
- __ Call(desc, __ CEntryStubConstant(1), jsgraph()->SmiConstant(reason),
+ __ Call(desc, __ CEntryStubConstant(1),
+ jsgraph()->SmiConstant(static_cast<int>(reason)),
__ ExternalConstant(ExternalReference(id, isolate())),
__ Int32Constant(1), __ NoContextConstant());
}
@@ -4165,14 +4391,14 @@ Node* EffectControlLinearizer::LowerFindOrderedHashMapEntry(Node* node) {
Node* EffectControlLinearizer::ComputeIntegerHash(Node* value) {
// See v8::internal::ComputeIntegerHash()
- value = __ Int32Add(__ Word32Xor(value, __ Int32Constant(0xffffffff)),
+ value = __ Int32Add(__ Word32Xor(value, __ Int32Constant(0xFFFFFFFF)),
__ Word32Shl(value, __ Int32Constant(15)));
value = __ Word32Xor(value, __ Word32Shr(value, __ Int32Constant(12)));
value = __ Int32Add(value, __ Word32Shl(value, __ Int32Constant(2)));
value = __ Word32Xor(value, __ Word32Shr(value, __ Int32Constant(4)));
value = __ Int32Mul(value, __ Int32Constant(2057));
value = __ Word32Xor(value, __ Word32Shr(value, __ Int32Constant(16)));
- value = __ Word32And(value, __ Int32Constant(0x3fffffff));
+ value = __ Word32And(value, __ Int32Constant(0x3FFFFFFF));
return value;
}
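Aside from the hex-literal restyling (0xffffffff to 0xFFFFFFFF), this hunk is v8::internal::ComputeIntegerHash lowered to machine operators: a Jenkins-style 32-bit mixer clamped to 30 bits so the result fits in a Smi. Note that ~value is expressed in the graph as value ^ 0xFFFFFFFF. A runnable equivalent:

    #include <cstdint>
    #include <cstdio>

    uint32_t ComputeIntegerHash(uint32_t value) {
      value = ~value + (value << 15);  // == (value ^ 0xFFFFFFFF) + (value << 15)
      value = value ^ (value >> 12);
      value = value + (value << 2);
      value = value ^ (value >> 4);
      value = value * 2057;
      value = value ^ (value >> 16);
      return value & 0x3FFFFFFF;       // clamp into the positive Smi range
    }

    int main() { std::printf("%u\n", ComputeIntegerHash(42)); }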
diff --git a/deps/v8/src/compiler/effect-control-linearizer.h b/deps/v8/src/compiler/effect-control-linearizer.h
index 7cf6910386..47b1586d6d 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.h
+++ b/deps/v8/src/compiler/effect-control-linearizer.h
@@ -90,6 +90,7 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* LowerTruncateTaggedToWord32(Node* node);
Node* LowerCheckedTruncateTaggedToWord32(Node* node, Node* frame_state);
Node* LowerAllocate(Node* node);
+ Node* LowerNumberToString(Node* node);
Node* LowerObjectIsArrayBufferView(Node* node);
Node* LowerObjectIsBigInt(Node* node);
Node* LowerObjectIsCallable(Node* node);
@@ -104,22 +105,28 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* LowerObjectIsString(Node* node);
Node* LowerObjectIsSymbol(Node* node);
Node* LowerObjectIsUndetectable(Node* node);
+ Node* LowerNumberIsFloat64Hole(Node* node);
Node* LowerArgumentsFrame(Node* node);
Node* LowerArgumentsLength(Node* node);
Node* LowerNewDoubleElements(Node* node);
Node* LowerNewSmiOrObjectElements(Node* node);
Node* LowerNewArgumentsElements(Node* node);
+ Node* LowerNewConsString(Node* node);
Node* LowerArrayBufferWasNeutered(Node* node);
Node* LowerSameValue(Node* node);
+ Node* LowerDeadValue(Node* node);
Node* LowerStringToNumber(Node* node);
Node* LowerStringCharAt(Node* node);
Node* LowerStringCharCodeAt(Node* node);
Node* LowerSeqStringCharCodeAt(Node* node);
+ Node* LowerStringCodePointAt(Node* node);
+ Node* LowerSeqStringCodePointAt(Node* node, UnicodeEncoding encoding);
Node* LowerStringToLowerCaseIntl(Node* node);
Node* LowerStringToUpperCaseIntl(Node* node);
Node* LowerStringFromCharCode(Node* node);
Node* LowerStringFromCodePoint(Node* node);
Node* LowerStringIndexOf(Node* node);
+ Node* LowerStringLength(Node* node);
Node* LowerStringEqual(Node* node);
Node* LowerStringLessThan(Node* node);
Node* LowerStringLessThanOrEqual(Node* node);
@@ -156,9 +163,11 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Maybe<Node*> LowerFloat64RoundTruncate(Node* node);
Node* AllocateHeapNumberWithValue(Node* node);
- Node* BuildCheckedFloat64ToInt32(CheckForMinusZeroMode mode, Node* value,
+ Node* BuildCheckedFloat64ToInt32(CheckForMinusZeroMode mode,
+ const VectorSlotPair& feedback, Node* value,
Node* frame_state);
Node* BuildCheckedHeapNumberOrOddballToFloat64(CheckTaggedInputMode mode,
+ const VectorSlotPair& feedback,
Node* value,
Node* frame_state);
Node* BuildFloat64RoundDown(Node* value);
@@ -173,6 +182,7 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* ChangeSmiToIntPtr(Node* value);
Node* ChangeSmiToInt32(Node* value);
Node* ObjectIsSmi(Node* value);
+ Node* LoadFromString(Node* receiver, Node* position, Node* is_one_byte);
Node* SmiMaxValueConstant();
Node* SmiShiftBitsConstant();
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.cc b/deps/v8/src/compiler/escape-analysis-reducer.cc
index aa2a1b2f3a..16a9d78faf 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.cc
+++ b/deps/v8/src/compiler/escape-analysis-reducer.cc
@@ -33,18 +33,39 @@ EscapeAnalysisReducer::EscapeAnalysisReducer(
arguments_elements_(zone),
zone_(zone) {}
-Node* EscapeAnalysisReducer::MaybeGuard(Node* original, Node* replacement) {
- // We might need to guard the replacement if the type of the {replacement}
- // node is not in a sub-type relation to the type of the {original} node.
+Reduction EscapeAnalysisReducer::ReplaceNode(Node* original,
+ Node* replacement) {
+ const VirtualObject* vobject =
+ analysis_result().GetVirtualObject(replacement);
+ if (replacement->opcode() == IrOpcode::kDead ||
+ (vobject && !vobject->HasEscaped())) {
+ RelaxEffectsAndControls(original);
+ return Replace(replacement);
+ }
Type* const replacement_type = NodeProperties::GetType(replacement);
Type* const original_type = NodeProperties::GetType(original);
- if (!replacement_type->Is(original_type)) {
- Node* const control = NodeProperties::GetControlInput(original);
- replacement = jsgraph()->graph()->NewNode(
- jsgraph()->common()->TypeGuard(original_type), replacement, control);
- NodeProperties::SetType(replacement, original_type);
+ if (replacement_type->Is(original_type)) {
+ RelaxEffectsAndControls(original);
+ return Replace(replacement);
}
- return replacement;
+
+ // We need to guard the replacement if we would widen the type otherwise.
+ DCHECK_EQ(1, original->op()->EffectOutputCount());
+ DCHECK_EQ(1, original->op()->EffectInputCount());
+ DCHECK_EQ(1, original->op()->ControlInputCount());
+ Node* effect = NodeProperties::GetEffectInput(original);
+ Node* control = NodeProperties::GetControlInput(original);
+ original->TrimInputCount(0);
+ original->AppendInput(jsgraph()->zone(), replacement);
+ original->AppendInput(jsgraph()->zone(), effect);
+ original->AppendInput(jsgraph()->zone(), control);
+ NodeProperties::SetType(
+ original,
+ Type::Intersect(original_type, replacement_type, jsgraph()->zone()));
+ NodeProperties::ChangeOp(original,
+ jsgraph()->common()->TypeGuard(original_type));
+ ReplaceWithValue(original, original, original, control);
+ return NoChange();
}
namespace {
@@ -74,11 +95,7 @@ Reduction EscapeAnalysisReducer::Reduce(Node* node) {
DCHECK(node->opcode() != IrOpcode::kAllocate &&
node->opcode() != IrOpcode::kFinishRegion);
DCHECK_NE(replacement, node);
- if (replacement != jsgraph()->Dead()) {
- replacement = MaybeGuard(node, replacement);
- }
- RelaxEffectsAndControls(node);
- return Replace(replacement);
+ return ReplaceNode(node, replacement);
}
switch (node->opcode()) {
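ReplaceNode now rewrites {original} in place into a TypeGuard over {replacement} whenever a plain replacement would widen the node's type, and it narrows the node's type with Type::Intersect instead of keeping only the original type. A toy model of the narrowing decision using interval "types" (not V8's Type system):

    #include <algorithm>
    #include <cstdio>

    struct Type { int min, max; };

    bool Is(Type a, Type b) { return a.min >= b.min && a.max <= b.max; }

    Type Intersect(Type a, Type b) {
      return {std::max(a.min, b.min), std::min(a.max, b.max)};
    }

    int main() {
      Type original{0, 10}, replacement{-5, 7};
      if (!Is(replacement, original)) {  // plain replacement would widen: guard
        Type guarded = Intersect(original, replacement);
        std::printf("TypeGuard range: [%d, %d]\n", guarded.min, guarded.max);
      }
    }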
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.h b/deps/v8/src/compiler/escape-analysis-reducer.h
index b89d4d03e8..29290d3a0a 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.h
+++ b/deps/v8/src/compiler/escape-analysis-reducer.h
@@ -97,7 +97,7 @@ class V8_EXPORT_PRIVATE EscapeAnalysisReducer final
void ReduceFrameStateInputs(Node* node);
Node* ReduceDeoptState(Node* node, Node* effect, Deduplicator* deduplicator);
Node* ObjectIdNode(const VirtualObject* vobject);
- Node* MaybeGuard(Node* original, Node* replacement);
+ Reduction ReplaceNode(Node* original, Node* replacement);
JSGraph* jsgraph() const { return jsgraph_; }
EscapeAnalysisResult analysis_result() const { return analysis_result_; }
diff --git a/deps/v8/src/compiler/escape-analysis.cc b/deps/v8/src/compiler/escape-analysis.cc
index b3b1abb6df..4b773136a9 100644
--- a/deps/v8/src/compiler/escape-analysis.cc
+++ b/deps/v8/src/compiler/escape-analysis.cc
@@ -223,8 +223,12 @@ class EscapeAnalysisTracker : public ZoneObject {
replacement_ = replacement;
vobject_ =
replacement ? tracker_->virtual_objects_.Get(replacement) : nullptr;
- TRACE("Set %s#%d as replacement.\n", replacement->op()->mnemonic(),
- replacement->id());
+ if (replacement) {
+ TRACE("Set %s#%d as replacement.\n", replacement->op()->mnemonic(),
+ replacement->id());
+ } else {
+ TRACE("Set nullptr as replacement.\n");
+ }
}
void MarkForDeletion() { SetReplacement(tracker_->jsgraph_->Dead()); }
@@ -248,10 +252,6 @@ class EscapeAnalysisTracker : public ZoneObject {
Node* GetReplacementOf(Node* node) { return replacements_[node]; }
Node* ResolveReplacement(Node* node) {
if (Node* replacement = GetReplacementOf(node)) {
- // Replacements cannot have replacements. This is important to ensure
- // re-visitation: If a replacement is replaced, then all nodes accessing
- // the replacement have to be updated.
- DCHECK_NULL(GetReplacementOf(replacement));
return replacement;
}
return node;
@@ -768,7 +768,12 @@ EscapeAnalysis::EscapeAnalysis(JSGraph* jsgraph, Zone* zone)
jsgraph_(jsgraph) {}
Node* EscapeAnalysisResult::GetReplacementOf(Node* node) {
- return tracker_->GetReplacementOf(node);
+ Node* replacement = tracker_->GetReplacementOf(node);
+ // Replacements cannot have replacements. This is important to ensure
+ // re-visitation: If a replacement is replaced, then all nodes accessing
+ // the replacement have to be updated.
+ if (replacement) DCHECK_NULL(tracker_->GetReplacementOf(replacement));
+ return replacement;
}
Node* EscapeAnalysisResult::GetVirtualObjectField(const VirtualObject* vobject,
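The DCHECK is not dropped, only relocated: the no-chained-replacements invariant is now enforced at the public accessor, and only when a replacement actually exists. A toy model of that invariant (hypothetical NodeId map, not V8 code):

#include <cassert>
#include <unordered_map>

using NodeId = int;
std::unordered_map<NodeId, NodeId> replacements;

NodeId GetReplacementOf(NodeId n) {
  auto it = replacements.find(n);
  if (it == replacements.end()) return 0;  // 0 means "no replacement"
  // A replacement must not itself be replaced; otherwise nodes already
  // pointing at it would silently miss the second rewrite.
  assert(replacements.find(it->second) == replacements.end());
  return it->second;
}

int main() {
  replacements[1] = 2;
  assert(GetReplacementOf(1) == 2);
  assert(GetReplacementOf(2) == 0);
  return 0;
}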
diff --git a/deps/v8/src/compiler/frame.cc b/deps/v8/src/compiler/frame.cc
index e0284c8ab4..0b6d7ac193 100644
--- a/deps/v8/src/compiler/frame.cc
+++ b/deps/v8/src/compiler/frame.cc
@@ -13,13 +13,22 @@ namespace internal {
namespace compiler {
Frame::Frame(int fixed_frame_size_in_slots)
- : frame_slot_count_(fixed_frame_size_in_slots),
+ : fixed_slot_count_(fixed_frame_size_in_slots),
+ frame_slot_count_(fixed_frame_size_in_slots),
spill_slot_count_(0),
+ return_slot_count_(0),
allocated_registers_(nullptr),
allocated_double_registers_(nullptr) {}
int Frame::AlignFrame(int alignment) {
int alignment_slots = alignment / kPointerSize;
+ // We have to align return slots separately, because they are claimed
+ // separately on the stack.
+ int return_delta =
+ alignment_slots - (return_slot_count_ & (alignment_slots - 1));
+ if (return_delta != alignment_slots) {
+ frame_slot_count_ += return_delta;
+ }
int delta = alignment_slots - (frame_slot_count_ & (alignment_slots - 1));
if (delta != alignment_slots) {
frame_slot_count_ += delta;
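The return-slot padding reuses the power-of-two mask arithmetic of the existing frame padding. A runnable check of that computation (assuming, as on ia32, kDoubleSize / kPointerSize == 2):

#include <cstdio>

// Slots needed to pad `count` up to a multiple of `alignment_slots`
// (a power of two), mirroring the delta computation in AlignFrame.
int Padding(int count, int alignment_slots) {
  int delta = alignment_slots - (count & (alignment_slots - 1));
  return delta == alignment_slots ? 0 : delta;
}

int main() {
  std::printf("%d\n", Padding(5, 2));  // prints 1: 5 slots -> 6
  std::printf("%d\n", Padding(6, 2));  // prints 0: already aligned
  return 0;
}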
diff --git a/deps/v8/src/compiler/frame.h b/deps/v8/src/compiler/frame.h
index fe8008913d..f5c36dba17 100644
--- a/deps/v8/src/compiler/frame.h
+++ b/deps/v8/src/compiler/frame.h
@@ -22,7 +22,7 @@ class CallDescriptor;
// into them. Mutable state associated with the frame is stored separately in
// FrameAccessState.
//
-// Frames are divided up into three regions.
+// Frames are divided up into four regions.
// - The first is the fixed header, which always has a constant size and can be
// predicted before code generation begins depending on the type of code being
// generated.
@@ -33,11 +33,15 @@ class CallDescriptor;
// reserved after register allocation, since its size can only be precisely
// determined after register allocation once the number of used callee-saved
// register is certain.
+// - The fourth region is a scratch area for return values from other functions
+// called, if multiple returns cannot all be passed in registers. This region
+// must be last in a stack frame, so that it is positioned immediately below
+// the stack frame of a callee that stores into it.
//
// The frame region immediately below the fixed header contains spill slots
// starting at slot 4 for JSFunctions. The callee-saved frame region below that
-// starts at 4+spill_slot_count_. Callee stack slots corresponding to
-// parameters are accessible through negative slot ids.
+// starts at 4+spill_slot_count_. Callee stack slots correspond to
+// parameters that are accessible through negative slot ids.
//
// Every slot of a caller or callee frame is accessible by the register
// allocator and gap resolver with a SpillSlotOperand containing its
@@ -73,7 +77,13 @@ class CallDescriptor;
// |- - - - - - - - -| | |
// | ... | Callee-saved |
// |- - - - - - - - -| | |
-// m+r+3 | callee-saved r | v v
+// m+r+3 | callee-saved r | v |
+// +-----------------+---- |
+// m+r+4 | return 0 | ^ |
+// |- - - - - - - - -| | |
+// | ... | Return |
+// |- - - - - - - - -| | |
+// | return q-1 | v v
// -----+-----------------+----- <-- stack ptr -------------
//
class Frame : public ZoneObject {
@@ -81,8 +91,9 @@ class Frame : public ZoneObject {
explicit Frame(int fixed_frame_size_in_slots);
inline int GetTotalFrameSlotCount() const { return frame_slot_count_; }
-
+ inline int GetFixedSlotCount() const { return fixed_slot_count_; }
inline int GetSpillSlotCount() const { return spill_slot_count_; }
+ inline int GetReturnSlotCount() const { return return_slot_count_; }
void SetAllocatedRegisters(BitVector* regs) {
DCHECK_NULL(allocated_registers_);
@@ -112,19 +123,25 @@ class Frame : public ZoneObject {
}
int AllocateSpillSlot(int width, int alignment = 0) {
+ DCHECK_EQ(frame_slot_count_,
+ fixed_slot_count_ + spill_slot_count_ + return_slot_count_);
int frame_slot_count_before = frame_slot_count_;
- if (alignment <= kPointerSize) {
- AllocateAlignedFrameSlots(width);
- } else {
- // We need to allocate more place for spill slot
- // in case we need an aligned spill slot to be
- // able to properly align start of spill slot
- // and still have enough place to hold all the
- // data
- AllocateAlignedFrameSlots(width + alignment - kPointerSize);
+ if (alignment > kPointerSize) {
+ // Slots are pointer sized, so alignment greater than a pointer size
+ // requires allocating additional slots.
+ width += alignment - kPointerSize;
}
+ AllocateAlignedFrameSlots(width);
spill_slot_count_ += frame_slot_count_ - frame_slot_count_before;
- return frame_slot_count_ - 1;
+ return frame_slot_count_ - return_slot_count_ - 1;
+ }
+
+ void EnsureReturnSlots(int count) {
+ if (count > return_slot_count_) {
+ count -= return_slot_count_;
+ frame_slot_count_ += count;
+ return_slot_count_ += count;
+ }
}
int AlignFrame(int alignment = kDoubleSize);
@@ -152,8 +169,10 @@ class Frame : public ZoneObject {
}
private:
+ int fixed_slot_count_;
int frame_slot_count_;
int spill_slot_count_;
+ int return_slot_count_;
BitVector* allocated_registers_;
BitVector* allocated_double_registers_;
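The DCHECK added to AllocateSpillSlot spells out the invariant behind the two new fields: total frame slots = fixed + spill + return, with spill-slot indices now skipping the return area at the bottom. A toy model of that accounting and of EnsureReturnSlots' grow-only behavior (not V8 code; the real Frame stores frame_slot_count_ rather than deriving it):

#include <cassert>

struct ToyFrame {
  int fixed = 4, spill = 0, ret = 0;
  int total() const { return fixed + spill + ret; }
  void EnsureReturnSlots(int count) {
    if (count > ret) ret = count;  // grow-only, like the patch
  }
};

int main() {
  ToyFrame f;
  f.EnsureReturnSlots(2);
  f.EnsureReturnSlots(1);  // never shrinks
  assert(f.ret == 2 && f.total() == 6);
  return 0;
}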
diff --git a/deps/v8/src/compiler/gap-resolver.cc b/deps/v8/src/compiler/gap-resolver.cc
index 3dc1ee27c9..4542a73685 100644
--- a/deps/v8/src/compiler/gap-resolver.cc
+++ b/deps/v8/src/compiler/gap-resolver.cc
@@ -5,7 +5,6 @@
#include "src/compiler/gap-resolver.h"
#include <algorithm>
-#include <functional>
#include <set>
namespace v8 {
@@ -19,10 +18,6 @@ namespace {
const int kFloat32Bit = REP_BIT(MachineRepresentation::kFloat32);
const int kFloat64Bit = REP_BIT(MachineRepresentation::kFloat64);
-inline bool Blocks(MoveOperands* move, InstructionOperand destination) {
- return !move->IsEliminated() && move->source().InterferesWith(destination);
-}
-
// Splits a FP move between two location operands into the equivalent series of
// moves between smaller sub-operands, e.g. a double move to two single moves.
// This helps reduce the number of cycles that would normally occur under FP
@@ -53,7 +48,7 @@ MoveOperands* Split(MoveOperands* move, MachineRepresentation smaller_rep,
src_index = src_loc.register_code() * aliases;
} else {
src_index = src_loc.index();
- // For operands that occuply multiple slots, the index refers to the last
+ // For operands that occupy multiple slots, the index refers to the last
// slot. On little-endian architectures, we start at the high slot and use a
// negative step so that register-to-slot moves are in the correct order.
src_step = -slot_size;
@@ -197,8 +192,11 @@ void GapResolver::PerformMove(ParallelMove* moves, MoveOperands* move) {
// The move may be blocked on a (at most one) pending move, in which case we
// have a cycle. Search for such a blocking move and perform a swap to
// resolve it.
- auto blocker = std::find_if(moves->begin(), moves->end(),
- std::bind2nd(std::ptr_fun(&Blocks), destination));
+ auto blocker =
+ std::find_if(moves->begin(), moves->end(), [&](MoveOperands* move) {
+ return !move->IsEliminated() &&
+ move->source().InterferesWith(destination);
+ });
if (blocker == moves->end()) {
// The easy case: This move is not blocked.
assembler_->AssembleMove(&source, &destination);
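The lambda is a straight modernization: std::bind2nd and std::ptr_fun were deprecated in C++11 and removed in C++17, so the bound second argument becomes a capture. A minimal before/after on toy types (not the real InstructionOperand machinery):

#include <algorithm>
#include <vector>

bool Blocks(int move, int destination) { return move == destination; }

int main() {
  std::vector<int> moves = {1, 2, 3};
  int destination = 2;
  // Old style, rejected by C++17 compilers:
  //   std::find_if(moves.begin(), moves.end(),
  //                std::bind2nd(std::ptr_fun(&Blocks), destination));
  auto blocker = std::find_if(moves.begin(), moves.end(), [&](int move) {
    return Blocks(move, destination);
  });
  return blocker == moves.end() ? 1 : 0;  // finds 2, so returns 0
}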
diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc
index 50001976a9..a0b2e0ff0a 100644
--- a/deps/v8/src/compiler/graph-assembler.cc
+++ b/deps/v8/src/compiler/graph-assembler.cc
@@ -134,6 +134,11 @@ Node* GraphAssembler::DebugBreak() {
current_effect_, current_control_);
}
+Node* GraphAssembler::Unreachable() {
+ return current_effect_ = graph()->NewNode(common()->Unreachable(),
+ current_effect_, current_control_);
+}
+
Node* GraphAssembler::Store(StoreRepresentation rep, Node* object, Node* offset,
Node* value) {
return current_effect_ =
@@ -164,24 +169,33 @@ Node* GraphAssembler::ToNumber(Node* value) {
value, NoContextConstant(), current_effect_);
}
-Node* GraphAssembler::DeoptimizeIf(DeoptimizeReason reason, Node* condition,
- Node* frame_state) {
+Node* GraphAssembler::BitcastWordToTagged(Node* value) {
+ return current_effect_ =
+ graph()->NewNode(machine()->BitcastWordToTagged(), value,
+ current_effect_, current_control_);
+}
+
+Node* GraphAssembler::DeoptimizeIf(DeoptimizeReason reason,
+ VectorSlotPair const& feedback,
+ Node* condition, Node* frame_state) {
return current_control_ = current_effect_ = graph()->NewNode(
- common()->DeoptimizeIf(DeoptimizeKind::kEager, reason), condition,
- frame_state, current_effect_, current_control_);
+ common()->DeoptimizeIf(DeoptimizeKind::kEager, reason, feedback),
+ condition, frame_state, current_effect_, current_control_);
}
Node* GraphAssembler::DeoptimizeIfNot(DeoptimizeKind kind,
- DeoptimizeReason reason, Node* condition,
- Node* frame_state) {
+ DeoptimizeReason reason,
+ VectorSlotPair const& feedback,
+ Node* condition, Node* frame_state) {
return current_control_ = current_effect_ = graph()->NewNode(
- common()->DeoptimizeUnless(kind, reason), condition, frame_state,
- current_effect_, current_control_);
+ common()->DeoptimizeUnless(kind, reason, feedback), condition,
+ frame_state, current_effect_, current_control_);
}
-Node* GraphAssembler::DeoptimizeIfNot(DeoptimizeReason reason, Node* condition,
- Node* frame_state) {
- return DeoptimizeIfNot(DeoptimizeKind::kEager, reason, condition,
+Node* GraphAssembler::DeoptimizeIfNot(DeoptimizeReason reason,
+ VectorSlotPair const& feedback,
+ Node* condition, Node* frame_state) {
+ return DeoptimizeIfNot(DeoptimizeKind::kEager, reason, feedback, condition,
frame_state);
}
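Both new helpers follow GraphAssembler's effect-threading idiom: every emitted node takes current_effect_ as an input and then becomes the new effect tail, so later emissions are ordered after it. A standalone sketch of that discipline (toy Node type, not V8's):

#include <string>
#include <vector>

struct Node {
  std::string op;
  Node* effect;  // previous node in the effect chain
};

struct ToyAssembler {
  std::vector<Node*> nodes;
  Node* current_effect_ = nullptr;
  Node* Emit(const std::string& op) {
    // `current_effect_ = graph()->NewNode(..., current_effect_, ...)`
    // in the real assembler; each node consumes and extends the chain.
    nodes.push_back(new Node{op, current_effect_});
    return current_effect_ = nodes.back();
  }
};

int main() {
  ToyAssembler a;
  a.Emit("Store");
  Node* u = a.Emit("Unreachable");
  return (u->effect && u->effect->op == "Store") ? 0 : 1;  // returns 0
}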
diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h
index 3d3c2ed103..9ae74d0df5 100644
--- a/deps/v8/src/compiler/graph-assembler.h
+++ b/deps/v8/src/compiler/graph-assembler.h
@@ -8,6 +8,7 @@
#include "src/compiler/js-graph.h"
#include "src/compiler/node.h"
#include "src/compiler/simplified-operator.h"
+#include "src/vector-slot-pair.h"
namespace v8 {
namespace internal {
@@ -28,8 +29,7 @@ namespace compiler {
V(RoundFloat64ToInt32) \
V(TruncateFloat64ToWord32) \
V(Float64ExtractHighWord32) \
- V(Float64Abs) \
- V(BitcastWordToTagged)
+ V(Float64Abs)
#define PURE_ASSEMBLER_MACH_BINOP_LIST(V) \
V(WordShl) \
@@ -193,9 +193,12 @@ class GraphAssembler {
// Debugging
Node* DebugBreak();
+ Node* Unreachable();
+
Node* Float64RoundDown(Node* value);
Node* ToNumber(Node* value);
+ Node* BitcastWordToTagged(Node* value);
Node* Allocate(PretenureFlag pretenure, Node* size);
Node* LoadField(FieldAccess const&, Node* object);
Node* LoadElement(ElementAccess const&, Node* object, Node* index);
@@ -209,12 +212,13 @@ class GraphAssembler {
Node* Retain(Node* buffer);
Node* UnsafePointerAdd(Node* base, Node* external);
- Node* DeoptimizeIf(DeoptimizeReason reason, Node* condition,
- Node* frame_state);
+ Node* DeoptimizeIf(DeoptimizeReason reason, VectorSlotPair const& feedback,
+ Node* condition, Node* frame_state);
Node* DeoptimizeIfNot(DeoptimizeKind kind, DeoptimizeReason reason,
- Node* condition, Node* frame_state);
- Node* DeoptimizeIfNot(DeoptimizeReason reason, Node* condition,
+ VectorSlotPair const& feedback, Node* condition,
Node* frame_state);
+ Node* DeoptimizeIfNot(DeoptimizeReason reason, VectorSlotPair const& feedback,
+ Node* condition, Node* frame_state);
template <typename... Args>
Node* Call(const CallDescriptor* desc, Args... args);
template <typename... Args>
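The VectorSlotPair threaded through every Deoptimize* helper tells the runtime which feedback slot to charge when the check fails. A rough stand-in for what such a pair carries (field types here are illustrative; the real class in src/vector-slot-pair.h wraps a FeedbackVector handle and a FeedbackSlot):

struct VectorSlotPair {
  const void* vector = nullptr;  // feedback vector; null means "none"
  int slot = -1;
  bool IsValid() const { return vector != nullptr; }
};

int main() {
  VectorSlotPair no_feedback;  // default: deoptimize without feedback
  return no_feedback.IsValid() ? 1 : 0;  // returns 0
}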
diff --git a/deps/v8/src/compiler/graph-trimmer.h b/deps/v8/src/compiler/graph-trimmer.h
index e57dc18b5e..edabae0b8a 100644
--- a/deps/v8/src/compiler/graph-trimmer.h
+++ b/deps/v8/src/compiler/graph-trimmer.h
@@ -15,7 +15,6 @@ namespace compiler {
// Forward declarations.
class Graph;
-
// Trims dead nodes from the node graph.
class V8_EXPORT_PRIVATE GraphTrimmer final {
public:
diff --git a/deps/v8/src/compiler/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
index 8e9505bae1..47ded6a30c 100644
--- a/deps/v8/src/compiler/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
@@ -175,17 +175,6 @@ bool HasImmediateInput(Instruction* instr, size_t index) {
return instr->InputAt(index)->IsImmediate();
}
-class OutOfLineLoadZero final : public OutOfLineCode {
- public:
- OutOfLineLoadZero(CodeGenerator* gen, Register result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final { __ xor_(result_, result_); }
-
- private:
- Register const result_;
-};
-
class OutOfLineLoadFloat32NaN final : public OutOfLineCode {
public:
OutOfLineLoadFloat32NaN(CodeGenerator* gen, XMMRegister result)
@@ -298,425 +287,6 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
} // namespace
-#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, OutOfLineLoadNaN, \
- SingleOrDouble) \
- do { \
- auto result = i.OutputDoubleRegister(); \
- if (instr->InputAt(0)->IsRegister()) { \
- auto offset = i.InputRegister(0); \
- if (instr->InputAt(1)->IsRegister()) { \
- __ cmp(offset, i.InputRegister(1)); \
- } else { \
- __ cmp(offset, i.InputImmediate(1)); \
- } \
- OutOfLineCode* ool = new (zone()) OutOfLineLoadNaN(this, result); \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(result, i.MemoryOperand(2)); \
- __ bind(ool->exit()); \
- } else { \
- auto index2 = i.InputInt32(0); \
- auto length = i.InputInt32(1); \
- auto index1 = i.InputRegister(2); \
- RelocInfo::Mode rmode_length = i.ToConstant(instr->InputAt(1)).rmode(); \
- RelocInfo::Mode rmode_buffer = i.ToConstant(instr->InputAt(3)).rmode(); \
- DCHECK_LE(index2, length); \
- __ cmp(index1, Immediate(reinterpret_cast<Address>(length - index2), \
- rmode_length)); \
- class OutOfLineLoadFloat final : public OutOfLineCode { \
- public: \
- OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result, \
- Register buffer, Register index1, int32_t index2, \
- int32_t length, RelocInfo::Mode rmode_length, \
- RelocInfo::Mode rmode_buffer) \
- : OutOfLineCode(gen), \
- result_(result), \
- buffer_reg_(buffer), \
- buffer_int_(0), \
- index1_(index1), \
- index2_(index2), \
- length_(length), \
- rmode_length_(rmode_length), \
- rmode_buffer_(rmode_buffer) {} \
- \
- OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result, \
- int32_t buffer, Register index1, int32_t index2, \
- int32_t length, RelocInfo::Mode rmode_length, \
- RelocInfo::Mode rmode_buffer) \
- : OutOfLineCode(gen), \
- result_(result), \
- buffer_reg_(no_reg), \
- buffer_int_(buffer), \
- index1_(index1), \
- index2_(index2), \
- length_(length), \
- rmode_length_(rmode_length), \
- rmode_buffer_(rmode_buffer) {} \
- \
- void Generate() final { \
- Label oob; \
- __ push(index1_); \
- __ lea(index1_, Operand(index1_, index2_)); \
- __ cmp(index1_, Immediate(reinterpret_cast<Address>(length_), \
- rmode_length_)); \
- __ j(above_equal, &oob, Label::kNear); \
- if (buffer_reg_.is_valid()) { \
- __ asm_instr(result_, Operand(buffer_reg_, index1_, times_1, 0)); \
- } else { \
- __ asm_instr(result_, \
- Operand(index1_, buffer_int_, rmode_buffer_)); \
- } \
- __ pop(index1_); \
- __ jmp(exit()); \
- __ bind(&oob); \
- __ pop(index1_); \
- __ xorp##SingleOrDouble(result_, result_); \
- __ divs##SingleOrDouble(result_, result_); \
- } \
- \
- private: \
- XMMRegister const result_; \
- Register const buffer_reg_; \
- int32_t const buffer_int_; \
- Register const index1_; \
- int32_t const index2_; \
- int32_t const length_; \
- RelocInfo::Mode rmode_length_; \
- RelocInfo::Mode rmode_buffer_; \
- }; \
- if (instr->InputAt(3)->IsRegister()) { \
- auto buffer = i.InputRegister(3); \
- OutOfLineCode* ool = new (zone()) \
- OutOfLineLoadFloat(this, result, buffer, index1, index2, length, \
- rmode_length, rmode_buffer); \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(result, Operand(buffer, index1, times_1, index2)); \
- __ bind(ool->exit()); \
- } else { \
- auto buffer = i.InputInt32(3); \
- OutOfLineCode* ool = new (zone()) \
- OutOfLineLoadFloat(this, result, buffer, index1, index2, length, \
- rmode_length, rmode_buffer); \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(result, Operand(index1, buffer + index2, rmode_buffer)); \
- __ bind(ool->exit()); \
- } \
- } \
- } while (false)
-
-#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
- do { \
- auto result = i.OutputRegister(); \
- if (instr->InputAt(0)->IsRegister()) { \
- auto offset = i.InputRegister(0); \
- if (instr->InputAt(1)->IsRegister()) { \
- __ cmp(offset, i.InputRegister(1)); \
- } else { \
- __ cmp(offset, i.InputImmediate(1)); \
- } \
- OutOfLineCode* ool = new (zone()) OutOfLineLoadZero(this, result); \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(result, i.MemoryOperand(2)); \
- __ bind(ool->exit()); \
- } else { \
- auto index2 = i.InputInt32(0); \
- auto length = i.InputInt32(1); \
- auto index1 = i.InputRegister(2); \
- RelocInfo::Mode rmode_length = i.ToConstant(instr->InputAt(1)).rmode(); \
- RelocInfo::Mode rmode_buffer = i.ToConstant(instr->InputAt(3)).rmode(); \
- DCHECK_LE(index2, length); \
- __ cmp(index1, Immediate(reinterpret_cast<Address>(length - index2), \
- rmode_length)); \
- class OutOfLineLoadInteger final : public OutOfLineCode { \
- public: \
- OutOfLineLoadInteger(CodeGenerator* gen, Register result, \
- Register buffer, Register index1, int32_t index2, \
- int32_t length, RelocInfo::Mode rmode_length, \
- RelocInfo::Mode rmode_buffer) \
- : OutOfLineCode(gen), \
- result_(result), \
- buffer_reg_(buffer), \
- buffer_int_(0), \
- index1_(index1), \
- index2_(index2), \
- length_(length), \
- rmode_length_(rmode_length), \
- rmode_buffer_(rmode_buffer) {} \
- \
- OutOfLineLoadInteger(CodeGenerator* gen, Register result, \
- int32_t buffer, Register index1, int32_t index2, \
- int32_t length, RelocInfo::Mode rmode_length, \
- RelocInfo::Mode rmode_buffer) \
- : OutOfLineCode(gen), \
- result_(result), \
- buffer_reg_(no_reg), \
- buffer_int_(buffer), \
- index1_(index1), \
- index2_(index2), \
- length_(length), \
- rmode_length_(rmode_length), \
- rmode_buffer_(rmode_buffer) {} \
- \
- void Generate() final { \
- Label oob; \
- bool need_cache = result_ != index1_; \
- if (need_cache) __ push(index1_); \
- __ lea(index1_, Operand(index1_, index2_)); \
- __ cmp(index1_, Immediate(reinterpret_cast<Address>(length_), \
- rmode_length_)); \
- __ j(above_equal, &oob, Label::kNear); \
- if (buffer_reg_.is_valid()) { \
- __ asm_instr(result_, Operand(buffer_reg_, index1_, times_1, 0)); \
- } else { \
- __ asm_instr(result_, \
- Operand(index1_, buffer_int_, rmode_buffer_)); \
- } \
- if (need_cache) __ pop(index1_); \
- __ jmp(exit()); \
- __ bind(&oob); \
- if (need_cache) __ pop(index1_); \
- __ xor_(result_, result_); \
- } \
- \
- private: \
- Register const result_; \
- Register const buffer_reg_; \
- int32_t const buffer_int_; \
- Register const index1_; \
- int32_t const index2_; \
- int32_t const length_; \
- RelocInfo::Mode rmode_length_; \
- RelocInfo::Mode rmode_buffer_; \
- }; \
- if (instr->InputAt(3)->IsRegister()) { \
- auto buffer = i.InputRegister(3); \
- OutOfLineCode* ool = new (zone()) \
- OutOfLineLoadInteger(this, result, buffer, index1, index2, length, \
- rmode_length, rmode_buffer); \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(result, Operand(buffer, index1, times_1, index2)); \
- __ bind(ool->exit()); \
- } else { \
- auto buffer = i.InputInt32(3); \
- OutOfLineCode* ool = new (zone()) \
- OutOfLineLoadInteger(this, result, buffer, index1, index2, length, \
- rmode_length, rmode_buffer); \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(result, Operand(index1, buffer + index2, rmode_buffer)); \
- __ bind(ool->exit()); \
- } \
- } \
- } while (false)
-
-#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr) \
- do { \
- auto value = i.InputDoubleRegister(2); \
- if (instr->InputAt(0)->IsRegister()) { \
- auto offset = i.InputRegister(0); \
- if (instr->InputAt(1)->IsRegister()) { \
- __ cmp(offset, i.InputRegister(1)); \
- } else { \
- __ cmp(offset, i.InputImmediate(1)); \
- } \
- Label done; \
- __ j(above_equal, &done, Label::kNear); \
- __ asm_instr(i.MemoryOperand(3), value); \
- __ bind(&done); \
- } else { \
- auto index2 = i.InputInt32(0); \
- auto length = i.InputInt32(1); \
- auto index1 = i.InputRegister(3); \
- RelocInfo::Mode rmode_length = i.ToConstant(instr->InputAt(1)).rmode(); \
- RelocInfo::Mode rmode_buffer = i.ToConstant(instr->InputAt(4)).rmode(); \
- DCHECK_LE(index2, length); \
- __ cmp(index1, Immediate(reinterpret_cast<Address>(length - index2), \
- rmode_length)); \
- class OutOfLineStoreFloat final : public OutOfLineCode { \
- public: \
- OutOfLineStoreFloat(CodeGenerator* gen, Register buffer, \
- Register index1, int32_t index2, int32_t length, \
- XMMRegister value, RelocInfo::Mode rmode_length, \
- RelocInfo::Mode rmode_buffer) \
- : OutOfLineCode(gen), \
- buffer_reg_(buffer), \
- buffer_int_(0), \
- index1_(index1), \
- index2_(index2), \
- length_(length), \
- value_(value), \
- rmode_length_(rmode_length), \
- rmode_buffer_(rmode_buffer) {} \
- \
- OutOfLineStoreFloat(CodeGenerator* gen, int32_t buffer, \
- Register index1, int32_t index2, int32_t length, \
- XMMRegister value, RelocInfo::Mode rmode_length, \
- RelocInfo::Mode rmode_buffer) \
- : OutOfLineCode(gen), \
- buffer_reg_(no_reg), \
- buffer_int_(buffer), \
- index1_(index1), \
- index2_(index2), \
- length_(length), \
- value_(value), \
- rmode_length_(rmode_length), \
- rmode_buffer_(rmode_buffer) {} \
- \
- void Generate() final { \
- Label oob; \
- __ push(index1_); \
- __ lea(index1_, Operand(index1_, index2_)); \
- __ cmp(index1_, Immediate(reinterpret_cast<Address>(length_), \
- rmode_length_)); \
- __ j(above_equal, &oob, Label::kNear); \
- if (buffer_reg_.is_valid()) { \
- __ asm_instr(Operand(buffer_reg_, index1_, times_1, 0), value_); \
- } else { \
- __ asm_instr(Operand(index1_, buffer_int_, rmode_buffer_), \
- value_); \
- } \
- __ bind(&oob); \
- __ pop(index1_); \
- } \
- \
- private: \
- Register const buffer_reg_; \
- int32_t const buffer_int_; \
- Register const index1_; \
- int32_t const index2_; \
- int32_t const length_; \
- XMMRegister const value_; \
- RelocInfo::Mode rmode_length_; \
- RelocInfo::Mode rmode_buffer_; \
- }; \
- if (instr->InputAt(4)->IsRegister()) { \
- auto buffer = i.InputRegister(4); \
- OutOfLineCode* ool = new (zone()) \
- OutOfLineStoreFloat(this, buffer, index1, index2, length, value, \
- rmode_length, rmode_buffer); \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(Operand(buffer, index1, times_1, index2), value); \
- __ bind(ool->exit()); \
- } else { \
- auto buffer = i.InputInt32(4); \
- OutOfLineCode* ool = new (zone()) \
- OutOfLineStoreFloat(this, buffer, index1, index2, length, value, \
- rmode_length, rmode_buffer); \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(Operand(index1, buffer + index2, rmode_buffer), value); \
- __ bind(ool->exit()); \
- } \
- } \
- } while (false)
-
-#define ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Value) \
- do { \
- if (instr->InputAt(0)->IsRegister()) { \
- auto offset = i.InputRegister(0); \
- if (instr->InputAt(1)->IsRegister()) { \
- __ cmp(offset, i.InputRegister(1)); \
- } else { \
- __ cmp(offset, i.InputImmediate(1)); \
- } \
- Label done; \
- __ j(above_equal, &done, Label::kNear); \
- __ asm_instr(i.MemoryOperand(3), value); \
- __ bind(&done); \
- } else { \
- auto index2 = i.InputInt32(0); \
- auto length = i.InputInt32(1); \
- auto index1 = i.InputRegister(3); \
- RelocInfo::Mode rmode_length = i.ToConstant(instr->InputAt(1)).rmode(); \
- RelocInfo::Mode rmode_buffer = i.ToConstant(instr->InputAt(4)).rmode(); \
- DCHECK_LE(index2, length); \
- __ cmp(index1, Immediate(reinterpret_cast<Address>(length - index2), \
- rmode_length)); \
- class OutOfLineStoreInteger final : public OutOfLineCode { \
- public: \
- OutOfLineStoreInteger(CodeGenerator* gen, Register buffer, \
- Register index1, int32_t index2, int32_t length, \
- Value value, RelocInfo::Mode rmode_length, \
- RelocInfo::Mode rmode_buffer) \
- : OutOfLineCode(gen), \
- buffer_reg_(buffer), \
- buffer_int_(0), \
- index1_(index1), \
- index2_(index2), \
- length_(length), \
- value_(value), \
- rmode_length_(rmode_length), \
- rmode_buffer_(rmode_buffer) {} \
- \
- OutOfLineStoreInteger(CodeGenerator* gen, int32_t buffer, \
- Register index1, int32_t index2, int32_t length, \
- Value value, RelocInfo::Mode rmode_length, \
- RelocInfo::Mode rmode_buffer) \
- : OutOfLineCode(gen), \
- buffer_reg_(no_reg), \
- buffer_int_(buffer), \
- index1_(index1), \
- index2_(index2), \
- length_(length), \
- value_(value), \
- rmode_length_(rmode_length), \
- rmode_buffer_(rmode_buffer) {} \
- \
- void Generate() final { \
- Label oob; \
- __ push(index1_); \
- __ lea(index1_, Operand(index1_, index2_)); \
- __ cmp(index1_, Immediate(reinterpret_cast<Address>(length_), \
- rmode_length_)); \
- __ j(above_equal, &oob, Label::kNear); \
- if (buffer_reg_.is_valid()) { \
- __ asm_instr(Operand(buffer_reg_, index1_, times_1, 0), value_); \
- } else { \
- __ asm_instr(Operand(index1_, buffer_int_, rmode_buffer_), \
- value_); \
- } \
- __ bind(&oob); \
- __ pop(index1_); \
- } \
- \
- private: \
- Register const buffer_reg_; \
- int32_t const buffer_int_; \
- Register const index1_; \
- int32_t const index2_; \
- int32_t const length_; \
- Value const value_; \
- RelocInfo::Mode rmode_length_; \
- RelocInfo::Mode rmode_buffer_; \
- }; \
- if (instr->InputAt(4)->IsRegister()) { \
- auto buffer = i.InputRegister(4); \
- OutOfLineCode* ool = new (zone()) \
- OutOfLineStoreInteger(this, buffer, index1, index2, length, value, \
- rmode_length, rmode_buffer); \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(Operand(buffer, index1, times_1, index2), value); \
- __ bind(ool->exit()); \
- } else { \
- auto buffer = i.InputInt32(4); \
- OutOfLineCode* ool = new (zone()) \
- OutOfLineStoreInteger(this, buffer, index1, index2, length, value, \
- rmode_length, rmode_buffer); \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(Operand(index1, buffer + index2, rmode_buffer), value); \
- __ bind(ool->exit()); \
- } \
- } \
- } while (false)
-
-#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
- do { \
- if (instr->InputAt(2)->IsRegister()) { \
- Register value = i.InputRegister(2); \
- ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Register); \
- } else { \
- Immediate value = i.InputImmediate(2); \
- ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Immediate); \
- } \
- } while (false)
-
#define ASSEMBLE_COMPARE(asm_instr) \
do { \
if (AddressingModeField::decode(instr->opcode()) != kMode_None) { \
@@ -1025,7 +595,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
__ cmp(esi, FieldOperand(func, JSFunction::kContextOffset));
- __ Assert(equal, kWrongFunctionContext);
+ __ Assert(equal, AbortReason::kWrongFunctionContext);
}
__ mov(ecx, FieldOperand(func, JSFunction::kCodeOffset));
__ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
@@ -1449,6 +1019,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kIA32Popcnt:
__ Popcnt(i.OutputRegister(), i.InputOperand(0));
break;
+ case kLFence:
+ __ lfence();
+ break;
case kSSEFloat32Cmp:
__ ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
break;
@@ -1892,6 +1465,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ movss(operand, i.InputDoubleRegister(index));
}
break;
+ case kIA32Movdqu:
+ if (instr->HasOutput()) {
+ __ Movdqu(i.OutputSimd128Register(), i.MemoryOperand());
+ } else {
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ Movdqu(operand, i.InputSimd128Register(index));
+ }
+ break;
case kIA32BitcastFI:
if (instr->InputAt(0)->IsFPStackSlot()) {
__ mov(i.OutputRegister(), i.InputOperand(0));
@@ -1978,6 +1560,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
}
break;
+ case kIA32PushSimd128:
+ if (instr->InputAt(0)->IsFPRegister()) {
+ __ sub(esp, Immediate(kSimd128Size));
+ __ movups(Operand(esp, 0), i.InputSimd128Register(0));
+ } else {
+ __ movups(kScratchDoubleReg, i.InputOperand(0));
+ __ sub(esp, Immediate(kSimd128Size));
+ __ movups(Operand(esp, 0), kScratchDoubleReg);
+ }
+ frame_access_state()->IncreaseSPDelta(kSimd128Size / kPointerSize);
+ break;
case kIA32Push:
if (AddressingModeField::decode(instr->opcode()) != kMode_None) {
size_t index = 0;
@@ -1997,7 +1590,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kIA32Poke: {
- int const slot = MiscField::decode(instr->opcode());
+ int slot = MiscField::decode(instr->opcode());
if (HasImmediateInput(instr, 0)) {
__ mov(Operand(esp, slot * kPointerSize), i.InputImmediate(0));
} else {
@@ -2005,6 +1598,214 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
+ case kIA32Peek: {
+ int reverse_slot = i.InputInt32(0) + 1;
+ int offset =
+ FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
+ if (instr->OutputAt(0)->IsFPRegister()) {
+ LocationOperand* op = LocationOperand::cast(instr->OutputAt(0));
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ movsd(i.OutputDoubleRegister(), Operand(ebp, offset));
+ } else {
+ DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
+ __ movss(i.OutputFloatRegister(), Operand(ebp, offset));
+ }
+ } else {
+ __ mov(i.OutputRegister(), Operand(ebp, offset));
+ }
+ break;
+ }
+ case kSSEF32x4Splat: {
+ DCHECK_EQ(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ XMMRegister dst = i.OutputSimd128Register();
+ __ shufps(dst, dst, 0x0);
+ break;
+ }
+ case kAVXF32x4Splat: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ XMMRegister src = i.InputFloatRegister(0);
+ __ vshufps(i.OutputSimd128Register(), src, src, 0x0);
+ break;
+ }
+ case kSSEF32x4ExtractLane: {
+ DCHECK_EQ(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ XMMRegister dst = i.OutputFloatRegister();
+ int8_t lane = i.InputInt8(1);
+ if (lane != 0) {
+ DCHECK_LT(lane, 4);
+ __ shufps(dst, dst, lane);
+ }
+ break;
+ }
+ case kAVXF32x4ExtractLane: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ XMMRegister dst = i.OutputFloatRegister();
+ XMMRegister src = i.InputSimd128Register(0);
+ int8_t lane = i.InputInt8(1);
+ if (lane == 0) {
+ if (dst != src) __ vmovaps(dst, src);
+ } else {
+ DCHECK_LT(lane, 4);
+ __ vshufps(dst, src, src, lane);
+ }
+ break;
+ }
+ case kSSEF32x4ReplaceLane: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ __ insertps(i.OutputSimd128Register(), i.InputOperand(2),
+ i.InputInt8(1) << 4);
+ break;
+ }
+ case kAVXF32x4ReplaceLane: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vinsertps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(2), i.InputInt8(1) << 4);
+ break;
+ }
+ case kSSEF32x4Abs: {
+ XMMRegister dst = i.OutputSimd128Register();
+ Operand src = i.InputOperand(0);
+ if (src.is_reg(dst)) {
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psrld(kScratchDoubleReg, 1);
+ __ andps(dst, kScratchDoubleReg);
+ } else {
+ __ pcmpeqd(dst, dst);
+ __ psrld(dst, 1);
+ __ andps(dst, src);
+ }
+ break;
+ }
+ case kAVXF32x4Abs: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ vpsrld(kScratchDoubleReg, kScratchDoubleReg, 1);
+ __ vandps(i.OutputSimd128Register(), kScratchDoubleReg,
+ i.InputOperand(0));
+ break;
+ }
+ case kSSEF32x4Neg: {
+ XMMRegister dst = i.OutputSimd128Register();
+ Operand src = i.InputOperand(0);
+ if (src.is_reg(dst)) {
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ pslld(kScratchDoubleReg, 31);
+ __ xorps(dst, kScratchDoubleReg);
+ } else {
+ __ pcmpeqd(dst, dst);
+ __ pslld(dst, 31);
+ __ xorps(dst, src);
+ }
+ break;
+ }
+ case kAVXF32x4Neg: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ vpslld(kScratchDoubleReg, kScratchDoubleReg, 31);
+ __ vxorps(i.OutputSimd128Register(), kScratchDoubleReg,
+ i.InputOperand(0));
+ break;
+ }
+ case kSSEF32x4Add: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ addps(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXF32x4Add: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vaddps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEF32x4Sub: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ subps(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXF32x4Sub: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vsubps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEF32x4Mul: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ mulps(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXF32x4Mul: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vmulps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEF32x4Min: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ minps(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXF32x4Min: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vminps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEF32x4Max: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ maxps(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXF32x4Max: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vmaxps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEF32x4Eq: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ cmpeqps(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXF32x4Eq: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vcmpeqps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEF32x4Ne: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ cmpneqps(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXF32x4Ne: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vcmpneqps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEF32x4Lt: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ cmpltps(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXF32x4Lt: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vcmpltps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEF32x4Le: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ cmpleps(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXF32x4Le: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vcmpleps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
case kIA32I32x4Splat: {
XMMRegister dst = i.OutputSimd128Register();
__ Movd(dst, i.InputOperand(0));
@@ -2774,52 +2575,68 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vpcmpeqb(i.OutputSimd128Register(), kScratchDoubleReg, src2);
break;
}
- case kCheckedLoadInt8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_b);
- break;
- case kCheckedLoadUint8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(movzx_b);
- break;
- case kCheckedLoadInt16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_w);
- break;
- case kCheckedLoadUint16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(movzx_w);
+ case kIA32S128Zero: {
+ XMMRegister dst = i.OutputSimd128Register();
+ __ Pxor(dst, dst);
break;
- case kCheckedLoadWord32:
- ASSEMBLE_CHECKED_LOAD_INTEGER(mov);
+ }
+ case kSSES128Not: {
+ XMMRegister dst = i.OutputSimd128Register();
+ Operand src = i.InputOperand(0);
+ if (src.is_reg(dst)) {
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ pxor(dst, kScratchDoubleReg);
+ } else {
+ __ pcmpeqd(dst, dst);
+ __ pxor(dst, src);
+ }
break;
- case kCheckedLoadFloat32:
- ASSEMBLE_CHECKED_LOAD_FLOAT(movss, OutOfLineLoadFloat32NaN, s);
+ }
+ case kAVXS128Not: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ vpxor(i.OutputSimd128Register(), kScratchDoubleReg, i.InputOperand(0));
break;
- case kCheckedLoadFloat64:
- ASSEMBLE_CHECKED_LOAD_FLOAT(movsd, OutOfLineLoadFloat64NaN, d);
+ }
+ case kSSES128And: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ pand(i.OutputSimd128Register(), i.InputOperand(1));
break;
- case kCheckedStoreWord8:
- ASSEMBLE_CHECKED_STORE_INTEGER(mov_b);
+ }
+ case kAVXS128And: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpand(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
- case kCheckedStoreWord16:
- ASSEMBLE_CHECKED_STORE_INTEGER(mov_w);
+ }
+ case kSSES128Or: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ por(i.OutputSimd128Register(), i.InputOperand(1));
break;
- case kCheckedStoreWord32:
- ASSEMBLE_CHECKED_STORE_INTEGER(mov);
+ }
+ case kAVXS128Or: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpor(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
- case kCheckedStoreFloat32:
- ASSEMBLE_CHECKED_STORE_FLOAT(movss);
+ }
+ case kSSES128Xor: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ pxor(i.OutputSimd128Register(), i.InputOperand(1));
break;
- case kCheckedStoreFloat64:
- ASSEMBLE_CHECKED_STORE_FLOAT(movsd);
+ }
+ case kAVXS128Xor: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpxor(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
+ }
case kIA32StackCheck: {
ExternalReference const stack_limit =
ExternalReference::address_of_stack_limit(__ isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
break;
}
- case kCheckedLoadWord64:
- case kCheckedStoreWord64:
- UNREACHABLE(); // currently unsupported checked int64 load/store.
- break;
case kAtomicExchangeInt8: {
__ xchg_b(i.InputRegister(0), i.MemoryOperand(1));
__ movsx_b(i.InputRegister(0), i.InputRegister(0));
@@ -3038,7 +2855,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
new (gen_->zone()) ReferenceMap(gen_->zone());
gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
- __ AssertUnreachable(kUnexpectedReturnFromWasmTrap);
+ __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
}
}
@@ -3287,7 +3104,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
- __ Abort(kShouldNotDirectlyEnterOsrFunction);
+ __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
// Unoptimized code jumps directly to this entrypoint while the unoptimized
// frame is still on the stack. Optimized code uses OSR values directly from
@@ -3331,12 +3148,13 @@ void CodeGenerator::AssembleConstructFrame() {
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
RecordSafepoint(reference_map, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
- __ AssertUnreachable(kUnexpectedReturnFromWasmTrap);
+ __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
__ bind(&done);
}
- // Skip callee-saved slots, which are pushed below.
+ // Skip callee-saved and return slots, which are created below.
shrink_slots -= base::bits::CountPopulation(saves);
+ shrink_slots -= frame()->GetReturnSlotCount();
if (shrink_slots > 0) {
__ sub(esp, Immediate(shrink_slots * kPointerSize));
}
@@ -3348,6 +3166,11 @@ void CodeGenerator::AssembleConstructFrame() {
if (((1 << i) & saves)) __ push(Register::from_code(i));
}
}
+
+ // Allocate return slots (located after callee-saved).
+ if (frame()->GetReturnSlotCount() > 0) {
+ __ sub(esp, Immediate(frame()->GetReturnSlotCount() * kPointerSize));
+ }
}
void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
@@ -3356,6 +3179,10 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
const RegList saves = descriptor->CalleeSavedRegisters();
// Restore registers.
if (saves != 0) {
+ const int returns = frame()->GetReturnSlotCount();
+ if (returns != 0) {
+ __ add(esp, Immediate(returns * kPointerSize));
+ }
for (int i = 0; i < Register::kNumRegisters; i++) {
if (!((1 << i) & saves)) continue;
__ pop(Register::from_code(i));
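The epilogue change mirrors the prologue: return slots are carved out after (below) the callee-saved pushes, so they must be released before those registers are popped. A toy check of that ordering (fake stack pointer, ia32 pointer size):

#include <cassert>

int main() {
  const int kPointerSize = 4;  // ia32
  const int saves = 3, return_slots = 2;
  int esp = 1000;  // fake stack pointer, grows downward

  esp -= saves * kPointerSize;         // prologue: push callee-saved
  esp -= return_slots * kPointerSize;  // then allocate return slots

  esp += return_slots * kPointerSize;  // epilogue: free return slots...
  esp += saves * kPointerSize;         // ...before popping callee-saved
  assert(esp == 1000);
  return 0;
}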
diff --git a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
index b9bf261022..a17d9f06ce 100644
--- a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
@@ -43,6 +43,7 @@ namespace compiler {
V(IA32Lzcnt) \
V(IA32Tzcnt) \
V(IA32Popcnt) \
+ V(LFence) \
V(SSEFloat32Cmp) \
V(SSEFloat32Add) \
V(SSEFloat32Sub) \
@@ -103,14 +104,45 @@ namespace compiler {
V(IA32Movl) \
V(IA32Movss) \
V(IA32Movsd) \
+ V(IA32Movdqu) \
V(IA32BitcastFI) \
V(IA32BitcastIF) \
V(IA32Lea) \
V(IA32Push) \
V(IA32PushFloat32) \
V(IA32PushFloat64) \
+ V(IA32PushSimd128) \
V(IA32Poke) \
+ V(IA32Peek) \
V(IA32StackCheck) \
+ V(SSEF32x4Splat) \
+ V(AVXF32x4Splat) \
+ V(SSEF32x4ExtractLane) \
+ V(AVXF32x4ExtractLane) \
+ V(SSEF32x4ReplaceLane) \
+ V(AVXF32x4ReplaceLane) \
+ V(SSEF32x4Abs) \
+ V(AVXF32x4Abs) \
+ V(SSEF32x4Neg) \
+ V(AVXF32x4Neg) \
+ V(SSEF32x4Add) \
+ V(AVXF32x4Add) \
+ V(SSEF32x4Sub) \
+ V(AVXF32x4Sub) \
+ V(SSEF32x4Mul) \
+ V(AVXF32x4Mul) \
+ V(SSEF32x4Min) \
+ V(AVXF32x4Min) \
+ V(SSEF32x4Max) \
+ V(AVXF32x4Max) \
+ V(SSEF32x4Eq) \
+ V(AVXF32x4Eq) \
+ V(SSEF32x4Ne) \
+ V(AVXF32x4Ne) \
+ V(SSEF32x4Lt) \
+ V(AVXF32x4Lt) \
+ V(SSEF32x4Le) \
+ V(AVXF32x4Le) \
V(IA32I32x4Splat) \
V(IA32I32x4ExtractLane) \
V(SSEI32x4ReplaceLane) \
@@ -229,7 +261,16 @@ namespace compiler {
V(SSEI8x16GtU) \
V(AVXI8x16GtU) \
V(SSEI8x16GeU) \
- V(AVXI8x16GeU)
+ V(AVXI8x16GeU) \
+ V(IA32S128Zero) \
+ V(SSES128Not) \
+ V(AVXS128Not) \
+ V(SSES128And) \
+ V(AVXS128And) \
+ V(SSES128Or) \
+ V(AVXS128Or) \
+ V(SSES128Xor) \
+ V(AVXS128Xor)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
diff --git a/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
index 83c60e4455..db43c1ed1c 100644
--- a/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
@@ -97,6 +97,34 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXFloat32Neg:
case kIA32BitcastFI:
case kIA32BitcastIF:
+ case kSSEF32x4Splat:
+ case kAVXF32x4Splat:
+ case kSSEF32x4ExtractLane:
+ case kAVXF32x4ExtractLane:
+ case kSSEF32x4ReplaceLane:
+ case kAVXF32x4ReplaceLane:
+ case kSSEF32x4Abs:
+ case kAVXF32x4Abs:
+ case kSSEF32x4Neg:
+ case kAVXF32x4Neg:
+ case kSSEF32x4Add:
+ case kAVXF32x4Add:
+ case kSSEF32x4Sub:
+ case kAVXF32x4Sub:
+ case kSSEF32x4Mul:
+ case kAVXF32x4Mul:
+ case kSSEF32x4Min:
+ case kAVXF32x4Min:
+ case kSSEF32x4Max:
+ case kAVXF32x4Max:
+ case kSSEF32x4Eq:
+ case kAVXF32x4Eq:
+ case kSSEF32x4Ne:
+ case kAVXF32x4Ne:
+ case kSSEF32x4Lt:
+ case kAVXF32x4Lt:
+ case kSSEF32x4Le:
+ case kAVXF32x4Le:
case kIA32I32x4Splat:
case kIA32I32x4ExtractLane:
case kSSEI32x4ReplaceLane:
@@ -216,6 +244,15 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXI8x16GtU:
case kSSEI8x16GeU:
case kAVXI8x16GeU:
+ case kIA32S128Zero:
+ case kSSES128Not:
+ case kAVXS128Not:
+ case kSSES128And:
+ case kAVXS128And:
+ case kSSES128Or:
+ case kAVXS128Or:
+ case kSSES128Xor:
+ case kAVXS128Xor:
return (instr->addressing_mode() == kMode_None)
? kNoOpcodeFlags
: kIsLoadOperation | kHasSideEffect;
@@ -235,16 +272,20 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32Movl:
case kIA32Movss:
case kIA32Movsd:
+ case kIA32Movdqu:
// Moves are used for memory load/store operations.
return instr->HasOutput() ? kIsLoadOperation : kHasSideEffect;
case kIA32StackCheck:
+ case kIA32Peek:
return kIsLoadOperation;
case kIA32Push:
case kIA32PushFloat32:
case kIA32PushFloat64:
+ case kIA32PushSimd128:
case kIA32Poke:
+ case kLFence:
return kHasSideEffect;
#define CASE(Name) case k##Name:
@@ -262,18 +303,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
// Basic latency modeling for ia32 instructions. They have been determined
// in an empirical way.
switch (instr->arch_opcode()) {
- case kCheckedLoadInt8:
- case kCheckedLoadUint8:
- case kCheckedLoadInt16:
- case kCheckedLoadUint16:
- case kCheckedLoadWord32:
- case kCheckedLoadFloat32:
- case kCheckedLoadFloat64:
- case kCheckedStoreWord8:
- case kCheckedStoreWord16:
- case kCheckedStoreWord32:
- case kCheckedStoreFloat32:
- case kCheckedStoreFloat64:
case kSSEFloat64Mul:
return 5;
case kIA32Imul:
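kLFence is scheduled as a side effect because it exists purely for ordering: VisitSpeculationFence (below) lowers the machine-level SpeculationFence operator to it, and on x86 lfence does not begin executing until all prior instructions have completed, which is why it serves as a speculation barrier. Roughly what the opcode boils down to, via the SSE2 intrinsic (assuming GCC, Clang, or MSVC targeting x86):

#include <emmintrin.h>  // _mm_lfence

void SpeculationFence() {
  _mm_lfence();  // serializing load fence; later instructions do not
                 // start, even speculatively, until it completes
}

int main() {
  SpeculationFence();
  return 0;
}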
diff --git a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
index bae563d7b6..d8bf250ec6 100644
--- a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
@@ -225,6 +225,11 @@ void InstructionSelector::VisitDebugAbort(Node* node) {
Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), edx));
}
+void InstructionSelector::VisitSpeculationFence(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kLFence, g.NoOutput());
+}
+
void InstructionSelector::VisitLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
@@ -249,8 +254,10 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kWord32:
opcode = kIA32Movl;
break;
+ case MachineRepresentation::kSimd128:
+ opcode = kIA32Movdqu;
+ break;
case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -339,8 +346,10 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kWord32:
opcode = kIA32Movl;
break;
+ case MachineRepresentation::kSimd128:
+ opcode = kIA32Movdqu;
+ break;
case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -379,156 +388,6 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
// Architecture supports unaligned access, therefore VisitStore is used instead
void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitCheckedLoad(Node* node) {
- CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
- IA32OperandGenerator g(this);
- Node* const buffer = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- ArchOpcode opcode = kArchNop;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedLoadWord32;
- break;
- case MachineRepresentation::kFloat32:
- opcode = kCheckedLoadFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedLoadFloat64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
- Int32BinopMatcher moffset(offset);
- InstructionOperand buffer_operand = g.CanBeImmediate(buffer)
- ? g.UseImmediate(buffer)
- : g.UseRegister(buffer);
- Int32Matcher mlength(length);
- if (mlength.HasValue() && moffset.right().HasValue() &&
- moffset.right().Value() >= 0 &&
- mlength.Value() >= moffset.right().Value()) {
- Emit(opcode, g.DefineAsRegister(node),
- g.UseImmediate(moffset.right().node()), g.UseImmediate(length),
- g.UseRegister(moffset.left().node()), buffer_operand);
- return;
- }
- IntMatcher<int32_t, IrOpcode::kRelocatableInt32Constant> mmlength(length);
- if (mmlength.HasValue() && moffset.right().HasValue() &&
- moffset.right().Value() >= 0 &&
- mmlength.Value() >= moffset.right().Value()) {
- Emit(opcode, g.DefineAsRegister(node),
- g.UseImmediate(moffset.right().node()), g.UseImmediate(length),
- g.UseRegister(moffset.left().node()), buffer_operand);
- return;
- }
- }
- InstructionOperand offset_operand = g.UseRegister(offset);
- InstructionOperand length_operand =
- g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
- if (g.CanBeImmediate(buffer)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI),
- g.DefineAsRegister(node), offset_operand, length_operand,
- offset_operand, g.UseImmediate(buffer));
- } else {
- Emit(opcode | AddressingModeField::encode(kMode_MR1),
- g.DefineAsRegister(node), offset_operand, length_operand,
- g.UseRegister(buffer), offset_operand);
- }
-}
-
-
-void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
- IA32OperandGenerator g(this);
- Node* const buffer = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- Node* const value = node->InputAt(3);
- ArchOpcode opcode = kArchNop;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kCheckedStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kCheckedStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedStoreWord32;
- break;
- case MachineRepresentation::kFloat32:
- opcode = kCheckedStoreFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedStoreFloat64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- InstructionOperand value_operand =
- g.CanBeImmediate(value) ? g.UseImmediate(value)
- : ((rep == MachineRepresentation::kWord8 ||
- rep == MachineRepresentation::kBit)
- ? g.UseByteRegister(value)
- : g.UseRegister(value));
- if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
- Int32BinopMatcher moffset(offset);
- InstructionOperand buffer_operand = g.CanBeImmediate(buffer)
- ? g.UseImmediate(buffer)
- : g.UseRegister(buffer);
- Int32Matcher mlength(length);
- if (mlength.HasValue() && moffset.right().HasValue() &&
- moffset.right().Value() >= 0 &&
- mlength.Value() >= moffset.right().Value()) {
- Emit(opcode, g.NoOutput(), g.UseImmediate(moffset.right().node()),
- g.UseImmediate(length), value_operand,
- g.UseRegister(moffset.left().node()), buffer_operand);
- return;
- }
- IntMatcher<int32_t, IrOpcode::kRelocatableInt32Constant> mmlength(length);
- if (mmlength.HasValue() && moffset.right().HasValue() &&
- moffset.right().Value() >= 0 &&
- mmlength.Value() >= moffset.right().Value()) {
- Emit(opcode, g.NoOutput(), g.UseImmediate(moffset.right().node()),
- g.UseImmediate(length), value_operand,
- g.UseRegister(moffset.left().node()), buffer_operand);
- return;
- }
- }
- InstructionOperand offset_operand = g.UseRegister(offset);
- InstructionOperand length_operand =
- g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
- if (g.CanBeImmediate(buffer)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- offset_operand, length_operand, value_operand, offset_operand,
- g.UseImmediate(buffer));
- } else {
- Emit(opcode | AddressingModeField::encode(kMode_MR1), g.NoOutput(),
- offset_operand, length_operand, value_operand, g.UseRegister(buffer),
- offset_operand);
- }
-}
-
namespace {
// Shared routine for multiple binary operations.
@@ -599,7 +458,8 @@ void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -1110,11 +970,11 @@ void InstructionSelector::EmitPrepareArguments(
// Poke any stack arguments.
for (size_t n = 0; n < arguments->size(); ++n) {
PushParameter input = (*arguments)[n];
- if (input.node()) {
+ if (input.node) {
int const slot = static_cast<int>(n);
InstructionOperand value = g.CanBeImmediate(node)
- ? g.UseImmediate(input.node())
- : g.UseRegister(input.node());
+ ? g.UseImmediate(input.node)
+ : g.UseRegister(input.node);
Emit(kIA32Poke | MiscField::encode(slot), g.NoOutput(), value);
}
}
@@ -1123,29 +983,30 @@ void InstructionSelector::EmitPrepareArguments(
int effect_level = GetEffectLevel(node);
for (PushParameter input : base::Reversed(*arguments)) {
// Skip any alignment holes in pushed nodes.
- Node* input_node = input.node();
- if (input.node() == nullptr) continue;
- if (g.CanBeMemoryOperand(kIA32Push, node, input_node, effect_level)) {
+ if (input.node == nullptr) continue;
+ if (g.CanBeMemoryOperand(kIA32Push, node, input.node, effect_level)) {
InstructionOperand outputs[1];
InstructionOperand inputs[4];
size_t input_count = 0;
InstructionCode opcode = kIA32Push;
AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
- input_node, inputs, &input_count);
+ input.node, inputs, &input_count);
opcode |= AddressingModeField::encode(mode);
Emit(opcode, 0, outputs, input_count, inputs);
} else {
InstructionOperand value =
- g.CanBeImmediate(input.node())
- ? g.UseImmediate(input.node())
+ g.CanBeImmediate(input.node)
+ ? g.UseImmediate(input.node)
: IsSupported(ATOM) ||
- sequence()->IsFP(GetVirtualRegister(input.node()))
- ? g.UseRegister(input.node())
- : g.Use(input.node());
- if (input.type() == MachineType::Float32()) {
+ sequence()->IsFP(GetVirtualRegister(input.node))
+ ? g.UseRegister(input.node)
+ : g.Use(input.node);
+ if (input.location.GetType() == MachineType::Float32()) {
Emit(kIA32PushFloat32, g.NoOutput(), value);
- } else if (input.type() == MachineType::Float64()) {
+ } else if (input.location.GetType() == MachineType::Float64()) {
Emit(kIA32PushFloat64, g.NoOutput(), value);
+ } else if (input.location.GetType() == MachineType::Simd128()) {
+ Emit(kIA32PushSimd128, g.NoOutput(), value);
} else {
Emit(kIA32Push, g.NoOutput(), value);
}
@@ -1154,6 +1015,29 @@ void InstructionSelector::EmitPrepareArguments(
}
}
+void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
+ const CallDescriptor* descriptor,
+ Node* node) {
+ IA32OperandGenerator g(this);
+
+ int reverse_slot = 0;
+ for (PushParameter output : *results) {
+ if (!output.location.IsCallerFrameSlot()) continue;
+ // Skip any alignment holes in nodes.
+ if (output.node != nullptr) {
+ DCHECK(!descriptor->IsCFunctionCall());
+ if (output.location.GetType() == MachineType::Float32()) {
+ MarkAsFloat32(output.node);
+ } else if (output.location.GetType() == MachineType::Float64()) {
+ MarkAsFloat64(output.node);
+ }
+ Emit(kIA32Peek, g.DefineAsRegister(output.node),
+ g.UseImmediate(reverse_slot));
+ }
+ reverse_slot += output.location.GetSizeInPointers();
+ }
+}
+
bool InstructionSelector::IsTailCallAddressImmediate() { return true; }
@@ -1181,7 +1065,8 @@ void VisitCompareWithMemoryOperand(InstructionSelector* selector,
selector->Emit(opcode, 0, nullptr, input_count, inputs);
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
InstructionOperand output = g.DefineAsRegister(cont->result());
selector->Emit(opcode, 1, &output, input_count, inputs);
@@ -1203,7 +1088,8 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
- cont->reason(), cont->frame_state());
+ cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsByteRegister(cont->result()), left, right);
} else {
@@ -1389,7 +1275,8 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->kind(),
- cont->reason(), cont->frame_state());
+ cont->reason(), cont->feedback(),
+ cont->frame_state());
} else {
DCHECK(cont->IsSet());
selector->Emit(opcode, g.DefineAsRegister(cont->result()));
@@ -1503,14 +1390,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), node->InputAt(1));
+ kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), node->InputAt(1));
+ kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
@@ -1897,12 +1784,21 @@ VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
-#define SIMD_TYPES(V) \
- V(I32x4) \
- V(I16x8) \
+#define SIMD_INT_TYPES(V) \
+ V(I32x4) \
+ V(I16x8) \
V(I8x16)
#define SIMD_BINOP_LIST(V) \
+ V(F32x4Add) \
+ V(F32x4Sub) \
+ V(F32x4Mul) \
+ V(F32x4Min) \
+ V(F32x4Max) \
+ V(F32x4Eq) \
+ V(F32x4Ne) \
+ V(F32x4Lt) \
+ V(F32x4Le) \
V(I32x4Add) \
V(I32x4Sub) \
V(I32x4Mul) \
@@ -1948,13 +1844,21 @@ VISIT_ATOMIC_BINOP(Xor)
V(I8x16MinU) \
V(I8x16MaxU) \
V(I8x16GtU) \
- V(I8x16GeU)
-
-#define SIMD_UNOP_LIST(V) \
- V(I32x4Neg) \
- V(I16x8Neg) \
+ V(I8x16GeU) \
+ V(S128And) \
+ V(S128Or) \
+ V(S128Xor)
+
+#define SIMD_INT_UNOP_LIST(V) \
+ V(I32x4Neg) \
+ V(I16x8Neg) \
V(I8x16Neg)
+#define SIMD_OTHER_UNOP_LIST(V) \
+ V(F32x4Abs) \
+ V(F32x4Neg) \
+ V(S128Not)
+
#define SIMD_SHIFT_OPCODES(V) \
V(I32x4Shl) \
V(I32x4ShrS) \
@@ -1963,11 +1867,38 @@ VISIT_ATOMIC_BINOP(Xor)
V(I16x8ShrS) \
V(I16x8ShrU)
+void InstructionSelector::VisitF32x4Splat(Node* node) {
+ IA32OperandGenerator g(this);
+ InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
+ if (IsSupported(AVX)) {
+ Emit(kAVXF32x4Splat, g.DefineAsRegister(node), operand0);
+ } else {
+ Emit(kSSEF32x4Splat, g.DefineSameAsFirst(node), operand0);
+ }
+}
+
+void InstructionSelector::VisitF32x4ExtractLane(Node* node) {
+ IA32OperandGenerator g(this);
+ InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
+ InstructionOperand operand1 = g.UseImmediate(OpParameter<int32_t>(node));
+ if (IsSupported(AVX)) {
+ Emit(kAVXF32x4ExtractLane, g.DefineAsRegister(node), operand0, operand1);
+ } else {
+ Emit(kSSEF32x4ExtractLane, g.DefineSameAsFirst(node), operand0, operand1);
+ }
+}
+
+void InstructionSelector::VisitS128Zero(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kIA32S128Zero, g.DefineAsRegister(node));
+}
+
+
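The split between kAVX* and kSSE* opcodes above follows directly from the instruction encodings: the two-operand SSE forms are destructive, so the selector must tie the output register to the first input with DefineSameAsFirst, while the VEX-encoded AVX forms take an explicit destination and can use DefineAsRegister. A sketch of the recurring pattern (avx_opcode and sse_opcode are placeholders, not real opcode names):

    if (IsSupported(AVX)) {
      // e.g. vaddps xmm_dst, xmm_a, xmm_b: non-destructive three-operand form.
      Emit(avx_opcode, g.DefineAsRegister(node), operand0, operand1);
    } else {
      // e.g. addps xmm_a, xmm_b: overwrites xmm_a, so dst must alias input 0.
      Emit(sse_opcode, g.DefineSameAsFirst(node), operand0, operand1);
    }
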
#define VISIT_SIMD_SPLAT(Type) \
void InstructionSelector::Visit##Type##Splat(Node* node) { \
VisitRO(this, node, kIA32##Type##Splat); \
}
-SIMD_TYPES(VISIT_SIMD_SPLAT)
+SIMD_INT_TYPES(VISIT_SIMD_SPLAT)
#undef VISIT_SIMD_SPLAT
#define VISIT_SIMD_EXTRACT_LANE(Type) \
@@ -1977,7 +1908,7 @@ SIMD_TYPES(VISIT_SIMD_SPLAT)
Emit(kIA32##Type##ExtractLane, g.DefineAsRegister(node), \
g.UseRegister(node->InputAt(0)), g.UseImmediate(lane)); \
}
-SIMD_TYPES(VISIT_SIMD_EXTRACT_LANE)
+SIMD_INT_TYPES(VISIT_SIMD_EXTRACT_LANE)
#undef VISIT_SIMD_EXTRACT_LANE
#define VISIT_SIMD_REPLACE_LANE(Type) \
@@ -1994,7 +1925,8 @@ SIMD_TYPES(VISIT_SIMD_EXTRACT_LANE)
operand1, operand2); \
} \
}
-SIMD_TYPES(VISIT_SIMD_REPLACE_LANE)
+SIMD_INT_TYPES(VISIT_SIMD_REPLACE_LANE)
+VISIT_SIMD_REPLACE_LANE(F32x4)
#undef VISIT_SIMD_REPLACE_LANE
#define VISIT_SIMD_SHIFT(Opcode) \
@@ -2011,13 +1943,22 @@ SIMD_TYPES(VISIT_SIMD_REPLACE_LANE)
SIMD_SHIFT_OPCODES(VISIT_SIMD_SHIFT)
#undef VISIT_SIMD_SHIFT
-#define VISIT_SIMD_UNOP(Opcode) \
+#define VISIT_SIMD_INT_UNOP(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
IA32OperandGenerator g(this); \
Emit(kIA32##Opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0))); \
}
-SIMD_UNOP_LIST(VISIT_SIMD_UNOP)
-#undef VISIT_SIMD_UNOP
+SIMD_INT_UNOP_LIST(VISIT_SIMD_INT_UNOP)
+#undef VISIT_SIMD_INT_UNOP
+
+#define VISIT_SIMD_OTHER_UNOP(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ IA32OperandGenerator g(this); \
+ InstructionCode opcode = IsSupported(AVX) ? kAVX##Opcode : kSSE##Opcode; \
+ Emit(opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0))); \
+ }
+SIMD_OTHER_UNOP_LIST(VISIT_SIMD_OTHER_UNOP)
+#undef VISIT_SIMD_OTHER_UNOP
#define VISIT_SIMD_BINOP(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
@@ -2039,7 +1980,8 @@ MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::Flags flags =
MachineOperatorBuilder::kWord32ShiftIsSafe |
- MachineOperatorBuilder::kWord32Ctz;
+ MachineOperatorBuilder::kWord32Ctz |
+ MachineOperatorBuilder::kSpeculationFence;
if (CpuFeatures::IsSupported(POPCNT)) {
flags |= MachineOperatorBuilder::kWord32Popcnt;
}
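
kSpeculationFence advertises that this backend can materialize a serializing barrier for Spectre-style mitigations; the matching IrOpcode::kSpeculationFence dispatch appears in the instruction-selector.cc hunk further down. A hedged sketch of the visitor such a flag implies — the kLFence opcode name is an assumption here, not something shown in this diff:

    void InstructionSelector::VisitSpeculationFence(Node* node) {
      IA32OperandGenerator g(this);
      Emit(kLFence, g.NoOutput());  // lfence: no speculation past this point
    }
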
diff --git a/deps/v8/src/compiler/instruction-codes.h b/deps/v8/src/compiler/instruction-codes.h
index f5457ee562..df3078d739 100644
--- a/deps/v8/src/compiler/instruction-codes.h
+++ b/deps/v8/src/compiler/instruction-codes.h
@@ -68,20 +68,6 @@ enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };
V(ArchParentFramePointer) \
V(ArchTruncateDoubleToI) \
V(ArchStoreWithWriteBarrier) \
- V(CheckedLoadInt8) \
- V(CheckedLoadUint8) \
- V(CheckedLoadInt16) \
- V(CheckedLoadUint16) \
- V(CheckedLoadWord32) \
- V(CheckedLoadWord64) \
- V(CheckedLoadFloat32) \
- V(CheckedLoadFloat64) \
- V(CheckedStoreWord8) \
- V(CheckedStoreWord16) \
- V(CheckedStoreWord32) \
- V(CheckedStoreWord64) \
- V(CheckedStoreFloat32) \
- V(CheckedStoreFloat64) \
V(ArchStackSlot) \
V(AtomicLoadInt8) \
V(AtomicLoadUint8) \
diff --git a/deps/v8/src/compiler/instruction-scheduler.cc b/deps/v8/src/compiler/instruction-scheduler.cc
index b1164767f2..f7afaab697 100644
--- a/deps/v8/src/compiler/instruction-scheduler.cc
+++ b/deps/v8/src/compiler/instruction-scheduler.cc
@@ -268,21 +268,7 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
case kIeee754Float64Sinh:
case kIeee754Float64Tan:
case kIeee754Float64Tanh:
-#ifdef V8_TARGET_ARCH_ARM64
- // This is an unfortunate effect of arm64 dual stack pointers:
- // * TruncateDoubleToI may call a stub, and the stub will push and pop
- // values onto the stack. Push updates both CSP and JSSP but pop only
- // restores JSSP.
- // * kIeee754XXX opcodes call a C Function and the call macro may update
- // CSP to meet alignment requirements but it will not bring back CSP to
- // its original value.
- // Those opcode cannot be reordered with instructions with side effects
- // such as Arm64ClaimCSP.
- // TODO(arm64): remove when JSSP is gone.
- return kHasSideEffect;
-#else
return kNoOpcodeFlags;
-#endif
case kArchStackPointer:
// ArchStackPointer instruction loads the current stack pointer value and
@@ -315,22 +301,6 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
case kArchThrowTerminator:
return kIsBlockTerminator;
- case kCheckedLoadInt8:
- case kCheckedLoadUint8:
- case kCheckedLoadInt16:
- case kCheckedLoadUint16:
- case kCheckedLoadWord32:
- case kCheckedLoadWord64:
- case kCheckedLoadFloat32:
- case kCheckedLoadFloat64:
- return kIsLoadOperation;
-
- case kCheckedStoreWord8:
- case kCheckedStoreWord16:
- case kCheckedStoreWord32:
- case kCheckedStoreWord64:
- case kCheckedStoreFloat32:
- case kCheckedStoreFloat64:
case kArchStoreWithWriteBarrier:
return kHasSideEffect;
diff --git a/deps/v8/src/compiler/instruction-selector-impl.h b/deps/v8/src/compiler/instruction-selector-impl.h
index 8334d1751a..7c7a2708c5 100644
--- a/deps/v8/src/compiler/instruction-selector-impl.h
+++ b/deps/v8/src/compiler/instruction-selector-impl.h
@@ -251,6 +251,23 @@ class OperandGenerator {
return Constant(OpParameter<ExternalReference>(node));
case IrOpcode::kHeapConstant:
return Constant(OpParameter<Handle<HeapObject>>(node));
+ case IrOpcode::kDeadValue: {
+ switch (DeadValueRepresentationOf(node->op())) {
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kWord32:
+ case MachineRepresentation::kTagged:
+ case MachineRepresentation::kTaggedSigned:
+ case MachineRepresentation::kTaggedPointer:
+ return Constant(static_cast<int32_t>(0));
+ case MachineRepresentation::kFloat64:
+ return Constant(static_cast<double>(0));
+ case MachineRepresentation::kFloat32:
+ return Constant(static_cast<float>(0));
+ default:
+ UNREACHABLE();
+ }
+ break;
+ }
default:
break;
}
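
This new case lets a kDeadValue node — a value the compiler has proven unreachable — be materialized as an ordinary zero constant of the matching representation instead of tripping operand selection. A compact restatement of the mapping, with the unreachable representations collapsed for brevity:

    // Sketch: the switch above as a helper (word64 etc. stay UNREACHABLE).
    Constant ZeroFor(MachineRepresentation rep) {
      switch (rep) {
        case MachineRepresentation::kFloat64:
          return Constant(static_cast<double>(0));
        case MachineRepresentation::kFloat32:
          return Constant(static_cast<float>(0));
        default:  // kBit, kWord32 and the tagged representations
          return Constant(static_cast<int32_t>(0));
      }
    }
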
@@ -350,8 +367,9 @@ class FlagsContinuation final {
static FlagsContinuation ForDeoptimize(FlagsCondition condition,
DeoptimizeKind kind,
DeoptimizeReason reason,
+ VectorSlotPair const& feedback,
Node* frame_state) {
- return FlagsContinuation(condition, kind, reason, frame_state);
+ return FlagsContinuation(condition, kind, reason, feedback, frame_state);
}
// Creates a new flags continuation for a boolean value.
@@ -382,6 +400,10 @@ class FlagsContinuation final {
DCHECK(IsDeoptimize());
return reason_;
}
+ VectorSlotPair const& feedback() const {
+ DCHECK(IsDeoptimize());
+ return feedback_;
+ }
Node* frame_state() const {
DCHECK(IsDeoptimize());
return frame_state_or_result_;
@@ -452,11 +474,13 @@ class FlagsContinuation final {
private:
FlagsContinuation(FlagsCondition condition, DeoptimizeKind kind,
- DeoptimizeReason reason, Node* frame_state)
+ DeoptimizeReason reason, VectorSlotPair const& feedback,
+ Node* frame_state)
: mode_(kFlags_deoptimize),
condition_(condition),
kind_(kind),
reason_(reason),
+ feedback_(feedback),
frame_state_or_result_(frame_state) {
DCHECK_NOT_NULL(frame_state);
}
@@ -480,6 +504,7 @@ class FlagsContinuation final {
FlagsCondition condition_;
DeoptimizeKind kind_; // Only valid if mode_ == kFlags_deoptimize
DeoptimizeReason reason_; // Only valid if mode_ == kFlags_deoptimize
+ VectorSlotPair feedback_; // Only valid if mode_ == kFlags_deoptimize
Node* frame_state_or_result_; // Only valid if mode_ == kFlags_deoptimize
// or mode_ == kFlags_set.
BasicBlock* true_block_; // Only valid if mode_ == kFlags_branch.
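
The thread running through these FlagsContinuation changes is that every eager deoptimization now carries a VectorSlotPair — the feedback vector plus slot that motivated the speculative code — so the runtime can attribute the deopt to the responsible feedback when it fires. Updated call sites simply forward p.feedback(), as in the VisitDeoptimizeIf hunk earlier in this patch:

    FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
        kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
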
diff --git a/deps/v8/src/compiler/instruction-selector.cc b/deps/v8/src/compiler/instruction-selector.cc
index d19692e3dd..c94b42b458 100644
--- a/deps/v8/src/compiler/instruction-selector.cc
+++ b/deps/v8/src/compiler/instruction-selector.cc
@@ -668,7 +668,7 @@ struct CallBuffer {
const CallDescriptor* descriptor;
FrameStateDescriptor* frame_state_descriptor;
- NodeVector output_nodes;
+ ZoneVector<PushParameter> output_nodes;
InstructionOperandVector outputs;
InstructionOperandVector instruction_args;
ZoneVector<PushParameter> pushed_nodes;
@@ -693,26 +693,38 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
bool is_tail_call,
int stack_param_delta) {
OperandGenerator g(this);
- DCHECK_LE(call->op()->ValueOutputCount(),
- static_cast<int>(buffer->descriptor->ReturnCount()));
+ size_t ret_count = buffer->descriptor->ReturnCount();
+  DCHECK_LE(call->op()->ValueOutputCount(), static_cast<int>(ret_count));
DCHECK_EQ(
call->op()->ValueInputCount(),
static_cast<int>(buffer->input_count() + buffer->frame_state_count()));
- if (buffer->descriptor->ReturnCount() > 0) {
+ if (ret_count > 0) {
// Collect the projections that represent multiple outputs from this call.
- if (buffer->descriptor->ReturnCount() == 1) {
- buffer->output_nodes.push_back(call);
+ if (ret_count == 1) {
+ PushParameter result = {call, buffer->descriptor->GetReturnLocation(0)};
+ buffer->output_nodes.push_back(result);
} else {
- buffer->output_nodes.resize(buffer->descriptor->ReturnCount(), nullptr);
+ buffer->output_nodes.resize(ret_count);
+ int stack_count = 0;
+ for (size_t i = 0; i < ret_count; ++i) {
+ LinkageLocation location = buffer->descriptor->GetReturnLocation(i);
+ buffer->output_nodes[i] = PushParameter(nullptr, location);
+ if (location.IsCallerFrameSlot()) {
+ stack_count += location.GetSizeInPointers();
+ }
+ }
for (Edge const edge : call->use_edges()) {
if (!NodeProperties::IsValueEdge(edge)) continue;
- DCHECK_EQ(IrOpcode::kProjection, edge.from()->opcode());
- size_t const index = ProjectionIndexOf(edge.from()->op());
+ Node* node = edge.from();
+ DCHECK_EQ(IrOpcode::kProjection, node->opcode());
+ size_t const index = ProjectionIndexOf(node->op());
+
DCHECK_LT(index, buffer->output_nodes.size());
- DCHECK(!buffer->output_nodes[index]);
- buffer->output_nodes[index] = edge.from();
+ DCHECK(!buffer->output_nodes[index].node);
+ buffer->output_nodes[index].node = node;
}
+ frame_->EnsureReturnSlots(stack_count);
}
// Filter out the outputs that aren't live because no projection uses them.
@@ -722,22 +734,22 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
: buffer->frame_state_descriptor->state_combine()
.ConsumedOutputCount();
for (size_t i = 0; i < buffer->output_nodes.size(); i++) {
- bool output_is_live = buffer->output_nodes[i] != nullptr ||
+ bool output_is_live = buffer->output_nodes[i].node != nullptr ||
i < outputs_needed_by_framestate;
if (output_is_live) {
- MachineRepresentation rep =
- buffer->descriptor->GetReturnType(static_cast<int>(i))
- .representation();
- LinkageLocation location =
- buffer->descriptor->GetReturnLocation(static_cast<int>(i));
+ LinkageLocation location = buffer->output_nodes[i].location;
+ MachineRepresentation rep = location.GetType().representation();
- Node* output = buffer->output_nodes[i];
+ Node* output = buffer->output_nodes[i].node;
InstructionOperand op = output == nullptr
? g.TempLocation(location)
: g.DefineAsLocation(output, location);
MarkAsRepresentation(rep, op);
- buffer->outputs.push_back(op);
+ if (!UnallocatedOperand::cast(op).HasFixedSlotPolicy()) {
+ buffer->outputs.push_back(op);
+ buffer->output_nodes[i].node = nullptr;
+ }
}
}
}
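
Taken together, these two hunks teach the call buffer about stack-allocated returns: each return's LinkageLocation is recorded up front, caller-frame-slot locations accumulate stack_count in pointer-sized slots so the frame can reserve them via EnsureReturnSlots, and any output with a fixed-slot policy is left in output_nodes for EmitPrepareResults to peek later. A worked example of the accounting, assuming a descriptor returning (int32 in a register, float64 on the stack, int32 on the stack):

    int stack_count = 0;
    // i = 0: register location, contributes nothing.
    // i = 1: caller frame slot, GetSizeInPointers() == 2 -> stack_count = 2.
    // i = 2: caller frame slot, GetSizeInPointers() == 1 -> stack_count = 3.
    frame_->EnsureReturnSlots(stack_count);  // reserve three return slots
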
@@ -803,7 +815,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
int const state_id = sequence()->AddDeoptimizationEntry(
buffer->frame_state_descriptor, DeoptimizeKind::kLazy,
- DeoptimizeReason::kNoReason);
+ DeoptimizeReason::kUnknown, VectorSlotPair());
buffer->instruction_args.push_back(g.TempImmediate(state_id));
StateObjectDeduplicator deduplicator(instruction_zone());
@@ -842,8 +854,8 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
if (static_cast<size_t>(stack_index) >= buffer->pushed_nodes.size()) {
buffer->pushed_nodes.resize(stack_index + 1);
}
- PushParameter parameter(*iter, buffer->descriptor->GetInputType(index));
- buffer->pushed_nodes[stack_index] = parameter;
+ PushParameter param = {*iter, location};
+ buffer->pushed_nodes[stack_index] = param;
pushed_count++;
} else {
buffer->instruction_args.push_back(op);
@@ -890,7 +902,6 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
SetEffectLevel(node, effect_level);
if (node->opcode() == IrOpcode::kStore ||
node->opcode() == IrOpcode::kUnalignedStore ||
- node->opcode() == IrOpcode::kCheckedStore ||
node->opcode() == IrOpcode::kCall ||
node->opcode() == IrOpcode::kCallWithCallerSavedRegisters ||
node->opcode() == IrOpcode::kProtectedLoad ||
@@ -960,7 +971,7 @@ void InstructionSelector::VisitControl(BasicBlock* block) {
<< "only one predecessor." << std::endl
<< "# Current Block: " << *successor << std::endl
<< "# Node: " << *node;
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
}
}
@@ -1026,7 +1037,7 @@ void InstructionSelector::VisitControl(BasicBlock* block) {
case BasicBlock::kDeoptimize: {
DeoptimizeParameters p = DeoptimizeParametersOf(input->op());
Node* value = input->InputAt(0);
- return VisitDeoptimize(p.kind(), p.reason(), value);
+ return VisitDeoptimize(p.kind(), p.reason(), p.feedback(), value);
}
case BasicBlock::kThrow:
DCHECK_EQ(IrOpcode::kThrow, input->opcode());
@@ -1136,6 +1147,9 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kUnreachable:
VisitUnreachable(node);
return;
+ case IrOpcode::kDeadValue:
+ VisitDeadValue(node);
+ return;
case IrOpcode::kComment:
VisitComment(node);
return;
@@ -1472,14 +1486,6 @@ void InstructionSelector::VisitNode(Node* node) {
}
case IrOpcode::kUnalignedStore:
return VisitUnalignedStore(node);
- case IrOpcode::kCheckedLoad: {
- MachineRepresentation rep =
- CheckedLoadRepresentationOf(node->op()).representation();
- MarkAsRepresentation(rep, node);
- return VisitCheckedLoad(node);
- }
- case IrOpcode::kCheckedStore:
- return VisitCheckedStore(node);
case IrOpcode::kInt32PairAdd:
MarkAsWord32(node);
MarkPairProjectionsAsWord32(node);
@@ -1525,6 +1531,8 @@ void InstructionSelector::VisitNode(Node* node) {
ATOMIC_CASE(Or)
ATOMIC_CASE(Xor)
#undef ATOMIC_CASE
+ case IrOpcode::kSpeculationFence:
+ return VisitSpeculationFence(node);
case IrOpcode::kProtectedLoad: {
LoadRepresentation type = LoadRepresentationOf(node->op());
MarkAsRepresentation(type.representation(), node);
@@ -2089,12 +2097,6 @@ void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); }
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitF32x4Splat(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4ExtractLane(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitF32x4SConvertI32x4(Node* node) {
UNIMPLEMENTED();
}
@@ -2102,73 +2104,36 @@ void InstructionSelector::VisitF32x4SConvertI32x4(Node* node) {
void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
UNIMPLEMENTED();
}
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
+ // && !V8_TARGET_ARCH_MIPS64
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_IA32
void InstructionSelector::VisitF32x4Abs(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Neg(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
+ // && !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_IA32
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_X64
void InstructionSelector::VisitF32x4RecipSqrtApprox(Node* node) {
UNIMPLEMENTED();
}
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
+ // && !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_X64
-void InstructionSelector::VisitF32x4Add(Node* node) { UNIMPLEMENTED(); }
-
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitF32x4AddHoriz(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Sub(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Mul(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Max(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Min(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4RecipApprox(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Eq(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Ne(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Lt(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Le(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
// && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
- !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitI32x4Splat(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4ExtractLane(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4Add(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4Sub(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4Shl(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4ShrS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4Mul(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4MaxS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4MinS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4Eq(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4Ne(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4MinU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4MaxU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4ShrU(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
- // && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS
- // && !V8_TARGET_ARCH_MIPS64
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_X64
+void InstructionSelector::VisitF32x4RecipApprox(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
+ // && !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_X64
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
!V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
@@ -2221,79 +2186,11 @@ void InstructionSelector::VisitI16x8SConvertI32x4(Node* node) {
// && !V8_TARGET_ARCH_MIPS64
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
- !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitI32x4Neg(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4GtS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4GeS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4GtU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4GeU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8Splat(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8ExtractLane(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8Shl(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8ShrS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8ShrU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8Add(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8AddSaturateS(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI16x8Sub(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8SubSaturateS(Node* node) {
- UNIMPLEMENTED();
-}
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
- // && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS &&
- // !V8_TARGET_ARCH_MIPS64
-
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
!V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI16x8AddHoriz(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
// && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
- !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitI16x8Mul(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8MinS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8MaxS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8Eq(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8Ne(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8AddSaturateU(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI16x8SubSaturateU(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI16x8MinU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8MaxU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8Neg(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
- // && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS &&
- // !V8_TARGET_ARCH_MIPS64
-
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI16x8UConvertI32x4(Node* node) {
@@ -2310,21 +2207,6 @@ void InstructionSelector::VisitI16x8UConvertI8x16High(Node* node) {
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
// && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
- !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitI16x8GtS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8GeS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8GtU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8GeU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16Neg(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
- // && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS &&
- // !V8_TARGET_ARCH_MIPS64
-
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI8x16Shl(Node* node) { UNIMPLEMENTED(); }
@@ -2333,17 +2215,6 @@ void InstructionSelector::VisitI8x16ShrS(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
// && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
- !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitI8x16Splat(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16ExtractLane(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
- // && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS &&
- // !V8_TARGET_ARCH_MIPS64
-
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI8x16SConvertI16x8(Node* node) {
@@ -2352,35 +2223,6 @@ void InstructionSelector::VisitI8x16SConvertI16x8(Node* node) {
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
// && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
- !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitI8x16Add(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16AddSaturateS(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI8x16Sub(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16SubSaturateS(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI8x16MinS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16MaxS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16Eq(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16Ne(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16GtS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16GeS(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
- // && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS &&
- // !V8_TARGET_ARCH_MIPS64
-
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI8x16Mul(Node* node) { UNIMPLEMENTED(); }
@@ -2398,38 +2240,7 @@ void InstructionSelector::VisitI8x16UConvertI16x8(Node* node) {
// && !V8_TARGET_ARCH_MIPS64
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
- !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitI8x16AddSaturateU(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI8x16SubSaturateU(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI8x16MinU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16MaxU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16GtU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16GeU(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
- // && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS &&
- // !V8_TARGET_ARCH_MIPS64
-
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
!V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitS128And(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS128Or(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS128Xor(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS128Not(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS128Zero(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitS128Select(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
// && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
@@ -2582,15 +2393,6 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
buffer.instruction_args.push_back(g.Label(handler));
}
- bool from_native_stack = linkage()->GetIncomingDescriptor()->UseNativeStack();
- bool to_native_stack = descriptor->UseNativeStack();
- if (from_native_stack != to_native_stack) {
- // (arm64 only) Mismatch in the use of stack pointers. One or the other
- // has to be restored manually by the code generator.
- flags |= to_native_stack ? CallDescriptor::kRestoreJSSP
- : CallDescriptor::kRestoreCSP;
- }
-
// Select the appropriate opcode based on the call type.
InstructionCode opcode = kArchNop;
switch (descriptor->kind()) {
@@ -2618,6 +2420,8 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
&buffer.instruction_args.front());
if (instruction_selection_failed()) return;
call_instr->MarkAsCall();
+
+ EmitPrepareResults(&(buffer.output_nodes), descriptor, node);
}
void InstructionSelector::VisitCallWithCallerSavedRegisters(
@@ -2685,6 +2489,14 @@ void InstructionSelector::VisitTailCall(Node* node) {
Emit(kArchPrepareTailCall, g.NoOutput());
+ // Add an immediate operand that represents the first slot that is unused
+ // with respect to the stack pointer that has been updated for the tail call
+ // instruction. This is used by backends that need to pad arguments for stack
+ // alignment, in order to store an optional slot of padding above the
+ // arguments.
+ int optional_padding_slot = callee->GetFirstUnusedStackSlot();
+ buffer.instruction_args.push_back(g.TempImmediate(optional_padding_slot));
+
int first_unused_stack_slot =
(V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK ? 1 : 0) +
stack_param_delta;
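
The new immediate gives backends that must keep the stack pointer aligned a place to stash one slot of padding above the tail-call arguments. A sketch of how a target with two-slot alignment (say, arm64 keeping sp 16-byte aligned with 8-byte slots) might consume it — the padding test here is illustrative, not code from this patch:

    int padding_slot = callee->GetFirstUnusedStackSlot();
    if ((padding_slot % 2) != 0) {
      // Argument area ends on an odd slot: store one slot of padding at
      // padding_slot so the final stack pointer stays aligned.
    }
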
@@ -2724,29 +2536,31 @@ void InstructionSelector::VisitReturn(Node* ret) {
Instruction* InstructionSelector::EmitDeoptimize(
InstructionCode opcode, InstructionOperand output, InstructionOperand a,
- DeoptimizeKind kind, DeoptimizeReason reason, Node* frame_state) {
+ DeoptimizeKind kind, DeoptimizeReason reason,
+ VectorSlotPair const& feedback, Node* frame_state) {
size_t output_count = output.IsInvalid() ? 0 : 1;
InstructionOperand inputs[] = {a};
size_t input_count = arraysize(inputs);
return EmitDeoptimize(opcode, output_count, &output, input_count, inputs,
- kind, reason, frame_state);
+ kind, reason, feedback, frame_state);
}
Instruction* InstructionSelector::EmitDeoptimize(
InstructionCode opcode, InstructionOperand output, InstructionOperand a,
InstructionOperand b, DeoptimizeKind kind, DeoptimizeReason reason,
- Node* frame_state) {
+ VectorSlotPair const& feedback, Node* frame_state) {
size_t output_count = output.IsInvalid() ? 0 : 1;
InstructionOperand inputs[] = {a, b};
size_t input_count = arraysize(inputs);
return EmitDeoptimize(opcode, output_count, &output, input_count, inputs,
- kind, reason, frame_state);
+ kind, reason, feedback, frame_state);
}
Instruction* InstructionSelector::EmitDeoptimize(
InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
size_t input_count, InstructionOperand* inputs, DeoptimizeKind kind,
- DeoptimizeReason reason, Node* frame_state) {
+ DeoptimizeReason reason, VectorSlotPair const& feedback,
+ Node* frame_state) {
OperandGenerator g(this);
FrameStateDescriptor* const descriptor = GetFrameStateDescriptor(frame_state);
InstructionOperandVector args(instruction_zone());
@@ -2757,7 +2571,7 @@ Instruction* InstructionSelector::EmitDeoptimize(
opcode |= MiscField::encode(static_cast<int>(input_count));
DCHECK_NE(DeoptimizeKind::kLazy, kind);
int const state_id =
- sequence()->AddDeoptimizationEntry(descriptor, kind, reason);
+ sequence()->AddDeoptimizationEntry(descriptor, kind, reason, feedback);
args.push_back(g.TempImmediate(state_id));
StateObjectDeduplicator deduplicator(instruction_zone());
AddInputsToFrameStateDescriptor(descriptor, frame_state, &g, &deduplicator,
@@ -2775,8 +2589,10 @@ void InstructionSelector::EmitIdentity(Node* node) {
void InstructionSelector::VisitDeoptimize(DeoptimizeKind kind,
DeoptimizeReason reason,
+ VectorSlotPair const& feedback,
Node* value) {
- EmitDeoptimize(kArchDeoptimize, 0, nullptr, 0, nullptr, kind, reason, value);
+ EmitDeoptimize(kArchDeoptimize, 0, nullptr, 0, nullptr, kind, reason,
+ feedback, value);
}
void InstructionSelector::VisitThrow(Node* node) {
@@ -2794,6 +2610,12 @@ void InstructionSelector::VisitUnreachable(Node* node) {
Emit(kArchDebugBreak, g.NoOutput());
}
+void InstructionSelector::VisitDeadValue(Node* node) {
+ OperandGenerator g(this);
+ MarkAsRepresentation(DeadValueRepresentationOf(node->op()), node);
+ Emit(kArchDebugBreak, g.DefineAsConstant(node));
+}
+
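VisitDeadValue is the consumer of the kDeadValue constant case added to OperandGenerator earlier in this patch: the node is marked with its representation and defined as a constant output of kArchDebugBreak, so downstream operands see a well-typed zero while actually reaching the instruction traps.
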
void InstructionSelector::VisitComment(Node* node) {
OperandGenerator g(this);
InstructionOperand operand(g.UseImmediate(node));
diff --git a/deps/v8/src/compiler/instruction-selector.h b/deps/v8/src/compiler/instruction-selector.h
index 2bd85d7dab..75c41c165f 100644
--- a/deps/v8/src/compiler/instruction-selector.h
+++ b/deps/v8/src/compiler/instruction-selector.h
@@ -10,6 +10,7 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/instruction-scheduler.h"
#include "src/compiler/instruction.h"
+#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
#include "src/globals.h"
@@ -30,17 +31,13 @@ class StateObjectDeduplicator;
// This struct connects nodes of parameters which are going to be pushed on the
// call stack with their parameter index in the call descriptor of the callee.
-class PushParameter {
- public:
- PushParameter() : node_(nullptr), type_(MachineType::None()) {}
- PushParameter(Node* node, MachineType type) : node_(node), type_(type) {}
-
- Node* node() const { return node_; }
- MachineType type() const { return type_; }
+struct PushParameter {
+ PushParameter(Node* n = nullptr,
+ LinkageLocation l = LinkageLocation::ForAnyRegister())
+ : node(n), location(l) {}
- private:
- Node* node_;
- MachineType type_;
+ Node* node;
+ LinkageLocation location;
};
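
PushParameter is demoted from an accessor class to a plain aggregate, and its payload switches from MachineType to LinkageLocation: the location still yields the type, but additionally says where the value lives and how many slots it occupies, which is exactly what the new result-preparation code needs. In sketch form:

    PushParameter p = {node, location};
    MachineType type = p.location.GetType();         // replaces the old p.type()
    int slots = p.location.GetSizeInPointers();      // stack footprint
    bool on_stack = p.location.IsCallerFrameSlot();  // register vs. frame slot
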
enum class FrameStateInputKind { kAny, kStackSlot };
@@ -115,15 +112,20 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
Instruction* EmitDeoptimize(InstructionCode opcode, InstructionOperand output,
InstructionOperand a, DeoptimizeKind kind,
- DeoptimizeReason reason, Node* frame_state);
+ DeoptimizeReason reason,
+ VectorSlotPair const& feedback,
+ Node* frame_state);
Instruction* EmitDeoptimize(InstructionCode opcode, InstructionOperand output,
InstructionOperand a, InstructionOperand b,
DeoptimizeKind kind, DeoptimizeReason reason,
+ VectorSlotPair const& feedback,
Node* frame_state);
Instruction* EmitDeoptimize(InstructionCode opcode, size_t output_count,
InstructionOperand* outputs, size_t input_count,
InstructionOperand* inputs, DeoptimizeKind kind,
- DeoptimizeReason reason, Node* frame_state);
+ DeoptimizeReason reason,
+ VectorSlotPair const& feedback,
+ Node* frame_state);
// ===========================================================================
// ============== Architecture-independent CPU feature methods. ==============
@@ -345,14 +347,17 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
void VisitSwitch(Node* node, const SwitchInfo& sw);
void VisitDeoptimize(DeoptimizeKind kind, DeoptimizeReason reason,
- Node* value);
+ VectorSlotPair const& feedback, Node* value);
void VisitReturn(Node* ret);
void VisitThrow(Node* node);
void VisitRetain(Node* node);
void VisitUnreachable(Node* node);
+ void VisitDeadValue(Node* node);
void EmitPrepareArguments(ZoneVector<compiler::PushParameter>* arguments,
const CallDescriptor* descriptor, Node* node);
+ void EmitPrepareResults(ZoneVector<compiler::PushParameter>* results,
+ const CallDescriptor* descriptor, Node* node);
void EmitIdentity(Node* node);
bool CanProduceSignalingNaN(Node* node);
diff --git a/deps/v8/src/compiler/instruction.cc b/deps/v8/src/compiler/instruction.cc
index b1b322e1ee..f335177b95 100644
--- a/deps/v8/src/compiler/instruction.cc
+++ b/deps/v8/src/compiler/instruction.cc
@@ -927,10 +927,10 @@ void InstructionSequence::MarkAsRepresentation(MachineRepresentation rep,
int InstructionSequence::AddDeoptimizationEntry(
FrameStateDescriptor* descriptor, DeoptimizeKind kind,
- DeoptimizeReason reason) {
+ DeoptimizeReason reason, VectorSlotPair const& feedback) {
int deoptimization_id = static_cast<int>(deoptimization_entries_.size());
deoptimization_entries_.push_back(
- DeoptimizationEntry(descriptor, kind, reason));
+ DeoptimizationEntry(descriptor, kind, reason, feedback));
return deoptimization_id;
}
diff --git a/deps/v8/src/compiler/instruction.h b/deps/v8/src/compiler/instruction.h
index b0f6661274..7772f18ad9 100644
--- a/deps/v8/src/compiler/instruction.h
+++ b/deps/v8/src/compiler/instruction.h
@@ -1317,17 +1317,22 @@ class DeoptimizationEntry final {
public:
DeoptimizationEntry() {}
DeoptimizationEntry(FrameStateDescriptor* descriptor, DeoptimizeKind kind,
- DeoptimizeReason reason)
- : descriptor_(descriptor), kind_(kind), reason_(reason) {}
+ DeoptimizeReason reason, VectorSlotPair const& feedback)
+ : descriptor_(descriptor),
+ kind_(kind),
+ reason_(reason),
+ feedback_(feedback) {}
FrameStateDescriptor* descriptor() const { return descriptor_; }
DeoptimizeKind kind() const { return kind_; }
DeoptimizeReason reason() const { return reason_; }
+ VectorSlotPair const& feedback() const { return feedback_; }
private:
FrameStateDescriptor* descriptor_ = nullptr;
DeoptimizeKind kind_ = DeoptimizeKind::kEager;
- DeoptimizeReason reason_ = DeoptimizeReason::kNoReason;
+ DeoptimizeReason reason_ = DeoptimizeReason::kUnknown;
+ VectorSlotPair feedback_ = VectorSlotPair();
};
typedef ZoneVector<DeoptimizationEntry> DeoptimizationVector;
@@ -1586,7 +1591,8 @@ class V8_EXPORT_PRIVATE InstructionSequence final
}
int AddDeoptimizationEntry(FrameStateDescriptor* descriptor,
- DeoptimizeKind kind, DeoptimizeReason reason);
+ DeoptimizeKind kind, DeoptimizeReason reason,
+ VectorSlotPair const& feedback);
DeoptimizationEntry const& GetDeoptimizationEntry(int deoptimization_id);
int GetDeoptimizationEntryCount() const {
return static_cast<int>(deoptimization_entries_.size());
diff --git a/deps/v8/src/compiler/int64-lowering.cc b/deps/v8/src/compiler/int64-lowering.cc
index 042d9e0ef7..940f0904b3 100644
--- a/deps/v8/src/compiler/int64-lowering.cc
+++ b/deps/v8/src/compiler/int64-lowering.cc
@@ -316,9 +316,10 @@ void Int64Lowering::LowerNode(Node* node) {
case IrOpcode::kTailCall: {
CallDescriptor* descriptor =
const_cast<CallDescriptor*>(CallDescriptorOf(node->op()));
- if (DefaultLowering(node) ||
- (descriptor->ReturnCount() == 1 &&
- descriptor->GetReturnType(0) == MachineType::Int64())) {
+ bool returns_require_lowering =
+ GetReturnCountAfterLowering(descriptor) !=
+ static_cast<int>(descriptor->ReturnCount());
+ if (DefaultLowering(node) || returns_require_lowering) {
// Tail calls do not have return values, so adjusting the call
// descriptor is enough.
auto new_descriptor = GetI32WasmCallDescriptor(zone(), descriptor);
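
The old test only caught the single-Int64-return case; comparing return counts before and after lowering generalizes it. For instance, a hypothetical descriptor with one Int64 return lowers to two Int32 returns on a 32-bit target:

    // Sketch of the predicate for an assumed (i64) -> i64 signature:
    //   GetReturnCountAfterLowering(descriptor) == 2   // i64 -> {low, high}
    //   descriptor->ReturnCount()               == 1
    //   => returns_require_lowering == true, so the i32 descriptor is used.
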
@@ -688,7 +689,7 @@ void Int64Lowering::LowerNode(Node* node) {
Int32Matcher m(shift);
if (m.HasValue()) {
// Precondition: 0 <= shift < 64.
- int32_t shift_value = m.Value() & 0x3f;
+ int32_t shift_value = m.Value() & 0x3F;
if (shift_value == 0) {
ReplaceNode(node, GetReplacementLow(input),
GetReplacementHigh(input));
@@ -705,7 +706,7 @@ void Int64Lowering::LowerNode(Node* node) {
low_input = GetReplacementHigh(input);
high_input = GetReplacementLow(input);
}
- int32_t masked_shift_value = shift_value & 0x1f;
+ int32_t masked_shift_value = shift_value & 0x1F;
Node* masked_shift =
graph()->NewNode(common()->Int32Constant(masked_shift_value));
Node* inv_shift = graph()->NewNode(
@@ -726,7 +727,7 @@ void Int64Lowering::LowerNode(Node* node) {
if (!machine()->Word32ShiftIsSafe()) {
safe_shift =
graph()->NewNode(machine()->Word32And(), shift,
- graph()->NewNode(common()->Int32Constant(0x1f)));
+ graph()->NewNode(common()->Int32Constant(0x1F)));
}
// By creating this bit-mask with SAR and SHL we do not have to deal
@@ -750,7 +751,7 @@ void Int64Lowering::LowerNode(Node* node) {
if (machine()->Word32ShiftIsSafe()) {
masked_shift6 =
graph()->NewNode(machine()->Word32And(), shift,
- graph()->NewNode(common()->Int32Constant(0x3f)));
+ graph()->NewNode(common()->Int32Constant(0x3F)));
}
Diamond lt32(
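
These hunks only re-case hex literals (0x3f to 0x3F and friends), but the masks themselves encode the 64-to-32-bit shift lowering: the shift amount is reduced modulo 64 with 0x3F, bit 5 (0x20) decides whether the two 32-bit halves trade places, and the low five bits (0x1F) drive the in-word shift. A worked instance for a right shift by 40:

    int32_t shift_value = 40 & 0x3F;           // 40: already in [0, 63]
    bool crosses_word = (shift_value & 0x20);  // true: halves swap roles
    int32_t masked = shift_value & 0x1F;       // 8: shift within one word
    // The low word of the result comes from high_input >> 8; the high word
    // is filled by the sign (SAR) or zero (SHR) extension of high_input.
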
diff --git a/deps/v8/src/compiler/js-builtin-reducer.cc b/deps/v8/src/compiler/js-builtin-reducer.cc
index df6fdba3f0..7ff2bf6d5e 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.cc
+++ b/deps/v8/src/compiler/js-builtin-reducer.cc
@@ -109,49 +109,22 @@ JSBuiltinReducer::JSBuiltinReducer(Editor* editor, JSGraph* jsgraph,
namespace {
-MaybeHandle<Map> GetMapWitness(Node* node) {
+Maybe<InstanceType> GetInstanceTypeWitness(Node* node) {
ZoneHandleSet<Map> maps;
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
NodeProperties::InferReceiverMapsResult result =
NodeProperties::InferReceiverMaps(receiver, effect, &maps);
- if (result == NodeProperties::kReliableReceiverMaps && maps.size() == 1) {
- return maps[0];
- }
- return MaybeHandle<Map>();
-}
-// TODO(turbofan): This was copied from Crankshaft, might be too restrictive.
-bool IsReadOnlyLengthDescriptor(Handle<Map> jsarray_map) {
- DCHECK(!jsarray_map->is_dictionary_map());
- Isolate* isolate = jsarray_map->GetIsolate();
- Handle<Name> length_string = isolate->factory()->length_string();
- DescriptorArray* descriptors = jsarray_map->instance_descriptors();
- int number =
- descriptors->SearchWithCache(isolate, *length_string, *jsarray_map);
- DCHECK_NE(DescriptorArray::kNotFound, number);
- return descriptors->GetDetails(number).IsReadOnly();
-}
+ if (result == NodeProperties::kNoReceiverMaps || maps.size() == 0) {
+ return Nothing<InstanceType>();
+ }
-// TODO(turbofan): This was copied from Crankshaft, might be too restrictive.
-bool CanInlineArrayResizeOperation(Handle<Map> receiver_map) {
- Isolate* const isolate = receiver_map->GetIsolate();
- if (!receiver_map->prototype()->IsJSArray()) return false;
- Handle<JSArray> receiver_prototype(JSArray::cast(receiver_map->prototype()),
- isolate);
- // Ensure that all prototypes of the {receiver} are stable.
- for (PrototypeIterator it(isolate, receiver_prototype, kStartAtReceiver);
- !it.IsAtEnd(); it.Advance()) {
- Handle<JSReceiver> current = PrototypeIterator::GetCurrent<JSReceiver>(it);
- if (!current->map()->is_stable()) return false;
+ InstanceType first_type = maps[0]->instance_type();
+ for (const Handle<Map>& map : maps) {
+ if (map->instance_type() != first_type) return Nothing<InstanceType>();
}
- return receiver_map->instance_type() == JS_ARRAY_TYPE &&
- IsFastElementsKind(receiver_map->elements_kind()) &&
- !receiver_map->is_dictionary_map() && receiver_map->is_extensible() &&
- (!receiver_map->is_prototype_map() || receiver_map->is_stable()) &&
- isolate->IsNoElementsProtectorIntact() &&
- isolate->IsAnyInitialArrayPrototype(receiver_prototype) &&
- !IsReadOnlyLengthDescriptor(receiver_map);
+ return Just(first_type);
}
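
The witness is widened from a single reliable map to any set of maps that agree on instance type. For the iterator reductions below this is sufficient because the instance type alone determines the elements kind, via JSArrayIterator::ElementsKindForInstanceType, so polymorphic receivers that share an iterator type can now be reduced too. In outline:

    Maybe<InstanceType> witness = GetInstanceTypeWitness(node);
    if (witness.IsJust()) {
      ElementsKind kind =
          JSArrayIterator::ElementsKindForInstanceType(witness.FromJust());
      // feeds ReduceFastArrayIteratorNext / ReduceTypedArrayIteratorNext
    }
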
bool CanInlineJSArrayIteration(Handle<Map> receiver_map) {
@@ -189,7 +162,7 @@ bool CanInlineJSArrayIteration(Handle<Map> receiver_map) {
Reduction JSBuiltinReducer::ReduceArrayIterator(Node* node,
IterationKind kind) {
Handle<Map> receiver_map;
- if (GetMapWitness(node).ToHandle(&receiver_map)) {
+ if (NodeProperties::GetMapWitness(node).ToHandle(&receiver_map)) {
return ReduceArrayIterator(receiver_map, node, kind,
ArrayIteratorKind::kArray);
}
@@ -199,7 +172,7 @@ Reduction JSBuiltinReducer::ReduceArrayIterator(Node* node,
Reduction JSBuiltinReducer::ReduceTypedArrayIterator(Node* node,
IterationKind kind) {
Handle<Map> receiver_map;
- if (GetMapWitness(node).ToHandle(&receiver_map) &&
+ if (NodeProperties::GetMapWitness(node).ToHandle(&receiver_map) &&
receiver_map->instance_type() == JS_TYPED_ARRAY_TYPE) {
return ReduceArrayIterator(receiver_map, node, kind,
ArrayIteratorKind::kTypedArray);
@@ -313,8 +286,9 @@ Reduction JSBuiltinReducer::ReduceArrayIterator(Handle<Map> receiver_map,
return Replace(value);
}
-Reduction JSBuiltinReducer::ReduceFastArrayIteratorNext(
- Handle<Map> iterator_map, Node* node, IterationKind kind) {
+Reduction JSBuiltinReducer::ReduceFastArrayIteratorNext(InstanceType type,
+ Node* node,
+ IterationKind kind) {
Node* iterator = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
@@ -327,8 +301,8 @@ Reduction JSBuiltinReducer::ReduceFastArrayIteratorNext(
return NoChange();
}
- ElementsKind elements_kind = JSArrayIterator::ElementsKindForInstanceType(
- iterator_map->instance_type());
+ ElementsKind elements_kind =
+ JSArrayIterator::ElementsKindForInstanceType(type);
if (IsHoleyElementsKind(elements_kind)) {
if (!isolate()->IsNoElementsProtectorIntact()) {
@@ -484,15 +458,16 @@ Reduction JSBuiltinReducer::ReduceFastArrayIteratorNext(
return Replace(value);
}
-Reduction JSBuiltinReducer::ReduceTypedArrayIteratorNext(
- Handle<Map> iterator_map, Node* node, IterationKind kind) {
+Reduction JSBuiltinReducer::ReduceTypedArrayIteratorNext(InstanceType type,
+ Node* node,
+ IterationKind kind) {
Node* iterator = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* context = NodeProperties::GetContextInput(node);
- ElementsKind elements_kind = JSArrayIterator::ElementsKindForInstanceType(
- iterator_map->instance_type());
+ ElementsKind elements_kind =
+ JSArrayIterator::ElementsKindForInstanceType(type);
Node* array = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSArrayIteratorObject()),
@@ -725,65 +700,58 @@ Reduction JSBuiltinReducer::ReduceTypedArrayToStringTag(Node* node) {
}
Reduction JSBuiltinReducer::ReduceArrayIteratorNext(Node* node) {
- Handle<Map> receiver_map;
- if (GetMapWitness(node).ToHandle(&receiver_map)) {
- switch (receiver_map->instance_type()) {
- case JS_TYPED_ARRAY_KEY_ITERATOR_TYPE:
- return ReduceTypedArrayIteratorNext(receiver_map, node,
- IterationKind::kKeys);
-
- case JS_FAST_ARRAY_KEY_ITERATOR_TYPE:
- return ReduceFastArrayIteratorNext(receiver_map, node,
- IterationKind::kKeys);
-
- case JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- return ReduceTypedArrayIteratorNext(receiver_map, node,
- IterationKind::kEntries);
-
- case JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- return ReduceFastArrayIteratorNext(receiver_map, node,
- IterationKind::kEntries);
-
- case JS_INT8_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_INT16_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_INT32_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE:
- return ReduceTypedArrayIteratorNext(receiver_map, node,
- IterationKind::kValues);
-
- case JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FAST_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
- return ReduceFastArrayIteratorNext(receiver_map, node,
- IterationKind::kValues);
-
- default:
- // Slow array iterators are not reduced
- return NoChange();
- }
+ Maybe<InstanceType> maybe_type = GetInstanceTypeWitness(node);
+ if (!maybe_type.IsJust()) return NoChange();
+ InstanceType type = maybe_type.FromJust();
+ switch (type) {
+ case JS_TYPED_ARRAY_KEY_ITERATOR_TYPE:
+ return ReduceTypedArrayIteratorNext(type, node, IterationKind::kKeys);
+
+ case JS_FAST_ARRAY_KEY_ITERATOR_TYPE:
+ return ReduceFastArrayIteratorNext(type, node, IterationKind::kKeys);
+
+ case JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ return ReduceTypedArrayIteratorNext(type, node, IterationKind::kEntries);
+
+ case JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ return ReduceFastArrayIteratorNext(type, node, IterationKind::kEntries);
+
+ case JS_INT8_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_INT16_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_INT32_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE:
+ return ReduceTypedArrayIteratorNext(type, node, IterationKind::kValues);
+
+ case JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
+ return ReduceFastArrayIteratorNext(type, node, IterationKind::kValues);
+
+ default:
+ // Slow array iterators are not reduced
+ return NoChange();
}
- return NoChange();
}
// ES6 section 22.1.2.2 Array.isArray ( arg )
@@ -896,398 +864,6 @@ Reduction JSBuiltinReducer::ReduceArrayIsArray(Node* node) {
return Replace(value);
}
-// ES6 section 22.1.3.17 Array.prototype.pop ( )
-Reduction JSBuiltinReducer::ReduceArrayPop(Node* node) {
- Handle<Map> receiver_map;
- Node* receiver = NodeProperties::GetValueInput(node, 1);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- // TODO(turbofan): Extend this to also handle fast holey double elements
- // once we got the hole NaN mess sorted out in TurboFan/V8.
- if (GetMapWitness(node).ToHandle(&receiver_map) &&
- CanInlineArrayResizeOperation(receiver_map) &&
- receiver_map->elements_kind() != HOLEY_DOUBLE_ELEMENTS) {
- // Install code dependencies on the {receiver} prototype maps and the
- // global array protector cell.
- dependencies()->AssumePropertyCell(factory()->no_elements_protector());
- dependencies()->AssumePrototypeMapsStable(receiver_map);
-
- // Load the "length" property of the {receiver}.
- Node* length = effect = graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForJSArrayLength(receiver_map->elements_kind())),
- receiver, effect, control);
-
- // Check if the {receiver} has any elements.
- Node* check = graph()->NewNode(simplified()->NumberEqual(), length,
- jsgraph()->ZeroConstant());
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
-
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- Node* vtrue = jsgraph()->UndefinedConstant();
-
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- Node* vfalse;
- {
- // TODO(tebbi): We should trim the backing store if the capacity is too
- // big, as implemented in elements.cc:ElementsAccessorBase::SetLengthImpl.
-
- // Load the elements backing store from the {receiver}.
- Node* elements = efalse = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
- receiver, efalse, if_false);
-
- // Ensure that we aren't popping from a copy-on-write backing store.
- if (IsSmiOrObjectElementsKind(receiver_map->elements_kind())) {
- elements = efalse =
- graph()->NewNode(simplified()->EnsureWritableFastElements(),
- receiver, elements, efalse, if_false);
- }
-
- // Compute the new {length}.
- length = graph()->NewNode(simplified()->NumberSubtract(), length,
- jsgraph()->OneConstant());
-
- // Store the new {length} to the {receiver}.
- efalse = graph()->NewNode(
- simplified()->StoreField(
- AccessBuilder::ForJSArrayLength(receiver_map->elements_kind())),
- receiver, length, efalse, if_false);
-
- // Load the last entry from the {elements}.
- vfalse = efalse = graph()->NewNode(
- simplified()->LoadElement(AccessBuilder::ForFixedArrayElement(
- receiver_map->elements_kind())),
- elements, length, efalse, if_false);
-
- // Store a hole to the element we just removed from the {receiver}.
- efalse = graph()->NewNode(
- simplified()->StoreElement(AccessBuilder::ForFixedArrayElement(
- GetHoleyElementsKind(receiver_map->elements_kind()))),
- elements, length, jsgraph()->TheHoleConstant(), efalse, if_false);
- }
-
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
- Node* value =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue, vfalse, control);
-
- // Convert the hole to undefined. Do this last, so that we can optimize
- // conversion operator via some smart strength reduction in many cases.
- if (IsHoleyElementsKind(receiver_map->elements_kind())) {
- value =
- graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(), value);
- }
-
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 22.1.3.18 Array.prototype.push ( )
-Reduction JSBuiltinReducer::ReduceArrayPush(Node* node) {
- DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
- int const num_values = node->op()->ValueInputCount() - 2;
- Node* receiver = NodeProperties::GetValueInput(node, 1);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- ZoneHandleSet<Map> receiver_maps;
- NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
- if (receiver_maps.size() != 1) return NoChange();
- DCHECK_NE(NodeProperties::kNoReceiverMaps, result);
-
- // TODO(turbofan): Relax this to deal with multiple {receiver} maps.
- Handle<Map> receiver_map = receiver_maps[0];
- if (CanInlineArrayResizeOperation(receiver_map)) {
- // Collect the value inputs to push.
- std::vector<Node*> values(num_values);
- for (int i = 0; i < num_values; ++i) {
- values[i] = NodeProperties::GetValueInput(node, 2 + i);
- }
-
- // Install code dependencies on the {receiver} prototype maps and the
- // global array protector cell.
- dependencies()->AssumePropertyCell(factory()->no_elements_protector());
- dependencies()->AssumePrototypeMapsStable(receiver_map);
-
- // If the {receiver_maps} information is not reliable, we need
- // to check that the {receiver} still has one of these maps.
- if (result == NodeProperties::kUnreliableReceiverMaps) {
- if (receiver_map->is_stable()) {
- dependencies()->AssumeMapStable(receiver_map);
- } else {
- // TODO(turbofan): This is a potential - yet unlikely - deoptimization
- // loop, since we might not learn from this deoptimization in baseline
- // code. We need a way to learn from deoptimizations in optimized code
- // to address these problems.
- effect = graph()->NewNode(
- simplified()->CheckMaps(CheckMapsFlag::kNone, receiver_maps),
- receiver, effect, control);
- }
- }
-
- // TODO(turbofan): Perform type checks on the {values}. We are not
- // guaranteed to learn from these checks in case they fail, as the witness
- // (i.e. the map check from the LoadIC for a.push) might not be executed in
- // baseline code (after we stored the value in the builtin and thereby
- // changed the elements kind of a) before we decide to optimize this
- // function again. We currently don't have a proper way to deal with this;
- // the proper solution here is to learn on deopt, i.e. disable
- // Array.prototype.push inlining for this function.
- for (auto& value : values) {
- if (IsSmiElementsKind(receiver_map->elements_kind())) {
- value = effect =
- graph()->NewNode(simplified()->CheckSmi(), value, effect, control);
- } else if (IsDoubleElementsKind(receiver_map->elements_kind())) {
- value = effect = graph()->NewNode(simplified()->CheckNumber(), value,
- effect, control);
- // Make sure we do not store signaling NaNs into double arrays.
- value = graph()->NewNode(simplified()->NumberSilenceNaN(), value);
- }
- }
-
- // Load the "length" property of the {receiver}.
- Node* length = effect = graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForJSArrayLength(receiver_map->elements_kind())),
- receiver, effect, control);
- Node* value = length;
-
- // Check if we have any {values} to push.
- if (num_values > 0) {
- // Compute the resulting "length" of the {receiver}.
- Node* new_length = value = graph()->NewNode(
- simplified()->NumberAdd(), length, jsgraph()->Constant(num_values));
-
- // Load the elements backing store of the {receiver}.
- Node* elements = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
- receiver, effect, control);
- Node* elements_length = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
- elements, effect, control);
-
- // TODO(turbofan): Check if we need to grow the {elements} backing store.
- // This will deopt if we cannot grow the array further, and we currently
- // don't necessarily learn from it. See the comment on the value type
- // check above.
- GrowFastElementsMode mode =
- IsDoubleElementsKind(receiver_map->elements_kind())
- ? GrowFastElementsMode::kDoubleElements
- : GrowFastElementsMode::kSmiOrObjectElements;
- elements = effect = graph()->NewNode(
- simplified()->MaybeGrowFastElements(mode), receiver, elements,
- graph()->NewNode(simplified()->NumberAdd(), length,
- jsgraph()->Constant(num_values - 1)),
- elements_length, effect, control);
-
- // Update the JSArray::length field. Since this is observable,
- // there must be no other check after this.
- effect = graph()->NewNode(
- simplified()->StoreField(
- AccessBuilder::ForJSArrayLength(receiver_map->elements_kind())),
- receiver, new_length, effect, control);
-
- // Append the {values} to the {elements}.
- for (int i = 0; i < num_values; ++i) {
- Node* value = values[i];
- Node* index = graph()->NewNode(simplified()->NumberAdd(), length,
- jsgraph()->Constant(i));
- effect = graph()->NewNode(
- simplified()->StoreElement(AccessBuilder::ForFixedArrayElement(
- receiver_map->elements_kind())),
- elements, index, value, effect, control);
- }
- }
-
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 22.1.3.22 Array.prototype.shift ( )
-Reduction JSBuiltinReducer::ReduceArrayShift(Node* node) {
- Node* target = NodeProperties::GetValueInput(node, 0);
- Node* receiver = NodeProperties::GetValueInput(node, 1);
- Node* context = NodeProperties::GetContextInput(node);
- Node* frame_state = NodeProperties::GetFrameStateInput(node);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
-
- // TODO(turbofan): Extend this to also handle fast holey double elements
- // once we have the hole NaN mess sorted out in TurboFan/V8.
- Handle<Map> receiver_map;
- if (GetMapWitness(node).ToHandle(&receiver_map) &&
- CanInlineArrayResizeOperation(receiver_map) &&
- receiver_map->elements_kind() != HOLEY_DOUBLE_ELEMENTS) {
- // Install code dependencies on the {receiver} prototype maps and the
- // global array protector cell.
- dependencies()->AssumePropertyCell(factory()->no_elements_protector());
- dependencies()->AssumePrototypeMapsStable(receiver_map);
-
- // Load length of the {receiver}.
- Node* length = effect = graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForJSArrayLength(receiver_map->elements_kind())),
- receiver, effect, control);
-
- // Return undefined if {receiver} has no elements.
- Node* check0 = graph()->NewNode(simplified()->NumberEqual(), length,
- jsgraph()->ZeroConstant());
- Node* branch0 =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control);
-
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* etrue0 = effect;
- Node* vtrue0 = jsgraph()->UndefinedConstant();
-
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* efalse0 = effect;
- Node* vfalse0;
- {
- // Check if we should take the fast-path.
- Node* check1 =
- graph()->NewNode(simplified()->NumberLessThanOrEqual(), length,
- jsgraph()->Constant(JSArray::kMaxCopyElements));
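- // Arrays longer than JSArray::kMaxCopyElements are handled by the
- // generic C++ builtin call in the else-branch below.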
- Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check1, if_false0);
-
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* etrue1 = efalse0;
- Node* vtrue1;
- {
- Node* elements = etrue1 = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
- receiver, etrue1, if_true1);
-
- // Load the first element here, which we return below.
- vtrue1 = etrue1 = graph()->NewNode(
- simplified()->LoadElement(AccessBuilder::ForFixedArrayElement(
- receiver_map->elements_kind())),
- elements, jsgraph()->ZeroConstant(), etrue1, if_true1);
-
- // Ensure that we aren't shifting a copy-on-write backing store.
- if (IsSmiOrObjectElementsKind(receiver_map->elements_kind())) {
- elements = etrue1 =
- graph()->NewNode(simplified()->EnsureWritableFastElements(),
- receiver, elements, etrue1, if_true1);
- }
-
- // Shift the remaining {elements} by one towards the start.
- Node* loop = graph()->NewNode(common()->Loop(2), if_true1, if_true1);
- Node* eloop =
- graph()->NewNode(common()->EffectPhi(2), etrue1, etrue1, loop);
- Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
- NodeProperties::MergeControlToEnd(graph(), common(), terminate);
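- // Note: the second input of the {index} Phi below is a placeholder; it
- // is replaced with {index} + 1 once the loop body has been built.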
- Node* index = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTagged, 2),
- jsgraph()->OneConstant(),
- jsgraph()->Constant(JSArray::kMaxCopyElements - 1), loop);
-
- {
- Node* check2 =
- graph()->NewNode(simplified()->NumberLessThan(), index, length);
- Node* branch2 = graph()->NewNode(common()->Branch(), check2, loop);
-
- if_true1 = graph()->NewNode(common()->IfFalse(), branch2);
- etrue1 = eloop;
-
- Node* control = graph()->NewNode(common()->IfTrue(), branch2);
- Node* effect = etrue1;
-
- ElementAccess const access = AccessBuilder::ForFixedArrayElement(
- receiver_map->elements_kind());
- Node* value = effect =
- graph()->NewNode(simplified()->LoadElement(access), elements,
- index, effect, control);
- effect = graph()->NewNode(
- simplified()->StoreElement(access), elements,
- graph()->NewNode(simplified()->NumberSubtract(), index,
- jsgraph()->OneConstant()),
- value, effect, control);
-
- loop->ReplaceInput(1, control);
- eloop->ReplaceInput(1, effect);
- index->ReplaceInput(1,
- graph()->NewNode(simplified()->NumberAdd(), index,
- jsgraph()->OneConstant()));
- }
-
- // Compute the new {length}.
- length = graph()->NewNode(simplified()->NumberSubtract(), length,
- jsgraph()->OneConstant());
-
- // Store the new {length} to the {receiver}.
- etrue1 = graph()->NewNode(
- simplified()->StoreField(
- AccessBuilder::ForJSArrayLength(receiver_map->elements_kind())),
- receiver, length, etrue1, if_true1);
-
- // Store a hole to the element we just removed from the {receiver}.
- etrue1 = graph()->NewNode(
- simplified()->StoreElement(AccessBuilder::ForFixedArrayElement(
- GetHoleyElementsKind(receiver_map->elements_kind()))),
- elements, length, jsgraph()->TheHoleConstant(), etrue1, if_true1);
- }
-
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* efalse1 = efalse0;
- Node* vfalse1;
- {
- // Call the generic C++ implementation.
- const int builtin_index = Builtins::kArrayShift;
- CallDescriptor const* const desc = Linkage::GetCEntryStubCallDescriptor(
- graph()->zone(), 1, BuiltinArguments::kNumExtraArgsWithReceiver,
- Builtins::name(builtin_index), node->op()->properties(),
- CallDescriptor::kNeedsFrameState);
- Node* stub_code = jsgraph()->CEntryStubConstant(1, kDontSaveFPRegs,
- kArgvOnStack, true);
- Address builtin_entry = Builtins::CppEntryOf(builtin_index);
- Node* entry = jsgraph()->ExternalConstant(
- ExternalReference(builtin_entry, isolate()));
- Node* argc =
- jsgraph()->Constant(BuiltinArguments::kNumExtraArgsWithReceiver);
- if_false1 = efalse1 = vfalse1 =
- graph()->NewNode(common()->Call(desc), stub_code, receiver,
- jsgraph()->PaddingConstant(), argc, target,
- jsgraph()->UndefinedConstant(), entry, argc,
- context, frame_state, efalse1, if_false1);
- }
-
- if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- efalse0 =
- graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
- vfalse0 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue1, vfalse1, if_false0);
- }
-
- control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
- Node* value =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue0, vfalse0, control);
-
- // Convert the hole to undefined. Do this last, so that we can optimize the
- // conversion operator via some smart strength reduction in many cases.
- if (IsHoleyElementsKind(receiver_map->elements_kind())) {
- value =
- graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(), value);
- }
-
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- }
- return NoChange();
-}
-
namespace {
bool HasInstanceTypeWitness(Node* receiver, Node* effect,
@@ -1451,6 +1027,7 @@ Reduction JSBuiltinReducer::ReduceCollectionIteratorNext(
index = effect = graph()->NewNode(
common()->Call(desc), jsgraph()->HeapConstant(callable.code()), table,
index, jsgraph()->NoContextConstant(), effect);
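+ // The Call node above is created during reduction and thus carries no
+ // type; annotate {index} explicitly (presumably for later typed phases).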
+ NodeProperties::SetType(index, type_cache_.kFixedArrayLengthType);
// Update the {index} and {table} on the {receiver}.
effect = graph()->NewNode(
@@ -1562,8 +1139,9 @@ Reduction JSBuiltinReducer::ReduceCollectionIteratorNext(
// Abort loop with resulting value.
Node* control = graph()->NewNode(common()->IfFalse(), branch1);
Node* effect = etrue0;
- Node* value = graph()->NewNode(
- common()->TypeGuard(Type::NonInternal()), entry_key, control);
+ Node* value = effect =
+ graph()->NewNode(common()->TypeGuard(Type::NonInternal()),
+ entry_key, effect, control);
Node* done = jsgraph()->FalseConstant();
// Advance the index on the {receiver}.
@@ -2369,122 +1947,6 @@ Node* GetStringWitness(Node* node) {
} // namespace
-// ES6 section 21.1.3.1 String.prototype.charAt ( pos )
-Reduction JSBuiltinReducer::ReduceStringCharAt(Node* node) {
- // We need at least target, receiver and index parameters.
- if (node->op()->ValueInputCount() >= 3) {
- Node* index = NodeProperties::GetValueInput(node, 2);
- Type* index_type = NodeProperties::GetType(index);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
-
- if (index_type->Is(Type::Integral32OrMinusZeroOrNaN())) {
- if (Node* receiver = GetStringWitness(node)) {
- if (!index_type->Is(Type::Unsigned32())) {
- // Map -0 and NaN to 0 (as per ToInteger), and the values in
- // the [-2^31,-1] range to the [2^31,2^32-1] range, which will
- // be considered out-of-bounds as well, because of the maximal
- // String length limit in V8.
- STATIC_ASSERT(String::kMaxLength <= kMaxInt);
- index = graph()->NewNode(simplified()->NumberToUint32(), index);
- }
-
- // Determine the {receiver} length.
- Node* receiver_length = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForStringLength()), receiver,
- effect, control);
-
- // Check if {index} is less than {receiver} length.
- Node* check = graph()->NewNode(simplified()->NumberLessThan(), index,
- receiver_length);
- Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check, control);
-
- // Return the character from the {receiver} as single character string.
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-
- Node* masked_index = graph()->NewNode(
- simplified()->MaskIndexWithBound(), index, receiver_length);
-
- Node* vtrue = graph()->NewNode(simplified()->StringCharAt(), receiver,
- masked_index, if_true);
-
- // Return the empty string otherwise.
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* vfalse = jsgraph()->EmptyStringConstant();
-
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* value =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue, vfalse, control);
-
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- }
- }
- }
-
- return NoChange();
-}
-
-// ES6 section 21.1.3.2 String.prototype.charCodeAt ( pos )
-Reduction JSBuiltinReducer::ReduceStringCharCodeAt(Node* node) {
- // We need at least target, receiver and index parameters.
- if (node->op()->ValueInputCount() >= 3) {
- Node* index = NodeProperties::GetValueInput(node, 2);
- Type* index_type = NodeProperties::GetType(index);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
-
- if (index_type->Is(Type::Integral32OrMinusZeroOrNaN())) {
- if (Node* receiver = GetStringWitness(node)) {
- if (!index_type->Is(Type::Unsigned32())) {
- // Map -0 and NaN to 0 (as per ToInteger), and the values in
- // the [-2^31,-1] range to the [2^31,2^32-1] range, which will
- // be considered out-of-bounds as well, because of the maximal
- // String length limit in V8.
- STATIC_ASSERT(String::kMaxLength <= kMaxInt);
- index = graph()->NewNode(simplified()->NumberToUint32(), index);
- }
-
- // Determine the {receiver} length.
- Node* receiver_length = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForStringLength()), receiver,
- effect, control);
-
- // Check if {index} is less than {receiver} length.
- Node* check = graph()->NewNode(simplified()->NumberLessThan(), index,
- receiver_length);
- Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check, control);
-
- // Load the character from the {receiver}.
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-
- Node* masked_index = graph()->NewNode(
- simplified()->MaskIndexWithBound(), index, receiver_length);
-
- Node* vtrue = graph()->NewNode(simplified()->StringCharCodeAt(),
- receiver, masked_index, if_true);
-
- // Return NaN otherwise.
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* vfalse = jsgraph()->NaNConstant();
-
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* value =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue, vfalse, control);
-
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- }
- }
- }
-
- return NoChange();
-}
-
// ES6 String.prototype.concat(...args)
// #sec-string.prototype.concat
Reduction JSBuiltinReducer::ReduceStringConcat(Node* node) {
@@ -2516,34 +1978,6 @@ Reduction JSBuiltinReducer::ReduceStringConcat(Node* node) {
return NoChange();
}
-// ES6 String.prototype.indexOf(searchString [, position])
-// #sec-string.prototype.indexof
-Reduction JSBuiltinReducer::ReduceStringIndexOf(Node* node) {
- // We need at least target, receiver and search_string parameters.
- if (node->op()->ValueInputCount() >= 3) {
- Node* search_string = NodeProperties::GetValueInput(node, 2);
- Type* search_string_type = NodeProperties::GetType(search_string);
- Node* position = (node->op()->ValueInputCount() >= 4)
- ? NodeProperties::GetValueInput(node, 3)
- : jsgraph()->ZeroConstant();
- Type* position_type = NodeProperties::GetType(position);
-
- if (search_string_type->Is(Type::String()) &&
- position_type->Is(Type::SignedSmall())) {
- if (Node* receiver = GetStringWitness(node)) {
- RelaxEffectsAndControls(node);
- node->ReplaceInput(0, receiver);
- node->ReplaceInput(1, search_string);
- node->ReplaceInput(2, position);
- node->TrimInputCount(3);
- NodeProperties::ChangeOp(node, simplified()->StringIndexOf());
- return Changed(node);
- }
- }
- }
- return NoChange();
-}
-
Reduction JSBuiltinReducer::ReduceStringIterator(Node* node) {
if (Node* receiver = GetStringWitness(node)) {
Node* effect = NodeProperties::GetEffectInput(node);
@@ -2584,9 +2018,7 @@ Reduction JSBuiltinReducer::ReduceStringIteratorNext(Node* node) {
Node* index = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSStringIteratorIndex()),
receiver, effect, control);
- Node* length = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForStringLength()), string,
- effect, control);
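+ // StringLength is a pure operator, so it needs no effect chain here.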
+ Node* length = graph()->NewNode(simplified()->StringLength(), string);
// branch0: if (index < length)
Node* check0 =
@@ -2677,9 +2109,8 @@ Reduction JSBuiltinReducer::ReduceStringIteratorNext(Node* node) {
simplified()->StringFromCodePoint(UnicodeEncoding::UTF16), vtrue0);
// Update iterator.[[NextIndex]]
- Node* char_length = etrue0 = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForStringLength()), vtrue0,
- etrue0, if_true0);
+ Node* char_length =
+ graph()->NewNode(simplified()->StringLength(), vtrue0);
index = graph()->NewNode(simplified()->NumberAdd(), index, char_length);
etrue0 = graph()->NewNode(
simplified()->StoreField(AccessBuilder::ForJSStringIteratorIndex()),
@@ -2728,9 +2159,8 @@ Reduction JSBuiltinReducer::ReduceStringSlice(Node* node) {
if (start_type->Is(type_cache_.kSingletonMinusOne) &&
end_type->Is(Type::Undefined())) {
- Node* receiver_length = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForStringLength()), receiver,
- effect, control);
+ Node* receiver_length =
+ graph()->NewNode(simplified()->StringLength(), receiver);
Node* check =
graph()->NewNode(simplified()->NumberEqual(), receiver_length,
@@ -2855,12 +2285,6 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
return ReduceArrayIteratorNext(node);
case kArrayIsArray:
return ReduceArrayIsArray(node);
- case kArrayPop:
- return ReduceArrayPop(node);
- case kArrayPush:
- return ReduceArrayPush(node);
- case kArrayShift:
- return ReduceArrayShift(node);
case kDateNow:
return ReduceDateNow(node);
case kDateGetTime:
@@ -3024,14 +2448,8 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
case kStringFromCharCode:
reduction = ReduceStringFromCharCode(node);
break;
- case kStringCharAt:
- return ReduceStringCharAt(node);
- case kStringCharCodeAt:
- return ReduceStringCharCodeAt(node);
case kStringConcat:
return ReduceStringConcat(node);
- case kStringIndexOf:
- return ReduceStringIndexOf(node);
case kStringIterator:
return ReduceStringIterator(node);
case kStringIteratorNext:
diff --git a/deps/v8/src/compiler/js-builtin-reducer.h b/deps/v8/src/compiler/js-builtin-reducer.h
index 2b22b0ce7c..b3c44c7a0f 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.h
+++ b/deps/v8/src/compiler/js-builtin-reducer.h
@@ -47,15 +47,13 @@ class V8_EXPORT_PRIVATE JSBuiltinReducer final
IterationKind kind,
ArrayIteratorKind iter_kind);
Reduction ReduceArrayIteratorNext(Node* node);
- Reduction ReduceFastArrayIteratorNext(Handle<Map> iterator_map, Node* node,
+ Reduction ReduceFastArrayIteratorNext(InstanceType type, Node* node,
IterationKind kind);
- Reduction ReduceTypedArrayIteratorNext(Handle<Map> iterator_map, Node* node,
+ Reduction ReduceTypedArrayIteratorNext(InstanceType type, Node* node,
IterationKind kind);
Reduction ReduceTypedArrayToStringTag(Node* node);
Reduction ReduceArrayIsArray(Node* node);
- Reduction ReduceArrayPop(Node* node);
- Reduction ReduceArrayPush(Node* node);
- Reduction ReduceArrayShift(Node* node);
+
Reduction ReduceCollectionIterator(Node* node,
InstanceType collection_instance_type,
int collection_iterator_map_index);
@@ -110,11 +108,8 @@ class V8_EXPORT_PRIVATE JSBuiltinReducer final
Reduction ReduceNumberIsSafeInteger(Node* node);
Reduction ReduceNumberParseInt(Node* node);
Reduction ReduceObjectCreate(Node* node);
- Reduction ReduceStringCharAt(Node* node);
- Reduction ReduceStringCharCodeAt(Node* node);
Reduction ReduceStringConcat(Node* node);
Reduction ReduceStringFromCharCode(Node* node);
- Reduction ReduceStringIndexOf(Node* node);
Reduction ReduceStringIterator(Node* node);
Reduction ReduceStringIteratorNext(Node* node);
Reduction ReduceStringSlice(Node* node);
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index c595b360d5..1f8e7a2cef 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -5,6 +5,7 @@
#include "src/compiler/js-call-reducer.h"
#include "src/api.h"
+#include "src/builtins/builtins-utils.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/compilation-dependencies.h"
@@ -17,6 +18,7 @@
#include "src/feedback-vector-inl.h"
#include "src/ic/call-optimization.h"
#include "src/objects-inl.h"
+#include "src/vector-slot-pair.h"
namespace v8 {
namespace internal {
@@ -90,20 +92,6 @@ Reduction JSCallReducer::ReduceBooleanConstructor(Node* node) {
return Replace(value);
}
-// ES6 section 20.1.1 The Number Constructor
-Reduction JSCallReducer::ReduceNumberConstructor(Node* node) {
- DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
- CallParameters const& p = CallParametersOf(node->op());
-
- // Turn the {node} into a {JSToNumber} call.
- DCHECK_LE(2u, p.arity());
- Node* value = (p.arity() == 2) ? jsgraph()->ZeroConstant()
- : NodeProperties::GetValueInput(node, 2);
- NodeProperties::ReplaceValueInputs(node, value);
- NodeProperties::ChangeOp(node, javascript()->ToNumber());
- return Changed(node);
-}
-
// ES section #sec-object-constructor
Reduction JSCallReducer::ReduceObjectConstructor(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
@@ -549,7 +537,7 @@ Reduction JSCallReducer::ReduceObjectPrototypeHasOwnProperty(Node* node) {
Node* check = graph()->NewNode(simplified()->ReferenceEqual(),
receiver_map, cache_type);
effect = graph()->NewNode(
- simplified()->CheckIf(DeoptimizeReason::kNoReason), check, effect,
+ simplified()->CheckIf(DeoptimizeReason::kWrongMap), check, effect,
control);
}
Node* value = jsgraph()->TrueConstant();
@@ -804,15 +792,37 @@ bool CanInlineArrayIteratingBuiltin(Handle<Map> receiver_map) {
isolate->IsAnyInitialArrayPrototype(receiver_prototype);
}
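+// Builds the header of an inlined-builtin loop: a Loop node with its
+// EffectPhi and Terminate, plus a value Phi for the loop index {k}. The
+// second inputs are placeholders that WireInLoopEnd() patches once the
+// loop body has been built.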
+Node* JSCallReducer::WireInLoopStart(Node* k, Node** control, Node** effect) {
+ Node* loop = *control =
+ graph()->NewNode(common()->Loop(2), *control, *control);
+ Node* eloop = *effect =
+ graph()->NewNode(common()->EffectPhi(2), *effect, *effect, loop);
+ Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
+ NodeProperties::MergeControlToEnd(graph(), common(), terminate);
+ return graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2), k,
+ k, loop);
+}
+
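+// Patches the placeholder back edges of a loop header built by
+// WireInLoopStart() with the control, effect and index values produced
+// by the loop body.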
+void JSCallReducer::WireInLoopEnd(Node* loop, Node* eloop, Node* vloop, Node* k,
+ Node* control, Node* effect) {
+ loop->ReplaceInput(1, control);
+ vloop->ReplaceInput(1, k);
+ eloop->ReplaceInput(1, effect);
+}
+
Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
Node* node) {
if (!FLAG_turbo_inline_array_builtins) return NoChange();
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* context = NodeProperties::GetContextInput(node);
- CallParameters const& p = CallParametersOf(node->op());
// Try to determine the {receiver} map.
Node* receiver = NodeProperties::GetValueInput(node, 1);
@@ -825,10 +835,193 @@ Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
- if (result != NodeProperties::kReliableReceiverMaps) {
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+
+ // By ensuring that {kind} is object or double, we can be polymorphic
+ // on different elements kinds.
+ ElementsKind kind = receiver_maps[0]->elements_kind();
+ if (IsSmiElementsKind(kind)) {
+ kind = FastSmiToObjectElementsKind(kind);
+ }
+ for (Handle<Map> receiver_map : receiver_maps) {
+ ElementsKind next_kind = receiver_map->elements_kind();
+ if (!CanInlineArrayIteratingBuiltin(receiver_map)) {
+ return NoChange();
+ }
+ if (!IsFastElementsKind(next_kind)) {
+ return NoChange();
+ }
+ if (IsDoubleElementsKind(kind) != IsDoubleElementsKind(next_kind)) {
+ return NoChange();
+ }
+ if (IsHoleyElementsKind(next_kind)) {
+ kind = GetHoleyElementsKind(kind);
+ }
+ }
+
+ // Install code dependencies on the {receiver} prototype maps and the
+ // global array protector cell.
+ dependencies()->AssumePropertyCell(factory()->no_elements_protector());
+
+ // If we have unreliable maps, we need a map check.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+ }
+
+ Node* k = jsgraph()->ZeroConstant();
+
+ Node* original_length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
+ effect, control);
+
+ std::vector<Node*> checkpoint_params(
+ {receiver, fncallback, this_arg, k, original_length});
+ const int stack_parameters = static_cast<int>(checkpoint_params.size());
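+ // The order of {checkpoint_params} matches the stack parameters expected
+ // by the ForEach continuation builtins; slot 3 ({k}) is refreshed on each
+ // iteration below.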
+
+ // Check whether the given callback function is callable. Note that this has
+ // to happen outside the loop to make sure we also throw on empty arrays.
+ Node* check_frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, Builtins::kArrayForEachLoopLazyDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ outer_frame_state, ContinuationFrameStateMode::LAZY);
+ Node* check_fail = nullptr;
+ Node* check_throw = nullptr;
+ WireInCallbackIsCallableCheck(fncallback, context, check_frame_state, effect,
+ &control, &check_fail, &check_throw);
+
+ // Start the loop.
+ Node* vloop = k = WireInLoopStart(k, &control, &effect);
+ Node *loop = control, *eloop = effect;
+ checkpoint_params[3] = k;
+
+ Node* continue_test =
+ graph()->NewNode(simplified()->NumberLessThan(), k, original_length);
+ Node* continue_branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ continue_test, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), continue_branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), continue_branch);
+ control = if_true;
+
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, Builtins::kArrayForEachLoopEagerDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ outer_frame_state, ContinuationFrameStateMode::EAGER);
+
+ effect =
+ graph()->NewNode(common()->Checkpoint(), frame_state, effect, control);
+
+ // Make sure the map hasn't changed during the iteration
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+
+ Node* element =
+ SafeLoadElement(kind, receiver, control, &effect, &k, p.feedback());
+
+ Node* next_k =
+ graph()->NewNode(simplified()->NumberAdd(), k, jsgraph()->OneConstant());
+ checkpoint_params[3] = next_k;
+
+ Node* hole_true = nullptr;
+ Node* hole_false = nullptr;
+ Node* effect_true = effect;
+
+ if (IsHoleyElementsKind(kind)) {
+ // Holey elements kinds require a hole check and skipping of the element in
+ // the case of a hole.
+ Node* check;
+ if (IsDoubleElementsKind(kind)) {
+ check = graph()->NewNode(simplified()->NumberIsFloat64Hole(), element);
+ } else {
+ check = graph()->NewNode(simplified()->ReferenceEqual(), element,
+ jsgraph()->TheHoleConstant());
+ }
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+ hole_true = graph()->NewNode(common()->IfTrue(), branch);
+ hole_false = graph()->NewNode(common()->IfFalse(), branch);
+ control = hole_false;
+
+ // The contract is that we don't leak "the hole" into "user JavaScript",
+ // so we must rename the {element} here to explicitly exclude "the hole"
+ // from the type of {element}.
+ element = effect = graph()->NewNode(
+ common()->TypeGuard(Type::NonInternal()), element, effect, control);
+ }
+
+ frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, Builtins::kArrayForEachLoopLazyDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ outer_frame_state, ContinuationFrameStateMode::LAZY);
+
+ control = effect = graph()->NewNode(
+ javascript()->Call(5, p.frequency()), fncallback, this_arg, element, k,
+ receiver, context, frame_state, effect, control);
+
+ // Rewire potential exception edges.
+ Node* on_exception = nullptr;
+ if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
+ RewirePostCallbackExceptionEdges(check_throw, on_exception, effect,
+ &check_fail, &control);
+ }
+
+ if (IsHoleyElementsKind(kind)) {
+ Node* after_call_control = control;
+ Node* after_call_effect = effect;
+ control = hole_true;
+ effect = effect_true;
+
+ control = graph()->NewNode(common()->Merge(2), control, after_call_control);
+ effect = graph()->NewNode(common()->EffectPhi(2), effect, after_call_effect,
+ control);
+ }
+
+ WireInLoopEnd(loop, eloop, vloop, next_k, control, effect);
+
+ control = if_false;
+ effect = eloop;
+
+ // Wire up the branch for the case when IsCallable fails for the callback.
+ // Since {check_throw} is an unconditional throw, it's impossible to
+ // return a successful completion. Therefore, we simply connect this
+ // path to the graph end.
+ Node* throw_node =
+ graph()->NewNode(common()->Throw(), check_throw, check_fail);
+ NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
+
+ ReplaceWithValue(node, jsgraph()->UndefinedConstant(), effect, control);
+ return Replace(jsgraph()->UndefinedConstant());
+}
+
+Reduction JSCallReducer::ReduceArrayReduce(Handle<JSFunction> function,
+ Node* node) {
+ if (!FLAG_turbo_inline_array_builtins) return NoChange();
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+
+ Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
return NoChange();
}
- if (receiver_maps.size() == 0) return NoChange();
+
+ // Try to determine the {receiver} map.
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* fncallback = node->op()->ValueInputCount() > 2
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
ElementsKind kind = IsDoubleElementsKind(receiver_maps[0]->elements_kind())
? PACKED_DOUBLE_ELEMENTS
@@ -838,8 +1031,7 @@ Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
if (!CanInlineArrayIteratingBuiltin(receiver_map)) {
return NoChange();
}
- if (!IsFastElementsKind(next_kind) ||
- (IsDoubleElementsKind(next_kind) && IsHoleyElementsKind(next_kind))) {
+ if (!IsFastElementsKind(next_kind) || IsHoleyElementsKind(next_kind)) {
return NoChange();
}
if (IsDoubleElementsKind(kind) != IsDoubleElementsKind(next_kind)) {
@@ -854,36 +1046,73 @@ Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
// global array protector cell.
dependencies()->AssumePropertyCell(factory()->no_elements_protector());
- Node* k = jsgraph()->ZeroConstant();
+ // If we have unreliable maps, we need a map check.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+ }
Node* original_length = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSArrayLength(PACKED_ELEMENTS)),
receiver, effect, control);
- std::vector<Node*> checkpoint_params(
- {receiver, fncallback, this_arg, k, original_length});
+ Node* k = jsgraph()->ZeroConstant();
+
+ std::vector<Node*> checkpoint_params({receiver, fncallback, k,
+ original_length,
+ jsgraph()->UndefinedConstant()});
const int stack_parameters = static_cast<int>(checkpoint_params.size());
// Check whether the given callback function is callable. Note that this has
// to happen outside the loop to make sure we also throw on empty arrays.
Node* check_frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArrayForEachLoopLazyDeoptContinuation,
- node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ jsgraph(), function, Builtins::kArrayReduceLoopLazyDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters - 1,
outer_frame_state, ContinuationFrameStateMode::LAZY);
Node* check_fail = nullptr;
Node* check_throw = nullptr;
WireInCallbackIsCallableCheck(fncallback, context, check_frame_state, effect,
&control, &check_fail, &check_throw);
+ // Set initial accumulator value
+ Node* cur = jsgraph()->TheHoleConstant();
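+ // The hole is only a placeholder; {cur} is overwritten in both branches
+ // below.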
+
+ Node* initial_element_check_fail = nullptr;
+ Node* initial_element_check_throw = nullptr;
+ if (node->op()->ValueInputCount() > 3) {
+ cur = NodeProperties::GetValueInput(node, 3);
+ } else {
+ Node* check =
+ graph()->NewNode(simplified()->NumberEqual(), original_length, k);
+ Node* check_branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+ initial_element_check_fail =
+ graph()->NewNode(common()->IfTrue(), check_branch);
+ initial_element_check_throw = graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kThrowTypeError, 2),
+ jsgraph()->Constant(MessageTemplate::kReduceNoInitial), fncallback,
+ context, check_frame_state, effect, initial_element_check_fail);
+ control = graph()->NewNode(common()->IfFalse(), check_branch);
+
+ cur = SafeLoadElement(kind, receiver, control, &effect, &k, p.feedback());
+ k = graph()->NewNode(simplified()->NumberAdd(), k,
+ jsgraph()->OneConstant());
+ }
+
// Start the loop.
Node* loop = control = graph()->NewNode(common()->Loop(2), control, control);
Node* eloop = effect =
graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
NodeProperties::MergeControlToEnd(graph(), common(), terminate);
- Node* vloop = k = graph()->NewNode(
+ Node* kloop = k = graph()->NewNode(
common()->Phi(MachineRepresentation::kTagged, 2), k, k, loop);
- checkpoint_params[3] = k;
+ Node* curloop = cur = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), cur, cur, loop);
+ checkpoint_params[2] = k;
+ checkpoint_params[4] = curloop;
control = loop;
effect = eloop;
@@ -898,7 +1127,7 @@ Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
control = if_true;
Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArrayForEachLoopEagerDeoptContinuation,
+ jsgraph(), function, Builtins::kArrayReduceLoopEagerDeoptContinuation,
node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
outer_frame_state, ContinuationFrameStateMode::EAGER);
@@ -910,11 +1139,12 @@ Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
simplified()->CheckMaps(CheckMapsFlag::kNone, receiver_maps), receiver,
effect, control);
- Node* element = SafeLoadElement(kind, receiver, control, &effect, &k);
+ Node* element =
+ SafeLoadElement(kind, receiver, control, &effect, &k, p.feedback());
Node* next_k =
- graph()->NewNode(simplified()->NumberAdd(), k, jsgraph()->Constant(1));
- checkpoint_params[3] = next_k;
+ graph()->NewNode(simplified()->NumberAdd(), k, jsgraph()->OneConstant());
+ checkpoint_params[2] = next_k;
Node* hole_true = nullptr;
Node* hole_false = nullptr;
@@ -934,18 +1164,19 @@ Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
// The contract is that we don't leak "the hole" into "user JavaScript",
// so we must rename the {element} here to explicitly exclude "the hole"
// from the type of {element}.
- element = graph()->NewNode(common()->TypeGuard(Type::NonInternal()),
- element, control);
+ element = effect = graph()->NewNode(
+ common()->TypeGuard(Type::NonInternal()), element, effect, control);
}
frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArrayForEachLoopLazyDeoptContinuation,
- node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ jsgraph(), function, Builtins::kArrayReduceLoopLazyDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters - 1,
outer_frame_state, ContinuationFrameStateMode::LAZY);
- control = effect = graph()->NewNode(
- javascript()->Call(5, p.frequency()), fncallback, this_arg, element, k,
- receiver, context, frame_state, effect, control);
+ Node* next_cur = control = effect =
+ graph()->NewNode(javascript()->Call(6, p.frequency()), fncallback,
+ jsgraph()->UndefinedConstant(), cur, element, k,
+ receiver, context, frame_state, effect, control);
// Rewire potential exception edges.
Node* on_exception = nullptr;
@@ -963,12 +1194,17 @@ Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
control = graph()->NewNode(common()->Merge(2), control, after_call_control);
effect = graph()->NewNode(common()->EffectPhi(2), effect, after_call_effect,
control);
+ next_cur =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2), cur,
+ next_cur, control);
}
k = next_k;
+ cur = next_cur;
loop->ReplaceInput(1, control);
- vloop->ReplaceInput(1, k);
+ kloop->ReplaceInput(1, k);
+ curloop->ReplaceInput(1, cur);
eloop->ReplaceInput(1, effect);
control = if_false;
@@ -982,19 +1218,271 @@ Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
graph()->NewNode(common()->Throw(), check_throw, check_fail);
NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
- ReplaceWithValue(node, jsgraph()->UndefinedConstant(), effect, control);
- return Replace(jsgraph()->UndefinedConstant());
-}
+ if (node->op()->ValueInputCount() <= 3) {
+ // Wire up the branch for the case when an array is empty.
+ // Since {initial_element_check_throw} is an unconditional throw, it's
+ // impossible to return a successful completion. Therefore, we simply
+ // connect this path to the graph end.
+ Node* throw_node =
+ graph()->NewNode(common()->Throw(), initial_element_check_throw,
+ initial_element_check_fail);
+ NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
+ }
+
+ ReplaceWithValue(node, curloop, effect, control);
+ return Replace(curloop);
+}
+
+Reduction JSCallReducer::ReduceArrayReduceRight(Handle<JSFunction> function,
+ Node* node) {
+ if (!FLAG_turbo_inline_array_builtins) return NoChange();
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+
+ Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
+ // Try to determine the {receiver} map.
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* fncallback = node->op()->ValueInputCount() > 2
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+
+ ElementsKind kind = IsDoubleElementsKind(receiver_maps[0]->elements_kind())
+ ? PACKED_DOUBLE_ELEMENTS
+ : PACKED_ELEMENTS;
+ for (Handle<Map> receiver_map : receiver_maps) {
+ ElementsKind next_kind = receiver_map->elements_kind();
+ if (!CanInlineArrayIteratingBuiltin(receiver_map)) {
+ return NoChange();
+ }
+ if (!IsFastElementsKind(next_kind) || IsHoleyElementsKind(next_kind)) {
+ return NoChange();
+ }
+ if (IsDoubleElementsKind(kind) != IsDoubleElementsKind(next_kind)) {
+ return NoChange();
+ }
+ if (IsHoleyElementsKind(next_kind)) {
+ kind = HOLEY_ELEMENTS;
+ }
+ }
+
+ // Install code dependencies on the {receiver} prototype maps and the
+ // global array protector cell.
+ dependencies()->AssumePropertyCell(factory()->no_elements_protector());
+
+ // If we have unreliable maps, we need a map check.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+ }
+
+ Node* original_length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(PACKED_ELEMENTS)),
+ receiver, effect, control);
+
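+ // reduceRight walks from the last element down to index 0, so the cursor
+ // starts at {original_length} - 1.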
+ Node* k = graph()->NewNode(simplified()->NumberSubtract(), original_length,
+ jsgraph()->OneConstant());
+
+ std::vector<Node*> checkpoint_params({receiver, fncallback, k,
+ original_length,
+ jsgraph()->UndefinedConstant()});
+ const int stack_parameters = static_cast<int>(checkpoint_params.size());
+
+ // Check whether the given callback function is callable. Note that this has
+ // to happen outside the loop to make sure we also throw on empty arrays.
+ Node* check_frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, Builtins::kArrayReduceRightLoopLazyDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters - 1,
+ outer_frame_state, ContinuationFrameStateMode::LAZY);
+ Node* check_fail = nullptr;
+ Node* check_throw = nullptr;
+ WireInCallbackIsCallableCheck(fncallback, context, check_frame_state, effect,
+ &control, &check_fail, &check_throw);
+
+ // Set initial accumulator value
+ Node* cur = nullptr;
+
+ Node* initial_element_check_fail = nullptr;
+ Node* initial_element_check_throw = nullptr;
+ if (node->op()->ValueInputCount() > 3) {
+ cur = NodeProperties::GetValueInput(node, 3);
+ } else {
+ Node* check = graph()->NewNode(simplified()->NumberEqual(), original_length,
+ jsgraph()->SmiConstant(0));
+ Node* check_branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+ initial_element_check_fail =
+ graph()->NewNode(common()->IfTrue(), check_branch);
+ initial_element_check_throw = graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kThrowTypeError, 2),
+ jsgraph()->Constant(MessageTemplate::kReduceNoInitial), fncallback,
+ context, check_frame_state, effect, initial_element_check_fail);
+ control = graph()->NewNode(common()->IfFalse(), check_branch);
+
+ cur = SafeLoadElement(kind, receiver, control, &effect, &k, p.feedback());
+ k = graph()->NewNode(simplified()->NumberSubtract(), k,
+ jsgraph()->OneConstant());
+ }
+
+ // Start the loop.
+ Node* loop = control = graph()->NewNode(common()->Loop(2), control, control);
+ Node* eloop = effect =
+ graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
+ Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
+ NodeProperties::MergeControlToEnd(graph(), common(), terminate);
+ Node* kloop = k = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), k, k, loop);
+ Node* curloop = cur = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), cur, cur, loop);
+ checkpoint_params[2] = k;
+ checkpoint_params[4] = curloop;
+
+ control = loop;
+ effect = eloop;
+
+ Node* continue_test = graph()->NewNode(simplified()->NumberLessThanOrEqual(),
+ jsgraph()->ZeroConstant(), k);
+ Node* continue_branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ continue_test, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), continue_branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), continue_branch);
+ control = if_true;
+
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function,
+ Builtins::kArrayReduceRightLoopEagerDeoptContinuation, node->InputAt(0),
+ context, &checkpoint_params[0], stack_parameters, outer_frame_state,
+ ContinuationFrameStateMode::EAGER);
+
+ effect =
+ graph()->NewNode(common()->Checkpoint(), frame_state, effect, control);
+
+ // Make sure the map hasn't changed during the iteration
+ effect = graph()->NewNode(
+ simplified()->CheckMaps(CheckMapsFlag::kNone, receiver_maps), receiver,
+ effect, control);
+
+ Node* element =
+ SafeLoadElement(kind, receiver, control, &effect, &k, p.feedback());
+
+ Node* next_k = graph()->NewNode(simplified()->NumberSubtract(), k,
+ jsgraph()->OneConstant());
+ checkpoint_params[2] = next_k;
+
+ Node* hole_true = nullptr;
+ Node* hole_false = nullptr;
+ Node* effect_true = effect;
+
+ if (IsHoleyElementsKind(kind)) {
+ // Holey elements kinds require a hole check and skipping of the element in
+ // the case of a hole.
+ Node* check = graph()->NewNode(simplified()->ReferenceEqual(), element,
+ jsgraph()->TheHoleConstant());
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+ hole_true = graph()->NewNode(common()->IfTrue(), branch);
+ hole_false = graph()->NewNode(common()->IfFalse(), branch);
+ control = hole_false;
+
+ // The contract is that we don't leak "the hole" into "user JavaScript",
+ // so we must rename the {element} here to explicitly exclude "the hole"
+ // from the type of {element}.
+ element = effect = graph()->NewNode(
+ common()->TypeGuard(Type::NonInternal()), element, effect, control);
+ }
+
+ frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, Builtins::kArrayReduceRightLoopLazyDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters - 1,
+ outer_frame_state, ContinuationFrameStateMode::LAZY);
+
+ Node* next_cur = control = effect =
+ graph()->NewNode(javascript()->Call(6, p.frequency()), fncallback,
+ jsgraph()->UndefinedConstant(), cur, element, k,
+ receiver, context, frame_state, effect, control);
+
+ // Rewire potential exception edges.
+ Node* on_exception = nullptr;
+ if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
+ RewirePostCallbackExceptionEdges(check_throw, on_exception, effect,
+ &check_fail, &control);
+ }
+
+ if (IsHoleyElementsKind(kind)) {
+ Node* after_call_control = control;
+ Node* after_call_effect = effect;
+ control = hole_true;
+ effect = effect_true;
+
+ control = graph()->NewNode(common()->Merge(2), control, after_call_control);
+ effect = graph()->NewNode(common()->EffectPhi(2), effect, after_call_effect,
+ control);
+ next_cur =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2), cur,
+ next_cur, control);
+ }
+
+ k = next_k;
+ cur = next_cur;
+
+ loop->ReplaceInput(1, control);
+ kloop->ReplaceInput(1, k);
+ curloop->ReplaceInput(1, cur);
+ eloop->ReplaceInput(1, effect);
+
+ control = if_false;
+ effect = eloop;
+
+ // Wire up the branch for the case when IsCallable fails for the callback.
+ // Since {check_throw} is an unconditional throw, it's impossible to
+ // return a successful completion. Therefore, we simply connect this
+ // path to the graph end.
+ Node* throw_node =
+ graph()->NewNode(common()->Throw(), check_throw, check_fail);
+ NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
+
+ if (node->op()->ValueInputCount() <= 3) {
+ // Wire up the branch for the case when an array is empty.
+ // Since {initial_element_check_throw} is an unconditional throw, it's
+ // impossible to return a successful completion. Therefore, we simply
+ // connect this path to the graph end.
+ Node* throw_node =
+ graph()->NewNode(common()->Throw(), initial_element_check_throw,
+ initial_element_check_fail);
+ NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
+ }
+
+ ReplaceWithValue(node, curloop, effect, control);
+ return Replace(curloop);
+}
Reduction JSCallReducer::ReduceArrayMap(Handle<JSFunction> function,
Node* node) {
if (!FLAG_turbo_inline_array_builtins) return NoChange();
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* context = NodeProperties::GetContextInput(node);
- CallParameters const& p = CallParametersOf(node->op());
// Try to determine the {receiver} map.
Node* receiver = NodeProperties::GetValueInput(node, 1);
@@ -1007,31 +1495,18 @@ Reduction JSCallReducer::ReduceArrayMap(Handle<JSFunction> function,
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
- if (result != NodeProperties::kReliableReceiverMaps) {
- return NoChange();
- }
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
// Ensure that any changes to the Array species constructor cause deopt.
if (!isolate()->IsArraySpeciesLookupChainIntact()) return NoChange();
- if (receiver_maps.size() == 0) return NoChange();
-
const ElementsKind kind = receiver_maps[0]->elements_kind();
- // TODO(danno): Handle holey elements kinds.
- if (!IsFastPackedElementsKind(kind)) {
- return NoChange();
- }
-
for (Handle<Map> receiver_map : receiver_maps) {
- if (!CanInlineArrayIteratingBuiltin(receiver_map)) {
- return NoChange();
- }
+ if (!CanInlineArrayIteratingBuiltin(receiver_map)) return NoChange();
// We can handle different maps, as long as their elements kind are the
// same.
- if (receiver_map->elements_kind() != kind) {
- return NoChange();
- }
+ if (receiver_map->elements_kind() != kind) return NoChange();
}
dependencies()->AssumePropertyCell(factory()->species_protector());
@@ -1045,10 +1520,13 @@ Reduction JSCallReducer::ReduceArrayMap(Handle<JSFunction> function,
Node* k = jsgraph()->ZeroConstant();
- // Make sure the map hasn't changed before we construct the output array.
- effect = graph()->NewNode(
- simplified()->CheckMaps(CheckMapsFlag::kNone, receiver_maps), receiver,
- effect, control);
+ // If we have unreliable maps, we need a map check.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+ }
Node* original_length = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
@@ -1078,18 +1556,10 @@ Reduction JSCallReducer::ReduceArrayMap(Handle<JSFunction> function,
&control, &check_fail, &check_throw);
// Start the loop.
- Node* loop = control = graph()->NewNode(common()->Loop(2), control, control);
- Node* eloop = effect =
- graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
- Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
- NodeProperties::MergeControlToEnd(graph(), common(), terminate);
- Node* vloop = k = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTagged, 2), k, k, loop);
+ Node* vloop = k = WireInLoopStart(k, &control, &effect);
+ Node *loop = control, *eloop = effect;
checkpoint_params[4] = k;
- control = loop;
- effect = eloop;
-
Node* continue_test =
graph()->NewNode(simplified()->NumberLessThan(), k, original_length);
Node* continue_branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
@@ -1108,15 +1578,44 @@ Reduction JSCallReducer::ReduceArrayMap(Handle<JSFunction> function,
graph()->NewNode(common()->Checkpoint(), frame_state, effect, control);
// Make sure the map hasn't changed during the iteration
- effect = graph()->NewNode(
- simplified()->CheckMaps(CheckMapsFlag::kNone, receiver_maps), receiver,
- effect, control);
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
- Node* element = SafeLoadElement(kind, receiver, control, &effect, &k);
+ Node* element =
+ SafeLoadElement(kind, receiver, control, &effect, &k, p.feedback());
Node* next_k =
graph()->NewNode(simplified()->NumberAdd(), k, jsgraph()->OneConstant());
+ Node* hole_true = nullptr;
+ Node* hole_false = nullptr;
+ Node* effect_true = effect;
+
+ if (IsHoleyElementsKind(kind)) {
+ // Holey elements kinds require a hole check and skipping of the element in
+ // the case of a hole.
+ Node* check;
+ if (IsDoubleElementsKind(kind)) {
+ check = graph()->NewNode(simplified()->NumberIsFloat64Hole(), element);
+ } else {
+ check = graph()->NewNode(simplified()->ReferenceEqual(), element,
+ jsgraph()->TheHoleConstant());
+ }
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+ hole_true = graph()->NewNode(common()->IfTrue(), branch);
+ hole_false = graph()->NewNode(common()->IfFalse(), branch);
+ control = hole_false;
+
+ // The contract is that we don't leak "the hole" into "user JavaScript",
+ // so we must rename the {element} here to explicitly exclude "the hole"
+ // from the type of {element}.
+ element = effect = graph()->NewNode(
+ common()->TypeGuard(Type::NonInternal()), element, effect, control);
+ }
+
// This frame state is dealt with by hand in
// ArrayMapLoopLazyDeoptContinuation.
frame_state = CreateJavaScriptBuiltinContinuationFrameState(
@@ -1143,11 +1642,19 @@ Reduction JSCallReducer::ReduceArrayMap(Handle<JSFunction> function,
simplified()->TransitionAndStoreElement(double_map, fast_map), a, k,
callback_value, effect, control);
- k = next_k;
+ if (IsHoleyElementsKind(kind)) {
+ Node* after_call_and_store_control = control;
+ Node* after_call_and_store_effect = effect;
+ control = hole_true;
+ effect = effect_true;
- loop->ReplaceInput(1, control);
- vloop->ReplaceInput(1, k);
- eloop->ReplaceInput(1, effect);
+ control = graph()->NewNode(common()->Merge(2), control,
+ after_call_and_store_control);
+ effect = graph()->NewNode(common()->EffectPhi(2), effect,
+ after_call_and_store_effect, control);
+ }
+
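+  // WireInLoopEnd closes the loop by wiring the control and effect back edges
+  // and feeding {next_k} into the induction-variable Phi.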
+ WireInLoopEnd(loop, eloop, vloop, next_k, control, effect);
control = if_false;
effect = eloop;
@@ -1168,11 +1675,15 @@ Reduction JSCallReducer::ReduceArrayFilter(Handle<JSFunction> function,
Node* node) {
if (!FLAG_turbo_inline_array_builtins) return NoChange();
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* context = NodeProperties::GetContextInput(node);
- CallParameters const& p = CallParametersOf(node->op());
// Try to determine the {receiver} map.
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* fncallback = node->op()->ValueInputCount() > 2
@@ -1184,21 +1695,14 @@ Reduction JSCallReducer::ReduceArrayFilter(Handle<JSFunction> function,
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
- if (result != NodeProperties::kReliableReceiverMaps) {
- return NoChange();
- }
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
// And ensure that any changes to the Array species constructor cause deopt.
if (!isolate()->IsArraySpeciesLookupChainIntact()) return NoChange();
- if (receiver_maps.size() == 0) return NoChange();
-
const ElementsKind kind = receiver_maps[0]->elements_kind();
-
- // TODO(danno): Handle holey elements kinds.
- if (!IsFastPackedElementsKind(kind)) {
- return NoChange();
- }
+ // The output array is packed (filter doesn't visit holes).
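+  // E.g. [1, , 3].filter(() => true) yields the packed array [1, 3], so the
+  // result can always use the packed variant of the receiver's kind.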
+ const ElementsKind packed_kind = GetPackedElementsKind(kind);
for (Handle<Map> receiver_map : receiver_maps) {
if (!CanInlineArrayIteratingBuiltin(receiver_map)) {
@@ -1206,23 +1710,24 @@ Reduction JSCallReducer::ReduceArrayFilter(Handle<JSFunction> function,
}
// We can handle different maps, as long as their elements kinds are the
// same.
- if (receiver_map->elements_kind() != kind) {
- return NoChange();
- }
+ if (receiver_map->elements_kind() != kind) return NoChange();
}
dependencies()->AssumePropertyCell(factory()->species_protector());
Handle<Map> initial_map(
- Map::cast(native_context()->GetInitialJSArrayMap(kind)));
+ Map::cast(native_context()->GetInitialJSArrayMap(packed_kind)));
Node* k = jsgraph()->ZeroConstant();
Node* to = jsgraph()->ZeroConstant();
- // Make sure the map hasn't changed before we construct the output array.
- effect = graph()->NewNode(
- simplified()->CheckMaps(CheckMapsFlag::kNone, receiver_maps), receiver,
- effect, control);
+ // If we have unreliable maps, we need a map check.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+ }
Node* a; // Construct the output array.
{
@@ -1232,7 +1737,8 @@ Reduction JSCallReducer::ReduceArrayFilter(Handle<JSFunction> function,
Node* empty_fixed_array = jsgraph()->EmptyFixedArrayConstant();
ab.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), empty_fixed_array);
ab.Store(AccessBuilder::ForJSObjectElements(), empty_fixed_array);
- ab.Store(AccessBuilder::ForJSArrayLength(kind), jsgraph()->ZeroConstant());
+ ab.Store(AccessBuilder::ForJSArrayLength(packed_kind),
+ jsgraph()->ZeroConstant());
for (int i = 0; i < initial_map->GetInObjectProperties(); ++i) {
ab.Store(AccessBuilder::ForJSObjectInObjectProperty(initial_map, i),
jsgraph()->UndefinedConstant());
@@ -1268,19 +1774,11 @@ Reduction JSCallReducer::ReduceArrayFilter(Handle<JSFunction> function,
}
// Start the loop.
- Node* loop = control = graph()->NewNode(common()->Loop(2), control, control);
- Node* eloop = effect =
- graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
- Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
- NodeProperties::MergeControlToEnd(graph(), common(), terminate);
- Node* vloop = k = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTagged, 2), k, k, loop);
+ Node* vloop = k = WireInLoopStart(k, &control, &effect);
+ Node *loop = control, *eloop = effect;
Node* v_to_loop = to = graph()->NewNode(
common()->Phi(MachineRepresentation::kTaggedSigned, 2), to, to, loop);
- control = loop;
- effect = eloop;
-
Node* continue_test =
graph()->NewNode(simplified()->NumberLessThan(), k, original_length);
Node* continue_branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
@@ -1305,15 +1803,45 @@ Reduction JSCallReducer::ReduceArrayFilter(Handle<JSFunction> function,
}
// Make sure the map hasn't changed during the iteration.
- effect = graph()->NewNode(
- simplified()->CheckMaps(CheckMapsFlag::kNone, receiver_maps), receiver,
- effect, control);
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
- Node* element = SafeLoadElement(kind, receiver, control, &effect, &k);
+ Node* element =
+ SafeLoadElement(kind, receiver, control, &effect, &k, p.feedback());
Node* next_k =
graph()->NewNode(simplified()->NumberAdd(), k, jsgraph()->OneConstant());
+ Node* hole_true = nullptr;
+ Node* hole_false = nullptr;
+ Node* effect_true = effect;
+ Node* hole_true_vto = to;
+
+ if (IsHoleyElementsKind(kind)) {
+    // Holey elements kinds require a hole check and skipping of the element
+    // in the case of a hole.
+ Node* check;
+ if (IsDoubleElementsKind(kind)) {
+ check = graph()->NewNode(simplified()->NumberIsFloat64Hole(), element);
+ } else {
+ check = graph()->NewNode(simplified()->ReferenceEqual(), element,
+ jsgraph()->TheHoleConstant());
+ }
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+ hole_true = graph()->NewNode(common()->IfTrue(), branch);
+ hole_false = graph()->NewNode(common()->IfFalse(), branch);
+ control = hole_false;
+
+ // The contract is that we don't leak "the hole" into "user JavaScript",
+ // so we must rename the {element} here to explicitly exclude "the hole"
+ // from the type of {element}.
+ element = effect = graph()->NewNode(
+ common()->TypeGuard(Type::NonInternal()), element, effect, control);
+ }
+
Node* callback_value = nullptr;
{
// This frame state is dealt with by hand in
@@ -1363,14 +1891,25 @@ Reduction JSCallReducer::ReduceArrayFilter(Handle<JSFunction> function,
// We have to coerce callback_value to boolean, and only store the element in
// {a} if it's true. The checkpoint above protects against the case that
// growing {a} fails.
- to = DoFilterPostCallbackWork(kind, &control, &effect, a, to, element,
+ to = DoFilterPostCallbackWork(packed_kind, &control, &effect, a, to, element,
callback_value);
- k = next_k;
- loop->ReplaceInput(1, control);
- vloop->ReplaceInput(1, k);
+ if (IsHoleyElementsKind(kind)) {
+ Node* after_call_control = control;
+ Node* after_call_effect = effect;
+ control = hole_true;
+ effect = effect_true;
+
+ control = graph()->NewNode(common()->Merge(2), control, after_call_control);
+ effect = graph()->NewNode(common()->EffectPhi(2), effect, after_call_effect,
+ control);
+ to =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTaggedSigned, 2),
+ hole_true_vto, to, control);
+ }
+
+ WireInLoopEnd(loop, eloop, vloop, next_k, control, effect);
v_to_loop->ReplaceInput(1, to);
- eloop->ReplaceInput(1, effect);
control = if_false;
effect = eloop;
@@ -1387,6 +1926,216 @@ Reduction JSCallReducer::ReduceArrayFilter(Handle<JSFunction> function,
return Replace(a);
}
+Reduction JSCallReducer::ReduceArrayFind(ArrayFindVariant variant,
+ Handle<JSFunction> function,
+ Node* node) {
+ if (!FLAG_turbo_inline_array_builtins) return NoChange();
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
+ Builtins::Name eager_continuation_builtin;
+ Builtins::Name lazy_continuation_builtin;
+ Builtins::Name after_callback_lazy_continuation_builtin;
+ if (variant == ArrayFindVariant::kFind) {
+ eager_continuation_builtin = Builtins::kArrayFindLoopEagerDeoptContinuation;
+ lazy_continuation_builtin = Builtins::kArrayFindLoopLazyDeoptContinuation;
+ after_callback_lazy_continuation_builtin =
+ Builtins::kArrayFindLoopAfterCallbackLazyDeoptContinuation;
+ } else {
+ DCHECK_EQ(ArrayFindVariant::kFindIndex, variant);
+ eager_continuation_builtin =
+ Builtins::kArrayFindIndexLoopEagerDeoptContinuation;
+ lazy_continuation_builtin =
+ Builtins::kArrayFindIndexLoopLazyDeoptContinuation;
+ after_callback_lazy_continuation_builtin =
+ Builtins::kArrayFindIndexLoopAfterCallbackLazyDeoptContinuation;
+ }
+
+ Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+
+ // Try to determine the {receiver} map.
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* fncallback = node->op()->ValueInputCount() > 2
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+ Node* this_arg = node->op()->ValueInputCount() > 3
+ ? NodeProperties::GetValueInput(node, 3)
+ : jsgraph()->UndefinedConstant();
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+
+ const ElementsKind kind = receiver_maps[0]->elements_kind();
+
+ // TODO(pwong): Handle holey double elements kinds.
+ if (IsDoubleElementsKind(kind) && IsHoleyElementsKind(kind)) {
+ return NoChange();
+ }
+
+ for (Handle<Map> receiver_map : receiver_maps) {
+ if (!CanInlineArrayIteratingBuiltin(receiver_map)) return NoChange();
+    // We can handle different maps, as long as their elements kinds are the
+    // same.
+ if (receiver_map->elements_kind() != kind) return NoChange();
+ }
+
+ // Install code dependencies on the {receiver} prototype maps and the
+ // global array protector cell.
+ dependencies()->AssumePropertyCell(factory()->no_elements_protector());
+
+ // If we have unreliable maps, we need a map check.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+ }
+
+ Node* k = jsgraph()->ZeroConstant();
+
+ Node* original_length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
+ effect, control);
+
+ std::vector<Node*> checkpoint_params(
+ {receiver, fncallback, this_arg, k, original_length});
+ const int stack_parameters = static_cast<int>(checkpoint_params.size());
+
+ // Check whether the given callback function is callable. Note that this has
+ // to happen outside the loop to make sure we also throw on empty arrays.
+ Node* check_fail = nullptr;
+ Node* check_throw = nullptr;
+ {
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, lazy_continuation_builtin, node->InputAt(0),
+ context, &checkpoint_params[0], stack_parameters, outer_frame_state,
+ ContinuationFrameStateMode::LAZY);
+ WireInCallbackIsCallableCheck(fncallback, context, frame_state, effect,
+ &control, &check_fail, &check_throw);
+ }
+
+ // Start the loop.
+ Node* vloop = k = WireInLoopStart(k, &control, &effect);
+ Node *loop = control, *eloop = effect;
+ checkpoint_params[3] = k;
+
+ // Check if we've iterated past the last element of the array.
+ Node* if_false = nullptr;
+ {
+ Node* continue_test =
+ graph()->NewNode(simplified()->NumberLessThan(), k, original_length);
+ Node* continue_branch = graph()->NewNode(
+ common()->Branch(BranchHint::kTrue), continue_test, control);
+ control = graph()->NewNode(common()->IfTrue(), continue_branch);
+ if_false = graph()->NewNode(common()->IfFalse(), continue_branch);
+ }
+
+  // Make sure the map hasn't changed during the iteration.
+ {
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, eager_continuation_builtin, node->InputAt(0),
+ context, &checkpoint_params[0], stack_parameters, outer_frame_state,
+ ContinuationFrameStateMode::EAGER);
+
+ effect =
+ graph()->NewNode(common()->Checkpoint(), frame_state, effect, control);
+
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+ }
+
+ // Load k-th element from receiver.
+ Node* element =
+ SafeLoadElement(kind, receiver, control, &effect, &k, p.feedback());
+
+ // Increment k for the next iteration.
+ Node* next_k = checkpoint_params[3] =
+ graph()->NewNode(simplified()->NumberAdd(), k, jsgraph()->OneConstant());
+
+ // Replace holes with undefined.
+ if (IsHoleyElementsKind(kind)) {
+ element = graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
+ graph()->NewNode(simplified()->ReferenceEqual(), element,
+ jsgraph()->TheHoleConstant()),
+ jsgraph()->UndefinedConstant(), element);
+ }
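+  // Unlike map/filter, find and findIndex visit holes as undefined, e.g.
+  // [, 1].findIndex(v => v === undefined) is 0, so the element is converted
+  // rather than skipped.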
+
+ Node* if_found_return_value =
+ (variant == ArrayFindVariant::kFind) ? element : k;
+
+ // Call the callback.
+ Node* callback_value = nullptr;
+ {
+ std::vector<Node*> call_checkpoint_params({receiver, fncallback, this_arg,
+ next_k, original_length,
+ if_found_return_value});
+ const int call_stack_parameters =
+ static_cast<int>(call_checkpoint_params.size());
+
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, after_callback_lazy_continuation_builtin,
+ node->InputAt(0), context, &call_checkpoint_params[0],
+ call_stack_parameters, outer_frame_state,
+ ContinuationFrameStateMode::LAZY);
+
+ callback_value = control = effect = graph()->NewNode(
+ javascript()->Call(5, p.frequency()), fncallback, this_arg, element, k,
+ receiver, context, frame_state, effect, control);
+ }
+
+ // Rewire potential exception edges.
+ Node* on_exception = nullptr;
+ if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
+ RewirePostCallbackExceptionEdges(check_throw, on_exception, effect,
+ &check_fail, &control);
+ }
+
+ // Check whether the given callback function returned a truthy value.
+ Node* boolean_result =
+ graph()->NewNode(simplified()->ToBoolean(), callback_value);
+ Node* efound_branch = effect;
+ Node* found_branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ boolean_result, control);
+ Node* if_found = graph()->NewNode(common()->IfTrue(), found_branch);
+ Node* if_notfound = graph()->NewNode(common()->IfFalse(), found_branch);
+ control = if_notfound;
+
+ // Close the loop.
+ WireInLoopEnd(loop, eloop, vloop, next_k, control, effect);
+
+ control = graph()->NewNode(common()->Merge(2), if_found, if_false);
+ effect =
+ graph()->NewNode(common()->EffectPhi(2), efound_branch, eloop, control);
+
+ Node* if_not_found_value = (variant == ArrayFindVariant::kFind)
+ ? jsgraph()->UndefinedConstant()
+ : jsgraph()->MinusOneConstant();
+ Node* return_value =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ if_found_return_value, if_not_found_value, control);
+
+ // Wire up the branch for the case when IsCallable fails for the callback.
+ // Since {check_throw} is an unconditional throw, it's impossible to
+  // return a successful completion. Therefore, we simply connect the
+  // throwing path to the graph end.
+ Node* throw_node =
+ graph()->NewNode(common()->Throw(), check_throw, check_fail);
+ NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
+
+ ReplaceWithValue(node, return_value, effect, control);
+ return Replace(return_value);
+}
+
Node* JSCallReducer::DoFilterPostCallbackWork(ElementsKind kind, Node** control,
Node** effect, Node* a, Node* to,
Node* element,
@@ -1411,8 +2160,8 @@ Node* JSCallReducer::DoFilterPostCallbackWork(ElementsKind kind, Node** control,
// We know that {to} is in Unsigned31 range here, being smaller than
// {original_length} at all times.
- Node* checked_to =
- graph()->NewNode(common()->TypeGuard(Type::Unsigned31()), to, if_true);
+ Node* checked_to = etrue = graph()->NewNode(
+ common()->TypeGuard(Type::Unsigned31()), to, etrue, if_true);
Node* elements_length = etrue = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForFixedArrayLength()), elements,
etrue, if_true);
@@ -1420,9 +2169,9 @@ Node* JSCallReducer::DoFilterPostCallbackWork(ElementsKind kind, Node** control,
GrowFastElementsMode mode =
IsDoubleElementsKind(kind) ? GrowFastElementsMode::kDoubleElements
: GrowFastElementsMode::kSmiOrObjectElements;
- elements = etrue =
- graph()->NewNode(simplified()->MaybeGrowFastElements(mode), a, elements,
- checked_to, elements_length, etrue, if_true);
+ elements = etrue = graph()->NewNode(
+ simplified()->MaybeGrowFastElements(mode, VectorSlotPair()), a,
+ elements, checked_to, elements_length, etrue, if_true);
// Update the length of {a}.
Node* new_length_a = graph()->NewNode(simplified()->NumberAdd(), checked_to,
@@ -1489,14 +2238,15 @@ void JSCallReducer::RewirePostCallbackExceptionEdges(Node* check_throw,
}
Node* JSCallReducer::SafeLoadElement(ElementsKind kind, Node* receiver,
- Node* control, Node** effect, Node** k) {
+ Node* control, Node** effect, Node** k,
+ const VectorSlotPair& feedback) {
// Make sure that the access is still in bounds, since the callback could have
// changed the array's size.
Node* length = *effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
*effect, control);
- *k = *effect = graph()->NewNode(simplified()->CheckBounds(), *k, length,
- *effect, control);
+ *k = *effect = graph()->NewNode(simplified()->CheckBounds(feedback), *k,
+ length, *effect, control);
// Reload the elements pointer before calling the callback, since the previous
// callback might have resized the array causing the elements buffer to be
@@ -1514,6 +2264,455 @@ Node* JSCallReducer::SafeLoadElement(ElementsKind kind, Node* receiver,
return element;
}
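+// SafeLoadElement re-checks the length because the callback can shrink the
+// array mid-iteration, e.g.
+//   [1, 2, 3].map(function(x, i, a) { a.length = 1; return x; });
+// the CheckBounds on {k} then deoptimizes instead of reading past the new
+// length.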
+Reduction JSCallReducer::ReduceArrayEvery(Handle<JSFunction> function,
+ Node* node) {
+ if (!FLAG_turbo_inline_array_builtins) return NoChange();
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
+ Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ // Try to determine the {receiver} map.
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* fncallback = node->op()->ValueInputCount() > 2
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+ Node* this_arg = node->op()->ValueInputCount() > 3
+ ? NodeProperties::GetValueInput(node, 3)
+ : jsgraph()->UndefinedConstant();
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+
+ // And ensure that any changes to the Array species constructor cause deopt.
+ if (!isolate()->IsArraySpeciesLookupChainIntact()) return NoChange();
+
+ const ElementsKind kind = receiver_maps[0]->elements_kind();
+
+ for (Handle<Map> receiver_map : receiver_maps) {
+ if (!CanInlineArrayIteratingBuiltin(receiver_map)) return NoChange();
+    // We can handle different maps, as long as their elements kinds are the
+    // same.
+ if (receiver_map->elements_kind() != kind) return NoChange();
+ }
+
+ dependencies()->AssumePropertyCell(factory()->species_protector());
+
+ // If we have unreliable maps, we need a map check.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+ }
+
+ Node* k = jsgraph()->ZeroConstant();
+
+ // Make sure the map hasn't changed before we construct the output array.
+ effect = graph()->NewNode(
+ simplified()->CheckMaps(CheckMapsFlag::kNone, receiver_maps), receiver,
+ effect, control);
+
+ Node* original_length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
+ effect, control);
+
+ // Check whether the given callback function is callable. Note that this has
+ // to happen outside the loop to make sure we also throw on empty arrays.
+ Node* check_fail = nullptr;
+ Node* check_throw = nullptr;
+ {
+    // This frame state doesn't ever call the deopt continuation; it's only
+    // necessary to specify a continuation in order to handle the exceptional
+    // case.
+ std::vector<Node*> checkpoint_params(
+ {receiver, fncallback, this_arg, k, original_length});
+ const int stack_parameters = static_cast<int>(checkpoint_params.size());
+
+ Node* check_frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, Builtins::kArrayEveryLoopLazyDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ outer_frame_state, ContinuationFrameStateMode::LAZY);
+ WireInCallbackIsCallableCheck(fncallback, context, check_frame_state,
+ effect, &control, &check_fail, &check_throw);
+ }
+
+ // Start the loop.
+ Node* vloop = k = WireInLoopStart(k, &control, &effect);
+ Node *loop = control, *eloop = effect;
+
+ Node* continue_test =
+ graph()->NewNode(simplified()->NumberLessThan(), k, original_length);
+ Node* continue_branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ continue_test, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), continue_branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), continue_branch);
+ control = if_true;
+
+ {
+ std::vector<Node*> checkpoint_params(
+ {receiver, fncallback, this_arg, k, original_length});
+ const int stack_parameters = static_cast<int>(checkpoint_params.size());
+
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, Builtins::kArrayEveryLoopEagerDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ outer_frame_state, ContinuationFrameStateMode::EAGER);
+
+ effect =
+ graph()->NewNode(common()->Checkpoint(), frame_state, effect, control);
+ }
+
+ // Make sure the map hasn't changed during the iteration.
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+
+ Node* element =
+ SafeLoadElement(kind, receiver, control, &effect, &k, p.feedback());
+
+ Node* next_k =
+ graph()->NewNode(simplified()->NumberAdd(), k, jsgraph()->OneConstant());
+
+ Node* hole_true = nullptr;
+ Node* hole_false = nullptr;
+ Node* effect_true = effect;
+
+ if (IsHoleyElementsKind(kind)) {
+    // Holey elements kinds require a hole check and skipping of the element
+    // in the case of a hole.
+ Node* check;
+ if (IsDoubleElementsKind(kind)) {
+ check = graph()->NewNode(simplified()->NumberIsFloat64Hole(), element);
+ } else {
+ check = graph()->NewNode(simplified()->ReferenceEqual(), element,
+ jsgraph()->TheHoleConstant());
+ }
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+ hole_true = graph()->NewNode(common()->IfTrue(), branch);
+ hole_false = graph()->NewNode(common()->IfFalse(), branch);
+ control = hole_false;
+
+ // The contract is that we don't leak "the hole" into "user JavaScript",
+ // so we must rename the {element} here to explicitly exclude "the hole"
+ // from the type of {element}.
+ element = effect = graph()->NewNode(
+ common()->TypeGuard(Type::NonInternal()), element, effect, control);
+ }
+
+ Node* callback_value = nullptr;
+ {
+ // This frame state is dealt with by hand in
+ // Builtins::kArrayEveryLoopLazyDeoptContinuation.
+ std::vector<Node*> checkpoint_params(
+ {receiver, fncallback, this_arg, k, original_length});
+ const int stack_parameters = static_cast<int>(checkpoint_params.size());
+
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, Builtins::kArrayEveryLoopLazyDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ outer_frame_state, ContinuationFrameStateMode::LAZY);
+
+ callback_value = control = effect = graph()->NewNode(
+ javascript()->Call(5, p.frequency()), fncallback, this_arg, element, k,
+ receiver, context, frame_state, effect, control);
+ }
+
+ // Rewire potential exception edges.
+ Node* on_exception = nullptr;
+ if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
+ RewirePostCallbackExceptionEdges(check_throw, on_exception, effect,
+ &check_fail, &control);
+ }
+
+ // We have to coerce callback_value to boolean.
+ Node* if_false_callback;
+ Node* efalse_callback;
+ {
+ Node* boolean_result =
+ graph()->NewNode(simplified()->ToBoolean(), callback_value);
+ Node* check_boolean_result =
+ graph()->NewNode(simplified()->ReferenceEqual(), boolean_result,
+ jsgraph()->TrueConstant());
+ Node* boolean_branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check_boolean_result, control);
+ if_false_callback = graph()->NewNode(common()->IfFalse(), boolean_branch);
+ efalse_callback = effect;
+
+ // Nothing to do in the true case.
+ control = graph()->NewNode(common()->IfTrue(), boolean_branch);
+ }
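+  // Array.prototype.every short-circuits on the first falsy callback result,
+  // e.g. [1, 2, 3].every(x => x < 2) returns false without visiting index 2;
+  // {if_false_callback} carries that early exit out of the loop below.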
+
+ if (IsHoleyElementsKind(kind)) {
+ Node* after_call_control = control;
+ Node* after_call_effect = effect;
+ control = hole_true;
+ effect = effect_true;
+
+ control = graph()->NewNode(common()->Merge(2), control, after_call_control);
+ effect = graph()->NewNode(common()->EffectPhi(2), effect, after_call_effect,
+ control);
+ }
+
+ WireInLoopEnd(loop, eloop, vloop, next_k, control, effect);
+
+ control = graph()->NewNode(common()->Merge(2), if_false, if_false_callback);
+ effect =
+ graph()->NewNode(common()->EffectPhi(2), eloop, efalse_callback, control);
+ Node* return_value = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2),
+ jsgraph()->TrueConstant(), jsgraph()->FalseConstant(), control);
+
+ // Wire up the branch for the case when IsCallable fails for the callback.
+ // Since {check_throw} is an unconditional throw, it's impossible to
+  // return a successful completion. Therefore, we simply connect the
+  // throwing path to the graph end.
+ Node* throw_node =
+ graph()->NewNode(common()->Throw(), check_throw, check_fail);
+ NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
+
+ ReplaceWithValue(node, return_value, effect, control);
+ return Replace(return_value);
+}
+
+Reduction JSCallReducer::ReduceArraySome(Handle<JSFunction> function,
+ Node* node) {
+ if (!FLAG_turbo_inline_array_builtins) return NoChange();
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
+ Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ // Try to determine the {receiver} map.
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* fncallback = node->op()->ValueInputCount() > 2
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+ Node* this_arg = node->op()->ValueInputCount() > 3
+ ? NodeProperties::GetValueInput(node, 3)
+ : jsgraph()->UndefinedConstant();
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+
+ // And ensure that any changes to the Array species constructor cause deopt.
+ if (!isolate()->IsArraySpeciesLookupChainIntact()) return NoChange();
+
+ if (receiver_maps.size() == 0) return NoChange();
+
+ const ElementsKind kind = receiver_maps[0]->elements_kind();
+
+ // TODO(pwong): Handle holey double elements kinds.
+ if (IsDoubleElementsKind(kind) && IsHoleyElementsKind(kind)) {
+ return NoChange();
+ }
+
+ for (Handle<Map> receiver_map : receiver_maps) {
+ if (!CanInlineArrayIteratingBuiltin(receiver_map)) return NoChange();
+    // We can handle different maps, as long as their elements kinds are the
+    // same.
+ if (receiver_map->elements_kind() != kind) return NoChange();
+ }
+
+ dependencies()->AssumePropertyCell(factory()->species_protector());
+
+ Node* k = jsgraph()->ZeroConstant();
+
+ // If we have unreliable maps, we need a map check.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+ }
+
+ // Make sure the map hasn't changed before we construct the output array.
+ effect = graph()->NewNode(
+ simplified()->CheckMaps(CheckMapsFlag::kNone, receiver_maps), receiver,
+ effect, control);
+
+ Node* original_length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
+ effect, control);
+
+ // Check whether the given callback function is callable. Note that this has
+ // to happen outside the loop to make sure we also throw on empty arrays.
+ Node* check_fail = nullptr;
+ Node* check_throw = nullptr;
+ {
+    // This frame state doesn't ever call the deopt continuation; it's only
+    // necessary to specify a continuation in order to handle the exceptional
+    // case.
+ std::vector<Node*> checkpoint_params(
+ {receiver, fncallback, this_arg, k, original_length});
+ const int stack_parameters = static_cast<int>(checkpoint_params.size());
+
+ Node* check_frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, Builtins::kArraySomeLoopLazyDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ outer_frame_state, ContinuationFrameStateMode::LAZY);
+ WireInCallbackIsCallableCheck(fncallback, context, check_frame_state,
+ effect, &control, &check_fail, &check_throw);
+ }
+
+ // Start the loop.
+ Node* loop = control = graph()->NewNode(common()->Loop(2), control, control);
+ Node* eloop = effect =
+ graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
+ Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
+ NodeProperties::MergeControlToEnd(graph(), common(), terminate);
+ Node* vloop = k = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), k, k, loop);
+
+ Node* continue_test =
+ graph()->NewNode(simplified()->NumberLessThan(), k, original_length);
+ Node* continue_branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ continue_test, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), continue_branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), continue_branch);
+ control = if_true;
+
+ {
+ std::vector<Node*> checkpoint_params(
+ {receiver, fncallback, this_arg, k, original_length});
+ const int stack_parameters = static_cast<int>(checkpoint_params.size());
+
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, Builtins::kArraySomeLoopEagerDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ outer_frame_state, ContinuationFrameStateMode::EAGER);
+
+ effect =
+ graph()->NewNode(common()->Checkpoint(), frame_state, effect, control);
+ }
+
+ // Make sure the map hasn't changed during the iteration.
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+
+ Node* element =
+ SafeLoadElement(kind, receiver, control, &effect, &k, p.feedback());
+
+ Node* next_k =
+ graph()->NewNode(simplified()->NumberAdd(), k, jsgraph()->OneConstant());
+
+ Node* hole_true = nullptr;
+ Node* hole_false = nullptr;
+ Node* effect_true = effect;
+
+ if (IsHoleyElementsKind(kind)) {
+    // Holey elements kinds require a hole check and skipping of the element
+    // in the case of a hole.
+ Node* check = graph()->NewNode(simplified()->ReferenceEqual(), element,
+ jsgraph()->TheHoleConstant());
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+ hole_true = graph()->NewNode(common()->IfTrue(), branch);
+ hole_false = graph()->NewNode(common()->IfFalse(), branch);
+ control = hole_false;
+
+ // The contract is that we don't leak "the hole" into "user JavaScript",
+ // so we must rename the {element} here to explicitly exclude "the hole"
+ // from the type of {element}.
+ element = effect = graph()->NewNode(
+ common()->TypeGuard(Type::NonInternal()), element, effect, control);
+ }
+
+ Node* callback_value = nullptr;
+ {
+    // This frame state is dealt with by hand in
+    // Builtins::kArraySomeLoopLazyDeoptContinuation.
+ std::vector<Node*> checkpoint_params(
+ {receiver, fncallback, this_arg, k, original_length});
+ const int stack_parameters = static_cast<int>(checkpoint_params.size());
+
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, Builtins::kArraySomeLoopLazyDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ outer_frame_state, ContinuationFrameStateMode::LAZY);
+
+ callback_value = control = effect = graph()->NewNode(
+ javascript()->Call(5, p.frequency()), fncallback, this_arg, element, k,
+ receiver, context, frame_state, effect, control);
+ }
+
+ // Rewire potential exception edges.
+ Node* on_exception = nullptr;
+ if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
+ RewirePostCallbackExceptionEdges(check_throw, on_exception, effect,
+ &check_fail, &control);
+ }
+
+ // We have to coerce callback_value to boolean.
+ Node* if_true_callback;
+ Node* etrue_callback;
+ {
+ Node* boolean_result =
+ graph()->NewNode(simplified()->ToBoolean(), callback_value);
+ Node* check_boolean_result =
+ graph()->NewNode(simplified()->ReferenceEqual(), boolean_result,
+ jsgraph()->TrueConstant());
+ Node* boolean_branch = graph()->NewNode(
+ common()->Branch(BranchHint::kFalse), check_boolean_result, control);
+ if_true_callback = graph()->NewNode(common()->IfTrue(), boolean_branch);
+ etrue_callback = effect;
+
+ // Nothing to do in the false case.
+ control = graph()->NewNode(common()->IfFalse(), boolean_branch);
+ }
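+  // Array.prototype.some short-circuits on the first truthy callback result,
+  // e.g. [1, 2, 3].some(x => x === 2) returns true without visiting index 2;
+  // {if_true_callback} carries that early exit out of the loop below.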
+
+ if (IsHoleyElementsKind(kind)) {
+ Node* after_call_control = control;
+ Node* after_call_effect = effect;
+ control = hole_true;
+ effect = effect_true;
+
+ control = graph()->NewNode(common()->Merge(2), control, after_call_control);
+ effect = graph()->NewNode(common()->EffectPhi(2), effect, after_call_effect,
+ control);
+ }
+
+ loop->ReplaceInput(1, control);
+ vloop->ReplaceInput(1, next_k);
+ eloop->ReplaceInput(1, effect);
+
+ control = graph()->NewNode(common()->Merge(2), if_false, if_true_callback);
+ effect =
+ graph()->NewNode(common()->EffectPhi(2), eloop, etrue_callback, control);
+ Node* return_value = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2),
+ jsgraph()->FalseConstant(), jsgraph()->TrueConstant(), control);
+
+ // Wire up the branch for the case when IsCallable fails for the callback.
+ // Since {check_throw} is an unconditional throw, it's impossible to
+  // return a successful completion. Therefore, we simply connect the
+  // throwing path to the graph end.
+ Node* throw_node =
+ graph()->NewNode(common()->Throw(), check_throw, check_fail);
+ NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
+
+ ReplaceWithValue(node, return_value, effect, control);
+ return Replace(return_value);
+}
+
Reduction JSCallReducer::ReduceCallApiFunction(Node* node,
Handle<JSFunction> function) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
@@ -1911,8 +3110,6 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
return ReduceFunctionPrototypeCall(node);
case Builtins::kFunctionPrototypeHasInstance:
return ReduceFunctionPrototypeHasInstance(node);
- case Builtins::kNumberConstructor:
- return ReduceNumberConstructor(node);
case Builtins::kObjectConstructor:
return ReduceObjectConstructor(node);
case Builtins::kObjectGetPrototypeOf:
@@ -1941,8 +3138,30 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
return ReduceArrayMap(function, node);
case Builtins::kArrayFilter:
return ReduceArrayFilter(function, node);
+ case Builtins::kArrayReduce:
+ return ReduceArrayReduce(function, node);
+ case Builtins::kArrayReduceRight:
+ return ReduceArrayReduceRight(function, node);
+ case Builtins::kArrayPrototypeFind:
+ return ReduceArrayFind(ArrayFindVariant::kFind, function, node);
+ case Builtins::kArrayPrototypeFindIndex:
+ return ReduceArrayFind(ArrayFindVariant::kFindIndex, function, node);
+ case Builtins::kArrayEvery:
+ return ReduceArrayEvery(function, node);
+ case Builtins::kArrayPrototypePush:
+ return ReduceArrayPrototypePush(node);
+ case Builtins::kArrayPrototypePop:
+ return ReduceArrayPrototypePop(node);
+ case Builtins::kArrayPrototypeShift:
+ return ReduceArrayPrototypeShift(node);
case Builtins::kReturnReceiver:
return ReduceReturnReceiver(node);
+ case Builtins::kStringPrototypeIndexOf:
+ return ReduceStringPrototypeIndexOf(function, node);
+ case Builtins::kStringPrototypeCharAt:
+ return ReduceStringPrototypeCharAt(node);
+ case Builtins::kStringPrototypeCharCodeAt:
+ return ReduceStringPrototypeCharCodeAt(node);
default:
break;
}
@@ -2046,9 +3265,9 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
// Check that the {target} is still the {target_function}.
Node* check = graph()->NewNode(simplified()->ReferenceEqual(), target,
target_function);
- effect =
- graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kNoReason),
- check, effect, control);
+ effect = graph()->NewNode(
+ simplified()->CheckIf(DeoptimizeReason::kWrongCallTarget), check,
+ effect, control);
// Specialize the JSCall node to the {target_function}.
NodeProperties::ReplaceValueInput(node, target_function, 0);
@@ -2119,9 +3338,9 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
// Check that the {target} is still the {array_function}.
Node* check = graph()->NewNode(simplified()->ReferenceEqual(), target,
array_function);
- effect =
- graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kNoReason),
- check, effect, control);
+ effect = graph()->NewNode(
+ simplified()->CheckIf(DeoptimizeReason::kWrongCallTarget), check,
+ effect, control);
// Turn the {node} into a {JSCreateArray} call.
NodeProperties::ReplaceEffectInput(node, effect);
@@ -2142,9 +3361,9 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
// Check that the {new_target} is still the {new_target_feedback}.
Node* check = graph()->NewNode(simplified()->ReferenceEqual(),
new_target, new_target_feedback);
- effect =
- graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kNoReason),
- check, effect, control);
+ effect = graph()->NewNode(
+ simplified()->CheckIf(DeoptimizeReason::kWrongCallTarget), check,
+ effect, control);
// Specialize the JSConstruct node to the {new_target_feedback}.
NodeProperties::ReplaceValueInput(node, new_target_feedback, arity + 1);
@@ -2297,6 +3516,47 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
return NoChange();
}
+// ES6 String.prototype.indexOf(searchString [, position])
+// #sec-string.prototype.indexof
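+// The reduction below applies only when a searchString argument is present:
+// receiver and searchString are speculatively checked to be strings and the
+// optional position to be a Smi, then the call is lowered to the simplified
+// StringIndexOf operator, e.g. 'abcbc'.indexOf('bc', 2) evaluates to 3.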
+Reduction JSCallReducer::ReduceStringPrototypeIndexOf(
+ Handle<JSFunction> function, Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ if (node->op()->ValueInputCount() >= 3) {
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* new_receiver = effect = graph()->NewNode(
+ simplified()->CheckString(p.feedback()), receiver, effect, control);
+
+ Node* search_string = NodeProperties::GetValueInput(node, 2);
+ Node* new_search_string = effect =
+ graph()->NewNode(simplified()->CheckString(p.feedback()), search_string,
+ effect, control);
+
+ Node* new_position = jsgraph()->ZeroConstant();
+ if (node->op()->ValueInputCount() >= 4) {
+ Node* position = NodeProperties::GetValueInput(node, 3);
+ new_position = effect = graph()->NewNode(
+ simplified()->CheckSmi(p.feedback()), position, effect, control);
+ }
+
+ NodeProperties::ReplaceEffectInput(node, effect);
+ RelaxEffectsAndControls(node);
+ node->ReplaceInput(0, new_receiver);
+ node->ReplaceInput(1, new_search_string);
+ node->ReplaceInput(2, new_position);
+ node->TrimInputCount(3);
+ NodeProperties::ChangeOp(node, simplified()->StringIndexOf());
+ return Changed(node);
+ }
+ return NoChange();
+}
+
Reduction JSCallReducer::ReduceJSConstructWithArrayLike(Node* node) {
DCHECK_EQ(IrOpcode::kJSConstructWithArrayLike, node->opcode());
CallFrequency frequency = CallFrequencyOf(node->op());
@@ -2328,9 +3588,9 @@ Reduction JSCallReducer::ReduceSoftDeoptimize(Node* node,
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* frame_state = NodeProperties::FindFrameStateBefore(node);
- Node* deoptimize =
- graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kSoft, reason),
- frame_state, effect, control);
+ Node* deoptimize = graph()->NewNode(
+ common()->Deoptimize(DeoptimizeKind::kSoft, reason, VectorSlotPair()),
+ frame_state, effect, control);
// TODO(bmeurer): This should be on the AdvancedReducer somehow.
NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
Revisit(graph()->end());
@@ -2339,6 +3599,571 @@ Reduction JSCallReducer::ReduceSoftDeoptimize(Node* node,
return Changed(node);
}
+namespace {
+
+// TODO(turbofan): This was copied from Crankshaft, might be too restrictive.
+bool IsReadOnlyLengthDescriptor(Handle<Map> jsarray_map) {
+ DCHECK(!jsarray_map->is_dictionary_map());
+ Isolate* isolate = jsarray_map->GetIsolate();
+ Handle<Name> length_string = isolate->factory()->length_string();
+ DescriptorArray* descriptors = jsarray_map->instance_descriptors();
+ int number =
+ descriptors->SearchWithCache(isolate, *length_string, *jsarray_map);
+ DCHECK_NE(DescriptorArray::kNotFound, number);
+ return descriptors->GetDetails(number).IsReadOnly();
+}
+
+// TODO(turbofan): This was copied from Crankshaft, might be too restrictive.
+bool CanInlineArrayResizeOperation(Handle<Map> receiver_map) {
+ Isolate* const isolate = receiver_map->GetIsolate();
+ if (!receiver_map->prototype()->IsJSArray()) return false;
+ Handle<JSArray> receiver_prototype(JSArray::cast(receiver_map->prototype()),
+ isolate);
+ return receiver_map->instance_type() == JS_ARRAY_TYPE &&
+ IsFastElementsKind(receiver_map->elements_kind()) &&
+ !receiver_map->is_dictionary_map() && receiver_map->is_extensible() &&
+ isolate->IsAnyInitialArrayPrototype(receiver_prototype) &&
+ !IsReadOnlyLengthDescriptor(receiver_map);
+}
+
+} // namespace
+
+// ES6 section 22.1.3.18 Array.prototype.push ( )
+Reduction JSCallReducer::ReduceArrayPrototypePush(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
+ if (!isolate()->IsNoElementsProtectorIntact()) return NoChange();
+
+ int const num_values = node->op()->ValueInputCount() - 2;
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Try to determine the {receiver} map(s).
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+ DCHECK_NE(0, receiver_maps.size());
+
+ ElementsKind kind = receiver_maps[0]->elements_kind();
+
+ for (Handle<Map> receiver_map : receiver_maps) {
+ if (!CanInlineArrayResizeOperation(receiver_map)) return NoChange();
+ if (!UnionElementsKindUptoPackedness(&kind, receiver_map->elements_kind()))
+ return NoChange();
+ }
+
+ // Install code dependencies on the {receiver} global array protector cell.
+ dependencies()->AssumePropertyCell(factory()->no_elements_protector());
+
+ // If the {receiver_maps} information is not reliable, we need
+ // to check that the {receiver} still has one of these maps.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+ }
+
+ // Collect the value inputs to push.
+ std::vector<Node*> values(num_values);
+ for (int i = 0; i < num_values; ++i) {
+ values[i] = NodeProperties::GetValueInput(node, 2 + i);
+ }
+
+ for (auto& value : values) {
+ if (IsSmiElementsKind(kind)) {
+ value = effect = graph()->NewNode(simplified()->CheckSmi(p.feedback()),
+ value, effect, control);
+ } else if (IsDoubleElementsKind(kind)) {
+ value = effect = graph()->NewNode(simplified()->CheckNumber(p.feedback()),
+ value, effect, control);
+ // Make sure we do not store signaling NaNs into double arrays.
+ value = graph()->NewNode(simplified()->NumberSilenceNaN(), value);
+ }
+ }
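+  // These checks are speculative: e.g. [1, 2].push("x") on a
+  // PACKED_SMI_ELEMENTS receiver fails the CheckSmi above and deoptimizes
+  // rather than transitioning the elements kind inside optimized code.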
+
+ // Load the "length" property of the {receiver}.
+ Node* length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
+ effect, control);
+ Node* value = length;
+
+ // Check if we have any {values} to push.
+ if (num_values > 0) {
+ // Compute the resulting "length" of the {receiver}.
+ Node* new_length = value = graph()->NewNode(
+ simplified()->NumberAdd(), length, jsgraph()->Constant(num_values));
+
+ // Load the elements backing store of the {receiver}.
+ Node* elements = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver,
+ effect, control);
+ Node* elements_length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForFixedArrayLength()), elements,
+ effect, control);
+
+ GrowFastElementsMode mode =
+ IsDoubleElementsKind(kind) ? GrowFastElementsMode::kDoubleElements
+ : GrowFastElementsMode::kSmiOrObjectElements;
+ elements = effect = graph()->NewNode(
+ simplified()->MaybeGrowFastElements(mode, p.feedback()), receiver,
+ elements,
+ graph()->NewNode(simplified()->NumberAdd(), length,
+ jsgraph()->Constant(num_values - 1)),
+ elements_length, effect, control);
+
+ // Update the JSArray::length field. Since this is observable,
+ // there must be no other check after this.
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSArrayLength(kind)),
+ receiver, new_length, effect, control);
+
+ // Append the {values} to the {elements}.
+ for (int i = 0; i < num_values; ++i) {
+ Node* value = values[i];
+ Node* index = graph()->NewNode(simplified()->NumberAdd(), length,
+ jsgraph()->Constant(i));
+ effect = graph()->NewNode(
+ simplified()->StoreElement(AccessBuilder::ForFixedArrayElement(kind)),
+ elements, index, value, effect, control);
+ }
+ }
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
+// ES6 section 22.1.3.17 Array.prototype.pop ( )
+Reduction JSCallReducer::ReduceArrayPrototypePop(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
+ if (!isolate()->IsNoElementsProtectorIntact()) return NoChange();
+
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+ DCHECK_NE(0, receiver_maps.size());
+
+ ElementsKind kind = receiver_maps[0]->elements_kind();
+ for (Handle<Map> receiver_map : receiver_maps) {
+ if (!CanInlineArrayResizeOperation(receiver_map)) return NoChange();
+ // TODO(turbofan): Extend this to also handle fast holey double elements
+    // once we have the hole NaN mess sorted out in TurboFan/V8.
+ if (receiver_map->elements_kind() == HOLEY_DOUBLE_ELEMENTS)
+ return NoChange();
+ if (!UnionElementsKindUptoPackedness(&kind, receiver_map->elements_kind()))
+ return NoChange();
+ }
+
+ // Install code dependencies on the {receiver} global array protector cell.
+ dependencies()->AssumePropertyCell(factory()->no_elements_protector());
+
+ // If the {receiver_maps} information is not reliable, we need
+ // to check that the {receiver} still has one of these maps.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+ }
+
+ // Load the "length" property of the {receiver}.
+ Node* length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
+ effect, control);
+
+ // Check if the {receiver} has any elements.
+ Node* check = graph()->NewNode(simplified()->NumberEqual(), length,
+ jsgraph()->ZeroConstant());
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = jsgraph()->UndefinedConstant();
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse;
+ {
+ // TODO(tebbi): We should trim the backing store if the capacity is too
+ // big, as implemented in elements.cc:ElementsAccessorBase::SetLengthImpl.
+
+ // Load the elements backing store from the {receiver}.
+ Node* elements = efalse = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver,
+ efalse, if_false);
+
+ // Ensure that we aren't popping from a copy-on-write backing store.
+ if (IsSmiOrObjectElementsKind(kind)) {
+ elements = efalse =
+ graph()->NewNode(simplified()->EnsureWritableFastElements(), receiver,
+ elements, efalse, if_false);
+ }
+
+ // Compute the new {length}.
+ length = graph()->NewNode(simplified()->NumberSubtract(), length,
+ jsgraph()->OneConstant());
+
+ // Store the new {length} to the {receiver}.
+ efalse = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSArrayLength(kind)),
+ receiver, length, efalse, if_false);
+
+ // Load the last entry from the {elements}.
+ vfalse = efalse = graph()->NewNode(
+ simplified()->LoadElement(AccessBuilder::ForFixedArrayElement(kind)),
+ elements, length, efalse, if_false);
+
+ // Store a hole to the element we just removed from the {receiver}.
+ efalse = graph()->NewNode(
+ simplified()->StoreElement(
+ AccessBuilder::ForFixedArrayElement(GetHoleyElementsKind(kind))),
+ elements, length, jsgraph()->TheHoleConstant(), efalse, if_false);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ Node* value = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), vtrue, vfalse, control);
+
+  // Convert the hole to undefined. Do this last, so that the conversion
+  // operator can often be strength-reduced away.
+ if (IsHoleyElementsKind(kind)) {
+ value =
+ graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(), value);
+ }
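+  // E.g. [1, ,].pop() reads a hole at index 1, which the conversion above
+  // turns into the undefined value that pop has to return.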
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
+// ES6 section 22.1.3.22 Array.prototype.shift ( )
+Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
+ if (!isolate()->IsNoElementsProtectorIntact()) return NoChange();
+ Node* target = NodeProperties::GetValueInput(node, 0);
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+ DCHECK_NE(0, receiver_maps.size());
+
+ ElementsKind kind = receiver_maps[0]->elements_kind();
+ for (Handle<Map> receiver_map : receiver_maps) {
+ if (!CanInlineArrayResizeOperation(receiver_map)) return NoChange();
+ // TODO(turbofan): Extend this to also handle fast holey double elements
+    // once we have the hole NaN mess sorted out in TurboFan/V8.
+ if (receiver_map->elements_kind() == HOLEY_DOUBLE_ELEMENTS)
+ return NoChange();
+ if (!UnionElementsKindUptoPackedness(&kind, receiver_map->elements_kind()))
+ return NoChange();
+ }
+
+ // Install code dependencies on the {receiver} global array protector cell.
+ dependencies()->AssumePropertyCell(factory()->no_elements_protector());
+
+ // If the {receiver_maps} information is not reliable, we need
+ // to check that the {receiver} still has one of these maps.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+ }
+
+ // Load length of the {receiver}.
+ Node* length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
+ effect, control);
+
+ // Return undefined if {receiver} has no elements.
+ Node* check0 = graph()->NewNode(simplified()->NumberEqual(), length,
+ jsgraph()->ZeroConstant());
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control);
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* etrue0 = effect;
+ Node* vtrue0 = jsgraph()->UndefinedConstant();
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* efalse0 = effect;
+ Node* vfalse0;
+ {
+ // Check if we should take the fast-path.
+ Node* check1 =
+ graph()->NewNode(simplified()->NumberLessThanOrEqual(), length,
+ jsgraph()->Constant(JSArray::kMaxCopyElements));
+ Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check1, if_false0);
+
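+    // The inlined element-moving loop below is bounded by kMaxCopyElements;
+    // longer receivers take the {if_false1} path into the generic ArrayShift
+    // C++ builtin instead.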
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* etrue1 = efalse0;
+ Node* vtrue1;
+ {
+ Node* elements = etrue1 = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
+ receiver, etrue1, if_true1);
+
+ // Load the first element here, which we return below.
+ vtrue1 = etrue1 = graph()->NewNode(
+ simplified()->LoadElement(AccessBuilder::ForFixedArrayElement(kind)),
+ elements, jsgraph()->ZeroConstant(), etrue1, if_true1);
+
+ // Ensure that we aren't shifting a copy-on-write backing store.
+ if (IsSmiOrObjectElementsKind(kind)) {
+ elements = etrue1 =
+ graph()->NewNode(simplified()->EnsureWritableFastElements(),
+ receiver, elements, etrue1, if_true1);
+ }
+
+ // Shift the remaining {elements} by one towards the start.
+ Node* loop = graph()->NewNode(common()->Loop(2), if_true1, if_true1);
+ Node* eloop =
+ graph()->NewNode(common()->EffectPhi(2), etrue1, etrue1, loop);
+ Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
+ NodeProperties::MergeControlToEnd(graph(), common(), terminate);
+ Node* index = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2),
+ jsgraph()->OneConstant(),
+ jsgraph()->Constant(JSArray::kMaxCopyElements - 1), loop);
+
+ {
+ Node* check2 =
+ graph()->NewNode(simplified()->NumberLessThan(), index, length);
+ Node* branch2 = graph()->NewNode(common()->Branch(), check2, loop);
+
+ if_true1 = graph()->NewNode(common()->IfFalse(), branch2);
+ etrue1 = eloop;
+
+ Node* control = graph()->NewNode(common()->IfTrue(), branch2);
+ Node* effect = etrue1;
+
+ ElementAccess const access = AccessBuilder::ForFixedArrayElement(kind);
+ Node* value = effect =
+ graph()->NewNode(simplified()->LoadElement(access), elements, index,
+ effect, control);
+ effect =
+ graph()->NewNode(simplified()->StoreElement(access), elements,
+ graph()->NewNode(simplified()->NumberSubtract(),
+ index, jsgraph()->OneConstant()),
+ value, effect, control);
+
+ loop->ReplaceInput(1, control);
+ eloop->ReplaceInput(1, effect);
+ index->ReplaceInput(1,
+ graph()->NewNode(simplified()->NumberAdd(), index,
+ jsgraph()->OneConstant()));
+ }
+
+ // Compute the new {length}.
+ length = graph()->NewNode(simplified()->NumberSubtract(), length,
+ jsgraph()->OneConstant());
+
+ // Store the new {length} to the {receiver}.
+ etrue1 = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSArrayLength(kind)),
+ receiver, length, etrue1, if_true1);
+
+ // Store a hole to the element we just removed from the {receiver}.
+ etrue1 = graph()->NewNode(
+ simplified()->StoreElement(
+ AccessBuilder::ForFixedArrayElement(GetHoleyElementsKind(kind))),
+ elements, length, jsgraph()->TheHoleConstant(), etrue1, if_true1);
+ }
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* efalse1 = efalse0;
+ Node* vfalse1;
+ {
+ // Call the generic C++ implementation.
+ const int builtin_index = Builtins::kArrayShift;
+ CallDescriptor const* const desc = Linkage::GetCEntryStubCallDescriptor(
+ graph()->zone(), 1, BuiltinArguments::kNumExtraArgsWithReceiver,
+ Builtins::name(builtin_index), node->op()->properties(),
+ CallDescriptor::kNeedsFrameState);
+ Node* stub_code =
+ jsgraph()->CEntryStubConstant(1, kDontSaveFPRegs, kArgvOnStack, true);
+ Address builtin_entry = Builtins::CppEntryOf(builtin_index);
+ Node* entry = jsgraph()->ExternalConstant(
+ ExternalReference(builtin_entry, isolate()));
+ Node* argc =
+ jsgraph()->Constant(BuiltinArguments::kNumExtraArgsWithReceiver);
+ if_false1 = efalse1 = vfalse1 =
+ graph()->NewNode(common()->Call(desc), stub_code, receiver,
+ jsgraph()->PaddingConstant(), argc, target,
+ jsgraph()->UndefinedConstant(), entry, argc, context,
+ frame_state, efalse1, if_false1);
+ }
+
+ if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ efalse0 =
+ graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
+ vfalse0 = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue1, vfalse1, if_false0);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
+ Node* value =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue0, vfalse0, control);
+
+  // Convert the hole to undefined. Do this last, so that in many cases the
+  // conversion operator can be optimized away via strength reduction.
+ if (IsHoleyElementsKind(kind)) {
+ value =
+ graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(), value);
+ }
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
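For orientation, the fast path assembled above (taken only when the length is
at most JSArray::kMaxCopyElements) encodes the following scalar logic. This is
a minimal illustrative sketch, not V8 code; the std::vector and the function
name are hypothetical stand-ins for the FixedArray backing store:

    #include <vector>

    // Sketch of the shift fast path: return the first element, copy the
    // remaining elements down by one, shrink the length, and write a hole
    // into the now-unused tail slot.
    template <typename T>
    T ShiftFastPath(std::vector<T>& elements, size_t& length, T hole) {
      T first = elements[0];                 // loaded before the copy loop
      for (size_t k = 1; k < length; ++k) {  // the index Phi loop above
        elements[k - 1] = elements[k];
      }
      --length;                              // the NumberSubtract + StoreField
      elements[length] = hole;               // the final StoreElement
      return first;
    }

Lengths above kMaxCopyElements instead fall back to the generic
Builtins::kArrayShift C++ call in the if_false1 branch.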
+// ES6 section 21.1.3.1 String.prototype.charAt ( pos )
+Reduction JSCallReducer::ReduceStringPrototypeCharAt(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* index = jsgraph()->ZeroConstant();
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ receiver = effect = graph()->NewNode(simplified()->CheckString(p.feedback()),
+ receiver, effect, control);
+ if (node->op()->ValueInputCount() >= 3) {
+ index = effect = graph()->NewNode(simplified()->CheckSmi(p.feedback()),
+ NodeProperties::GetValueInput(node, 2),
+ effect, control);
+ // Map -0 and NaN to 0 (as per ToInteger), and the values in
+ // the [-2^31,-1] range to the [2^31,2^32-1] range, which will
+ // be considered out-of-bounds as well, because of the maximal
+ // String length limit in V8.
+ STATIC_ASSERT(String::kMaxLength <= kMaxInt);
+ index = graph()->NewNode(simplified()->NumberToUint32(), index);
+ }
+
+ // Determine the {receiver} length.
+ Node* receiver_length =
+ graph()->NewNode(simplified()->StringLength(), receiver);
+
+ // Check if {index} is less than {receiver} length.
+ Node* check =
+ graph()->NewNode(simplified()->NumberLessThan(), index, receiver_length);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+  // Return the character from the {receiver} as a single-character string.
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+
+ Node* masked_index = graph()->NewNode(simplified()->MaskIndexWithBound(),
+ index, receiver_length);
+
+ Node* vtrue = graph()->NewNode(simplified()->StringCharAt(), receiver,
+ masked_index, if_true);
+
+ // Return the empty string otherwise.
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* vfalse = jsgraph()->EmptyStringConstant();
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* value = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), vtrue, vfalse, control);
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
+// ES6 section 21.1.3.2 String.prototype.charCodeAt ( pos )
+Reduction JSCallReducer::ReduceStringPrototypeCharCodeAt(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* index = jsgraph()->ZeroConstant();
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ receiver = effect = graph()->NewNode(simplified()->CheckString(p.feedback()),
+ receiver, effect, control);
+ if (node->op()->ValueInputCount() >= 3) {
+ index = effect = graph()->NewNode(simplified()->CheckSmi(p.feedback()),
+ NodeProperties::GetValueInput(node, 2),
+ effect, control);
+
+ // Map -0 and NaN to 0 (as per ToInteger), and the values in
+ // the [-2^31,-1] range to the [2^31,2^32-1] range, which will
+ // be considered out-of-bounds as well, because of the maximal
+ // String length limit in V8.
+ STATIC_ASSERT(String::kMaxLength <= kMaxInt);
+ index = graph()->NewNode(simplified()->NumberToUint32(), index);
+ }
+
+ // Determine the {receiver} length.
+ Node* receiver_length =
+ graph()->NewNode(simplified()->StringLength(), receiver);
+
+ // Check if {index} is less than {receiver} length.
+ Node* check =
+ graph()->NewNode(simplified()->NumberLessThan(), index, receiver_length);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+ // Load the character from the {receiver}.
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+
+ Node* masked_index = graph()->NewNode(simplified()->MaskIndexWithBound(),
+ index, receiver_length);
+
+ Node* vtrue = graph()->NewNode(simplified()->StringCharCodeAt(), receiver,
+ masked_index, if_true);
+
+ // Return NaN otherwise.
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* vfalse = jsgraph()->NaNConstant();
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* value = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), vtrue, vfalse, control);
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
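Both reductions above use the same index trick: after CheckSmi, NumberToUint32
reinterprets the index as an unsigned 32-bit value, so every negative input
lands in [2^31, 2^32-1] and the single unsigned "index < length" comparison
rejects it too, because string lengths never reach 2^31. A standalone sketch
of the invariant (illustrative, not V8 code):

    #include <cassert>
    #include <cstdint>

    // kMaxLength stands in for String::kMaxLength; the STATIC_ASSERT above
    // guarantees the real value is <= kMaxInt, i.e. below 2^31.
    constexpr uint32_t kMaxLength = 0x7FFFFFFFu;

    bool IndexInBounds(int32_t index, uint32_t length) {
      assert(length <= kMaxLength);
      uint32_t u = static_cast<uint32_t>(index);  // -1 -> 0xFFFFFFFF
      return u < length;  // one comparison covers both bounds
    }

    // IndexInBounds(-1, 10) == false; IndexInBounds(3, 10) == true.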
Graph* JSCallReducer::graph() const { return jsgraph()->graph(); }
Isolate* JSCallReducer::isolate() const { return jsgraph()->isolate(); }
diff --git a/deps/v8/src/compiler/js-call-reducer.h b/deps/v8/src/compiler/js-call-reducer.h
index 6e2353c4c1..b2656b6be8 100644
--- a/deps/v8/src/compiler/js-call-reducer.h
+++ b/deps/v8/src/compiler/js-call-reducer.h
@@ -15,6 +15,7 @@ namespace internal {
// Forward declarations.
class CompilationDependencies;
class Factory;
+class VectorSlotPair;
namespace compiler {
@@ -24,7 +25,6 @@ class CommonOperatorBuilder;
class JSGraph;
class JSOperatorBuilder;
class SimplifiedOperatorBuilder;
-class VectorSlotPair;
// Performs strength reduction on {JSConstruct} and {JSCall} nodes,
// which might allow inlining or other optimizations to be performed afterwards.
@@ -55,7 +55,6 @@ class JSCallReducer final : public AdvancedReducer {
Reduction ReduceArrayConstructor(Node* node);
Reduction ReduceBooleanConstructor(Node* node);
Reduction ReduceCallApiFunction(Node* node, Handle<JSFunction> function);
- Reduction ReduceNumberConstructor(Node* node);
Reduction ReduceFunctionPrototypeApply(Node* node);
Reduction ReduceFunctionPrototypeBind(Node* node);
Reduction ReduceFunctionPrototypeCall(Node* node);
@@ -73,8 +72,18 @@ class JSCallReducer final : public AdvancedReducer {
Reduction ReduceReflectGetPrototypeOf(Node* node);
Reduction ReduceReflectHas(Node* node);
Reduction ReduceArrayForEach(Handle<JSFunction> function, Node* node);
+ Reduction ReduceArrayReduce(Handle<JSFunction> function, Node* node);
+ Reduction ReduceArrayReduceRight(Handle<JSFunction> function, Node* node);
Reduction ReduceArrayMap(Handle<JSFunction> function, Node* node);
Reduction ReduceArrayFilter(Handle<JSFunction> function, Node* node);
+ enum class ArrayFindVariant : uint8_t { kFind, kFindIndex };
+ Reduction ReduceArrayFind(ArrayFindVariant variant,
+ Handle<JSFunction> function, Node* node);
+ Reduction ReduceArrayEvery(Handle<JSFunction> function, Node* node);
+ Reduction ReduceArraySome(Handle<JSFunction> function, Node* node);
+ Reduction ReduceArrayPrototypePush(Node* node);
+ Reduction ReduceArrayPrototypePop(Node* node);
+ Reduction ReduceArrayPrototypeShift(Node* node);
Reduction ReduceCallOrConstructWithArrayLikeOrSpread(
Node* node, int arity, CallFrequency const& frequency,
VectorSlotPair const& feedback);
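The find/findIndex pair shares one reducer, selected by the ArrayFindVariant
enum. The dispatch presumably looks like the following hypothetical sketch of
the builtin switch in ReduceJSCall (the case labels are assumptions, not
quoted from this patch):

    case Builtins::kArrayPrototypeFind:
      return ReduceArrayFind(ArrayFindVariant::kFind, function, node);
    case Builtins::kArrayPrototypeFindIndex:
      return ReduceArrayFind(ArrayFindVariant::kFindIndex, function, node);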
@@ -85,6 +94,10 @@ class JSCallReducer final : public AdvancedReducer {
Reduction ReduceJSCallWithArrayLike(Node* node);
Reduction ReduceJSCallWithSpread(Node* node);
Reduction ReduceReturnReceiver(Node* node);
+ Reduction ReduceStringPrototypeIndexOf(Handle<JSFunction> function,
+ Node* node);
+ Reduction ReduceStringPrototypeCharAt(Node* node);
+ Reduction ReduceStringPrototypeCharCodeAt(Node* node);
Reduction ReduceSoftDeoptimize(Node* node, DeoptimizeReason reason);
@@ -107,10 +120,20 @@ class JSCallReducer final : public AdvancedReducer {
Node* effect, Node** check_fail,
Node** control);
+  // Begin the central loop of a higher-order array builtin. A Loop is wired
+  // into {control}, an EffectPhi into {effect}, and the array index {k} is
+  // threaded into a Phi, which is returned. After this function returns,
+  // {control} holds the loop node and {effect} the corresponding EffectPhi,
+  // which callers typically want to save.
+ Node* WireInLoopStart(Node* k, Node** control, Node** effect);
+ void WireInLoopEnd(Node* loop, Node* eloop, Node* vloop, Node* k,
+ Node* control, Node* effect);
+
   // Load receiver[k], first bounding k by the receiver's array length.
   // Both k and the effect are updated in the process.
Node* SafeLoadElement(ElementsKind kind, Node* receiver, Node* control,
- Node** effect, Node** k);
+ Node** effect, Node** k,
+ const VectorSlotPair& feedback);
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index 9b0601f8f1..d3b9ee4e70 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -525,7 +525,7 @@ Reduction JSCreateLowering::ReduceNewArray(Node* node, Node* length,
// This has to be kept in sync with src/runtime/runtime-array.cc,
// where this limit is protected.
length = effect = graph()->NewNode(
- simplified()->CheckBounds(), length,
+ simplified()->CheckBounds(VectorSlotPair()), length,
jsgraph()->Constant(JSArray::kInitialMaxFastElementArray), effect,
control);
@@ -617,15 +617,16 @@ Reduction JSCreateLowering::ReduceNewArray(Node* node,
if (IsSmiElementsKind(elements_kind)) {
for (auto& value : values) {
if (!NodeProperties::GetType(value)->Is(Type::SignedSmall())) {
- value = effect =
- graph()->NewNode(simplified()->CheckSmi(), value, effect, control);
+ value = effect = graph()->NewNode(
+ simplified()->CheckSmi(VectorSlotPair()), value, effect, control);
}
}
} else if (IsDoubleElementsKind(elements_kind)) {
for (auto& value : values) {
if (!NodeProperties::GetType(value)->Is(Type::Number())) {
- value = effect = graph()->NewNode(simplified()->CheckNumber(), value,
- effect, control);
+ value = effect =
+ graph()->NewNode(simplified()->CheckNumber(VectorSlotPair()), value,
+ effect, control);
}
// Make sure we do not store signaling NaNs into double arrays.
value = graph()->NewNode(simplified()->NumberSilenceNaN(), value);
@@ -913,6 +914,7 @@ Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) {
DCHECK(!function_map->is_dictionary_map());
// Emit code to allocate the JSFunction instance.
+ STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kPointerSize);
AllocationBuilder a(jsgraph(), effect, control);
a.Allocate(function_map->instance_size());
a.Store(AccessBuilder::ForMap(), function_map);
@@ -980,9 +982,9 @@ Reduction JSCreateLowering::ReduceJSCreateKeyValueArray(Node* node) {
AllocationBuilder aa(jsgraph(), effect, graph()->start());
aa.AllocateArray(2, factory()->fixed_array_map());
aa.Store(AccessBuilder::ForFixedArrayElement(PACKED_ELEMENTS),
- jsgraph()->Constant(0), key);
+ jsgraph()->ZeroConstant(), key);
aa.Store(AccessBuilder::ForFixedArrayElement(PACKED_ELEMENTS),
- jsgraph()->Constant(1), value);
+ jsgraph()->OneConstant(), value);
Node* elements = aa.Finish();
AllocationBuilder a(jsgraph(), elements, graph()->start());
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index d06717717d..c09dcbc1b3 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -251,32 +251,17 @@ void JSGenericLowering::LowerJSStoreGlobal(Node* node) {
const StoreGlobalParameters& p = StoreGlobalParametersOf(node->op());
Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
- Node* context = NodeProperties::GetContextInput(node);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- // Load global object from the context.
- Node* native_context = effect =
- graph()->NewNode(machine()->Load(MachineType::AnyTagged()), context,
- jsgraph()->IntPtrConstant(
- Context::SlotOffset(Context::NATIVE_CONTEXT_INDEX)),
- effect, control);
- Node* global = effect = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), native_context,
- jsgraph()->IntPtrConstant(Context::SlotOffset(Context::EXTENSION_INDEX)),
- effect, control);
- NodeProperties::ReplaceEffectInput(node, effect);
- node->InsertInput(zone(), 0, global);
- node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
- node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
+ node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.name()));
+ node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
if (outer_state->opcode() != IrOpcode::kFrameState) {
Callable callable =
- CodeFactory::StoreGlobalIC(isolate(), p.language_mode());
+ Builtins::CallableFor(isolate(), Builtins::kStoreGlobalICTrampoline);
ReplaceWithStubCall(node, callable, flags);
} else {
Callable callable =
- CodeFactory::StoreGlobalICInOptimizedCode(isolate(), p.language_mode());
+ Builtins::CallableFor(isolate(), Builtins::kStoreGlobalIC);
Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
- node->InsertInput(zone(), 4, vector);
+ node->InsertInput(zone(), 3, vector);
ReplaceWithStubCall(node, callable, flags);
}
}
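After this lowering, the node's value inputs line up with the arguments the
new builtins expect. Reading the call shapes off the InsertInput calls above
(a sketch, not a quoted descriptor):

    StoreGlobalICTrampoline(name, value, slot)         // no outer frame state
    StoreGlobalIC(name, value, slot, feedback_vector)  // optimized-code path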
@@ -708,6 +693,10 @@ void JSGenericLowering::LowerJSGeneratorRestoreContinuation(Node* node) {
UNREACHABLE(); // Eliminated in typed lowering.
}
+void JSGenericLowering::LowerJSGeneratorRestoreInputOrDebugPos(Node* node) {
+ UNREACHABLE(); // Eliminated in typed lowering.
+}
+
void JSGenericLowering::LowerJSGeneratorRestoreRegister(Node* node) {
UNREACHABLE(); // Eliminated in typed lowering.
}
diff --git a/deps/v8/src/compiler/js-graph.cc b/deps/v8/src/compiler/js-graph.cc
index 12c610da56..cb3c620117 100644
--- a/deps/v8/src/compiler/js-graph.cc
+++ b/deps/v8/src/compiler/js-graph.cc
@@ -305,10 +305,6 @@ Node* JSGraph::Dead() {
return CACHED(kDead, graph()->NewNode(common()->Dead()));
}
-Node* JSGraph::DeadValue() {
- return CACHED(kDeadValue, graph()->NewNode(common()->DeadValue()));
-}
-
void JSGraph::GetCachedNodes(NodeVector* nodes) {
cache_.GetCachedNodes(nodes);
for (size_t i = 0; i < arraysize(cached_nodes_); i++) {
diff --git a/deps/v8/src/compiler/js-graph.h b/deps/v8/src/compiler/js-graph.h
index a685fd69a8..f5b4bdc181 100644
--- a/deps/v8/src/compiler/js-graph.h
+++ b/deps/v8/src/compiler/js-graph.h
@@ -155,9 +155,6 @@ class V8_EXPORT_PRIVATE JSGraph : public NON_EXPORTED_BASE(ZoneObject) {
// Create a control node that serves as dependency for dead nodes.
Node* Dead();
- // Sentinel for a value resulting from unreachable computations.
- Node* DeadValue();
-
CommonOperatorBuilder* common() const { return common_; }
JSOperatorBuilder* javascript() const { return javascript_; }
SimplifiedOperatorBuilder* simplified() const { return simplified_; }
@@ -199,7 +196,6 @@ class V8_EXPORT_PRIVATE JSGraph : public NON_EXPORTED_BASE(ZoneObject) {
kEmptyStateValues,
kSingleDeadTypedStateValues,
kDead,
- kDeadValue,
kNumCachedNodes // Must remain last.
};
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc
index 9cff51985a..c9909dcb75 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.cc
+++ b/deps/v8/src/compiler/js-inlining-heuristic.cc
@@ -6,6 +6,7 @@
#include "src/compilation-info.h"
#include "src/compiler/common-operator.h"
+#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/simplified-operator.h"
#include "src/objects-inl.h"
@@ -556,6 +557,8 @@ void JSInliningHeuristic::CreateOrReuseDispatch(Node* node, Node* callee,
Node** if_successes,
Node** calls, Node** inputs,
int input_count) {
+ SourcePositionTable::Scope position(
+ source_positions_, source_positions_->GetSourcePosition(node));
if (TryReuseDispatch(node, callee, candidate, if_successes, calls, inputs,
input_count)) {
return;
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.h b/deps/v8/src/compiler/js-inlining-heuristic.h
index dffa5cfd6a..f4f24f41b4 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.h
+++ b/deps/v8/src/compiler/js-inlining-heuristic.h
@@ -22,6 +22,7 @@ class JSInliningHeuristic final : public AdvancedReducer {
inliner_(editor, local_zone, info, jsgraph, source_positions),
candidates_(local_zone),
seen_(local_zone),
+ source_positions_(source_positions),
jsgraph_(jsgraph) {}
const char* reducer_name() const override { return "JSInliningHeuristic"; }
@@ -85,6 +86,7 @@ class JSInliningHeuristic final : public AdvancedReducer {
JSInliner inliner_;
Candidates candidates_;
ZoneSet<NodeId> seen_;
+ SourcePositionTable* source_positions_;
JSGraph* const jsgraph_;
int cumulative_count_ = 0;
};
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.cc b/deps/v8/src/compiler/js-intrinsic-lowering.cc
index 2322b8ac3a..dc1ec521f2 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.cc
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.cc
@@ -135,7 +135,8 @@ Reduction JSIntrinsicLowering::ReduceDeoptimizeNow(Node* node) {
// TODO(bmeurer): Move MergeControlToEnd() to the AdvancedReducer.
Node* deoptimize = graph()->NewNode(
- common()->Deoptimize(DeoptimizeKind::kEager, DeoptimizeReason::kNoReason),
+ common()->Deoptimize(DeoptimizeKind::kEager,
+ DeoptimizeReason::kDeoptimizeNow, VectorSlotPair()),
frame_state, effect, control);
NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
Revisit(graph()->end());
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index a6786da157..b2f8c567e2 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -20,6 +20,7 @@
#include "src/feedback-vector.h"
#include "src/field-index-inl.h"
#include "src/isolate-inl.h"
+#include "src/vector-slot-pair.h"
namespace v8 {
namespace internal {
@@ -596,8 +597,8 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
representation = MachineRepresentation::kTaggedPointer;
} else {
// Check that the {value} is a Smi.
- value = effect = graph()->NewNode(simplified()->CheckSmi(), value,
- effect, control);
+ value = effect = graph()->NewNode(
+ simplified()->CheckSmi(VectorSlotPair()), value, effect, control);
property_cell_value_type = Type::SignedSmall();
representation = MachineRepresentation::kTaggedSigned;
}
@@ -1061,13 +1062,11 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
if (access_mode == AccessMode::kStore) return NoChange();
// Ensure that the {receiver} is actually a String.
- receiver = effect = graph()->NewNode(simplified()->CheckString(), receiver,
- effect, control);
+ receiver = effect = graph()->NewNode(
+ simplified()->CheckString(VectorSlotPair()), receiver, effect, control);
// Determine the {receiver} length.
- Node* length = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForStringLength()), receiver,
- effect, control);
+ Node* length = graph()->NewNode(simplified()->StringLength(), receiver);
// Load the single character string from {receiver} or yield undefined
// if the {index} is out of bounds (depending on the {load_mode}).
@@ -1425,9 +1424,9 @@ Reduction JSNativeContextSpecialization::ReduceSoftDeoptimize(
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* frame_state = NodeProperties::FindFrameStateBefore(node);
- Node* deoptimize =
- graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kSoft, reason),
- frame_state, effect, control);
+ Node* deoptimize = graph()->NewNode(
+ common()->Deoptimize(DeoptimizeKind::kSoft, reason, VectorSlotPair()),
+ frame_state, effect, control);
// TODO(bmeurer): This should be on the AdvancedReducer somehow.
NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
Revisit(graph()->end());
@@ -1504,7 +1503,7 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadProperty(Node* node) {
Node* check = graph()->NewNode(simplified()->ReferenceEqual(),
receiver_map, enumerator);
effect = graph()->NewNode(
- simplified()->CheckIf(DeoptimizeReason::kNoReason), check, effect,
+ simplified()->CheckIf(DeoptimizeReason::kWrongMap), check, effect,
control);
}
@@ -1525,9 +1524,9 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadProperty(Node* node) {
simplified()->BooleanNot(),
graph()->NewNode(simplified()->ReferenceEqual(), enum_indices,
jsgraph()->EmptyFixedArrayConstant()));
- effect =
- graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kNoReason),
- check, effect, control);
+ effect = graph()->NewNode(
+ simplified()->CheckIf(DeoptimizeReason::kWrongEnumIndices), check,
+ effect, control);
// Determine the index from the {enum_indices}.
index = effect = graph()->NewNode(
@@ -1775,7 +1774,7 @@ JSNativeContextSpecialization::BuildPropertyStore(
Node* check =
graph()->NewNode(simplified()->ReferenceEqual(), value, constant_value);
effect =
- graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kNoReason),
+ graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kWrongValue),
check, effect, control);
value = constant_value;
} else if (access_info.IsAccessorConstant()) {
@@ -1809,8 +1808,9 @@ JSNativeContextSpecialization::BuildPropertyStore(
access_mode == AccessMode::kStoreInLiteral);
switch (field_representation) {
case MachineRepresentation::kFloat64: {
- value = effect = graph()->NewNode(simplified()->CheckNumber(), value,
- effect, control);
+ value = effect =
+ graph()->NewNode(simplified()->CheckNumber(VectorSlotPair()), value,
+ effect, control);
if (!field_index.is_inobject() || field_index.is_hidden_field() ||
!FLAG_unbox_double_fields) {
if (access_info.HasTransitionMap()) {
@@ -1852,8 +1852,8 @@ JSNativeContextSpecialization::BuildPropertyStore(
Node* check = graph()->NewNode(simplified()->NumberEqual(),
current_value, value);
effect = graph()->NewNode(
- simplified()->CheckIf(DeoptimizeReason::kNoReason), check, effect,
- control);
+ simplified()->CheckIf(DeoptimizeReason::kWrongValue), check,
+ effect, control);
return ValueEffectControl(value, effect, control);
}
break;
@@ -1871,14 +1871,14 @@ JSNativeContextSpecialization::BuildPropertyStore(
Node* check = graph()->NewNode(simplified()->ReferenceEqual(),
current_value, value);
effect = graph()->NewNode(
- simplified()->CheckIf(DeoptimizeReason::kNoReason), check, effect,
- control);
+ simplified()->CheckIf(DeoptimizeReason::kWrongValue), check,
+ effect, control);
return ValueEffectControl(value, effect, control);
}
if (field_representation == MachineRepresentation::kTaggedSigned) {
- value = effect = graph()->NewNode(simplified()->CheckSmi(), value,
- effect, control);
+ value = effect = graph()->NewNode(
+ simplified()->CheckSmi(VectorSlotPair()), value, effect, control);
field_access.write_barrier_kind = kNoWriteBarrier;
} else if (field_representation ==
@@ -2007,7 +2007,7 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreDataPropertyInLiteral(
Node* name = NodeProperties::GetValueInput(node, 1);
Node* check = graph()->NewNode(simplified()->ReferenceEqual(), name,
jsgraph()->HeapConstant(cached_name));
- effect = graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kNoReason),
+ effect = graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kWrongName),
check, effect, control);
Node* value = NodeProperties::GetValueInput(node, 2);
@@ -2127,13 +2127,14 @@ JSNativeContextSpecialization::BuildElementAccess(
         // Check that the {index} is a valid array index; we do the actual
         // bounds check below and simply skip the store if it's out of
         // bounds for the {receiver}.
- index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
- jsgraph()->Constant(Smi::kMaxValue),
- effect, control);
+ index = effect = graph()->NewNode(
+ simplified()->CheckBounds(VectorSlotPair()), index,
+ jsgraph()->Constant(Smi::kMaxValue), effect, control);
} else {
// Check that the {index} is in the valid range for the {receiver}.
- index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
- length, effect, control);
+ index = effect =
+ graph()->NewNode(simplified()->CheckBounds(VectorSlotPair()), index,
+ length, effect, control);
}
// Access the actual element.
@@ -2279,13 +2280,14 @@ JSNativeContextSpecialization::BuildElementAccess(
         // Check that the {index} is a valid array index; we do the actual
         // bounds check below and simply skip the store if it's out of
         // bounds for the {receiver}.
- index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
- jsgraph()->Constant(Smi::kMaxValue),
- effect, control);
+ index = effect = graph()->NewNode(
+ simplified()->CheckBounds(VectorSlotPair()), index,
+ jsgraph()->Constant(Smi::kMaxValue), effect, control);
} else {
// Check that the {index} is in the valid range for the {receiver}.
- index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
- length, effect, control);
+ index = effect =
+ graph()->NewNode(simplified()->CheckBounds(VectorSlotPair()), index,
+ length, effect, control);
}
// Compute the element access.
@@ -2406,11 +2408,12 @@ JSNativeContextSpecialization::BuildElementAccess(
} else {
DCHECK_EQ(AccessMode::kStore, access_mode);
if (IsSmiElementsKind(elements_kind)) {
- value = effect =
- graph()->NewNode(simplified()->CheckSmi(), value, effect, control);
+ value = effect = graph()->NewNode(
+ simplified()->CheckSmi(VectorSlotPair()), value, effect, control);
} else if (IsDoubleElementsKind(elements_kind)) {
- value = effect = graph()->NewNode(simplified()->CheckNumber(), value,
- effect, control);
+ value = effect =
+ graph()->NewNode(simplified()->CheckNumber(VectorSlotPair()), value,
+ effect, control);
       // Make sure we do not store signaling NaNs into double arrays.
value = graph()->NewNode(simplified()->NumberSilenceNaN(), value);
}
@@ -2443,8 +2446,9 @@ JSNativeContextSpecialization::BuildElementAccess(
jsgraph()->Constant(JSObject::kMaxGap))
: graph()->NewNode(simplified()->NumberAdd(), length,
jsgraph()->OneConstant());
- index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
- limit, effect, control);
+ index = effect =
+ graph()->NewNode(simplified()->CheckBounds(VectorSlotPair()), index,
+ limit, effect, control);
// Grow {elements} backing store if necessary.
GrowFastElementsMode mode =
@@ -2452,8 +2456,8 @@ JSNativeContextSpecialization::BuildElementAccess(
? GrowFastElementsMode::kDoubleElements
: GrowFastElementsMode::kSmiOrObjectElements;
elements = effect = graph()->NewNode(
- simplified()->MaybeGrowFastElements(mode), receiver, elements,
- index, elements_length, effect, control);
+ simplified()->MaybeGrowFastElements(mode, VectorSlotPair()),
+ receiver, elements, index, elements_length, effect, control);
// Also update the "length" property if {receiver} is a JSArray.
if (receiver_is_jsarray) {
@@ -2505,9 +2509,9 @@ Node* JSNativeContextSpecialization::BuildIndexedStringLoad(
dependencies()->AssumePropertyCell(factory()->no_elements_protector());
// Ensure that the {index} is a valid String length.
- index = *effect = graph()->NewNode(simplified()->CheckBounds(), index,
- jsgraph()->Constant(String::kMaxLength),
- *effect, *control);
+ index = *effect = graph()->NewNode(
+ simplified()->CheckBounds(VectorSlotPair()), index,
+ jsgraph()->Constant(String::kMaxLength), *effect, *control);
// Load the single character string from {receiver} or yield
// undefined if the {index} is not within the valid bounds.
@@ -2531,8 +2535,9 @@ Node* JSNativeContextSpecialization::BuildIndexedStringLoad(
vtrue, vfalse, *control);
} else {
// Ensure that {index} is less than {receiver} length.
- index = *effect = graph()->NewNode(simplified()->CheckBounds(), index,
- length, *effect, *control);
+ index = *effect =
+ graph()->NewNode(simplified()->CheckBounds(VectorSlotPair()), index,
+ length, *effect, *control);
Node* masked_index =
graph()->NewNode(simplified()->MaskIndexWithBound(), index, length);
@@ -2579,8 +2584,8 @@ Node* JSNativeContextSpecialization::BuildExtendPropertiesBackingStore(
common()->Select(MachineRepresentation::kTaggedSigned),
graph()->NewNode(simplified()->ObjectIsSmi(), properties), properties,
jsgraph()->SmiConstant(PropertyArray::kNoHashSentinel));
- hash = graph()->NewNode(common()->TypeGuard(Type::SignedSmall()), hash,
- control);
+ hash = effect = graph()->NewNode(common()->TypeGuard(Type::SignedSmall()),
+ hash, effect, control);
hash =
graph()->NewNode(simplified()->NumberShiftLeft(), hash,
jsgraph()->Constant(PropertyArray::HashField::kShift));
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index 5b5e6589d2..0ddf859cff 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -9,9 +9,9 @@
#include "src/base/lazy-instance.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
-#include "src/feedback-vector.h"
#include "src/handles-inl.h"
#include "src/objects-inl.h"
+#include "src/vector-slot-pair.h"
namespace v8 {
namespace internal {
@@ -28,29 +28,6 @@ CallFrequency CallFrequencyOf(Operator const* op) {
return OpParameter<CallFrequency>(op);
}
-VectorSlotPair::VectorSlotPair() {}
-
-
-int VectorSlotPair::index() const {
- return vector_.is_null() ? -1 : FeedbackVector::GetIndex(slot_);
-}
-
-
-bool operator==(VectorSlotPair const& lhs, VectorSlotPair const& rhs) {
- return lhs.slot() == rhs.slot() &&
- lhs.vector().location() == rhs.vector().location();
-}
-
-
-bool operator!=(VectorSlotPair const& lhs, VectorSlotPair const& rhs) {
- return !(lhs == rhs);
-}
-
-
-size_t hash_value(VectorSlotPair const& p) {
- return base::hash_combine(p.slot(), p.vector().location());
-}
-
std::ostream& operator<<(std::ostream& os,
ConstructForwardVarargsParameters const& p) {
@@ -599,6 +576,7 @@ CompareOperationHint CompareOperationHintOf(const Operator* op) {
V(LoadMessage, Operator::kNoThrow | Operator::kNoWrite, 0, 1) \
V(StoreMessage, Operator::kNoRead | Operator::kNoThrow, 1, 0) \
V(GeneratorRestoreContinuation, Operator::kNoThrow, 1, 1) \
+ V(GeneratorRestoreInputOrDebugPos, Operator::kNoThrow, 1, 1) \
V(StackCheck, Operator::kNoWrite, 0, 0) \
V(Debugger, Operator::kNoProperties, 0, 0) \
V(GetSuperConstructor, Operator::kNoWrite, 1, 1)
@@ -645,6 +623,7 @@ struct JSOperatorGlobalCache final {
Name##Operator<BinaryOperationHint::kNumberOrOddball> \
k##Name##NumberOrOddballOperator; \
Name##Operator<BinaryOperationHint::kString> k##Name##StringOperator; \
+ Name##Operator<BinaryOperationHint::kBigInt> k##Name##BigIntOperator; \
Name##Operator<BinaryOperationHint::kAny> k##Name##AnyOperator;
BINARY_OP_LIST(BINARY_OP)
#undef BINARY_OP
@@ -667,6 +646,7 @@ struct JSOperatorGlobalCache final {
k##Name##InternalizedStringOperator; \
Name##Operator<CompareOperationHint::kString> k##Name##StringOperator; \
Name##Operator<CompareOperationHint::kSymbol> k##Name##SymbolOperator; \
+ Name##Operator<CompareOperationHint::kBigInt> k##Name##BigIntOperator; \
Name##Operator<CompareOperationHint::kReceiver> k##Name##ReceiverOperator; \
Name##Operator<CompareOperationHint::kAny> k##Name##AnyOperator;
COMPARE_OP_LIST(COMPARE_OP)
@@ -703,6 +683,8 @@ CACHED_OP_LIST(CACHED_OP)
return &cache_.k##Name##NumberOrOddballOperator; \
case BinaryOperationHint::kString: \
return &cache_.k##Name##StringOperator; \
+ case BinaryOperationHint::kBigInt: \
+ return &cache_.k##Name##BigIntOperator; \
case BinaryOperationHint::kAny: \
return &cache_.k##Name##AnyOperator; \
} \
@@ -729,6 +711,8 @@ BINARY_OP_LIST(BINARY_OP)
return &cache_.k##Name##StringOperator; \
case CompareOperationHint::kSymbol: \
return &cache_.k##Name##SymbolOperator; \
+ case CompareOperationHint::kBigInt: \
+ return &cache_.k##Name##BigIntOperator; \
case CompareOperationHint::kReceiver: \
return &cache_.k##Name##ReceiverOperator; \
case CompareOperationHint::kAny: \
@@ -763,8 +747,10 @@ const Operator* JSOperatorBuilder::CallForwardVarargs(size_t arity,
const Operator* JSOperatorBuilder::Call(size_t arity, CallFrequency frequency,
VectorSlotPair const& feedback,
- ConvertReceiverMode convert_mode) {
- CallParameters parameters(arity, frequency, feedback, convert_mode);
+ ConvertReceiverMode convert_mode,
+ SpeculationMode speculation_mode) {
+ CallParameters parameters(arity, frequency, feedback, convert_mode,
+ speculation_mode);
return new (zone()) Operator1<CallParameters>( // --
IrOpcode::kJSCall, Operator::kNoProperties, // opcode
"JSCall", // name
@@ -781,9 +767,10 @@ const Operator* JSOperatorBuilder::CallWithArrayLike(CallFrequency frequency) {
}
const Operator* JSOperatorBuilder::CallWithSpread(
- uint32_t arity, CallFrequency frequency, VectorSlotPair const& feedback) {
+ uint32_t arity, CallFrequency frequency, VectorSlotPair const& feedback,
+ SpeculationMode speculation_mode) {
CallParameters parameters(arity, frequency, feedback,
- ConvertReceiverMode::kAny);
+ ConvertReceiverMode::kAny, speculation_mode);
return new (zone()) Operator1<CallParameters>( // --
IrOpcode::kJSCallWithSpread, Operator::kNoProperties, // opcode
"JSCallWithSpread", // name
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index 94a9b1fdb6..3875234d5a 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -10,6 +10,7 @@
#include "src/handles.h"
#include "src/runtime/runtime.h"
#include "src/type-hints.h"
+#include "src/vector-slot-pair.h"
namespace v8 {
namespace internal {
@@ -18,7 +19,6 @@ class AllocationSite;
class BoilerplateDescription;
class ConstantElementsPair;
class SharedFunctionInfo;
-class FeedbackVector;
namespace compiler {
@@ -59,32 +59,6 @@ std::ostream& operator<<(std::ostream&, CallFrequency);
CallFrequency CallFrequencyOf(Operator const* op) WARN_UNUSED_RESULT;
-// Defines a pair of {FeedbackVector} and {FeedbackSlot}, which
-// is used to access the type feedback for a certain {Node}.
-class V8_EXPORT_PRIVATE VectorSlotPair {
- public:
- VectorSlotPair();
- VectorSlotPair(Handle<FeedbackVector> vector, FeedbackSlot slot)
- : vector_(vector), slot_(slot) {}
-
- bool IsValid() const { return !vector_.is_null() && !slot_.IsInvalid(); }
-
- Handle<FeedbackVector> vector() const { return vector_; }
- FeedbackSlot slot() const { return slot_; }
-
- int index() const;
-
- private:
- const Handle<FeedbackVector> vector_;
- const FeedbackSlot slot_;
-};
-
-bool operator==(VectorSlotPair const&, VectorSlotPair const&);
-bool operator!=(VectorSlotPair const&, VectorSlotPair const&);
-
-size_t hash_value(VectorSlotPair const&);
-
-
 // Defines the flags for a JavaScript call that forwards parameters. This
 // is used as a parameter by JSConstructForwardVarargs operators.
class ConstructForwardVarargsParameters final {
@@ -187,8 +161,10 @@ class CallParameters final {
public:
CallParameters(size_t arity, CallFrequency frequency,
VectorSlotPair const& feedback,
- ConvertReceiverMode convert_mode)
+ ConvertReceiverMode convert_mode,
+ SpeculationMode speculation_mode)
: bit_field_(ArityField::encode(arity) |
+ SpeculationModeField::encode(speculation_mode) |
ConvertReceiverModeField::encode(convert_mode)),
frequency_(frequency),
feedback_(feedback) {}
@@ -200,6 +176,10 @@ class CallParameters final {
}
VectorSlotPair const& feedback() const { return feedback_; }
+ SpeculationMode speculation_mode() const {
+ return SpeculationModeField::decode(bit_field_);
+ }
+
bool operator==(CallParameters const& that) const {
return this->bit_field_ == that.bit_field_ &&
this->frequency_ == that.frequency_ &&
@@ -212,7 +192,8 @@ class CallParameters final {
return base::hash_combine(p.bit_field_, p.frequency_, p.feedback_);
}
- typedef BitField<size_t, 0, 29> ArityField;
+ typedef BitField<size_t, 0, 28> ArityField;
+ typedef BitField<SpeculationMode, 28, 1> SpeculationModeField;
typedef BitField<ConvertReceiverMode, 29, 2> ConvertReceiverModeField;
uint32_t const bit_field_;
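The repacked bit field narrows arity to bits 0-27, places the new speculation
mode in bit 28, and leaves the receiver-conversion mode in bits 29-30. The
same layout with hand-rolled shifts instead of V8's BitField template
(illustrative only):

    #include <cstdint>

    constexpr uint32_t kArityBits = 28;         // bits 0..27
    constexpr uint32_t kSpeculationShift = 28;  // 1 bit
    constexpr uint32_t kConvertShift = 29;      // 2 bits

    uint32_t Encode(uint32_t arity, uint32_t speculation, uint32_t convert) {
      return (arity & ((1u << kArityBits) - 1)) |
             ((speculation & 1u) << kSpeculationShift) |
             ((convert & 3u) << kConvertShift);
    }

    uint32_t SpeculationModeOf(uint32_t bits) {
      return (bits >> kSpeculationShift) & 1u;
    }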
@@ -693,11 +674,13 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* Call(
size_t arity, CallFrequency frequency = CallFrequency(),
VectorSlotPair const& feedback = VectorSlotPair(),
- ConvertReceiverMode convert_mode = ConvertReceiverMode::kAny);
+ ConvertReceiverMode convert_mode = ConvertReceiverMode::kAny,
+ SpeculationMode speculation_mode = SpeculationMode::kAllowSpeculation);
const Operator* CallWithArrayLike(CallFrequency frequency);
const Operator* CallWithSpread(
uint32_t arity, CallFrequency frequency = CallFrequency(),
- VectorSlotPair const& feedback = VectorSlotPair());
+ VectorSlotPair const& feedback = VectorSlotPair(),
+ SpeculationMode speculation_mode = SpeculationMode::kAllowSpeculation);
const Operator* CallRuntime(Runtime::FunctionId id);
const Operator* CallRuntime(Runtime::FunctionId id, size_t arity);
const Operator* CallRuntime(const Runtime::Function* function, size_t arity);
@@ -761,8 +744,9 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
// Used to implement Ignition's RestoreGeneratorState bytecode.
const Operator* GeneratorRestoreContinuation();
- // Used to implement Ignition's RestoreGeneratorRegisters bytecode.
+ // Used to implement Ignition's ResumeGenerator bytecode.
const Operator* GeneratorRestoreRegister(int index);
+ const Operator* GeneratorRestoreInputOrDebugPos();
const Operator* StackCheck();
const Operator* Debugger();
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.cc b/deps/v8/src/compiler/js-type-hint-lowering.cc
index a7ce12cdb4..0ec63600a2 100644
--- a/deps/v8/src/compiler/js-type-hint-lowering.cc
+++ b/deps/v8/src/compiler/js-type-hint-lowering.cc
@@ -38,6 +38,7 @@ bool BinaryOperationHintToNumberOperationHint(
case BinaryOperationHint::kAny:
case BinaryOperationHint::kNone:
case BinaryOperationHint::kString:
+ case BinaryOperationHint::kBigInt:
break;
}
return false;
@@ -90,6 +91,7 @@ class JSSpeculativeBinopBuilder final {
case CompareOperationHint::kNone:
case CompareOperationHint::kString:
case CompareOperationHint::kSymbol:
+ case CompareOperationHint::kBigInt:
case CompareOperationHint::kReceiver:
case CompareOperationHint::kInternalizedString:
break;
@@ -493,7 +495,8 @@ Node* JSTypeHintLowering::TryBuildSoftDeopt(FeedbackNexus& nexus, Node* effect,
DeoptimizeReason reason) const {
if ((flags() & kBailoutOnUninitialized) && nexus.IsUninitialized()) {
Node* deoptimize = jsgraph()->graph()->NewNode(
- jsgraph()->common()->Deoptimize(DeoptimizeKind::kSoft, reason),
+ jsgraph()->common()->Deoptimize(DeoptimizeKind::kSoft, reason,
+ VectorSlotPair()),
jsgraph()->Dead(), effect, control);
Node* frame_state = NodeProperties::FindFrameStateBefore(deoptimize);
deoptimize->ReplaceInput(0, frame_state);
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index 2380c7c0f4..c265caf9f0 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -47,6 +47,7 @@ class JSBinopReduction final {
case CompareOperationHint::kNone:
case CompareOperationHint::kString:
case CompareOperationHint::kSymbol:
+ case CompareOperationHint::kBigInt:
case CompareOperationHint::kReceiver:
case CompareOperationHint::kInternalizedString:
break;
@@ -156,14 +157,16 @@ class JSBinopReduction final {
// CheckString node.
void CheckInputsToString() {
if (!left_type()->Is(Type::String())) {
- Node* left_input = graph()->NewNode(simplified()->CheckString(), left(),
- effect(), control());
+ Node* left_input =
+ graph()->NewNode(simplified()->CheckString(VectorSlotPair()), left(),
+ effect(), control());
node_->ReplaceInput(0, left_input);
update_effect(left_input);
}
if (!right_type()->Is(Type::String())) {
- Node* right_input = graph()->NewNode(simplified()->CheckString(), right(),
- effect(), control());
+ Node* right_input =
+ graph()->NewNode(simplified()->CheckString(VectorSlotPair()), right(),
+ effect(), control());
node_->ReplaceInput(1, right_input);
update_effect(right_input);
}
@@ -308,7 +311,8 @@ class JSBinopReduction final {
case IrOpcode::kSpeculativeNumberLessThanOrEqual:
return simplified()->NumberLessThanOrEqual();
case IrOpcode::kSpeculativeNumberAdd:
- return simplified()->NumberAdd();
+ // Handled by ReduceSpeculativeNumberAdd.
+ UNREACHABLE();
case IrOpcode::kSpeculativeNumberSubtract:
return simplified()->NumberSubtract();
case IrOpcode::kSpeculativeNumberMultiply:
@@ -539,13 +543,15 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
if (r.LeftInputIs(empty_string_type_)) {
- Node* value = effect = graph()->NewNode(simplified()->CheckString(),
- r.right(), effect, control);
+ Node* value = effect =
+ graph()->NewNode(simplified()->CheckString(VectorSlotPair()),
+ r.right(), effect, control);
ReplaceWithValue(node, value, effect, control);
return Replace(value);
} else if (r.RightInputIs(empty_string_type_)) {
- Node* value = effect = graph()->NewNode(simplified()->CheckString(),
- r.left(), effect, control);
+ Node* value = effect =
+ graph()->NewNode(simplified()->CheckString(VectorSlotPair()),
+ r.left(), effect, control);
ReplaceWithValue(node, value, effect, control);
return Replace(value);
}
@@ -594,6 +600,9 @@ Reduction JSTypedLowering::ReduceSpeculativeNumberBinop(Node* node) {
if ((hint == NumberOperationHint::kNumber ||
hint == NumberOperationHint::kNumberOrOddball) &&
r.BothInputsAre(Type::NumberOrUndefinedOrNullOrBoolean())) {
+    // We intentionally do this only for the Number and NumberOrOddball hints,
+    // because simplified lowering of these speculative ops may do some clever
+    // reductions in the other cases.
r.ConvertInputsToNumber();
return r.ChangeToPureOperator(r.NumberOpFromSpeculativeNumberOp(),
Type::Number());
@@ -634,22 +643,22 @@ Reduction JSTypedLowering::ReduceCreateConsString(Node* node) {
// Make sure {first} is actually a String.
Type* first_type = NodeProperties::GetType(first);
if (!first_type->Is(Type::String())) {
- first = effect =
- graph()->NewNode(simplified()->CheckString(), first, effect, control);
+ first = effect = graph()->NewNode(
+ simplified()->CheckString(VectorSlotPair()), first, effect, control);
first_type = NodeProperties::GetType(first);
}
// Make sure {second} is actually a String.
Type* second_type = NodeProperties::GetType(second);
if (!second_type->Is(Type::String())) {
- second = effect =
- graph()->NewNode(simplified()->CheckString(), second, effect, control);
+ second = effect = graph()->NewNode(
+ simplified()->CheckString(VectorSlotPair()), second, effect, control);
second_type = NodeProperties::GetType(second);
}
// Determine the {first} length.
- Node* first_length = BuildGetStringLength(first, &effect, control);
- Node* second_length = BuildGetStringLength(second, &effect, control);
+ Node* first_length = BuildGetStringLength(first);
+ Node* second_length = BuildGetStringLength(second);
// Compute the resulting length.
Node* length =
@@ -661,9 +670,9 @@ Reduction JSTypedLowering::ReduceCreateConsString(Node* node) {
// has the additional benefit of not holding on to the lazy {frame_state}
// and thus potentially reduces the number of live ranges and allows for
// more truncations.
- length = effect = graph()->NewNode(simplified()->CheckBounds(), length,
- jsgraph()->Constant(String::kMaxLength),
- effect, control);
+ length = effect = graph()->NewNode(
+ simplified()->CheckBounds(VectorSlotPair()), length,
+ jsgraph()->Constant(String::kMaxLength), effect, control);
} else {
// Check if we would overflow the allowed maximum string length.
Node* check =
@@ -698,40 +707,25 @@ Reduction JSTypedLowering::ReduceCreateConsString(Node* node) {
Revisit(graph()->end());
}
control = graph()->NewNode(common()->IfTrue(), branch);
+ length = effect =
+ graph()->NewNode(common()->TypeGuard(type_cache_.kStringLengthType),
+ length, effect, control);
}
- // Figure out the map for the resulting ConsString.
- // TODO(turbofan): We currently just use the cons_string_map here for
- // the sake of simplicity; we could also try to be smarter here and
- // use the one_byte_cons_string_map instead when the resulting ConsString
- // contains only one byte characters.
- Node* value_map = jsgraph()->HeapConstant(factory()->cons_string_map());
-
- // Allocate the resulting ConsString.
- AllocationBuilder a(jsgraph(), effect, control);
- a.Allocate(ConsString::kSize, NOT_TENURED, Type::OtherString());
- a.Store(AccessBuilder::ForMap(), value_map);
- a.Store(AccessBuilder::ForNameHashField(),
- jsgraph()->Constant(Name::kEmptyHashField));
- a.Store(AccessBuilder::ForStringLength(), length);
- a.Store(AccessBuilder::ForConsStringFirst(), first);
- a.Store(AccessBuilder::ForConsStringSecond(), second);
-
- // Morph the {node} into a {FinishRegion}.
- ReplaceWithValue(node, node, node, control);
- a.FinishAndChange(node);
- return Changed(node);
+ Node* value =
+ graph()->NewNode(simplified()->NewConsString(), length, first, second);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
}
-Node* JSTypedLowering::BuildGetStringLength(Node* value, Node** effect,
- Node* control) {
+Node* JSTypedLowering::BuildGetStringLength(Node* value) {
+ // TODO(bmeurer): Get rid of this hack and instead have a way to
+ // express the string length in the types.
HeapObjectMatcher m(value);
Node* length =
(m.HasValue() && m.Value()->IsString())
? jsgraph()->Constant(Handle<String>::cast(m.Value())->length())
- : (*effect) = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForStringLength()),
- value, *effect, control);
+ : graph()->NewNode(simplified()->StringLength(), value);
return length;
}
@@ -866,9 +860,9 @@ Reduction JSTypedLowering::ReduceJSStrictEqual(Node* node) {
ReplaceWithValue(node, replacement);
return Replace(replacement);
}
- if (r.OneInputCannotBe(Type::NumberOrString())) {
- // For values with canonical representation (i.e. neither String, nor
- // Number) an empty type intersection means the values cannot be strictly
+ if (r.OneInputCannotBe(Type::NumericOrString())) {
+ // For values with canonical representation (i.e. neither String nor
+ // Numeric) an empty type intersection means the values cannot be strictly
// equal.
if (!r.left_type()->Maybe(r.right_type())) {
Node* replacement = jsgraph()->FalseConstant();
@@ -1015,6 +1009,7 @@ Reduction JSTypedLowering::ReduceJSToNumberOrNumeric(Node* node) {
NodeProperties::ChangeOp(node, simplified()->PlainPrimitiveToNumber());
return Changed(node);
}
+ // TODO(neis): Reduce ToNumeric to ToNumber if input can't be BigInt?
return NoChange();
}
@@ -1051,7 +1046,9 @@ Reduction JSTypedLowering::ReduceJSToStringInput(Node* input) {
return Replace(jsgraph()->HeapConstant(
factory()->NumberToString(factory()->NewNumber(input_type->Min()))));
}
- // TODO(turbofan): js-typed-lowering of ToString(x:number)
+ if (input_type->Is(Type::Number())) {
+ return Replace(graph()->NewNode(simplified()->NumberToString(), input));
+ }
return NoChange();
}
@@ -1133,16 +1130,12 @@ Reduction JSTypedLowering::ReduceJSLoadNamed(Node* node) {
DCHECK_EQ(IrOpcode::kJSLoadNamed, node->opcode());
Node* receiver = NodeProperties::GetValueInput(node, 0);
Type* receiver_type = NodeProperties::GetType(receiver);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
Handle<Name> name = NamedAccessOf(node->op()).name();
// Optimize "length" property of strings.
if (name.is_identical_to(factory()->length_string()) &&
receiver_type->Is(Type::String())) {
- Node* value = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForStringLength()), receiver,
- effect, control);
- ReplaceWithValue(node, value, effect);
+ Node* value = graph()->NewNode(simplified()->StringLength(), receiver);
+ ReplaceWithValue(node, value);
return Replace(value);
}
return NoChange();
@@ -1783,7 +1776,7 @@ Reduction JSTypedLowering::ReduceJSForInNext(Node* node) {
Node* check = graph()->NewNode(simplified()->ReferenceEqual(),
receiver_map, cache_type);
effect =
- graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kNoReason),
+ graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kWrongMap),
check, effect, control);
// Since the change to LoadElement() below is effectful, we connect
@@ -2098,6 +2091,22 @@ Reduction JSTypedLowering::ReduceJSGeneratorRestoreRegister(Node* node) {
return Changed(element);
}
+Reduction JSTypedLowering::ReduceJSGeneratorRestoreInputOrDebugPos(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSGeneratorRestoreInputOrDebugPos, node->opcode());
+
+ FieldAccess input_or_debug_pos_field =
+ AccessBuilder::ForJSGeneratorObjectInputOrDebugPos();
+ const Operator* new_op = simplified()->LoadField(input_or_debug_pos_field);
+
+ // Mutate the node in-place.
+ DCHECK(OperatorProperties::HasContextInput(node->op()));
+ DCHECK(!OperatorProperties::HasContextInput(new_op));
+ node->RemoveInput(NodeProperties::FirstContextIndex(node));
+
+ NodeProperties::ChangeOp(node, new_op);
+ return Changed(node);
+}
+
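The reduction above rewrites the node in place: the context input is dropped
(LoadField takes none) and the operator becomes a plain field load of the
generator's input_or_debug_pos slot. Schematically (a sketch of the node
shapes, not concrete V8 syntax):

    JSGeneratorRestoreInputOrDebugPos(generator, context; effect, control)
      ==> LoadField[JSGeneratorObject::input_or_debug_pos](generator; effect, control)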
Reduction JSTypedLowering::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kJSEqual:
@@ -2183,6 +2192,8 @@ Reduction JSTypedLowering::Reduce(Node* node) {
return ReduceJSGeneratorRestoreContinuation(node);
case IrOpcode::kJSGeneratorRestoreRegister:
return ReduceJSGeneratorRestoreRegister(node);
+ case IrOpcode::kJSGeneratorRestoreInputOrDebugPos:
+ return ReduceJSGeneratorRestoreInputOrDebugPos(node);
// TODO(mstarzinger): Simplified operations hiding in JS-level reducer not
// fooling anyone. Consider moving this into a separate reducer.
case IrOpcode::kSpeculativeNumberAdd:
diff --git a/deps/v8/src/compiler/js-typed-lowering.h b/deps/v8/src/compiler/js-typed-lowering.h
index 8b00c1d32c..d72303f495 100644
--- a/deps/v8/src/compiler/js-typed-lowering.h
+++ b/deps/v8/src/compiler/js-typed-lowering.h
@@ -73,6 +73,7 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
Reduction ReduceJSGeneratorStore(Node* node);
Reduction ReduceJSGeneratorRestoreContinuation(Node* node);
Reduction ReduceJSGeneratorRestoreRegister(Node* node);
+ Reduction ReduceJSGeneratorRestoreInputOrDebugPos(Node* node);
Reduction ReduceNumberBinop(Node* node);
Reduction ReduceInt32Binop(Node* node);
Reduction ReduceUI32Shift(Node* node, Signedness signedness);
@@ -85,8 +86,8 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
// Helper for ReduceJSLoadModule and ReduceJSStoreModule.
Node* BuildGetModuleCell(Node* node);
- // Helpers for ReduceJSCreateConsString and ReduceJSStringConcat.
- Node* BuildGetStringLength(Node* value, Node** effect, Node* control);
+ // Helpers for ReduceJSCreateConsString.
+ Node* BuildGetStringLength(Node* value);
Factory* factory() const;
Graph* graph() const;
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index 03b8074f0f..5df50e64f5 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -75,33 +75,42 @@ bool CallDescriptor::HasSameReturnLocationsAs(
return true;
}
-int CallDescriptor::GetStackParameterDelta(
- CallDescriptor const* tail_caller) const {
- int callee_slots_above_sp = 0;
+int CallDescriptor::GetFirstUnusedStackSlot() const {
+ int slots_above_sp = 0;
for (size_t i = 0; i < InputCount(); ++i) {
LinkageLocation operand = GetInputLocation(i);
if (!operand.IsRegister()) {
int new_candidate =
-operand.GetLocation() + operand.GetSizeInPointers() - 1;
- if (new_candidate > callee_slots_above_sp) {
- callee_slots_above_sp = new_candidate;
+ if (new_candidate > slots_above_sp) {
+ slots_above_sp = new_candidate;
}
}
}
- int tail_caller_slots_above_sp = 0;
- if (tail_caller != nullptr) {
- for (size_t i = 0; i < tail_caller->InputCount(); ++i) {
- LinkageLocation operand = tail_caller->GetInputLocation(i);
- if (!operand.IsRegister()) {
- int new_candidate =
- -operand.GetLocation() + operand.GetSizeInPointers() - 1;
- if (new_candidate > tail_caller_slots_above_sp) {
- tail_caller_slots_above_sp = new_candidate;
- }
+ return slots_above_sp;
+}
+
+int CallDescriptor::GetStackParameterDelta(
+ CallDescriptor const* tail_caller) const {
+ int callee_slots_above_sp = GetFirstUnusedStackSlot();
+ int tail_caller_slots_above_sp = tail_caller->GetFirstUnusedStackSlot();
+ int stack_param_delta = callee_slots_above_sp - tail_caller_slots_above_sp;
+ if (kPadArguments) {
+ // Adjust stack delta when it is odd.
+ if (stack_param_delta % 2 != 0) {
+ if (callee_slots_above_sp % 2 != 0) {
+ // The delta is odd due to the callee - we will need to add one slot
+ // of padding.
+ ++stack_param_delta;
+ } else {
+ // The delta is odd because of the caller. We already have one slot of
+ // padding that we can reuse for arguments, so we will need one fewer
+ // slot.
+ --stack_param_delta;
}
}
}
- return callee_slots_above_sp - tail_caller_slots_above_sp;
+ return stack_param_delta;
}
bool CallDescriptor::CanTailCall(const Node* node) const {
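With kPadArguments set, the adjustment above keeps the stack delta even, so
the stack pointer stays aligned across tail calls. The same arithmetic as a
standalone function, with two worked examples (an illustrative restatement,
not V8 code):

    // Mirrors the padding rule in GetStackParameterDelta above.
    int StackParameterDelta(int callee_slots, int caller_slots, bool pad) {
      int delta = callee_slots - caller_slots;
      if (pad && delta % 2 != 0) {
        if (callee_slots % 2 != 0) {
          ++delta;  // callee is odd: add one padding slot
        } else {
          --delta;  // caller is odd: reuse its existing padding slot
        }
      }
      return delta;
    }

    // StackParameterDelta(3, 2, true) == 2   (callee odd: pad up)
    // StackParameterDelta(2, 3, true) == -2  (caller odd: reuse padding)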
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index 9e79a9af00..ade1d6902f 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -177,17 +177,14 @@ class V8_EXPORT_PRIVATE CallDescriptor final
kNeedsFrameState = 1u << 0,
kHasExceptionHandler = 1u << 1,
kCanUseRoots = 1u << 2,
- // (arm64 only) native stack should be used for arguments.
- kUseNativeStack = 1u << 3,
- // (arm64 only) call instruction has to restore JSSP or CSP.
- kRestoreJSSP = 1u << 4,
- kRestoreCSP = 1u << 5,
// Causes the code generator to initialize the root register.
- kInitializeRootRegister = 1u << 6,
+ kInitializeRootRegister = 1u << 3,
// Does not ever try to allocate space on our heap.
- kNoAllocate = 1u << 7,
+ kNoAllocate = 1u << 4,
// Push argument count as part of function prologue.
- kPushArgumentCount = 1u << 8
+ kPushArgumentCount = 1u << 5,
+ // Use retpoline for this call if indirect.
+ kRetpoline = 1u << 6
};
typedef base::Flags<Flag> Flags;
@@ -197,12 +194,14 @@ class V8_EXPORT_PRIVATE CallDescriptor final
RegList callee_saved_registers,
RegList callee_saved_fp_registers, Flags flags,
const char* debug_name = "",
- const RegList allocatable_registers = 0)
+ const RegList allocatable_registers = 0,
+ size_t stack_return_count = 0)
: kind_(kind),
target_type_(target_type),
target_loc_(target_loc),
location_sig_(location_sig),
stack_param_count_(stack_param_count),
+ stack_return_count_(stack_return_count),
properties_(properties),
callee_saved_registers_(callee_saved_registers),
callee_saved_fp_registers_(callee_saved_fp_registers),
@@ -232,6 +231,9 @@ class V8_EXPORT_PRIVATE CallDescriptor final
// The number of stack parameters to the call.
size_t StackParameterCount() const { return stack_param_count_; }
+ // The number of stack return values from the call.
+ size_t StackReturnCount() const { return stack_return_count_; }
+
// The number of parameters to the JS function call.
size_t JSParameterCount() const {
DCHECK(IsJSFunctionCall());
@@ -248,7 +250,6 @@ class V8_EXPORT_PRIVATE CallDescriptor final
Flags flags() const { return flags_; }
bool NeedsFrameState() const { return flags() & kNeedsFrameState; }
- bool UseNativeStack() const { return flags() & kUseNativeStack; }
bool PushArgumentCount() const { return flags() & kPushArgumentCount; }
bool InitializeRootRegister() const {
return flags() & kInitializeRootRegister;
@@ -293,7 +294,10 @@ class V8_EXPORT_PRIVATE CallDescriptor final
bool HasSameReturnLocationsAs(const CallDescriptor* other) const;
- int GetStackParameterDelta(const CallDescriptor* tail_caller = nullptr) const;
+ // Returns the first stack slot that is not used by the stack parameters.
+ int GetFirstUnusedStackSlot() const;
+
+ int GetStackParameterDelta(const CallDescriptor* tail_caller) const;
bool CanTailCall(const Node* call) const;
@@ -318,6 +322,7 @@ class V8_EXPORT_PRIVATE CallDescriptor final
const LinkageLocation target_loc_;
const LocationSignature* const location_sig_;
const size_t stack_param_count_;
+ const size_t stack_return_count_;
const Operator::Properties properties_;
const RegList callee_saved_registers_;
const RegList callee_saved_fp_registers_;
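Note: the flag values are renumbered after the arm64-only entries are dropped, and they still combine as an ordinary bitset. A plain-C++ stand-in for the base::Flags wrapper, using the new values:

    enum Flag : unsigned {
      kNeedsFrameState = 1u << 0,
      kHasExceptionHandler = 1u << 1,
      kCanUseRoots = 1u << 2,
      kInitializeRootRegister = 1u << 3,
      kNoAllocate = 1u << 4,
      kPushArgumentCount = 1u << 5,
      kRetpoline = 1u << 6
    };

    bool UsesRetpoline(unsigned flags) { return (flags & kRetpoline) != 0; }

    // An indirect call carrying a frame state that should go through a
    // retpoline: kNeedsFrameState | kRetpoline == 0x41, and
    // UsesRetpoline(0x41) is true.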
diff --git a/deps/v8/src/compiler/load-elimination.cc b/deps/v8/src/compiler/load-elimination.cc
index 0313e57909..7888f5a21e 100644
--- a/deps/v8/src/compiler/load-elimination.cc
+++ b/deps/v8/src/compiler/load-elimination.cc
@@ -811,12 +811,12 @@ Reduction LoadElimination::ReduceEnsureWritableFastElements(Node* node) {
}
Reduction LoadElimination::ReduceMaybeGrowFastElements(Node* node) {
- GrowFastElementsMode mode = GrowFastElementsModeOf(node->op());
+ GrowFastElementsParameters params = GrowFastElementsParametersOf(node->op());
Node* const object = NodeProperties::GetValueInput(node, 0);
Node* const effect = NodeProperties::GetEffectInput(node);
AbstractState const* state = node_states_.Get(effect);
if (state == nullptr) return NoChange();
- if (mode == GrowFastElementsMode::kDoubleElements) {
+ if (params.mode() == GrowFastElementsMode::kDoubleElements) {
// We know that the resulting elements have the fixed double array map.
state = state->SetMaps(
node, ZoneHandleSet<Map>(factory()->fixed_double_array_map()), zone());
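Note: ReduceMaybeGrowFastElements now reads the mode through a parameter object instead of a bare enum. The real GrowFastElementsParameters is declared in simplified-operator.h and is not part of this diff; a hypothetical minimal shape, just to make the accessor call concrete:

    // Hypothetical sketch only; the real class lives in
    // simplified-operator.h and may carry more than the mode.
    enum class GrowFastElementsMode { kDoubleElements, kSmiOrObjectElements };

    class GrowFastElementsParameters {
     public:
      explicit GrowFastElementsParameters(GrowFastElementsMode mode)
          : mode_(mode) {}
      GrowFastElementsMode mode() const { return mode_; }

     private:
      GrowFastElementsMode mode_;
    };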
diff --git a/deps/v8/src/compiler/loop-analysis.cc b/deps/v8/src/compiler/loop-analysis.cc
index a9cd46d975..d6b88b13f5 100644
--- a/deps/v8/src/compiler/loop-analysis.cc
+++ b/deps/v8/src/compiler/loop-analysis.cc
@@ -14,7 +14,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-#define OFFSET(x) ((x)&0x1f)
+#define OFFSET(x) ((x)&0x1F)
#define BIT(x) (1u << OFFSET(x))
#define INDEX(x) ((x) >> 5)
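Note: only the hex casing changes here (0x1f becomes 0x1F). For reference, the three macros implement a bitset packed into 32-bit words; a minimal equivalent:

    #include <cstdint>

    // Bit x of the set lives in word INDEX(x) at bit position OFFSET(x).
    inline void SetBit(uint32_t* words, int x) {
      words[x >> 5] |= 1u << (x & 0x1F);
    }
    // Example: SetBit(words, 37) sets bit 5 of words[1], i.e. mask 0x20.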
diff --git a/deps/v8/src/compiler/loop-peeling.cc b/deps/v8/src/compiler/loop-peeling.cc
index 5f8857c5df..ae5b0dfbac 100644
--- a/deps/v8/src/compiler/loop-peeling.cc
+++ b/deps/v8/src/compiler/loop-peeling.cc
@@ -4,6 +4,7 @@
#include "src/compiler/loop-peeling.h"
#include "src/compiler/common-operator.h"
+#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/graph.h"
#include "src/compiler/node-marker.h"
#include "src/compiler/node-properties.h"
@@ -107,7 +108,7 @@ struct Peeling {
// The vector which contains the mapped nodes.
NodeVector* pairs;
- Peeling(Graph* graph, Zone* tmp_zone, size_t max, NodeVector* p)
+ Peeling(Graph* graph, size_t max, NodeVector* p)
: node_map(graph, static_cast<uint32_t>(max)), pairs(p) {}
Node* map(Node* node) {
@@ -121,10 +122,13 @@ struct Peeling {
pairs->push_back(copy);
}
- void CopyNodes(Graph* graph, Zone* tmp_zone, Node* dead, NodeRange nodes) {
- NodeVector inputs(tmp_zone);
+ void CopyNodes(Graph* graph, Zone* tmp_zone_, Node* dead, NodeRange nodes,
+ SourcePositionTable* source_positions) {
+ NodeVector inputs(tmp_zone_);
// Copy all the nodes first.
for (Node* node : nodes) {
+ SourcePositionTable::Scope position(
+ source_positions, source_positions->GetSourcePosition(node));
inputs.clear();
for (Node* input : node->inputs()) {
inputs.push_back(map(input));
@@ -166,13 +170,13 @@ Node* PeeledIteration::map(Node* node) {
return node;
}
-bool LoopPeeler::CanPeel(LoopTree* loop_tree, LoopTree::Loop* loop) {
+bool LoopPeeler::CanPeel(LoopTree::Loop* loop) {
// Look for returns and if projections that are outside the loop but whose
// control input is inside the loop.
- Node* loop_node = loop_tree->GetLoopControl(loop);
- for (Node* node : loop_tree->LoopNodes(loop)) {
+ Node* loop_node = loop_tree_->GetLoopControl(loop);
+ for (Node* node : loop_tree_->LoopNodes(loop)) {
for (Node* use : node->uses()) {
- if (!loop_tree->Contains(loop, use)) {
+ if (!loop_tree_->Contains(loop, use)) {
bool unmarked_exit;
switch (node->opcode()) {
case IrOpcode::kLoopExit:
@@ -187,7 +191,7 @@ bool LoopPeeler::CanPeel(LoopTree* loop_tree, LoopTree::Loop* loop) {
}
if (unmarked_exit) {
if (FLAG_trace_turbo_loop) {
- Node* loop_node = loop_tree->GetLoopControl(loop);
+ Node* loop_node = loop_tree_->GetLoopControl(loop);
PrintF(
"Cannot peel loop %i. Loop exit without explicit mark: Node %i "
"(%s) is inside "
@@ -203,47 +207,45 @@ bool LoopPeeler::CanPeel(LoopTree* loop_tree, LoopTree::Loop* loop) {
return true;
}
-
-PeeledIteration* LoopPeeler::Peel(Graph* graph, CommonOperatorBuilder* common,
- LoopTree* loop_tree, LoopTree::Loop* loop,
- Zone* tmp_zone) {
- if (!CanPeel(loop_tree, loop)) return nullptr;
+PeeledIteration* LoopPeeler::Peel(LoopTree::Loop* loop) {
+ if (!CanPeel(loop)) return nullptr;
//============================================================================
// Construct the peeled iteration.
//============================================================================
- PeeledIterationImpl* iter = new (tmp_zone) PeeledIterationImpl(tmp_zone);
+ PeeledIterationImpl* iter = new (tmp_zone_) PeeledIterationImpl(tmp_zone_);
size_t estimated_peeled_size = 5 + (loop->TotalSize()) * 2;
- Peeling peeling(graph, tmp_zone, estimated_peeled_size, &iter->node_pairs_);
+ Peeling peeling(graph_, estimated_peeled_size, &iter->node_pairs_);
- Node* dead = graph->NewNode(common->Dead());
+ Node* dead = graph_->NewNode(common_->Dead());
// Map the loop header nodes to their entry values.
- for (Node* node : loop_tree->HeaderNodes(loop)) {
+ for (Node* node : loop_tree_->HeaderNodes(loop)) {
peeling.Insert(node, node->InputAt(kAssumedLoopEntryIndex));
}
// Copy all the nodes of loop body for the peeled iteration.
- peeling.CopyNodes(graph, tmp_zone, dead, loop_tree->BodyNodes(loop));
+ peeling.CopyNodes(graph_, tmp_zone_, dead, loop_tree_->BodyNodes(loop),
+ source_positions_);
//============================================================================
// Replace the entry to the loop with the output of the peeled iteration.
//============================================================================
- Node* loop_node = loop_tree->GetLoopControl(loop);
+ Node* loop_node = loop_tree_->GetLoopControl(loop);
Node* new_entry;
int backedges = loop_node->InputCount() - 1;
if (backedges > 1) {
// Multiple backedges from original loop, therefore multiple output edges
// from the peeled iteration.
- NodeVector inputs(tmp_zone);
+ NodeVector inputs(tmp_zone_);
for (int i = 1; i < loop_node->InputCount(); i++) {
inputs.push_back(peeling.map(loop_node->InputAt(i)));
}
Node* merge =
- graph->NewNode(common->Merge(backedges), backedges, &inputs[0]);
+ graph_->NewNode(common_->Merge(backedges), backedges, &inputs[0]);
// Merge values from the multiple output edges of the peeled iteration.
- for (Node* node : loop_tree->HeaderNodes(loop)) {
+ for (Node* node : loop_tree_->HeaderNodes(loop)) {
if (node->opcode() == IrOpcode::kLoop) continue; // already done.
inputs.clear();
for (int i = 0; i < backedges; i++) {
@@ -252,8 +254,8 @@ PeeledIteration* LoopPeeler::Peel(Graph* graph, CommonOperatorBuilder* common,
for (Node* input : inputs) {
if (input != inputs[0]) { // Non-redundant phi.
inputs.push_back(merge);
- const Operator* op = common->ResizeMergeOrPhi(node->op(), backedges);
- Node* phi = graph->NewNode(op, backedges + 1, &inputs[0]);
+ const Operator* op = common_->ResizeMergeOrPhi(node->op(), backedges);
+ Node* phi = graph_->NewNode(op, backedges + 1, &inputs[0]);
node->ReplaceInput(0, phi);
break;
}
@@ -263,7 +265,7 @@ PeeledIteration* LoopPeeler::Peel(Graph* graph, CommonOperatorBuilder* common,
} else {
// Only one backedge, simply replace the input to loop with output of
// peeling.
- for (Node* node : loop_tree->HeaderNodes(loop)) {
+ for (Node* node : loop_tree_->HeaderNodes(loop)) {
node->ReplaceInput(0, peeling.map(node->InputAt(1)));
}
new_entry = peeling.map(loop_node->InputAt(1));
@@ -273,23 +275,23 @@ PeeledIteration* LoopPeeler::Peel(Graph* graph, CommonOperatorBuilder* common,
//============================================================================
// Change the exit and exit markers to merge/phi/effect-phi.
//============================================================================
- for (Node* exit : loop_tree->ExitNodes(loop)) {
+ for (Node* exit : loop_tree_->ExitNodes(loop)) {
switch (exit->opcode()) {
case IrOpcode::kLoopExit:
// Change the loop exit node to a merge node.
exit->ReplaceInput(1, peeling.map(exit->InputAt(0)));
- NodeProperties::ChangeOp(exit, common->Merge(2));
+ NodeProperties::ChangeOp(exit, common_->Merge(2));
break;
case IrOpcode::kLoopExitValue:
// Change exit marker to phi.
- exit->InsertInput(graph->zone(), 1, peeling.map(exit->InputAt(0)));
+ exit->InsertInput(graph_->zone(), 1, peeling.map(exit->InputAt(0)));
NodeProperties::ChangeOp(
- exit, common->Phi(MachineRepresentation::kTagged, 2));
+ exit, common_->Phi(MachineRepresentation::kTagged, 2));
break;
case IrOpcode::kLoopExitEffect:
// Change effect exit marker to effect phi.
- exit->InsertInput(graph->zone(), 1, peeling.map(exit->InputAt(0)));
- NodeProperties::ChangeOp(exit, common->EffectPhi(2));
+ exit->InsertInput(graph_->zone(), 1, peeling.map(exit->InputAt(0)));
+ NodeProperties::ChangeOp(exit, common_->EffectPhi(2));
break;
default:
break;
@@ -298,15 +300,11 @@ PeeledIteration* LoopPeeler::Peel(Graph* graph, CommonOperatorBuilder* common,
return iter;
}
-namespace {
-
-void PeelInnerLoops(Graph* graph, CommonOperatorBuilder* common,
- LoopTree* loop_tree, LoopTree::Loop* loop,
- Zone* temp_zone) {
+void LoopPeeler::PeelInnerLoops(LoopTree::Loop* loop) {
// If the loop has nested loops, peel inside those.
if (!loop->children().empty()) {
for (LoopTree::Loop* inner_loop : loop->children()) {
- PeelInnerLoops(graph, common, loop_tree, inner_loop, temp_zone);
+ PeelInnerLoops(inner_loop);
}
return;
}
@@ -314,15 +312,17 @@ void PeelInnerLoops(Graph* graph, CommonOperatorBuilder* common,
if (loop->TotalSize() > LoopPeeler::kMaxPeeledNodes) return;
if (FLAG_trace_turbo_loop) {
PrintF("Peeling loop with header: ");
- for (Node* node : loop_tree->HeaderNodes(loop)) {
+ for (Node* node : loop_tree_->HeaderNodes(loop)) {
PrintF("%i ", node->id());
}
PrintF("\n");
}
- LoopPeeler::Peel(graph, common, loop_tree, loop, temp_zone);
+ Peel(loop);
}
+namespace {
+
void EliminateLoopExit(Node* node) {
DCHECK_EQ(IrOpcode::kLoopExit, node->opcode());
// The exit markers take the loop exit as input. We iterate over uses
@@ -347,21 +347,18 @@ void EliminateLoopExit(Node* node) {
} // namespace
-// static
-void LoopPeeler::PeelInnerLoopsOfTree(Graph* graph,
- CommonOperatorBuilder* common,
- LoopTree* loop_tree, Zone* temp_zone) {
- for (LoopTree::Loop* loop : loop_tree->outer_loops()) {
- PeelInnerLoops(graph, common, loop_tree, loop, temp_zone);
+void LoopPeeler::PeelInnerLoopsOfTree() {
+ for (LoopTree::Loop* loop : loop_tree_->outer_loops()) {
+ PeelInnerLoops(loop);
}
- EliminateLoopExits(graph, temp_zone);
+ EliminateLoopExits(graph_, tmp_zone_);
}
// static
-void LoopPeeler::EliminateLoopExits(Graph* graph, Zone* temp_zone) {
- ZoneQueue<Node*> queue(temp_zone);
- ZoneVector<bool> visited(graph->NodeCount(), false, temp_zone);
+void LoopPeeler::EliminateLoopExits(Graph* graph, Zone* tmp_zone) {
+ ZoneQueue<Node*> queue(tmp_zone);
+ ZoneVector<bool> visited(graph->NodeCount(), false, tmp_zone);
queue.push(graph->end());
while (!queue.empty()) {
Node* node = queue.front();
diff --git a/deps/v8/src/compiler/loop-peeling.h b/deps/v8/src/compiler/loop-peeling.h
index 301e4b8b6c..cd08900dcd 100644
--- a/deps/v8/src/compiler/loop-peeling.h
+++ b/deps/v8/src/compiler/loop-peeling.h
@@ -13,6 +13,8 @@ namespace v8 {
namespace internal {
namespace compiler {
+class SourcePositionTable;
+
// Represents the output of peeling a loop, which is basically the mapping
// from the body of the loop to the corresponding nodes in the peeled
// iteration.
@@ -31,15 +33,28 @@ class CommonOperatorBuilder;
// Implements loop peeling.
class V8_EXPORT_PRIVATE LoopPeeler {
public:
- static bool CanPeel(LoopTree* loop_tree, LoopTree::Loop* loop);
- static PeeledIteration* Peel(Graph* graph, CommonOperatorBuilder* common,
- LoopTree* loop_tree, LoopTree::Loop* loop,
- Zone* tmp_zone);
- static void PeelInnerLoopsOfTree(Graph* graph, CommonOperatorBuilder* common,
- LoopTree* loop_tree, Zone* tmp_zone);
-
- static void EliminateLoopExits(Graph* graph, Zone* temp_zone);
+ LoopPeeler(Graph* graph, CommonOperatorBuilder* common, LoopTree* loop_tree,
+ Zone* tmp_zone, SourcePositionTable* source_positions)
+ : graph_(graph),
+ common_(common),
+ loop_tree_(loop_tree),
+ tmp_zone_(tmp_zone),
+ source_positions_(source_positions) {}
+ bool CanPeel(LoopTree::Loop* loop);
+ PeeledIteration* Peel(LoopTree::Loop* loop);
+ void PeelInnerLoopsOfTree();
+
+ static void EliminateLoopExits(Graph* graph, Zone* tmp_zone);
static const size_t kMaxPeeledNodes = 1000;
+
+ private:
+ Graph* const graph_;
+ CommonOperatorBuilder* const common_;
+ LoopTree* const loop_tree_;
+ Zone* const tmp_zone_;
+ SourcePositionTable* const source_positions_;
+
+ void PeelInnerLoops(LoopTree::Loop* loop);
};
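Note: the header makes peeling stateful: callers build one LoopPeeler and reuse it instead of threading graph/common/loop_tree/zone through every static call. A sketch of the new call shape, where the surrounding names are illustrative:

    // graph, common, loop_tree, tmp_zone and source_positions stand for
    // whatever the calling pass already has in scope.
    LoopPeeler peeler(graph, common, loop_tree, tmp_zone, source_positions);
    peeler.PeelInnerLoopsOfTree();

    // Or, for a single loop:
    if (peeler.CanPeel(loop)) {
      PeeledIteration* iter = peeler.Peel(loop);
      // iter->map(node) yields the peeled copy of a loop-body node.
    }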
diff --git a/deps/v8/src/compiler/loop-variable-optimizer.cc b/deps/v8/src/compiler/loop-variable-optimizer.cc
index 069c86414c..1e93de5124 100644
--- a/deps/v8/src/compiler/loop-variable-optimizer.cc
+++ b/deps/v8/src/compiler/loop-variable-optimizer.cc
@@ -301,7 +301,8 @@ const InductionVariable* LoopVariableOptimizer::FindInductionVariable(
InductionVariable* LoopVariableOptimizer::TryGetInductionVariable(Node* phi) {
DCHECK_EQ(2, phi->op()->ValueInputCount());
- DCHECK_EQ(IrOpcode::kLoop, NodeProperties::GetControlInput(phi)->opcode());
+ Node* loop = NodeProperties::GetControlInput(phi);
+ DCHECK_EQ(IrOpcode::kLoop, loop->opcode());
Node* initial = phi->InputAt(0);
Node* arith = phi->InputAt(1);
InductionVariable::ArithmeticType arithmeticType;
@@ -318,17 +319,20 @@ InductionVariable* LoopVariableOptimizer::TryGetInductionVariable(Node* phi) {
}
// TODO(jarin) Support both sides.
- // XXX
- if (arith->InputAt(0) != phi) {
- if ((arith->InputAt(0)->opcode() != IrOpcode::kJSToNumber &&
- arith->InputAt(0)->opcode() != IrOpcode::kSpeculativeToNumber) ||
- arith->InputAt(0)->InputAt(0) != phi) {
- return nullptr;
+ if (arith->InputAt(0) != phi) return nullptr;
+
+ Node* effect_phi = nullptr;
+ for (Node* use : loop->uses()) {
+ if (use->opcode() == IrOpcode::kEffectPhi) {
+ DCHECK_NULL(effect_phi);
+ effect_phi = use;
}
}
+ if (!effect_phi) return nullptr;
+
Node* incr = arith->InputAt(1);
- return new (zone())
- InductionVariable(phi, arith, incr, initial, zone(), arithmeticType);
+ return new (zone()) InductionVariable(phi, effect_phi, arith, incr, initial,
+ zone(), arithmeticType);
}
void LoopVariableOptimizer::DetectInductionVariables(Node* loop) {
@@ -398,10 +402,14 @@ void LoopVariableOptimizer::ChangeToPhisAndInsertGuards() {
Type* backedge_type = NodeProperties::GetType(backedge_value);
Type* phi_type = NodeProperties::GetType(induction_var->phi());
if (!backedge_type->Is(phi_type)) {
- Node* backedge_control =
- NodeProperties::GetControlInput(induction_var->phi())->InputAt(1);
- Node* rename = graph()->NewNode(common()->TypeGuard(phi_type),
- backedge_value, backedge_control);
+ Node* loop = NodeProperties::GetControlInput(induction_var->phi());
+ Node* backedge_control = loop->InputAt(1);
+ Node* backedge_effect =
+ NodeProperties::GetEffectInput(induction_var->effect_phi(), 1);
+ Node* rename =
+ graph()->NewNode(common()->TypeGuard(phi_type), backedge_value,
+ backedge_effect, backedge_control);
+ induction_var->effect_phi()->ReplaceInput(1, rename);
induction_var->phi()->ReplaceInput(1, rename);
}
}
diff --git a/deps/v8/src/compiler/loop-variable-optimizer.h b/deps/v8/src/compiler/loop-variable-optimizer.h
index 8054ec16c8..9eec614070 100644
--- a/deps/v8/src/compiler/loop-variable-optimizer.h
+++ b/deps/v8/src/compiler/loop-variable-optimizer.h
@@ -18,6 +18,7 @@ class Node;
class InductionVariable : public ZoneObject {
public:
Node* phi() const { return phi_; }
+ Node* effect_phi() const { return effect_phi_; }
Node* arith() const { return arith_; }
Node* increment() const { return increment_; }
Node* init_value() const { return init_value_; }
@@ -39,9 +40,10 @@ class InductionVariable : public ZoneObject {
private:
friend class LoopVariableOptimizer;
- InductionVariable(Node* phi, Node* arith, Node* increment, Node* init_value,
- Zone* zone, ArithmeticType arithmeticType)
+ InductionVariable(Node* phi, Node* effect_phi, Node* arith, Node* increment,
+ Node* init_value, Zone* zone, ArithmeticType arithmeticType)
: phi_(phi),
+ effect_phi_(effect_phi),
arith_(arith),
increment_(increment),
init_value_(init_value),
@@ -53,6 +55,7 @@ class InductionVariable : public ZoneObject {
void AddLowerBound(Node* bound, ConstraintKind kind);
Node* phi_;
+ Node* effect_phi_;
Node* arith_;
Node* increment_;
Node* init_value_;
diff --git a/deps/v8/src/compiler/machine-graph-verifier.cc b/deps/v8/src/compiler/machine-graph-verifier.cc
index 8393a749bb..43f1518461 100644
--- a/deps/v8/src/compiler/machine-graph-verifier.cc
+++ b/deps/v8/src/compiler/machine-graph-verifier.cc
@@ -116,10 +116,6 @@ class MachineRepresentationInferrer {
representation_vector_[node->id()] = PromoteRepresentation(
LoadRepresentationOf(node->op()).representation());
break;
- case IrOpcode::kCheckedLoad:
- representation_vector_[node->id()] = PromoteRepresentation(
- CheckedLoadRepresentationOf(node->op()).representation());
- break;
case IrOpcode::kLoadStackPointer:
case IrOpcode::kLoadFramePointer:
case IrOpcode::kLoadParentFramePointer:
@@ -165,10 +161,6 @@ class MachineRepresentationInferrer {
representation_vector_[node->id()] = PromoteRepresentation(
StoreRepresentationOf(node->op()).representation());
break;
- case IrOpcode::kCheckedStore:
- representation_vector_[node->id()] =
- PromoteRepresentation(CheckedStoreRepresentationOf(node->op()));
- break;
case IrOpcode::kUnalignedStore:
representation_vector_[node->id()] = PromoteRepresentation(
UnalignedStoreRepresentationOf(node->op()));
@@ -273,6 +265,11 @@ class MachineRepresentationInferrer {
MachineRepresentation::kFloat64;
}
break;
+ case IrOpcode::kI32x4ReplaceLane:
+ case IrOpcode::kI32x4Splat:
+ representation_vector_[node->id()] =
+ MachineRepresentation::kSimd128;
+ break;
#undef LABEL
default:
break;
@@ -377,6 +374,14 @@ class MachineRepresentationChecker {
CheckValueInputRepresentationIs(node, 0,
MachineRepresentation::kSimd128);
break;
+ case IrOpcode::kI32x4ReplaceLane:
+ CheckValueInputRepresentationIs(node, 0,
+ MachineRepresentation::kSimd128);
+ CheckValueInputForInt32Op(node, 1);
+ break;
+ case IrOpcode::kI32x4Splat:
+ CheckValueInputForInt32Op(node, 0);
+ break;
#define LABEL(opcode) case IrOpcode::k##opcode:
case IrOpcode::kChangeInt32ToTagged:
case IrOpcode::kChangeUint32ToTagged:
@@ -562,7 +567,7 @@ class MachineRepresentationChecker {
str << "Node #" << node->id() << ":" << *node->op()
<< " in the machine graph is not being checked.";
PrintDebugHelp(str, node);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
break;
}
@@ -592,7 +597,7 @@ class MachineRepresentationChecker {
<< input_representation << " which doesn't have a " << representation
<< " representation.";
PrintDebugHelp(str, node);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
}
@@ -611,7 +616,7 @@ class MachineRepresentationChecker {
<< " uses node #" << input->id() << ":" << *input->op()
<< " which doesn't have a tagged representation.";
PrintDebugHelp(str, node);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
void CheckValueInputIsTaggedOrPointer(Node const* node, int index) {
@@ -644,7 +649,7 @@ class MachineRepresentationChecker {
<< " uses node #" << input->id() << ":" << *input->op()
<< " which doesn't have a tagged or pointer representation.";
PrintDebugHelp(str, node);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
}
@@ -661,7 +666,7 @@ class MachineRepresentationChecker {
str << "TypeError: node #" << input->id() << ":" << *input->op()
<< " is untyped.";
PrintDebugHelp(str, node);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
break;
}
default:
@@ -672,7 +677,7 @@ class MachineRepresentationChecker {
<< " uses node #" << input->id() << ":" << *input->op()
<< " which doesn't have an int32-compatible representation.";
PrintDebugHelp(str, node);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
void CheckValueInputForInt64Op(Node const* node, int index) {
@@ -687,7 +692,7 @@ class MachineRepresentationChecker {
str << "TypeError: node #" << input->id() << ":" << *input->op()
<< " is untyped.";
PrintDebugHelp(str, node);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
break;
}
@@ -700,7 +705,7 @@ class MachineRepresentationChecker {
<< input_representation
<< " which doesn't have a kWord64 representation.";
PrintDebugHelp(str, node);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
void CheckValueInputForFloat32Op(Node const* node, int index) {
@@ -714,7 +719,7 @@ class MachineRepresentationChecker {
<< " uses node #" << input->id() << ":" << *input->op()
<< " which doesn't have a kFloat32 representation.";
PrintDebugHelp(str, node);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
void CheckValueInputForFloat64Op(Node const* node, int index) {
@@ -728,7 +733,7 @@ class MachineRepresentationChecker {
<< " uses node #" << input->id() << ":" << *input->op()
<< " which doesn't have a kFloat64 representation.";
PrintDebugHelp(str, node);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
void CheckCallInputs(Node const* node) {
@@ -755,7 +760,7 @@ class MachineRepresentationChecker {
}
if (should_log_error) {
PrintDebugHelp(str, node);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
}
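Note: every FATAL(str.str().c_str()) becomes FATAL("%s", ...), which stops the assembled diagnostic from being interpreted as a printf format string. The hazard, shown with plain printf:

    #include <cstdio>

    int main() {
      const char* msg = "lane index 100% out of range";  // contains '%'
      // std::printf(msg);       // undefined behavior: '%' starts a conversion
      std::printf("%s\n", msg);  // safe: msg is passed as data, not format
    }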
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index e589f0cbd8..97c83b1b82 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -293,7 +293,7 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
// (x >> K) < C => x < (C << K)
// when C < (M >> K)
const uint32_t c = m.right().Value();
- const uint32_t k = mleft.right().Value() & 0x1f;
+ const uint32_t k = mleft.right().Value() & 0x1F;
if (c < static_cast<uint32_t>(kMaxInt >> k)) {
node->ReplaceInput(0, mleft.left().node());
node->ReplaceInput(1, Uint32Constant(c << k));
@@ -684,7 +684,6 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
return ReduceFloat64InsertHighWord32(node);
case IrOpcode::kStore:
case IrOpcode::kUnalignedStore:
- case IrOpcode::kCheckedStore:
return ReduceStore(node);
case IrOpcode::kFloat64Equal:
case IrOpcode::kFloat64LessThan:
@@ -923,10 +922,7 @@ Reduction MachineOperatorReducer::ReduceStore(Node* node) {
NodeMatcher nm(node);
MachineRepresentation rep;
int value_input;
- if (nm.IsCheckedStore()) {
- rep = CheckedStoreRepresentationOf(node->op());
- value_input = 3;
- } else if (nm.IsStore()) {
+ if (nm.IsStore()) {
rep = StoreRepresentationOf(node->op()).representation();
value_input = 2;
} else {
@@ -941,9 +937,9 @@ Reduction MachineOperatorReducer::ReduceStore(Node* node) {
case IrOpcode::kWord32And: {
Uint32BinopMatcher m(value);
if (m.right().HasValue() && ((rep == MachineRepresentation::kWord8 &&
- (m.right().Value() & 0xff) == 0xff) ||
+ (m.right().Value() & 0xFF) == 0xFF) ||
(rep == MachineRepresentation::kWord16 &&
- (m.right().Value() & 0xffff) == 0xffff))) {
+ (m.right().Value() & 0xFFFF) == 0xFFFF))) {
node->ReplaceInput(value_input, m.left().node());
return Changed(node);
}
@@ -1029,12 +1025,12 @@ Reduction MachineOperatorReducer::ReduceWord32Shifts(Node* node) {
(node->opcode() == IrOpcode::kWord32Shr) ||
(node->opcode() == IrOpcode::kWord32Sar));
if (machine()->Word32ShiftIsSafe()) {
- // Remove the explicit 'and' with 0x1f if the shift provided by the machine
+ // Remove the explicit 'and' with 0x1F if the shift provided by the machine
// instruction matches that required by JavaScript.
Int32BinopMatcher m(node);
if (m.right().IsWord32And()) {
Int32BinopMatcher mright(m.right().node());
- if (mright.right().Is(0x1f)) {
+ if (mright.right().Is(0x1F)) {
node->ReplaceInput(1, mright.left().node());
return Changed(node);
}
@@ -1088,7 +1084,7 @@ Reduction MachineOperatorReducer::ReduceWord32Shr(Node* node) {
if (m.left().IsWord32And() && m.right().HasValue()) {
Uint32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue()) {
- uint32_t shift = m.right().Value() & 0x1f;
+ uint32_t shift = m.right().Value() & 0x1F;
uint32_t mask = mleft.right().Value();
if ((mask >> shift) == 0) {
// (m >>> s) == 0 implies ((x & m) >>> s) == 0
@@ -1180,7 +1176,7 @@ Reduction MachineOperatorReducer::ReduceWord32And(Node* node) {
if (m.left().IsWord32Shl()) {
Uint32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue() &&
- (mleft.right().Value() & 0x1f) >=
+ (mleft.right().Value() & 0x1F) >=
base::bits::CountTrailingZeros(mask)) {
// (x << L) & (-1 << K) => x << L iff L >= K
return Replace(mleft.node());
@@ -1344,7 +1340,7 @@ Reduction MachineOperatorReducer::ReduceFloat64InsertLowWord32(Node* node) {
Uint32Matcher mrhs(node->InputAt(1));
if (mlhs.HasValue() && mrhs.HasValue()) {
return ReplaceFloat64(bit_cast<double>(
- (bit_cast<uint64_t>(mlhs.Value()) & V8_UINT64_C(0xFFFFFFFF00000000)) |
+ (bit_cast<uint64_t>(mlhs.Value()) & uint64_t{0xFFFFFFFF00000000}) |
mrhs.Value()));
}
return NoChange();
@@ -1357,7 +1353,7 @@ Reduction MachineOperatorReducer::ReduceFloat64InsertHighWord32(Node* node) {
Uint32Matcher mrhs(node->InputAt(1));
if (mlhs.HasValue() && mrhs.HasValue()) {
return ReplaceFloat64(bit_cast<double>(
- (bit_cast<uint64_t>(mlhs.Value()) & V8_UINT64_C(0xFFFFFFFF)) |
+ (bit_cast<uint64_t>(mlhs.Value()) & uint64_t{0xFFFFFFFF}) |
(static_cast<uint64_t>(mrhs.Value()) << 32)));
}
return NoChange();
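Note: besides the hex-casing cleanup, ReduceStore keeps its narrowing-store rule: a Word32And whose mask preserves exactly the bits the store writes is dropped. A minimal demonstration that the masked and unmasked byte stores have identical observable effect:

    #include <cstdint>

    // A kWord8 store keeps only the low byte, so masking with 0xFF
    // first changes nothing; the reducer forwards x directly.
    void StoreByteMasked(uint8_t* p, uint32_t x) {
      *p = static_cast<uint8_t>(x & 0xFF);
    }
    void StoreByte(uint8_t* p, uint32_t x) {
      *p = static_cast<uint8_t>(x);  // same stored value for any x
    }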
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index 2603b1d18e..66178308be 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -59,17 +59,6 @@ UnalignedStoreRepresentation const& UnalignedStoreRepresentationOf(
return OpParameter<UnalignedStoreRepresentation>(op);
}
-CheckedLoadRepresentation CheckedLoadRepresentationOf(Operator const* op) {
- DCHECK_EQ(IrOpcode::kCheckedLoad, op->opcode());
- return OpParameter<CheckedLoadRepresentation>(op);
-}
-
-
-CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const* op) {
- DCHECK_EQ(IrOpcode::kCheckedStore, op->opcode());
- return OpParameter<CheckedStoreRepresentation>(op);
-}
-
bool operator==(StackSlotRepresentation lhs, StackSlotRepresentation rhs) {
return lhs.size() == rhs.size() && lhs.alignment() == rhs.alignment();
}
@@ -149,7 +138,6 @@ MachineType AtomicOpRepresentationOf(Operator const* op) {
PURE_BINARY_OP_LIST_64(V) \
V(Word32Clz, Operator::kNoProperties, 1, 0, 1) \
V(Word64Clz, Operator::kNoProperties, 1, 0, 1) \
- V(BitcastTaggedToWord, Operator::kNoProperties, 1, 0, 1) \
V(BitcastWordToTaggedSigned, Operator::kNoProperties, 1, 0, 1) \
V(TruncateFloat64ToWord32, Operator::kNoProperties, 1, 0, 1) \
V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
@@ -474,14 +462,6 @@ struct MachineOperatorGlobalCache {
Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
"UnalignedLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
}; \
- struct CheckedLoad##Type##Operator final \
- : public Operator1<CheckedLoadRepresentation> { \
- CheckedLoad##Type##Operator() \
- : Operator1<CheckedLoadRepresentation>( \
- IrOpcode::kCheckedLoad, \
- Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
- "CheckedLoad", 3, 1, 1, 1, 1, 0, MachineType::Type()) {} \
- }; \
struct ProtectedLoad##Type##Operator final \
: public Operator1<LoadRepresentation> { \
ProtectedLoad##Type##Operator() \
@@ -492,7 +472,6 @@ struct MachineOperatorGlobalCache {
}; \
Load##Type##Operator kLoad##Type; \
UnalignedLoad##Type##Operator kUnalignedLoad##Type; \
- CheckedLoad##Type##Operator kCheckedLoad##Type; \
ProtectedLoad##Type##Operator kProtectedLoad##Type;
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
@@ -547,15 +526,6 @@ struct MachineOperatorGlobalCache {
"UnalignedStore", 3, 1, 1, 0, 1, 0, \
MachineRepresentation::Type) {} \
}; \
- struct CheckedStore##Type##Operator final \
- : public Operator1<CheckedStoreRepresentation> { \
- CheckedStore##Type##Operator() \
- : Operator1<CheckedStoreRepresentation>( \
- IrOpcode::kCheckedStore, \
- Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
- "CheckedStore", 4, 1, 1, 0, 1, 0, MachineRepresentation::Type) { \
- } \
- }; \
struct ProtectedStore##Type##Operator \
: public Operator1<StoreRepresentation> { \
explicit ProtectedStore##Type##Operator() \
@@ -572,7 +542,6 @@ struct MachineOperatorGlobalCache {
kStore##Type##PointerWriteBarrier; \
Store##Type##FullWriteBarrier##Operator kStore##Type##FullWriteBarrier; \
UnalignedStore##Type##Operator kUnalignedStore##Type; \
- CheckedStore##Type##Operator kCheckedStore##Type; \
ProtectedStore##Type##Operator kProtectedStore##Type;
MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
@@ -644,10 +613,25 @@ struct MachineOperatorGlobalCache {
BitcastWordToTaggedOperator()
: Operator(IrOpcode::kBitcastWordToTagged,
Operator::kEliminatable | Operator::kNoWrite,
- "BitcastWordToTagged", 1, 0, 0, 1, 0, 0) {}
+ "BitcastWordToTagged", 1, 1, 1, 1, 1, 0) {}
};
BitcastWordToTaggedOperator kBitcastWordToTagged;
+ struct BitcastTaggedToWordOperator : public Operator {
+ BitcastTaggedToWordOperator()
+ : Operator(IrOpcode::kBitcastTaggedToWord,
+ Operator::kEliminatable | Operator::kNoWrite,
+ "BitcastTaggedToWord", 1, 1, 1, 1, 1, 0) {}
+ };
+ BitcastTaggedToWordOperator kBitcastTaggedToWord;
+
+ struct SpeculationFenceOperator : public Operator {
+ SpeculationFenceOperator()
+ : Operator(IrOpcode::kSpeculationFence, Operator::kNoThrow,
+ "SpeculationFence", 0, 1, 1, 0, 1, 0) {}
+ };
+ SpeculationFenceOperator kSpeculationFence;
+
struct DebugAbortOperator : public Operator {
DebugAbortOperator()
: Operator(IrOpcode::kDebugAbort, Operator::kNoThrow, "DebugAbort", 1,
@@ -823,6 +807,10 @@ const Operator* MachineOperatorBuilder::BitcastWordToTagged() {
return &cache_.kBitcastWordToTagged;
}
+const Operator* MachineOperatorBuilder::BitcastTaggedToWord() {
+ return &cache_.kBitcastTaggedToWord;
+}
+
const Operator* MachineOperatorBuilder::DebugAbort() {
return &cache_.kDebugAbort;
}
@@ -835,33 +823,6 @@ const Operator* MachineOperatorBuilder::Comment(const char* msg) {
return new (zone_) CommentOperator(msg);
}
-const Operator* MachineOperatorBuilder::CheckedLoad(
- CheckedLoadRepresentation rep) {
-#define LOAD(Type) \
- if (rep == MachineType::Type()) { \
- return &cache_.kCheckedLoad##Type; \
- }
- MACHINE_TYPE_LIST(LOAD)
-#undef LOAD
- UNREACHABLE();
-}
-
-
-const Operator* MachineOperatorBuilder::CheckedStore(
- CheckedStoreRepresentation rep) {
- switch (rep) {
-#define STORE(kRep) \
- case MachineRepresentation::kRep: \
- return &cache_.kCheckedStore##kRep;
- MACHINE_REPRESENTATION_LIST(STORE)
-#undef STORE
- case MachineRepresentation::kBit:
- case MachineRepresentation::kNone:
- break;
- }
- UNREACHABLE();
-}
-
const Operator* MachineOperatorBuilder::AtomicLoad(LoadRepresentation rep) {
#define LOAD(Type) \
if (rep == MachineType::Type()) { \
@@ -952,6 +913,11 @@ const Operator* MachineOperatorBuilder::AtomicXor(MachineType rep) {
UNREACHABLE();
}
+const OptionalOperator MachineOperatorBuilder::SpeculationFence() {
+ return OptionalOperator(flags_ & kSpeculationFence,
+ &cache_.kSpeculationFence);
+}
+
#define SIMD_LANE_OPS(Type, lane_count) \
const Operator* MachineOperatorBuilder::Type##ExtractLane( \
int32_t lane_index) { \
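Note: if I read V8's Operator constructor correctly, the trailing counts are input/output arities, which makes the BitcastWordToTagged change above visible at a glance:

    // Operator(opcode, properties, mnemonic,
    //          value_in, effect_in, control_in,
    //          value_out, effect_out, control_out)
    // old "BitcastWordToTagged": 1, 0, 0, 1, 0, 0  (pure value node)
    // new "BitcastWordToTagged": 1, 1, 1, 1, 1, 0  (threaded through the
    //     effect and control chains, so the scheduler cannot float it)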
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index 3b6634c8bc..10b4b15701 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -83,17 +83,6 @@ typedef MachineRepresentation UnalignedStoreRepresentation;
UnalignedStoreRepresentation const& UnalignedStoreRepresentationOf(
Operator const*);
-// A CheckedLoad needs a MachineType.
-typedef MachineType CheckedLoadRepresentation;
-
-CheckedLoadRepresentation CheckedLoadRepresentationOf(Operator const*);
-
-
-// A CheckedStore needs a MachineType.
-typedef MachineRepresentation CheckedStoreRepresentation;
-
-CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const*);
-
class StackSlotRepresentation final {
public:
StackSlotRepresentation(int size, int alignment)
@@ -154,13 +143,15 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
kWord64ReverseBytes = 1u << 19,
kInt32AbsWithOverflow = 1u << 20,
kInt64AbsWithOverflow = 1u << 21,
+ kSpeculationFence = 1u << 22,
kAllOptionalOps =
kFloat32RoundDown | kFloat64RoundDown | kFloat32RoundUp |
kFloat64RoundUp | kFloat32RoundTruncate | kFloat64RoundTruncate |
kFloat64RoundTiesAway | kFloat32RoundTiesEven | kFloat64RoundTiesEven |
kWord32Ctz | kWord64Ctz | kWord32Popcnt | kWord64Popcnt |
kWord32ReverseBits | kWord64ReverseBits | kWord32ReverseBytes |
- kWord64ReverseBytes | kInt32AbsWithOverflow | kInt64AbsWithOverflow
+ kWord64ReverseBytes | kInt32AbsWithOverflow | kInt64AbsWithOverflow |
+ kSpeculationFence
};
typedef base::Flags<Flag, unsigned> Flags;
@@ -606,11 +597,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* LoadFramePointer();
const Operator* LoadParentFramePointer();
- // checked-load heap, index, length
- const Operator* CheckedLoad(CheckedLoadRepresentation);
- // checked-store heap, index, length, value
- const Operator* CheckedStore(CheckedStoreRepresentation);
-
// atomic-load [base + index]
const Operator* AtomicLoad(LoadRepresentation rep);
// atomic-store [base + index], value
@@ -630,6 +616,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
// atomic-xor [base + index], value
const Operator* AtomicXor(MachineType rep);
+ const OptionalOperator SpeculationFence();
+
// Target machine word-size assumed by this builder.
bool Is32() const { return word() == MachineRepresentation::kWord32; }
bool Is64() const { return word() == MachineRepresentation::kWord64; }
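Note: SpeculationFence is an OptionalOperator gated on the new kSpeculationFence flag, so targets without a fence instruction simply never set the flag. A sketch of the consumer side, assuming OptionalOperator's usual IsSupported/op accessors:

    // machine is assumed to be a MachineOperatorBuilder constructed
    // with or without the kSpeculationFence flag.
    void MaybeEmitFence(MachineOperatorBuilder* machine) {
      OptionalOperator fence = machine->SpeculationFence();
      if (fence.IsSupported()) {
        // ... create a graph node from fence.op() ...
      }
      // else: no speculation barrier available on this target.
    }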
diff --git a/deps/v8/src/compiler/memory-optimizer.cc b/deps/v8/src/compiler/memory-optimizer.cc
index 767ada506a..596204e214 100644
--- a/deps/v8/src/compiler/memory-optimizer.cc
+++ b/deps/v8/src/compiler/memory-optimizer.cc
@@ -92,8 +92,6 @@ void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
return VisitStoreElement(node, state);
case IrOpcode::kStoreField:
return VisitStoreField(node, state);
- case IrOpcode::kCheckedLoad:
- case IrOpcode::kCheckedStore:
case IrOpcode::kDeoptimizeIf:
case IrOpcode::kDeoptimizeUnless:
case IrOpcode::kIfException:
diff --git a/deps/v8/src/compiler/mips/code-generator-mips.cc b/deps/v8/src/compiler/mips/code-generator-mips.cc
index b7301749cf..3b57081c9e 100644
--- a/deps/v8/src/compiler/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/mips/code-generator-mips.cc
@@ -143,46 +143,6 @@ static inline bool HasRegisterInput(Instruction* instr, size_t index) {
namespace {
-class OutOfLineLoadSingle final : public OutOfLineCode {
- public:
- OutOfLineLoadSingle(CodeGenerator* gen, FloatRegister result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final {
- __ Move(result_, std::numeric_limits<float>::quiet_NaN());
- }
-
- private:
- FloatRegister const result_;
-};
-
-
-class OutOfLineLoadDouble final : public OutOfLineCode {
- public:
- OutOfLineLoadDouble(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final {
- __ Move(result_, std::numeric_limits<double>::quiet_NaN());
- }
-
- private:
- DoubleRegister const result_;
-};
-
-
-class OutOfLineLoadInteger final : public OutOfLineCode {
- public:
- OutOfLineLoadInteger(CodeGenerator* gen, Register result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final { __ mov(result_, zero_reg); }
-
- private:
- Register const result_;
-};
-
-
class OutOfLineRound : public OutOfLineCode {
public:
OutOfLineRound(CodeGenerator* gen, DoubleRegister result)
@@ -391,82 +351,6 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
} // namespace
-#define ASSEMBLE_CHECKED_LOAD_FLOAT(width, asm_instr) \
- do { \
- auto result = i.Output##width##Register(); \
- auto ool = new (zone()) OutOfLineLoad##width(this, result); \
- if (instr->InputAt(0)->IsRegister()) { \
- auto offset = i.InputRegister(0); \
- __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
- __ Addu(kScratchReg, i.InputRegister(2), offset); \
- __ asm_instr(result, MemOperand(kScratchReg, 0)); \
- } else { \
- auto offset = i.InputOperand(0).immediate(); \
- __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset)); \
- __ asm_instr(result, MemOperand(i.InputRegister(2), offset)); \
- } \
- __ bind(ool->exit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
- do { \
- auto result = i.OutputRegister(); \
- auto ool = new (zone()) OutOfLineLoadInteger(this, result); \
- if (instr->InputAt(0)->IsRegister()) { \
- auto offset = i.InputRegister(0); \
- __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
- __ Addu(kScratchReg, i.InputRegister(2), offset); \
- __ asm_instr(result, MemOperand(kScratchReg, 0)); \
- } else { \
- auto offset = i.InputOperand(0).immediate(); \
- __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset)); \
- __ asm_instr(result, MemOperand(i.InputRegister(2), offset)); \
- } \
- __ bind(ool->exit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_STORE_FLOAT(width, asm_instr) \
- do { \
- Label done; \
- if (instr->InputAt(0)->IsRegister()) { \
- auto offset = i.InputRegister(0); \
- auto value = i.InputOrZero##width##Register(2); \
- if (value == kDoubleRegZero && !__ IsDoubleZeroRegSet()) { \
- __ Move(kDoubleRegZero, 0.0); \
- } \
- __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
- __ Addu(kScratchReg, i.InputRegister(3), offset); \
- __ asm_instr(value, MemOperand(kScratchReg, 0)); \
- } else { \
- auto offset = i.InputOperand(0).immediate(); \
- auto value = i.InputOrZero##width##Register(2); \
- if (value == kDoubleRegZero && !__ IsDoubleZeroRegSet()) { \
- __ Move(kDoubleRegZero, 0.0); \
- } \
- __ Branch(&done, ls, i.InputRegister(1), Operand(offset)); \
- __ asm_instr(value, MemOperand(i.InputRegister(3), offset)); \
- } \
- __ bind(&done); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
- do { \
- Label done; \
- if (instr->InputAt(0)->IsRegister()) { \
- auto offset = i.InputRegister(0); \
- auto value = i.InputOrZeroRegister(2); \
- __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
- __ Addu(kScratchReg, i.InputRegister(3), offset); \
- __ asm_instr(value, MemOperand(kScratchReg, 0)); \
- } else { \
- auto offset = i.InputOperand(0).immediate(); \
- auto value = i.InputOrZeroRegister(2); \
- __ Branch(&done, ls, i.InputRegister(1), Operand(offset)); \
- __ asm_instr(value, MemOperand(i.InputRegister(3), offset)); \
- } \
- __ bind(&done); \
- } while (0)
-
#define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(mode) \
if (IsMipsArchVariant(kMips32r6)) { \
__ cfc1(kScratchReg, FCSR); \
@@ -787,7 +671,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Call(wasm_code, info()->IsWasm() ? RelocInfo::WASM_CALL
: RelocInfo::JS_TO_WASM_CALL);
} else {
- __ Call(at, i.InputRegister(0), 0);
+ __ Call(i.InputRegister(0));
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -816,7 +700,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Jump(wasm_code, info()->IsWasm() ? RelocInfo::WASM_CALL
: RelocInfo::JS_TO_WASM_CALL);
} else {
- __ Jump(at, i.InputRegister(0), 0);
+ __ Jump(i.InputRegister(0));
}
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@@ -834,7 +718,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
__ lw(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
- __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
+ __ Assert(eq, AbortReason::kWrongFunctionContext, cp,
+ Operand(kScratchReg));
}
__ lw(at, FieldMemOperand(func, JSFunction::kCodeOffset));
__ Call(at, Code::kHeaderSize - kHeapObjectTag);
@@ -998,7 +883,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (FLAG_debug_code && alignment > 0) {
// Verify that the output_register is properly aligned
__ And(kScratchReg, i.OutputRegister(), Operand(kPointerSize - 1));
- __ Assert(eq, kAllocationIsNotDoubleAligned, kScratchReg,
+ __ Assert(eq, AbortReason::kAllocationIsNotDoubleAligned, kScratchReg,
Operand(zero_reg));
}
@@ -1203,7 +1088,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register dst = i.OutputRegister();
uint32_t B0 = 0x55555555; // (T)~(T)0/3
uint32_t B1 = 0x33333333; // (T)~(T)0/15*3
- uint32_t B2 = 0x0f0f0f0f; // (T)~(T)0/255*15
+ uint32_t B2 = 0x0F0F0F0F; // (T)~(T)0/255*15
uint32_t value = 0x01010101; // (T)~(T)0/255
uint32_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE
__ srl(kScratchReg, src, 1);
@@ -1742,14 +1627,45 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kMipsPush:
if (instr->InputAt(0)->IsFPRegister()) {
- __ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
- __ Subu(sp, sp, Operand(kDoubleSize));
- frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
+ switch (op->representation()) {
+ case MachineRepresentation::kFloat32:
+ __ swc1(i.InputFloatRegister(0), MemOperand(sp, -kFloatSize));
+ __ Subu(sp, sp, Operand(kFloatSize));
+ frame_access_state()->IncreaseSPDelta(kFloatSize / kPointerSize);
+ break;
+ case MachineRepresentation::kFloat64:
+ __ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
+ __ Subu(sp, sp, Operand(kDoubleSize));
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ break;
+ default: {
+ UNREACHABLE();
+ break;
+ }
+ }
} else {
__ Push(i.InputRegister(0));
frame_access_state()->IncreaseSPDelta(1);
}
break;
+ case kMipsPeek: {
+ int reverse_slot = MiscField::decode(instr->opcode());
+ int offset =
+ FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
+ if (instr->OutputAt(0)->IsFPRegister()) {
+ LocationOperand* op = LocationOperand::cast(instr->OutputAt(0));
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ Ldc1(i.OutputDoubleRegister(), MemOperand(fp, offset));
+ } else {
+ DCHECK_EQ(op->representation(), MachineRepresentation::kFloat32);
+ __ lwc1(i.OutputSingleRegister(0), MemOperand(fp, offset));
+ }
+ } else {
+ __ lw(i.OutputRegister(0), MemOperand(fp, offset));
+ }
+ break;
+ }
case kMipsStackClaim: {
__ Subu(sp, sp, Operand(i.InputInt32(0)));
frame_access_state()->IncreaseSPDelta(i.InputInt32(0) / kPointerSize);
@@ -1773,46 +1689,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 4);
break;
}
- case kCheckedLoadInt8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(lb);
- break;
- case kCheckedLoadUint8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(lbu);
- break;
- case kCheckedLoadInt16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(lh);
- break;
- case kCheckedLoadUint16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(lhu);
- break;
- case kCheckedLoadWord32:
- ASSEMBLE_CHECKED_LOAD_INTEGER(lw);
- break;
- case kCheckedLoadFloat32:
- ASSEMBLE_CHECKED_LOAD_FLOAT(Single, lwc1);
- break;
- case kCheckedLoadFloat64:
- ASSEMBLE_CHECKED_LOAD_FLOAT(Double, Ldc1);
- break;
- case kCheckedStoreWord8:
- ASSEMBLE_CHECKED_STORE_INTEGER(sb);
- break;
- case kCheckedStoreWord16:
- ASSEMBLE_CHECKED_STORE_INTEGER(sh);
- break;
- case kCheckedStoreWord32:
- ASSEMBLE_CHECKED_STORE_INTEGER(sw);
- break;
- case kCheckedStoreFloat32:
- ASSEMBLE_CHECKED_STORE_FLOAT(Single, swc1);
- break;
- case kCheckedStoreFloat64:
- ASSEMBLE_CHECKED_STORE_FLOAT(Double, Sdc1);
- break;
- case kCheckedLoadWord64:
- case kCheckedStoreWord64:
- UNREACHABLE(); // currently unsupported checked int64 load/store.
- break;
case kAtomicLoadInt8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lb);
break;
@@ -2593,7 +2469,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (src0 == src1) {
// Unary S32x4 shuffles are handled with shf.w instruction
- unsigned lane = shuffle & 0xff;
+ unsigned lane = shuffle & 0xFF;
if (FLAG_debug_code) {
// range of all four lanes, for unary instruction,
// should belong to the same range, which can be one of these:
@@ -2601,7 +2477,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (lane >= 4) {
int32_t shuffle_helper = shuffle;
for (int i = 0; i < 4; ++i) {
- lane = shuffle_helper & 0xff;
+ lane = shuffle_helper & 0xFF;
CHECK_GE(lane, 4);
shuffle_helper >>= 8;
}
@@ -2609,7 +2485,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
uint32_t i8 = 0;
for (int i = 0; i < 4; i++) {
- lane = shuffle & 0xff;
+ lane = shuffle & 0xFF;
if (lane >= 4) {
lane -= 4;
}
@@ -3163,7 +3039,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
- __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
+ __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
}
}
}
@@ -3438,7 +3314,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
- __ Abort(kShouldNotDirectlyEnterOsrFunction);
+ __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
// Unoptimized code jumps directly to this entrypoint while the unoptimized
// frame is still on the stack. Optimized code uses OSR values directly from
@@ -3451,10 +3327,12 @@ void CodeGenerator::AssembleConstructFrame() {
const RegList saves = descriptor->CalleeSavedRegisters();
const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
+ const int returns = frame()->GetReturnSlotCount();
- // Skip callee-saved slots, which are pushed below.
+ // Skip callee-saved and return slots, which are pushed below.
shrink_slots -= base::bits::CountPopulation(saves);
shrink_slots -= 2 * base::bits::CountPopulation(saves_fpu);
+ shrink_slots -= returns;
if (shrink_slots > 0) {
__ Subu(sp, sp, Operand(shrink_slots * kPointerSize));
}
@@ -3469,12 +3347,22 @@ void CodeGenerator::AssembleConstructFrame() {
__ MultiPush(saves);
DCHECK_EQ(kNumCalleeSaved, base::bits::CountPopulation(saves) + 1);
}
+
+ if (returns != 0) {
+ // Create space for returns.
+ __ Subu(sp, sp, Operand(returns * kPointerSize));
+ }
}
void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
int pop_count = static_cast<int>(descriptor->StackParameterCount());
+ const int returns = frame()->GetReturnSlotCount();
+ if (returns != 0) {
+ __ Addu(sp, sp, Operand(returns * kPointerSize));
+ }
+
// Restore GP registers.
const RegList saves = descriptor->CalleeSavedRegisters();
if (saves != 0) {
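Note: kMipsPush now sizes the stack adjustment by representation rather than always reserving a double slot. On mips32, where kPointerSize is 4, the bookkeeping works out as:

    // kFloat32: swc1 + Subu sp, sp, 4  -> SP delta += kFloatSize / 4  = 1 slot
    // kFloat64: Sdc1 + Subu sp, sp, 8  -> SP delta += kDoubleSize / 4 = 2 slots
    // GP reg:   Push(reg)              -> SP delta += 1 slot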
diff --git a/deps/v8/src/compiler/mips/instruction-codes-mips.h b/deps/v8/src/compiler/mips/instruction-codes-mips.h
index 3a2a873e48..dd789d0196 100644
--- a/deps/v8/src/compiler/mips/instruction-codes-mips.h
+++ b/deps/v8/src/compiler/mips/instruction-codes-mips.h
@@ -128,6 +128,7 @@ namespace compiler {
V(MipsFloat32Min) \
V(MipsFloat64Min) \
V(MipsPush) \
+ V(MipsPeek) \
V(MipsStoreToStackSlot) \
V(MipsByteSwap32) \
V(MipsStackClaim) \
diff --git a/deps/v8/src/compiler/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
index 1053763f0d..35b8a2396d 100644
--- a/deps/v8/src/compiler/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
@@ -36,7 +36,7 @@ class MipsOperandGenerator final : public OperandGenerator {
InstructionOperand UseRegisterOrImmediateZero(Node* node) {
if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) ||
(IsFloatConstant(node) &&
- (bit_cast<int64_t>(GetFloatConstantValue(node)) == V8_INT64_C(0)))) {
+ (bit_cast<int64_t>(GetFloatConstantValue(node)) == 0))) {
return UseImmediate(node);
}
return UseRegister(node);
@@ -92,18 +92,6 @@ class MipsOperandGenerator final : public OperandGenerator {
case kMipsSwc1:
case kMipsLdc1:
case kMipsSdc1:
- case kCheckedLoadInt8:
- case kCheckedLoadUint8:
- case kCheckedLoadInt16:
- case kCheckedLoadUint16:
- case kCheckedLoadWord32:
- case kCheckedStoreWord8:
- case kCheckedStoreWord16:
- case kCheckedStoreWord32:
- case kCheckedLoadFloat32:
- case kCheckedLoadFloat64:
- case kCheckedStoreFloat32:
- case kCheckedStoreFloat64:
// true even for 32b values, offsets > 16b
// are handled in assembler-mips.cc
return is_int32(value);
@@ -233,7 +221,8 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -432,7 +421,7 @@ void InstructionSelector::VisitWord32And(Node* node) {
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue()) {
// Any shift value can match; int32 shifts use `value % 32`.
- uint32_t lsb = mleft.right().Value() & 0x1f;
+ uint32_t lsb = mleft.right().Value() & 0x1F;
// Ext cannot extract bits past the register size, however since
// shifting the original value would have introduced some zeros we can
@@ -531,7 +520,7 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
void InstructionSelector::VisitWord32Shr(Node* node) {
Int32BinopMatcher m(node);
if (m.left().IsWord32And() && m.right().HasValue()) {
- uint32_t lsb = m.right().Value() & 0x1f;
+ uint32_t lsb = m.right().Value() & 0x1F;
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue() && mleft.right().Value() != 0) {
// Select Ext for Shr(And(x, mask), imm) where the result of the mask is
@@ -1181,8 +1170,8 @@ void InstructionSelector::EmitPrepareArguments(
// Poke any stack arguments.
int slot = kCArgSlotCount;
for (PushParameter input : (*arguments)) {
- if (input.node()) {
- Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
+ if (input.node) {
+ Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
g.TempImmediate(slot << kPointerSizeLog2));
++slot;
}
@@ -1191,19 +1180,53 @@ void InstructionSelector::EmitPrepareArguments(
// Possibly align stack here for functions.
int push_count = static_cast<int>(descriptor->StackParameterCount());
if (push_count > 0) {
+    // Calculate the needed stack space.
+ int stack_size = 0;
+ for (size_t n = 0; n < arguments->size(); ++n) {
+ PushParameter input = (*arguments)[n];
+ if (input.node) {
+ stack_size += input.location.GetSizeInPointers();
+ }
+ }
Emit(kMipsStackClaim, g.NoOutput(),
- g.TempImmediate(push_count << kPointerSizeLog2));
+ g.TempImmediate(stack_size << kPointerSizeLog2));
}
for (size_t n = 0; n < arguments->size(); ++n) {
PushParameter input = (*arguments)[n];
- if (input.node()) {
- Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
+ if (input.node) {
+ Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
g.TempImmediate(n << kPointerSizeLog2));
}
}
}
}
+void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
+ const CallDescriptor* descriptor,
+ Node* node) {
+ MipsOperandGenerator g(this);
+
+ int reverse_slot = 0;
+ for (PushParameter output : *results) {
+ if (!output.location.IsCallerFrameSlot()) continue;
+ ++reverse_slot;
+    // Skip alignment holes, which are marked by null nodes.
+ if (output.node != nullptr) {
+ DCHECK(!descriptor->IsCFunctionCall());
+ if (output.location.GetType() == MachineType::Float32()) {
+ MarkAsFloat32(output.node);
+ } else if (output.location.GetType() == MachineType::Float64()) {
+ MarkAsFloat64(output.node);
+ }
+ InstructionOperand result = g.DefineAsRegister(output.node);
+ Emit(kMipsPeek | MiscField::encode(reverse_slot), result);
+ }
+ if (output.location.GetType() == MachineType::Float64()) {
+      // Float64 requires an implicit second slot.
+ ++reverse_slot;
+ }
+ }
+}
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
@@ -1312,99 +1335,6 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
}
}
-void InstructionSelector::VisitCheckedLoad(Node* node) {
- CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
- MipsOperandGenerator g(this);
- Node* const buffer = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- ArchOpcode opcode = kArchNop;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedLoadWord32;
- break;
- case MachineRepresentation::kFloat32:
- opcode = kCheckedLoadFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedLoadFloat64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- InstructionOperand offset_operand = g.CanBeImmediate(offset, opcode)
- ? g.UseImmediate(offset)
- : g.UseRegister(offset);
-
- InstructionOperand length_operand = (!g.CanBeImmediate(offset, opcode))
- ? g.CanBeImmediate(length, opcode)
- ? g.UseImmediate(length)
- : g.UseRegister(length)
- : g.UseRegister(length);
-
- Emit(opcode | AddressingModeField::encode(kMode_MRI),
- g.DefineAsRegister(node), offset_operand, length_operand,
- g.UseRegister(buffer));
-}
-
-
-void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
- MipsOperandGenerator g(this);
- Node* const buffer = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- Node* const value = node->InputAt(3);
- ArchOpcode opcode = kArchNop;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kCheckedStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kCheckedStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedStoreWord32;
- break;
- case MachineRepresentation::kFloat32:
- opcode = kCheckedStoreFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedStoreFloat64;
- break;
- default:
- UNREACHABLE();
- return;
- }
- InstructionOperand offset_operand = g.CanBeImmediate(offset, opcode)
- ? g.UseImmediate(offset)
- : g.UseRegister(offset);
-
- InstructionOperand length_operand = (!g.CanBeImmediate(offset, opcode))
- ? g.CanBeImmediate(length, opcode)
- ? g.UseImmediate(length)
- : g.UseRegister(length)
- : g.UseRegister(length);
-
- Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- offset_operand, length_operand, g.UseRegisterOrImmediateZero(value),
- g.UseRegister(buffer));
-}
-
-
namespace {
// Shared routine for multiple compare operations.
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
@@ -1417,7 +1347,8 @@ static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
- cont->reason(), cont->frame_state());
+ cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
} else {
@@ -1630,7 +1561,7 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand,
g.TempImmediate(0), cont->kind(), cont->reason(),
- cont->frame_state());
+ cont->feedback(), cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
g.TempImmediate(0));
@@ -1652,14 +1583,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), node->InputAt(1));
+ kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), node->InputAt(1));
+ kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
@@ -2057,6 +1988,8 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
UNREACHABLE();
}
+void InstructionSelector::VisitSpeculationFence(Node* node) { UNREACHABLE(); }
+
#define SIMD_TYPE_LIST(V) \
V(F32x4) \
V(I32x4) \
diff --git a/deps/v8/src/compiler/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
index 6d43750b1c..d4463008c8 100644
--- a/deps/v8/src/compiler/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
@@ -143,46 +143,6 @@ static inline bool HasRegisterInput(Instruction* instr, size_t index) {
namespace {
-class OutOfLineLoadSingle final : public OutOfLineCode {
- public:
- OutOfLineLoadSingle(CodeGenerator* gen, FloatRegister result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final {
- __ Move(result_, std::numeric_limits<float>::quiet_NaN());
- }
-
- private:
- FloatRegister const result_;
-};
-
-
-class OutOfLineLoadDouble final : public OutOfLineCode {
- public:
- OutOfLineLoadDouble(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final {
- __ Move(result_, std::numeric_limits<double>::quiet_NaN());
- }
-
- private:
- DoubleRegister const result_;
-};
-
-
-class OutOfLineLoadInteger final : public OutOfLineCode {
- public:
- OutOfLineLoadInteger(CodeGenerator* gen, Register result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final { __ mov(result_, zero_reg); }
-
- private:
- Register const result_;
-};
-
-
class OutOfLineRound : public OutOfLineCode {
public:
OutOfLineRound(CodeGenerator* gen, DoubleRegister result)
@@ -403,109 +363,6 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
}
} // namespace
-#define ASSEMBLE_BOUNDS_CHECK_REGISTER(offset, length, out_of_bounds) \
- do { \
- if (!length.is_reg() && base::bits::IsPowerOfTwo(length.immediate())) { \
- __ And(kScratchReg, offset, Operand(~(length.immediate() - 1))); \
- __ Branch(USE_DELAY_SLOT, out_of_bounds, ne, kScratchReg, \
- Operand(zero_reg)); \
- } else { \
- __ Branch(USE_DELAY_SLOT, out_of_bounds, hs, offset, length); \
- } \
- } while (0)
-
-#define ASSEMBLE_BOUNDS_CHECK_IMMEDIATE(offset, length, out_of_bounds) \
- do { \
- if (!length.is_reg() && base::bits::IsPowerOfTwo(length.immediate())) { \
- __ Or(kScratchReg, zero_reg, Operand(offset)); \
- __ And(kScratchReg, kScratchReg, Operand(~(length.immediate() - 1))); \
- __ Branch(out_of_bounds, ne, kScratchReg, Operand(zero_reg)); \
- } else { \
- __ Branch(out_of_bounds, ls, length.rm(), Operand(offset)); \
- } \
- } while (0)
-
-#define ASSEMBLE_CHECKED_LOAD_FLOAT(width, asm_instr) \
- do { \
- auto result = i.Output##width##Register(); \
- auto ool = new (zone()) OutOfLineLoad##width(this, result); \
- if (instr->InputAt(0)->IsRegister()) { \
- auto offset = i.InputRegister(0); \
- ASSEMBLE_BOUNDS_CHECK_REGISTER(offset, i.InputOperand(1), ool->entry()); \
- __ And(kScratchReg, offset, Operand(0xffffffff)); \
- __ Daddu(kScratchReg, i.InputRegister(2), kScratchReg); \
- __ asm_instr(result, MemOperand(kScratchReg, 0)); \
- } else { \
- int offset = static_cast<int>(i.InputOperand(0).immediate()); \
- ASSEMBLE_BOUNDS_CHECK_IMMEDIATE(offset, i.InputOperand(1), \
- ool->entry()); \
- __ asm_instr(result, MemOperand(i.InputRegister(2), offset)); \
- } \
- __ bind(ool->exit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
- do { \
- auto result = i.OutputRegister(); \
- auto ool = new (zone()) OutOfLineLoadInteger(this, result); \
- if (instr->InputAt(0)->IsRegister()) { \
- auto offset = i.InputRegister(0); \
- ASSEMBLE_BOUNDS_CHECK_REGISTER(offset, i.InputOperand(1), ool->entry()); \
- __ And(kScratchReg, offset, Operand(0xffffffff)); \
- __ Daddu(kScratchReg, i.InputRegister(2), kScratchReg); \
- __ asm_instr(result, MemOperand(kScratchReg, 0)); \
- } else { \
- int offset = static_cast<int>(i.InputOperand(0).immediate()); \
- ASSEMBLE_BOUNDS_CHECK_IMMEDIATE(offset, i.InputOperand(1), \
- ool->entry()); \
- __ asm_instr(result, MemOperand(i.InputRegister(2), offset)); \
- } \
- __ bind(ool->exit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_STORE_FLOAT(width, asm_instr) \
- do { \
- Label done; \
- if (instr->InputAt(0)->IsRegister()) { \
- auto offset = i.InputRegister(0); \
- auto value = i.InputOrZero##width##Register(2); \
- if (value == kDoubleRegZero && !__ IsDoubleZeroRegSet()) { \
- __ Move(kDoubleRegZero, 0.0); \
- } \
- ASSEMBLE_BOUNDS_CHECK_REGISTER(offset, i.InputOperand(1), &done); \
- __ And(kScratchReg, offset, Operand(0xffffffff)); \
- __ Daddu(kScratchReg, i.InputRegister(3), kScratchReg); \
- __ asm_instr(value, MemOperand(kScratchReg, 0)); \
- } else { \
- int offset = static_cast<int>(i.InputOperand(0).immediate()); \
- auto value = i.InputOrZero##width##Register(2); \
- if (value == kDoubleRegZero && !__ IsDoubleZeroRegSet()) { \
- __ Move(kDoubleRegZero, 0.0); \
- } \
- ASSEMBLE_BOUNDS_CHECK_IMMEDIATE(offset, i.InputOperand(1), &done); \
- __ asm_instr(value, MemOperand(i.InputRegister(3), offset)); \
- } \
- __ bind(&done); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
- do { \
- Label done; \
- if (instr->InputAt(0)->IsRegister()) { \
- auto offset = i.InputRegister(0); \
- auto value = i.InputOrZeroRegister(2); \
- ASSEMBLE_BOUNDS_CHECK_REGISTER(offset, i.InputOperand(1), &done); \
- __ And(kScratchReg, offset, Operand(0xffffffff)); \
- __ Daddu(kScratchReg, i.InputRegister(3), kScratchReg); \
- __ asm_instr(value, MemOperand(kScratchReg, 0)); \
- } else { \
- int offset = static_cast<int>(i.InputOperand(0).immediate()); \
- auto value = i.InputOrZeroRegister(2); \
- ASSEMBLE_BOUNDS_CHECK_IMMEDIATE(offset, i.InputOperand(1), &done); \
- __ asm_instr(value, MemOperand(i.InputRegister(3), offset)); \
- } \
- __ bind(&done); \
- } while (0)
#define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(mode) \
if (kArchVariant == kMips64r6) { \
@@ -833,14 +690,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (instr->InputAt(0)->IsImmediate()) {
Address wasm_code = reinterpret_cast<Address>(
i.ToConstant(instr->InputAt(0)).ToInt64());
- __ Jump(wasm_code, info()->IsWasm() ? RelocInfo::WASM_CALL
+ __ Call(wasm_code, info()->IsWasm() ? RelocInfo::WASM_CALL
: RelocInfo::JS_TO_WASM_CALL);
} else {
__ daddiu(at, i.InputRegister(0), 0);
- __ Jump(at);
+ __ Call(at);
}
+ RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
- frame_access_state()->SetFrameAccessToDefault();
break;
}
case kArchTailCallCodeObjectFromJSFunction:
@@ -886,7 +743,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
__ Ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
- __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
+ __ Assert(eq, AbortReason::kWrongFunctionContext, cp,
+ Operand(kScratchReg));
}
__ Ld(at, FieldMemOperand(func, JSFunction::kCodeOffset));
__ Daddu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -1050,7 +908,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (FLAG_debug_code && alignment > 0) {
// Verify that the output_register is properly aligned.
__ And(kScratchReg, i.OutputRegister(), Operand(kPointerSize - 1));
- __ Assert(eq, kAllocationIsNotDoubleAligned, kScratchReg,
+ __ Assert(eq, AbortReason::kAllocationIsNotDoubleAligned, kScratchReg,
Operand(zero_reg));
}
if (alignment == 2 * kPointerSize) {
@@ -1369,7 +1227,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register dst = i.OutputRegister();
uint32_t B0 = 0x55555555; // (T)~(T)0/3
uint32_t B1 = 0x33333333; // (T)~(T)0/15*3
- uint32_t B2 = 0x0f0f0f0f; // (T)~(T)0/255*15
+ uint32_t B2 = 0x0F0F0F0F; // (T)~(T)0/255*15
uint32_t value = 0x01010101; // (T)~(T)0/255
uint32_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE
__ srl(kScratchReg, src, 1);
@@ -1394,7 +1252,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register dst = i.OutputRegister();
uint64_t B0 = 0x5555555555555555l; // (T)~(T)0/3
uint64_t B1 = 0x3333333333333333l; // (T)~(T)0/15*3
- uint64_t B2 = 0x0f0f0f0f0f0f0f0fl; // (T)~(T)0/255*15
+ uint64_t B2 = 0x0F0F0F0F0F0F0F0Fl; // (T)~(T)0/255*15
uint64_t value = 0x0101010101010101l; // (T)~(T)0/255
uint64_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE
__ dsrl(kScratchReg, src, 1);
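
The constants above (B0, B1, B2, value, shift) implement the classic SWAR parallel bit count: pair sums, then nibble sums, then byte sums, then one multiply to collect the byte totals into the top byte. A standalone 32-bit rendition of the same algorithm, for illustration only (the code generator emits the equivalent MIPS instruction sequence instead):

    #include <cstdint>
    #include <cstdio>

    uint32_t PopCount32(uint32_t v) {
      v = v - ((v >> 1) & 0x55555555);                 // B0: count bit pairs
      v = (v & 0x33333333) + ((v >> 2) & 0x33333333);  // B1: sum into nibbles
      v = (v + (v >> 4)) & 0x0F0F0F0F;                 // B2: sum into bytes
      return (v * 0x01010101) >> 24;                   // byte totals -> top byte
    }

    int main() { std::printf("%u\n", PopCount32(0xF0F01234u)); }  // prints 13
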
@@ -2041,6 +1899,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->IncreaseSPDelta(1);
}
break;
+ case kMips64Peek: {
+ // The incoming value is 0-based, but we need a 1-based value.
+ int reverse_slot = MiscField::decode(instr->opcode()) + 1;
+ int offset =
+ FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
+ if (instr->OutputAt(0)->IsFPRegister()) {
+ LocationOperand* op = LocationOperand::cast(instr->OutputAt(0));
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ Ldc1(i.OutputDoubleRegister(), MemOperand(fp, offset));
+ } else {
+ DCHECK_EQ(op->representation(), MachineRepresentation::kFloat32);
+ __ lwc1(i.OutputSingleRegister(0), MemOperand(fp, offset));
+ }
+ } else {
+ __ Ld(i.OutputRegister(0), MemOperand(fp, offset));
+ }
+ break;
+ }
case kMips64StackClaim: {
__ Dsubu(sp, sp, Operand(i.InputInt32(0)));
frame_access_state()->IncreaseSPDelta(i.InputInt32(0) / kPointerSize);
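
The kMips64Peek case added above converts the 0-based slot index carried in MiscField to a 1-based reverse slot, then to a frame-pointer-relative offset through FrameSlotToFPOffset. A small sketch of the index arithmetic with assumed values (16 total frame slots, 8-byte pointers); the printed byte offset is illustrative only, since FrameSlotToFPOffset's exact bias is not reproduced here:

    #include <cstdio>

    int main() {
      const int kPointerSize = 8;          // assumed 64-bit slots
      int total_frame_slots = 16;          // frame()->GetTotalFrameSlotCount()
      int misc_field = 0;                  // 0-based index from the opcode
      int reverse_slot = misc_field + 1;   // 1-based, as the comment says
      int slot = total_frame_slots - reverse_slot;  // fed to FrameSlotToFPOffset
      std::printf("peek slot %d (~%d bytes from fp)\n",
                  slot, slot * kPointerSize);
    }
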
@@ -2063,48 +1939,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ dsrl32(i.OutputRegister(0), i.OutputRegister(0), 0);
break;
}
- case kCheckedLoadInt8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(Lb);
- break;
- case kCheckedLoadUint8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(Lbu);
- break;
- case kCheckedLoadInt16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(Lh);
- break;
- case kCheckedLoadUint16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(Lhu);
- break;
- case kCheckedLoadWord32:
- ASSEMBLE_CHECKED_LOAD_INTEGER(Lw);
- break;
- case kCheckedLoadWord64:
- ASSEMBLE_CHECKED_LOAD_INTEGER(Ld);
- break;
- case kCheckedLoadFloat32:
- ASSEMBLE_CHECKED_LOAD_FLOAT(Single, Lwc1);
- break;
- case kCheckedLoadFloat64:
- ASSEMBLE_CHECKED_LOAD_FLOAT(Double, Ldc1);
- break;
- case kCheckedStoreWord8:
- ASSEMBLE_CHECKED_STORE_INTEGER(Sb);
- break;
- case kCheckedStoreWord16:
- ASSEMBLE_CHECKED_STORE_INTEGER(Sh);
- break;
- case kCheckedStoreWord32:
- ASSEMBLE_CHECKED_STORE_INTEGER(Sw);
- break;
- case kCheckedStoreWord64:
- ASSEMBLE_CHECKED_STORE_INTEGER(Sd);
- break;
- case kCheckedStoreFloat32:
- ASSEMBLE_CHECKED_STORE_FLOAT(Single, Swc1);
- break;
- case kCheckedStoreFloat64:
- ASSEMBLE_CHECKED_STORE_FLOAT(Double, Sdc1);
- break;
case kAtomicLoadInt8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lb);
break;
@@ -2183,7 +2017,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Xor, Xor)
#undef ATOMIC_BINOP_CASE
case kMips64AssertEqual:
- __ Assert(eq, static_cast<BailoutReason>(i.InputOperand(2).immediate()),
+ __ Assert(eq, static_cast<AbortReason>(i.InputOperand(2).immediate()),
i.InputRegister(0), Operand(i.InputRegister(1)));
break;
case kMips64S128Zero: {
@@ -2889,7 +2723,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (src0 == src1) {
// Unary S32x4 shuffles are handled with the shf.w instruction
- unsigned lane = shuffle & 0xff;
+ unsigned lane = shuffle & 0xFF;
if (FLAG_debug_code) {
// for a unary instruction, all four lanes should belong to the
// same range, which can be one of these:
@@ -2897,7 +2731,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (lane >= 4) {
int32_t shuffle_helper = shuffle;
for (int i = 0; i < 4; ++i) {
- lane = shuffle_helper & 0xff;
+ lane = shuffle_helper & 0xFF;
CHECK_GE(lane, 4);
shuffle_helper >>= 8;
}
@@ -2905,7 +2739,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
uint32_t i8 = 0;
for (int i = 0; i < 4; i++) {
- lane = shuffle & 0xff;
+ lane = shuffle & 0xFF;
if (lane >= 4) {
lane -= 4;
}
@@ -3465,7 +3299,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
- __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
+ __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
}
}
}
@@ -3747,7 +3581,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
- __ Abort(kShouldNotDirectlyEnterOsrFunction);
+ __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
// Unoptimized code jumps directly to this entrypoint while the unoptimized
// frame is still on the stack. Optimized code uses OSR values directly from
@@ -3760,10 +3594,12 @@ void CodeGenerator::AssembleConstructFrame() {
const RegList saves = descriptor->CalleeSavedRegisters();
const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
+ const int returns = frame()->GetReturnSlotCount();
- // Skip callee-saved slots, which are pushed below.
+ // Skip callee-saved and return slots, which are pushed below.
shrink_slots -= base::bits::CountPopulation(saves);
shrink_slots -= base::bits::CountPopulation(saves_fpu);
+ shrink_slots -= returns;
if (shrink_slots > 0) {
__ Dsubu(sp, sp, Operand(shrink_slots * kPointerSize));
}
@@ -3779,11 +3615,21 @@ void CodeGenerator::AssembleConstructFrame() {
__ MultiPush(saves);
DCHECK_EQ(kNumCalleeSaved, base::bits::CountPopulation(saves) + 1);
}
+
+ if (returns != 0) {
+ // Create space for the return value slots.
+ __ Dsubu(sp, sp, Operand(returns * kPointerSize));
+ }
}
void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ const int returns = frame()->GetReturnSlotCount();
+ if (returns != 0) {
+ __ Daddu(sp, sp, Operand(returns * kPointerSize));
+ }
+
// Restore GP registers.
const RegList saves = descriptor->CalleeSavedRegisters();
if (saves != 0) {
@@ -3816,7 +3662,6 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
}
int pop_count = static_cast<int>(descriptor->StackParameterCount());
if (pop->IsImmediate()) {
- DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
pop_count += g.ToConstant(pop).ToInt32();
} else {
Register pop_reg = g.ToRegister(pop);
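
AssembleConstructFrame now carves frame()->GetReturnSlotCount() slots out of shrink_slots and allocates them after the callee-saved pushes, so they sit closest to sp; AssembleReturn releases them before restoring any registers. A trivial balance check of that bookkeeping, with example values (the constants are assumptions, not taken from the patch):

    #include <cstdio>

    int main() {
      const int kPointerSize = 8;
      int returns = 2;                // frame()->GetReturnSlotCount(), assumed
      int sp = 0;
      sp -= returns * kPointerSize;   // prologue: __ Dsubu(sp, sp, ...)
      sp += returns * kPointerSize;   // epilogue: __ Daddu(sp, sp, ...)
      std::printf("sp balanced: %s\n", sp == 0 ? "yes" : "no");
    }
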
diff --git a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
index 1b420d3819..3058812bec 100644
--- a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
+++ b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
@@ -156,6 +156,7 @@ namespace compiler {
V(Mips64Float64Min) \
V(Mips64Float64SilenceNaN) \
V(Mips64Push) \
+ V(Mips64Peek) \
V(Mips64StoreToStackSlot) \
V(Mips64ByteSwap64) \
V(Mips64ByteSwap32) \
diff --git a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
index 0b490c7d77..38f077c4e6 100644
--- a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
@@ -36,7 +36,7 @@ class Mips64OperandGenerator final : public OperandGenerator {
InstructionOperand UseRegisterOrImmediateZero(Node* node) {
if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) ||
(IsFloatConstant(node) &&
- (bit_cast<int64_t>(GetFloatConstantValue(node)) == V8_INT64_C(0)))) {
+ (bit_cast<int64_t>(GetFloatConstantValue(node)) == 0))) {
return UseImmediate(node);
}
return UseRegister(node);
@@ -106,20 +106,6 @@ class Mips64OperandGenerator final : public OperandGenerator {
case kMips64Swc1:
case kMips64Ldc1:
case kMips64Sdc1:
- case kCheckedLoadInt8:
- case kCheckedLoadUint8:
- case kCheckedLoadInt16:
- case kCheckedLoadUint16:
- case kCheckedLoadWord32:
- case kCheckedLoadWord64:
- case kCheckedStoreWord8:
- case kCheckedStoreWord16:
- case kCheckedStoreWord32:
- case kCheckedStoreWord64:
- case kCheckedLoadFloat32:
- case kCheckedLoadFloat64:
- case kCheckedStoreFloat32:
- case kCheckedStoreFloat64:
return is_int32(value);
default:
return is_int16(value);
@@ -329,7 +315,8 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -540,7 +527,7 @@ void InstructionSelector::VisitWord32And(Node* node) {
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue()) {
// Any shift value can match; int32 shifts use `value % 32`.
- uint32_t lsb = mleft.right().Value() & 0x1f;
+ uint32_t lsb = mleft.right().Value() & 0x1F;
// Ext cannot extract bits past the register size, however since
// shifting the original value would have introduced some zeros we can
@@ -590,7 +577,7 @@ void InstructionSelector::VisitWord64And(Node* node) {
Int64BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue()) {
// Any shift value can match; int64 shifts use `value % 64`.
- uint32_t lsb = static_cast<uint32_t>(mleft.right().Value() & 0x3f);
+ uint32_t lsb = static_cast<uint32_t>(mleft.right().Value() & 0x3F);
// Dext cannot extract bits past the register size, however since
// shifting the original value would have introduced some zeros we can
@@ -720,7 +707,7 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
void InstructionSelector::VisitWord32Shr(Node* node) {
Int32BinopMatcher m(node);
if (m.left().IsWord32And() && m.right().HasValue()) {
- uint32_t lsb = m.right().Value() & 0x1f;
+ uint32_t lsb = m.right().Value() & 0x1F;
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue() && mleft.right().Value() != 0) {
// Select Ext for Shr(And(x, mask), imm) where the result of the mask is
@@ -813,7 +800,7 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
void InstructionSelector::VisitWord64Shr(Node* node) {
Int64BinopMatcher m(node);
if (m.left().IsWord64And() && m.right().HasValue()) {
- uint32_t lsb = m.right().Value() & 0x3f;
+ uint32_t lsb = m.right().Value() & 0x3F;
Int64BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue() && mleft.right().Value() != 0) {
// Select Dext for Shr(And(x, mask), imm) where the result of the mask is
@@ -1676,7 +1663,7 @@ void InstructionSelector::EmitPrepareArguments(
// Poke any stack arguments.
int slot = kCArgSlotCount;
for (PushParameter input : (*arguments)) {
- Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
+ Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
g.TempImmediate(slot << kPointerSizeLog2));
++slot;
}
@@ -1688,14 +1675,36 @@ void InstructionSelector::EmitPrepareArguments(
}
for (size_t n = 0; n < arguments->size(); ++n) {
PushParameter input = (*arguments)[n];
- if (input.node()) {
- Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
+ if (input.node) {
+ Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
g.TempImmediate(static_cast<int>(n << kPointerSizeLog2)));
}
}
}
}
+void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
+ const CallDescriptor* descriptor,
+ Node* node) {
+ Mips64OperandGenerator g(this);
+
+ int reverse_slot = 0;
+ for (PushParameter output : *results) {
+ if (!output.location.IsCallerFrameSlot()) continue;
+ // Skip any alignment holes in nodes.
+ if (output.node != nullptr) {
+ DCHECK(!descriptor->IsCFunctionCall());
+ if (output.location.GetType() == MachineType::Float32()) {
+ MarkAsFloat32(output.node);
+ } else if (output.location.GetType() == MachineType::Float64()) {
+ MarkAsFloat64(output.node);
+ }
+ InstructionOperand result = g.DefineAsRegister(output.node);
+ Emit(kMips64Peek | MiscField::encode(reverse_slot), result);
+ }
+ reverse_slot += output.location.GetSizeInPointers();
+ }
+}
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
@@ -1806,127 +1815,6 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
}
}
-void InstructionSelector::VisitCheckedLoad(Node* node) {
- CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
- Mips64OperandGenerator g(this);
- Node* const buffer = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- ArchOpcode opcode = kArchNop;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedLoadWord32;
- break;
- case MachineRepresentation::kWord64:
- opcode = kCheckedLoadWord64;
- break;
- case MachineRepresentation::kFloat32:
- opcode = kCheckedLoadFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedLoadFloat64;
- break;
- case MachineRepresentation::kBit:
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged:
- case MachineRepresentation::kSimd128:
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- InstructionOperand offset_operand = g.CanBeImmediate(offset, opcode)
- ? g.UseImmediate(offset)
- : g.UseRegister(offset);
-
- InstructionOperand length_operand = (!g.CanBeImmediate(offset, opcode))
- ? g.CanBeImmediate(length, opcode)
- ? g.UseImmediate(length)
- : g.UseRegister(length)
- : g.UseRegister(length);
-
- if (length->opcode() == IrOpcode::kInt32Constant) {
- Int32Matcher m(length);
- if (m.IsPowerOf2()) {
- Emit(opcode, g.DefineAsRegister(node), offset_operand,
- g.UseImmediate(length), g.UseRegister(buffer));
- return;
- }
- }
-
- Emit(opcode | AddressingModeField::encode(kMode_MRI),
- g.DefineAsRegister(node), offset_operand, length_operand,
- g.UseRegister(buffer));
-}
-
-
-void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
- Mips64OperandGenerator g(this);
- Node* const buffer = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- Node* const value = node->InputAt(3);
- ArchOpcode opcode = kArchNop;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kCheckedStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kCheckedStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedStoreWord32;
- break;
- case MachineRepresentation::kWord64:
- opcode = kCheckedStoreWord64;
- break;
- case MachineRepresentation::kFloat32:
- opcode = kCheckedStoreFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedStoreFloat64;
- break;
- case MachineRepresentation::kBit:
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged:
- case MachineRepresentation::kSimd128:
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- InstructionOperand offset_operand = g.CanBeImmediate(offset, opcode)
- ? g.UseImmediate(offset)
- : g.UseRegister(offset);
-
- InstructionOperand length_operand = (!g.CanBeImmediate(offset, opcode))
- ? g.CanBeImmediate(length, opcode)
- ? g.UseImmediate(length)
- : g.UseRegister(length)
- : g.UseRegister(length);
-
- if (length->opcode() == IrOpcode::kInt32Constant) {
- Int32Matcher m(length);
- if (m.IsPowerOf2()) {
- Emit(opcode, g.NoOutput(), offset_operand, g.UseImmediate(length),
- g.UseRegisterOrImmediateZero(value), g.UseRegister(buffer));
- return;
- }
- }
-
- Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- offset_operand, length_operand, g.UseRegisterOrImmediateZero(value),
- g.UseRegister(buffer));
-}
-
-
namespace {
// Shared routine for multiple compare operations.
@@ -1940,7 +1828,8 @@ static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
- cont->reason(), cont->frame_state());
+ cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
} else {
@@ -2111,7 +2000,8 @@ void VisitOptimizedWord32Compare(InstructionSelector* selector, Node* node,
selector->Emit(
kMips64AssertEqual, g.NoOutput(), optimizedResult, fullResult,
- g.TempImmediate(BailoutReason::kUnsupportedNonPrimitiveCompare));
+ g.TempImmediate(
+ static_cast<int>(AbortReason::kUnsupportedNonPrimitiveCompare)));
}
VisitWordCompare(selector, node, opcode, cont, false);
@@ -2157,7 +2047,7 @@ void EmitWordCompareZero(InstructionSelector* selector, Node* value,
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand,
g.TempImmediate(0), cont->kind(), cont->reason(),
- cont->frame_state());
+ cont->feedback(), cont->frame_state());
} else if (cont->IsTrap()) {
selector->Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
g.TempImmediate(cont->trap_id()));
@@ -2297,14 +2187,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), node->InputAt(1));
+ kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), node->InputAt(1));
+ kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
@@ -2750,6 +2640,8 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
UNREACHABLE();
}
+void InstructionSelector::VisitSpeculationFence(Node* node) { UNREACHABLE(); }
+
#define SIMD_TYPE_LIST(V) \
V(F32x4) \
V(I32x4) \
diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc
index e312dc4354..22004337eb 100644
--- a/deps/v8/src/compiler/node-properties.cc
+++ b/deps/v8/src/compiler/node-properties.cc
@@ -12,6 +12,7 @@
#include "src/compiler/simplified-operator.h"
#include "src/compiler/verifier.h"
#include "src/handles-inl.h"
+#include "src/zone/zone-handle-set.h"
namespace v8 {
namespace internal {
@@ -462,6 +463,20 @@ NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMaps(
if (IsSame(receiver, effect)) receiver = GetValueInput(effect, 0);
break;
}
+ case IrOpcode::kEffectPhi: {
+ Node* control = GetControlInput(effect);
+ if (control->opcode() != IrOpcode::kLoop) {
+ DCHECK(control->opcode() == IrOpcode::kDead ||
+ control->opcode() == IrOpcode::kMerge);
+ return kNoReceiverMaps;
+ }
+
+ // Continue the search for the receiver map outside the loop. Since
+ // operations inside the loop may change the map, the result is unreliable.
+ effect = GetEffectInput(effect, 0);
+ result = kUnreliableReceiverMaps;
+ continue;
+ }
default: {
DCHECK_EQ(1, effect->op()->EffectOutputCount());
if (effect->op()->EffectInputCount() != 1) {
@@ -488,6 +503,19 @@ NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMaps(
}
// static
+MaybeHandle<Map> NodeProperties::GetMapWitness(Node* node) {
+ ZoneHandleSet<Map> maps;
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &maps);
+ if (result == NodeProperties::kReliableReceiverMaps && maps.size() == 1) {
+ return maps[0];
+ }
+ return MaybeHandle<Map>();
+}
+
+// static
bool NodeProperties::NoObservableSideEffectBetween(Node* effect,
Node* dominator) {
while (effect != dominator) {
@@ -538,19 +566,19 @@ bool NodeProperties::CanBePrimitive(Node* receiver, Node* effect) {
bool NodeProperties::CanBeNullOrUndefined(Node* receiver, Node* effect) {
if (CanBePrimitive(receiver, effect)) {
switch (receiver->opcode()) {
- case IrOpcode::kCheckSmi:
+ case IrOpcode::kCheckInternalizedString:
case IrOpcode::kCheckNumber:
- case IrOpcode::kCheckSymbol:
- case IrOpcode::kCheckString:
case IrOpcode::kCheckSeqString:
- case IrOpcode::kCheckInternalizedString:
- case IrOpcode::kToBoolean:
+ case IrOpcode::kCheckSmi:
+ case IrOpcode::kCheckString:
+ case IrOpcode::kCheckSymbol:
case IrOpcode::kJSToInteger:
case IrOpcode::kJSToLength:
case IrOpcode::kJSToName:
case IrOpcode::kJSToNumber:
case IrOpcode::kJSToNumeric:
case IrOpcode::kJSToString:
+ case IrOpcode::kToBoolean:
return false;
case IrOpcode::kHeapConstant: {
Handle<HeapObject> value = HeapObjectMatcher(receiver).Value();
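
Two related additions land in node-properties: InferReceiverMaps can now walk out of a loop through a kEffectPhi, downgrading its answer to kUnreliableReceiverMaps because the loop body may change the map, and GetMapWitness packages the common "exactly one reliable map" query. A hedged sketch of how a reducer might use the new helper; the surrounding caller shape is illustrative, not taken from this patch:

    // Assumes `node` carries its receiver at value input 1, as
    // GetMapWitness itself does.
    Handle<Map> map;
    if (NodeProperties::GetMapWitness(node).ToHandle(&map)) {
      // Exactly one reliable receiver map: safe to specialize on it.
    } else {
      // No map, several maps, or unreliable maps: skip the optimization.
    }
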
diff --git a/deps/v8/src/compiler/node-properties.h b/deps/v8/src/compiler/node-properties.h
index 5ccc15c1ab..abc6622c83 100644
--- a/deps/v8/src/compiler/node-properties.h
+++ b/deps/v8/src/compiler/node-properties.h
@@ -8,6 +8,7 @@
#include "src/compiler/node.h"
#include "src/compiler/types.h"
#include "src/globals.h"
+#include "src/objects/map.h"
#include "src/zone/zone-handle-set.h"
namespace v8 {
@@ -153,6 +154,8 @@ class V8_EXPORT_PRIVATE NodeProperties final {
static InferReceiverMapsResult InferReceiverMaps(
Node* receiver, Node* effect, ZoneHandleSet<Map>* maps_return);
+ static MaybeHandle<Map> GetMapWitness(Node* node);
+
// Walks up the {effect} chain to check that there's no observable side-effect
// between the {effect} and its {dominator}. Aborts the walk if there's a join
// in the effect chain.
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index 3c3650b8f4..ec6c720af2 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -175,24 +175,25 @@
V(JSConstructWithArrayLike) \
V(JSConstructWithSpread)
-#define JS_OTHER_OP_LIST(V) \
- JS_CONSTRUCT_OP_LIST(V) \
- V(JSCallForwardVarargs) \
- V(JSCall) \
- V(JSCallWithArrayLike) \
- V(JSCallWithSpread) \
- V(JSCallRuntime) \
- V(JSForInEnumerate) \
- V(JSForInNext) \
- V(JSForInPrepare) \
- V(JSLoadMessage) \
- V(JSStoreMessage) \
- V(JSLoadModule) \
- V(JSStoreModule) \
- V(JSGeneratorStore) \
- V(JSGeneratorRestoreContinuation) \
- V(JSGeneratorRestoreRegister) \
- V(JSStackCheck) \
+#define JS_OTHER_OP_LIST(V) \
+ JS_CONSTRUCT_OP_LIST(V) \
+ V(JSCallForwardVarargs) \
+ V(JSCall) \
+ V(JSCallWithArrayLike) \
+ V(JSCallWithSpread) \
+ V(JSCallRuntime) \
+ V(JSForInEnumerate) \
+ V(JSForInNext) \
+ V(JSForInPrepare) \
+ V(JSLoadMessage) \
+ V(JSStoreMessage) \
+ V(JSLoadModule) \
+ V(JSStoreModule) \
+ V(JSGeneratorStore) \
+ V(JSGeneratorRestoreContinuation) \
+ V(JSGeneratorRestoreRegister) \
+ V(JSGeneratorRestoreInputOrDebugPos) \
+ V(JSStackCheck) \
V(JSDebugger)
#define JS_OP_LIST(V) \
@@ -317,6 +318,7 @@
V(NumberTrunc) \
V(NumberToBoolean) \
V(NumberToInt32) \
+ V(NumberToString) \
V(NumberToUint32) \
V(NumberToUint8Clamped) \
V(NumberSilenceNaN)
@@ -332,9 +334,12 @@
V(StringCharAt) \
V(StringCharCodeAt) \
V(SeqStringCharCodeAt) \
+ V(StringCodePointAt) \
+ V(SeqStringCodePointAt) \
V(StringFromCharCode) \
V(StringFromCodePoint) \
V(StringIndexOf) \
+ V(StringLength) \
V(StringToLowerCaseIntl) \
V(StringToUpperCaseIntl) \
V(CheckBounds) \
@@ -371,6 +376,7 @@
V(TransitionAndStoreNumberElement) \
V(TransitionAndStoreNonNumberElement) \
V(ToBoolean) \
+ V(NumberIsFloat64Hole) \
V(ObjectIsArrayBufferView) \
V(ObjectIsBigInt) \
V(ObjectIsCallable) \
@@ -390,6 +396,7 @@
V(NewDoubleElements) \
V(NewSmiOrObjectElements) \
V(NewArgumentsElements) \
+ V(NewConsString) \
V(ArrayBufferWasNeutered) \
V(EnsureWritableFastElements) \
V(MaybeGrowFastElements) \
@@ -593,8 +600,6 @@
V(LoadStackPointer) \
V(LoadFramePointer) \
V(LoadParentFramePointer) \
- V(CheckedLoad) \
- V(CheckedStore) \
V(UnalignedLoad) \
V(UnalignedStore) \
V(Int32PairAdd) \
@@ -614,6 +619,7 @@
V(AtomicAnd) \
V(AtomicOr) \
V(AtomicXor) \
+ V(SpeculationFence) \
V(UnsafePointerAdd)
#define MACHINE_SIMD_OP_LIST(V) \
diff --git a/deps/v8/src/compiler/operation-typer.cc b/deps/v8/src/compiler/operation-typer.cc
index 46d6557b21..5819655633 100644
--- a/deps/v8/src/compiler/operation-typer.cc
+++ b/deps/v8/src/compiler/operation-typer.cc
@@ -24,6 +24,8 @@ OperationTyper::OperationTyper(Isolate* isolate, Zone* zone)
Type* truncating_to_zero = Type::MinusZeroOrNaN();
DCHECK(!truncating_to_zero->Maybe(Type::Integral32()));
+ singleton_NaN_string_ = Type::HeapConstant(factory->NaN_string(), zone);
+ singleton_zero_string_ = Type::HeapConstant(factory->zero_string(), zone);
singleton_false_ = Type::HeapConstant(factory->false_value(), zone);
singleton_true_ = Type::HeapConstant(factory->true_value(), zone);
singleton_the_hole_ = Type::HeapConstant(factory->the_hole_value(), zone);
@@ -503,6 +505,14 @@ Type* OperationTyper::NumberToInt32(Type* type) {
return Type::Signed32();
}
+Type* OperationTyper::NumberToString(Type* type) {
+ DCHECK(type->Is(Type::Number()));
+
+ if (type->Is(Type::NaN())) return singleton_NaN_string_;
+ if (type->Is(cache_.kZeroOrMinusZero)) return singleton_zero_string_;
+ return Type::SeqString();
+}
+
Type* OperationTyper::NumberToUint32(Type* type) {
DCHECK(type->Is(Type::Number()));
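
The new NumberToString typing rule returns the "NaN" string singleton for NaN inputs, the "0" singleton for plus or minus zero, and Type::SeqString() for everything else. A value-level analogue in plain C++ (the real typer operates on type lattices, not values, so this only illustrates the three outcomes):

    #include <cmath>
    #include <cstdio>

    const char* NumberToStringType(double x) {
      if (std::isnan(x)) return "singleton \"NaN\"";
      if (x == 0.0) return "singleton \"0\"";  // also matches -0.0
      return "SeqString";
    }

    int main() {
      std::printf("%s\n", NumberToStringType(NAN));    // singleton "NaN"
      std::printf("%s\n", NumberToStringType(-0.0));   // singleton "0"
      std::printf("%s\n", NumberToStringType(42.0));   // SeqString
    }
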
diff --git a/deps/v8/src/compiler/operation-typer.h b/deps/v8/src/compiler/operation-typer.h
index 4a9c4ffb08..282cb0c750 100644
--- a/deps/v8/src/compiler/operation-typer.h
+++ b/deps/v8/src/compiler/operation-typer.h
@@ -94,6 +94,8 @@ class V8_EXPORT_PRIVATE OperationTyper {
Type* infinity_;
Type* minus_infinity_;
+ Type* singleton_NaN_string_;
+ Type* singleton_zero_string_;
Type* singleton_false_;
Type* singleton_true_;
Type* singleton_the_hole_;
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index 045d695ecf..b4567ab04f 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -253,11 +253,6 @@ class PipelineData {
source_position_output_ = source_position_output;
}
- std::vector<trap_handler::ProtectedInstructionData>* protected_instructions()
- const {
- return protected_instructions_;
- }
-
JumpOptimizationInfo* jump_optimization_info() const {
return jump_optimization_info_;
}
@@ -435,21 +430,26 @@ class PipelineImpl final {
template <typename Phase, typename Arg0, typename Arg1>
void Run(Arg0 arg_0, Arg1 arg_1);
- // Run the graph creation and initial optimization passes.
+ // Step A. Run the graph creation and initial optimization passes.
bool CreateGraph();
- // Run the concurrent optimization passes.
+ // B. Run the concurrent optimization passes.
bool OptimizeGraph(Linkage* linkage);
- // Run the code assembly pass.
+ // Substep B.1. Produce a scheduled graph.
+ void ComputeScheduledGraph();
+
+ // Substep B.2. Select instructions from a scheduled graph.
+ bool SelectInstructions(Linkage* linkage);
+
+ // Step C. Run the code assembly pass.
void AssembleCode(Linkage* linkage);
- // Run the code finalization pass.
+ // Step D. Run the code finalization pass.
Handle<Code> FinalizeCode();
- bool ScheduleAndSelectInstructions(Linkage* linkage, bool trim_graph);
void RunPrintAndVerify(const char* phase, bool untyped = false);
- Handle<Code> ScheduleAndGenerateCode(CallDescriptor* call_descriptor);
+ Handle<Code> GenerateCode(CallDescriptor* call_descriptor);
void AllocateRegisters(const RegisterConfiguration* config,
CallDescriptor* descriptor, bool run_verifier);
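
The renamed declarations above make the phase ordering explicit: graph creation (A), optimization (B) with scheduling (B.1) and instruction selection (B.2) as separate substeps, assembly (C), and finalization (D). A hedged sketch of a full compile with the renamed steps; the driver function is hypothetical, but the member calls and their order match the declarations above (OptimizeGraph itself ends by running B.1 and then B.2):

    Handle<Code> CompileSketch(PipelineImpl& pipeline, Linkage& linkage) {
      if (!pipeline.CreateGraph()) return Handle<Code>();            // A
      if (!pipeline.OptimizeGraph(&linkage)) return Handle<Code>();  // B
      pipeline.AssembleCode(&linkage);                               // C
      return pipeline.FinalizeCode();                                // D
    }
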
@@ -803,7 +803,7 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
if (!pipeline_.CreateGraph()) {
if (isolate->has_pending_exception()) return FAILED; // Stack overflowed.
- return AbortOptimization(kGraphBuildingFailed);
+ return AbortOptimization(BailoutReason::kGraphBuildingFailed);
}
if (compilation_info()->is_osr()) data_.InitializeOsrHelper();
@@ -826,8 +826,8 @@ PipelineCompilationJob::Status PipelineCompilationJob::FinalizeJobImpl(
Isolate* isolate) {
Handle<Code> code = pipeline_.FinalizeCode();
if (code.is_null()) {
- if (compilation_info()->bailout_reason() == kNoReason) {
- return AbortOptimization(kCodeGenerationFailed);
+ if (compilation_info()->bailout_reason() == BailoutReason::kNoReason) {
+ return AbortOptimization(BailoutReason::kCodeGenerationFailed);
}
return FAILED;
}
@@ -964,7 +964,8 @@ PipelineWasmCompilationJob::ExecuteJobImpl() {
pipeline_.RunPrintAndVerify("Optimized Machine", true);
}
- if (!pipeline_.ScheduleAndSelectInstructions(&linkage_, true)) return FAILED;
+ pipeline_.ComputeScheduledGraph();
+ if (!pipeline_.SelectInstructions(&linkage_)) return FAILED;
pipeline_.AssembleCode(&linkage_);
return SUCCEEDED;
}
@@ -995,9 +996,7 @@ PipelineWasmCompilationJob::Status PipelineWasmCompilationJob::FinalizeJobImpl(
}
void PipelineWasmCompilationJob::ValidateImmovableEmbeddedObjects() const {
-#if !DEBUG
- return;
-#endif
+#if DEBUG
// We expect the only embedded objects to be those originating from
// a snapshot, which are immovable.
DisallowHeapAllocation no_gc;
@@ -1038,6 +1037,7 @@ void PipelineWasmCompilationJob::ValidateImmovableEmbeddedObjects() const {
}
CHECK(is_immovable || is_wasm || is_allowed_stub);
}
+#endif
}
template <typename Phase>
@@ -1269,8 +1269,9 @@ struct LoopPeelingPhase {
LoopTree* loop_tree =
LoopFinder::BuildLoopTree(data->jsgraph()->graph(), temp_zone);
- LoopPeeler::PeelInnerLoopsOfTree(data->graph(), data->common(), loop_tree,
- temp_zone);
+ LoopPeeler(data->graph(), data->common(), loop_tree, temp_zone,
+ data->source_positions())
+ .PeelInnerLoopsOfTree();
}
};
@@ -1880,7 +1881,8 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
if (FLAG_turbo_escape) {
Run<EscapeAnalysisPhase>();
if (data->compilation_failed()) {
- info()->AbortOptimization(kCyclicObjectStateDetectedInEscapeAnalysis);
+ info()->AbortOptimization(
+ BailoutReason::kCyclicObjectStateDetectedInEscapeAnalysis);
data->EndPhaseKind();
return false;
}
@@ -1941,7 +1943,9 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
data->source_positions()->RemoveDecorator();
- return ScheduleAndSelectInstructions(linkage, true);
+ ComputeScheduledGraph();
+
+ return SelectInstructions(linkage);
}
Handle<Code> Pipeline::GenerateCodeForCodeStub(
@@ -1982,7 +1986,7 @@ Handle<Code> Pipeline::GenerateCodeForCodeStub(
}
pipeline.Run<VerifyGraphPhase>(false, true);
- return pipeline.ScheduleAndGenerateCode(call_descriptor);
+ return pipeline.GenerateCode(call_descriptor);
}
// static
@@ -2043,7 +2047,12 @@ Handle<Code> Pipeline::GenerateCodeForTesting(
// TODO(rossberg): Should this really be untyped?
pipeline.RunPrintAndVerify("Machine", true);
- return pipeline.ScheduleAndGenerateCode(call_descriptor);
+ // Ensure we have a schedule.
+ if (data.schedule() == nullptr) {
+ pipeline.ComputeScheduledGraph();
+ }
+
+ return pipeline.GenerateCode(call_descriptor);
}
// static
@@ -2082,19 +2091,26 @@ bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
return !data.compilation_failed();
}
-bool PipelineImpl::ScheduleAndSelectInstructions(Linkage* linkage,
- bool trim_graph) {
- CallDescriptor* call_descriptor = linkage->GetIncomingDescriptor();
+void PipelineImpl::ComputeScheduledGraph() {
PipelineData* data = this->data_;
- DCHECK_NOT_NULL(data->graph());
+ // We should only schedule the graph if it is not scheduled yet.
+ DCHECK_NULL(data->schedule());
- if (trim_graph) {
- Run<LateGraphTrimmingPhase>();
- RunPrintAndVerify("Late trimmed", true);
- }
- if (data->schedule() == nullptr) Run<ComputeSchedulePhase>();
+ Run<LateGraphTrimmingPhase>();
+ RunPrintAndVerify("Late trimmed", true);
+
+ Run<ComputeSchedulePhase>();
TraceSchedule(data->info(), data->isolate(), data->schedule());
+}
+
+bool PipelineImpl::SelectInstructions(Linkage* linkage) {
+ CallDescriptor* call_descriptor = linkage->GetIncomingDescriptor();
+ PipelineData* data = this->data_;
+
+ // We should have a scheduled graph.
+ DCHECK_NOT_NULL(data->graph());
+ DCHECK_NOT_NULL(data->schedule());
if (FLAG_turbo_profiling) {
data->set_profiler_data(BasicBlockInstrumentor::Instrument(
@@ -2138,7 +2154,7 @@ bool PipelineImpl::ScheduleAndSelectInstructions(Linkage* linkage,
// Select and schedule instructions covering the scheduled graph.
Run<InstructionSelectionPhase>(linkage);
if (data->compilation_failed()) {
- info()->AbortOptimization(kCodeGenerationFailed);
+ info()->AbortOptimization(BailoutReason::kCodeGenerationFailed);
data->EndPhaseKind();
return false;
}
@@ -2177,7 +2193,8 @@ bool PipelineImpl::ScheduleAndSelectInstructions(Linkage* linkage,
Run<FrameElisionPhase>();
if (data->compilation_failed()) {
- info()->AbortOptimization(kNotEnoughVirtualRegistersRegalloc);
+ info()->AbortOptimization(
+ BailoutReason::kNotEnoughVirtualRegistersRegalloc);
data->EndPhaseKind();
return false;
}
@@ -2208,6 +2225,8 @@ Handle<Code> PipelineImpl::FinalizeCode() {
Run<FinalizeCodePhase>();
Handle<Code> code = data->code();
+ if (code.is_null()) return code;
+
if (data->profiler_data()) {
#if ENABLE_DISASSEMBLER
std::ostringstream os;
@@ -2245,12 +2264,11 @@ Handle<Code> PipelineImpl::FinalizeCode() {
return code;
}
-Handle<Code> PipelineImpl::ScheduleAndGenerateCode(
- CallDescriptor* call_descriptor) {
+Handle<Code> PipelineImpl::GenerateCode(CallDescriptor* call_descriptor) {
Linkage linkage(call_descriptor);
- // Schedule the graph, perform instruction selection and register allocation.
- if (!ScheduleAndSelectInstructions(&linkage, false)) return Handle<Code>();
+ // Perform instruction selection and register allocation.
+ if (!SelectInstructions(&linkage)) return Handle<Code>();
// Generate the final machine code.
AssembleCode(&linkage);
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
index 2dca7794eb..b5b6b5f142 100644
--- a/deps/v8/src/compiler/pipeline.h
+++ b/deps/v8/src/compiler/pipeline.h
@@ -76,7 +76,7 @@ class Pipeline : public AllStatic {
// Run the pipeline on a machine graph and generate code. If {schedule} is
// {nullptr}, then compute a new schedule for code generation.
- static Handle<Code> GenerateCodeForTesting(
+ V8_EXPORT_PRIVATE static Handle<Code> GenerateCodeForTesting(
CompilationInfo* info, Isolate* isolate, CallDescriptor* call_descriptor,
Graph* graph, Schedule* schedule = nullptr,
SourcePositionTable* source_positions = nullptr);
diff --git a/deps/v8/src/compiler/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
index 11fde27fc9..7fc537784c 100644
--- a/deps/v8/src/compiler/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
@@ -118,48 +118,6 @@ static inline bool HasRegisterInput(Instruction* instr, size_t index) {
namespace {
-class OutOfLineLoadNAN32 final : public OutOfLineCode {
- public:
- OutOfLineLoadNAN32(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final {
- __ LoadDoubleLiteral(
- result_, Double(std::numeric_limits<double>::quiet_NaN()), kScratchReg);
- }
-
- private:
- DoubleRegister const result_;
-};
-
-
-class OutOfLineLoadNAN64 final : public OutOfLineCode {
- public:
- OutOfLineLoadNAN64(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final {
- __ LoadDoubleLiteral(
- result_, Double(std::numeric_limits<double>::quiet_NaN()), kScratchReg);
- }
-
- private:
- DoubleRegister const result_;
-};
-
-
-class OutOfLineLoadZero final : public OutOfLineCode {
- public:
- OutOfLineLoadZero(CodeGenerator* gen, Register result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final { __ li(result_, Operand::Zero()); }
-
- private:
- Register const result_;
-};
-
-
class OutOfLineRecordWrite final : public OutOfLineCode {
public:
OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register offset,
@@ -653,134 +611,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
#define CleanUInt32(x)
#endif
-#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, asm_instrx, width) \
- do { \
- DoubleRegister result = i.OutputDoubleRegister(); \
- size_t index = 0; \
- AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode, index); \
- DCHECK_EQ(kMode_MRR, mode); \
- Register offset = operand.rb(); \
- if (HasRegisterInput(instr, 2)) { \
- __ cmplw(offset, i.InputRegister(2)); \
- } else { \
- __ cmplwi(offset, i.InputImmediate(2)); \
- } \
- auto ool = new (zone()) OutOfLineLoadNAN##width(this, result); \
- __ bge(ool->entry()); \
- if (mode == kMode_MRI) { \
- __ asm_instr(result, operand); \
- } else { \
- CleanUInt32(offset); \
- __ asm_instrx(result, operand); \
- } \
- __ bind(ool->exit()); \
- DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr, asm_instrx) \
- do { \
- Register result = i.OutputRegister(); \
- size_t index = 0; \
- AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode, index); \
- DCHECK_EQ(kMode_MRR, mode); \
- Register offset = operand.rb(); \
- if (HasRegisterInput(instr, 2)) { \
- __ cmplw(offset, i.InputRegister(2)); \
- } else { \
- __ cmplwi(offset, i.InputImmediate(2)); \
- } \
- auto ool = new (zone()) OutOfLineLoadZero(this, result); \
- __ bge(ool->entry()); \
- if (mode == kMode_MRI) { \
- __ asm_instr(result, operand); \
- } else { \
- CleanUInt32(offset); \
- __ asm_instrx(result, operand); \
- } \
- __ bind(ool->exit()); \
- DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_STORE_FLOAT32() \
- do { \
- Label done; \
- size_t index = 0; \
- AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode, index); \
- DCHECK_EQ(kMode_MRR, mode); \
- Register offset = operand.rb(); \
- if (HasRegisterInput(instr, 2)) { \
- __ cmplw(offset, i.InputRegister(2)); \
- } else { \
- __ cmplwi(offset, i.InputImmediate(2)); \
- } \
- __ bge(&done); \
- DoubleRegister value = i.InputDoubleRegister(3); \
- __ frsp(kScratchDoubleReg, value); \
- /* removed frsp as instruction-selector checked */ \
- /* value to be kFloat32 */ \
- if (mode == kMode_MRI) { \
- __ stfs(value, operand); \
- } else { \
- CleanUInt32(offset); \
- __ stfsx(value, operand); \
- } \
- __ bind(&done); \
- DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_STORE_DOUBLE() \
- do { \
- Label done; \
- size_t index = 0; \
- AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode, index); \
- DCHECK_EQ(kMode_MRR, mode); \
- Register offset = operand.rb(); \
- if (HasRegisterInput(instr, 2)) { \
- __ cmplw(offset, i.InputRegister(2)); \
- } else { \
- __ cmplwi(offset, i.InputImmediate(2)); \
- } \
- __ bge(&done); \
- DoubleRegister value = i.InputDoubleRegister(3); \
- if (mode == kMode_MRI) { \
- __ stfd(value, operand); \
- } else { \
- CleanUInt32(offset); \
- __ stfdx(value, operand); \
- } \
- __ bind(&done); \
- DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr, asm_instrx) \
- do { \
- Label done; \
- size_t index = 0; \
- AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode, index); \
- DCHECK_EQ(kMode_MRR, mode); \
- Register offset = operand.rb(); \
- if (HasRegisterInput(instr, 2)) { \
- __ cmplw(offset, i.InputRegister(2)); \
- } else { \
- __ cmplwi(offset, i.InputImmediate(2)); \
- } \
- __ bge(&done); \
- Register value = i.InputRegister(3); \
- if (mode == kMode_MRI) { \
- __ asm_instr(value, operand); \
- } else { \
- CleanUInt32(offset); \
- __ asm_instrx(value, operand); \
- } \
- __ bind(&done); \
- DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
- } while (0)
-
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr, asm_instrx) \
do { \
Label done; \
@@ -1003,8 +833,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
if (instr->InputAt(0)->IsImmediate()) {
+#ifdef V8_TARGET_ARCH_PPC64
+ Address wasm_code = reinterpret_cast<Address>(
+ i.ToConstant(instr->InputAt(0)).ToInt64());
+#else
Address wasm_code = reinterpret_cast<Address>(
i.ToConstant(instr->InputAt(0)).ToInt32());
+#endif
__ Call(wasm_code, rmode);
} else {
__ Call(i.InputRegister(0));
@@ -1072,7 +907,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ LoadP(kScratchReg,
FieldMemOperand(func, JSFunction::kContextOffset));
__ cmp(cp, kScratchReg);
- __ Assert(eq, kWrongFunctionContext);
+ __ Assert(eq, AbortReason::kWrongFunctionContext);
}
__ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeOffset));
__ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -2021,58 +1856,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kPPC_StoreDouble:
ASSEMBLE_STORE_DOUBLE();
break;
- case kCheckedLoadInt8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(lbz, lbzx);
- __ extsb(i.OutputRegister(), i.OutputRegister());
- break;
- case kCheckedLoadUint8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(lbz, lbzx);
- break;
- case kCheckedLoadInt16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(lha, lhax);
- break;
- case kCheckedLoadUint16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(lhz, lhzx);
- break;
- case kCheckedLoadWord32:
- ASSEMBLE_CHECKED_LOAD_INTEGER(lwz, lwzx);
- break;
- case kCheckedLoadWord64:
-#if V8_TARGET_ARCH_PPC64
- ASSEMBLE_CHECKED_LOAD_INTEGER(ld, ldx);
-#else
- UNREACHABLE();
-#endif
- break;
- case kCheckedLoadFloat32:
- ASSEMBLE_CHECKED_LOAD_FLOAT(lfs, lfsx, 32);
- break;
- case kCheckedLoadFloat64:
- ASSEMBLE_CHECKED_LOAD_FLOAT(lfd, lfdx, 64);
- break;
- case kCheckedStoreWord8:
- ASSEMBLE_CHECKED_STORE_INTEGER(stb, stbx);
- break;
- case kCheckedStoreWord16:
- ASSEMBLE_CHECKED_STORE_INTEGER(sth, sthx);
- break;
- case kCheckedStoreWord32:
- ASSEMBLE_CHECKED_STORE_INTEGER(stw, stwx);
- break;
- case kCheckedStoreWord64:
-#if V8_TARGET_ARCH_PPC64
- ASSEMBLE_CHECKED_STORE_INTEGER(std, stdx);
-#else
- UNREACHABLE();
-#endif
- break;
- case kCheckedStoreFloat32:
- ASSEMBLE_CHECKED_STORE_FLOAT32();
- break;
- case kCheckedStoreFloat64:
- ASSEMBLE_CHECKED_STORE_DOUBLE();
- break;
-
case kAtomicLoadInt8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lbz, lbzx);
__ extsb(i.OutputRegister(), i.OutputRegister());
@@ -2208,7 +1991,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
- __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
+ __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
}
}
}
@@ -2382,7 +2165,7 @@ void CodeGenerator::AssembleConstructFrame() {
frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
- __ Abort(kShouldNotDirectlyEnterOsrFunction);
+ __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
// Unoptimized code jumps directly to this entrypoint while the unoptimized
// frame is still on the stack. Optimized code uses OSR values directly from
@@ -2555,10 +2338,10 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
// converts it to qnan on ia32/x64
if (src.type() == Constant::kFloat32) {
uint32_t val = src.ToFloat32AsInt();
- if ((val & 0x7f800000) == 0x7f800000) {
+ if ((val & 0x7F800000) == 0x7F800000) {
uint64_t dval = static_cast<uint64_t>(val);
- dval = ((dval & 0xc0000000) << 32) | ((dval & 0x40000000) << 31) |
- ((dval & 0x40000000) << 30) | ((dval & 0x7fffffff) << 29);
+ dval = ((dval & 0xC0000000) << 32) | ((dval & 0x40000000) << 31) |
+ ((dval & 0x40000000) << 30) | ((dval & 0x7FFFFFFF) << 29);
value = Double(dval);
} else {
value = Double(static_cast<double>(src.ToFloat32()));
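
The constant-move path above widens a float32 NaN bit pattern to float64 using only integer operations, because (as the comment notes) letting the FPU convert would quiet a signaling NaN on ia32/x64. The masks copy the sign bit to bit 63, replicate the exponent MSB into the widened exponent, and shift the remaining bits so the quiet bit and payload land in their float64 positions. A standalone check of the same bit surgery:

    #include <cstdint>
    #include <cstdio>

    uint64_t WidenNaNBits(uint32_t val) {
      uint64_t dval = val;
      return ((dval & 0xC0000000) << 32) | ((dval & 0x40000000) << 31) |
             ((dval & 0x40000000) << 30) | ((dval & 0x7FFFFFFF) << 29);
    }

    int main() {
      // 0x7F800001 is a float32 signaling NaN; the result keeps its
      // payload and is the float64 signaling NaN 0x7FF0000020000000.
      std::printf("%016llx\n",
                  static_cast<unsigned long long>(WidenNaNBits(0x7F800001u)));
    }
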
@@ -2672,69 +2455,6 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
}
return;
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
- if (source->IsRegister()) {
- // Register-register.
- Register temp = kScratchReg;
- Register src = g.ToRegister(source);
- if (destination->IsRegister()) {
- Register dst = g.ToRegister(destination);
- __ mr(temp, src);
- __ mr(src, dst);
- __ mr(dst, temp);
- } else {
- DCHECK(destination->IsStackSlot());
- MemOperand dst = g.ToMemOperand(destination);
- __ mr(temp, src);
- __ LoadP(src, dst);
- __ StoreP(temp, dst);
- }
-#if V8_TARGET_ARCH_PPC64
- } else if (source->IsStackSlot() || source->IsFPStackSlot()) {
-#else
- } else if (source->IsStackSlot()) {
- DCHECK(destination->IsStackSlot());
-#endif
- Register temp_0 = kScratchReg;
- Register temp_1 = r0;
- MemOperand src = g.ToMemOperand(source);
- MemOperand dst = g.ToMemOperand(destination);
- __ LoadP(temp_0, src);
- __ LoadP(temp_1, dst);
- __ StoreP(temp_0, dst);
- __ StoreP(temp_1, src);
- } else if (source->IsFPRegister()) {
- DoubleRegister temp = kScratchDoubleReg;
- DoubleRegister src = g.ToDoubleRegister(source);
- if (destination->IsFPRegister()) {
- DoubleRegister dst = g.ToDoubleRegister(destination);
- __ fmr(temp, src);
- __ fmr(src, dst);
- __ fmr(dst, temp);
- } else {
- DCHECK(destination->IsFPStackSlot());
- MemOperand dst = g.ToMemOperand(destination);
- __ fmr(temp, src);
- __ lfd(src, dst);
- __ stfd(temp, dst);
- }
-#if !V8_TARGET_ARCH_PPC64
- } else if (source->IsFPStackSlot()) {
- DCHECK(destination->IsFPStackSlot());
- DoubleRegister temp_0 = kScratchDoubleReg;
- DoubleRegister temp_1 = d0;
- MemOperand src = g.ToMemOperand(source);
- MemOperand dst = g.ToMemOperand(destination);
- __ lfd(temp_0, src);
- __ lfd(temp_1, dst);
- __ stfd(temp_0, dst);
- __ stfd(temp_1, src);
-#endif
- } else {
- // No other combinations are possible.
- UNREACHABLE();
- }
}
diff --git a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
index 8454590ee2..fced5565df 100644
--- a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
@@ -154,7 +154,8 @@ void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsTrap()) {
inputs[input_count++] = g.UseImmediate(cont->trap_id());
selector->Emit(opcode, output_count, outputs, input_count, inputs);
@@ -366,101 +367,6 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
// Architecture supports unaligned access, therefore VisitStore is used instead
void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitCheckedLoad(Node* node) {
- CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
- PPCOperandGenerator g(this);
- Node* const base = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- ArchOpcode opcode = kArchNop;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedLoadWord32;
- break;
-#if V8_TARGET_ARCH_PPC64
- case MachineRepresentation::kWord64:
- opcode = kCheckedLoadWord64;
- break;
-#endif
- case MachineRepresentation::kFloat32:
- opcode = kCheckedLoadFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedLoadFloat64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
-#if !V8_TARGET_ARCH_PPC64
- case MachineRepresentation::kWord64: // Fall through.
-#endif
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- AddressingMode addressingMode = kMode_MRR;
- Emit(opcode | AddressingModeField::encode(addressingMode),
- g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset),
- g.UseOperand(length, kInt16Imm_Unsigned));
-}
-
-
-void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
- PPCOperandGenerator g(this);
- Node* const base = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- Node* const value = node->InputAt(3);
- ArchOpcode opcode = kArchNop;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kCheckedStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kCheckedStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedStoreWord32;
- break;
-#if V8_TARGET_ARCH_PPC64
- case MachineRepresentation::kWord64:
- opcode = kCheckedStoreWord64;
- break;
-#endif
- case MachineRepresentation::kFloat32:
- opcode = kCheckedStoreFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedStoreFloat64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
-#if !V8_TARGET_ARCH_PPC64
- case MachineRepresentation::kWord64: // Fall through.
-#endif
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- AddressingMode addressingMode = kMode_MRR;
- Emit(opcode | AddressingModeField::encode(addressingMode), g.NoOutput(),
- g.UseRegister(base), g.UseRegister(offset),
- g.UseOperand(length, kInt16Imm_Unsigned), g.UseRegister(value));
-}
-
-
template <typename Matcher>
static void VisitLogical(InstructionSelector* selector, Node* node, Matcher* m,
ArchOpcode opcode, bool left_can_cover,
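
VisitCheckedLoad and VisitCheckedStore are deleted here, and again for s390 further down: the CheckedLoad/CheckedStore machine operators were retired in this V8 version, with bounds checks expressed earlier in the graph instead. Conceptually, the deleted lowering guarded a load with an unsigned length comparison and produced a default value out of line (zero for integers, NaN for floats, as the s390 out-of-line classes below show). A stand-alone sketch under that reading:

    #include <cstdint>
    #include <cstring>

    // Conceptual semantics of the retired kCheckedLoadWord32 (illustrative
    // only): out-of-bounds offsets yield zero instead of trapping.
    int32_t CheckedLoadWord32(const uint8_t* base, uint32_t offset,
                              uint32_t length) {
      if (offset >= length) return 0;  // the "bge ool->entry()" path
      int32_t value;
      std::memcpy(&value, base + offset, sizeof(value));
      return value;
    }
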
@@ -553,7 +459,7 @@ void InstructionSelector::VisitWord32And(Node* node) {
if (m.left().IsWord32Shr()) {
// Adjust the mask such that it doesn't include any rotated bits.
if (mb > 31 - sh) mb = 31 - sh;
- sh = (32 - sh) & 0x1f;
+ sh = (32 - sh) & 0x1F;
} else {
// Adjust the mask such that it doesn't include any rotated bits.
if (me < sh) me = sh;
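
Beyond the 0x1f to 0x1F restyling, the surrounding logic folds a shift-plus-mask into PPC's rotate-and-mask instructions, and the (32 - sh) & 0x1F term is the rotate amount that makes a logical right shift expressible as a left rotate. A small self-contained check of that identity:

    #include <cstdint>

    uint32_t RotateLeft32(uint32_t v, unsigned r) {
      r &= 0x1F;
      return (v << r) | (v >> ((32 - r) & 0x1F));
    }

    // v >> sh  ==  RotateLeft32(v, (32 - sh) & 0x1F) with the vacated high
    // bits masked off, which is exactly what a rotate-and-mask encoding needs.
    uint32_t ShrViaRotate(uint32_t v, unsigned sh) {  // sh in [0, 31]
      uint32_t rot = (32 - sh) & 0x1F;
      uint32_t keep = ~0u >> sh;  // mask of bits that survive the shift
      return RotateLeft32(v, rot) & keep;
    }
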
@@ -592,7 +498,7 @@ void InstructionSelector::VisitWord64And(Node* node) {
if (m.left().IsWord64Shr()) {
// Adjust the mask such that it doesn't include any rotated bits.
if (mb > 63 - sh) mb = 63 - sh;
- sh = (64 - sh) & 0x3f;
+ sh = (64 - sh) & 0x3F;
} else {
// Adjust the mask such that it doesn't include any rotated bits.
if (me < sh) me = sh;
@@ -756,7 +662,7 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
IsContiguousMask32((uint32_t)(mleft.right().Value()) >> sh, &mb, &me)) {
// Adjust the mask such that it doesn't include any rotated bits.
if (mb > 31 - sh) mb = 31 - sh;
- sh = (32 - sh) & 0x1f;
+ sh = (32 - sh) & 0x1F;
if (mb >= me) {
Emit(kPPC_RotLeftAndMask32, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
@@ -782,7 +688,7 @@ void InstructionSelector::VisitWord64Shr(Node* node) {
IsContiguousMask64((uint64_t)(mleft.right().Value()) >> sh, &mb, &me)) {
// Adjust the mask such that it doesn't include any rotated bits.
if (mb > 63 - sh) mb = 63 - sh;
- sh = (64 - sh) & 0x3f;
+ sh = (64 - sh) & 0x3F;
if (mb >= me) {
bool match = false;
ArchOpcode opcode;
@@ -1033,6 +939,8 @@ void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitSpeculationFence(Node* node) { UNREACHABLE(); }
+
void InstructionSelector::VisitInt32Add(Node* node) {
VisitBinop<Int32BinopMatcher>(this, node, kPPC_Add32, kInt16Imm);
}
@@ -1553,7 +1461,8 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
- cont->reason(), cont->frame_state());
+ cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
} else {
@@ -1800,14 +1709,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), node->InputAt(1));
+ kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), node->InputAt(1));
+ kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
}
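
The recurring cont->feedback() / p.feedback() additions in this file (and in the s390 selector below) thread a VectorSlotPair from the deoptimization operator through the FlagsContinuation into EmitDeoptimize, so a repeatedly failing check can disable speculation on the feedback slot that caused it. A minimal stand-in of the plumbing, with assumed type shapes rather than V8's real definitions:

    #include <cstdint>

    enum class FlagsCondition { kEqual, kNotEqual };
    enum class DeoptimizeKind { kEager, kSoft, kLazy };
    enum class DeoptimizeReason { kUnknown };
    struct VectorSlotPair { uint32_t slot = 0; };  // assumed shape
    struct Node;                                   // stand-in

    struct FlagsContinuation {
      FlagsCondition condition;
      DeoptimizeKind kind;
      DeoptimizeReason reason;
      VectorSlotPair feedback;  // new in this patch: travels with the deopt
      Node* frame_state;

      static FlagsContinuation ForDeoptimize(FlagsCondition condition,
                                             DeoptimizeKind kind,
                                             DeoptimizeReason reason,
                                             VectorSlotPair feedback,
                                             Node* frame_state) {
        return {condition, kind, reason, feedback, frame_state};
      }
    };
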
@@ -1989,7 +1898,7 @@ void InstructionSelector::EmitPrepareArguments(
// Poke any stack arguments.
int slot = kStackFrameExtraParamSlot;
for (PushParameter input : (*arguments)) {
- Emit(kPPC_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
+ Emit(kPPC_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
g.TempImmediate(slot));
++slot;
}
@@ -1997,8 +1906,8 @@ void InstructionSelector::EmitPrepareArguments(
// Push any stack arguments.
for (PushParameter input : base::Reversed(*arguments)) {
// Skip any alignment holes in pushed nodes.
- if (input.node() == nullptr) continue;
- Emit(kPPC_Push, g.NoOutput(), g.UseRegister(input.node()));
+ if (input.node == nullptr) continue;
+ Emit(kPPC_Push, g.NoOutput(), g.UseRegister(input.node));
}
}
}
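
input.node() becoming input.node here (and input.type() becoming input.location.GetType() in the s390 file below) reflects PushParameter changing from an accessor-based class into a plain struct. A compilable stand-in of the assumed new shape:

    struct Node;             // stand-in for the compiler graph node type
    struct MachineType {};   // stand-in
    struct LinkageLocation { // stand-in; the real one describes where a
      MachineType GetType() const { return MachineType{}; }  // value lives
    };

    // Assumed shape of the new PushParameter: public fields replacing the
    // old node() and type() accessors.
    struct PushParameter {
      Node* node = nullptr;
      LinkageLocation location;
    };
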
@@ -2164,6 +2073,190 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
UNREACHABLE();
}
+void InstructionSelector::VisitI32x4Splat(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4ExtractLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4ReplaceLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4Add(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4Sub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4Shl(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4ShrS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4Mul(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4MaxS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4MinS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4Eq(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4Ne(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4MinU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4MaxU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4ShrU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4Neg(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4GtS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4GeS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4GtU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4GeU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8Splat(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8ExtractLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8ReplaceLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8Shl(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8ShrS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8ShrU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8Add(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8AddSaturateS(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI16x8Sub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8SubSaturateS(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI16x8Mul(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8MinS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8MaxS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8Eq(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8Ne(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8AddSaturateU(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI16x8SubSaturateU(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI16x8MinU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8MaxU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8Neg(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8GtS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8GeS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8GtU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8GeU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16Neg(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16Splat(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16ExtractLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16ReplaceLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16Add(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16AddSaturateS(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI8x16Sub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16SubSaturateS(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI8x16MinS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16MaxS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16Eq(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16Ne(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16GtS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16GeS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16AddSaturateU(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI8x16SubSaturateU(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI8x16MinU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16MaxU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16GtU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16GeU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS128And(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS128Or(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS128Xor(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS128Not(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS128Zero(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Eq(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Ne(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Lt(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Le(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Splat(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4ExtractLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4ReplaceLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
+ const CallDescriptor* descriptor,
+ Node* node) {
+ // TODO(John): Port.
+}
+
+void InstructionSelector::VisitF32x4Add(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Sub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Mul(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Min(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Max(Node* node) { UNIMPLEMENTED(); }
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
@@ -2176,7 +2269,7 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kFloat64RoundTiesAway |
MachineOperatorBuilder::kWord32Popcnt |
MachineOperatorBuilder::kWord64Popcnt;
- // We omit kWord32ShiftIsSafe as s[rl]w use 0x3f as a mask rather than 0x1f.
+ // We omit kWord32ShiftIsSafe as s[rl]w use 0x3F as a mask rather than 0x1F.
}
// static
diff --git a/deps/v8/src/compiler/property-access-builder.cc b/deps/v8/src/compiler/property-access-builder.cc
index 5e79cbdfec..bead0618f6 100644
--- a/deps/v8/src/compiler/property-access-builder.cc
+++ b/deps/v8/src/compiler/property-access-builder.cc
@@ -69,8 +69,9 @@ bool PropertyAccessBuilder::TryBuildStringCheck(MapHandles const& maps,
} else {
// Monomorphic string access (ignoring the fact that there are multiple
// String maps).
- *receiver = *effect = graph()->NewNode(simplified()->CheckString(),
- *receiver, *effect, control);
+ *receiver = *effect =
+ graph()->NewNode(simplified()->CheckString(VectorSlotPair()),
+ *receiver, *effect, control);
}
return true;
}
@@ -82,8 +83,9 @@ bool PropertyAccessBuilder::TryBuildNumberCheck(MapHandles const& maps,
Node* control) {
if (HasOnlyNumberMaps(maps)) {
// Monomorphic number access (we also deal with Smis here).
- *receiver = *effect = graph()->NewNode(simplified()->CheckNumber(),
- *receiver, *effect, control);
+ *receiver = *effect =
+ graph()->NewNode(simplified()->CheckNumber(VectorSlotPair()), *receiver,
+ *effect, control);
return true;
}
return false;
@@ -175,8 +177,9 @@ Node* PropertyAccessBuilder::BuildCheckValue(Node* receiver, Node** effect,
Node* expected = jsgraph()->HeapConstant(value);
Node* check =
graph()->NewNode(simplified()->ReferenceEqual(), receiver, expected);
- *effect = graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kNoReason),
- check, *effect, control);
+ *effect =
+ graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kWrongValue),
+ check, *effect, control);
return expected;
}
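
Two related cleanups above: string and number checks now carry a feedback slot (an empty VectorSlotPair at these call sites), and BuildCheckValue reports the precise DeoptimizeReason::kWrongValue instead of kNoReason. The reason is what deopt tracing surfaces, so the change is directly visible when debugging. A stand-in sketch; the message strings are assumptions, not V8's exact text:

    #include <cstdio>

    enum class DeoptimizeReason { kNoReason, kWrongValue };

    const char* DeoptimizeReasonToString(DeoptimizeReason reason) {
      switch (reason) {
        case DeoptimizeReason::kNoReason:   return "(unknown)";
        case DeoptimizeReason::kWrongValue: return "wrong value";
      }
      return "";
    }

    int main() {
      // After this patch a failed value check attributes itself clearly.
      std::printf("deopt reason: %s\n",
                  DeoptimizeReasonToString(DeoptimizeReason::kWrongValue));
    }
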
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index bed2f628d9..ed67c06cc7 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -134,7 +134,6 @@ void RawMachineAssembler::Return(Node* value) {
current_block_ = nullptr;
}
-
void RawMachineAssembler::Return(Node* v1, Node* v2) {
Node* values[] = {Int32Constant(0), v1, v2};
Node* ret = MakeNode(common()->Return(2), 3, values);
@@ -142,7 +141,6 @@ void RawMachineAssembler::Return(Node* v1, Node* v2) {
current_block_ = nullptr;
}
-
void RawMachineAssembler::Return(Node* v1, Node* v2, Node* v3) {
Node* values[] = {Int32Constant(0), v1, v2, v3};
Node* ret = MakeNode(common()->Return(3), 4, values);
@@ -150,6 +148,24 @@ void RawMachineAssembler::Return(Node* v1, Node* v2, Node* v3) {
current_block_ = nullptr;
}
+void RawMachineAssembler::Return(Node* v1, Node* v2, Node* v3, Node* v4) {
+ Node* values[] = {Int32Constant(0), v1, v2, v3, v4};
+ Node* ret = MakeNode(common()->Return(4), 5, values);
+ schedule()->AddReturn(CurrentBlock(), ret);
+ current_block_ = nullptr;
+}
+
+void RawMachineAssembler::Return(int count, Node* vs[]) {
+ typedef Node* Node_ptr;
+ Node** values = new Node_ptr[count + 1];
+ values[0] = Int32Constant(0);
+ for (int i = 0; i < count; ++i) values[i + 1] = vs[i];
+ Node* ret = MakeNode(common()->Return(count), count + 1, values);
+ schedule()->AddReturn(CurrentBlock(), ret);
+ current_block_ = nullptr;
+ delete[] values;
+}
+
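
The new variable-arity Return packs a pop-count constant of zero ahead of the return values before building the Return node. A self-contained sketch of that packing; the committed code manages the temporary array with new[]/delete[], where a std::vector gives the same layout with automatic cleanup (safe either way here, since the graph node copies its input list):

    #include <vector>

    struct Node;  // stand-in

    // Prepend the pop-count constant (Int32Constant(0) in the diff) to the
    // user-supplied return values, mirroring Return(int count, Node* vs[]).
    std::vector<Node*> PackReturnValues(Node* zero_constant, int count,
                                        Node* vs[]) {
      std::vector<Node*> values(static_cast<size_t>(count) + 1);
      values[0] = zero_constant;
      for (int i = 0; i < count; ++i) values[i + 1] = vs[i];
      return values;
    }
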
void RawMachineAssembler::PopAndReturn(Node* pop, Node* value) {
Node* values[] = {pop, value};
Node* ret = MakeNode(common()->Return(1), 2, values);
@@ -172,6 +188,14 @@ void RawMachineAssembler::PopAndReturn(Node* pop, Node* v1, Node* v2,
current_block_ = nullptr;
}
+void RawMachineAssembler::PopAndReturn(Node* pop, Node* v1, Node* v2, Node* v3,
+ Node* v4) {
+ Node* values[] = {pop, v1, v2, v3, v4};
+ Node* ret = MakeNode(common()->Return(4), 5, values);
+ schedule()->AddReturn(CurrentBlock(), ret);
+ current_block_ = nullptr;
+}
+
void RawMachineAssembler::DebugAbort(Node* message) {
AddNode(machine()->DebugAbort(), message);
}
@@ -430,7 +454,7 @@ void RawMachineAssembler::Bind(RawMachineLabel* label,
str << "Binding label without closing previous block:"
<< "\n# label: " << info
<< "\n# previous block: " << *current_block_;
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
Bind(label);
current_block_->set_debug_info(info);
@@ -495,7 +519,7 @@ RawMachineLabel::~RawMachineLabel() {
} else {
str << "A label has been used but it's not bound.";
}
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
#endif // DEBUG
}
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index 9fc3590875..1cc56b3379 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -186,6 +186,10 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
old_value, new_value);
}
+ Node* SpeculationFence() {
+ return AddNode(machine()->SpeculationFence().op());
+ }
+
// Arithmetic Operations.
Node* WordAnd(Node* a, Node* b) {
return AddNode(machine()->WordAnd(), a, b);
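
SpeculationFence is new machine-level surface in this V8 version (part of the Spectre-variant mitigations); the PPC and s390 selectors in this patch mark it UNREACHABLE, i.e. not yet lowered on those targets. What the operator means, sketched portably; the x86-64 mapping to LFENCE is the commonly cited one and should be read as an assumption here:

    #if defined(__x86_64__)
    #include <immintrin.h>
    #endif
    #include <atomic>

    inline void SpeculationFenceSketch() {
    #if defined(__x86_64__)
      _mm_lfence();  // dispatch-serializing on x86-64: later instructions do
                     // not begin, even speculatively, before the fence retires
    #else
      // Compiler-only barrier; NOT a real speculation barrier, included only
      // so the sketch compiles everywhere.
      std::atomic_signal_fence(std::memory_order_seq_cst);
    #endif
    }
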
@@ -828,9 +832,12 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
void Return(Node* value);
void Return(Node* v1, Node* v2);
void Return(Node* v1, Node* v2, Node* v3);
+ void Return(Node* v1, Node* v2, Node* v3, Node* v4);
+ void Return(int count, Node* v[]);
void PopAndReturn(Node* pop, Node* value);
void PopAndReturn(Node* pop, Node* v1, Node* v2);
void PopAndReturn(Node* pop, Node* v1, Node* v2, Node* v3);
+ void PopAndReturn(Node* pop, Node* v1, Node* v2, Node* v3, Node* v4);
void Bind(RawMachineLabel* label);
void Deoptimize(Node* state);
void DebugAbort(Node* message);
diff --git a/deps/v8/src/compiler/redundancy-elimination.cc b/deps/v8/src/compiler/redundancy-elimination.cc
index 3a40e8d5bf..eedf946fb6 100644
--- a/deps/v8/src/compiler/redundancy-elimination.cc
+++ b/deps/v8/src/compiler/redundancy-elimination.cc
@@ -5,6 +5,7 @@
#include "src/compiler/redundancy-elimination.h"
#include "src/compiler/node-properties.h"
+#include "src/compiler/simplified-operator.h"
namespace v8 {
namespace internal {
@@ -19,26 +20,36 @@ Reduction RedundancyElimination::Reduce(Node* node) {
if (node_checks_.Get(node)) return NoChange();
switch (node->opcode()) {
case IrOpcode::kCheckBounds:
+ case IrOpcode::kCheckEqualsInternalizedString:
+ case IrOpcode::kCheckEqualsSymbol:
case IrOpcode::kCheckFloat64Hole:
case IrOpcode::kCheckHeapObject:
case IrOpcode::kCheckIf:
case IrOpcode::kCheckInternalizedString:
+ case IrOpcode::kCheckNotTaggedHole:
case IrOpcode::kCheckNumber:
case IrOpcode::kCheckReceiver:
+ case IrOpcode::kCheckSeqString:
case IrOpcode::kCheckSmi:
case IrOpcode::kCheckString:
- case IrOpcode::kCheckSeqString:
- case IrOpcode::kCheckNotTaggedHole:
+ case IrOpcode::kCheckSymbol:
case IrOpcode::kCheckedFloat64ToInt32:
case IrOpcode::kCheckedInt32Add:
- case IrOpcode::kCheckedInt32Sub:
case IrOpcode::kCheckedInt32Div:
case IrOpcode::kCheckedInt32Mod:
case IrOpcode::kCheckedInt32Mul:
- case IrOpcode::kCheckedTaggedToFloat64:
+ case IrOpcode::kCheckedInt32Sub:
+ case IrOpcode::kCheckedInt32ToTaggedSigned:
case IrOpcode::kCheckedTaggedSignedToInt32:
+ case IrOpcode::kCheckedTaggedToFloat64:
case IrOpcode::kCheckedTaggedToInt32:
+ case IrOpcode::kCheckedTaggedToTaggedPointer:
+ case IrOpcode::kCheckedTaggedToTaggedSigned:
+ case IrOpcode::kCheckedTruncateTaggedToWord32:
+ case IrOpcode::kCheckedUint32Div:
+ case IrOpcode::kCheckedUint32Mod:
case IrOpcode::kCheckedUint32ToInt32:
+ case IrOpcode::kCheckedUint32ToTaggedSigned:
return ReduceCheckNode(node);
case IrOpcode::kSpeculativeNumberAdd:
case IrOpcode::kSpeculativeNumberSubtract:
@@ -124,13 +135,43 @@ RedundancyElimination::EffectPathChecks::AddCheck(Zone* zone,
namespace {
-bool IsCompatibleCheck(Node const* a, Node const* b) {
+// Does check {a} subsume check {b}?
+bool CheckSubsumes(Node const* a, Node const* b) {
if (a->op() != b->op()) {
if (a->opcode() == IrOpcode::kCheckInternalizedString &&
b->opcode() == IrOpcode::kCheckString) {
// CheckInternalizedString(node) implies CheckString(node)
- } else {
+ } else if (a->opcode() != b->opcode()) {
return false;
+ } else {
+ switch (a->opcode()) {
+ case IrOpcode::kCheckBounds:
+ case IrOpcode::kCheckSmi:
+ case IrOpcode::kCheckString:
+ case IrOpcode::kCheckNumber:
+ break;
+ case IrOpcode::kCheckedInt32ToTaggedSigned:
+ case IrOpcode::kCheckedTaggedSignedToInt32:
+ case IrOpcode::kCheckedTaggedToTaggedPointer:
+ case IrOpcode::kCheckedTaggedToTaggedSigned:
+ case IrOpcode::kCheckedUint32ToInt32:
+ case IrOpcode::kCheckedUint32ToTaggedSigned:
+ break;
+ case IrOpcode::kCheckedFloat64ToInt32:
+ case IrOpcode::kCheckedTaggedToInt32: {
+ const CheckMinusZeroParameters& ap =
+ CheckMinusZeroParametersOf(a->op());
+ const CheckMinusZeroParameters& bp =
+ CheckMinusZeroParametersOf(b->op());
+ if (ap.mode() != bp.mode()) {
+ return false;
+ }
+ break;
+ }
+ default:
+ DCHECK(!IsCheckedWithFeedback(a->op()));
+ return false;
+ }
}
}
for (int i = a->op()->ValueInputCount(); --i >= 0;) {
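
The rename IsCompatibleCheck to CheckSubsumes matches the sharpened semantics: the question is whether an earlier dominating check makes a later one redundant, and same-opcode checks now also require compatible parameters (e.g. the minus-zero mode of the checked float64/tagged-to-int32 conversions). The core rule, reduced to a compilable sketch with stand-in opcodes:

    enum class Opcode { kCheckInternalizedString, kCheckString, kCheckSmi };

    struct Check { Opcode opcode; };

    // Does the earlier check {a} subsume the later check {b}? (V8 also
    // compares operator parameters and value inputs; elided here.)
    bool CheckSubsumes(const Check& a, const Check& b) {
      if (a.opcode == b.opcode) return true;
      // CheckInternalizedString(x) implies CheckString(x): every
      // internalized string is a string.
      return a.opcode == Opcode::kCheckInternalizedString &&
             b.opcode == Opcode::kCheckString;
    }
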
@@ -143,7 +184,7 @@ bool IsCompatibleCheck(Node const* a, Node const* b) {
Node* RedundancyElimination::EffectPathChecks::LookupCheck(Node* node) const {
for (Check const* check = head_; check != nullptr; check = check->next) {
- if (IsCompatibleCheck(check->node, node)) {
+ if (CheckSubsumes(check->node, node)) {
DCHECK(!check->node->IsDead());
return check->node;
}
diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc
index b0a345a57f..f8a5a9c504 100644
--- a/deps/v8/src/compiler/representation-change.cc
+++ b/deps/v8/src/compiler/representation-change.cc
@@ -216,7 +216,9 @@ Node* RepresentationChanger::GetTaggedSignedRepresentationFor(
const Operator* op;
if (output_type->Is(Type::None())) {
// This is an impossible value; it should not be used at runtime.
- return jsgraph()->DeadValue();
+ return jsgraph()->graph()->NewNode(
+ jsgraph()->common()->DeadValue(MachineRepresentation::kTaggedSigned),
+ node);
} else if (IsWord(output_rep)) {
if (output_type->Is(Type::Signed31())) {
op = simplified()->ChangeInt31ToTaggedSigned();
@@ -224,14 +226,14 @@ Node* RepresentationChanger::GetTaggedSignedRepresentationFor(
if (SmiValuesAre32Bits()) {
op = simplified()->ChangeInt32ToTagged();
} else if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
- op = simplified()->CheckedInt32ToTaggedSigned();
+ op = simplified()->CheckedInt32ToTaggedSigned(use_info.feedback());
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kTaggedSigned);
}
} else if (output_type->Is(Type::Unsigned32()) &&
use_info.type_check() == TypeCheckKind::kSignedSmall) {
- op = simplified()->CheckedUint32ToTaggedSigned();
+ op = simplified()->CheckedUint32ToTaggedSigned(use_info.feedback());
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kTaggedSigned);
@@ -247,7 +249,7 @@ Node* RepresentationChanger::GetTaggedSignedRepresentationFor(
if (SmiValuesAre32Bits()) {
op = simplified()->ChangeInt32ToTagged();
} else if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
- op = simplified()->CheckedInt32ToTaggedSigned();
+ op = simplified()->CheckedInt32ToTaggedSigned(use_info.feedback());
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kTaggedSigned);
@@ -256,17 +258,18 @@ Node* RepresentationChanger::GetTaggedSignedRepresentationFor(
use_info.type_check() == TypeCheckKind::kSignedSmall) {
// float64 -> uint32 -> tagged signed
node = InsertChangeFloat64ToUint32(node);
- op = simplified()->CheckedUint32ToTaggedSigned();
+ op = simplified()->CheckedUint32ToTaggedSigned(use_info.feedback());
} else if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
op = simplified()->CheckedFloat64ToInt32(
output_type->Maybe(Type::MinusZero())
? CheckForMinusZeroMode::kCheckForMinusZero
- : CheckForMinusZeroMode::kDontCheckForMinusZero);
+ : CheckForMinusZeroMode::kDontCheckForMinusZero,
+ use_info.feedback());
node = InsertConversion(node, op, use_node);
if (SmiValuesAre32Bits()) {
op = simplified()->ChangeInt32ToTagged();
} else {
- op = simplified()->CheckedInt32ToTaggedSigned();
+ op = simplified()->CheckedInt32ToTaggedSigned(use_info.feedback());
}
} else {
return TypeError(node, output_rep, output_type,
@@ -279,12 +282,13 @@ Node* RepresentationChanger::GetTaggedSignedRepresentationFor(
op = simplified()->CheckedFloat64ToInt32(
output_type->Maybe(Type::MinusZero())
? CheckForMinusZeroMode::kCheckForMinusZero
- : CheckForMinusZeroMode::kDontCheckForMinusZero);
+ : CheckForMinusZeroMode::kDontCheckForMinusZero,
+ use_info.feedback());
node = InsertConversion(node, op, use_node);
if (SmiValuesAre32Bits()) {
op = simplified()->ChangeInt32ToTagged();
} else {
- op = simplified()->CheckedInt32ToTaggedSigned();
+ op = simplified()->CheckedInt32ToTaggedSigned(use_info.feedback());
}
} else {
return TypeError(node, output_rep, output_type,
@@ -292,7 +296,7 @@ Node* RepresentationChanger::GetTaggedSignedRepresentationFor(
}
} else if (CanBeTaggedPointer(output_rep)) {
if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
- op = simplified()->CheckedTaggedToTaggedSigned();
+ op = simplified()->CheckedTaggedToTaggedSigned(use_info.feedback());
} else if (output_type->Is(Type::SignedSmall())) {
op = simplified()->ChangeTaggedToTaggedSigned();
} else {
@@ -304,7 +308,7 @@ Node* RepresentationChanger::GetTaggedSignedRepresentationFor(
// TODO(turbofan): Consider adding a Bailout operator that just deopts.
// Also use that for MachineRepresentation::kPointer case above.
node = InsertChangeBitToTagged(node);
- op = simplified()->CheckedTaggedToTaggedSigned();
+ op = simplified()->CheckedTaggedToTaggedSigned(use_info.feedback());
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kTaggedSigned);
@@ -334,7 +338,9 @@ Node* RepresentationChanger::GetTaggedPointerRepresentationFor(
Operator const* op;
if (output_type->Is(Type::None())) {
// This is an impossible value; it should not be used at runtime.
- return jsgraph()->DeadValue();
+ return jsgraph()->graph()->NewNode(
+ jsgraph()->common()->DeadValue(MachineRepresentation::kTaggedPointer),
+ node);
} else if (output_rep == MachineRepresentation::kBit) {
if (output_type->Is(Type::Boolean())) {
op = simplified()->ChangeBitToTagged();
@@ -378,7 +384,7 @@ Node* RepresentationChanger::GetTaggedPointerRepresentationFor(
}
// TODO(turbofan): Consider adding a Bailout operator that just deopts
// for TaggedSigned output representation.
- op = simplified()->CheckedTaggedToTaggedPointer();
+ op = simplified()->CheckedTaggedToTaggedPointer(use_info.feedback());
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kTaggedPointer);
@@ -411,7 +417,8 @@ Node* RepresentationChanger::GetTaggedRepresentationFor(
const Operator* op;
if (output_type->Is(Type::None())) {
// This is an impossible value; it should not be used at runtime.
- return jsgraph()->DeadValue();
+ return jsgraph()->graph()->NewNode(
+ jsgraph()->common()->DeadValue(MachineRepresentation::kTagged), node);
} else if (output_rep == MachineRepresentation::kBit) {
if (output_type->Is(Type::Boolean())) {
op = simplified()->ChangeBitToTagged();
@@ -489,7 +496,8 @@ Node* RepresentationChanger::GetFloat32RepresentationFor(
const Operator* op = nullptr;
if (output_type->Is(Type::None())) {
// This is an impossible value; it should not be used at runtime.
- return jsgraph()->DeadValue();
+ return jsgraph()->graph()->NewNode(
+ jsgraph()->common()->DeadValue(MachineRepresentation::kFloat32), node);
} else if (IsWord(output_rep)) {
if (output_type->Is(Type::Signed32())) {
// int32 -> float64 -> float32
@@ -549,7 +557,8 @@ Node* RepresentationChanger::GetFloat64RepresentationFor(
const Operator* op = nullptr;
if (output_type->Is(Type::None())) {
// This is an impossible value; it should not be used at runtime.
- return jsgraph()->DeadValue();
+ return jsgraph()->graph()->NewNode(
+ jsgraph()->common()->DeadValue(MachineRepresentation::kFloat64), node);
} else if (IsWord(output_rep)) {
if (output_type->Is(Type::Signed32())) {
op = machine()->ChangeInt32ToFloat64();
@@ -626,7 +635,8 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
const Operator* op = nullptr;
if (output_type->Is(Type::None())) {
// This is an impossible value; it should not be used at runtime.
- return jsgraph()->DeadValue();
+ return jsgraph()->graph()->NewNode(
+ jsgraph()->common()->DeadValue(MachineRepresentation::kWord32), node);
} else if (output_rep == MachineRepresentation::kBit) {
return node; // Sloppy comparison -> word32
} else if (output_rep == MachineRepresentation::kFloat64) {
@@ -637,7 +647,8 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
op = simplified()->CheckedFloat64ToInt32(
output_type->Maybe(Type::MinusZero())
? use_info.minus_zero_check()
- : CheckForMinusZeroMode::kDontCheckForMinusZero);
+ : CheckForMinusZeroMode::kDontCheckForMinusZero,
+ use_info.feedback());
} else if (output_type->Is(Type::Unsigned32())) {
op = machine()->ChangeFloat64ToUint32();
} else if (use_info.truncation().IsUsedAsWord32()) {
@@ -655,7 +666,8 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
op = simplified()->CheckedFloat64ToInt32(
output_type->Maybe(Type::MinusZero())
? use_info.minus_zero_check()
- : CheckForMinusZeroMode::kDontCheckForMinusZero);
+ : CheckForMinusZeroMode::kDontCheckForMinusZero,
+ use_info.feedback());
} else if (output_type->Is(Type::Unsigned32())) {
op = machine()->ChangeFloat64ToUint32();
} else if (use_info.truncation().IsUsedAsWord32()) {
@@ -671,12 +683,13 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
} else if (output_type->Is(Type::Signed32())) {
op = simplified()->ChangeTaggedToInt32();
} else if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
- op = simplified()->CheckedTaggedSignedToInt32();
+ op = simplified()->CheckedTaggedSignedToInt32(use_info.feedback());
} else if (use_info.type_check() == TypeCheckKind::kSigned32) {
op = simplified()->CheckedTaggedToInt32(
output_type->Maybe(Type::MinusZero())
? use_info.minus_zero_check()
- : CheckForMinusZeroMode::kDontCheckForMinusZero);
+ : CheckForMinusZeroMode::kDontCheckForMinusZero,
+ use_info.feedback());
} else if (output_type->Is(Type::Unsigned32())) {
op = simplified()->ChangeTaggedToUint32();
} else if (use_info.truncation().IsUsedAsWord32()) {
@@ -684,10 +697,10 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
op = simplified()->TruncateTaggedToWord32();
} else if (use_info.type_check() == TypeCheckKind::kNumber) {
op = simplified()->CheckedTruncateTaggedToWord32(
- CheckTaggedInputMode::kNumber);
+ CheckTaggedInputMode::kNumber, use_info.feedback());
} else if (use_info.type_check() == TypeCheckKind::kNumberOrOddball) {
op = simplified()->CheckedTruncateTaggedToWord32(
- CheckTaggedInputMode::kNumberOrOddball);
+ CheckTaggedInputMode::kNumberOrOddball, use_info.feedback());
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kWord32);
@@ -704,7 +717,7 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
if (output_type->Is(Type::Signed32())) {
return node;
} else if (output_type->Is(Type::Unsigned32())) {
- op = simplified()->CheckedUint32ToInt32();
+ op = simplified()->CheckedUint32ToInt32(use_info.feedback());
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kWord32);
@@ -762,7 +775,8 @@ Node* RepresentationChanger::GetBitRepresentationFor(
const Operator* op;
if (output_type->Is(Type::None())) {
// This is an impossible value; it should not be used at runtime.
- return jsgraph()->DeadValue();
+ return jsgraph()->graph()->NewNode(
+ jsgraph()->common()->DeadValue(MachineRepresentation::kBit), node);
} else if (output_rep == MachineRepresentation::kTagged ||
output_rep == MachineRepresentation::kTaggedPointer) {
if (output_type->Is(Type::BooleanOrNullOrUndefined())) {
@@ -807,7 +821,8 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
Node* node, MachineRepresentation output_rep, Type* output_type) {
if (output_type->Is(Type::None())) {
// This is an impossible value; it should not be used at runtime.
- return jsgraph()->DeadValue();
+ return jsgraph()->graph()->NewNode(
+ jsgraph()->common()->DeadValue(MachineRepresentation::kWord32), node);
} else if (output_rep == MachineRepresentation::kBit) {
return node; // Sloppy comparison -> word64
}
diff --git a/deps/v8/src/compiler/representation-change.h b/deps/v8/src/compiler/representation-change.h
index 52a3e75c8a..b23a3dac5b 100644
--- a/deps/v8/src/compiler/representation-change.h
+++ b/deps/v8/src/compiler/representation-change.h
@@ -147,13 +147,18 @@ inline std::ostream& operator<<(std::ostream& os, TypeCheckKind type_check) {
// to the preferred representation. The preferred representation might be
// insufficient to do the conversion (e.g. word32->float64 conv), so we also
// need the signedness information to produce the correct value.
+// Additionally, use info may contain {CheckParameters} which contains
+// information for the deoptimizer such as a CallIC on which speculation
+// should be disallowed if the check fails.
class UseInfo {
public:
UseInfo(MachineRepresentation representation, Truncation truncation,
- TypeCheckKind type_check = TypeCheckKind::kNone)
+ TypeCheckKind type_check = TypeCheckKind::kNone,
+ const VectorSlotPair& feedback = VectorSlotPair())
: representation_(representation),
truncation_(truncation),
- type_check_(type_check) {}
+ type_check_(type_check),
+ feedback_(feedback) {}
static UseInfo TruncatingWord32() {
return UseInfo(MachineRepresentation::kWord32, Truncation::Word32());
}
@@ -187,14 +192,16 @@ class UseInfo {
return UseInfo(MachineRepresentation::kTaggedPointer, Truncation::Any(),
TypeCheckKind::kHeapObject);
}
- static UseInfo CheckedSignedSmallAsTaggedSigned() {
+ static UseInfo CheckedSignedSmallAsTaggedSigned(
+ const VectorSlotPair& feedback) {
return UseInfo(MachineRepresentation::kTaggedSigned, Truncation::Any(),
- TypeCheckKind::kSignedSmall);
+ TypeCheckKind::kSignedSmall, feedback);
}
- static UseInfo CheckedSignedSmallAsWord32(IdentifyZeros identify_zeros) {
+ static UseInfo CheckedSignedSmallAsWord32(IdentifyZeros identify_zeros,
+ const VectorSlotPair& feedback) {
return UseInfo(MachineRepresentation::kWord32,
- Truncation::Any(identify_zeros),
- TypeCheckKind::kSignedSmall);
+ Truncation::Any(identify_zeros), TypeCheckKind::kSignedSmall,
+ feedback);
}
static UseInfo CheckedSigned32AsWord32(IdentifyZeros identify_zeros) {
return UseInfo(MachineRepresentation::kWord32,
@@ -238,11 +245,13 @@ class UseInfo {
? CheckForMinusZeroMode::kDontCheckForMinusZero
: CheckForMinusZeroMode::kCheckForMinusZero;
}
+ const VectorSlotPair& feedback() const { return feedback_; }
private:
MachineRepresentation representation_;
Truncation truncation_;
TypeCheckKind type_check_;
+ VectorSlotPair feedback_;
};
// Contains logic related to changing the representation of values for constants
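
UseInfo gaining a VectorSlotPair completes the feedback thread: representation changes that insert checked conversions (the CheckedInt32ToTaggedSigned, CheckedUint32ToTaggedSigned and related call sites above) can now hand the slot to the operator they build. A reduced stand-in of the extended class, with assumed type shapes:

    #include <cstdint>

    enum class TypeCheckKind { kNone, kSignedSmall };
    struct VectorSlotPair { uint32_t slot = 0; };  // assumed shape

    class UseInfoSketch {
     public:
      explicit UseInfoSketch(TypeCheckKind type_check,
                             VectorSlotPair feedback = VectorSlotPair())
          : type_check_(type_check), feedback_(feedback) {}

      // Mirrors the updated factory: callers now supply the feedback slot.
      static UseInfoSketch CheckedSignedSmallAsTaggedSigned(
          const VectorSlotPair& feedback) {
        return UseInfoSketch(TypeCheckKind::kSignedSmall, feedback);
      }

      const VectorSlotPair& feedback() const { return feedback_; }

     private:
      TypeCheckKind type_check_;
      VectorSlotPair feedback_;
    };
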
diff --git a/deps/v8/src/compiler/s390/code-generator-s390.cc b/deps/v8/src/compiler/s390/code-generator-s390.cc
index f49a8e540c..c0d3146be1 100644
--- a/deps/v8/src/compiler/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/s390/code-generator-s390.cc
@@ -156,45 +156,6 @@ static inline bool HasStackSlotInput(Instruction* instr, size_t index) {
namespace {
-class OutOfLineLoadNAN32 final : public OutOfLineCode {
- public:
- OutOfLineLoadNAN32(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final {
- __ LoadDoubleLiteral(result_, std::numeric_limits<float>::quiet_NaN(),
- kScratchReg);
- }
-
- private:
- DoubleRegister const result_;
-};
-
-class OutOfLineLoadNAN64 final : public OutOfLineCode {
- public:
- OutOfLineLoadNAN64(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final {
- __ LoadDoubleLiteral(result_, std::numeric_limits<double>::quiet_NaN(),
- kScratchReg);
- }
-
- private:
- DoubleRegister const result_;
-};
-
-class OutOfLineLoadZero final : public OutOfLineCode {
- public:
- OutOfLineLoadZero(CodeGenerator* gen, Register result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final { __ LoadImmP(result_, Operand::Zero()); }
-
- private:
- Register const result_;
-};
-
class OutOfLineRecordWrite final : public OutOfLineCode {
public:
OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register offset,
@@ -938,102 +899,6 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
__ asm_instr(value, operand); \
} while (0)
-#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, width) \
- do { \
- DoubleRegister result = i.OutputDoubleRegister(); \
- size_t index = 0; \
- AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode, index); \
- Register offset = operand.rb(); \
- if (HasRegisterInput(instr, 2)) { \
- __ CmpLogical32(offset, i.InputRegister(2)); \
- } else { \
- __ CmpLogical32(offset, i.InputImmediate(2)); \
- } \
- auto ool = new (zone()) OutOfLineLoadNAN##width(this, result); \
- __ bge(ool->entry()); \
- __ CleanUInt32(offset); \
- __ asm_instr(result, operand); \
- __ bind(ool->exit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
- do { \
- Register result = i.OutputRegister(); \
- size_t index = 0; \
- AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode, index); \
- Register offset = operand.rb(); \
- if (HasRegisterInput(instr, 2)) { \
- __ CmpLogical32(offset, i.InputRegister(2)); \
- } else { \
- __ CmpLogical32(offset, i.InputImmediate(2)); \
- } \
- auto ool = new (zone()) OutOfLineLoadZero(this, result); \
- __ bge(ool->entry()); \
- __ CleanUInt32(offset); \
- __ asm_instr(result, operand); \
- __ bind(ool->exit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_STORE_FLOAT32() \
- do { \
- Label done; \
- size_t index = 0; \
- AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode, index); \
- Register offset = operand.rb(); \
- if (HasRegisterInput(instr, 2)) { \
- __ CmpLogical32(offset, i.InputRegister(2)); \
- } else { \
- __ CmpLogical32(offset, i.InputImmediate(2)); \
- } \
- __ bge(&done); \
- DoubleRegister value = i.InputDoubleRegister(3); \
- __ CleanUInt32(offset); \
- __ StoreFloat32(value, operand); \
- __ bind(&done); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_STORE_DOUBLE() \
- do { \
- Label done; \
- size_t index = 0; \
- AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode, index); \
- DCHECK_EQ(kMode_MRR, mode); \
- Register offset = operand.rb(); \
- if (HasRegisterInput(instr, 2)) { \
- __ CmpLogical32(offset, i.InputRegister(2)); \
- } else { \
- __ CmpLogical32(offset, i.InputImmediate(2)); \
- } \
- __ bge(&done); \
- DoubleRegister value = i.InputDoubleRegister(3); \
- __ CleanUInt32(offset); \
- __ StoreDouble(value, operand); \
- __ bind(&done); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
- do { \
- Label done; \
- size_t index = 0; \
- AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode, index); \
- Register offset = operand.rb(); \
- if (HasRegisterInput(instr, 2)) { \
- __ CmpLogical32(offset, i.InputRegister(2)); \
- } else { \
- __ CmpLogical32(offset, i.InputImmediate(2)); \
- } \
- __ bge(&done); \
- Register value = i.InputRegister(3); \
- __ CleanUInt32(offset); \
- __ asm_instr(value, operand); \
- __ bind(&done); \
- } while (0)
-
void CodeGenerator::AssembleDeconstructFrame() {
__ LeaveFrame(StackFrame::MANUAL);
}
@@ -1219,8 +1084,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
if (instr->InputAt(0)->IsImmediate()) {
+#ifdef V8_TARGET_ARCH_S390X
+ Address wasm_code = reinterpret_cast<Address>(
+ i.ToConstant(instr->InputAt(0)).ToInt64());
+#else
Address wasm_code = reinterpret_cast<Address>(
i.ToConstant(instr->InputAt(0)).ToInt32());
+#endif
__ Call(wasm_code, rmode);
} else {
__ Call(i.InputRegister(0));
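
The new #ifdef reads the immediate call target with ToInt64 on S390X: funnelling a 64-bit code address through ToInt32 would truncate the upper half before the cast to Address. The failure mode in miniature:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t code_address = 0x0000020010000040ULL;  // above 4 GiB
      // 32-bit path: truncates before widening back to pointer size.
      uint64_t via_int32 =
          static_cast<uint64_t>(static_cast<uint32_t>(code_address));
      assert(via_int32 != code_address);  // upper 32 bits lost
      // 64-bit path (the S390X branch): the value round-trips intact.
      uint64_t via_int64 =
          static_cast<uint64_t>(static_cast<int64_t>(code_address));
      assert(via_int64 == code_address);
    }
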
@@ -1283,7 +1153,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ LoadP(kScratchReg,
FieldMemOperand(func, JSFunction::kContextOffset));
__ CmpP(cp, kScratchReg);
- __ Assert(eq, kWrongFunctionContext);
+ __ Assert(eq, AbortReason::kWrongFunctionContext);
}
__ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeOffset));
__ AddP(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -2107,7 +1977,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Label done;
__ ConvertDoubleToInt32(i.OutputRegister(0), i.InputDoubleRegister(0),
kRoundToNearest);
- __ b(Condition(0xe), &done, Label::kNear); // normal case
+ __ b(Condition(0xE), &done, Label::kNear); // normal case
__ lghi(i.OutputRegister(0), Operand::Zero());
__ bind(&done);
break;
@@ -2116,7 +1986,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Label done;
__ ConvertDoubleToUnsignedInt32(i.OutputRegister(0),
i.InputDoubleRegister(0));
- __ b(Condition(0xe), &done, Label::kNear); // normal case
+ __ b(Condition(0xE), &done, Label::kNear); // normal case
__ lghi(i.OutputRegister(0), Operand::Zero());
__ bind(&done);
break;
@@ -2127,7 +1997,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ lghi(i.OutputRegister(1), Operand(1));
}
__ ConvertDoubleToInt64(i.OutputRegister(0), i.InputDoubleRegister(0));
- __ b(Condition(0xe), &done, Label::kNear); // normal case
+ __ b(Condition(0xE), &done, Label::kNear); // normal case
if (i.OutputCount() > 1) {
__ lghi(i.OutputRegister(1), Operand::Zero());
} else {
@@ -2143,7 +2013,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
__ ConvertDoubleToUnsignedInt64(i.OutputRegister(0),
i.InputDoubleRegister(0));
- __ b(Condition(0xe), &done, Label::kNear); // normal case
+ __ b(Condition(0xE), &done, Label::kNear); // normal case
if (i.OutputCount() > 1) {
__ lghi(i.OutputRegister(1), Operand::Zero());
} else {
@@ -2156,7 +2026,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Label done;
__ ConvertFloat32ToInt32(i.OutputRegister(0), i.InputDoubleRegister(0),
kRoundToZero);
- __ b(Condition(0xe), &done, Label::kNear); // normal case
+ __ b(Condition(0xE), &done, Label::kNear); // normal case
__ lghi(i.OutputRegister(0), Operand::Zero());
__ bind(&done);
break;
@@ -2165,7 +2035,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Label done;
__ ConvertFloat32ToUnsignedInt32(i.OutputRegister(0),
i.InputDoubleRegister(0));
- __ b(Condition(0xe), &done, Label::kNear); // normal case
+ __ b(Condition(0xE), &done, Label::kNear); // normal case
__ lghi(i.OutputRegister(0), Operand::Zero());
__ bind(&done);
break;
@@ -2177,7 +2047,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
__ ConvertFloat32ToUnsignedInt64(i.OutputRegister(0),
i.InputDoubleRegister(0));
- __ b(Condition(0xe), &done, Label::kNear); // normal case
+ __ b(Condition(0xE), &done, Label::kNear); // normal case
if (i.OutputCount() > 1) {
__ lghi(i.OutputRegister(1), Operand::Zero());
} else {
@@ -2192,7 +2062,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ lghi(i.OutputRegister(1), Operand(1));
}
__ ConvertFloat32ToInt64(i.OutputRegister(0), i.InputDoubleRegister(0));
- __ b(Condition(0xe), &done, Label::kNear); // normal case
+ __ b(Condition(0xE), &done, Label::kNear); // normal case
if (i.OutputCount() > 1) {
__ lghi(i.OutputRegister(1), Operand::Zero());
} else {
@@ -2334,56 +2204,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kS390_Lay:
__ lay(i.OutputRegister(), i.MemoryOperand());
break;
- case kCheckedLoadInt8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(LoadB);
- break;
- case kCheckedLoadUint8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(LoadlB);
- break;
- case kCheckedLoadInt16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(LoadHalfWordP);
- break;
- case kCheckedLoadUint16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(LoadLogicalHalfWordP);
- break;
- case kCheckedLoadWord32:
- ASSEMBLE_CHECKED_LOAD_INTEGER(LoadlW);
- break;
- case kCheckedLoadWord64:
-#if V8_TARGET_ARCH_S390X
- ASSEMBLE_CHECKED_LOAD_INTEGER(LoadP);
-#else
- UNREACHABLE();
-#endif
- break;
- case kCheckedLoadFloat32:
- ASSEMBLE_CHECKED_LOAD_FLOAT(LoadFloat32, 32);
- break;
- case kCheckedLoadFloat64:
- ASSEMBLE_CHECKED_LOAD_FLOAT(LoadDouble, 64);
- break;
- case kCheckedStoreWord8:
- ASSEMBLE_CHECKED_STORE_INTEGER(StoreByte);
- break;
- case kCheckedStoreWord16:
- ASSEMBLE_CHECKED_STORE_INTEGER(StoreHalfWord);
- break;
- case kCheckedStoreWord32:
- ASSEMBLE_CHECKED_STORE_INTEGER(StoreW);
- break;
- case kCheckedStoreWord64:
-#if V8_TARGET_ARCH_S390X
- ASSEMBLE_CHECKED_STORE_INTEGER(StoreP);
-#else
- UNREACHABLE();
-#endif
- break;
- case kCheckedStoreFloat32:
- ASSEMBLE_CHECKED_STORE_FLOAT32();
- break;
- case kCheckedStoreFloat64:
- ASSEMBLE_CHECKED_STORE_DOUBLE();
- break;
case kAtomicLoadInt8:
__ LoadB(i.OutputRegister(), i.MemoryOperand());
break;
@@ -2629,7 +2449,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
- __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
+ __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
}
}
}
@@ -2762,7 +2582,7 @@ void CodeGenerator::AssembleConstructFrame() {
frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
- __ Abort(kShouldNotDirectlyEnterOsrFunction);
+ __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
// Unoptimized code jumps directly to this entrypoint while the unoptimized
// frame is still on the stack. Optimized code uses OSR values directly from
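
kWrongFunctionContext, kUnexpectedReturnFromWasmTrap and kShouldNotDirectlyEnterOsrFunction now live in a scoped AbortReason enum with a GetAbortReason lookup, splitting assert/abort messages from the deoptimization reasons. A stand-in of the split; the message strings are assumptions, not V8's exact text:

    enum class AbortReason {
      kWrongFunctionContext,
      kUnexpectedReturnFromWasmTrap,
      kShouldNotDirectlyEnterOsrFunction,
    };

    const char* GetAbortReason(AbortReason reason) {
      switch (reason) {
        case AbortReason::kWrongFunctionContext:
          return "wrong function context";
        case AbortReason::kUnexpectedReturnFromWasmTrap:
          return "unexpected return from wasm trap";
        case AbortReason::kShouldNotDirectlyEnterOsrFunction:
          return "should not directly enter OSR function";
      }
      return "";
    }
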
diff --git a/deps/v8/src/compiler/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
index 54f5a0c68b..457c5a1d82 100644
--- a/deps/v8/src/compiler/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
@@ -613,7 +613,8 @@ void VisitUnaryOp(InstructionSelector* selector, Node* node,
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsTrap()) {
inputs[input_count++] = g.UseImmediate(cont->trap_id());
selector->Emit(opcode, output_count, outputs, input_count, inputs);
@@ -688,7 +689,8 @@ void VisitBinOp(InstructionSelector* selector, Node* node,
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsTrap()) {
inputs[input_count++] = g.UseImmediate(cont->trap_id());
selector->Emit(opcode, output_count, outputs, input_count, inputs);
@@ -850,99 +852,6 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
// Architecture supports unaligned access, therefore VisitStore is used instead
void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitCheckedLoad(Node* node) {
- CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
- S390OperandGenerator g(this);
- Node* const base = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- ArchOpcode opcode = kArchNop;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedLoadWord32;
- break;
-#if V8_TARGET_ARCH_S390X
- case MachineRepresentation::kWord64:
- opcode = kCheckedLoadWord64;
- break;
-#endif
- case MachineRepresentation::kFloat32:
- opcode = kCheckedLoadFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedLoadFloat64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
-#if !V8_TARGET_ARCH_S390X
- case MachineRepresentation::kWord64: // Fall through.
-#endif
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- AddressingMode addressingMode = kMode_MRR;
- Emit(opcode | AddressingModeField::encode(addressingMode),
- g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset),
- g.UseOperand(length, OperandMode::kUint32Imm));
-}
-
-void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
- S390OperandGenerator g(this);
- Node* const base = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- Node* const value = node->InputAt(3);
- ArchOpcode opcode = kArchNop;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kCheckedStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kCheckedStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedStoreWord32;
- break;
-#if V8_TARGET_ARCH_S390X
- case MachineRepresentation::kWord64:
- opcode = kCheckedStoreWord64;
- break;
-#endif
- case MachineRepresentation::kFloat32:
- opcode = kCheckedStoreFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedStoreFloat64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
-#if !V8_TARGET_ARCH_S390X
- case MachineRepresentation::kWord64: // Fall through.
-#endif
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- AddressingMode addressingMode = kMode_MRR;
- Emit(opcode | AddressingModeField::encode(addressingMode), g.NoOutput(),
- g.UseRegister(base), g.UseRegister(offset),
- g.UseOperand(length, OperandMode::kUint32Imm), g.UseRegister(value));
-}
-
#if 0
static inline bool IsContiguousMask32(uint32_t value, int* mb, int* me) {
int mask_width = base::bits::CountPopulation(value);
@@ -987,7 +896,7 @@ void InstructionSelector::VisitWord64And(Node* node) {
if (m.left().IsWord64Shr()) {
// Adjust the mask such that it doesn't include any rotated bits.
if (mb > 63 - sh) mb = 63 - sh;
- sh = (64 - sh) & 0x3f;
+ sh = (64 - sh) & 0x3F;
} else {
// Adjust the mask such that it doesn't include any rotated bits.
if (me < sh) me = sh;
@@ -1075,7 +984,7 @@ void InstructionSelector::VisitWord64Shr(Node* node) {
IsContiguousMask64((uint64_t)(mleft.right().Value()) >> sh, &mb, &me)) {
// Adjust the mask such that it doesn't include any rotated bits.
if (mb > 63 - sh) mb = 63 - sh;
- sh = (64 - sh) & 0x3f;
+ sh = (64 - sh) & 0x3F;
if (mb >= me) {
bool match = false;
ArchOpcode opcode;
@@ -1249,6 +1158,8 @@ void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
#endif
+void InstructionSelector::VisitSpeculationFence(Node* node) { UNREACHABLE(); }
+
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
VisitWord32UnaryOp(this, node, kS390_Abs32, OperandMode::kNone);
}
@@ -1728,7 +1639,8 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
- cont->reason(), cont->frame_state());
+ cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
} else {
@@ -1816,7 +1728,8 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
DCHECK(input_count <= 8 && output_count <= 1);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -1921,7 +1834,8 @@ void VisitLoadAndTest(InstructionSelector* selector, InstructionCode opcode,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -2183,14 +2097,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), node->InputAt(1));
+ kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), node->InputAt(1));
+ kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
}
@@ -2351,7 +2265,7 @@ void InstructionSelector::EmitPrepareArguments(
// Poke any stack arguments.
int slot = kStackFrameExtraParamSlot;
for (PushParameter input : (*arguments)) {
- Emit(kS390_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
+ Emit(kS390_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
g.TempImmediate(slot));
++slot;
}
@@ -2361,19 +2275,20 @@ void InstructionSelector::EmitPrepareArguments(
int slot = 0;
for (PushParameter input : *arguments) {
- if (input.node() == nullptr) continue;
- num_slots +=
- input.type().representation() == MachineRepresentation::kFloat64
- ? kDoubleSize / kPointerSize
- : 1;
+ if (input.node == nullptr) continue;
+ num_slots += input.location.GetType().representation() ==
+ MachineRepresentation::kFloat64
+ ? kDoubleSize / kPointerSize
+ : 1;
}
Emit(kS390_StackClaim, g.NoOutput(), g.TempImmediate(num_slots));
for (PushParameter input : *arguments) {
// Skip any alignment holes in pushed nodes.
- if (input.node()) {
- Emit(kS390_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
+ if (input.node) {
+ Emit(kS390_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
g.TempImmediate(slot));
- slot += input.type().representation() == MachineRepresentation::kFloat64
+ slot += input.location.GetType().representation() ==
+ MachineRepresentation::kFloat64
? (kDoubleSize / kPointerSize)
: 1;
}
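The claim-then-store sequence above sizes the stack in pointer-width slots, so a float64 argument consumes kDoubleSize / kPointerSize of them. A worked example, assuming a 32-bit target (kPointerSize == 4, kDoubleSize == 8):

    // arguments = [int32 a, float64 b, int32 c]
    // num_slots = 1 + 8/4 + 1 = 4          -> kS390_StackClaim 4
    // stores: a -> slot 0, b -> slots 1-2, c -> slot 3
    // On a 64-bit target kDoubleSize / kPointerSize == 1, so every argument,
    // including a double, takes exactly one slot.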
@@ -2489,6 +2404,190 @@ void InstructionSelector::VisitAtomicOr(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitAtomicXor(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI32x4Splat(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4ExtractLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4ReplaceLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4Add(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4Sub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4Shl(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4ShrS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4Mul(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4MaxS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4MinS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4Eq(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4Ne(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4MinU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4MaxU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4ShrU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4Neg(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4GtS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4GeS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4GtU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4GeU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8Splat(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8ExtractLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8ReplaceLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8Shl(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8ShrS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8ShrU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8Add(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8AddSaturateS(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI16x8Sub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8SubSaturateS(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI16x8Mul(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8MinS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8MaxS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8Eq(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8Ne(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8AddSaturateU(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI16x8SubSaturateU(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI16x8MinU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8MaxU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8Neg(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8GtS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8GeS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8GtU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8GeU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16Neg(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16Splat(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16ExtractLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16ReplaceLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16Add(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16AddSaturateS(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI8x16Sub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16SubSaturateS(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI8x16MinS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16MaxS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16Eq(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16Ne(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16GtS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16GeS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16AddSaturateU(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI8x16SubSaturateU(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI8x16MinU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16MaxU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16GtU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16GeU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS128And(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS128Or(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS128Xor(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS128Not(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS128Zero(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Eq(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Ne(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Lt(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Le(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Splat(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4ExtractLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4ReplaceLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
+ const CallDescriptor* descriptor,
+ Node* node) {
+ // TODO(John): Port.
+}
+
+void InstructionSelector::VisitF32x4Add(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Sub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Mul(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Min(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Max(Node* node) { UNIMPLEMENTED(); }
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.cc b/deps/v8/src/compiler/simd-scalar-lowering.cc
index 582fbd6424..423d757a4f 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.cc
+++ b/deps/v8/src/compiler/simd-scalar-lowering.cc
@@ -19,8 +19,8 @@ namespace {
static const int kNumLanes32 = 4;
static const int kNumLanes16 = 8;
static const int kNumLanes8 = 16;
-static const int32_t kMask16 = 0xffff;
-static const int32_t kMask8 = 0xff;
+static const int32_t kMask16 = 0xFFFF;
+static const int32_t kMask8 = 0xFF;
static const int32_t kShift16 = 16;
static const int32_t kShift8 = 24;
} // anonymous
@@ -595,7 +595,7 @@ void SimdScalarLowering::LowerConvertFromFloat(Node* node, bool is_signed) {
Node* min = graph()->NewNode(
common()->Float64Constant(static_cast<double>(is_signed ? kMinInt : 0)));
Node* max = graph()->NewNode(common()->Float64Constant(
- static_cast<double>(is_signed ? kMaxInt : 0xffffffffu)));
+ static_cast<double>(is_signed ? kMaxInt : 0xFFFFFFFFu)));
for (int i = 0; i < kNumLanes32; ++i) {
Node* double_rep =
graph()->NewNode(machine()->ChangeFloat32ToFloat64(), rep[i]);
@@ -913,7 +913,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
DCHECK_EQ(1, node->InputCount());
Node** rep = GetReplacementsWithType(node->InputAt(0), rep_type);
Node* rep_node[kNumLanes32];
- Node* mask = graph()->NewNode(common()->Int32Constant(0xffffffff));
+ Node* mask = graph()->NewNode(common()->Int32Constant(0xFFFFFFFF));
for (int i = 0; i < kNumLanes32; ++i) {
rep_node[i] = graph()->NewNode(machine()->Word32Xor(), rep[i], mask);
}
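This hunk lowers a 128-bit bitwise NOT to four 32-bit XORs against an all-ones mask, relying on the per-lane identity x ^ 0xFFFFFFFF == ~x, for example:

    // 0x0000F00F ^ 0xFFFFFFFF == 0xFFFF0FF0 == ~0x0000F00F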
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index 9bdb7cfbaf..6e6c011fc1 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -92,7 +92,8 @@ UseInfo CheckedUseInfoAsWord32FromHint(
switch (hint) {
case NumberOperationHint::kSignedSmall:
case NumberOperationHint::kSignedSmallInputs:
- return UseInfo::CheckedSignedSmallAsWord32(identify_zeros);
+ return UseInfo::CheckedSignedSmallAsWord32(identify_zeros,
+ VectorSlotPair());
case NumberOperationHint::kSigned32:
return UseInfo::CheckedSigned32AsWord32(identify_zeros);
case NumberOperationHint::kNumber:
@@ -1345,17 +1346,6 @@ class RepresentationSelector {
void VisitSpeculativeAdditiveOp(Node* node, Truncation truncation,
SimplifiedLowering* lowering) {
- // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we can
- // only eliminate an unused speculative number operation if we know that
- // the inputs are PlainPrimitive, which excludes everything that's might
- // have side effects or throws during a ToNumber conversion. We are only
- // allowed to perform a number addition if neither input is a String, even
- // if the value is never used, so we further limit to NumberOrOddball in
- // order to explicitly exclude String inputs.
- if (BothInputsAre(node, Type::NumberOrOddball())) {
- if (truncation.IsUnused()) return VisitUnused(node);
- }
-
if (BothInputsAre(node, type_cache_.kAdditiveSafeIntegerOrMinusZero) &&
(GetUpperBound(node)->Is(Type::Signed32()) ||
GetUpperBound(node)->Is(Type::Unsigned32()) ||
@@ -1377,13 +1367,6 @@ class RepresentationSelector {
void VisitSpeculativeNumberModulus(Node* node, Truncation truncation,
SimplifiedLowering* lowering) {
- // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we
- // can only eliminate an unused speculative number operation if we know
- // that the inputs are PlainPrimitive, which excludes everything that's
- // might have side effects or throws during a ToNumber conversion.
- if (BothInputsAre(node, Type::PlainPrimitive())) {
- if (truncation.IsUnused()) return VisitUnused(node);
- }
if (BothInputsAre(node, Type::Unsigned32OrMinusZeroOrNaN()) &&
(truncation.IsUsedAsWord32() ||
NodeProperties::GetType(node)->Is(Type::Unsigned32()))) {
@@ -1514,8 +1497,20 @@ class RepresentationSelector {
return VisitLeaf(node, MachineRepresentation::kWord64);
case IrOpcode::kExternalConstant:
return VisitLeaf(node, MachineType::PointerRepresentation());
- case IrOpcode::kNumberConstant:
- return VisitLeaf(node, MachineRepresentation::kTagged);
+ case IrOpcode::kNumberConstant: {
+ double const value = OpParameter<double>(node);
+ int value_as_int;
+ if (DoubleToSmiInteger(value, &value_as_int)) {
+ VisitLeaf(node, MachineRepresentation::kTaggedSigned);
+ if (lower()) {
+ intptr_t smi = bit_cast<intptr_t>(Smi::FromInt(value_as_int));
+ DeferReplacement(node, lowering->jsgraph()->IntPtrConstant(smi));
+ }
+ return;
+ }
+ VisitLeaf(node, MachineRepresentation::kTagged);
+ return;
+ }
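The new kNumberConstant case skips the heap for Smi-representable values: the constant is retagged directly into a machine word. A minimal sketch of the encoding, assuming a 64-bit build where the Smi payload lives in the upper 32 bits of the word:

    int value_as_int = 0;
    if (DoubleToSmiInteger(3.0, &value_as_int)) {   // 3.0 has an exact Smi form
      intptr_t smi = bit_cast<intptr_t>(Smi::FromInt(value_as_int));
      // smi == static_cast<intptr_t>(3) << 32 on such a build
    }
    // Values like 4.5, NaN or -0.0 fail DoubleToSmiInteger and keep the
    // generic kTagged representation (a heap number) instead.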
case IrOpcode::kHeapConstant:
return VisitLeaf(node, MachineRepresentation::kTaggedPointer);
case IrOpcode::kPointerConstant: {
@@ -1668,13 +1663,6 @@ class RepresentationSelector {
case IrOpcode::kSpeculativeNumberLessThan:
case IrOpcode::kSpeculativeNumberLessThanOrEqual:
case IrOpcode::kSpeculativeNumberEqual: {
- // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we
- // can only eliminate an unused speculative number operation if we know
- // that the inputs are PlainPrimitive, which excludes everything that's
- // might have side effects or throws during a ToNumber conversion.
- if (BothInputsAre(node, Type::PlainPrimitive())) {
- if (truncation.IsUnused()) return VisitUnused(node);
- }
// Number comparisons reduce to integer comparisons for integer inputs.
if (TypeOf(node->InputAt(0))->Is(Type::Unsigned32()) &&
TypeOf(node->InputAt(1))->Is(Type::Unsigned32())) {
@@ -1707,8 +1695,10 @@ class RepresentationSelector {
Node* rhs = node->InputAt(1);
if (IsNodeRepresentationTagged(lhs) &&
IsNodeRepresentationTagged(rhs)) {
- VisitBinop(node, UseInfo::CheckedSignedSmallAsTaggedSigned(),
- MachineRepresentation::kBit);
+ VisitBinop(
+ node,
+ UseInfo::CheckedSignedSmallAsTaggedSigned(VectorSlotPair()),
+ MachineRepresentation::kBit);
ChangeToPureOp(
node, changer_->TaggedSignedOperatorFor(node->opcode()));
@@ -1755,13 +1745,6 @@ class RepresentationSelector {
return;
}
case IrOpcode::kSpeculativeNumberMultiply: {
- // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we
- // can only eliminate an unused speculative number operation if we know
- // that the inputs are PlainPrimitive, which excludes everything that's
- // might have side effects or throws during a ToNumber conversion.
- if (BothInputsAre(node, Type::PlainPrimitive())) {
- if (truncation.IsUnused()) return VisitUnused(node);
- }
if (BothInputsAre(node, Type::Integral32()) &&
(NodeProperties::GetType(node)->Is(Type::Signed32()) ||
NodeProperties::GetType(node)->Is(Type::Unsigned32()) ||
@@ -1785,7 +1768,7 @@ class RepresentationSelector {
// Handle the case when no int32 checks on inputs are necessary
// (but an overflow check is needed on the output).
if (BothInputsAre(node, Type::Signed32())) {
- // If both the inputs the feedback are int32, use the overflow op.
+ // If both inputs and feedback are int32, use the overflow op.
if (hint == NumberOperationHint::kSignedSmall ||
hint == NumberOperationHint::kSigned32) {
VisitBinop(node, UseInfo::TruncatingWord32(),
@@ -1836,13 +1819,6 @@ class RepresentationSelector {
return;
}
case IrOpcode::kSpeculativeNumberDivide: {
- // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we
- // can only eliminate an unused speculative number operation if we know
- // that the inputs are PlainPrimitive, which excludes everything that's
- // might have side effects or throws during a ToNumber conversion.
- if (BothInputsAre(node, Type::PlainPrimitive())) {
- if (truncation.IsUnused()) return VisitUnused(node);
- }
if (BothInputsAreUnsigned32(node) && truncation.IsUsedAsWord32()) {
// => unsigned Uint32Div
VisitWord32TruncatingBinop(node);
@@ -2014,13 +1990,6 @@ class RepresentationSelector {
return;
}
case IrOpcode::kSpeculativeNumberShiftLeft: {
- // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we
- // can only eliminate an unused speculative number operation if we know
- // that the inputs are PlainPrimitive, which excludes everything that's
- // might have side effects or throws during a ToNumber conversion.
- if (BothInputsAre(node, Type::PlainPrimitive())) {
- if (truncation.IsUnused()) return VisitUnused(node);
- }
if (BothInputsAre(node, Type::NumberOrOddball())) {
Type* rhs_type = GetUpperBound(node->InputAt(1));
VisitBinop(node, UseInfo::TruncatingWord32(),
@@ -2050,13 +2019,6 @@ class RepresentationSelector {
return;
}
case IrOpcode::kSpeculativeNumberShiftRight: {
- // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we
- // can only eliminate an unused speculative number operation if we know
- // that the inputs are PlainPrimitive, which excludes everything that's
- // might have side effects or throws during a ToNumber conversion.
- if (BothInputsAre(node, Type::PlainPrimitive())) {
- if (truncation.IsUnused()) return VisitUnused(node);
- }
if (BothInputsAre(node, Type::NumberOrOddball())) {
Type* rhs_type = GetUpperBound(node->InputAt(1));
VisitBinop(node, UseInfo::TruncatingWord32(),
@@ -2086,13 +2048,6 @@ class RepresentationSelector {
return;
}
case IrOpcode::kSpeculativeNumberShiftRightLogical: {
- // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we
- // can only eliminate an unused speculative number operation if we know
- // that the inputs are PlainPrimitive, which excludes everything that
- // might have side effects or throw during a ToNumber conversion.
- if (BothInputsAre(node, Type::PlainPrimitive())) {
- if (truncation.IsUnused()) return VisitUnused(node);
- }
NumberOperationHint hint = NumberOperationHintOf(node->op());
Type* rhs_type = GetUpperBound(node->InputAt(1));
if (rhs_type->Is(type_cache_.kZeroish) &&
@@ -2107,8 +2062,8 @@ class RepresentationSelector {
MachineRepresentation::kWord32, Type::Unsigned31());
if (lower()) {
node->RemoveInput(1);
- NodeProperties::ChangeOp(node,
- simplified()->CheckedUint32ToInt32());
+ NodeProperties::ChangeOp(
+ node, simplified()->CheckedUint32ToInt32(VectorSlotPair()));
}
return;
}
@@ -2315,6 +2270,11 @@ class RepresentationSelector {
if (lower()) DeferReplacement(node, node->InputAt(0));
return;
}
+ case IrOpcode::kNumberToString: {
+ VisitUnop(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTaggedPointer);
+ return;
+ }
case IrOpcode::kNumberToUint32: {
// Just change representation if necessary.
VisitUnop(node, UseInfo::TruncatingWord32(),
@@ -2365,6 +2325,13 @@ class RepresentationSelector {
return VisitUnop(node, UseInfo::AnyTagged(),
MachineRepresentation::kTaggedPointer);
}
+ case IrOpcode::kNewConsString: {
+ ProcessInput(node, 0, UseInfo::TaggedSigned()); // length
+ ProcessInput(node, 1, UseInfo::AnyTagged()); // first
+ ProcessInput(node, 2, UseInfo::AnyTagged()); // second
+ SetOutput(node, MachineRepresentation::kTaggedPointer);
+ return;
+ }
case IrOpcode::kStringEqual:
case IrOpcode::kStringLessThan:
case IrOpcode::kStringLessThanOrEqual: {
@@ -2391,6 +2358,12 @@ class RepresentationSelector {
}
return;
}
+ case IrOpcode::kStringCodePointAt: {
+ // TODO(turbofan): Allow builtins to return untagged values.
+ VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
+ MachineRepresentation::kTaggedSigned);
+ return;
+ }
case IrOpcode::kStringFromCharCode: {
VisitUnop(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kTaggedPointer);
@@ -2408,6 +2381,14 @@ class RepresentationSelector {
SetOutput(node, MachineRepresentation::kTaggedSigned);
return;
}
+ case IrOpcode::kStringLength: {
+ // TODO(bmeurer): The input representation should be TaggedPointer.
+ // Fix this once we have a dedicated StringConcat/JSStringAdd
+ // operator, which marks its output as TaggedPointer properly.
+ VisitUnop(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTaggedSigned);
+ return;
+ }
case IrOpcode::kStringToLowerCaseIntl:
case IrOpcode::kStringToUpperCaseIntl: {
VisitUnop(node, UseInfo::AnyTagged(),
@@ -2479,13 +2460,17 @@ class RepresentationSelector {
return;
}
case IrOpcode::kCheckSmi: {
+ const CheckParameters& params = CheckParametersOf(node->op());
if (SmiValuesAre32Bits() && truncation.IsUsedAsWord32()) {
VisitUnop(node,
- UseInfo::CheckedSignedSmallAsWord32(kDistinguishZeros),
+ UseInfo::CheckedSignedSmallAsWord32(kDistinguishZeros,
+ params.feedback()),
MachineRepresentation::kWord32);
} else {
- VisitUnop(node, UseInfo::CheckedSignedSmallAsTaggedSigned(),
- MachineRepresentation::kTaggedSigned);
+ VisitUnop(
+ node,
+ UseInfo::CheckedSignedSmallAsTaggedSigned(params.feedback()),
+ MachineRepresentation::kTaggedSigned);
}
if (lower()) DeferReplacement(node, node->InputAt(0));
return;
@@ -2589,6 +2574,11 @@ class RepresentationSelector {
}
return;
}
+ case IrOpcode::kNumberIsFloat64Hole: {
+ VisitUnop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kBit);
+ return;
+ }
case IrOpcode::kTransitionAndStoreElement: {
Type* value_type = TypeOf(node->InputAt(2));
@@ -2977,7 +2967,6 @@ class RepresentationSelector {
}
ProcessRemainingInputs(node, 1);
SetOutput(node, representation);
- if (lower()) DeferReplacement(node, node->InputAt(0));
return;
}
@@ -3702,7 +3691,7 @@ void SimplifiedLowering::DoShift(Node* node, Operator const* op,
if (!rhs_type->Is(type_cache_.kZeroToThirtyOne)) {
Node* const rhs = NodeProperties::GetValueInput(node, 1);
node->ReplaceInput(1, graph()->NewNode(machine()->Word32And(), rhs,
- jsgraph()->Int32Constant(0x1f)));
+ jsgraph()->Int32Constant(0x1F)));
}
ChangeToPureOp(node, op);
}
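The Word32And with 0x1F enforces the JavaScript rule that 32-bit shift counts are taken modulo 32; it is inserted only when the right-hand side is not already known to lie in [0, 31]. For example:

    // JS semantics preserved by the mask:
    //   (1 << 33) === 2   // 33 & 0x1F == 1
    //   (1 << 32) === 1   // 32 & 0x1F == 0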
diff --git a/deps/v8/src/compiler/simplified-lowering.h b/deps/v8/src/compiler/simplified-lowering.h
index eaa148ee04..a78d885e6e 100644
--- a/deps/v8/src/compiler/simplified-lowering.h
+++ b/deps/v8/src/compiler/simplified-lowering.h
@@ -20,7 +20,7 @@ class RepresentationSelector;
class SourcePositionTable;
class TypeCache;
-class SimplifiedLowering final {
+class V8_EXPORT_PRIVATE SimplifiedLowering final {
public:
SimplifiedLowering(JSGraph* jsgraph, Zone* zone,
SourcePositionTable* source_positions);
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index 04bbc7bba8..9978bae122 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -149,9 +149,7 @@ CheckFloat64HoleMode CheckFloat64HoleModeOf(const Operator* op) {
CheckForMinusZeroMode CheckMinusZeroModeOf(const Operator* op) {
DCHECK(op->opcode() == IrOpcode::kChangeFloat64ToTagged ||
- op->opcode() == IrOpcode::kCheckedInt32Mul ||
- op->opcode() == IrOpcode::kCheckedFloat64ToInt32 ||
- op->opcode() == IrOpcode::kCheckedTaggedToInt32);
+ op->opcode() == IrOpcode::kCheckedInt32Mul);
return OpParameter<CheckForMinusZeroMode>(op);
}
@@ -215,15 +213,20 @@ size_t hash_value(MapsParameterInfo const& p) { return hash_value(p.maps()); }
bool operator==(CheckMapsParameters const& lhs,
CheckMapsParameters const& rhs) {
- return lhs.flags() == rhs.flags() && lhs.maps() == rhs.maps();
+ return lhs.flags() == rhs.flags() && lhs.maps() == rhs.maps() &&
+ lhs.feedback() == rhs.feedback();
}
size_t hash_value(CheckMapsParameters const& p) {
- return base::hash_combine(p.flags(), p.maps());
+ return base::hash_combine(p.flags(), p.maps(), p.feedback());
}
std::ostream& operator<<(std::ostream& os, CheckMapsParameters const& p) {
- return os << p.flags() << p.maps_info();
+ os << p.flags() << p.maps_info();
+ if (p.feedback().IsValid()) {
+ os << "; " << p.feedback();
+ }
+ return os;
}
CheckMapsParameters const& CheckMapsParametersOf(Operator const* op) {
@@ -256,8 +259,7 @@ std::ostream& operator<<(std::ostream& os, CheckTaggedInputMode mode) {
}
CheckTaggedInputMode CheckTaggedInputModeOf(const Operator* op) {
- DCHECK(op->opcode() == IrOpcode::kCheckedTaggedToFloat64 ||
- op->opcode() == IrOpcode::kCheckedTruncateTaggedToWord32);
+ DCHECK(op->opcode() == IrOpcode::kCheckedTaggedToFloat64);
return OpParameter<CheckTaggedInputMode>(op);
}
@@ -271,9 +273,28 @@ std::ostream& operator<<(std::ostream& os, GrowFastElementsMode mode) {
UNREACHABLE();
}
-GrowFastElementsMode GrowFastElementsModeOf(const Operator* op) {
+bool operator==(const GrowFastElementsParameters& lhs,
+ const GrowFastElementsParameters& rhs) {
+ return lhs.mode() == rhs.mode() && lhs.feedback() == rhs.feedback();
+}
+
+inline size_t hash_value(const GrowFastElementsParameters& params) {
+ return base::hash_combine(params.mode(), params.feedback());
+}
+
+std::ostream& operator<<(std::ostream& os,
+ const GrowFastElementsParameters& params) {
+ os << params.mode();
+ if (params.feedback().IsValid()) {
+ os << params.feedback();
+ }
+ return os;
+}
+
+const GrowFastElementsParameters& GrowFastElementsParametersOf(
+ const Operator* op) {
DCHECK_EQ(IrOpcode::kMaybeGrowFastElements, op->opcode());
- return OpParameter<GrowFastElementsMode>(op);
+ return OpParameter<GrowFastElementsParameters>(op);
}
bool operator==(ElementsTransition const& lhs, ElementsTransition const& rhs) {
@@ -520,9 +541,9 @@ UnicodeEncoding UnicodeEncodingOf(const Operator* op) {
return OpParameter<UnicodeEncoding>(op);
}
-BailoutReason BailoutReasonOf(const Operator* op) {
+AbortReason AbortReasonOf(const Operator* op) {
DCHECK_EQ(IrOpcode::kRuntimeAbort, op->opcode());
- return OpParameter<BailoutReason>(op);
+ return static_cast<AbortReason>(OpParameter<int>(op));
}
DeoptimizeReason DeoptimizeReasonOf(const Operator* op) {
@@ -530,6 +551,54 @@ DeoptimizeReason DeoptimizeReasonOf(const Operator* op) {
return OpParameter<DeoptimizeReason>(op);
}
+const CheckTaggedInputParameters& CheckTaggedInputParametersOf(
+ const Operator* op) {
+ DCHECK(op->opcode() == IrOpcode::kCheckedTruncateTaggedToWord32);
+ return OpParameter<CheckTaggedInputParameters>(op);
+}
+
+std::ostream& operator<<(std::ostream& os,
+ const CheckTaggedInputParameters& params) {
+ os << params.mode();
+ if (params.feedback().IsValid()) {
+ os << "; " << params.feedback();
+ }
+ return os;
+}
+
+size_t hash_value(const CheckTaggedInputParameters& params) {
+ return base::hash_combine(params.mode(), params.feedback());
+}
+
+bool operator==(CheckTaggedInputParameters const& lhs,
+ CheckTaggedInputParameters const& rhs) {
+ return lhs.mode() == rhs.mode() && lhs.feedback() == rhs.feedback();
+}
+
+const CheckMinusZeroParameters& CheckMinusZeroParametersOf(const Operator* op) {
+ DCHECK(IrOpcode::kCheckedTaggedToInt32 == op->opcode() ||
+ IrOpcode::kCheckedFloat64ToInt32 == op->opcode());
+ return OpParameter<CheckMinusZeroParameters>(op);
+}
+
+std::ostream& operator<<(std::ostream& os,
+ const CheckMinusZeroParameters& params) {
+ os << params.mode();
+ if (params.feedback().IsValid()) {
+ os << "; " << params.feedback();
+ }
+ return os;
+}
+
+size_t hash_value(const CheckMinusZeroParameters& params) {
+ return base::hash_combine(params.mode(), params.feedback());
+}
+
+bool operator==(CheckMinusZeroParameters const& lhs,
+ CheckMinusZeroParameters const& rhs) {
+ return lhs.mode() == rhs.mode() && lhs.feedback() == rhs.feedback();
+}
+
#define PURE_OP_LIST(V) \
V(BooleanNot, Operator::kNoProperties, 1, 0) \
V(NumberEqual, Operator::kCommutative, 2, 0) \
@@ -581,6 +650,7 @@ DeoptimizeReason DeoptimizeReasonOf(const Operator* op) {
V(NumberTrunc, Operator::kNoProperties, 1, 0) \
V(NumberToBoolean, Operator::kNoProperties, 1, 0) \
V(NumberToInt32, Operator::kNoProperties, 1, 0) \
+ V(NumberToString, Operator::kNoProperties, 1, 0) \
V(NumberToUint32, Operator::kNoProperties, 1, 0) \
V(NumberToUint8Clamped, Operator::kNoProperties, 1, 0) \
V(NumberSilenceNaN, Operator::kNoProperties, 1, 0) \
@@ -588,8 +658,11 @@ DeoptimizeReason DeoptimizeReasonOf(const Operator* op) {
V(StringCharAt, Operator::kNoProperties, 2, 1) \
V(StringCharCodeAt, Operator::kNoProperties, 2, 1) \
V(SeqStringCharCodeAt, Operator::kNoProperties, 2, 1) \
+ V(StringCodePointAt, Operator::kNoProperties, 2, 1) \
+ V(SeqStringCodePointAt, Operator::kNoProperties, 2, 1) \
V(StringFromCharCode, Operator::kNoProperties, 1, 0) \
V(StringIndexOf, Operator::kNoProperties, 3, 0) \
+ V(StringLength, Operator::kNoProperties, 1, 0) \
V(StringToLowerCaseIntl, Operator::kNoProperties, 1, 0) \
V(StringToUpperCaseIntl, Operator::kNoProperties, 1, 0) \
V(TypeOf, Operator::kNoProperties, 1, 1) \
@@ -626,6 +699,7 @@ DeoptimizeReason DeoptimizeReasonOf(const Operator* op) {
V(ObjectIsString, Operator::kNoProperties, 1, 0) \
V(ObjectIsSymbol, Operator::kNoProperties, 1, 0) \
V(ObjectIsUndetectable, Operator::kNoProperties, 1, 0) \
+ V(NumberIsFloat64Hole, Operator::kNoProperties, 1, 0) \
V(ConvertTaggedHoleToUndefined, Operator::kNoProperties, 1, 0) \
V(SameValue, Operator::kCommutative, 2, 0) \
V(ReferenceEqual, Operator::kCommutative, 2, 0) \
@@ -633,6 +707,7 @@ DeoptimizeReason DeoptimizeReasonOf(const Operator* op) {
V(StringLessThan, Operator::kNoProperties, 2, 0) \
V(StringLessThanOrEqual, Operator::kNoProperties, 2, 0) \
V(ToBoolean, Operator::kNoProperties, 1, 0) \
+ V(NewConsString, Operator::kNoProperties, 3, 0) \
V(MaskIndexWithBound, Operator::kNoProperties, 2, 0)
#define SPECULATIVE_NUMBER_BINOP_LIST(V) \
@@ -642,30 +717,32 @@ DeoptimizeReason DeoptimizeReasonOf(const Operator* op) {
V(SpeculativeNumberLessThanOrEqual)
#define CHECKED_OP_LIST(V) \
- V(CheckBounds, 2, 1) \
+ V(CheckEqualsInternalizedString, 2, 0) \
+ V(CheckEqualsSymbol, 2, 0) \
V(CheckHeapObject, 1, 1) \
V(CheckInternalizedString, 1, 1) \
- V(CheckNumber, 1, 1) \
+ V(CheckNotTaggedHole, 1, 1) \
V(CheckReceiver, 1, 1) \
- V(CheckSmi, 1, 1) \
- V(CheckString, 1, 1) \
V(CheckSeqString, 1, 1) \
V(CheckSymbol, 1, 1) \
- V(CheckNotTaggedHole, 1, 1) \
- V(CheckEqualsInternalizedString, 2, 0) \
- V(CheckEqualsSymbol, 2, 0) \
V(CheckedInt32Add, 2, 1) \
- V(CheckedInt32Sub, 2, 1) \
V(CheckedInt32Div, 2, 1) \
V(CheckedInt32Mod, 2, 1) \
+ V(CheckedInt32Sub, 2, 1) \
V(CheckedUint32Div, 2, 1) \
- V(CheckedUint32Mod, 2, 1) \
- V(CheckedUint32ToInt32, 1, 1) \
- V(CheckedUint32ToTaggedSigned, 1, 1) \
+ V(CheckedUint32Mod, 2, 1)
+
+#define CHECKED_WITH_FEEDBACK_OP_LIST(V) \
+ V(CheckBounds, 2, 1) \
+ V(CheckNumber, 1, 1) \
+ V(CheckSmi, 1, 1) \
+ V(CheckString, 1, 1) \
V(CheckedInt32ToTaggedSigned, 1, 1) \
V(CheckedTaggedSignedToInt32, 1, 1) \
+ V(CheckedTaggedToTaggedPointer, 1, 1) \
V(CheckedTaggedToTaggedSigned, 1, 1) \
- V(CheckedTaggedToTaggedPointer, 1, 1)
+ V(CheckedUint32ToInt32, 1, 1) \
+ V(CheckedUint32ToTaggedSigned, 1, 1)
struct SimplifiedOperatorGlobalCache final {
#define PURE(Name, properties, value_input_count, control_input_count) \
@@ -689,6 +766,18 @@ struct SimplifiedOperatorGlobalCache final {
CHECKED_OP_LIST(CHECKED)
#undef CHECKED
+#define CHECKED_WITH_FEEDBACK(Name, value_input_count, value_output_count) \
+ struct Name##Operator final : public Operator1<CheckParameters> { \
+ Name##Operator() \
+ : Operator1<CheckParameters>( \
+ IrOpcode::k##Name, Operator::kFoldable | Operator::kNoThrow, \
+ #Name, value_input_count, 1, 1, value_output_count, 1, 0, \
+ CheckParameters(VectorSlotPair())) {} \
+ }; \
+ Name##Operator k##Name;
+ CHECKED_WITH_FEEDBACK_OP_LIST(CHECKED_WITH_FEEDBACK)
+#undef CHECKED_WITH_FEEDBACK
+
template <DeoptimizeReason kDeoptimizeReason>
struct CheckIfOperator final : public Operator1<DeoptimizeReason> {
CheckIfOperator()
@@ -772,12 +861,13 @@ struct SimplifiedOperatorGlobalCache final {
template <CheckForMinusZeroMode kMode>
struct CheckedFloat64ToInt32Operator final
- : public Operator1<CheckForMinusZeroMode> {
+ : public Operator1<CheckMinusZeroParameters> {
CheckedFloat64ToInt32Operator()
- : Operator1<CheckForMinusZeroMode>(
+ : Operator1<CheckMinusZeroParameters>(
IrOpcode::kCheckedFloat64ToInt32,
Operator::kFoldable | Operator::kNoThrow, "CheckedFloat64ToInt32",
- 1, 1, 1, 1, 1, 0, kMode) {}
+ 1, 1, 1, 1, 1, 0,
+ CheckMinusZeroParameters(kMode, VectorSlotPair())) {}
};
CheckedFloat64ToInt32Operator<CheckForMinusZeroMode::kCheckForMinusZero>
kCheckedFloat64ToInt32CheckForMinusZeroOperator;
@@ -786,12 +876,13 @@ struct SimplifiedOperatorGlobalCache final {
template <CheckForMinusZeroMode kMode>
struct CheckedTaggedToInt32Operator final
- : public Operator1<CheckForMinusZeroMode> {
+ : public Operator1<CheckMinusZeroParameters> {
CheckedTaggedToInt32Operator()
- : Operator1<CheckForMinusZeroMode>(
+ : Operator1<CheckMinusZeroParameters>(
IrOpcode::kCheckedTaggedToInt32,
Operator::kFoldable | Operator::kNoThrow, "CheckedTaggedToInt32",
- 1, 1, 1, 1, 1, 0, kMode) {}
+ 1, 1, 1, 1, 1, 0,
+ CheckMinusZeroParameters(kMode, VectorSlotPair())) {}
};
CheckedTaggedToInt32Operator<CheckForMinusZeroMode::kCheckForMinusZero>
kCheckedTaggedToInt32CheckForMinusZeroOperator;
@@ -814,12 +905,13 @@ struct SimplifiedOperatorGlobalCache final {
template <CheckTaggedInputMode kMode>
struct CheckedTruncateTaggedToWord32Operator final
- : public Operator1<CheckTaggedInputMode> {
+ : public Operator1<CheckTaggedInputParameters> {
CheckedTruncateTaggedToWord32Operator()
- : Operator1<CheckTaggedInputMode>(
+ : Operator1<CheckTaggedInputParameters>(
IrOpcode::kCheckedTruncateTaggedToWord32,
Operator::kFoldable | Operator::kNoThrow,
- "CheckedTruncateTaggedToWord32", 1, 1, 1, 1, 1, 0, kMode) {}
+ "CheckedTruncateTaggedToWord32", 1, 1, 1, 1, 1, 0,
+ CheckTaggedInputParameters(kMode, VectorSlotPair())) {}
};
CheckedTruncateTaggedToWord32Operator<CheckTaggedInputMode::kNumber>
kCheckedTruncateTaggedToWord32NumberOperator;
@@ -867,6 +959,20 @@ struct SimplifiedOperatorGlobalCache final {
};
EnsureWritableFastElementsOperator kEnsureWritableFastElements;
+ template <GrowFastElementsMode kMode>
+ struct GrowFastElementsOperator final
+ : public Operator1<GrowFastElementsParameters> {
+ GrowFastElementsOperator()
+ : Operator1(IrOpcode::kMaybeGrowFastElements, Operator::kNoThrow,
+ "MaybeGrowFastElements", 4, 1, 1, 1, 1, 0,
+ GrowFastElementsParameters(kMode, VectorSlotPair())) {}
+ };
+
+ GrowFastElementsOperator<GrowFastElementsMode::kDoubleElements>
+ kGrowFastElementsOperatorDoubleElements;
+ GrowFastElementsOperator<GrowFastElementsMode::kSmiOrObjectElements>
+ kGrowFastElementsOperatorSmiOrObjectElements;
+
struct LoadFieldByIndexOperator final : public Operator {
LoadFieldByIndexOperator()
: Operator( // --
@@ -934,13 +1040,38 @@ GET_FROM_CACHE(FindOrderedHashMapEntryForInt32Key)
GET_FROM_CACHE(LoadFieldByIndex)
#undef GET_FROM_CACHE
-const Operator* SimplifiedOperatorBuilder::RuntimeAbort(BailoutReason reason) {
- return new (zone()) Operator1<BailoutReason>( // --
- IrOpcode::kRuntimeAbort, // opcode
- Operator::kNoThrow | Operator::kNoDeopt, // flags
- "RuntimeAbort", // name
- 0, 1, 1, 0, 1, 0, // counts
- reason); // parameter
+#define GET_FROM_CACHE_WITH_FEEDBACK(Name, value_input_count, \
+ value_output_count) \
+ const Operator* SimplifiedOperatorBuilder::Name( \
+ const VectorSlotPair& feedback) { \
+ if (!feedback.IsValid()) { \
+ return &cache_.k##Name; \
+ } \
+ return new (zone()) Operator1<CheckParameters>( \
+ IrOpcode::k##Name, Operator::kFoldable | Operator::kNoThrow, #Name, \
+ value_input_count, 1, 1, value_output_count, 1, 0, \
+ CheckParameters(feedback)); \
+ }
+CHECKED_WITH_FEEDBACK_OP_LIST(GET_FROM_CACHE_WITH_FEEDBACK)
+#undef GET_FROM_CACHE_WITH_FEEDBACK
+
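The macro encodes the allocation policy used throughout this patch: a feedback-free operator is a shared singleton from SimplifiedOperatorGlobalCache, while valid feedback forces a fresh zone allocation carrying a CheckParameters. A hedged usage sketch (all names are from this patch; feedback stands for any valid VectorSlotPair):

    // Feedback-free: returns the cached singleton &cache_.kCheckString.
    const Operator* a = simplified()->CheckString(VectorSlotPair());
    // feedback.IsValid(): returns a new (zone()) Operator1<CheckParameters>.
    const Operator* b = simplified()->CheckString(feedback);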
+bool IsCheckedWithFeedback(const Operator* op) {
+#define CASE(Name, ...) case IrOpcode::k##Name:
+ switch (op->opcode()) {
+ CHECKED_WITH_FEEDBACK_OP_LIST(CASE) return true;
+ default:
+ return false;
+ }
+#undef CASE
+}
+
+const Operator* SimplifiedOperatorBuilder::RuntimeAbort(AbortReason reason) {
+ return new (zone()) Operator1<int>( // --
+ IrOpcode::kRuntimeAbort, // opcode
+ Operator::kNoThrow | Operator::kNoDeopt, // flags
+ "RuntimeAbort", // name
+ 0, 1, 1, 0, 1, 0, // counts
+ static_cast<int>(reason)); // parameter
}
const Operator* SimplifiedOperatorBuilder::CheckIf(DeoptimizeReason reason) {
@@ -977,25 +1108,35 @@ const Operator* SimplifiedOperatorBuilder::CheckedInt32Mul(
}
const Operator* SimplifiedOperatorBuilder::CheckedFloat64ToInt32(
- CheckForMinusZeroMode mode) {
- switch (mode) {
- case CheckForMinusZeroMode::kCheckForMinusZero:
- return &cache_.kCheckedFloat64ToInt32CheckForMinusZeroOperator;
- case CheckForMinusZeroMode::kDontCheckForMinusZero:
- return &cache_.kCheckedFloat64ToInt32DontCheckForMinusZeroOperator;
+ CheckForMinusZeroMode mode, const VectorSlotPair& feedback) {
+ if (!feedback.IsValid()) {
+ switch (mode) {
+ case CheckForMinusZeroMode::kCheckForMinusZero:
+ return &cache_.kCheckedFloat64ToInt32CheckForMinusZeroOperator;
+ case CheckForMinusZeroMode::kDontCheckForMinusZero:
+ return &cache_.kCheckedFloat64ToInt32DontCheckForMinusZeroOperator;
+ }
}
- UNREACHABLE();
+ return new (zone()) Operator1<CheckMinusZeroParameters>(
+ IrOpcode::kCheckedFloat64ToInt32,
+ Operator::kFoldable | Operator::kNoThrow, "CheckedFloat64ToInt32", 1, 1,
+ 1, 1, 1, 0, CheckMinusZeroParameters(mode, feedback));
}
const Operator* SimplifiedOperatorBuilder::CheckedTaggedToInt32(
- CheckForMinusZeroMode mode) {
- switch (mode) {
- case CheckForMinusZeroMode::kCheckForMinusZero:
- return &cache_.kCheckedTaggedToInt32CheckForMinusZeroOperator;
- case CheckForMinusZeroMode::kDontCheckForMinusZero:
- return &cache_.kCheckedTaggedToInt32DontCheckForMinusZeroOperator;
+ CheckForMinusZeroMode mode, const VectorSlotPair& feedback) {
+ if (!feedback.IsValid()) {
+ switch (mode) {
+ case CheckForMinusZeroMode::kCheckForMinusZero:
+ return &cache_.kCheckedTaggedToInt32CheckForMinusZeroOperator;
+ case CheckForMinusZeroMode::kDontCheckForMinusZero:
+ return &cache_.kCheckedTaggedToInt32DontCheckForMinusZeroOperator;
+ }
}
- UNREACHABLE();
+ return new (zone()) Operator1<CheckMinusZeroParameters>(
+ IrOpcode::kCheckedTaggedToInt32, Operator::kFoldable | Operator::kNoThrow,
+ "CheckedTaggedToInt32", 1, 1, 1, 1, 1, 0,
+ CheckMinusZeroParameters(mode, feedback));
}
const Operator* SimplifiedOperatorBuilder::CheckedTaggedToFloat64(
@@ -1010,19 +1151,25 @@ const Operator* SimplifiedOperatorBuilder::CheckedTaggedToFloat64(
}
const Operator* SimplifiedOperatorBuilder::CheckedTruncateTaggedToWord32(
- CheckTaggedInputMode mode) {
- switch (mode) {
- case CheckTaggedInputMode::kNumber:
- return &cache_.kCheckedTruncateTaggedToWord32NumberOperator;
- case CheckTaggedInputMode::kNumberOrOddball:
- return &cache_.kCheckedTruncateTaggedToWord32NumberOrOddballOperator;
+ CheckTaggedInputMode mode, const VectorSlotPair& feedback) {
+ if (!feedback.IsValid()) {
+ switch (mode) {
+ case CheckTaggedInputMode::kNumber:
+ return &cache_.kCheckedTruncateTaggedToWord32NumberOperator;
+ case CheckTaggedInputMode::kNumberOrOddball:
+ return &cache_.kCheckedTruncateTaggedToWord32NumberOrOddballOperator;
+ }
}
- UNREACHABLE();
+ return new (zone()) Operator1<CheckTaggedInputParameters>(
+ IrOpcode::kCheckedTruncateTaggedToWord32,
+ Operator::kFoldable | Operator::kNoThrow, "CheckedTruncateTaggedToWord32",
+ 1, 1, 1, 1, 1, 0, CheckTaggedInputParameters(mode, feedback));
}
-const Operator* SimplifiedOperatorBuilder::CheckMaps(CheckMapsFlags flags,
- ZoneHandleSet<Map> maps) {
- CheckMapsParameters const parameters(flags, maps);
+const Operator* SimplifiedOperatorBuilder::CheckMaps(
+ CheckMapsFlags flags, ZoneHandleSet<Map> maps,
+ const VectorSlotPair& feedback) {
+ CheckMapsParameters const parameters(flags, maps, feedback);
return new (zone()) Operator1<CheckMapsParameters>( // --
IrOpcode::kCheckMaps, // opcode
Operator::kNoThrow | Operator::kNoWrite, // flags
@@ -1096,13 +1243,21 @@ const Operator* SimplifiedOperatorBuilder::EnsureWritableFastElements() {
}
const Operator* SimplifiedOperatorBuilder::MaybeGrowFastElements(
- GrowFastElementsMode mode) {
- return new (zone()) Operator1<GrowFastElementsMode>( // --
- IrOpcode::kMaybeGrowFastElements, // opcode
- Operator::kNoThrow, // flags
- "MaybeGrowFastElements", // name
- 4, 1, 1, 1, 1, 0, // counts
- mode); // parameter
+ GrowFastElementsMode mode, const VectorSlotPair& feedback) {
+ if (!feedback.IsValid()) {
+ switch (mode) {
+ case GrowFastElementsMode::kDoubleElements:
+ return &cache_.kGrowFastElementsOperatorDoubleElements;
+ case GrowFastElementsMode::kSmiOrObjectElements:
+ return &cache_.kGrowFastElementsOperatorSmiOrObjectElements;
+ }
+ }
+ return new (zone()) Operator1<GrowFastElementsParameters>( // --
+ IrOpcode::kMaybeGrowFastElements, // opcode
+ Operator::kNoThrow, // flags
+ "MaybeGrowFastElements", // name
+ 4, 1, 1, 1, 1, 0, // counts
+ GrowFastElementsParameters(mode, feedback)); // parameter
}
const Operator* SimplifiedOperatorBuilder::TransitionElementsKind(
@@ -1160,6 +1315,23 @@ bool IsRestLengthOf(const Operator* op) {
return OpParameter<ArgumentsLengthParameters>(op).is_rest_length;
}
+bool operator==(CheckParameters const& lhs, CheckParameters const& rhs) {
+ return lhs.feedback() == rhs.feedback();
+}
+
+size_t hash_value(CheckParameters const& p) { return hash_value(p.feedback()); }
+
+std::ostream& operator<<(std::ostream& os, CheckParameters const& p) {
+ return os << p.feedback();
+}
+
+CheckParameters const& CheckParametersOf(Operator const* op) {
+#define MAKE_OR(name, arg2, arg3) op->opcode() == IrOpcode::k##name ||
+ CHECK((CHECKED_WITH_FEEDBACK_OP_LIST(MAKE_OR) false));
+#undef MAKE_OR
+ return OpParameter<CheckParameters>(op);
+}
+
const Operator* SimplifiedOperatorBuilder::NewDoubleElements(
PretenureFlag pretenure) {
return new (zone()) Operator1<PretenureFlag>( // --
@@ -1292,6 +1464,7 @@ const Operator* SimplifiedOperatorBuilder::TransitionAndStoreNonNumberElement(
#undef PURE_OP_LIST
#undef SPECULATIVE_NUMBER_BINOP_LIST
+#undef CHECKED_WITH_FEEDBACK_OP_LIST
#undef CHECKED_OP_LIST
#undef ACCESS_OP_LIST
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index 0ed46b0e7a..10961cf452 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -16,6 +16,7 @@
#include "src/machine-type.h"
#include "src/objects.h"
#include "src/type-hints.h"
+#include "src/vector-slot-pair.h"
#include "src/zone/zone-handle-set.h"
namespace v8 {
@@ -91,6 +92,28 @@ ExternalArrayType ExternalArrayTypeOf(const Operator* op) WARN_UNUSED_RESULT;
// The ConvertReceiverMode is used as parameter by ConvertReceiver operators.
ConvertReceiverMode ConvertReceiverModeOf(Operator const* op);
+// The parameters for several Check nodes. The {feedback} parameter is
+// optional. If {feedback} references a valid CallIC slot and the check
+// fails, then speculation on that CallIC slot will be disabled.
+class CheckParameters final {
+ public:
+ explicit CheckParameters(const VectorSlotPair& feedback)
+ : feedback_(feedback) {}
+
+ VectorSlotPair const& feedback() const { return feedback_; }
+
+ private:
+ VectorSlotPair feedback_;
+};
+
+bool operator==(CheckParameters const&, CheckParameters const&);
+
+size_t hash_value(CheckParameters const&);
+
+std::ostream& operator<<(std::ostream&, CheckParameters const&);
+
+CheckParameters const& CheckParametersOf(Operator const*) WARN_UNUSED_RESULT;
+
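Consumers recover the feedback from any operator in CHECKED_WITH_FEEDBACK_OP_LIST via CheckParametersOf. A minimal sketch, assuming op was built by one of the factory methods this patch adds:

    // op : Operator const*, e.g. simplified()->CheckSmi(feedback)
    if (IsCheckedWithFeedback(op)) {
      VectorSlotPair const& fb = CheckParametersOf(op).feedback();
      if (fb.IsValid()) {
        // A deopt through this check can disable speculation on fb's slot.
      }
    }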
enum class CheckFloat64HoleMode : uint8_t {
kNeverReturnHole, // Never return the hole (deoptimize instead).
kAllowReturnHole // Allow to return the hole (signaling NaN).
@@ -111,7 +134,32 @@ size_t hash_value(CheckTaggedInputMode);
std::ostream& operator<<(std::ostream&, CheckTaggedInputMode);
-CheckTaggedInputMode CheckTaggedInputModeOf(const Operator*) WARN_UNUSED_RESULT;
+CheckTaggedInputMode CheckTaggedInputModeOf(const Operator*);
+
+class CheckTaggedInputParameters {
+ public:
+ CheckTaggedInputParameters(CheckTaggedInputMode mode,
+ const VectorSlotPair& feedback)
+ : mode_(mode), feedback_(feedback) {}
+
+ CheckTaggedInputMode mode() const { return mode_; }
+ const VectorSlotPair& feedback() const { return feedback_; }
+
+ private:
+ CheckTaggedInputMode mode_;
+ VectorSlotPair feedback_;
+};
+
+const CheckTaggedInputParameters& CheckTaggedInputParametersOf(const Operator*)
+ WARN_UNUSED_RESULT;
+
+std::ostream& operator<<(std::ostream&,
+ const CheckTaggedInputParameters& params);
+
+size_t hash_value(const CheckTaggedInputParameters& params);
+
+bool operator==(CheckTaggedInputParameters const&,
+ CheckTaggedInputParameters const&);
enum class CheckForMinusZeroMode : uint8_t {
kCheckForMinusZero,
@@ -125,6 +173,30 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
CheckForMinusZeroMode CheckMinusZeroModeOf(const Operator*) WARN_UNUSED_RESULT;
+class CheckMinusZeroParameters {
+ public:
+ CheckMinusZeroParameters(CheckForMinusZeroMode mode,
+ const VectorSlotPair& feedback)
+ : mode_(mode), feedback_(feedback) {}
+
+ CheckForMinusZeroMode mode() const { return mode_; }
+ const VectorSlotPair& feedback() const { return feedback_; }
+
+ private:
+ CheckForMinusZeroMode mode_;
+ VectorSlotPair feedback_;
+};
+
+const CheckMinusZeroParameters& CheckMinusZeroParametersOf(const Operator* op)
+ WARN_UNUSED_RESULT;
+
+std::ostream& operator<<(std::ostream&, const CheckMinusZeroParameters& params);
+
+size_t hash_value(const CheckMinusZeroParameters& params);
+
+bool operator==(CheckMinusZeroParameters const&,
+ CheckMinusZeroParameters const&);
+
// Flags for map checks.
enum class CheckMapsFlag : uint8_t {
kNone = 0u,
@@ -155,19 +227,24 @@ bool operator!=(MapsParameterInfo const&, MapsParameterInfo const&);
size_t hash_value(MapsParameterInfo const&);
-// A descriptor for map checks.
+// A descriptor for map checks. The {feedback} parameter is optional.
+// If {feedback} references a valid CallIC slot and this MapCheck fails,
+// then speculation on that CallIC slot will be disabled.
class CheckMapsParameters final {
public:
- CheckMapsParameters(CheckMapsFlags flags, ZoneHandleSet<Map> const& maps)
- : flags_(flags), maps_info_(maps) {}
+ CheckMapsParameters(CheckMapsFlags flags, ZoneHandleSet<Map> const& maps,
+ const VectorSlotPair& feedback)
+ : flags_(flags), maps_info_(maps), feedback_(feedback) {}
CheckMapsFlags flags() const { return flags_; }
ZoneHandleSet<Map> const& maps() const { return maps_info_.maps(); }
MapsParameterInfo const& maps_info() const { return maps_info_; }
+ VectorSlotPair const& feedback() const { return feedback_; }
private:
CheckMapsFlags const flags_;
MapsParameterInfo const maps_info_;
+ VectorSlotPair const feedback_;
};
bool operator==(CheckMapsParameters const&, CheckMapsParameters const&);
@@ -197,7 +274,29 @@ inline size_t hash_value(GrowFastElementsMode mode) {
std::ostream& operator<<(std::ostream&, GrowFastElementsMode);
-GrowFastElementsMode GrowFastElementsModeOf(const Operator*) WARN_UNUSED_RESULT;
+class GrowFastElementsParameters {
+ public:
+ GrowFastElementsParameters(GrowFastElementsMode mode,
+ const VectorSlotPair& feedback)
+ : mode_(mode), feedback_(feedback) {}
+
+ GrowFastElementsMode mode() const { return mode_; }
+ const VectorSlotPair& feedback() const { return feedback_; }
+
+ private:
+ GrowFastElementsMode mode_;
+ VectorSlotPair feedback_;
+};
+
+bool operator==(const GrowFastElementsParameters&,
+ const GrowFastElementsParameters&);
+
+inline size_t hash_value(const GrowFastElementsParameters&);
+
+std::ostream& operator<<(std::ostream&, const GrowFastElementsParameters&);
+
+const GrowFastElementsParameters& GrowFastElementsParametersOf(const Operator*)
+ WARN_UNUSED_RESULT;
// A descriptor for elements kind transitions.
class ElementsTransition final {
@@ -270,6 +369,8 @@ class AllocateParameters {
PretenureFlag pretenure_;
};
+bool IsCheckedWithFeedback(const Operator* op);
+
size_t hash_value(AllocateParameters);
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, AllocateParameters);
@@ -282,7 +383,7 @@ Type* AllocateTypeOf(const Operator* op) WARN_UNUSED_RESULT;
UnicodeEncoding UnicodeEncodingOf(const Operator*) WARN_UNUSED_RESULT;
-BailoutReason BailoutReasonOf(const Operator* op) WARN_UNUSED_RESULT;
+AbortReason AbortReasonOf(const Operator* op) WARN_UNUSED_RESULT;
DeoptimizeReason DeoptimizeReasonOf(const Operator* op) WARN_UNUSED_RESULT;
@@ -364,6 +465,7 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* NumberTrunc();
const Operator* NumberToBoolean();
const Operator* NumberToInt32();
+ const Operator* NumberToString();
const Operator* NumberToUint32();
const Operator* NumberToUint8Clamped();
@@ -402,9 +504,12 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* StringCharAt();
const Operator* StringCharCodeAt();
const Operator* SeqStringCharCodeAt();
+ const Operator* StringCodePointAt();
+ const Operator* SeqStringCodePointAt();
const Operator* StringFromCharCode();
const Operator* StringFromCodePoint(UnicodeEncoding encoding);
const Operator* StringIndexOf();
+ const Operator* StringLength();
const Operator* StringToLowerCaseIntl();
const Operator* StringToUpperCaseIntl();
@@ -435,49 +540,52 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* TruncateTaggedToBit();
const Operator* TruncateTaggedPointerToBit();
- const Operator* CheckIf(DeoptimizeReason deoptimize_reason);
- const Operator* CheckBounds();
- const Operator* CheckMaps(CheckMapsFlags, ZoneHandleSet<Map>);
const Operator* MaskIndexWithBound();
const Operator* CompareMaps(ZoneHandleSet<Map>);
const Operator* MapGuard(ZoneHandleSet<Map> maps);
+ const Operator* CheckBounds(const VectorSlotPair& feedback);
+ const Operator* CheckEqualsInternalizedString();
+ const Operator* CheckEqualsSymbol();
+ const Operator* CheckFloat64Hole(CheckFloat64HoleMode);
const Operator* CheckHeapObject();
+ const Operator* CheckIf(DeoptimizeReason deoptimize_reason);
const Operator* CheckInternalizedString();
- const Operator* CheckNumber();
- const Operator* CheckSmi();
- const Operator* CheckString();
+ const Operator* CheckMaps(CheckMapsFlags, ZoneHandleSet<Map>,
+ const VectorSlotPair& = VectorSlotPair());
+ const Operator* CheckNotTaggedHole();
+ const Operator* CheckNumber(const VectorSlotPair& feedback);
+ const Operator* CheckReceiver();
const Operator* CheckSeqString();
+ const Operator* CheckSmi(const VectorSlotPair& feedback);
+ const Operator* CheckString(const VectorSlotPair& feedback);
const Operator* CheckSymbol();
- const Operator* CheckReceiver();
+ const Operator* CheckedFloat64ToInt32(CheckForMinusZeroMode,
+ const VectorSlotPair& feedback);
const Operator* CheckedInt32Add();
- const Operator* CheckedInt32Sub();
const Operator* CheckedInt32Div();
const Operator* CheckedInt32Mod();
- const Operator* CheckedUint32Div();
- const Operator* CheckedUint32Mod();
const Operator* CheckedInt32Mul(CheckForMinusZeroMode);
- const Operator* CheckedInt32ToTaggedSigned();
- const Operator* CheckedUint32ToInt32();
- const Operator* CheckedUint32ToTaggedSigned();
- const Operator* CheckedFloat64ToInt32(CheckForMinusZeroMode);
- const Operator* CheckedTaggedSignedToInt32();
- const Operator* CheckedTaggedToInt32(CheckForMinusZeroMode);
+ const Operator* CheckedInt32Sub();
+ const Operator* CheckedInt32ToTaggedSigned(const VectorSlotPair& feedback);
+ const Operator* CheckedTaggedSignedToInt32(const VectorSlotPair& feedback);
const Operator* CheckedTaggedToFloat64(CheckTaggedInputMode);
- const Operator* CheckedTaggedToTaggedSigned();
- const Operator* CheckedTaggedToTaggedPointer();
- const Operator* CheckedTruncateTaggedToWord32(CheckTaggedInputMode);
+ const Operator* CheckedTaggedToInt32(CheckForMinusZeroMode,
+ const VectorSlotPair& feedback);
+ const Operator* CheckedTaggedToTaggedPointer(const VectorSlotPair& feedback);
+ const Operator* CheckedTaggedToTaggedSigned(const VectorSlotPair& feedback);
+ const Operator* CheckedTruncateTaggedToWord32(CheckTaggedInputMode,
+ const VectorSlotPair& feedback);
+ const Operator* CheckedUint32Div();
+ const Operator* CheckedUint32Mod();
+ const Operator* CheckedUint32ToInt32(const VectorSlotPair& feedback);
+ const Operator* CheckedUint32ToTaggedSigned(const VectorSlotPair& feedback);
const Operator* ConvertReceiver(ConvertReceiverMode);
- const Operator* CheckFloat64Hole(CheckFloat64HoleMode);
- const Operator* CheckNotTaggedHole();
const Operator* ConvertTaggedHoleToUndefined();
- const Operator* CheckEqualsInternalizedString();
- const Operator* CheckEqualsSymbol();
-
const Operator* ObjectIsArrayBufferView();
const Operator* ObjectIsBigInt();
const Operator* ObjectIsCallable();
@@ -493,6 +601,8 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* ObjectIsSymbol();
const Operator* ObjectIsUndetectable();
+ const Operator* NumberIsFloat64Hole();
+
const Operator* ArgumentsFrame();
const Operator* ArgumentsLength(int formal_parameter_count,
bool is_rest_length);
@@ -503,6 +613,9 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
// new-arguments-elements arguments-frame, arguments-length
const Operator* NewArgumentsElements(int mapped_count);
+ // new-cons-string length, first, second
+ const Operator* NewConsString();
+
// array-buffer-was-neutered buffer
const Operator* ArrayBufferWasNeutered();
@@ -510,7 +623,8 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* EnsureWritableFastElements();
// maybe-grow-fast-elements object, elements, index, length
- const Operator* MaybeGrowFastElements(GrowFastElementsMode mode);
+ const Operator* MaybeGrowFastElements(GrowFastElementsMode mode,
+ const VectorSlotPair& feedback);
// transition-elements-kind object, from-map, to-map
const Operator* TransitionElementsKind(ElementsTransition transition);
@@ -549,7 +663,7 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* StoreTypedElement(ExternalArrayType const&);
// Abort (for terminating execution on internal error).
- const Operator* RuntimeAbort(BailoutReason reason);
+ const Operator* RuntimeAbort(AbortReason reason);
private:
Zone* zone() const { return zone_; }
diff --git a/deps/v8/src/compiler/state-values-utils.cc b/deps/v8/src/compiler/state-values-utils.cc
index 30586f307c..26c47e0cb5 100644
--- a/deps/v8/src/compiler/state-values-utils.cc
+++ b/deps/v8/src/compiler/state-values-utils.cc
@@ -109,7 +109,7 @@ int StateValuesHashKey(Node** nodes, size_t count) {
for (size_t i = 0; i < count; i++) {
hash = hash * 23 + (nodes[i] == nullptr ? 0 : nodes[i]->id());
}
- return static_cast<int>(hash & 0x7fffffff);
+ return static_cast<int>(hash & 0x7FFFFFFF);
}
} // namespace
diff --git a/deps/v8/src/compiler/store-store-elimination.cc b/deps/v8/src/compiler/store-store-elimination.cc
index 1ed12d245b..672acb203d 100644
--- a/deps/v8/src/compiler/store-store-elimination.cc
+++ b/deps/v8/src/compiler/store-store-elimination.cc
@@ -326,13 +326,11 @@ UnobservablesSet RedundantStoreFinder::RecomputeSet(Node* node,
}
bool RedundantStoreFinder::CannotObserveStoreField(Node* node) {
- return node->opcode() == IrOpcode::kCheckedLoad ||
- node->opcode() == IrOpcode::kLoadElement ||
+ return node->opcode() == IrOpcode::kLoadElement ||
node->opcode() == IrOpcode::kLoad ||
node->opcode() == IrOpcode::kStore ||
node->opcode() == IrOpcode::kEffectPhi ||
node->opcode() == IrOpcode::kStoreElement ||
- node->opcode() == IrOpcode::kCheckedStore ||
node->opcode() == IrOpcode::kUnsafePointerAdd ||
node->opcode() == IrOpcode::kRetain;
}
diff --git a/deps/v8/src/compiler/type-cache.h b/deps/v8/src/compiler/type-cache.h
index 346aa47bfc..428688abde 100644
--- a/deps/v8/src/compiler/type-cache.h
+++ b/deps/v8/src/compiler/type-cache.h
@@ -44,6 +44,8 @@ class TypeCache final {
Type* const kSingletonOne = CreateRange(1.0, 1.0);
Type* const kSingletonTen = CreateRange(10.0, 10.0);
Type* const kSingletonMinusOne = CreateRange(-1.0, -1.0);
+ Type* const kZeroOrMinusZero =
+ Type::Union(kSingletonZero, Type::MinusZero(), zone());
Type* const kZeroOrUndefined =
Type::Union(kSingletonZero, Type::Undefined(), zone());
Type* const kTenOrUndefined =
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index 605a96c944..12c9a194b8 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -254,6 +254,9 @@ class Typer::Visitor : public Reducer {
Type* TypeUnaryOp(Node* node, UnaryTyperFun);
Type* TypeBinaryOp(Node* node, BinaryTyperFun);
+ static Type* BinaryNumberOpTyper(Type* lhs, Type* rhs, Typer* t,
+ BinaryTyperFun f);
+
enum ComparisonOutcomeFlags {
kComparisonTrue = 1,
kComparisonFalse = 2,
@@ -399,7 +402,6 @@ Type* Typer::Visitor::TypeUnaryOp(Node* node, UnaryTyperFun f) {
return input->IsNone() ? Type::None() : f(input, typer_);
}
-
Type* Typer::Visitor::TypeBinaryOp(Node* node, BinaryTyperFun f) {
Type* left = Operand(node, 0);
Type* right = Operand(node, 1);
@@ -407,6 +409,23 @@ Type* Typer::Visitor::TypeBinaryOp(Node* node, BinaryTyperFun f) {
: f(left, right, typer_);
}
+Type* Typer::Visitor::BinaryNumberOpTyper(Type* lhs, Type* rhs, Typer* t,
+ BinaryTyperFun f) {
+ lhs = ToNumeric(lhs, t);
+ rhs = ToNumeric(rhs, t);
+ bool lhs_is_number = lhs->Is(Type::Number());
+ bool rhs_is_number = rhs->Is(Type::Number());
+ if (lhs_is_number && rhs_is_number) {
+ return f(lhs, rhs, t);
+ }
+ if (lhs_is_number || rhs_is_number) {
+ return Type::Number();
+ }
+ if (lhs->Is(Type::BigInt()) || rhs->Is(Type::BigInt())) {
+ return Type::BigInt();
+ }
+ return Type::Numeric();
+}
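The helper's middle branch leans on a language invariant: mixing a Number with a BigInt throws a TypeError, so if either operand is known to be a Number, any value the operation actually produces must itself be a Number. Tracing the branches:

    // lhs = Number, rhs = BigInt  -> one side is Number -> Type::Number()
    //   (the mixed case can only throw, never produce a BigInt)
    // lhs = BigInt, rhs = BigInt  -> Type::BigInt()
    // lhs = Numeric, rhs = Numeric (unknown mix) -> Type::Numeric()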
Typer::Visitor::ComparisonOutcome Typer::Visitor::Invert(
ComparisonOutcome outcome, Typer* t) {
@@ -417,7 +436,6 @@ Typer::Visitor::ComparisonOutcome Typer::Visitor::Invert(
return result;
}
-
Type* Typer::Visitor::FalsifyUndefined(ComparisonOutcome outcome, Typer* t) {
if ((outcome & kComparisonFalse) != 0 ||
(outcome & kComparisonUndefined) != 0) {
@@ -947,7 +965,7 @@ Type* Typer::Visitor::TypeDead(Node* node) { return Type::None(); }
Type* Typer::Visitor::TypeDeadValue(Node* node) { return Type::None(); }
-Type* Typer::Visitor::TypeUnreachable(Node* node) { UNREACHABLE(); }
+Type* Typer::Visitor::TypeUnreachable(Node* node) { return Type::None(); }
// JS comparison operators.
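{TypeUnreachable} switching from UNREACHABLE() to Type::None() pairs with the verifier change later in this patch: {Unreachable} nodes are now typed (as None), and their only permitted value uses are {DeadValue} nodes.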
@@ -1052,53 +1070,23 @@ Type* Typer::Visitor::JSGreaterThanOrEqualTyper(
Type* Typer::Visitor::JSBitwiseOrTyper(Type* lhs, Type* rhs, Typer* t) {
- lhs = ToNumeric(lhs, t);
- rhs = ToNumeric(rhs, t);
- if (lhs->Is(Type::Number()) && rhs->Is(Type::Number())) {
- return NumberBitwiseOr(lhs, rhs, t);
- }
- return Type::Numeric();
+ return BinaryNumberOpTyper(lhs, rhs, t, NumberBitwiseOr);
}
-
Type* Typer::Visitor::JSBitwiseAndTyper(Type* lhs, Type* rhs, Typer* t) {
- lhs = ToNumeric(lhs, t);
- rhs = ToNumeric(rhs, t);
- if (lhs->Is(Type::Number()) && rhs->Is(Type::Number())) {
- return NumberBitwiseAnd(lhs, rhs, t);
- }
- return Type::Numeric();
+ return BinaryNumberOpTyper(lhs, rhs, t, NumberBitwiseAnd);
}
-
Type* Typer::Visitor::JSBitwiseXorTyper(Type* lhs, Type* rhs, Typer* t) {
- lhs = ToNumeric(lhs, t);
- rhs = ToNumeric(rhs, t);
- if (lhs->Is(Type::Number()) && rhs->Is(Type::Number())) {
- return NumberBitwiseXor(lhs, rhs, t);
- }
- return Type::Numeric();
+ return BinaryNumberOpTyper(lhs, rhs, t, NumberBitwiseXor);
}
-
Type* Typer::Visitor::JSShiftLeftTyper(Type* lhs, Type* rhs, Typer* t) {
- return NumberShiftLeft(ToNumber(lhs, t), ToNumber(rhs, t), t);
- lhs = ToNumeric(lhs, t);
- rhs = ToNumeric(rhs, t);
- if (lhs->Is(Type::Number()) && rhs->Is(Type::Number())) {
- return NumberShiftLeft(lhs, rhs, t);
- }
- return Type::Numeric();
+ return BinaryNumberOpTyper(lhs, rhs, t, NumberShiftLeft);
}
-
Type* Typer::Visitor::JSShiftRightTyper(Type* lhs, Type* rhs, Typer* t) {
- lhs = ToNumeric(lhs, t);
- rhs = ToNumeric(rhs, t);
- if (lhs->Is(Type::Number()) && rhs->Is(Type::Number())) {
- return NumberShiftRight(lhs, rhs, t);
- }
- return Type::Numeric();
+ return BinaryNumberOpTyper(lhs, rhs, t, NumberShiftRight);
}
@@ -1120,51 +1108,27 @@ Type* Typer::Visitor::JSAddTyper(Type* lhs, Type* rhs, Typer* t) {
}
}
// The addition must be numeric.
- lhs = ToNumeric(lhs, t);
- rhs = ToNumeric(rhs, t);
- if (lhs->Is(Type::Number()) && rhs->Is(Type::Number())) {
- return NumberAdd(lhs, rhs, t);
- }
- return Type::Numeric();
+ return BinaryNumberOpTyper(lhs, rhs, t, NumberAdd);
}
Type* Typer::Visitor::JSSubtractTyper(Type* lhs, Type* rhs, Typer* t) {
- lhs = ToNumeric(lhs, t);
- rhs = ToNumeric(rhs, t);
- if (lhs->Is(Type::Number()) && rhs->Is(Type::Number())) {
- return NumberSubtract(lhs, rhs, t);
- }
- return Type::Numeric();
+ return BinaryNumberOpTyper(lhs, rhs, t, NumberSubtract);
}
Type* Typer::Visitor::JSMultiplyTyper(Type* lhs, Type* rhs, Typer* t) {
- lhs = ToNumeric(lhs, t);
- rhs = ToNumeric(rhs, t);
- if (lhs->Is(Type::Number()) && rhs->Is(Type::Number())) {
- return NumberMultiply(lhs, rhs, t);
- }
- return Type::Numeric();
+ return BinaryNumberOpTyper(lhs, rhs, t, NumberMultiply);
}
Type* Typer::Visitor::JSDivideTyper(Type* lhs, Type* rhs, Typer* t) {
- lhs = ToNumeric(lhs, t);
- rhs = ToNumeric(rhs, t);
- if (lhs->Is(Type::Number()) && rhs->Is(Type::Number())) {
- return NumberDivide(lhs, rhs, t);
- }
- return Type::Numeric();
+ return BinaryNumberOpTyper(lhs, rhs, t, NumberDivide);
}
Type* Typer::Visitor::JSModulusTyper(Type* lhs, Type* rhs, Typer* t) {
- lhs = ToNumeric(lhs, t);
- rhs = ToNumeric(rhs, t);
- if (lhs->Is(Type::Number()) && rhs->Is(Type::Number())) {
- return NumberModulus(lhs, rhs, t);
- }
- return Type::Numeric();
+ return BinaryNumberOpTyper(lhs, rhs, t, NumberModulus);
}
Type* Typer::Visitor::JSExponentiateTyper(Type* lhs, Type* rhs, Typer* t) {
+ // TODO(neis): Refine using BinaryNumberOpTyper?
return Type::Numeric();
}
@@ -1556,7 +1520,17 @@ Type* Typer::Visitor::JSCallTyper(Type* fun, Typer* t) {
case kDateGetTime:
return t->cache_.kJSDateValueType;
+ // Symbol functions.
+ case kSymbolConstructor:
+ return Type::Symbol();
+
+ // BigInt functions.
+ case kBigIntConstructor:
+ return Type::BigInt();
+
// Number functions.
+ case kNumberConstructor:
+ return Type::Number();
case kNumberIsFinite:
case kNumberIsInteger:
case kNumberIsNaN:
@@ -1570,6 +1544,8 @@ Type* Typer::Visitor::JSCallTyper(Type* fun, Typer* t) {
return Type::String();
// String functions.
+ case kStringConstructor:
+ return Type::String();
case kStringCharCodeAt:
return Type::Union(Type::Range(0, kMaxUInt16, t->zone()), Type::NaN(),
t->zone());
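For context on the constructor cases added above: called as plain functions (without new), Symbol() returns a symbol, BigInt(1) returns 1n, Number('3') returns the number 3, and String(42) returns '42', so typing these call results as the corresponding primitive types matches the spec behavior.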
@@ -1850,6 +1826,10 @@ Type* Typer::Visitor::TypeJSGeneratorRestoreRegister(Node* node) {
return Type::Any();
}
+Type* Typer::Visitor::TypeJSGeneratorRestoreInputOrDebugPos(Node* node) {
+ return Type::Any();
+}
+
Type* Typer::Visitor::TypeJSStackCheck(Node* node) { return Type::Any(); }
Type* Typer::Visitor::TypeJSDebugger(Node* node) { return Type::Any(); }
@@ -1968,6 +1948,14 @@ Type* Typer::Visitor::TypeSeqStringCharCodeAt(Node* node) {
return typer_->cache_.kUint16;
}
+Type* Typer::Visitor::TypeStringCodePointAt(Node* node) {
+ return Type::Range(0.0, String::kMaxCodePoint, zone());
+}
+
+Type* Typer::Visitor::TypeSeqStringCodePointAt(Node* node) {
+ return Type::Range(0.0, String::kMaxCodePoint, zone());
+}
+
Type* Typer::Visitor::TypeStringFromCharCode(Node* node) {
return TypeUnaryOp(node, StringFromCharCodeTyper);
}
@@ -1976,7 +1964,13 @@ Type* Typer::Visitor::TypeStringFromCodePoint(Node* node) {
return TypeUnaryOp(node, StringFromCodePointTyper);
}
-Type* Typer::Visitor::TypeStringIndexOf(Node* node) { UNREACHABLE(); }
+Type* Typer::Visitor::TypeStringIndexOf(Node* node) {
+ return Type::Range(-1.0, String::kMaxLength, zone());
+}
+
+Type* Typer::Visitor::TypeStringLength(Node* node) {
+ return typer_->cache_.kStringLengthType;
+}
Type* Typer::Visitor::TypeMaskIndexWithBound(Node* node) {
return Type::Union(Operand(node, 0), typer_->cache_.kSingletonZero, zone());
@@ -2151,6 +2145,10 @@ Type* Typer::Visitor::TypeObjectIsMinusZero(Node* node) {
return TypeUnaryOp(node, ObjectIsMinusZero);
}
+Type* Typer::Visitor::TypeNumberIsFloat64Hole(Node* node) {
+ return Type::Boolean();
+}
+
Type* Typer::Visitor::TypeObjectIsNaN(Node* node) {
return TypeUnaryOp(node, ObjectIsNaN);
}
@@ -2205,6 +2203,10 @@ Type* Typer::Visitor::TypeNewArgumentsElements(Node* node) {
return Type::OtherInternal();
}
+Type* Typer::Visitor::TypeNewConsString(Node* node) {
+ return Type::OtherNonSeqString();
+}
+
Type* Typer::Visitor::TypeArrayBufferWasNeutered(Node* node) {
return Type::Boolean();
}
diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc
index 1b6ca6b53f..a3e90d579a 100644
--- a/deps/v8/src/compiler/types.cc
+++ b/deps/v8/src/compiler/types.cc
@@ -309,6 +309,8 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case PROTOTYPE_INFO_TYPE:
case TUPLE2_TYPE:
case TUPLE3_TYPE:
+ case LOAD_HANDLER_TYPE:
+ case STORE_HANDLER_TYPE:
case CONTEXT_EXTENSION_TYPE:
case ASYNC_GENERATOR_REQUEST_TYPE:
case CODE_DATA_CONTAINER_TYPE:
diff --git a/deps/v8/src/compiler/types.h b/deps/v8/src/compiler/types.h
index d791ec25c5..c4c371dab3 100644
--- a/deps/v8/src/compiler/types.h
+++ b/deps/v8/src/compiler/types.h
@@ -170,12 +170,11 @@ namespace compiler {
V(NumberOrHole, kNumber | kHole) \
V(NumberOrOddball, kNumber | kNullOrUndefined | kBoolean | \
kHole) \
- V(NumberOrString, kNumber | kString) \
V(NumericOrString, kNumeric | kString) \
V(NumberOrUndefined, kNumber | kUndefined) \
V(NumberOrUndefinedOrNullOrBoolean, \
kNumber | kNullOrUndefined | kBoolean) \
- V(PlainPrimitive, kNumberOrString | kBoolean | \
+ V(PlainPrimitive, kNumber | kString | kBoolean | \
kNullOrUndefined) \
V(Primitive, kSymbol | kBigInt | kPlainPrimitive) \
V(OtherUndetectableOrUndefined, kOtherUndetectable | kUndefined) \
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index e0c40df63b..a66a73f5d3 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -51,7 +51,7 @@ class Verifier::Visitor {
std::ostringstream str;
str << "TypeError: node #" << node->id() << ":" << *node->op()
<< " should never have a type";
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
}
void CheckTypeIs(Node* node, Type* type) {
@@ -62,7 +62,7 @@ class Verifier::Visitor {
NodeProperties::GetType(node)->PrintTo(str);
str << " is not ";
type->PrintTo(str);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
}
void CheckTypeMaybe(Node* node, Type* type) {
@@ -73,7 +73,7 @@ class Verifier::Visitor {
NodeProperties::GetType(node)->PrintTo(str);
str << " must intersect ";
type->PrintTo(str);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
}
void CheckValueInputIs(Node* node, int i, Type* type) {
@@ -86,7 +86,7 @@ class Verifier::Visitor {
NodeProperties::GetType(input)->PrintTo(str);
str << " is not ";
type->PrintTo(str);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
}
void CheckOutput(Node* node, Node* use, int count, const char* kind) {
@@ -95,7 +95,7 @@ class Verifier::Visitor {
str << "GraphError: node #" << node->id() << ":" << *node->op()
<< " does not produce " << kind << " output used by node #"
<< use->id() << ":" << *use->op();
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
}
};
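The repeated change in this hunk hardens FATAL(), which is printf-style: the composed message embeds operator and type names, and a stray '%' in it would be parsed as a conversion specifier. A minimal illustration with a hypothetical message (not from the source):

    std::string msg = "node #1:Phi covers 100% of uses";
    FATAL(msg.c_str());        // '%' interpreted as a format specifier: undefined behavior
    FATAL("%s", msg.c_str());  // safe: the message travels as data, not as a format string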
@@ -236,10 +236,19 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
// Dead is never connected to the graph.
UNREACHABLE();
case IrOpcode::kDeadValue:
+ CheckValueInputIs(node, 0, Type::None());
CheckTypeIs(node, Type::None());
break;
case IrOpcode::kUnreachable:
- CheckNotTyped(node);
+ CheckTypeIs(node, Type::None());
+ for (Edge edge : node->use_edges()) {
+ Node* use = edge.from();
+ if (NodeProperties::IsValueEdge(edge) && all.IsLive(use)) {
+ // {Unreachable} nodes can only be used by {DeadValue}, because they
+ // don't actually produce a value.
+ CHECK_EQ(IrOpcode::kDeadValue, use->opcode());
+ }
+ }
break;
case IrOpcode::kBranch: {
// Branch uses are IfTrue and IfFalse.
@@ -826,6 +835,10 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckTypeIs(node, Type::Any());
break;
+ case IrOpcode::kJSGeneratorRestoreInputOrDebugPos:
+ CheckTypeIs(node, Type::Any());
+ break;
+
case IrOpcode::kJSStackCheck:
case IrOpcode::kJSDebugger:
// Type is empty.
@@ -984,6 +997,11 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckValueInputIs(node, 0, Type::Number());
CheckTypeIs(node, Type::Signed32());
break;
+ case IrOpcode::kNumberToString:
+ // Number -> String
+ CheckValueInputIs(node, 0, Type::Number());
+ CheckTypeIs(node, Type::String());
+ break;
case IrOpcode::kNumberToUint32:
case IrOpcode::kNumberToUint8Clamped:
// Number -> Unsigned32
@@ -1041,6 +1059,18 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckValueInputIs(node, 1, Type::Unsigned32());
CheckTypeIs(node, Type::UnsignedSmall());
break;
+ case IrOpcode::kStringCodePointAt:
+ // (String, Unsigned32) -> UnsignedSmall
+ CheckValueInputIs(node, 0, Type::String());
+ CheckValueInputIs(node, 1, Type::Unsigned32());
+ CheckTypeIs(node, Type::UnsignedSmall());
+ break;
+ case IrOpcode::kSeqStringCodePointAt:
+ // (String, Unsigned32) -> UnsignedSmall
+ CheckValueInputIs(node, 0, Type::String());
+ CheckValueInputIs(node, 1, Type::Unsigned32());
+ CheckTypeIs(node, Type::UnsignedSmall());
+ break;
case IrOpcode::kStringFromCharCode:
// Number -> String
CheckValueInputIs(node, 0, Type::Number());
@@ -1058,6 +1088,10 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckValueInputIs(node, 2, Type::SignedSmall());
CheckTypeIs(node, Type::SignedSmall());
break;
+ case IrOpcode::kStringLength:
+ CheckValueInputIs(node, 0, Type::String());
+ CheckTypeIs(node, TypeCache::Get().kStringLengthType);
+ break;
case IrOpcode::kStringToLowerCaseIntl:
case IrOpcode::kStringToUpperCaseIntl:
CheckValueInputIs(node, 0, Type::String());
@@ -1094,6 +1128,10 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckValueInputIs(node, 0, Type::Any());
CheckTypeIs(node, Type::Boolean());
break;
+ case IrOpcode::kNumberIsFloat64Hole:
+ CheckValueInputIs(node, 0, Type::NumberOrHole());
+ CheckTypeIs(node, Type::Boolean());
+ break;
case IrOpcode::kFindOrderedHashMapEntry:
CheckValueInputIs(node, 0, Type::Any());
CheckTypeIs(node, Type::SignedSmall());
@@ -1122,6 +1160,12 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
Code::kMaxArguments, zone));
CheckTypeIs(node, Type::OtherInternal());
break;
+ case IrOpcode::kNewConsString:
+ CheckValueInputIs(node, 0, TypeCache::Get().kStringLengthType);
+ CheckValueInputIs(node, 1, Type::String());
+ CheckValueInputIs(node, 2, Type::String());
+ CheckTypeIs(node, Type::OtherString());
+ break;
case IrOpcode::kAllocate:
CheckValueInputIs(node, 0, Type::PlainNumber());
break;
@@ -1591,8 +1635,6 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kLoadParentFramePointer:
case IrOpcode::kUnalignedLoad:
case IrOpcode::kUnalignedStore:
- case IrOpcode::kCheckedLoad:
- case IrOpcode::kCheckedStore:
case IrOpcode::kAtomicLoad:
case IrOpcode::kAtomicStore:
case IrOpcode::kAtomicExchange:
@@ -1602,6 +1644,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kAtomicAnd:
case IrOpcode::kAtomicOr:
case IrOpcode::kAtomicXor:
+ case IrOpcode::kSpeculationFence:
#define SIMD_MACHINE_OP_CASE(Name) case IrOpcode::k##Name:
MACHINE_SIMD_OP_LIST(SIMD_MACHINE_OP_CASE)
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index a04c7b3e5d..9bbf5f3a3f 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -34,6 +34,8 @@
#include "src/log-inl.h"
#include "src/trap-handler/trap-handler.h"
#include "src/wasm/function-body-decoder.h"
+#include "src/wasm/memory-tracing.h"
+#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
@@ -68,6 +70,13 @@ void MergeControlToEnd(JSGraph* jsgraph, Node* node) {
}
}
+bool ContainsSimd(wasm::FunctionSig* sig) {
+ for (wasm::ValueType t : sig->all()) {
+ if (t == wasm::kWasmS128) return true;
+ }
+ return false;
+}
+
} // namespace
WasmGraphBuilder::WasmGraphBuilder(
@@ -79,21 +88,15 @@ WasmGraphBuilder::WasmGraphBuilder(
jsgraph_(jsgraph),
centry_stub_node_(jsgraph_->HeapConstant(centry_stub)),
env_(env),
- signature_tables_(zone),
function_tables_(zone),
- function_table_sizes_(zone),
cur_buffer_(def_buffer_),
cur_bufsize_(kDefaultBufferSize),
+ has_simd_(ContainsSimd(sig)),
untrusted_code_mitigations_(FLAG_untrusted_code_mitigations),
runtime_exception_support_(exception_support),
sig_(sig),
source_position_table_(source_position_table) {
- for (size_t i = sig->parameter_count(); i > 0 && !has_simd_; --i) {
- if (sig->GetParam(i - 1) == wasm::kWasmS128) has_simd_ = true;
- }
- for (size_t i = sig->return_count(); i > 0 && !has_simd_; --i) {
- if (sig->GetReturn(i - 1) == wasm::kWasmS128) has_simd_ = true;
- }
+ DCHECK_IMPLIES(use_trap_handler(), trap_handler::IsTrapHandlerEnabled());
DCHECK_NOT_NULL(jsgraph_);
}
@@ -561,9 +564,15 @@ Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input,
op = m->Float64Sqrt();
break;
case wasm::kExprI32SConvertF64:
- return BuildI32SConvertF64(input, position);
+ return BuildI32SConvertF64(input, position, NumericImplementation::kTrap);
+ case wasm::kExprI32SConvertSatF64:
+ return BuildI32SConvertF64(input, position,
+ NumericImplementation::kSaturate);
case wasm::kExprI32UConvertF64:
- return BuildI32UConvertF64(input, position);
+ return BuildI32UConvertF64(input, position, NumericImplementation::kTrap);
+ case wasm::kExprI32UConvertSatF64:
+ return BuildI32UConvertF64(input, position,
+ NumericImplementation::kSaturate);
case wasm::kExprI32AsmjsSConvertF64:
return BuildI32AsmjsSConvertF64(input);
case wasm::kExprI32AsmjsUConvertF64:
@@ -584,9 +593,15 @@ Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input,
op = m->RoundUint32ToFloat32();
break;
case wasm::kExprI32SConvertF32:
- return BuildI32SConvertF32(input, position);
+ return BuildI32SConvertF32(input, position, NumericImplementation::kTrap);
+ case wasm::kExprI32SConvertSatF32:
+ return BuildI32SConvertF32(input, position,
+ NumericImplementation::kSaturate);
case wasm::kExprI32UConvertF32:
- return BuildI32UConvertF32(input, position);
+ return BuildI32UConvertF32(input, position, NumericImplementation::kTrap);
+ case wasm::kExprI32UConvertSatF32:
+ return BuildI32UConvertF32(input, position,
+ NumericImplementation::kSaturate);
case wasm::kExprI32AsmjsSConvertF32:
return BuildI32AsmjsSConvertF32(input);
case wasm::kExprI32AsmjsUConvertF32:
@@ -964,7 +979,7 @@ Node* WasmGraphBuilder::Unreachable(wasm::WasmCodePosition position) {
}
Node* WasmGraphBuilder::MaskShiftCount32(Node* node) {
- static const int32_t kMask32 = 0x1f;
+ static const int32_t kMask32 = 0x1F;
if (!jsgraph()->machine()->Word32ShiftIsSafe()) {
// Shifts by constants are so common we pattern-match them here.
Int32Matcher match(node);
@@ -980,7 +995,7 @@ Node* WasmGraphBuilder::MaskShiftCount32(Node* node) {
}
Node* WasmGraphBuilder::MaskShiftCount64(Node* node) {
- static const int64_t kMask64 = 0x3f;
+ static const int64_t kMask64 = 0x3F;
if (!jsgraph()->machine()->Word32ShiftIsSafe()) {
// Shifts by constants are so common we pattern-match them here.
Int64Matcher match(node);
@@ -1009,9 +1024,8 @@ static bool ReverseBytesSupported(MachineOperatorBuilder* m,
return false;
}
-Node* WasmGraphBuilder::BuildChangeEndiannessStore(Node* node,
- MachineType memtype,
- wasm::ValueType wasmtype) {
+Node* WasmGraphBuilder::BuildChangeEndiannessStore(
+ Node* node, MachineRepresentation mem_rep, wasm::ValueType wasmtype) {
Node* result;
Node* value = node;
MachineOperatorBuilder* m = jsgraph()->machine();
@@ -1040,23 +1054,22 @@ Node* WasmGraphBuilder::BuildChangeEndiannessStore(Node* node,
break;
}
- if (memtype.representation() == MachineRepresentation::kWord8) {
+ if (mem_rep == MachineRepresentation::kWord8) {
// No need to change endianness for byte size, return original node
return node;
}
- if (wasmtype == wasm::kWasmI64 &&
- memtype.representation() < MachineRepresentation::kWord64) {
+ if (wasmtype == wasm::kWasmI64 && mem_rep < MachineRepresentation::kWord64) {
// If we store the lower part of a WasmI64 expression, we can truncate the
// upper 32 bits.
value = graph()->NewNode(m->TruncateInt64ToInt32(), value);
valueSizeInBytes = 1 << ElementSizeLog2Of(wasm::kWasmI32);
valueSizeInBits = 8 * valueSizeInBytes;
- if (memtype.representation() == MachineRepresentation::kWord16) {
+ if (mem_rep == MachineRepresentation::kWord16) {
value =
graph()->NewNode(m->Word32Shl(), value, jsgraph()->Int32Constant(16));
}
} else if (wasmtype == wasm::kWasmI32 &&
- memtype.representation() == MachineRepresentation::kWord16) {
+ mem_rep == MachineRepresentation::kWord16) {
value =
graph()->NewNode(m->Word32Shl(), value, jsgraph()->Int32Constant(16));
}
@@ -1325,7 +1338,7 @@ Node* WasmGraphBuilder::BuildF32CopySign(Node* left, Node* right) {
wasm::kExprF32ReinterpretI32,
Binop(wasm::kExprI32Ior,
Binop(wasm::kExprI32And, Unop(wasm::kExprI32ReinterpretF32, left),
- jsgraph()->Int32Constant(0x7fffffff)),
+ jsgraph()->Int32Constant(0x7FFFFFFF)),
Binop(wasm::kExprI32And, Unop(wasm::kExprI32ReinterpretF32, right),
jsgraph()->Int32Constant(0x80000000))));
@@ -1338,7 +1351,7 @@ Node* WasmGraphBuilder::BuildF64CopySign(Node* left, Node* right) {
wasm::kExprF64ReinterpretI64,
Binop(wasm::kExprI64Ior,
Binop(wasm::kExprI64And, Unop(wasm::kExprI64ReinterpretF64, left),
- jsgraph()->Int64Constant(0x7fffffffffffffff)),
+ jsgraph()->Int64Constant(0x7FFFFFFFFFFFFFFF)),
Binop(wasm::kExprI64And, Unop(wasm::kExprI64ReinterpretF64, right),
jsgraph()->Int64Constant(0x8000000000000000))));
@@ -1350,78 +1363,177 @@ Node* WasmGraphBuilder::BuildF64CopySign(Node* left, Node* right) {
Node* high_word_right =
graph()->NewNode(m->Float64ExtractHighWord32(), right);
- Node* new_high_word =
- Binop(wasm::kExprI32Ior, Binop(wasm::kExprI32And, high_word_left,
- jsgraph()->Int32Constant(0x7fffffff)),
- Binop(wasm::kExprI32And, high_word_right,
- jsgraph()->Int32Constant(0x80000000)));
+ Node* new_high_word = Binop(wasm::kExprI32Ior,
+ Binop(wasm::kExprI32And, high_word_left,
+ jsgraph()->Int32Constant(0x7FFFFFFF)),
+ Binop(wasm::kExprI32And, high_word_right,
+ jsgraph()->Int32Constant(0x80000000)));
return graph()->NewNode(m->Float64InsertHighWord32(), left, new_high_word);
#endif
}
-Node* WasmGraphBuilder::BuildI32SConvertF32(Node* input,
- wasm::WasmCodePosition position) {
- MachineOperatorBuilder* m = jsgraph()->machine();
+// Helper classes for float to int conversions.
+struct WasmGraphBuilder::IntConvertOps {
+ MachineRepresentation word_rep() const {
+ return MachineRepresentation::kWord32;
+ }
+ Node* zero() const { return builder_->Int32Constant(0); }
+ virtual Node* min() const = 0;
+ virtual Node* max() const = 0;
+ virtual ~IntConvertOps() = default;
+
+ protected:
+ explicit IntConvertOps(WasmGraphBuilder* builder) : builder_(builder) {}
+ WasmGraphBuilder* builder_;
+ DISALLOW_IMPLICIT_CONSTRUCTORS(IntConvertOps);
+};
+
+struct I32SConvertOps final : public WasmGraphBuilder::IntConvertOps {
+ explicit I32SConvertOps(WasmGraphBuilder* builder)
+ : WasmGraphBuilder::IntConvertOps(builder) {}
+ ~I32SConvertOps() = default;
+ Node* min() const {
+ return builder_->Int32Constant(std::numeric_limits<int32_t>::min());
+ }
+ Node* max() const {
+ return builder_->Int32Constant(std::numeric_limits<int32_t>::max());
+ }
+ DISALLOW_IMPLICIT_CONSTRUCTORS(I32SConvertOps);
+};
+
+struct I32UConvertOps final : public WasmGraphBuilder::IntConvertOps {
+ explicit I32UConvertOps(WasmGraphBuilder* builder)
+ : WasmGraphBuilder::IntConvertOps(builder) {}
+ ~I32UConvertOps() = default;
+ Node* min() const {
+ return builder_->Int32Constant(std::numeric_limits<uint32_t>::min());
+ }
+ Node* max() const {
+ return builder_->Int32Constant(std::numeric_limits<uint32_t>::max());
+ }
+ DISALLOW_IMPLICIT_CONSTRUCTORS(I32UConvertOps);
+};
+
+struct WasmGraphBuilder::FloatConvertOps {
+ virtual Node* zero() const = 0;
+ virtual wasm::WasmOpcode trunc_op() const = 0;
+ virtual wasm::WasmOpcode ne_op() const = 0;
+ virtual wasm::WasmOpcode lt_op() const = 0;
+ virtual ~FloatConvertOps() = default;
+
+ protected:
+ explicit FloatConvertOps(WasmGraphBuilder* builder) : builder_(builder) {}
+ WasmGraphBuilder* builder_;
+ DISALLOW_IMPLICIT_CONSTRUCTORS(FloatConvertOps);
+};
+
+struct F32ConvertOps final : public WasmGraphBuilder::FloatConvertOps {
+ explicit F32ConvertOps(WasmGraphBuilder* builder)
+ : WasmGraphBuilder::FloatConvertOps(builder) {}
+ ~F32ConvertOps() = default;
+ Node* zero() const { return builder_->Float32Constant(0.0); }
+ wasm::WasmOpcode trunc_op() const { return wasm::kExprF32Trunc; }
+ wasm::WasmOpcode ne_op() const { return wasm::kExprF32Ne; }
+ wasm::WasmOpcode lt_op() const { return wasm::kExprF32Lt; }
+ DISALLOW_IMPLICIT_CONSTRUCTORS(F32ConvertOps);
+};
+
+struct F64ConvertOps final : public WasmGraphBuilder::FloatConvertOps {
+ explicit F64ConvertOps(WasmGraphBuilder* builder)
+ : WasmGraphBuilder::FloatConvertOps(builder) {}
+ ~F64ConvertOps() = default;
+ Node* zero() const { return builder_->Float64Constant(0.0); }
+ wasm::WasmOpcode trunc_op() const { return wasm::kExprF64Trunc; }
+ wasm::WasmOpcode ne_op() const { return wasm::kExprF64Ne; }
+ wasm::WasmOpcode lt_op() const { return wasm::kExprF64Lt; }
+ DISALLOW_IMPLICIT_CONSTRUCTORS(F64ConvertOps);
+};
+
+Node* WasmGraphBuilder::BuildConvertCheck(Node* test, Node* result, Node* input,
+ wasm::WasmCodePosition position,
+ NumericImplementation impl,
+ const IntConvertOps* int_ops,
+ const FloatConvertOps* float_ops) {
+ switch (impl) {
+ case NumericImplementation::kTrap:
+ TrapIfTrue(wasm::kTrapFloatUnrepresentable, test, position);
+ return result;
+ case NumericImplementation::kSaturate: {
+ Diamond tl_d(graph(), jsgraph()->common(), test, BranchHint::kFalse);
+ tl_d.Chain(*control_);
+ Diamond nan_d(graph(), jsgraph()->common(),
+ Binop(float_ops->ne_op(), input, input), // Checks if NaN.
+ BranchHint::kFalse);
+ nan_d.Nest(tl_d, true);
+ Diamond sat_d(graph(), jsgraph()->common(),
+ Binop(float_ops->lt_op(), input, float_ops->zero()),
+ BranchHint::kNone);
+ sat_d.Nest(nan_d, false);
+ Node* sat_val =
+ sat_d.Phi(int_ops->word_rep(), int_ops->min(), int_ops->max());
+ Node* nan_val = nan_d.Phi(int_ops->word_rep(), int_ops->zero(), sat_val);
+ return tl_d.Phi(int_ops->word_rep(), nan_val, result);
+ }
+ }
+ UNREACHABLE();
+}
+
+Node* WasmGraphBuilder::BuildI32ConvertOp(
+ Node* input, wasm::WasmCodePosition position, NumericImplementation impl,
+ const Operator* op, wasm::WasmOpcode check_op, const IntConvertOps* int_ops,
+ const FloatConvertOps* float_ops) {
// Truncation of the input value is needed for the overflow check later.
- Node* trunc = Unop(wasm::kExprF32Trunc, input);
- Node* result = graph()->NewNode(m->TruncateFloat32ToInt32(), trunc);
+ Node* trunc = Unop(float_ops->trunc_op(), input);
+ Node* result = graph()->NewNode(op, trunc);
// Convert the result back to the input's float type. If we end up at a different value than the
- // truncated input value, then there has been an overflow and we trap.
- Node* check = Unop(wasm::kExprF32SConvertI32, result);
- Node* overflow = Binop(wasm::kExprF32Ne, trunc, check);
- TrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow, position);
+ // truncated input value, then there has been an overflow and we
+ // trap/saturate.
+ Node* check = Unop(check_op, result);
+ Node* overflow = Binop(float_ops->ne_op(), trunc, check);
+ return BuildConvertCheck(overflow, result, input, position, impl, int_ops,
+ float_ops);
+}
- return result;
+Node* WasmGraphBuilder::BuildI32SConvertF32(Node* input,
+ wasm::WasmCodePosition position,
+ NumericImplementation impl) {
+ I32SConvertOps int_ops(this);
+ F32ConvertOps float_ops(this);
+ return BuildI32ConvertOp(input, position, impl,
+ jsgraph()->machine()->TruncateFloat32ToInt32(),
+ wasm::kExprF32SConvertI32, &int_ops, &float_ops);
}
Node* WasmGraphBuilder::BuildI32SConvertF64(Node* input,
- wasm::WasmCodePosition position) {
- MachineOperatorBuilder* m = jsgraph()->machine();
- // Truncation of the input value is needed for the overflow check later.
- Node* trunc = Unop(wasm::kExprF64Trunc, input);
- Node* result = graph()->NewNode(m->ChangeFloat64ToInt32(), trunc);
-
- // Convert the result back to f64. If we end up at a different value than the
- // truncated input value, then there has been an overflow and we trap.
- Node* check = Unop(wasm::kExprF64SConvertI32, result);
- Node* overflow = Binop(wasm::kExprF64Ne, trunc, check);
- TrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow, position);
-
- return result;
+ wasm::WasmCodePosition position,
+ NumericImplementation impl) {
+ I32SConvertOps int_ops(this);
+ F64ConvertOps float_ops(this);
+ return BuildI32ConvertOp(input, position, impl,
+ jsgraph()->machine()->ChangeFloat64ToInt32(),
+ wasm::kExprF64SConvertI32, &int_ops, &float_ops);
}
Node* WasmGraphBuilder::BuildI32UConvertF32(Node* input,
- wasm::WasmCodePosition position) {
- MachineOperatorBuilder* m = jsgraph()->machine();
- // Truncation of the input value is needed for the overflow check later.
- Node* trunc = Unop(wasm::kExprF32Trunc, input);
- Node* result = graph()->NewNode(m->TruncateFloat32ToUint32(), trunc);
-
- // Convert the result back to f32. If we end up at a different value than the
- // truncated input value, then there has been an overflow and we trap.
- Node* check = Unop(wasm::kExprF32UConvertI32, result);
- Node* overflow = Binop(wasm::kExprF32Ne, trunc, check);
- TrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow, position);
-
- return result;
+ wasm::WasmCodePosition position,
+ NumericImplementation impl) {
+ I32UConvertOps int_ops(this);
+ F32ConvertOps float_ops(this);
+ return BuildI32ConvertOp(input, position, impl,
+ jsgraph()->machine()->TruncateFloat32ToUint32(),
+ wasm::kExprF32UConvertI32, &int_ops, &float_ops);
}
Node* WasmGraphBuilder::BuildI32UConvertF64(Node* input,
- wasm::WasmCodePosition position) {
- MachineOperatorBuilder* m = jsgraph()->machine();
- // Truncation of the input value is needed for the overflow check later.
- Node* trunc = Unop(wasm::kExprF64Trunc, input);
- Node* result = graph()->NewNode(m->TruncateFloat64ToUint32(), trunc);
-
- // Convert the result back to f64. If we end up at a different value than the
- // truncated input value, then there has been an overflow and we trap.
- Node* check = Unop(wasm::kExprF64UConvertI32, result);
- Node* overflow = Binop(wasm::kExprF64Ne, trunc, check);
- TrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow, position);
-
- return result;
+ wasm::WasmCodePosition position,
+ NumericImplementation impl) {
+ I32UConvertOps int_ops(this);
+ F64ConvertOps float_ops(this);
+ return BuildI32ConvertOp(input, position, impl,
+ jsgraph()->machine()->TruncateFloat64ToUint32(),
+ wasm::kExprF64UConvertI32, &int_ops, &float_ops);
}
Node* WasmGraphBuilder::BuildI32AsmjsSConvertF32(Node* input) {
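The {NumericImplementation::kSaturate} path above wires up the nontrapping float-to-int conversions (i32.trunc_sat_*): the nested diamonds map NaN to 0 and clamp out-of-range inputs to the integer type's min/max instead of trapping. A standalone scalar sketch of the intended semantics for the signed f64 case (a model of the graph logic, not V8 code):

    #include <cmath>
    #include <cstdint>
    #include <limits>

    int32_t I32SConvertSatF64(double input) {
      if (std::isnan(input)) return 0;  // nan_d diamond: NaN -> 0
      double trunc = std::trunc(input);
      // sat_d diamond: clamp instead of trapping on unrepresentable values.
      if (trunc < static_cast<double>(std::numeric_limits<int32_t>::min()))
        return std::numeric_limits<int32_t>::min();
      if (trunc > static_cast<double>(std::numeric_limits<int32_t>::max()))
        return std::numeric_limits<int32_t>::max();
      return static_cast<int32_t>(trunc);  // representable: the ordinary result
    }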
@@ -1861,8 +1973,7 @@ Node* WasmGraphBuilder::Throw(uint32_t tag,
break;
}
default:
- CHECK(false);
- break;
+ UNREACHABLE();
}
}
DCHECK_EQ(encoded_size, index);
@@ -1961,8 +2072,7 @@ Node** WasmGraphBuilder::GetExceptionValues(
break;
}
default:
- CHECK(false);
- break;
+ UNREACHABLE();
}
values[i] = value;
}
@@ -2330,7 +2440,7 @@ Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args,
// Make room for the wasm_context parameter at index 1, just after code.
memmove(&args[2], &args[1], params * sizeof(Node*));
- args[1] = wasm_context_;
+ args[1] = wasm_context_.get();
// Add effect and control inputs.
args[params + 2] = *effect_;
@@ -2364,7 +2474,7 @@ Node* WasmGraphBuilder::CallDirect(uint32_t index, Node** args, Node*** rets,
DCHECK_NULL(args[0]);
wasm::FunctionSig* sig = env_->module->functions[index].sig;
if (FLAG_wasm_jit_to_native) {
- // Simply encode the index of the target.
+ // Just encode the function index. This will be patched at instantiation.
Address code = reinterpret_cast<Address>(index);
args[0] = jsgraph()->RelocatableIntPtrConstant(
reinterpret_cast<intptr_t>(code), RelocInfo::WASM_CALL);
@@ -2396,45 +2506,39 @@ Node* WasmGraphBuilder::CallIndirect(uint32_t sig_index, Node** args,
Node* key = args[0];
// Bounds check against the table size.
- Node* size = function_table_sizes_[table_index];
+ Node* size = function_tables_[table_index].size;
Node* in_bounds = graph()->NewNode(machine->Uint32LessThan(), key, size);
TrapIfFalse(wasm::kTrapFuncInvalid, in_bounds, position);
- Node* table_address = function_tables_[table_index];
+ Node* table_address = function_tables_[table_index].table_addr;
Node* table = graph()->NewNode(
jsgraph()->machine()->Load(MachineType::AnyTagged()), table_address,
jsgraph()->IntPtrConstant(0), *effect_, *control_);
- Node* signatures_address = signature_tables_[table_index];
- Node* signatures = graph()->NewNode(
- jsgraph()->machine()->Load(MachineType::AnyTagged()), signatures_address,
- jsgraph()->IntPtrConstant(0), *effect_, *control_);
// Load signature from the table and check.
// The table is a FixedArray; signatures are encoded as SMIs.
- // [sig1, sig2, sig3, ...., code1, code2, code3 ...]
+ // [sig1, code1, sig2, code2, sig3, code3, ...]
+ static_assert(compiler::kFunctionTableEntrySize == 2, "consistency");
+ static_assert(compiler::kFunctionTableSignatureOffset == 0, "consistency");
+ static_assert(compiler::kFunctionTableCodeOffset == 1, "consistency");
ElementAccess access = AccessBuilder::ForFixedArrayElement();
const int fixed_offset = access.header_size - access.tag();
- {
- Node* load_sig = graph()->NewNode(
- machine->Load(MachineType::AnyTagged()), signatures,
- graph()->NewNode(machine->Int32Add(),
- graph()->NewNode(machine->Word32Shl(), key,
- Int32Constant(kPointerSizeLog2)),
- Int32Constant(fixed_offset)),
- *effect_, *control_);
- int32_t canonical_sig_num = env_->module->signature_ids[sig_index];
- CHECK_GE(sig_index, 0);
- Node* sig_match =
- graph()->NewNode(machine->WordEqual(), load_sig,
- jsgraph()->SmiConstant(canonical_sig_num));
- TrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match, position);
- }
+ Node* key_offset = graph()->NewNode(machine->Word32Shl(), key,
+ Int32Constant(kPointerSizeLog2 + 1));
+ Node* load_sig =
+ graph()->NewNode(machine->Load(MachineType::AnyTagged()), table,
+ graph()->NewNode(machine->Int32Add(), key_offset,
+ Int32Constant(fixed_offset)),
+ *effect_, *control_);
+ int32_t canonical_sig_num = env_->module->signature_ids[sig_index];
+ CHECK_GE(sig_index, 0);
+ Node* sig_match = graph()->NewNode(machine->WordEqual(), load_sig,
+ jsgraph()->SmiConstant(canonical_sig_num));
+ TrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match, position);
// Load code object from the table. It is held by a Foreign.
Node* entry = graph()->NewNode(
machine->Load(MachineType::AnyTagged()), table,
- graph()->NewNode(machine->Int32Add(),
- graph()->NewNode(machine->Word32Shl(), key,
- Int32Constant(kPointerSizeLog2)),
- Uint32Constant(fixed_offset)),
+ graph()->NewNode(machine->Int32Add(), key_offset,
+ Uint32Constant(fixed_offset + kPointerSize)),
*effect_, *control_);
if (FLAG_wasm_jit_to_native) {
Node* address = graph()->NewNode(
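The shift by {kPointerSizeLog2 + 1} above encodes the new interleaved layout: entry {key} occupies FixedArray elements 2*key (the signature Smi) and 2*key + 1 (the code, held by a Foreign). A sketch of the byte-offset arithmetic, with hypothetical helpers and the pointer-size constants taken as parameters:

    #include <cstdint>

    uintptr_t SigByteOffset(uint32_t key, int fixed_offset, int pointer_size_log2) {
      // key << (pointer_size_log2 + 1) == 2 * key * kPointerSize
      return (uintptr_t{key} << (pointer_size_log2 + 1)) + fixed_offset;
    }
    uintptr_t CodeByteOffset(uint32_t key, int fixed_offset, int pointer_size_log2) {
      return SigByteOffset(key, fixed_offset, pointer_size_log2) +
             (uintptr_t{1} << pointer_size_log2);  // one pointer further: the code slot
    }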
@@ -2715,12 +2819,8 @@ Node* WasmGraphBuilder::BuildChangeSmiToInt32(Node* value) {
}
Node* WasmGraphBuilder::BuildChangeUint32ToSmi(Node* value) {
- if (jsgraph()->machine()->Is64()) {
- value =
- graph()->NewNode(jsgraph()->machine()->ChangeUint32ToUint64(), value);
- }
- return graph()->NewNode(jsgraph()->machine()->WordShl(), value,
- BuildSmiShiftBitsConstant());
+ return graph()->NewNode(jsgraph()->machine()->WordShl(),
+ Uint32ToUintptr(value), BuildSmiShiftBitsConstant());
}
Node* WasmGraphBuilder::BuildChangeSmiToFloat64(Node* value) {
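The simplification above relies on the new {Uint32ToUintptr} helper (defined later in this file): on 64-bit targets the index must be zero-extended, never sign-extended, before the Smi shift. A minimal model, assuming the usual 32-bit Smi shift of 64-bit V8:

    #include <cstdint>

    uint64_t Uint32ToSmi64(uint32_t value) {
      uint64_t wide = value;  // zero-extension; sign-extension would corrupt values >= 2^31
      return wide << 32;      // on 64-bit V8 the Smi payload lives in the upper word
    }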
@@ -2826,7 +2926,7 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(WasmCodeWrapper wasm_code,
// the wasm function could not be re-imported into another wasm module.
int pos = 0;
args[pos++] = wasm_code_node;
- args[pos++] = wasm_context_;
+ args[pos++] = wasm_context_.get();
args[pos++] = *effect_;
args[pos++] = *control_;
@@ -2841,7 +2941,7 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(WasmCodeWrapper wasm_code,
int pos = 0;
args[pos++] = wasm_code_node;
- args[pos++] = wasm_context_;
+ args[pos++] = wasm_context_.get();
// Convert JS parameters to wasm numbers.
for (int i = 0; i < wasm_count; ++i) {
@@ -3177,7 +3277,7 @@ void WasmGraphBuilder::BuildCWasmEntry(Address wasm_context_address) {
int pos = 0;
args[pos++] = code_obj;
- args[pos++] = wasm_context_;
+ args[pos++] = wasm_context_.get();
int offset = 0;
for (wasm::ValueType type : sig_->parameters()) {
@@ -3232,7 +3332,7 @@ void WasmGraphBuilder::InitContextCache(WasmContextCacheNodes* context_cache) {
// Load the memory start.
Node* mem_start = graph()->NewNode(
- jsgraph()->machine()->Load(MachineType::UintPtr()), wasm_context_,
+ jsgraph()->machine()->Load(MachineType::UintPtr()), wasm_context_.get(),
jsgraph()->Int32Constant(
static_cast<int32_t>(offsetof(WasmContext, mem_start))),
*effect_, *control_);
@@ -3241,7 +3341,7 @@ void WasmGraphBuilder::InitContextCache(WasmContextCacheNodes* context_cache) {
// Load the memory size.
Node* mem_size = graph()->NewNode(
- jsgraph()->machine()->Load(MachineType::Uint32()), wasm_context_,
+ jsgraph()->machine()->Load(MachineType::Uint32()), wasm_context_.get(),
jsgraph()->Int32Constant(
static_cast<int32_t>(offsetof(WasmContext, mem_size))),
*effect_, *control_);
@@ -3251,7 +3351,7 @@ void WasmGraphBuilder::InitContextCache(WasmContextCacheNodes* context_cache) {
if (untrusted_code_mitigations_) {
// Load the memory mask.
Node* mem_mask = graph()->NewNode(
- jsgraph()->machine()->Load(MachineType::Uint32()), wasm_context_,
+ jsgraph()->machine()->Load(MachineType::Uint32()), wasm_context_.get(),
jsgraph()->Int32Constant(
static_cast<int32_t>(offsetof(WasmContext, mem_mask))),
*effect_, *control_);
@@ -3353,12 +3453,12 @@ void WasmGraphBuilder::GetGlobalBaseAndOffset(MachineType mem_type,
// possible to express in the graph, and would essentially constitute a
// "mem2reg" optimization in TurboFan.
globals_start_ = graph()->NewNode(
- jsgraph()->machine()->Load(MachineType::UintPtr()), wasm_context_,
+ jsgraph()->machine()->Load(MachineType::UintPtr()), wasm_context_.get(),
jsgraph()->Int32Constant(
static_cast<int32_t>(offsetof(WasmContext, globals_start))),
graph()->start(), graph()->start());
}
- *base_node = globals_start_;
+ *base_node = globals_start_.get();
*offset_node = jsgraph()->Int32Constant(offset);
if (mem_type == MachineType::Simd128() && offset != 0) {
@@ -3392,7 +3492,7 @@ Node* WasmGraphBuilder::CurrentMemoryPages() {
}
return graph()->NewNode(
jsgraph()->machine()->Word32Shr(), mem_size,
- jsgraph()->Int32Constant(WhichPowerOf2(wasm::WasmModule::kPageSize)));
+ jsgraph()->Int32Constant(WhichPowerOf2(wasm::kWasmPageSize)));
}
void WasmGraphBuilder::EnsureFunctionTableNodes() {
@@ -3401,25 +3501,21 @@ void WasmGraphBuilder::EnsureFunctionTableNodes() {
for (size_t i = 0; i < tables_size; ++i) {
wasm::GlobalHandleAddress function_handle_address =
env_->function_tables[i];
- wasm::GlobalHandleAddress signature_handle_address =
- env_->signature_tables[i];
- function_tables_.push_back(jsgraph()->RelocatableIntPtrConstant(
+ Node* table_addr = jsgraph()->RelocatableIntPtrConstant(
reinterpret_cast<intptr_t>(function_handle_address),
- RelocInfo::WASM_GLOBAL_HANDLE));
- signature_tables_.push_back(jsgraph()->RelocatableIntPtrConstant(
- reinterpret_cast<intptr_t>(signature_handle_address),
- RelocInfo::WASM_GLOBAL_HANDLE));
+ RelocInfo::WASM_GLOBAL_HANDLE);
uint32_t table_size = env_->module->function_tables[i].initial_size;
- function_table_sizes_.push_back(jsgraph()->RelocatableInt32Constant(
+ Node* size = jsgraph()->RelocatableInt32Constant(
static_cast<uint32_t>(table_size),
- RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE));
+ RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE);
+ function_tables_.push_back({table_addr, size});
}
}
Node* WasmGraphBuilder::BuildModifyThreadInWasmFlag(bool new_value) {
// TODO(eholk): generate code to modify the thread-local storage directly,
// rather than calling the runtime.
- if (!trap_handler::UseTrapHandler()) {
+ if (!use_trap_handler()) {
return *control_;
}
@@ -3507,46 +3603,55 @@ Node* WasmGraphBuilder::SetGlobal(uint32_t index, Node* val) {
return node;
}
-Node* WasmGraphBuilder::BoundsCheckMem(MachineType memtype, Node* index,
+Node* WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
uint32_t offset,
wasm::WasmCodePosition position,
EnforceBoundsCheck enforce_check) {
- if (FLAG_wasm_no_bounds_checks) return index;
+ if (FLAG_wasm_no_bounds_checks) return Uint32ToUintptr(index);
DCHECK_NOT_NULL(context_cache_);
Node* mem_size = context_cache_->mem_size;
DCHECK_NOT_NULL(mem_size);
auto m = jsgraph()->machine();
- if (trap_handler::UseTrapHandler() && enforce_check == kCanOmitBoundsCheck) {
+ if (use_trap_handler() && enforce_check == kCanOmitBoundsCheck) {
// Zero-extend the 32-bit index on 64-bit targets and let the trap handler
// do its job.
- return m->Is64() ? graph()->NewNode(m->ChangeUint32ToUint64(), index)
- : index;
+ return Uint32ToUintptr(index);
}
- uint32_t min_size = env_->module->initial_pages * wasm::WasmModule::kPageSize;
+ uint32_t min_size = env_->module->initial_pages * wasm::kWasmPageSize;
uint32_t max_size =
(env_->module->has_maximum_pages ? env_->module->maximum_pages
: wasm::kV8MaxWasmMemoryPages) *
- wasm::WasmModule::kPageSize;
-
- byte access_size = wasm::WasmOpcodes::MemSize(memtype);
+ wasm::kWasmPageSize;
if (access_size > max_size || offset > max_size - access_size) {
// The access will be out of bounds, even for the largest memory.
- TrapIfEq32(wasm::kTrapMemOutOfBounds, jsgraph()->Int32Constant(0), 0,
- position);
+ TrapIfEq32(wasm::kTrapMemOutOfBounds, Int32Constant(0), 0, position);
return jsgraph()->IntPtrConstant(0);
}
- uint32_t end_offset = offset + access_size;
-
- if (end_offset > min_size) {
+ DCHECK_LE(1, access_size);
+ // This computation cannot overflow, since
+ // {offset <= max_size - access_size <= kMaxUint32 - access_size}.
+ // It also cannot underflow, since {access_size >= 1}.
+ uint32_t end_offset = offset + access_size - 1;
+ Node* end_offset_node = Int32Constant(end_offset);
+
+ // The accessed memory is [index + offset, index + end_offset].
+ // Check that the last read byte (at {index + end_offset}) is in bounds.
+ // 1) Check that {end_offset < mem_size}. This also ensures that we can safely
+ // compute {effective_size} as {mem_size - end_offset}.
+ // {effective_size} is >= 1 if condition 1) holds.
+ // 2) Check that {index + end_offset < mem_size} by
+ // - computing {effective_size} as {mem_size - end_offset} and
+ // - checking that {index < effective_size}.
+
+ if (end_offset >= min_size) {
// The end offset is larger than the smallest memory.
// Dynamically check the end offset against the actual memory size, which
// is not known at compile time.
- Node* cond =
- graph()->NewNode(jsgraph()->machine()->Uint32LessThanOrEqual(),
- jsgraph()->Int32Constant(end_offset), mem_size);
+ Node* cond = graph()->NewNode(jsgraph()->machine()->Uint32LessThan(),
+ end_offset_node, mem_size);
TrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
} else {
// The end offset is within the bounds of the smallest memory, so only
@@ -3554,22 +3659,17 @@ Node* WasmGraphBuilder::BoundsCheckMem(MachineType memtype, Node* index,
Uint32Matcher match(index);
if (match.HasValue()) {
uint32_t index_val = match.Value();
- if (index_val <= min_size - end_offset) {
+ if (index_val < min_size - end_offset) {
// The input index is a constant and everything is statically within
// bounds of the smallest possible memory.
- return m->Is64() ? graph()->NewNode(m->ChangeUint32ToUint64(), index)
- : index;
+ return Uint32ToUintptr(index);
}
}
}
- // Compute the effective size of the memory, which is the size of the memory
- // minus the statically known offset, minus the byte size of the access minus
- // one.
- // This produces a positive number since {end_offset <= min_size <= mem_size}.
- Node* effective_size =
- graph()->NewNode(jsgraph()->machine()->Int32Sub(), mem_size,
- jsgraph()->Int32Constant(end_offset - 1));
+ // This produces a positive number, since {end_offset < min_size <= mem_size}.
+ Node* effective_size = graph()->NewNode(jsgraph()->machine()->Int32Sub(),
+ mem_size, end_offset_node);
// Introduce the actual bounds check.
Node* cond = graph()->NewNode(m->Uint32LessThan(), index, effective_size);
@@ -3581,7 +3681,7 @@ Node* WasmGraphBuilder::BoundsCheckMem(MachineType memtype, Node* index,
DCHECK_NOT_NULL(mem_mask);
index = graph()->NewNode(m->Word32And(), index, mem_mask);
}
- return m->Is64() ? graph()->NewNode(m->ChangeUint32ToUint64(), index) : index;
+ return Uint32ToUintptr(index);
}
const Operator* WasmGraphBuilder::GetSafeLoadOperator(int offset,
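The two numbered conditions in the comment above compose into a compact scalar check. A standalone sketch (mirroring, not reproducing, the graph construction), followed by a worked example:

    #include <cstdint>

    bool AccessInBounds(uint32_t index, uint32_t offset, uint8_t access_size,
                        uint32_t mem_size) {
      // No overflow: {offset <= max_size - access_size} was checked statically.
      uint32_t end_offset = offset + access_size - 1;
      if (end_offset >= mem_size) return false;         // condition 1)
      uint32_t effective_size = mem_size - end_offset;  // >= 1 here
      return index < effective_size;                    // condition 2)
    }

For a 4-byte access at offset 4, end_offset is 7; with mem_size 65536 the index must stay below 65529, so the last byte touched is at most index 65528 + 7 = 65535, exactly the final in-bounds byte.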
@@ -3609,21 +3709,28 @@ Node* WasmGraphBuilder::TraceMemoryOperation(bool is_store,
MachineRepresentation rep,
Node* index, uint32_t offset,
wasm::WasmCodePosition position) {
+ int kAlign = 4; // Ensure that the LSB is 0, such that this looks like a Smi.
+ Node* info = graph()->NewNode(
+ jsgraph()->machine()->StackSlot(sizeof(wasm::MemoryTracingInfo), kAlign));
+
Node* address = graph()->NewNode(jsgraph()->machine()->Int32Add(),
Int32Constant(offset), index);
- Node* addr_low = BuildChangeInt32ToSmi(graph()->NewNode(
- jsgraph()->machine()->Word32And(), address, Int32Constant(0xffff)));
- Node* addr_high = BuildChangeInt32ToSmi(graph()->NewNode(
- jsgraph()->machine()->Word32Shr(), address, Int32Constant(16)));
- int32_t rep_i = static_cast<int32_t>(rep);
- Node* params[] = {
- jsgraph()->SmiConstant(is_store), // is_store
- jsgraph()->SmiConstant(rep_i), // mem rep
- addr_low, // address lower half word
- addr_high // address higher half word
+ auto store = [&](int offset, MachineRepresentation rep, Node* data) {
+ *effect_ = graph()->NewNode(
+ jsgraph()->machine()->Store(StoreRepresentation(rep, kNoWriteBarrier)),
+ info, jsgraph()->Int32Constant(offset), data, *effect_, *control_);
};
- Node* call =
- BuildCallToRuntime(Runtime::kWasmTraceMemory, params, arraysize(params));
+ // Store address, is_store, and mem_rep.
+ store(offsetof(wasm::MemoryTracingInfo, address),
+ MachineRepresentation::kWord32, address);
+ store(offsetof(wasm::MemoryTracingInfo, is_store),
+ MachineRepresentation::kWord8,
+ jsgraph()->Int32Constant(is_store ? 1 : 0));
+ store(offsetof(wasm::MemoryTracingInfo, mem_rep),
+ MachineRepresentation::kWord8,
+ jsgraph()->Int32Constant(static_cast<int>(rep)));
+
+ Node* call = BuildCallToRuntime(Runtime::kWasmTraceMemory, &info, 1);
SetSourcePosition(call, position);
return call;
}
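The three stores above fill a stack slot whose layout must line up with {wasm::MemoryTracingInfo} from the newly included src/wasm/memory-tracing.h. Inferred from the offsets and representations used here (an assumption, not a quotation of that header):

    struct MemoryTracingInfo {
      uint32_t address;  // stored as kWord32
      uint8_t is_store;  // 0 or 1, stored as kWord8
      uint8_t mem_rep;   // the MachineRepresentation, stored as kWord8
    };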
@@ -3636,11 +3743,12 @@ Node* WasmGraphBuilder::LoadMem(wasm::ValueType type, MachineType memtype,
// Wasm semantics throw on OOB. Introduce explicit bounds check and
// conditioning when not using the trap handler.
- index = BoundsCheckMem(memtype, index, offset, position, kCanOmitBoundsCheck);
+ index = BoundsCheckMem(wasm::WasmOpcodes::MemSize(memtype), index, offset,
+ position, kCanOmitBoundsCheck);
if (memtype.representation() == MachineRepresentation::kWord8 ||
jsgraph()->machine()->UnalignedLoadSupported(memtype.representation())) {
- if (trap_handler::UseTrapHandler()) {
+ if (use_trap_handler()) {
load = graph()->NewNode(jsgraph()->machine()->ProtectedLoad(memtype),
MemBuffer(offset), index, *effect_, *control_);
SetSourcePosition(load, position);
@@ -3650,7 +3758,7 @@ Node* WasmGraphBuilder::LoadMem(wasm::ValueType type, MachineType memtype,
}
} else {
// TODO(eholk): Support unaligned loads with trap handlers.
- DCHECK(!trap_handler::UseTrapHandler());
+ DCHECK(!use_trap_handler());
load = graph()->NewNode(jsgraph()->machine()->UnalignedLoad(memtype),
MemBuffer(offset), index, *effect_, *control_);
}
@@ -3682,35 +3790,36 @@ Node* WasmGraphBuilder::LoadMem(wasm::ValueType type, MachineType memtype,
return load;
}
-Node* WasmGraphBuilder::StoreMem(MachineType memtype, Node* index,
+Node* WasmGraphBuilder::StoreMem(MachineRepresentation mem_rep, Node* index,
uint32_t offset, uint32_t alignment, Node* val,
wasm::WasmCodePosition position,
wasm::ValueType type) {
Node* store;
- index = BoundsCheckMem(memtype, index, offset, position, kCanOmitBoundsCheck);
+ index = BoundsCheckMem(wasm::WasmOpcodes::MemSize(mem_rep), index, offset,
+ position, kCanOmitBoundsCheck);
#if defined(V8_TARGET_BIG_ENDIAN)
- val = BuildChangeEndiannessStore(val, memtype, type);
+ val = BuildChangeEndiannessStore(val, mem_rep, type);
#endif
- if (memtype.representation() == MachineRepresentation::kWord8 ||
- jsgraph()->machine()->UnalignedStoreSupported(memtype.representation())) {
- if (trap_handler::UseTrapHandler()) {
- store = graph()->NewNode(
- jsgraph()->machine()->ProtectedStore(memtype.representation()),
- MemBuffer(offset), index, val, *effect_, *control_);
+ if (mem_rep == MachineRepresentation::kWord8 ||
+ jsgraph()->machine()->UnalignedStoreSupported(mem_rep)) {
+ if (use_trap_handler()) {
+ store =
+ graph()->NewNode(jsgraph()->machine()->ProtectedStore(mem_rep),
+ MemBuffer(offset), index, val, *effect_, *control_);
SetSourcePosition(store, position);
} else {
- StoreRepresentation rep(memtype.representation(), kNoWriteBarrier);
+ StoreRepresentation rep(mem_rep, kNoWriteBarrier);
store =
graph()->NewNode(jsgraph()->machine()->Store(rep), MemBuffer(offset),
index, val, *effect_, *control_);
}
} else {
// TODO(eholk): Support unaligned stores with trap handlers.
- DCHECK(!trap_handler::UseTrapHandler());
- UnalignedStoreRepresentation rep(memtype.representation());
+ DCHECK(!use_trap_handler());
+ UnalignedStoreRepresentation rep(mem_rep);
store =
graph()->NewNode(jsgraph()->machine()->UnalignedStore(rep),
MemBuffer(offset), index, val, *effect_, *control_);
@@ -3719,8 +3828,7 @@ Node* WasmGraphBuilder::StoreMem(MachineType memtype, Node* index,
*effect_ = store;
if (FLAG_wasm_trace_memory) {
- TraceMemoryOperation(true, memtype.representation(), index, offset,
- position);
+ TraceMemoryOperation(true, mem_rep, index, offset, position);
}
return store;
@@ -3772,10 +3880,7 @@ Node* WasmGraphBuilder::BuildAsmjsLoadMem(MachineType type, Node* index) {
graph()->NewNode(jsgraph()->machine()->Word32And(), index, mem_mask);
}
- if (jsgraph()->machine()->Is64()) {
- index =
- graph()->NewNode(jsgraph()->machine()->ChangeUint32ToUint64(), index);
- }
+ index = Uint32ToUintptr(index);
Node* load = graph()->NewNode(jsgraph()->machine()->Load(type), mem_start,
index, *effect_, bounds_check.if_true);
Node* value_phi =
@@ -3788,6 +3893,11 @@ Node* WasmGraphBuilder::BuildAsmjsLoadMem(MachineType type, Node* index) {
return value_phi;
}
+Node* WasmGraphBuilder::Uint32ToUintptr(Node* node) {
+ if (jsgraph()->machine()->Is32()) return node;
+ return graph()->NewNode(jsgraph()->machine()->ChangeUint32ToUint64(), node);
+}
+
Node* WasmGraphBuilder::BuildAsmjsStoreMem(MachineType type, Node* index,
Node* val) {
DCHECK_NOT_NULL(context_cache_);
@@ -3814,10 +3924,7 @@ Node* WasmGraphBuilder::BuildAsmjsStoreMem(MachineType type, Node* index,
graph()->NewNode(jsgraph()->machine()->Word32And(), index, mem_mask);
}
- if (jsgraph()->machine()->Is64()) {
- index =
- graph()->NewNode(jsgraph()->machine()->ChangeUint32ToUint64(), index);
- }
+ index = Uint32ToUintptr(index);
const Operator* store_op = jsgraph()->machine()->Store(StoreRepresentation(
type.representation(), WriteBarrierKind::kNoWriteBarrier));
Node* store = graph()->NewNode(store_op, mem_start, index, val, *effect_,
@@ -4302,22 +4409,24 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
// TODO(gdeepti): Add alignment validation, traps on misalignment
Node* node;
switch (opcode) {
-#define BUILD_ATOMIC_BINOP(Name, Operation, Type) \
- case wasm::kExpr##Name: { \
- Node* index = BoundsCheckMem(MachineType::Type(), inputs[0], offset, \
- position, kNeedsBoundsCheck); \
- node = graph()->NewNode( \
- jsgraph()->machine()->Atomic##Operation(MachineType::Type()), \
- MemBuffer(offset), index, inputs[1], *effect_, *control_); \
- break; \
+#define BUILD_ATOMIC_BINOP(Name, Operation, Type) \
+ case wasm::kExpr##Name: { \
+ Node* index = \
+ BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \
+ inputs[0], offset, position, kNeedsBoundsCheck); \
+ node = graph()->NewNode( \
+ jsgraph()->machine()->Atomic##Operation(MachineType::Type()), \
+ MemBuffer(offset), index, inputs[1], *effect_, *control_); \
+ break; \
}
ATOMIC_BINOP_LIST(BUILD_ATOMIC_BINOP)
#undef BUILD_ATOMIC_BINOP
#define BUILD_ATOMIC_TERNARY_OP(Name, Operation, Type) \
case wasm::kExpr##Name: { \
- Node* index = BoundsCheckMem(MachineType::Type(), inputs[0], offset, \
- position, kNeedsBoundsCheck); \
+ Node* index = \
+ BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \
+ inputs[0], offset, position, kNeedsBoundsCheck); \
node = graph()->NewNode( \
jsgraph()->machine()->Atomic##Operation(MachineType::Type()), \
MemBuffer(offset), index, inputs[1], inputs[2], *effect_, *control_); \
@@ -4326,26 +4435,28 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
ATOMIC_TERNARY_LIST(BUILD_ATOMIC_TERNARY_OP)
#undef BUILD_ATOMIC_TERNARY_OP
-#define BUILD_ATOMIC_LOAD_OP(Name, Type) \
- case wasm::kExpr##Name: { \
- Node* index = BoundsCheckMem(MachineType::Type(), inputs[0], offset, \
- position, kNeedsBoundsCheck); \
- node = graph()->NewNode( \
- jsgraph()->machine()->AtomicLoad(MachineType::Type()), \
- MemBuffer(offset), index, *effect_, *control_); \
- break; \
+#define BUILD_ATOMIC_LOAD_OP(Name, Type) \
+ case wasm::kExpr##Name: { \
+ Node* index = \
+ BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \
+ inputs[0], offset, position, kNeedsBoundsCheck); \
+ node = graph()->NewNode( \
+ jsgraph()->machine()->AtomicLoad(MachineType::Type()), \
+ MemBuffer(offset), index, *effect_, *control_); \
+ break; \
}
ATOMIC_LOAD_LIST(BUILD_ATOMIC_LOAD_OP)
#undef BUILD_ATOMIC_LOAD_OP
-#define BUILD_ATOMIC_STORE_OP(Name, Type, Rep) \
- case wasm::kExpr##Name: { \
- Node* index = BoundsCheckMem(MachineType::Type(), inputs[0], offset, \
- position, kNeedsBoundsCheck); \
- node = graph()->NewNode( \
- jsgraph()->machine()->AtomicStore(MachineRepresentation::Rep), \
- MemBuffer(offset), index, inputs[1], *effect_, *control_); \
- break; \
+#define BUILD_ATOMIC_STORE_OP(Name, Type, Rep) \
+ case wasm::kExpr##Name: { \
+ Node* index = \
+ BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \
+ inputs[0], offset, position, kNeedsBoundsCheck); \
+ node = graph()->NewNode( \
+ jsgraph()->machine()->AtomicStore(MachineRepresentation::Rep), \
+ MemBuffer(offset), index, inputs[1], *effect_, *control_); \
+ break; \
}
ATOMIC_STORE_LIST(BUILD_ATOMIC_STORE_OP)
#undef BUILD_ATOMIC_STORE_OP
@@ -4391,7 +4502,8 @@ void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::WasmModule* module,
WasmCodeWrapper wasm_code, uint32_t index,
- Address wasm_context_address) {
+ Address wasm_context_address,
+ bool use_trap_handler) {
const wasm::WasmFunction* func = &module->functions[index];
//----------------------------------------------------------------------------
@@ -4410,15 +4522,11 @@ Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::WasmModule* module,
Node* effect = nullptr;
// TODO(titzer): compile JS to WASM wrappers without a {ModuleEnv}.
- ModuleEnv env = {
- module,
- std::vector<Address>(), // function_tables
- std::vector<Address>(), // signature_tables
- // TODO(mtrofin): remove these 2 lines when we don't need
- // FLAG_wasm_jit_to_native
- std::vector<Handle<Code>>(), // function_code
- BUILTIN_CODE(isolate, Illegal) // default_function_code
- };
+ ModuleEnv env(module,
+ // TODO(mtrofin): remove the Illegal builtin when we don't need
+ // FLAG_wasm_jit_to_native
+ BUILTIN_CODE(isolate, Illegal), // default_function_code
+ use_trap_handler);
WasmGraphBuilder builder(&env, &zone, &jsgraph,
CEntryStub(isolate, 1).GetCode(), func->sig);
@@ -4470,9 +4578,7 @@ Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::WasmModule* module,
namespace {
void ValidateImportWrapperReferencesImmovables(Handle<Code> wrapper) {
-#if !DEBUG
- return;
-#endif
+#ifdef DEBUG
// We expect the only embedded objects to be those originating from
// a snapshot, which are immovable.
DisallowHeapAllocation no_gc;
@@ -4493,7 +4599,7 @@ void ValidateImportWrapperReferencesImmovables(Handle<Code> wrapper) {
default:
UNREACHABLE();
}
- CHECK_NOT_NULL(target);
+ DCHECK_NOT_NULL(target);
bool is_immovable =
target->IsSmi() || Heap::IsImmovable(HeapObject::cast(target));
bool is_allowed_stub = false;
@@ -4503,15 +4609,16 @@ void ValidateImportWrapperReferencesImmovables(Handle<Code> wrapper) {
code->kind() == Code::STUB &&
CodeStub::MajorKeyFromKey(code->stub_key()) == CodeStub::DoubleToI;
}
- CHECK(is_immovable || is_allowed_stub);
+ DCHECK(is_immovable || is_allowed_stub);
}
+#endif
}
} // namespace
Handle<Code> CompileWasmToJSWrapper(
Isolate* isolate, Handle<JSReceiver> target, wasm::FunctionSig* sig,
- uint32_t index, wasm::ModuleOrigin origin,
+ uint32_t index, wasm::ModuleOrigin origin, bool use_trap_handler,
Handle<FixedArray> global_js_imports_table) {
//----------------------------------------------------------------------------
// Create the Graph
@@ -4532,7 +4639,8 @@ Handle<Code> CompileWasmToJSWrapper(
origin == wasm::kAsmJsOrigin ? new (&zone) SourcePositionTable(&graph)
: nullptr;
- WasmGraphBuilder builder(nullptr, &zone, &jsgraph,
+ ModuleEnv env(nullptr, Handle<Code>::null(), use_trap_handler);
+ WasmGraphBuilder builder(&env, &zone, &jsgraph,
CEntryStub(isolate, 1).GetCode(), sig,
source_position_table);
builder.set_control_ptr(&control);
@@ -4618,7 +4726,10 @@ Handle<Code> CompileWasmToWasmWrapper(Isolate* isolate, WasmCodeWrapper target,
Node* control = nullptr;
Node* effect = nullptr;
- WasmGraphBuilder builder(nullptr, &zone, &jsgraph, Handle<Code>(), sig);
+ ModuleEnv env(
+ nullptr, Handle<Code>::null(),
+ !target.IsCodeObject() && target.GetWasmCode()->HasTrapHandlerIndex());
+ WasmGraphBuilder builder(&env, &zone, &jsgraph, Handle<Code>(), sig);
builder.set_control_ptr(&control);
builder.set_effect_ptr(&effect);
builder.BuildWasmToWasmWrapper(target, new_wasm_context_address);
@@ -4804,13 +4915,6 @@ Handle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig,
SourcePositionTable* WasmCompilationUnit::BuildGraphForWasmFunction(
double* decode_ms) {
-#if DEBUG
- if (env_) {
- size_t tables_size = env_->module->function_tables.size();
- DCHECK_EQ(tables_size, env_->function_tables.size());
- DCHECK_EQ(tables_size, env_->signature_tables.size());
- }
-#endif
base::ElapsedTimer decode_timer;
if (FLAG_trace_wasm_decode_time) {
@@ -4825,7 +4929,6 @@ SourcePositionTable* WasmCompilationUnit::BuildGraphForWasmFunction(
runtime_exception_support_);
tf_.graph_construction_result_ =
wasm::BuildTFGraph(isolate_->allocator(), &builder, func_body_);
-
if (tf_.graph_construction_result_.failed()) {
if (FLAG_trace_wasm_compiler) {
OFStream os(stdout);
@@ -4844,7 +4947,8 @@ SourcePositionTable* WasmCompilationUnit::BuildGraphForWasmFunction(
if (func_index_ >= FLAG_trace_wasm_ast_start &&
func_index_ < FLAG_trace_wasm_ast_end) {
- PrintRawWasmCode(isolate_->allocator(), func_body_, env_->module);
+ PrintRawWasmCode(isolate_->allocator(), func_body_, env_->module,
+ wasm::kPrintLocals);
}
if (FLAG_trace_wasm_decode_time) {
*decode_ms = decode_timer.Elapsed().InMillisecondsF();
@@ -4857,9 +4961,7 @@ Vector<const char> GetDebugName(Zone* zone, wasm::WasmName name, int index) {
if (!name.is_empty()) {
return name;
}
-#ifndef DEBUG
- return {};
-#endif
+#ifdef DEBUG
constexpr int kBufferLength = 15;
EmbeddedVector<char, kBufferLength> name_vector;
@@ -4869,6 +4971,9 @@ Vector<const char> GetDebugName(Zone* zone, wasm::WasmName name, int index) {
char* index_name = zone->NewArray<char>(name_len);
memcpy(index_name, name_vector.start(), name_len);
return Vector<const char>(index_name, name_len);
+#else
+ return {};
+#endif
}
} // namespace
@@ -5090,7 +5195,7 @@ WasmCodeWrapper WasmCompilationUnit::FinishTurbofanCompilation(
desc, tf_.job_->compilation_info()->wasm_code_desc()->frame_slot_count,
func_index_,
tf_.job_->compilation_info()->wasm_code_desc()->safepoint_table_offset,
- protected_instructions_);
+ std::move(protected_instructions_));
if (!code) {
return WasmCodeWrapper(code);
}
@@ -5107,13 +5212,24 @@ WasmCodeWrapper WasmCompilationUnit::FinishTurbofanCompilation(
MaybeHandle<HandlerTable> handler_table =
tf_.job_->compilation_info()->wasm_code_desc()->handler_table;
- int function_index_as_int = static_cast<int>(func_index_);
native_module_->compiled_module()->source_positions()->set(
- function_index_as_int, *source_positions);
+ func_index_, *source_positions);
if (!handler_table.is_null()) {
native_module_->compiled_module()->handler_table()->set(
- function_index_as_int, *handler_table.ToHandleChecked());
+ func_index_, *handler_table.ToHandleChecked());
}
+
+#ifdef ENABLE_DISASSEMBLER
+ // Note: only do this after setting source positions, as they are
+ // accessed when the code is printed here.
+ if (FLAG_print_code || FLAG_print_wasm_code) {
+ // TODO(wasm): Use proper log files, here and elsewhere.
+ PrintF("--- Native Wasm code ---\n");
+ code->Print(isolate_);
+ PrintF("--- End code ---\n");
+ }
+#endif
+
// TODO(mtrofin): this should probably move up in the common caller,
// once liftoff has source positions. Until then, we'd need to handle
// undefined values, which is complicating the code.
@@ -5147,21 +5263,21 @@ WasmCodeWrapper WasmCompilationUnit::FinishLiftoffCompilation(
wasm::ErrorThrower* thrower) {
CodeDesc desc;
liftoff_.asm_.GetCode(isolate_, &desc);
+
+ Handle<ByteArray> source_positions =
+ liftoff_.source_position_table_builder_.ToSourcePositionTable(isolate_);
+
WasmCodeWrapper ret;
if (!FLAG_wasm_jit_to_native) {
Handle<Code> code;
- code = isolate_->factory()->NewCode(desc, Code::WASM_FUNCTION, code);
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_code || FLAG_print_wasm_code) {
- // TODO(wasm): Use proper log files, here and elsewhere.
- OFStream os(stdout);
- os << "--- Wasm liftoff code ---\n";
- EmbeddedVector<char, 32> func_name;
- func_name.Truncate(SNPrintF(func_name, "wasm#%d-liftoff", func_index_));
- code->Disassemble(func_name.start(), os);
- os << "--- End code ---\n";
- }
-#endif
+ code = isolate_->factory()->NewCode(
+ desc, Code::WASM_FUNCTION, code, Builtins::kNoBuiltinId,
+ MaybeHandle<HandlerTable>(), source_positions,
+ MaybeHandle<DeoptimizationData>(), kMovable,
+ 0, // stub_key
+ false, // is_turbofanned
+ liftoff_.asm_.GetTotalFrameSlotCount(), // stack_slots
+ liftoff_.safepoint_table_offset_);
if (isolate_->logger()->is_logging_code_events() ||
isolate_->is_profiling()) {
RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, isolate_, code,
@@ -5169,15 +5285,34 @@ WasmCodeWrapper WasmCompilationUnit::FinishLiftoffCompilation(
}
PackProtectedInstructions(code);
- return WasmCodeWrapper(code);
+ ret = WasmCodeWrapper(code);
} else {
- // TODO(mtrofin): figure a way to raise events; also, disassembly.
- // Consider lifting them both to FinishCompilation.
- return WasmCodeWrapper(native_module_->AddCode(
- desc, liftoff_.asm_.GetTotalFrameSlotCount(), func_index_,
- liftoff_.asm_.GetSafepointTableOffset(), protected_instructions_,
- true));
+ // TODO(mtrofin): figure out a way to raise events.
+ // Consider lifting it to FinishCompilation.
+ native_module_->compiled_module()->source_positions()->set(
+ func_index_, *source_positions);
+ ret = WasmCodeWrapper(
+ native_module_->AddCode(desc, liftoff_.asm_.GetTotalFrameSlotCount(),
+ func_index_, liftoff_.safepoint_table_offset_,
+ std::move(protected_instructions_), true));
}
+#ifdef ENABLE_DISASSEMBLER
+ if (FLAG_print_code || FLAG_print_wasm_code) {
+ // TODO(wasm): Use proper log files, here and elsewhere.
+ OFStream os(stdout);
+ os << "--- Wasm liftoff code ---\n";
+ EmbeddedVector<char, 64> func_name;
+ if (func_name_.start() != nullptr) {
+ SNPrintF(func_name, "#%d:%.*s", func_index(), func_name_.length(),
+ func_name_.start());
+ } else {
+ SNPrintF(func_name, "wasm#%d", func_index());
+ }
+ ret.Disassemble(func_name.start(), isolate_, os);
+ os << "--- End code ---\n";
+ }
+#endif
+ return ret;
}
// static
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index 146f3044ca..22a2e1071e 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -43,25 +43,34 @@ class WasmCode;
namespace compiler {
+// Indirect function tables contain a <smi(sig), code> pair for each entry.
+enum FunctionTableEntries : int {
+ kFunctionTableSignatureOffset = 0,
+ kFunctionTableCodeOffset = 1,
+ kFunctionTableEntrySize = 2
+};
+constexpr inline int FunctionTableSigOffset(int i) {
+ return kFunctionTableEntrySize * i + kFunctionTableSignatureOffset;
+}
+constexpr inline int FunctionTableCodeOffset(int i) {
+ return kFunctionTableEntrySize * i + kFunctionTableCodeOffset;
+}
+
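// Quick layout check for the helpers above (illustrative values): entry i
// occupies two consecutive FixedArray slots, so FunctionTableSigOffset(0) == 0,
// FunctionTableCodeOffset(0) == 1, FunctionTableSigOffset(3) == 6, and
// FunctionTableCodeOffset(3) == 7.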
// The {ModuleEnv} encapsulates the module data that is used by the
// {WasmGraphBuilder} during graph building. It represents the parameters to
// which the compiled code should be specialized, including which code to call
// for direct calls {function_code}, which tables to use for indirect calls
// {function_tables}, memory start address and size {mem_start, mem_size},
-// as well as signature maps {signature_maps} and the module itself {module}.
+// as well as the module itself {module}.
// ModuleEnvs are shareable across multiple compilations.
struct ModuleEnv {
// A pointer to the decoded module's static representation.
const wasm::WasmModule* module;
- // The function tables are FixedArrays of code used to dispatch indirect
- // calls. (the same length as module.function_tables). We use the address
- // to a global handle to the FixedArray.
+ // The function tables are FixedArrays of <smi, code> pairs used to signature
+ // check and dispatch indirect calls. They have the same length as
+ // module.function_tables. We use the address of a global handle to the
+ // FixedArray.
const std::vector<Address> function_tables;
- // The signatures tables are FixedArrays of SMIs used to check signatures
- // match at runtime.
- // (the same length as module.function_tables)
- // We use the address to a global handle to the FixedArray.
- const std::vector<Address> signature_tables;
// TODO(mtrofin): remove these 2 once we don't need FLAG_wasm_jit_to_native
// Contains the code objects to call for each direct call.
@@ -69,6 +78,25 @@ struct ModuleEnv {
const std::vector<Handle<Code>> function_code;
// If the default code is not a null handle, always use it for direct calls.
const Handle<Code> default_function_code;
+ // True if trap handling should be used in compiled code, rather than
+ // compiling in bounds checks for each memory access.
+ const bool use_trap_handler;
+
+ ModuleEnv(const wasm::WasmModule* module, Handle<Code> default_function_code,
+ bool use_trap_handler)
+ : module(module),
+ default_function_code(default_function_code),
+ use_trap_handler(use_trap_handler) {}
+
+ ModuleEnv(const wasm::WasmModule* module,
+ std::vector<Address> function_tables,
+ std::vector<Handle<Code>> function_code,
+ Handle<Code> default_function_code, bool use_trap_handler)
+ : module(module),
+ function_tables(std::move(function_tables)),
+ function_code(std::move(function_code)),
+ default_function_code(default_function_code),
+ use_trap_handler(use_trap_handler) {}
};
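
A hedged sketch of how the two constructors split the call sites (mirroring the ones patched in wasm-compiler.cc above; tables, code, and default_code here are placeholder locals, not patch code):

// Native path: no per-call-site code tables needed.
ModuleEnv env(module, BUILTIN_CODE(isolate, Illegal), use_trap_handler);
// Legacy path, kept only while FLAG_wasm_jit_to_native can be off.
ModuleEnv legacy(module, std::move(tables),   // std::vector<Address>
                 std::move(code),             // std::vector<Handle<Code>>
                 default_code, use_trap_handler);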
enum RuntimeExceptionSupport : bool {
@@ -114,6 +142,11 @@ class WasmCompilationUnit final {
struct LiftoffData {
wasm::LiftoffAssembler asm_;
+ int safepoint_table_offset_;
+ SourcePositionTableBuilder source_position_table_builder_;
+ // The {codegen_zone_} needs to survive until FinishCompilation. It's only
+ // rarely used (e.g. for runtime calls), so it's only allocated when needed.
+ std::unique_ptr<Zone> codegen_zone_;
explicit LiftoffData(Isolate* isolate) : asm_(isolate) {}
};
struct TurbofanData {
@@ -151,7 +184,7 @@ class WasmCompilationUnit final {
size_t memory_cost_ = 0;
wasm::NativeModule* native_module_;
bool lower_simd_;
- std::shared_ptr<std::vector<trap_handler::ProtectedInstructionData>>
+ std::unique_ptr<std::vector<trap_handler::ProtectedInstructionData>>
protected_instructions_;
CompilationMode mode_;
// {liftoff_} is valid if mode_ == kLiftoff, tf_ if mode_ == kTurbofan.
@@ -172,12 +205,13 @@ class WasmCompilationUnit final {
Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
wasm::FunctionSig* sig, uint32_t index,
wasm::ModuleOrigin origin,
+ bool use_trap_handler,
Handle<FixedArray> global_js_imports_table);
// Wraps a given wasm code object, producing a code object.
V8_EXPORT_PRIVATE Handle<Code> CompileJSToWasmWrapper(
Isolate* isolate, wasm::WasmModule* module, WasmCodeWrapper wasm_code,
- uint32_t index, Address wasm_context_address);
+ uint32_t index, Address wasm_context_address, bool use_trap_handler);
// Wraps a wasm function, producing a code object that can be called from other
// wasm instances (the WasmContext address must be changed).
@@ -221,6 +255,8 @@ typedef ZoneVector<Node*> NodeVector;
class WasmGraphBuilder {
public:
enum EnforceBoundsCheck : bool { kNeedsBoundsCheck, kCanOmitBoundsCheck };
+ struct IntConvertOps;
+ struct FloatConvertOps;
WasmGraphBuilder(ModuleEnv* env, Zone* zone, JSGraph* graph,
Handle<Code> centry_stub, wasm::FunctionSig* sig,
@@ -351,7 +387,7 @@ class WasmGraphBuilder {
Node* LoadMem(wasm::ValueType type, MachineType memtype, Node* index,
uint32_t offset, uint32_t alignment,
wasm::WasmCodePosition position);
- Node* StoreMem(MachineType memtype, Node* index, uint32_t offset,
+ Node* StoreMem(MachineRepresentation mem_rep, Node* index, uint32_t offset,
uint32_t alignment, Node* val, wasm::WasmCodePosition position,
wasm::ValueType type);
static void PrintDebugName(Node* node);
@@ -413,36 +449,43 @@ class WasmGraphBuilder {
const wasm::WasmModule* module() { return env_ ? env_->module : nullptr; }
+ bool use_trap_handler() const { return env_ && env_->use_trap_handler; }
+
private:
+ enum class NumericImplementation : uint8_t { kTrap, kSaturate };
static const int kDefaultBufferSize = 16;
- Zone* zone_;
- JSGraph* jsgraph_;
- Node* centry_stub_node_;
- ModuleEnv* env_ = nullptr;
- Node* wasm_context_ = nullptr;
- NodeVector signature_tables_;
- NodeVector function_tables_;
- NodeVector function_table_sizes_;
+ Zone* const zone_;
+ JSGraph* const jsgraph_;
+ Node* const centry_stub_node_;
+ // env_ == nullptr means we are not compiling Wasm functions, e.g. when
+ // building wrappers or interpreter stubs.
+ ModuleEnv* const env_ = nullptr;
+ SetOncePointer<Node> wasm_context_;
+ struct FunctionTableNodes {
+ Node* table_addr;
+ Node* size;
+ };
+ ZoneVector<FunctionTableNodes> function_tables_;
Node** control_ = nullptr;
Node** effect_ = nullptr;
WasmContextCacheNodes* context_cache_ = nullptr;
- Node* globals_start_ = nullptr;
+ SetOncePointer<Node> globals_start_;
Node** cur_buffer_;
size_t cur_bufsize_;
Node* def_buffer_[kDefaultBufferSize];
bool has_simd_ = false;
bool needs_stack_check_ = false;
- bool untrusted_code_mitigations_ = true;
+ const bool untrusted_code_mitigations_ = true;
// If the runtime doesn't support exception propagation,
// we won't generate stack checks, and trap handling will also
// be generated differently.
- RuntimeExceptionSupport runtime_exception_support_;
+ const RuntimeExceptionSupport runtime_exception_support_;
- wasm::FunctionSig* sig_;
+ wasm::FunctionSig* const sig_;
SetOncePointer<const Operator> allocate_heap_number_operator_;
- compiler::SourcePositionTable* source_position_table_ = nullptr;
+ compiler::SourcePositionTable* const source_position_table_ = nullptr;
// Internal helper methods.
JSGraph* jsgraph() { return jsgraph_; }
@@ -451,11 +494,12 @@ class WasmGraphBuilder {
Node* String(const char* string);
Node* MemBuffer(uint32_t offset);
// BoundsCheckMem receives a uint32 {index} node and returns a ptrsize index.
- Node* BoundsCheckMem(MachineType memtype, Node* index, uint32_t offset,
+ Node* BoundsCheckMem(uint8_t access_size, Node* index, uint32_t offset,
wasm::WasmCodePosition, EnforceBoundsCheck);
+ Node* Uint32ToUintptr(Node*);
const Operator* GetSafeLoadOperator(int offset, wasm::ValueType type);
const Operator* GetSafeStoreOperator(int offset, wasm::ValueType type);
- Node* BuildChangeEndiannessStore(Node* node, MachineType type,
+ Node* BuildChangeEndiannessStore(Node* node, MachineRepresentation rep,
wasm::ValueType wasmtype = wasm::kWasmStmt);
Node* BuildChangeEndiannessLoad(Node* node, MachineType type,
wasm::ValueType wasmtype = wasm::kWasmStmt);
@@ -470,10 +514,25 @@ class WasmGraphBuilder {
Node* BuildF32CopySign(Node* left, Node* right);
Node* BuildF64CopySign(Node* left, Node* right);
- Node* BuildI32SConvertF32(Node* input, wasm::WasmCodePosition position);
- Node* BuildI32SConvertF64(Node* input, wasm::WasmCodePosition position);
- Node* BuildI32UConvertF32(Node* input, wasm::WasmCodePosition position);
- Node* BuildI32UConvertF64(Node* input, wasm::WasmCodePosition position);
+
+ Node* BuildI32ConvertOp(Node* input, wasm::WasmCodePosition position,
+ NumericImplementation impl, const Operator* op,
+ wasm::WasmOpcode check_op,
+ const IntConvertOps* int_ops,
+ const FloatConvertOps* float_ops);
+ Node* BuildConvertCheck(Node* test, Node* result, Node* input,
+ wasm::WasmCodePosition position,
+ NumericImplementation impl,
+ const IntConvertOps* int_ops,
+ const FloatConvertOps* float_ops);
+ Node* BuildI32SConvertF32(Node* input, wasm::WasmCodePosition position,
+ NumericImplementation impl);
+ Node* BuildI32SConvertF64(Node* input, wasm::WasmCodePosition position,
+ NumericImplementation impl);
+ Node* BuildI32UConvertF32(Node* input, wasm::WasmCodePosition position,
+ NumericImplementation impl);
+ Node* BuildI32UConvertF64(Node* input, wasm::WasmCodePosition position,
+ NumericImplementation impl);
Node* BuildI32Ctz(Node* input);
Node* BuildI32Popcnt(Node* input);
Node* BuildI64Ctz(Node* input);
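
The NumericImplementation parameter threaded through the BuildI32*Convert* helpers lets one graph-building path serve both the trapping truncations and the saturating variants. As a model of the two behaviors for an f64 -> i32 signed conversion (illustrative C++, not code from this patch; Trap() stands in for the generated trap sequence):

#include <cmath>    // std::isnan
#include <cstdint>  // int32_t, INT32_MIN, INT32_MAX

[[noreturn]] void Trap();  // placeholder for the emitted trap

int32_t TruncOrTrap(double x) {      // NumericImplementation::kTrap
  if (std::isnan(x) || x <= -2147483649.0 || x >= 2147483648.0) Trap();
  return static_cast<int32_t>(x);    // the cast truncates toward zero
}

int32_t TruncSaturating(double x) {  // NumericImplementation::kSaturate
  if (std::isnan(x)) return 0;
  if (x <= -2147483649.0) return INT32_MIN;
  if (x >= 2147483648.0) return INT32_MAX;
  return static_cast<int32_t>(x);
}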
diff --git a/deps/v8/src/compiler/wasm-linkage.cc b/deps/v8/src/compiler/wasm-linkage.cc
index e231d15f10..e7bb3c164a 100644
--- a/deps/v8/src/compiler/wasm-linkage.cc
+++ b/deps/v8/src/compiler/wasm-linkage.cc
@@ -47,7 +47,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == ia32 ===================================================================
// ===========================================================================
#define GP_PARAM_REGISTERS esi, eax, edx, ecx, ebx
-#define GP_RETURN_REGISTERS eax, edx, ecx
+#define GP_RETURN_REGISTERS eax, edx
#define FP_PARAM_REGISTERS xmm1, xmm2, xmm3, xmm4, xmm5, xmm6
#define FP_RETURN_REGISTERS xmm1, xmm2
@@ -56,7 +56,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == x64 ====================================================================
// ===========================================================================
#define GP_PARAM_REGISTERS rsi, rax, rdx, rcx, rbx, rdi
-#define GP_RETURN_REGISTERS rax, rdx, rcx
+#define GP_RETURN_REGISTERS rax, rdx
#define FP_PARAM_REGISTERS xmm1, xmm2, xmm3, xmm4, xmm5, xmm6
#define FP_RETURN_REGISTERS xmm1, xmm2
@@ -65,7 +65,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == arm ====================================================================
// ===========================================================================
#define GP_PARAM_REGISTERS r3, r0, r1, r2
-#define GP_RETURN_REGISTERS r0, r1, r3
+#define GP_RETURN_REGISTERS r0, r1
#define FP_PARAM_REGISTERS d0, d1, d2, d3, d4, d5, d6, d7
#define FP_RETURN_REGISTERS d0, d1
@@ -74,7 +74,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == arm64 ====================================================================
// ===========================================================================
#define GP_PARAM_REGISTERS x7, x0, x1, x2, x3, x4, x5, x6
-#define GP_RETURN_REGISTERS x0, x1, x2
+#define GP_RETURN_REGISTERS x0, x1
#define FP_PARAM_REGISTERS d0, d1, d2, d3, d4, d5, d6, d7
#define FP_RETURN_REGISTERS d0, d1
@@ -83,7 +83,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == mips ===================================================================
// ===========================================================================
#define GP_PARAM_REGISTERS a0, a1, a2, a3
-#define GP_RETURN_REGISTERS v0, v1, t7
+#define GP_RETURN_REGISTERS v0, v1
#define FP_PARAM_REGISTERS f2, f4, f6, f8, f10, f12, f14
#define FP_RETURN_REGISTERS f2, f4
@@ -92,7 +92,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == mips64 =================================================================
// ===========================================================================
#define GP_PARAM_REGISTERS a0, a1, a2, a3, a4, a5, a6, a7
-#define GP_RETURN_REGISTERS v0, v1, t3
+#define GP_RETURN_REGISTERS v0, v1
#define FP_PARAM_REGISTERS f2, f4, f6, f8, f10, f12, f14
#define FP_RETURN_REGISTERS f2, f4
@@ -101,7 +101,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == ppc & ppc64 ============================================================
// ===========================================================================
#define GP_PARAM_REGISTERS r10, r3, r4, r5, r6, r7, r8, r9
-#define GP_RETURN_REGISTERS r3, r4, r5
+#define GP_RETURN_REGISTERS r3, r4
#define FP_PARAM_REGISTERS d1, d2, d3, d4, d5, d6, d7, d8
#define FP_RETURN_REGISTERS d1, d2
@@ -110,7 +110,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == s390x ==================================================================
// ===========================================================================
#define GP_PARAM_REGISTERS r6, r2, r3, r4, r5
-#define GP_RETURN_REGISTERS r2, r3, r4
+#define GP_RETURN_REGISTERS r2, r3
#define FP_PARAM_REGISTERS d0, d2, d4, d6
#define FP_RETURN_REGISTERS d0, d2, d4, d6
@@ -119,7 +119,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == s390 ===================================================================
// ===========================================================================
#define GP_PARAM_REGISTERS r6, r2, r3, r4, r5
-#define GP_RETURN_REGISTERS r2, r3, r4
+#define GP_RETURN_REGISTERS r2, r3
#define FP_PARAM_REGISTERS d0, d2
#define FP_RETURN_REGISTERS d0, d2
@@ -158,6 +158,8 @@ struct Allocator {
int stack_offset;
+ void AdjustStackOffset(int offset) { stack_offset += offset; }
+
LinkageLocation Next(ValueType type) {
if (IsFloatingPoint(type)) {
// Allocate a floating point register/stack location.
@@ -226,25 +228,28 @@ CallDescriptor* GetWasmCallDescriptor(Zone* zone, wasm::FunctionSig* fsig) {
LocationSignature::Builder locations(zone, fsig->return_count(),
fsig->parameter_count() + 1);
- Allocator rets = return_registers;
-
- // Add return location(s).
- const int return_count = static_cast<int>(locations.return_count_);
- for (int i = 0; i < return_count; i++) {
- ValueType ret = fsig->GetReturn(i);
- locations.AddReturn(rets.Next(ret));
- }
-
+ // Add register and/or stack parameter(s).
Allocator params = parameter_registers;
- // Add parameter for the wasm_context.
+ // The wasm_context.
locations.AddParam(params.Next(MachineType::PointerRepresentation()));
- // Add register and/or stack parameter(s).
const int parameter_count = static_cast<int>(fsig->parameter_count());
for (int i = 0; i < parameter_count; i++) {
ValueType param = fsig->GetParam(i);
- locations.AddParam(params.Next(param));
+ auto l = params.Next(param);
+ locations.AddParam(l);
+ }
+
+ // Add return location(s).
+ Allocator rets = return_registers;
+ rets.AdjustStackOffset(params.stack_offset);
+
+ const int return_count = static_cast<int>(locations.return_count_);
+ for (int i = 0; i < return_count; i++) {
+ ValueType ret = fsig->GetReturn(i);
+ auto l = rets.Next(ret);
+ locations.AddReturn(l);
}
const RegList kCalleeSaveRegisters = 0;
@@ -255,22 +260,23 @@ CallDescriptor* GetWasmCallDescriptor(Zone* zone, wasm::FunctionSig* fsig) {
: MachineType::AnyTagged();
LinkageLocation target_loc = LinkageLocation::ForAnyRegister(target_type);
- CallDescriptor::Flags flags = CallDescriptor::kUseNativeStack;
CallDescriptor::Kind kind = FLAG_wasm_jit_to_native
? CallDescriptor::kCallWasmFunction
: CallDescriptor::kCallCodeObject;
- return new (zone) CallDescriptor( // --
- kind, // kind
- target_type, // target MachineType
- target_loc, // target location
- locations.Build(), // location_sig
- params.stack_offset, // stack_parameter_count
- compiler::Operator::kNoProperties, // properties
- kCalleeSaveRegisters, // callee-saved registers
- kCalleeSaveFPRegisters, // callee-saved fp regs
- flags, // flags
- "wasm-call");
+ return new (zone) CallDescriptor( // --
+ kind, // kind
+ target_type, // target MachineType
+ target_loc, // target location
+ locations.Build(), // location_sig
+ params.stack_offset, // stack_parameter_count
+ compiler::Operator::kNoProperties, // properties
+ kCalleeSaveRegisters, // callee-saved registers
+ kCalleeSaveFPRegisters, // callee-saved fp regs
+ CallDescriptor::kNoFlags, // flags
+ "wasm-call", // debug name
+ 0, // allocatable registers
+ rets.stack_offset - params.stack_offset); // stack_return_count
}
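
Allocating parameters first and then seeding the return allocator with rets.AdjustStackOffset(params.stack_offset) places stack return slots directly after the stack parameters, so the final subtraction yields only the return portion. A worked example on x64 for a hypothetical signature of ten i32 parameters and four i32 results:

// params:  wasm_context takes rsi; rax, rdx, rcx, rbx, rdi take five more
//          parameters; the remaining five spill to stack slots 0..4
//          -> params.stack_offset == 5
// returns: rax, rdx (the trimmed GP_RETURN_REGISTERS) take two results;
//          the other two land in stack slots 5 and 6
//          -> rets.stack_offset == 7
// stack_return_count == 7 - 5 == 2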
CallDescriptor* ReplaceTypeInCallDescriptorWith(
@@ -295,21 +301,7 @@ CallDescriptor* ReplaceTypeInCallDescriptorWith(
LocationSignature::Builder locations(zone, return_count, parameter_count);
- Allocator rets = return_registers;
-
- for (size_t i = 0; i < descriptor->ReturnCount(); i++) {
- if (descriptor->GetReturnType(i) == input_type) {
- for (size_t j = 0; j < num_replacements; j++) {
- locations.AddReturn(rets.Next(output_type));
- }
- } else {
- locations.AddReturn(
- rets.Next(descriptor->GetReturnType(i).representation()));
- }
- }
-
Allocator params = parameter_registers;
-
for (size_t i = 0; i < descriptor->ParameterCount(); i++) {
if (descriptor->GetParameterType(i) == input_type) {
for (size_t j = 0; j < num_replacements; j++) {
@@ -321,17 +313,32 @@ CallDescriptor* ReplaceTypeInCallDescriptorWith(
}
}
- return new (zone) CallDescriptor( // --
- descriptor->kind(), // kind
- descriptor->GetInputType(0), // target MachineType
- descriptor->GetInputLocation(0), // target location
- locations.Build(), // location_sig
- params.stack_offset, // stack_parameter_count
- descriptor->properties(), // properties
- descriptor->CalleeSavedRegisters(), // callee-saved registers
- descriptor->CalleeSavedFPRegisters(), // callee-saved fp regs
- descriptor->flags(), // flags
- descriptor->debug_name());
+ Allocator rets = return_registers;
+ rets.AdjustStackOffset(params.stack_offset);
+ for (size_t i = 0; i < descriptor->ReturnCount(); i++) {
+ if (descriptor->GetReturnType(i) == input_type) {
+ for (size_t j = 0; j < num_replacements; j++) {
+ locations.AddReturn(rets.Next(output_type));
+ }
+ } else {
+ locations.AddReturn(
+ rets.Next(descriptor->GetReturnType(i).representation()));
+ }
+ }
+
+ return new (zone) CallDescriptor( // --
+ descriptor->kind(), // kind
+ descriptor->GetInputType(0), // target MachineType
+ descriptor->GetInputLocation(0), // target location
+ locations.Build(), // location_sig
+ params.stack_offset, // stack_parameter_count
+ descriptor->properties(), // properties
+ descriptor->CalleeSavedRegisters(), // callee-saved registers
+ descriptor->CalleeSavedFPRegisters(), // callee-saved fp regs
+ descriptor->flags(), // flags
+ descriptor->debug_name(), // debug name
+ descriptor->AllocatableRegisters(), // allocatable registers
+ rets.stack_offset - params.stack_offset); // stack_return_count
}
CallDescriptor* GetI32WasmCallDescriptor(Zone* zone,
diff --git a/deps/v8/src/compiler/x64/code-generator-x64.cc b/deps/v8/src/compiler/x64/code-generator-x64.cc
index ea417533f2..bc92f9707c 100644
--- a/deps/v8/src/compiler/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/x64/code-generator-x64.cc
@@ -156,18 +156,6 @@ bool HasImmediateInput(Instruction* instr, size_t index) {
return instr->InputAt(index)->IsImmediate();
}
-
-class OutOfLineLoadZero final : public OutOfLineCode {
- public:
- OutOfLineLoadZero(CodeGenerator* gen, Register result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final { __ xorl(result_, result_); }
-
- private:
- Register const result_;
-};
-
class OutOfLineLoadFloat32NaN final : public OutOfLineCode {
public:
OutOfLineLoadFloat32NaN(CodeGenerator* gen, XMMRegister result)
@@ -295,7 +283,7 @@ class WasmOutOfLineTrap final : public OutOfLineCode {
ReferenceMap* reference_map = new (gen_->zone()) ReferenceMap(gen_->zone());
gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
- __ AssertUnreachable(kUnexpectedReturnFromWasmTrap);
+ __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
}
private:
@@ -456,241 +444,6 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
} \
} while (0)
-#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, OutOfLineLoadNaN) \
- do { \
- auto result = i.OutputDoubleRegister(); \
- auto buffer = i.InputRegister(0); \
- auto index1 = i.InputRegister(1); \
- auto index2 = i.InputUint32(2); \
- OutOfLineCode* ool; \
- if (instr->InputAt(3)->IsRegister()) { \
- auto length = i.InputRegister(3); \
- DCHECK_EQ(0u, index2); \
- __ cmpl(index1, length); \
- ool = new (zone()) OutOfLineLoadNaN(this, result); \
- } else { \
- auto length = i.InputUint32(3); \
- RelocInfo::Mode rmode = i.ToConstant(instr->InputAt(3)).rmode(); \
- DCHECK_LE(index2, length); \
- __ cmpl(index1, Immediate(length - index2, rmode)); \
- class OutOfLineLoadFloat final : public OutOfLineCode { \
- public: \
- OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result, \
- Register buffer, Register index1, int32_t index2, \
- int32_t length, RelocInfo::Mode rmode) \
- : OutOfLineCode(gen), \
- result_(result), \
- buffer_(buffer), \
- index1_(index1), \
- index2_(index2), \
- length_(length), \
- rmode_(rmode) {} \
- \
- void Generate() final { \
- __ leal(kScratchRegister, Operand(index1_, index2_)); \
- __ Pcmpeqd(result_, result_); \
- __ cmpl(kScratchRegister, Immediate(length_, rmode_)); \
- __ j(above_equal, exit()); \
- __ asm_instr(result_, \
- Operand(buffer_, kScratchRegister, times_1, 0)); \
- } \
- \
- private: \
- XMMRegister const result_; \
- Register const buffer_; \
- Register const index1_; \
- int32_t const index2_; \
- int32_t const length_; \
- RelocInfo::Mode rmode_; \
- }; \
- ool = new (zone()) OutOfLineLoadFloat(this, result, buffer, index1, \
- index2, length, rmode); \
- } \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(result, Operand(buffer, index1, times_1, index2)); \
- __ bind(ool->exit()); \
- } while (false)
-
-#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
- do { \
- auto result = i.OutputRegister(); \
- auto buffer = i.InputRegister(0); \
- auto index1 = i.InputRegister(1); \
- auto index2 = i.InputUint32(2); \
- OutOfLineCode* ool; \
- if (instr->InputAt(3)->IsRegister()) { \
- auto length = i.InputRegister(3); \
- DCHECK_EQ(0u, index2); \
- __ cmpl(index1, length); \
- ool = new (zone()) OutOfLineLoadZero(this, result); \
- } else { \
- auto length = i.InputUint32(3); \
- RelocInfo::Mode rmode = i.ToConstant(instr->InputAt(3)).rmode(); \
- DCHECK_LE(index2, length); \
- __ cmpl(index1, Immediate(length - index2, rmode)); \
- class OutOfLineLoadInteger final : public OutOfLineCode { \
- public: \
- OutOfLineLoadInteger(CodeGenerator* gen, Register result, \
- Register buffer, Register index1, int32_t index2, \
- int32_t length, RelocInfo::Mode rmode) \
- : OutOfLineCode(gen), \
- result_(result), \
- buffer_(buffer), \
- index1_(index1), \
- index2_(index2), \
- length_(length), \
- rmode_(rmode) {} \
- \
- void Generate() final { \
- Label oob; \
- __ leal(kScratchRegister, Operand(index1_, index2_)); \
- __ cmpl(kScratchRegister, Immediate(length_, rmode_)); \
- __ j(above_equal, &oob, Label::kNear); \
- __ asm_instr(result_, \
- Operand(buffer_, kScratchRegister, times_1, 0)); \
- __ jmp(exit()); \
- __ bind(&oob); \
- __ xorl(result_, result_); \
- } \
- \
- private: \
- Register const result_; \
- Register const buffer_; \
- Register const index1_; \
- int32_t const index2_; \
- int32_t const length_; \
- RelocInfo::Mode const rmode_; \
- }; \
- ool = new (zone()) OutOfLineLoadInteger(this, result, buffer, index1, \
- index2, length, rmode); \
- } \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(result, Operand(buffer, index1, times_1, index2)); \
- __ bind(ool->exit()); \
- } while (false)
-
-#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr) \
- do { \
- auto buffer = i.InputRegister(0); \
- auto index1 = i.InputRegister(1); \
- auto index2 = i.InputUint32(2); \
- auto value = i.InputDoubleRegister(4); \
- if (instr->InputAt(3)->IsRegister()) { \
- auto length = i.InputRegister(3); \
- DCHECK_EQ(0u, index2); \
- Label done; \
- __ cmpl(index1, length); \
- __ j(above_equal, &done, Label::kNear); \
- __ asm_instr(Operand(buffer, index1, times_1, index2), value); \
- __ bind(&done); \
- } else { \
- auto length = i.InputUint32(3); \
- RelocInfo::Mode rmode = i.ToConstant(instr->InputAt(3)).rmode(); \
- DCHECK_LE(index2, length); \
- __ cmpl(index1, Immediate(length - index2, rmode)); \
- class OutOfLineStoreFloat final : public OutOfLineCode { \
- public: \
- OutOfLineStoreFloat(CodeGenerator* gen, Register buffer, \
- Register index1, int32_t index2, int32_t length, \
- XMMRegister value, RelocInfo::Mode rmode) \
- : OutOfLineCode(gen), \
- buffer_(buffer), \
- index1_(index1), \
- index2_(index2), \
- length_(length), \
- value_(value), \
- rmode_(rmode) {} \
- \
- void Generate() final { \
- __ leal(kScratchRegister, Operand(index1_, index2_)); \
- __ cmpl(kScratchRegister, Immediate(length_, rmode_)); \
- __ j(above_equal, exit()); \
- __ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0), \
- value_); \
- } \
- \
- private: \
- Register const buffer_; \
- Register const index1_; \
- int32_t const index2_; \
- int32_t const length_; \
- XMMRegister const value_; \
- RelocInfo::Mode rmode_; \
- }; \
- auto ool = new (zone()) OutOfLineStoreFloat( \
- this, buffer, index1, index2, length, value, rmode); \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(Operand(buffer, index1, times_1, index2), value); \
- __ bind(ool->exit()); \
- } \
- } while (false)
-
-#define ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Value) \
- do { \
- auto buffer = i.InputRegister(0); \
- auto index1 = i.InputRegister(1); \
- auto index2 = i.InputUint32(2); \
- if (instr->InputAt(3)->IsRegister()) { \
- auto length = i.InputRegister(3); \
- DCHECK_EQ(0u, index2); \
- Label done; \
- __ cmpl(index1, length); \
- __ j(above_equal, &done, Label::kNear); \
- __ asm_instr(Operand(buffer, index1, times_1, index2), value); \
- __ bind(&done); \
- } else { \
- auto length = i.InputUint32(3); \
- RelocInfo::Mode rmode = i.ToConstant(instr->InputAt(3)).rmode(); \
- DCHECK_LE(index2, length); \
- __ cmpl(index1, Immediate(length - index2, rmode)); \
- class OutOfLineStoreInteger final : public OutOfLineCode { \
- public: \
- OutOfLineStoreInteger(CodeGenerator* gen, Register buffer, \
- Register index1, int32_t index2, int32_t length, \
- Value value, RelocInfo::Mode rmode) \
- : OutOfLineCode(gen), \
- buffer_(buffer), \
- index1_(index1), \
- index2_(index2), \
- length_(length), \
- value_(value), \
- rmode_(rmode) {} \
- \
- void Generate() final { \
- __ leal(kScratchRegister, Operand(index1_, index2_)); \
- __ cmpl(kScratchRegister, Immediate(length_, rmode_)); \
- __ j(above_equal, exit()); \
- __ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0), \
- value_); \
- } \
- \
- private: \
- Register const buffer_; \
- Register const index1_; \
- int32_t const index2_; \
- int32_t const length_; \
- Value const value_; \
- RelocInfo::Mode rmode_; \
- }; \
- auto ool = new (zone()) OutOfLineStoreInteger( \
- this, buffer, index1, index2, length, value, rmode); \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(Operand(buffer, index1, times_1, index2), value); \
- __ bind(ool->exit()); \
- } \
- } while (false)
-
-#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
- do { \
- if (instr->InputAt(4)->IsRegister()) { \
- Register value = i.InputRegister(4); \
- ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Register); \
- } else { \
- Immediate value = i.InputImmediate(4); \
- ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Immediate); \
- } \
- } while (false)
-
#define ASSEMBLE_IEEE754_BINOP(name) \
do { \
__ PrepareCallCFunction(2); \
@@ -840,6 +593,11 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ j(not_zero, code, RelocInfo::CODE_TARGET);
}
+inline bool HasCallDescriptorFlag(Instruction* instr,
+ CallDescriptor::Flag flag) {
+ return MiscField::decode(instr->opcode()) & flag;
+}
+
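
HasCallDescriptorFlag reads the flag bits the instruction selector packed into the opcode's MiscField; when kRetpoline is set, the call sites below replace indirect call/jmp with RetpolineCall/RetpolineJump. The thunk those helpers emit has roughly the standard retpoline shape (an illustrative sketch, not code from this patch):

//   call load_target        ; pushes the address of the capture loop
// capture:
//   pause                   ; the mispredicted speculative path spins here
//   lfence
//   jmp capture
// load_target:
//   mov [rsp], target_reg   ; overwrite the return address with the target
//   ret                     ; predicted via the RSB, not the indirect BTB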
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -854,7 +612,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
Register reg = i.InputRegister(0);
__ addp(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ call(reg);
+ if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ __ RetpolineCall(reg);
+ } else {
+ __ call(reg);
+ }
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -867,11 +629,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (info()->IsWasm()) {
__ near_call(wasm_code, RelocInfo::WASM_CALL);
} else {
- __ Call(wasm_code, RelocInfo::JS_TO_WASM_CALL);
+ if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ __ RetpolineCall(wasm_code, RelocInfo::JS_TO_WASM_CALL);
+ } else {
+ __ Call(wasm_code, RelocInfo::JS_TO_WASM_CALL);
+ }
}
} else {
Register reg = i.InputRegister(0);
- __ call(reg);
+ if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ __ RetpolineCall(reg);
+ } else {
+ __ call(reg);
+ }
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -890,7 +660,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
Register reg = i.InputRegister(0);
__ addp(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(reg);
+ if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ __ RetpolineJump(reg);
+ } else {
+ __ jmp(reg);
+ }
}
unwinding_info_writer_.MarkBlockWillExit();
frame_access_state()->ClearSPDelta();
@@ -909,7 +683,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
} else {
Register reg = i.InputRegister(0);
- __ jmp(reg);
+ if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ __ RetpolineJump(reg);
+ } else {
+ __ jmp(reg);
+ }
}
unwinding_info_writer_.MarkBlockWillExit();
frame_access_state()->ClearSPDelta();
@@ -919,7 +697,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchTailCallAddress: {
CHECK(!HasImmediateInput(instr, 0));
Register reg = i.InputRegister(0);
- __ jmp(reg);
+ if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ __ RetpolineJump(reg);
+ } else {
+ __ jmp(reg);
+ }
unwinding_info_writer_.MarkBlockWillExit();
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@@ -930,7 +712,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
__ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
- __ Assert(equal, kWrongFunctionContext);
+ __ Assert(equal, AbortReason::kWrongFunctionContext);
}
__ movp(rcx, FieldOperand(func, JSFunction::kCodeOffset));
__ addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
@@ -1093,6 +875,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bind(ool->exit());
break;
}
+ case kLFence:
+ __ lfence();
+ break;
case kArchStackSlot: {
FrameOffset offset =
frame_access_state()->GetFrameOffset(i.InputInt32(0));
@@ -2216,22 +2001,41 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->IncreaseSPDelta(1);
unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
kPointerSize);
- } else if (instr->InputAt(0)->IsFPRegister()) {
+ } else if (instr->InputAt(0)->IsFloatRegister() ||
+ instr->InputAt(0)->IsDoubleRegister()) {
// TODO(titzer): use another machine instruction?
__ subq(rsp, Immediate(kDoubleSize));
frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
kDoubleSize);
__ Movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
- } else {
+ } else if (instr->InputAt(0)->IsSimd128Register()) {
+ // TODO(titzer): use another machine instruction?
+ __ subq(rsp, Immediate(kSimd128Size));
+ frame_access_state()->IncreaseSPDelta(kSimd128Size / kPointerSize);
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ kSimd128Size);
+ __ Movups(Operand(rsp, 0), i.InputSimd128Register(0));
+ } else if (instr->InputAt(0)->IsStackSlot() ||
+ instr->InputAt(0)->IsFloatStackSlot() ||
+ instr->InputAt(0)->IsDoubleStackSlot()) {
__ pushq(i.InputOperand(0));
frame_access_state()->IncreaseSPDelta(1);
unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
kPointerSize);
+ } else {
+ DCHECK(instr->InputAt(0)->IsSimd128StackSlot());
+ __ Movups(kScratchDoubleReg, i.InputOperand(0));
+ // TODO(titzer): use another machine instruction?
+ __ subq(rsp, Immediate(kSimd128Size));
+ frame_access_state()->IncreaseSPDelta(kSimd128Size / kPointerSize);
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ kSimd128Size);
+ __ Movups(Operand(rsp, 0), kScratchDoubleReg);
}
break;
case kX64Poke: {
- int const slot = MiscField::decode(instr->opcode());
+ int slot = MiscField::decode(instr->opcode());
if (HasImmediateInput(instr, 0)) {
__ movq(Operand(rsp, slot * kPointerSize), i.InputImmediate(0));
} else {
@@ -2239,6 +2043,101 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
+ case kX64Peek: {
+ int reverse_slot = i.InputInt32(0);
+ int offset =
+ FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
+ if (instr->OutputAt(0)->IsFPRegister()) {
+ LocationOperand* op = LocationOperand::cast(instr->OutputAt(0));
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ Movsd(i.OutputDoubleRegister(), Operand(rbp, offset));
+ } else {
+ DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
+ __ Movss(i.OutputFloatRegister(), Operand(rbp, offset));
+ }
+ } else {
+ __ movq(i.OutputRegister(), Operand(rbp, offset));
+ }
+ break;
+ }
+ // TODO(gdeepti): Get rid of redundant moves for F32x4Splat/Extract below
+ case kX64F32x4Splat: {
+ XMMRegister dst = i.OutputSimd128Register();
+ if (instr->InputAt(0)->IsFPRegister()) {
+ __ Movss(dst, i.InputDoubleRegister(0));
+ } else {
+ __ Movss(dst, i.InputOperand(0));
+ }
+ __ shufps(dst, dst, 0x0);
+ break;
+ }
+ case kX64F32x4ExtractLane: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ __ extractps(kScratchRegister, i.InputSimd128Register(0), i.InputInt8(1));
+ __ movd(i.OutputDoubleRegister(), kScratchRegister);
+ break;
+ }
+ case kX64F32x4ReplaceLane: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ // The insertps instruction uses imm8[5:4] to indicate the lane
+ // that needs to be replaced.
+ byte select = i.InputInt8(1) << 4 & 0x30;
+ __ insertps(i.OutputSimd128Register(), i.InputDoubleRegister(2), select);
+ break;
+ }
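// Illustrative arithmetic (not part of the patch): replacing lane 2 computes
// (2 << 4) & 0x30 == 0x20, i.e. imm8[5:4] == 0b10, which selects the
// destination lane exactly as insertps expects.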
+ case kX64F32x4RecipApprox: {
+ __ rcpps(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kX64F32x4RecipSqrtApprox: {
+ __ rsqrtps(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kX64F32x4Add: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ addps(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64F32x4Sub: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ subps(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64F32x4Mul: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ mulps(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64F32x4Min: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ minps(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64F32x4Max: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ maxps(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64F32x4Eq: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ cmpps(i.OutputSimd128Register(), i.InputSimd128Register(1), 0x0);
+ break;
+ }
+ case kX64F32x4Ne: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ cmpps(i.OutputSimd128Register(), i.InputSimd128Register(1), 0x4);
+ break;
+ }
+ case kX64F32x4Lt: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ cmpltps(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64F32x4Le: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ cmpleps(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
case kX64I32x4Splat: {
XMMRegister dst = i.OutputSimd128Register();
__ movd(dst, i.InputRegister(0));
@@ -2669,48 +2568,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ xorps(dst, i.InputSimd128Register(2));
break;
}
- case kCheckedLoadInt8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(movsxbl);
- break;
- case kCheckedLoadUint8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(movzxbl);
- break;
- case kCheckedLoadInt16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(movsxwl);
- break;
- case kCheckedLoadUint16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(movzxwl);
- break;
- case kCheckedLoadWord32:
- ASSEMBLE_CHECKED_LOAD_INTEGER(movl);
- break;
- case kCheckedLoadWord64:
- ASSEMBLE_CHECKED_LOAD_INTEGER(movq);
- break;
- case kCheckedLoadFloat32:
- ASSEMBLE_CHECKED_LOAD_FLOAT(Movss, OutOfLineLoadFloat32NaN);
- break;
- case kCheckedLoadFloat64:
- ASSEMBLE_CHECKED_LOAD_FLOAT(Movsd, OutOfLineLoadFloat64NaN);
- break;
- case kCheckedStoreWord8:
- ASSEMBLE_CHECKED_STORE_INTEGER(movb);
- break;
- case kCheckedStoreWord16:
- ASSEMBLE_CHECKED_STORE_INTEGER(movw);
- break;
- case kCheckedStoreWord32:
- ASSEMBLE_CHECKED_STORE_INTEGER(movl);
- break;
- case kCheckedStoreWord64:
- ASSEMBLE_CHECKED_STORE_INTEGER(movq);
- break;
- case kCheckedStoreFloat32:
- ASSEMBLE_CHECKED_STORE_FLOAT(Movss);
- break;
- case kCheckedStoreFloat64:
- ASSEMBLE_CHECKED_STORE_FLOAT(Movsd);
- break;
case kX64StackCheck:
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
break;
@@ -2954,7 +2811,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
new (gen_->zone()) ReferenceMap(gen_->zone());
gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
- __ AssertUnreachable(kUnexpectedReturnFromWasmTrap);
+ __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
}
}
@@ -3082,7 +2939,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
- __ Abort(kShouldNotDirectlyEnterOsrFunction);
+ __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
// Unoptimized code jumps directly to this entrypoint while the unoptimized
// frame is still on the stack. Optimized code uses OSR values directly from
@@ -3124,13 +2981,15 @@ void CodeGenerator::AssembleConstructFrame() {
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
RecordSafepoint(reference_map, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
- __ AssertUnreachable(kUnexpectedReturnFromWasmTrap);
+ __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
__ bind(&done);
}
- // Skip callee-saved slots, which are pushed below.
+ // Skip callee-saved and return slots, which are created below.
shrink_slots -= base::bits::CountPopulation(saves);
- shrink_slots -= base::bits::CountPopulation(saves_fp);
+ shrink_slots -=
+ base::bits::CountPopulation(saves_fp) * (kQuadWordSize / kPointerSize);
+ shrink_slots -= frame()->GetReturnSlotCount();
if (shrink_slots > 0) {
__ subq(rsp, Immediate(shrink_slots * kPointerSize));
}
@@ -3157,6 +3016,11 @@ void CodeGenerator::AssembleConstructFrame() {
__ pushq(Register::from_code(i));
}
}
+
+ // Allocate return slots (located after callee-saved).
+ if (frame()->GetReturnSlotCount() > 0) {
+ __ subq(rsp, Immediate(frame()->GetReturnSlotCount() * kPointerSize));
+ }
}
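
With return slots now carved out below the callee-saved registers, the frame built here looks roughly as follows (a sketch inferred from the allocation order above; addresses grow downward):

// [ caller's stack parameters / return slots ]   higher addresses
// [ return address ]
// [ saved rbp ]                                  <- rbp
// [ shrink_slots spill area ]
// [ callee-saved FP registers ]  (kQuadWordSize each)
// [ callee-saved GP registers ]  (pushq)
// [ return slots ]                               <- rsp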
void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
@@ -3165,6 +3029,10 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
// Restore registers.
const RegList saves = descriptor->CalleeSavedRegisters();
if (saves != 0) {
+ const int returns = frame()->GetReturnSlotCount();
+ if (returns != 0) {
+ __ addq(rsp, Immediate(returns * kPointerSize));
+ }
for (int i = 0; i < Register::kNumRegisters; i++) {
if (!((1 << i) & saves)) continue;
__ popq(Register::from_code(i));
@@ -3212,7 +3080,6 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
}
if (pop->IsImmediate()) {
- DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
pop_size += g.ToConstant(pop).ToInt32() * kPointerSize;
CHECK_LT(pop_size, static_cast<size_t>(std::numeric_limits<int>::max()));
__ Ret(static_cast<int>(pop_size), rcx);
diff --git a/deps/v8/src/compiler/x64/instruction-codes-x64.h b/deps/v8/src/compiler/x64/instruction-codes-x64.h
index 9c268ededf..6d9bc6f820 100644
--- a/deps/v8/src/compiler/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/x64/instruction-codes-x64.h
@@ -56,6 +56,7 @@ namespace compiler {
V(X64Tzcnt32) \
V(X64Popcnt) \
V(X64Popcnt32) \
+ V(LFence) \
V(SSEFloat32Cmp) \
V(SSEFloat32Add) \
V(SSEFloat32Sub) \
@@ -143,7 +144,22 @@ namespace compiler {
V(X64Inc32) \
V(X64Push) \
V(X64Poke) \
+ V(X64Peek) \
V(X64StackCheck) \
+ V(X64F32x4Splat) \
+ V(X64F32x4ExtractLane) \
+ V(X64F32x4ReplaceLane) \
+ V(X64F32x4RecipApprox) \
+ V(X64F32x4RecipSqrtApprox) \
+ V(X64F32x4Add) \
+ V(X64F32x4Sub) \
+ V(X64F32x4Mul) \
+ V(X64F32x4Min) \
+ V(X64F32x4Max) \
+ V(X64F32x4Eq) \
+ V(X64F32x4Ne) \
+ V(X64F32x4Lt) \
+ V(X64F32x4Le) \
V(X64I32x4Splat) \
V(X64I32x4ExtractLane) \
V(X64I32x4ReplaceLane) \
diff --git a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
index ba775e72af..c16fee5861 100644
--- a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
@@ -123,6 +123,20 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64Lea:
case kX64Dec32:
case kX64Inc32:
+ case kX64F32x4Splat:
+ case kX64F32x4ExtractLane:
+ case kX64F32x4ReplaceLane:
+ case kX64F32x4RecipApprox:
+ case kX64F32x4RecipSqrtApprox:
+ case kX64F32x4Add:
+ case kX64F32x4Sub:
+ case kX64F32x4Mul:
+ case kX64F32x4Min:
+ case kX64F32x4Max:
+ case kX64F32x4Eq:
+ case kX64F32x4Ne:
+ case kX64F32x4Lt:
+ case kX64F32x4Le:
case kX64I32x4Splat:
case kX64I32x4ExtractLane:
case kX64I32x4ReplaceLane:
@@ -240,12 +254,16 @@ int InstructionScheduler::GetTargetInstructionFlags(
return instr->HasOutput() ? kIsLoadOperation : kHasSideEffect;
case kX64StackCheck:
+ case kX64Peek:
return kIsLoadOperation;
case kX64Push:
case kX64Poke:
return kHasSideEffect;
+ case kLFence:
+ return kHasSideEffect;
+
#define CASE(Name) case k##Name:
COMMON_ARCH_OPCODE_LIST(CASE)
#undef CASE
@@ -261,20 +279,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
// Basic latency modeling for x64 instructions. They have been determined
// in an empirical way.
switch (instr->arch_opcode()) {
- case kCheckedLoadInt8:
- case kCheckedLoadUint8:
- case kCheckedLoadInt16:
- case kCheckedLoadUint16:
- case kCheckedLoadWord32:
- case kCheckedLoadWord64:
- case kCheckedLoadFloat32:
- case kCheckedLoadFloat64:
- case kCheckedStoreWord8:
- case kCheckedStoreWord16:
- case kCheckedStoreWord32:
- case kCheckedStoreWord64:
- case kCheckedStoreFloat32:
- case kCheckedStoreFloat64:
case kSSEFloat64Mul:
return 5;
case kX64Imul:
diff --git a/deps/v8/src/compiler/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
index 04fec146de..a0f14c687c 100644
--- a/deps/v8/src/compiler/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
@@ -109,7 +109,7 @@ class X64OperandGenerator final : public OperandGenerator {
DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
inputs[(*input_count)++] = UseRegister(index);
if (displacement != nullptr) {
- inputs[(*input_count)++] = displacement_mode
+ inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
? UseNegatedImmediate(displacement)
: UseImmediate(displacement);
static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I,
@@ -289,6 +289,11 @@ void InstructionSelector::VisitDebugAbort(Node* node) {
Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), rdx));
}
+void InstructionSelector::VisitSpeculationFence(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kLFence, g.NoOutput());
+}
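
VisitSpeculationFence lowers the SpeculationFence operator to the new kLFence opcode, which the x64 code generator assembles as a bare lfence and the scheduler marks kHasSideEffect so it cannot be reordered. The intended placement is between a bounds check and the dependent load, roughly (a model of the usage, not V8 API):

// if (index < length) {    // branch an attacker may mis-train
//   /* lfence */           // dispatch stalls until the branch resolves
//   value = table[index];  // the load no longer executes speculatively
// }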
+
void InstructionSelector::VisitLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
X64OperandGenerator g(this);
@@ -399,118 +404,6 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
// Architecture supports unaligned access, therefore VisitStore is used instead
void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitCheckedLoad(Node* node) {
- CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
- X64OperandGenerator g(this);
- Node* const buffer = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- ArchOpcode opcode = kArchNop;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedLoadWord32;
- break;
- case MachineRepresentation::kWord64:
- opcode = kCheckedLoadWord64;
- break;
- case MachineRepresentation::kFloat32:
- opcode = kCheckedLoadFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedLoadFloat64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
- Int32Matcher mlength(length);
- Int32BinopMatcher moffset(offset);
- if (mlength.HasValue() && moffset.right().HasValue() &&
- moffset.right().Value() >= 0 &&
- mlength.Value() >= moffset.right().Value()) {
- Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
- g.UseRegister(moffset.left().node()),
- g.UseImmediate(moffset.right().node()), g.UseImmediate(length));
- return;
- }
- }
- InstructionOperand length_operand =
- g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
- Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
- g.UseRegister(offset), g.TempImmediate(0), length_operand);
-}
-
-
-void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
- X64OperandGenerator g(this);
- Node* const buffer = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- Node* const value = node->InputAt(3);
- ArchOpcode opcode = kArchNop;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kCheckedStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kCheckedStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedStoreWord32;
- break;
- case MachineRepresentation::kWord64:
- opcode = kCheckedStoreWord64;
- break;
- case MachineRepresentation::kFloat32:
- opcode = kCheckedStoreFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedStoreFloat64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- InstructionOperand value_operand =
- g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
- if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
- Int32Matcher mlength(length);
- Int32BinopMatcher moffset(offset);
- if (mlength.HasValue() && moffset.right().HasValue() &&
- moffset.right().Value() >= 0 &&
- mlength.Value() >= moffset.right().Value()) {
- Emit(opcode, g.NoOutput(), g.UseRegister(buffer),
- g.UseRegister(moffset.left().node()),
- g.UseImmediate(moffset.right().node()), g.UseImmediate(length),
- value_operand);
- return;
- }
- }
- InstructionOperand length_operand =
- g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
- Emit(opcode, g.NoOutput(), g.UseRegister(buffer), g.UseRegister(offset),
- g.TempImmediate(0), length_operand, value_operand);
-}
-
-
// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
InstructionCode opcode, FlagsContinuation* cont) {
@@ -579,7 +472,8 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -597,9 +491,9 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
void InstructionSelector::VisitWord32And(Node* node) {
X64OperandGenerator g(this);
Uint32BinopMatcher m(node);
- if (m.right().Is(0xff)) {
+ if (m.right().Is(0xFF)) {
Emit(kX64Movzxbl, g.DefineAsRegister(node), g.Use(m.left().node()));
- } else if (m.right().Is(0xffff)) {
+ } else if (m.right().Is(0xFFFF)) {
Emit(kX64Movzxwl, g.DefineAsRegister(node), g.Use(m.left().node()));
} else {
VisitBinop(this, node, kX64And32);
@@ -823,6 +717,10 @@ bool TryMatchLoadWord64AndShiftRight(InstructionSelector* selector, Node* node,
}
inputs[input_count++] = ImmediateOperand(ImmediateOperand::INLINE, 4);
} else {
+ // If the base address was zero, the displacement will be in a register,
+ // and replacing it with an immediate is not allowed. This usually only
+ // happens in dead code anyway.
+ if (!inputs[input_count - 1].IsImmediate()) return false;
int32_t displacement = g.GetImmediateIntegerValue(mleft.displacement());
inputs[input_count - 1] =
ImmediateOperand(ImmediateOperand::INLINE, displacement + 4);
@@ -1369,6 +1267,7 @@ void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
}
RO_OP_LIST(RO_VISITOR)
#undef RO_VISITOR
+#undef RO_OP_LIST
#define RR_VISITOR(Name, opcode) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -1376,6 +1275,7 @@ RO_OP_LIST(RO_VISITOR)
}
RR_OP_LIST(RR_VISITOR)
#undef RR_VISITOR
+#undef RR_OP_LIST
void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
VisitRR(this, node, kArchTruncateDoubleToI);
@@ -1538,11 +1438,11 @@ void InstructionSelector::EmitPrepareArguments(
// Poke any stack arguments.
for (size_t n = 0; n < arguments->size(); ++n) {
PushParameter input = (*arguments)[n];
- if (input.node()) {
+ if (input.node) {
int slot = static_cast<int>(n);
- InstructionOperand value = g.CanBeImmediate(input.node())
- ? g.UseImmediate(input.node())
- : g.UseRegister(input.node());
+ InstructionOperand value = g.CanBeImmediate(input.node)
+ ? g.UseImmediate(input.node)
+ : g.UseRegister(input.node);
Emit(kX64Poke | MiscField::encode(slot), g.NoOutput(), value);
}
}
@@ -1550,31 +1450,55 @@ void InstructionSelector::EmitPrepareArguments(
// Push any stack arguments.
int effect_level = GetEffectLevel(node);
for (PushParameter input : base::Reversed(*arguments)) {
- Node* input_node = input.node();
- if (g.CanBeImmediate(input_node)) {
- Emit(kX64Push, g.NoOutput(), g.UseImmediate(input_node));
+ // Skip any alignment holes in pushed nodes. We may have one in case of a
+ // Simd128 stack argument.
+ if (input.node == nullptr) continue;
+ if (g.CanBeImmediate(input.node)) {
+ Emit(kX64Push, g.NoOutput(), g.UseImmediate(input.node));
} else if (IsSupported(ATOM) ||
- sequence()->IsFP(GetVirtualRegister(input_node))) {
+ sequence()->IsFP(GetVirtualRegister(input.node))) {
// TODO(titzer): X64Push cannot handle stack->stack double moves
// because there is no way to encode fixed double slots.
- Emit(kX64Push, g.NoOutput(), g.UseRegister(input_node));
- } else if (g.CanBeMemoryOperand(kX64Push, node, input_node,
+ Emit(kX64Push, g.NoOutput(), g.UseRegister(input.node));
+ } else if (g.CanBeMemoryOperand(kX64Push, node, input.node,
effect_level)) {
InstructionOperand outputs[1];
InstructionOperand inputs[4];
size_t input_count = 0;
InstructionCode opcode = kX64Push;
AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
- input_node, inputs, &input_count);
+ input.node, inputs, &input_count);
opcode |= AddressingModeField::encode(mode);
Emit(opcode, 0, outputs, input_count, inputs);
} else {
- Emit(kX64Push, g.NoOutput(), g.Use(input_node));
+ Emit(kX64Push, g.NoOutput(), g.Use(input.node));
}
}
}
}
+void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
+ const CallDescriptor* descriptor,
+ Node* node) {
+ X64OperandGenerator g(this);
+
+ int reverse_slot = 0;
+ for (PushParameter output : *results) {
+ if (!output.location.IsCallerFrameSlot()) continue;
+ reverse_slot += output.location.GetSizeInPointers();
+ // Skip any alignment holes in nodes.
+ if (output.node == nullptr) continue;
+ DCHECK(!descriptor->IsCFunctionCall());
+ if (output.location.GetType() == MachineType::Float32()) {
+ MarkAsFloat32(output.node);
+ } else if (output.location.GetType() == MachineType::Float64()) {
+ MarkAsFloat64(output.node);
+ }
+ InstructionOperand result = g.DefineAsRegister(output.node);
+ InstructionOperand slot = g.UseImmediate(reverse_slot);
+ Emit(kX64Peek, 1, &result, 1, &slot);
+ }
+}
bool InstructionSelector::IsTailCallAddressImmediate() { return true; }
@@ -1602,7 +1526,8 @@ void VisitCompareWithMemoryOperand(InstructionSelector* selector,
selector->Emit(opcode, 0, nullptr, input_count, inputs);
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
InstructionOperand output = g.DefineAsRegister(cont->result());
selector->Emit(opcode, 1, &output, input_count, inputs);
@@ -1624,7 +1549,8 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
- cont->reason(), cont->frame_state());
+ cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
} else {
@@ -1812,7 +1738,8 @@ void VisitWord64Compare(InstructionSelector* selector, Node* node,
g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->kind(),
- cont->reason(), cont->frame_state());
+ cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()));
} else {
@@ -2012,14 +1939,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), node->InputAt(1));
+ kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), node->InputAt(1));
+ kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
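The recurring change throughout this file threads a new feedback argument from DeoptimizeParameters through the FlagsContinuation into EmitDeoptimize, so each deopt exit records which feedback slot motivated the speculation. A simplified sketch of carrying such a source through a continuation object, with hypothetical stand-in types rather than V8's real ones:

#include <cstdio>

// Hypothetical stand-ins for V8's DeoptimizeKind/Reason/feedback source.
enum class DeoptKind { kEager, kSoft };
enum class DeoptReason { kNotEqual, kEqual };
struct FeedbackSource { int vector_id; int slot; };

// Continuation carrying everything a deopt exit needs, including the
// feedback slot, in the spirit of FlagsContinuation::ForDeoptimize.
struct Continuation {
  DeoptKind kind;
  DeoptReason reason;
  FeedbackSource feedback;
};

void EmitDeoptimize(const Continuation& cont) {
  std::printf("deopt kind=%d reason=%d feedback=(%d,%d)\n",
              static_cast<int>(cont.kind), static_cast<int>(cont.reason),
              cont.feedback.vector_id, cont.feedback.slot);
}

int main() {
  Continuation cont{DeoptKind::kEager, DeoptReason::kNotEqual, {3, 7}};
  EmitDeoptimize(cont);
}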
@@ -2442,16 +2369,21 @@ VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
#define SIMD_TYPES(V) \
+ V(F32x4) \
V(I32x4) \
V(I16x8) \
V(I8x16)
-#define SIMD_FORMAT_LIST(V) \
- V(32x4) \
- V(16x8) \
- V(8x16)
-
#define SIMD_BINOP_LIST(V) \
+ V(F32x4Add) \
+ V(F32x4Sub) \
+ V(F32x4Mul) \
+ V(F32x4Min) \
+ V(F32x4Max) \
+ V(F32x4Eq) \
+ V(F32x4Ne) \
+ V(F32x4Lt) \
+ V(F32x4Le) \
V(I32x4Add) \
V(I32x4AddHoriz) \
V(I32x4Sub) \
@@ -2505,6 +2437,8 @@ VISIT_ATOMIC_BINOP(Xor)
V(S128Xor)
#define SIMD_UNOP_LIST(V) \
+ V(F32x4RecipApprox) \
+ V(F32x4RecipSqrtApprox) \
V(I32x4Neg) \
V(I16x8Neg) \
V(I8x16Neg) \
@@ -2580,6 +2514,10 @@ SIMD_UNOP_LIST(VISIT_SIMD_UNOP)
}
SIMD_BINOP_LIST(VISIT_SIMD_BINOP)
#undef VISIT_SIMD_BINOP
+#undef SIMD_TYPES
+#undef SIMD_BINOP_LIST
+#undef SIMD_UNOP_LIST
+#undef SIMD_SHIFT_OPCODES
void InstructionSelector::VisitS128Select(Node* node) {
X64OperandGenerator g(this);
@@ -2601,7 +2539,8 @@ MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::Flags flags =
MachineOperatorBuilder::kWord32ShiftIsSafe |
- MachineOperatorBuilder::kWord32Ctz | MachineOperatorBuilder::kWord64Ctz;
+ MachineOperatorBuilder::kWord32Ctz | MachineOperatorBuilder::kWord64Ctz |
+ MachineOperatorBuilder::kSpeculationFence;
if (CpuFeatures::IsSupported(POPCNT)) {
flags |= MachineOperatorBuilder::kWord32Popcnt |
MachineOperatorBuilder::kWord64Popcnt;
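The last hunk above advertises MachineOperatorBuilder::kSpeculationFence, meaning the x64 backend can now emit a speculation barrier. A sketch of the classic use of such a fence in plain C++, using the SSE2 LFENCE intrinsic; this illustrates the mitigation pattern only and is not V8's emitted code:

#include <emmintrin.h>  // _mm_lfence, available on x86 with SSE2
#include <cstdio>

int table[16];

int ReadChecked(int index, int length) {
  if (index >= length) return 0;
  // Speculation fence: the load below cannot execute speculatively before
  // the bounds check above has resolved, blocking Spectre-v1-style leaks.
  _mm_lfence();
  return table[index];
}

int main() { std::printf("%d\n", ReadChecked(3, 16)); }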
diff --git a/deps/v8/src/contexts-inl.h b/deps/v8/src/contexts-inl.h
index 4a1deb00e2..22e3606e98 100644
--- a/deps/v8/src/contexts-inl.h
+++ b/deps/v8/src/contexts-inl.h
@@ -24,10 +24,10 @@ ScriptContextTable* ScriptContextTable::cast(Object* context) {
return reinterpret_cast<ScriptContextTable*>(context);
}
-int ScriptContextTable::used() const { return Smi::ToInt(get(kUsedSlot)); }
+int ScriptContextTable::used() const { return Smi::ToInt(get(kUsedSlotIndex)); }
void ScriptContextTable::set_used(int used) {
- set(kUsedSlot, Smi::FromInt(used));
+ set(kUsedSlotIndex, Smi::FromInt(used));
}
@@ -36,7 +36,7 @@ Handle<Context> ScriptContextTable::GetContext(Handle<ScriptContextTable> table,
int i) {
DCHECK(i < table->used());
return Handle<Context>::cast(
- FixedArray::get(*table, i + kFirstContextSlot, table->GetIsolate()));
+ FixedArray::get(*table, i + kFirstContextSlotIndex, table->GetIsolate()));
}
diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc
index bf55b391e7..04c4b4899d 100644
--- a/deps/v8/src/contexts.cc
+++ b/deps/v8/src/contexts.cc
@@ -19,7 +19,7 @@ Handle<ScriptContextTable> ScriptContextTable::Extend(
int used = table->used();
int length = table->length();
CHECK(used >= 0 && length > 0 && used < length);
- if (used + kFirstContextSlot == length) {
+ if (used + kFirstContextSlotIndex == length) {
CHECK(length < Smi::kMaxValue / 2);
Isolate* isolate = table->GetIsolate();
Handle<FixedArray> copy =
@@ -32,7 +32,7 @@ Handle<ScriptContextTable> ScriptContextTable::Extend(
result->set_used(used + 1);
DCHECK(script_context->IsScriptContext());
- result->set(used + kFirstContextSlot, *script_context);
+ result->set(used + kFirstContextSlotIndex, *script_context);
return result;
}
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index 5f8eecb201..c1bca7557e 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -5,7 +5,7 @@
#ifndef V8_CONTEXTS_H_
#define V8_CONTEXTS_H_
-#include "src/objects.h"
+#include "src/objects/fixed-array.h"
namespace v8 {
namespace internal {
@@ -316,6 +316,8 @@ enum ContextLookupFlags {
V(PROXY_CONSTRUCTOR_MAP_INDEX, Map, proxy_constructor_map) \
V(PROXY_FUNCTION_INDEX, JSFunction, proxy_function) \
V(PROXY_MAP_INDEX, Map, proxy_map) \
+ V(PROXY_REVOCABLE_RESULT_MAP_INDEX, Map, proxy_revocable_result_map) \
+ V(PROXY_REVOKE_SHARED_FUN, SharedFunctionInfo, proxy_revoke_shared_fun) \
V(PROMISE_GET_CAPABILITIES_EXECUTOR_SHARED_FUN, SharedFunctionInfo, \
promise_get_capabilities_executor_shared_fun) \
V(PROMISE_RESOLVE_SHARED_FUN, SharedFunctionInfo, \
@@ -343,6 +345,7 @@ enum ContextLookupFlags {
V(SCRIPT_FUNCTION_INDEX, JSFunction, script_function) \
V(SECURITY_TOKEN_INDEX, Object, security_token) \
V(SELF_WEAK_CELL_INDEX, WeakCell, self_weak_cell) \
+ V(SERIALIZED_OBJECTS, FixedArray, serialized_objects) \
V(SET_VALUE_ITERATOR_MAP_INDEX, Map, set_value_iterator_map) \
V(SET_KEY_VALUE_ITERATOR_MAP_INDEX, Map, set_key_value_iterator_map) \
V(SHARED_ARRAY_BUFFER_FUN_INDEX, JSFunction, shared_array_buffer_fun) \
@@ -450,15 +453,8 @@ class ScriptContextTable : public FixedArray {
static Handle<ScriptContextTable> Extend(Handle<ScriptContextTable> table,
Handle<Context> script_context);
- static int GetContextOffset(int context_index) {
- return kFirstContextOffset + context_index * kPointerSize;
- }
-
- private:
- static const int kUsedSlot = 0;
- static const int kFirstContextSlot = kUsedSlot + 1;
- static const int kFirstContextOffset =
- FixedArray::kHeaderSize + kFirstContextSlot * kPointerSize;
+ static const int kUsedSlotIndex = 0;
+ static const int kFirstContextSlotIndex = 1;
DISALLOW_IMPLICIT_CONSTRUCTORS(ScriptContextTable);
};
@@ -566,6 +562,9 @@ class Context: public FixedArray {
static const int FIRST_FUNCTION_MAP_INDEX = SLOPPY_FUNCTION_MAP_INDEX;
static const int LAST_FUNCTION_MAP_INDEX = CLASS_FUNCTION_MAP_INDEX;
+ static const int kNoContext = 0;
+ static const int kInvalidContext = 1;
+
void ResetErrorsThrown();
void IncrementErrorsThrown();
int GetErrorsThrown();
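The ScriptContextTable change above drops the byte-offset helpers and exposes the layout as plain slot indices: slot 0 (kUsedSlotIndex) holds the used count, contexts start at slot 1 (kFirstContextSlotIndex). A minimal sketch of that layout over an ordinary vector, with hypothetical names standing in for the FixedArray machinery:

#include <cassert>
#include <vector>

// Hypothetical stand-in for a FixedArray-backed script context table:
// slot 0 holds the used count, slots 1..used hold the contexts.
class MiniContextTable {
 public:
  static const int kUsedSlotIndex = 0;
  static const int kFirstContextSlotIndex = 1;

  MiniContextTable() : slots_(1, 0) {}  // slot 0: used count, initially 0

  int used() const { return slots_[kUsedSlotIndex]; }

  void Append(int context_id) {
    slots_.push_back(context_id);
    slots_[kUsedSlotIndex] = used() + 1;
  }

  int GetContext(int i) const {
    assert(i < used());
    return slots_[i + kFirstContextSlotIndex];
  }

 private:
  std::vector<int> slots_;
};

int main() {
  MiniContextTable table;
  table.Append(42);
  assert(table.used() == 1 && table.GetContext(0) == 42);
}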
diff --git a/deps/v8/src/conversions.cc b/deps/v8/src/conversions.cc
index b9be0e097c..c5ea1b8366 100644
--- a/deps/v8/src/conversions.cc
+++ b/deps/v8/src/conversions.cc
@@ -333,12 +333,12 @@ void StringToIntHelper::DetectRadixInternal(Char current, int length) {
(*current == 'o' || *current == 'O')) {
radix_ = 8;
++current;
- DCHECK(current != end);
+ if (current == end) return set_state(kJunk);
} else if (allow_binary_and_octal_prefixes_ &&
(*current == 'b' || *current == 'B')) {
radix_ = 2;
++current;
- DCHECK(current != end);
+ if (current == end) return set_state(kJunk);
} else {
leading_zero_ = true;
}
@@ -413,7 +413,7 @@ void StringToIntHelper::ParseInternal(Char start) {
// in 32 bits. When we can't guarantee that the next iteration
// will not overflow the multiplier, we stop parsing the part
// by leaving the loop.
- const uint32_t kMaximumMultiplier = 0xffffffffU / 36;
+ const uint32_t kMaximumMultiplier = 0xFFFFFFFFU / 36;
uint32_t m = multiplier * static_cast<uint32_t>(radix_);
if (m > kMaximumMultiplier) break;
part = part * radix_ + d;
@@ -953,6 +953,7 @@ MaybeHandle<BigInt> BigIntParseInt(Isolate* isolate, Handle<String> string,
}
MaybeHandle<BigInt> StringToBigInt(Isolate* isolate, Handle<String> string) {
+ string = String::Flatten(string);
BigIntParseIntHelper helper(isolate, string);
return helper.GetResult();
}
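The DetectRadixInternal change turns a bare "0x"/"0o"/"0b" prefix with no digits after it from a debug-only assertion into ordinary junk input. A standalone sketch of that radix-prefix rule, deliberately simplified and not using V8's actual helper (leading-zero octal handling is omitted):

#include <cassert>
#include <string>

// Returns the detected radix, or 0 if the prefix is junk (e.g. "0x" with no
// digits after it), mirroring the kJunk state in the patch above.
int DetectRadix(const std::string& s) {
  if (s.size() >= 2 && s[0] == '0') {
    char c = s[1];
    int radix = (c == 'x' || c == 'X')   ? 16
                : (c == 'o' || c == 'O') ? 8
                : (c == 'b' || c == 'B') ? 2
                                         : 0;
    if (radix != 0) {
      // A prefix with nothing after it is junk, not a valid number.
      return s.size() > 2 ? radix : 0;
    }
  }
  return 10;
}

int main() {
  assert(DetectRadix("0x1F") == 16);
  assert(DetectRadix("0o7") == 8);
  assert(DetectRadix("0b") == 0);  // formerly a DCHECK failure, now junk
  assert(DetectRadix("42") == 10);
}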
diff --git a/deps/v8/src/counters-inl.h b/deps/v8/src/counters-inl.h
index f085478bf3..abde3a1af5 100644
--- a/deps/v8/src/counters-inl.h
+++ b/deps/v8/src/counters-inl.h
@@ -15,7 +15,7 @@ void RuntimeCallTimer::Start(RuntimeCallCounter* counter,
DCHECK(!IsStarted());
counter_ = counter;
parent_.SetValue(parent);
- if (FLAG_runtime_stats ==
+ if (base::AsAtomic32::Relaxed_Load(&FLAG_runtime_stats) ==
v8::tracing::TracingCategoryObserver::ENABLED_BY_SAMPLING) {
return;
}
@@ -57,8 +57,8 @@ void RuntimeCallTimer::CommitTimeToCounter() {
bool RuntimeCallTimer::IsStarted() { return start_ticks_ != base::TimeTicks(); }
-RuntimeCallTimerScope::RuntimeCallTimerScope(
- HeapObject* heap_object, RuntimeCallStats::CounterId counter_id)
+RuntimeCallTimerScope::RuntimeCallTimerScope(HeapObject* heap_object,
+ RuntimeCallCounterId counter_id)
: RuntimeCallTimerScope(heap_object->GetIsolate(), counter_id) {}
} // namespace internal
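RuntimeCallTimer::Start now reads FLAG_runtime_stats through a relaxed atomic load because the tracing observer may flip the flag from another thread. A sketch of the same pattern using std::atomic (V8 uses its own base::AsAtomic32 wrapper; the mode value below is a hypothetical stand-in):

#include <atomic>
#include <cstdio>

// Stand-in for a runtime flag another thread may toggle; relaxed ordering
// suffices because readers only need some recent value of the flag, not
// synchronization with other memory.
std::atomic<int> g_runtime_stats{0};
constexpr int kEnabledBySampling = 2;  // hypothetical observer mode value

void StartTimer() {
  if (g_runtime_stats.load(std::memory_order_relaxed) == kEnabledBySampling) {
    return;  // sampling mode: skip per-call timing
  }
  std::printf("timing started\n");
}

int main() {
  StartTimer();  // times the call
  g_runtime_stats.store(kEnabledBySampling,
                        std::memory_order_relaxed);  // e.g. from the observer
  StartTimer();  // now skipped
}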
diff --git a/deps/v8/src/counters.cc b/deps/v8/src/counters.cc
index c754e6fdef..e41fa276a8 100644
--- a/deps/v8/src/counters.cc
+++ b/deps/v8/src/counters.cc
@@ -444,80 +444,46 @@ RuntimeCallStats::RuntimeCallStats() : in_use_(false) {
FOR_EACH_HANDLER_COUNTER(CALL_BUILTIN_COUNTER) //
#undef CALL_BUILTIN_COUNTER
};
- for (int i = 0; i < counters_count; i++) {
- this->*(counters[i]) = RuntimeCallCounter(kNames[i]);
+ for (int i = 0; i < kNumberOfCounters; i++) {
+ this->counters_[i] = RuntimeCallCounter(kNames[i]);
}
}
-// static
-const RuntimeCallStats::CounterId RuntimeCallStats::counters[] = {
-#define CALL_BUILTIN_COUNTER(name) &RuntimeCallStats::GC_##name,
- FOR_EACH_GC_COUNTER(CALL_BUILTIN_COUNTER) //
-#undef CALL_BUILTIN_COUNTER
-#define CALL_RUNTIME_COUNTER(name) &RuntimeCallStats::name,
- FOR_EACH_MANUAL_COUNTER(CALL_RUNTIME_COUNTER) //
-#undef CALL_RUNTIME_COUNTER
-#define CALL_RUNTIME_COUNTER(name, nargs, ressize) \
- &RuntimeCallStats::Runtime_##name, //
- FOR_EACH_INTRINSIC(CALL_RUNTIME_COUNTER) //
-#undef CALL_RUNTIME_COUNTER
-#define CALL_BUILTIN_COUNTER(name) &RuntimeCallStats::Builtin_##name,
- BUILTIN_LIST_C(CALL_BUILTIN_COUNTER) //
-#undef CALL_BUILTIN_COUNTER
-#define CALL_BUILTIN_COUNTER(name) &RuntimeCallStats::API_##name,
- FOR_EACH_API_COUNTER(CALL_BUILTIN_COUNTER) //
-#undef CALL_BUILTIN_COUNTER
-#define CALL_BUILTIN_COUNTER(name) &RuntimeCallStats::Handler_##name,
- FOR_EACH_HANDLER_COUNTER(CALL_BUILTIN_COUNTER) //
-#undef CALL_BUILTIN_COUNTER
-};
-
-// static
-const int RuntimeCallStats::counters_count =
- arraysize(RuntimeCallStats::counters);
-
-// static
-void RuntimeCallStats::Enter(RuntimeCallStats* stats, RuntimeCallTimer* timer,
- CounterId counter_id) {
- DCHECK(stats->IsCalledOnTheSameThread());
- RuntimeCallCounter* counter = &(stats->*counter_id);
+void RuntimeCallStats::Enter(RuntimeCallTimer* timer,
+ RuntimeCallCounterId counter_id) {
+ DCHECK(IsCalledOnTheSameThread());
+ RuntimeCallCounter* counter = GetCounter(counter_id);
DCHECK_NOT_NULL(counter->name());
- timer->Start(counter, stats->current_timer());
- stats->current_timer_.SetValue(timer);
- stats->current_counter_.SetValue(counter);
+ timer->Start(counter, current_timer());
+ current_timer_.SetValue(timer);
+ current_counter_.SetValue(counter);
}
-// static
-void RuntimeCallStats::Leave(RuntimeCallStats* stats, RuntimeCallTimer* timer) {
- DCHECK(stats->IsCalledOnTheSameThread());
- RuntimeCallTimer* stack_top = stats->current_timer();
+void RuntimeCallStats::Leave(RuntimeCallTimer* timer) {
+ DCHECK(IsCalledOnTheSameThread());
+ RuntimeCallTimer* stack_top = current_timer();
if (stack_top == nullptr) return; // Missing timer is a result of Reset().
CHECK(stack_top == timer);
- stats->current_timer_.SetValue(timer->Stop());
- RuntimeCallTimer* cur_timer = stats->current_timer();
- stats->current_counter_.SetValue(cur_timer ? cur_timer->counter() : nullptr);
+ current_timer_.SetValue(timer->Stop());
+ RuntimeCallTimer* cur_timer = current_timer();
+ current_counter_.SetValue(cur_timer ? cur_timer->counter() : nullptr);
}
void RuntimeCallStats::Add(RuntimeCallStats* other) {
- for (const RuntimeCallStats::CounterId counter_id :
- RuntimeCallStats::counters) {
- RuntimeCallCounter* counter = &(this->*counter_id);
- RuntimeCallCounter* other_counter = &(other->*counter_id);
- counter->Add(other_counter);
+ for (int i = 0; i < kNumberOfCounters; i++) {
+ GetCounter(i)->Add(other->GetCounter(i));
}
}
// static
-void RuntimeCallStats::CorrectCurrentCounterId(RuntimeCallStats* stats,
- CounterId counter_id) {
- DCHECK(stats->IsCalledOnTheSameThread());
- // When RCS are enabled dynamically there might be no stats or timer set up.
- if (stats == nullptr) return;
- RuntimeCallTimer* timer = stats->current_timer_.Value();
+void RuntimeCallStats::CorrectCurrentCounterId(
+ RuntimeCallCounterId counter_id) {
+ DCHECK(IsCalledOnTheSameThread());
+ RuntimeCallTimer* timer = current_timer();
if (timer == nullptr) return;
- RuntimeCallCounter* counter = &(stats->*counter_id);
+ RuntimeCallCounter* counter = GetCounter(counter_id);
timer->set_counter(counter);
- stats->current_counter_.SetValue(counter);
+ current_counter_.SetValue(counter);
}
bool RuntimeCallStats::IsCalledOnTheSameThread() {
@@ -537,10 +503,8 @@ void RuntimeCallStats::Print(std::ostream& os) {
if (current_timer_.Value() != nullptr) {
current_timer_.Value()->Snapshot();
}
- for (const RuntimeCallStats::CounterId counter_id :
- RuntimeCallStats::counters) {
- RuntimeCallCounter* counter = &(this->*counter_id);
- entries.Add(counter);
+ for (int i = 0; i < kNumberOfCounters; i++) {
+ entries.Add(GetCounter(i));
}
entries.Print(os);
}
@@ -556,22 +520,17 @@ void RuntimeCallStats::Reset() {
current_timer_.SetValue(current_timer_.Value()->Stop());
}
- for (const RuntimeCallStats::CounterId counter_id :
- RuntimeCallStats::counters) {
- RuntimeCallCounter* counter = &(this->*counter_id);
- counter->Reset();
+ for (int i = 0; i < kNumberOfCounters; i++) {
+ GetCounter(i)->Reset();
}
in_use_ = true;
}
void RuntimeCallStats::Dump(v8::tracing::TracedValue* value) {
- for (const RuntimeCallStats::CounterId counter_id :
- RuntimeCallStats::counters) {
- RuntimeCallCounter* counter = &(this->*counter_id);
- if (counter->count() > 0) counter->Dump(value);
+ for (int i = 0; i < kNumberOfCounters; i++) {
+ if (GetCounter(i)->count() > 0) GetCounter(i)->Dump(value);
}
-
in_use_ = false;
}
diff --git a/deps/v8/src/counters.h b/deps/v8/src/counters.h
index 56873db092..b3c6f8c8ff 100644
--- a/deps/v8/src/counters.h
+++ b/deps/v8/src/counters.h
@@ -579,6 +579,7 @@ double AggregatedMemoryHistogram<Histogram>::Aggregate(double current_ms,
class RuntimeCallCounter final {
public:
+ RuntimeCallCounter() : RuntimeCallCounter(nullptr) {}
explicit RuntimeCallCounter(const char* name)
: name_(name), count_(0), time_(0) {}
V8_NOINLINE void Reset();
@@ -596,8 +597,6 @@ class RuntimeCallCounter final {
private:
friend class RuntimeCallStats;
- RuntimeCallCounter() {}
-
const char* name_;
int64_t count_;
// Stored as int64_t so that its initialization can be deferred.
@@ -634,7 +633,9 @@ class RuntimeCallTimer final {
base::TimeDelta elapsed_;
};
-#define FOR_EACH_GC_COUNTER(V) TRACER_SCOPES(V)
+#define FOR_EACH_GC_COUNTER(V) \
+ TRACER_SCOPES(V) \
+ TRACER_BACKGROUND_SCOPES(V)
#define FOR_EACH_API_COUNTER(V) \
V(ArrayBuffer_Cast) \
@@ -785,8 +786,7 @@ class RuntimeCallTimer final {
#define FOR_EACH_MANUAL_COUNTER(V) \
V(AccessorGetterCallback) \
- V(AccessorNameGetterCallback) \
- V(AccessorNameSetterCallback) \
+ V(AccessorSetterCallback) \
V(ArrayLengthGetter) \
V(ArrayLengthSetter) \
V(BoundFunctionNameGetter) \
@@ -821,18 +821,21 @@ class RuntimeCallTimer final {
V(GC_Custom_SlowAllocateRaw) \
V(GCEpilogueCallback) \
V(GCPrologueCallback) \
- V(GenericNamedPropertyDefinerCallback) \
- V(GenericNamedPropertyDeleterCallback) \
- V(GenericNamedPropertyDescriptorCallback) \
- V(GenericNamedPropertyQueryCallback) \
- V(GenericNamedPropertySetterCallback) \
V(GetMoreDataCallback) \
- V(IndexedPropertyDefinerCallback) \
- V(IndexedPropertyDeleterCallback) \
- V(IndexedPropertyDescriptorCallback) \
- V(IndexedPropertyGetterCallback) \
- V(IndexedPropertyQueryCallback) \
- V(IndexedPropertySetterCallback) \
+ V(NamedDefinerCallback) \
+ V(NamedDeleterCallback) \
+ V(NamedDescriptorCallback) \
+ V(NamedQueryCallback) \
+ V(NamedSetterCallback) \
+ V(NamedGetterCallback) \
+ V(NamedEnumeratorCallback) \
+ V(IndexedDefinerCallback) \
+ V(IndexedDeleterCallback) \
+ V(IndexedDescriptorCallback) \
+ V(IndexedGetterCallback) \
+ V(IndexedQueryCallback) \
+ V(IndexedSetterCallback) \
+ V(IndexedEnumeratorCallback) \
V(InvokeApiInterruptCallbacks) \
V(InvokeFunctionCallback) \
V(JS_Execution) \
@@ -878,6 +881,8 @@ class RuntimeCallTimer final {
V(KeyedStoreIC_SlowStub) \
V(KeyedStoreIC_StoreFastElementStub) \
V(KeyedStoreIC_StoreElementStub) \
+ V(LoadGlobalIC_LoadScriptContextField) \
+ V(LoadGlobalIC_SlowStub) \
V(LoadIC_FunctionPrototypeStub) \
V(LoadIC_HandlerCacheHit_Accessor) \
V(LoadIC_LoadAccessorDH) \
@@ -899,12 +904,13 @@ class RuntimeCallTimer final {
V(LoadIC_LoadNonexistentDH) \
V(LoadIC_LoadNormalDH) \
V(LoadIC_LoadNormalFromPrototypeDH) \
- V(LoadIC_LoadScriptContextFieldStub) \
V(LoadIC_NonReceiver) \
V(LoadIC_Premonomorphic) \
V(LoadIC_SlowStub) \
V(LoadIC_StringLength) \
V(LoadIC_StringWrapperLength) \
+ V(StoreGlobalIC_StoreScriptContextField) \
+ V(StoreGlobalIC_SlowStub) \
V(StoreIC_HandlerCacheHit_Accessor) \
V(StoreIC_NonReceiver) \
V(StoreIC_Premonomorphic) \
@@ -919,53 +925,48 @@ class RuntimeCallTimer final {
V(StoreIC_StoreNativeDataPropertyDH) \
V(StoreIC_StoreNativeDataPropertyOnPrototypeDH) \
V(StoreIC_StoreNormalDH) \
- V(StoreIC_StoreScriptContextFieldStub) \
V(StoreIC_StoreTransitionDH)
-class RuntimeCallStats final : public ZoneObject {
- public:
- typedef RuntimeCallCounter RuntimeCallStats::*CounterId;
- V8_EXPORT_PRIVATE RuntimeCallStats();
-
-#define CALL_RUNTIME_COUNTER(name) RuntimeCallCounter GC_##name;
+enum RuntimeCallCounterId {
+#define CALL_RUNTIME_COUNTER(name) kGC_##name,
FOR_EACH_GC_COUNTER(CALL_RUNTIME_COUNTER)
#undef CALL_RUNTIME_COUNTER
-#define CALL_RUNTIME_COUNTER(name) RuntimeCallCounter name;
- FOR_EACH_MANUAL_COUNTER(CALL_RUNTIME_COUNTER)
+#define CALL_RUNTIME_COUNTER(name) k##name,
+ FOR_EACH_MANUAL_COUNTER(CALL_RUNTIME_COUNTER)
#undef CALL_RUNTIME_COUNTER
-#define CALL_RUNTIME_COUNTER(name, nargs, ressize) \
- RuntimeCallCounter Runtime_##name;
- FOR_EACH_INTRINSIC(CALL_RUNTIME_COUNTER)
+#define CALL_RUNTIME_COUNTER(name, nargs, ressize) kRuntime_##name,
+ FOR_EACH_INTRINSIC(CALL_RUNTIME_COUNTER)
#undef CALL_RUNTIME_COUNTER
-#define CALL_BUILTIN_COUNTER(name) RuntimeCallCounter Builtin_##name;
- BUILTIN_LIST_C(CALL_BUILTIN_COUNTER)
+#define CALL_BUILTIN_COUNTER(name) kBuiltin_##name,
+ BUILTIN_LIST_C(CALL_BUILTIN_COUNTER)
#undef CALL_BUILTIN_COUNTER
-#define CALL_BUILTIN_COUNTER(name) RuntimeCallCounter API_##name;
- FOR_EACH_API_COUNTER(CALL_BUILTIN_COUNTER)
+#define CALL_BUILTIN_COUNTER(name) kAPI_##name,
+ FOR_EACH_API_COUNTER(CALL_BUILTIN_COUNTER)
#undef CALL_BUILTIN_COUNTER
-#define CALL_BUILTIN_COUNTER(name) RuntimeCallCounter Handler_##name;
- FOR_EACH_HANDLER_COUNTER(CALL_BUILTIN_COUNTER)
+#define CALL_BUILTIN_COUNTER(name) kHandler_##name,
+ FOR_EACH_HANDLER_COUNTER(CALL_BUILTIN_COUNTER)
#undef CALL_BUILTIN_COUNTER
+ kNumberOfCounters
+};
- static const CounterId counters[];
- static const int counters_count;
+class RuntimeCallStats final : public ZoneObject {
+ public:
+ V8_EXPORT_PRIVATE RuntimeCallStats();
  // Start measuring the time for a function. This will establish the

// connection to the parent counter for properly calculating the own times.
- V8_EXPORT_PRIVATE static void Enter(RuntimeCallStats* stats,
- RuntimeCallTimer* timer,
- CounterId counter_id);
+ V8_EXPORT_PRIVATE void Enter(RuntimeCallTimer* timer,
+ RuntimeCallCounterId counter_id);
// Leave a scope for a measured runtime function. This will properly add
// the time delta to the current_counter and subtract the delta from its
// parent.
- V8_EXPORT_PRIVATE static void Leave(RuntimeCallStats* stats,
- RuntimeCallTimer* timer);
+ V8_EXPORT_PRIVATE void Leave(RuntimeCallTimer* timer);
// Set counter id for the innermost measurement. It can be used to refine
// event kind when a runtime entry counter is too generic.
- V8_EXPORT_PRIVATE static void CorrectCurrentCounterId(RuntimeCallStats* stats,
- CounterId counter_id);
+ V8_EXPORT_PRIVATE void CorrectCurrentCounterId(
+ RuntimeCallCounterId counter_id);
V8_EXPORT_PRIVATE void Reset();
// Add all entries from another stats object.
@@ -980,6 +981,15 @@ class RuntimeCallStats final : public ZoneObject {
bool InUse() { return in_use_; }
bool IsCalledOnTheSameThread();
+ static const int kNumberOfCounters =
+ static_cast<int>(RuntimeCallCounterId::kNumberOfCounters);
+ RuntimeCallCounter* GetCounter(RuntimeCallCounterId counter_id) {
+ return &counters_[static_cast<int>(counter_id)];
+ }
+ RuntimeCallCounter* GetCounter(int counter_id) {
+ return &counters_[counter_id];
+ }
+
private:
// Top of a stack of active timers.
base::AtomicValue<RuntimeCallTimer*> current_timer_;
@@ -988,40 +998,41 @@ class RuntimeCallStats final : public ZoneObject {
// Used to track nested tracing scopes.
bool in_use_;
ThreadId thread_id_;
+ RuntimeCallCounter counters_[kNumberOfCounters];
};
-#define CHANGE_CURRENT_RUNTIME_COUNTER(runtime_call_stats, counter_name) \
- do { \
- if (V8_UNLIKELY(FLAG_runtime_stats)) { \
- RuntimeCallStats::CorrectCurrentCounterId( \
- runtime_call_stats, &RuntimeCallStats::counter_name); \
- } \
+#define CHANGE_CURRENT_RUNTIME_COUNTER(runtime_call_stats, counter_id) \
+ do { \
+ if (V8_UNLIKELY(FLAG_runtime_stats) && runtime_call_stats) { \
+ runtime_call_stats->CorrectCurrentCounterId(counter_id); \
+ } \
} while (false)
-#define TRACE_HANDLER_STATS(isolate, counter_name) \
- CHANGE_CURRENT_RUNTIME_COUNTER(isolate->counters()->runtime_call_stats(), \
- Handler_##counter_name)
+#define TRACE_HANDLER_STATS(isolate, counter_name) \
+ CHANGE_CURRENT_RUNTIME_COUNTER( \
+ isolate->counters()->runtime_call_stats(), \
+ RuntimeCallCounterId::kHandler_##counter_name)
// A RuntimeCallTimerScope wraps around a RuntimeCallTimer to measure the
// time of a C++ scope.
class RuntimeCallTimerScope {
public:
inline RuntimeCallTimerScope(Isolate* isolate,
- RuntimeCallStats::CounterId counter_id);
+ RuntimeCallCounterId counter_id);
// This constructor is here just to avoid calling GetIsolate() when the
// stats are disabled and the isolate is not directly available.
inline RuntimeCallTimerScope(HeapObject* heap_object,
- RuntimeCallStats::CounterId counter_id);
+ RuntimeCallCounterId counter_id);
inline RuntimeCallTimerScope(RuntimeCallStats* stats,
- RuntimeCallStats::CounterId counter_id) {
+ RuntimeCallCounterId counter_id) {
if (V8_LIKELY(!FLAG_runtime_stats || stats == nullptr)) return;
stats_ = stats;
- RuntimeCallStats::Enter(stats_, &timer_, counter_id);
+ stats_->Enter(&timer_, counter_id);
}
inline ~RuntimeCallTimerScope() {
if (V8_UNLIKELY(stats_ != nullptr)) {
- RuntimeCallStats::Leave(stats_, &timer_);
+ stats_->Leave(&timer_);
}
}
@@ -1034,6 +1045,9 @@ class RuntimeCallTimerScope {
#define HISTOGRAM_RANGE_LIST(HR) \
/* Generic range histograms: HR(name, caption, min, max, num_buckets) */ \
+ HR(background_marking, V8.GCBackgroundMarking, 0, 10000, 101) \
+ HR(background_scavenger, V8.GCBackgroundScavenger, 0, 10000, 101) \
+ HR(background_sweeping, V8.GCBackgroundSweeping, 0, 10000, 101) \
HR(detached_context_age_in_gc, V8.DetachedContextAgeInGC, 0, 20, 21) \
HR(code_cache_reject_reason, V8.CodeCacheRejectReason, 1, 6, 6) \
HR(errors_thrown_per_context, V8.ErrorsThrownPerContext, 0, 200, 20) \
@@ -1131,6 +1145,8 @@ class RuntimeCallTimerScope {
1000000, MICROSECOND) \
HT(wasm_compile_wasm_function_time, V8.WasmCompileFunctionMicroSeconds.wasm, \
1000000, MICROSECOND) \
+ HT(liftoff_compile_time, V8.LiftoffCompileMicroSeconds, 10000000, \
+ MICROSECOND) \
HT(wasm_instantiate_wasm_module_time, \
V8.WasmInstantiateModuleMicroSeconds.wasm, 10000000, MICROSECOND) \
HT(wasm_instantiate_asm_module_time, \
@@ -1384,28 +1400,6 @@ class Counters : public std::enable_shared_from_this<Counters> {
STATS_COUNTER_TS_LIST(SC)
#undef SC
-#define SC(name) \
- StatsCounter* count_of_##name() { return &count_of_##name##_; } \
- StatsCounter* size_of_##name() { return &size_of_##name##_; }
- INSTANCE_TYPE_LIST(SC)
-#undef SC
-
-#define SC(name) \
- StatsCounter* count_of_CODE_TYPE_##name() \
- { return &count_of_CODE_TYPE_##name##_; } \
- StatsCounter* size_of_CODE_TYPE_##name() \
- { return &size_of_CODE_TYPE_##name##_; }
- CODE_KIND_LIST(SC)
-#undef SC
-
-#define SC(name) \
- StatsCounter* count_of_FIXED_ARRAY_##name() \
- { return &count_of_FIXED_ARRAY_##name##_; } \
- StatsCounter* size_of_FIXED_ARRAY_##name() \
- { return &size_of_FIXED_ARRAY_##name##_; }
- FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC)
-#undef SC
-
// clang-format off
enum Id {
#define RATE_ID(name, caption, max, res) k_##name,
@@ -1541,11 +1535,11 @@ void HistogramTimer::Stop() {
TimedHistogram::Stop(&timer_, counters()->isolate());
}
-RuntimeCallTimerScope::RuntimeCallTimerScope(
- Isolate* isolate, RuntimeCallStats::CounterId counter_id) {
+RuntimeCallTimerScope::RuntimeCallTimerScope(Isolate* isolate,
+ RuntimeCallCounterId counter_id) {
if (V8_LIKELY(!FLAG_runtime_stats)) return;
stats_ = isolate->counters()->runtime_call_stats();
- RuntimeCallStats::Enter(stats_, &timer_, counter_id);
+ stats_->Enter(&timer_, counter_id);
}
} // namespace internal
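The refactor above replaces the static array of pointer-to-member counters with a single RuntimeCallCounterId enum indexing a plain counters_ array, so Enter/Leave/CorrectCurrentCounterId become ordinary member functions. A minimal sketch of that X-macro pattern with a hypothetical counter list (the real code expands several FOR_EACH_* lists):

#include <cstdio>

// Hypothetical counter list standing in for FOR_EACH_GC_COUNTER etc.
#define FOR_EACH_COUNTER(V) \
  V(Parse)                  \
  V(Compile)                \
  V(Execute)

enum CounterId {
#define DEFINE_ID(name) k##name,
  FOR_EACH_COUNTER(DEFINE_ID)
#undef DEFINE_ID
      kNumberOfCounters
};

struct Counter {
  const char* name;
  long count;
};

struct Stats {
  Counter counters[kNumberOfCounters];

  Stats() {
    // The same list generates a parallel name table, kept in enum order.
    static const char* const kNames[] = {
#define DEFINE_NAME(name) #name,
        FOR_EACH_COUNTER(DEFINE_NAME)
#undef DEFINE_NAME
    };
    for (int i = 0; i < kNumberOfCounters; i++) counters[i] = {kNames[i], 0};
  }

  Counter* GetCounter(CounterId id) { return &counters[id]; }
};

int main() {
  Stats stats;
  stats.GetCounter(kCompile)->count++;
  for (const Counter& c : stats.counters)
    std::printf("%s: %ld\n", c.name, c.count);
}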
diff --git a/deps/v8/src/d8-posix.cc b/deps/v8/src/d8-posix.cc
index 8836fdb0e5..3aae30799f 100644
--- a/deps/v8/src/d8-posix.cc
+++ b/deps/v8/src/d8-posix.cc
@@ -31,16 +31,16 @@ static int LengthWithoutIncompleteUtf8(char* buffer, int len) {
static const int kUtf8SingleByteMask = 0x80;
static const int kUtf8SingleByteValue = 0x00;
// 2-byte encoding.
- static const int kUtf8TwoByteMask = 0xe0;
- static const int kUtf8TwoByteValue = 0xc0;
+ static const int kUtf8TwoByteMask = 0xE0;
+ static const int kUtf8TwoByteValue = 0xC0;
// 3-byte encoding.
- static const int kUtf8ThreeByteMask = 0xf0;
- static const int kUtf8ThreeByteValue = 0xe0;
+ static const int kUtf8ThreeByteMask = 0xF0;
+ static const int kUtf8ThreeByteValue = 0xE0;
// 4-byte encoding.
- static const int kUtf8FourByteMask = 0xf8;
- static const int kUtf8FourByteValue = 0xf0;
+ static const int kUtf8FourByteMask = 0xF8;
+ static const int kUtf8FourByteValue = 0xF0;
// Subsequent bytes of a multi-byte encoding.
- static const int kMultiByteMask = 0xc0;
+ static const int kMultiByteMask = 0xC0;
static const int kMultiByteValue = 0x80;
int multi_byte_bytes_seen = 0;
while (answer > 0) {
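The constants above classify UTF-8 bytes by masking their high bits so the shell can avoid cutting a buffer in the middle of a multi-byte sequence. A standalone classifier using the same mask/value pairs:

#include <cassert>
#include <cstdint>

// Byte classes derived from the mask/value pairs above.
enum class Utf8Byte { kSingle, kLead2, kLead3, kLead4, kContinuation };

Utf8Byte Classify(uint8_t b) {
  if ((b & 0x80) == 0x00) return Utf8Byte::kSingle;  // 0xxxxxxx
  if ((b & 0xE0) == 0xC0) return Utf8Byte::kLead2;   // 110xxxxx
  if ((b & 0xF0) == 0xE0) return Utf8Byte::kLead3;   // 1110xxxx
  if ((b & 0xF8) == 0xF0) return Utf8Byte::kLead4;   // 11110xxx
  return Utf8Byte::kContinuation;                    // 10xxxxxx
}

int main() {
  assert(Classify('A') == Utf8Byte::kSingle);
  assert(Classify(0xC3) == Utf8Byte::kLead2);         // e.g. lead byte of U+00E9
  assert(Classify(0xA9) == Utf8Byte::kContinuation);  // its trailing byte
  assert(Classify(0xF0) == Utf8Byte::kLead4);         // lead byte of a 4-byte char
}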
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index 54a41fc00e..32f129821a 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -41,10 +41,6 @@
#include "src/utils.h"
#include "src/v8.h"
-#if defined(LEAK_SANITIZER)
-#include <sanitizer/lsan_interface.h>
-#endif
-
#if !defined(_WIN32) && !defined(_WIN64)
#include <unistd.h> // NOLINT
#else
@@ -66,136 +62,128 @@ namespace v8 {
namespace {
-const int MB = 1024 * 1024;
-const int kMaxWorkers = 50;
-const int kMaxSerializerMemoryUsage = 1 * MB; // Arbitrary maximum for testing.
+const int kMB = 1024 * 1024;
-#define USE_VM 1
-#define VM_THRESHOLD 65536
-// TODO(titzer): allocations should fail if >= 2gb because of
-// array buffers storing the lengths as a SMI internally.
-#define TWO_GB (2u * 1024u * 1024u * 1024u)
+const int kMaxWorkers = 50;
+const int kMaxSerializerMemoryUsage =
+ 1 * kMB; // Arbitrary maximum for testing.
-// Forwards memory reservation and protection functions to the V8 default
-// allocator. Used by ShellArrayBufferAllocator and MockArrayBufferAllocator.
+// Base class for shell ArrayBuffer allocators. It forwards all operations to
+// the default V8 allocator.
class ArrayBufferAllocatorBase : public v8::ArrayBuffer::Allocator {
- std::unique_ptr<Allocator> allocator_ =
- std::unique_ptr<Allocator>(NewDefaultAllocator());
-
public:
- void* Reserve(size_t length) override { return allocator_->Reserve(length); }
+ void* Allocate(size_t length) override {
+ return allocator_->Allocate(length);
+ }
- void Free(void*, size_t) override = 0;
+ void* AllocateUninitialized(size_t length) override {
+ return allocator_->AllocateUninitialized(length);
+ }
+
+ void Free(void* data, size_t length) override {
+ allocator_->Free(data, length);
+ }
+
+ void* Reserve(size_t length) override { return allocator_->Reserve(length); }
void Free(void* data, size_t length, AllocationMode mode) override {
- switch (mode) {
- case AllocationMode::kNormal: {
- return Free(data, length);
- }
- case AllocationMode::kReservation: {
- return allocator_->Free(data, length, mode);
- }
- }
+ allocator_->Free(data, length, mode);
}
void SetProtection(void* data, size_t length,
Protection protection) override {
allocator_->SetProtection(data, length, protection);
}
+
+ private:
+ std::unique_ptr<Allocator> allocator_ =
+ std::unique_ptr<Allocator>(NewDefaultAllocator());
};
+// ArrayBuffer allocator that can use virtual memory to improve performance.
class ShellArrayBufferAllocator : public ArrayBufferAllocatorBase {
public:
void* Allocate(size_t length) override {
-#if USE_VM
- if (RoundToPageSize(&length)) {
- void* data = VirtualMemoryAllocate(length);
-#if DEBUG
- if (data) {
- // In debug mode, check the memory is zero-initialized.
- size_t limit = length / sizeof(uint64_t);
- uint64_t* ptr = reinterpret_cast<uint64_t*>(data);
- for (size_t i = 0; i < limit; i++) {
- DCHECK_EQ(0u, ptr[i]);
- }
- }
-#endif
- return data;
- }
-#endif
- void* data = AllocateUninitialized(length);
- return data == nullptr ? data : memset(data, 0, length);
+ if (length >= kVMThreshold) return AllocateVM(length);
+ return ArrayBufferAllocatorBase::Allocate(length);
}
+
void* AllocateUninitialized(size_t length) override {
-#if USE_VM
- if (RoundToPageSize(&length)) return VirtualMemoryAllocate(length);
-#endif
-// Work around for GCC bug on AIX
-// See: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79839
-#if V8_OS_AIX && _LINUX_SOURCE_COMPAT
- return __linux_malloc(length);
-#else
- return malloc(length);
-#endif
+ if (length >= kVMThreshold) return AllocateVM(length);
+ return ArrayBufferAllocatorBase::AllocateUninitialized(length);
}
- using ArrayBufferAllocatorBase::Free;
+
void Free(void* data, size_t length) override {
-#if USE_VM
- if (RoundToPageSize(&length)) {
- CHECK(base::OS::Free(data, length));
- return;
+ if (length >= kVMThreshold) {
+ FreeVM(data, length);
+ } else {
+ ArrayBufferAllocatorBase::Free(data, length);
}
-#endif
- free(data);
}
- // If {length} is at least {VM_THRESHOLD}, round up to next page size and
- // return {true}. Otherwise return {false}.
- bool RoundToPageSize(size_t* length) {
- size_t page_size = base::OS::AllocatePageSize();
- if (*length >= VM_THRESHOLD && *length < TWO_GB) {
- *length = RoundUp(*length, page_size);
- return true;
- }
- return false;
+
+ void* Reserve(size_t length) override {
+ // |length| must be over the threshold so we can distinguish VM from
+ // malloced memory.
+ DCHECK_LE(kVMThreshold, length);
+ return ArrayBufferAllocatorBase::Reserve(length);
}
-#if USE_VM
- void* VirtualMemoryAllocate(size_t length) {
- size_t page_size = base::OS::AllocatePageSize();
- size_t alloc_size = RoundUp(length, page_size);
- void* address = base::OS::Allocate(nullptr, alloc_size, page_size,
- base::OS::MemoryPermission::kReadWrite);
- if (address != nullptr) {
-#if defined(LEAK_SANITIZER)
- __lsan_register_root_region(address, alloc_size);
-#endif
- MSAN_MEMORY_IS_INITIALIZED(address, alloc_size);
- }
- return address;
+
+ void Free(void* data, size_t length, AllocationMode) override {
+ // Ignore allocation mode; the appropriate action is determined by |length|.
+ Free(data, length);
}
-#endif
-};
-class MockArrayBufferAllocator : public ArrayBufferAllocatorBase {
- const size_t kAllocationLimit = 10 * MB;
- size_t get_actual_length(size_t length) const {
- return length > kAllocationLimit ? base::OS::AllocatePageSize() : length;
+ private:
+ static constexpr size_t kVMThreshold = 65536;
+ static constexpr size_t kTwoGB = 2u * 1024u * 1024u * 1024u;
+
+ void* AllocateVM(size_t length) {
+ DCHECK_LE(kVMThreshold, length);
+ // TODO(titzer): allocations should fail if >= 2gb because array buffers
+ // store their lengths as a SMI internally.
+ if (length >= kTwoGB) return nullptr;
+
+ size_t page_size = i::AllocatePageSize();
+ size_t allocated = RoundUp(length, page_size);
+ // Rounding up could go over the limit.
+ if (allocated >= kTwoGB) return nullptr;
+ return i::AllocatePages(nullptr, allocated, page_size,
+ PageAllocator::kReadWrite);
}
- public:
+ void FreeVM(void* data, size_t length) {
+ size_t page_size = i::AllocatePageSize();
+ size_t allocated = RoundUp(length, page_size);
+ CHECK(i::FreePages(data, allocated));
+ }
+};
+
+// ArrayBuffer allocator that never allocates over 10MB.
+class MockArrayBufferAllocator : public ArrayBufferAllocatorBase {
void* Allocate(size_t length) override {
- const size_t actual_length = get_actual_length(length);
- void* data = AllocateUninitialized(actual_length);
- return data == nullptr ? data : memset(data, 0, actual_length);
+ return ArrayBufferAllocatorBase::Allocate(Adjust(length));
}
+
void* AllocateUninitialized(size_t length) override {
- return malloc(get_actual_length(length));
+ return ArrayBufferAllocatorBase::AllocateUninitialized(Adjust(length));
}
- void Free(void* p, size_t) override { free(p); }
- void Free(void* data, size_t length, AllocationMode mode) override {
- ArrayBufferAllocatorBase::Free(data, get_actual_length(length), mode);
+
+ void Free(void* data, size_t length) override {
+ return ArrayBufferAllocatorBase::Free(data, Adjust(length));
}
+
void* Reserve(size_t length) override {
- return ArrayBufferAllocatorBase::Reserve(get_actual_length(length));
+ return ArrayBufferAllocatorBase::Reserve(Adjust(length));
+ }
+
+ void Free(void* data, size_t length, AllocationMode mode) override {
+ return ArrayBufferAllocatorBase::Free(data, Adjust(length), mode);
+ }
+
+ private:
+ size_t Adjust(size_t length) {
+ const size_t kAllocationLimit = 10 * kMB;
+ return length > kAllocationLimit ? i::AllocatePageSize() : length;
}
};
@@ -209,6 +197,18 @@ class PredictablePlatform : public Platform {
DCHECK_NOT_NULL(platform_);
}
+ PageAllocator* GetPageAllocator() override {
+ return platform_->GetPageAllocator();
+ }
+
+ void OnCriticalMemoryPressure() override {
+ platform_->OnCriticalMemoryPressure();
+ }
+
+ bool OnCriticalMemoryPressure(size_t length) override {
+ return platform_->OnCriticalMemoryPressure(length);
+ }
+
std::shared_ptr<TaskRunner> GetForegroundTaskRunner(
v8::Isolate* isolate) override {
return platform_->GetForegroundTaskRunner(isolate);
@@ -300,7 +300,7 @@ base::Thread::Options GetThreadOptions(const char* name) {
// which is not enough to parse the big literal expressions used in tests.
// The stack size should be at least StackGuard::kLimitSize + some
// OS-specific padding for thread startup code. 2Mbytes seems to be enough.
- return base::Thread::Options(name, 2 * MB);
+ return base::Thread::Options(name, 2 * kMB);
}
} // namespace
@@ -506,6 +506,9 @@ std::vector<Worker*> Shell::workers_;
std::vector<ExternalizedContents> Shell::externalized_contents_;
base::LazyMutex Shell::isolate_status_lock_;
std::map<v8::Isolate*, bool> Shell::isolate_status_;
+base::LazyMutex Shell::cached_code_mutex_;
+std::map<std::string, std::unique_ptr<ScriptCompiler::CachedData>>
+ Shell::cached_code_map_;
Global<Context> Shell::evaluation_context_;
ArrayBuffer::Allocator* Shell::array_buffer_allocator;
@@ -566,95 +569,39 @@ class BackgroundCompileThread : public base::Thread {
std::unique_ptr<v8::ScriptCompiler::ScriptStreamingTask> task_;
};
-ScriptCompiler::CachedData* CompileForCachedData(
- Local<String> source, Local<Value> name,
- ScriptCompiler::CompileOptions compile_options) {
- int source_length = source->Length();
- uint16_t* source_buffer = new uint16_t[source_length];
- source->Write(source_buffer, 0, source_length);
- int name_length = 0;
- uint16_t* name_buffer = nullptr;
- if (name->IsString()) {
- Local<String> name_string = Local<String>::Cast(name);
- name_length = name_string->Length();
- name_buffer = new uint16_t[name_length];
- name_string->Write(name_buffer, 0, name_length);
+ScriptCompiler::CachedData* Shell::LookupCodeCache(Isolate* isolate,
+ Local<Value> source) {
+ base::LockGuard<base::Mutex> lock_guard(cached_code_mutex_.Pointer());
+ CHECK(source->IsString());
+ v8::String::Utf8Value key(isolate, source);
+ DCHECK(*key);
+ auto entry = cached_code_map_.find(*key);
+ if (entry != cached_code_map_.end() && entry->second) {
+ int length = entry->second->length;
+ uint8_t* cache = new uint8_t[length];
+ memcpy(cache, entry->second->data, length);
+ ScriptCompiler::CachedData* cached_data = new ScriptCompiler::CachedData(
+ cache, length, ScriptCompiler::CachedData::BufferOwned);
+ return cached_data;
}
- Isolate::CreateParams create_params;
- create_params.array_buffer_allocator = Shell::array_buffer_allocator;
- i::FLAG_hash_seed ^= 1337; // Use a different hash seed.
- Isolate* temp_isolate = Isolate::New(create_params);
- i::FLAG_hash_seed ^= 1337; // Restore old hash seed.
- temp_isolate->SetHostImportModuleDynamicallyCallback(
- Shell::HostImportModuleDynamically);
- temp_isolate->SetHostInitializeImportMetaObjectCallback(
- Shell::HostInitializeImportMetaObject);
- ScriptCompiler::CachedData* result = nullptr;
- {
- Isolate::Scope isolate_scope(temp_isolate);
- HandleScope handle_scope(temp_isolate);
- Context::Scope context_scope(Context::New(temp_isolate));
- Local<String> source_copy =
- v8::String::NewFromTwoByte(temp_isolate, source_buffer,
- v8::NewStringType::kNormal, source_length)
- .ToLocalChecked();
- Local<Value> name_copy;
- if (name_buffer) {
- name_copy =
- v8::String::NewFromTwoByte(temp_isolate, name_buffer,
- v8::NewStringType::kNormal, name_length)
- .ToLocalChecked();
- } else {
- name_copy = v8::Undefined(temp_isolate);
- }
- ScriptCompiler::Source script_source(source_copy, ScriptOrigin(name_copy));
- if (!ScriptCompiler::CompileUnboundScript(temp_isolate, &script_source,
- compile_options)
- .IsEmpty() &&
- script_source.GetCachedData()) {
- int length = script_source.GetCachedData()->length;
- uint8_t* cache = new uint8_t[length];
- memcpy(cache, script_source.GetCachedData()->data, length);
- result = new ScriptCompiler::CachedData(
- cache, length, ScriptCompiler::CachedData::BufferOwned);
- }
- }
- temp_isolate->Dispose();
- delete[] source_buffer;
- delete[] name_buffer;
- return result;
+ return nullptr;
}
-
-// Compile a string within the current v8 context.
-MaybeLocal<Script> Shell::CompileString(
- Isolate* isolate, Local<String> source, Local<Value> name,
- ScriptCompiler::CompileOptions compile_options) {
- Local<Context> context(isolate->GetCurrentContext());
- ScriptOrigin origin(name);
- if (compile_options == ScriptCompiler::kNoCompileOptions) {
- ScriptCompiler::Source script_source(source, origin);
- return ScriptCompiler::Compile(context, &script_source, compile_options);
- }
-
- ScriptCompiler::CachedData* data =
- CompileForCachedData(source, name, compile_options);
- ScriptCompiler::Source cached_source(source, origin, data);
- if (compile_options == ScriptCompiler::kProduceCodeCache) {
- compile_options = ScriptCompiler::kConsumeCodeCache;
- } else if (compile_options == ScriptCompiler::kProduceParserCache) {
- compile_options = ScriptCompiler::kConsumeParserCache;
- } else {
- DCHECK(false); // A new compile option?
- }
- if (data == nullptr) compile_options = ScriptCompiler::kNoCompileOptions;
- MaybeLocal<Script> result =
- ScriptCompiler::Compile(context, &cached_source, compile_options);
- CHECK(data == nullptr || !data->rejected);
- return result;
+void Shell::StoreInCodeCache(Isolate* isolate, Local<Value> source,
+ const ScriptCompiler::CachedData* cache_data) {
+ base::LockGuard<base::Mutex> lock_guard(cached_code_mutex_.Pointer());
+ CHECK(source->IsString());
+ if (cache_data == nullptr) return;
+ v8::String::Utf8Value key(isolate, source);
+ DCHECK(*key);
+ int length = cache_data->length;
+ uint8_t* cache = new uint8_t[length];
+ memcpy(cache, cache_data->data, length);
+ cached_code_map_[*key] = std::unique_ptr<ScriptCompiler::CachedData>(
+ new ScriptCompiler::CachedData(cache, length,
+ ScriptCompiler::CachedData::BufferOwned));
}
-
// Executes a string within the current v8 context.
bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
Local<Value> name, bool print_result,
@@ -671,7 +618,24 @@ bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
Local<Context>::New(isolate, data->realms_[data->realm_current_]);
Context::Scope context_scope(realm);
MaybeLocal<Script> maybe_script;
- if (options.stress_background_compile) {
+ Local<Context> context(isolate->GetCurrentContext());
+ ScriptOrigin origin(name);
+
+ if (options.compile_options == ScriptCompiler::kConsumeCodeCache ||
+ options.compile_options == ScriptCompiler::kConsumeParserCache) {
+ ScriptCompiler::CachedData* cached_code =
+ LookupCodeCache(isolate, source);
+ if (cached_code != nullptr) {
+ ScriptCompiler::Source script_source(source, origin, cached_code);
+ maybe_script = ScriptCompiler::Compile(context, &script_source,
+ options.compile_options);
+ CHECK(!cached_code->rejected);
+ } else {
+ ScriptCompiler::Source script_source(source, origin);
+ maybe_script = ScriptCompiler::Compile(
+ context, &script_source, ScriptCompiler::kNoCompileOptions);
+ }
+ } else if (options.stress_background_compile) {
// Start a background thread compiling the script.
BackgroundCompileThread background_compile_thread(isolate, source);
background_compile_thread.Start();
@@ -679,18 +643,22 @@ bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
// In parallel, compile on the main thread to flush out any data races.
{
TryCatch ignore_try_catch(isolate);
- Shell::CompileString(isolate, source, name, options.compile_options);
+ ScriptCompiler::Source script_source(source, origin);
+ USE(ScriptCompiler::Compile(context, &script_source,
+ ScriptCompiler::kNoCompileOptions));
}
// Join with background thread and finalize compilation.
background_compile_thread.Join();
- ScriptOrigin origin(name);
maybe_script = v8::ScriptCompiler::Compile(
- isolate->GetCurrentContext(),
- background_compile_thread.streamed_source(), source, origin);
+ context, background_compile_thread.streamed_source(), source, origin);
} else {
- maybe_script =
- Shell::CompileString(isolate, source, name, options.compile_options);
+ ScriptCompiler::Source script_source(source, origin);
+ maybe_script = ScriptCompiler::Compile(context, &script_source,
+ options.compile_options);
+ if (options.compile_options == ScriptCompiler::kProduceParserCache) {
+ StoreInCodeCache(isolate, source, script_source.GetCachedData());
+ }
}
Local<Script> script;
@@ -700,7 +668,23 @@ bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
return false;
}
+ if (options.code_cache_options ==
+ ShellOptions::CodeCacheOptions::kProduceCache) {
+ // Serialize and store it in memory for the next execution.
+ ScriptCompiler::CachedData* cached_data =
+ ScriptCompiler::CreateCodeCache(script->GetUnboundScript(), source);
+ StoreInCodeCache(isolate, source, cached_data);
+ delete cached_data;
+ }
maybe_result = script->Run(realm);
+ if (options.code_cache_options ==
+ ShellOptions::CodeCacheOptions::kProduceCacheAfterExecute) {
+ // Serialize and store it in memory for the next execution.
+ ScriptCompiler::CachedData* cached_data =
+ ScriptCompiler::CreateCodeCache(script->GetUnboundScript(), source);
+ StoreInCodeCache(isolate, source, cached_data);
+ delete cached_data;
+ }
if (!EmptyMessageQueues(isolate)) success = false;
data->realm_current_ = data->realm_switch_;
}
@@ -2292,7 +2276,7 @@ Local<String> Shell::ReadFile(Isolate* isolate, const char* name) {
char* chars = ReadChars(name, &size);
if (chars == nullptr) return Local<String>();
Local<String> result;
- if (i::FLAG_use_external_strings && internal::String::IsAscii(chars, size)) {
+ if (i::FLAG_use_external_strings && i::String::IsAscii(chars, size)) {
String::ExternalOneByteStringResource* resource =
new ExternalOwningOneByteStringResource(
std::unique_ptr<const char[]>(chars), size);
@@ -2557,11 +2541,11 @@ void SourceGroup::ExecuteInThread() {
Shell::options.enable_inspector);
PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
Execute(isolate);
+ Shell::CompleteMessageLoop(isolate);
}
DisposeModuleEmbedderData(context);
}
Shell::CollectGarbage(isolate);
- Shell::CompleteMessageLoop(isolate);
}
done_semaphore_.Signal();
}
@@ -2591,7 +2575,9 @@ void SourceGroup::JoinThread() {
}
ExternalizedContents::~ExternalizedContents() {
- Shell::array_buffer_allocator->Free(data_, size_);
+ if (base_ != nullptr) {
+ Shell::array_buffer_allocator->Free(base_, length_, mode_);
+ }
}
void SerializationDataQueue::Enqueue(std::unique_ptr<SerializationData> data) {
@@ -2863,11 +2849,23 @@ bool Shell::SetOptions(int argc, char* argv[]) {
strncmp(argv[i], "--cache=", 8) == 0) {
const char* value = argv[i] + 7;
if (!*value || strncmp(value, "=code", 6) == 0) {
- options.compile_options = v8::ScriptCompiler::kProduceCodeCache;
+ options.compile_options = v8::ScriptCompiler::kNoCompileOptions;
+ options.code_cache_options =
+ ShellOptions::CodeCacheOptions::kProduceCache;
} else if (strncmp(value, "=parse", 7) == 0) {
options.compile_options = v8::ScriptCompiler::kProduceParserCache;
} else if (strncmp(value, "=none", 6) == 0) {
options.compile_options = v8::ScriptCompiler::kNoCompileOptions;
+ options.code_cache_options =
+ ShellOptions::CodeCacheOptions::kNoProduceCache;
+ } else if (strncmp(value, "=after-execute", 15) == 0) {
+ options.compile_options = v8::ScriptCompiler::kNoCompileOptions;
+ options.code_cache_options =
+ ShellOptions::CodeCacheOptions::kProduceCacheAfterExecute;
+ } else if (strncmp(value, "=full-code-cache", 17) == 0) {
+ options.compile_options = v8::ScriptCompiler::kEagerCompile;
+ options.code_cache_options =
+ ShellOptions::CodeCacheOptions::kProduceCache;
} else {
printf("Unknown option to --cache.\n");
return false;
@@ -2876,6 +2874,9 @@ bool Shell::SetOptions(int argc, char* argv[]) {
} else if (strcmp(argv[i], "--enable-tracing") == 0) {
options.trace_enabled = true;
argv[i] = nullptr;
+ } else if (strncmp(argv[i], "--trace-path=", 13) == 0) {
+ options.trace_path = argv[i] + 13;
+ argv[i] = nullptr;
} else if (strncmp(argv[i], "--trace-config=", 15) == 0) {
options.trace_config = argv[i] + 15;
argv[i] = nullptr;
@@ -2956,6 +2957,7 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[], bool last_run) {
InspectorClient inspector_client(context, options.enable_inspector);
PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
options.isolate_sources[0].Execute(isolate);
+ CompleteMessageLoop(isolate);
}
if (!use_existing_context) {
DisposeModuleEmbedderData(context);
@@ -2963,7 +2965,6 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[], bool last_run) {
WriteLcovData(isolate, options.lcov_file);
}
CollectGarbage(isolate);
- CompleteMessageLoop(isolate);
for (int i = 1; i < options.num_isolates; ++i) {
if (last_run) {
options.isolate_sources[i].JoinThread();
@@ -3306,7 +3307,8 @@ int Shell::Main(int argc, char* argv[]) {
std::unique_ptr<platform::tracing::TracingController> tracing;
if (options.trace_enabled && !i::FLAG_verify_predictable) {
tracing = base::make_unique<platform::tracing::TracingController>();
- trace_file.open("v8_trace.json");
+
+ trace_file.open(options.trace_path ? options.trace_path : "v8_trace.json");
platform::tracing::TraceBuffer* trace_buffer =
platform::tracing::TraceBuffer::CreateTraceBufferRingBuffer(
platform::tracing::TraceBuffer::kRingBufferChunks,
@@ -3356,7 +3358,7 @@ int Shell::Main(int argc, char* argv[]) {
create_params.add_histogram_sample_callback = AddHistogramSample;
}
- if (i::trap_handler::UseTrapHandler()) {
+ if (i::trap_handler::IsTrapHandlerEnabled()) {
if (!v8::V8::RegisterDefaultSignalHandler()) {
fprintf(stderr, "Could not register signal handler");
exit(1);
@@ -3413,6 +3415,42 @@ int Shell::Main(int argc, char* argv[]) {
bool last_run = i == options.stress_runs - 1;
result = RunMain(isolate, argc, argv, last_run);
}
+ } else if (options.code_cache_options !=
+ ShellOptions::CodeCacheOptions::kNoProduceCache) {
+ printf("============ Run: Produce code cache ============\n");
+ // First run to produce the cache
+ result = RunMain(isolate, argc, argv, false);
+
+ // Change the options to consume cache
+ if (options.compile_options == v8::ScriptCompiler::kProduceParserCache) {
+ options.compile_options = v8::ScriptCompiler::kConsumeParserCache;
+ } else {
+ DCHECK(options.compile_options == v8::ScriptCompiler::kEagerCompile ||
+ options.compile_options ==
+ v8::ScriptCompiler::kNoCompileOptions);
+ options.compile_options = v8::ScriptCompiler::kConsumeCodeCache;
+ }
+
+ printf("============ Run: Consume code cache ============\n");
+ // Second run to consume the cache in new isolate
+ Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = Shell::array_buffer_allocator;
+ i::FLAG_hash_seed ^= 1337; // Use a different hash seed.
+ Isolate* isolate2 = Isolate::New(create_params);
+ i::FLAG_hash_seed ^= 1337; // Restore old hash seed.
+ isolate2->SetHostImportModuleDynamicallyCallback(
+ Shell::HostImportModuleDynamically);
+ isolate2->SetHostInitializeImportMetaObjectCallback(
+ Shell::HostInitializeImportMetaObject);
+ {
+ D8Console console(isolate2);
+ debug::SetConsoleDelegate(isolate2, &console);
+ PerIsolateData data(isolate2);
+ Isolate::Scope isolate_scope(isolate2);
+
+ result = RunMain(isolate2, argc, argv, true);
+ }
+ isolate2->Dispose();
} else {
bool last_run = true;
result = RunMain(isolate, argc, argv, last_run);
@@ -3430,6 +3468,7 @@ int Shell::Main(int argc, char* argv[]) {
}
// Shut down contexts and collect garbage.
+ cached_code_map_.clear();
evaluation_context_.Reset();
stringify_function_.Reset();
CollectGarbage(isolate);
@@ -3438,6 +3477,9 @@ int Shell::Main(int argc, char* argv[]) {
V8::Dispose();
V8::ShutdownPlatform();
+ // Delete the platform explicitly here to write the tracing output to the
+ // tracing file.
+ g_platform.reset();
return result;
}
@@ -3449,3 +3491,6 @@ int main(int argc, char* argv[]) {
return v8::Shell::Main(argc, argv);
}
#endif
+
+#undef CHECK
+#undef DCHECK
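The rewritten ShellArrayBufferAllocator picks its backend purely by size: requests of at least 64 KiB take the page-granular virtual-memory path, smaller ones go to the base allocator. A sketch of that dispatch; the page size and both backends here are simplified stand-ins (plain calloc), not V8's page allocator:

#include <cstdio>
#include <cstdlib>

constexpr size_t kVMThreshold = 64 * 1024;
constexpr size_t kPageSize = 4096;  // assumed power-of-two page size

size_t RoundUpToPage(size_t n) {
  return (n + kPageSize - 1) & ~(kPageSize - 1);
}

void* AllocateBuffer(size_t length) {
  if (length >= kVMThreshold) {
    // "VM" path: round the request up to whole pages first.
    size_t rounded = RoundUpToPage(length);
    std::printf("VM path: %zu -> %zu bytes\n", length, rounded);
    return std::calloc(1, rounded);  // stand-in for AllocatePages()
  }
  return std::calloc(1, length);  // stand-in for the base allocator
}

int main() {
  void* small = AllocateBuffer(100);
  void* large = AllocateBuffer(70000);
  std::free(small);
  std::free(large);
}

Because the threshold alone decides which backend owns a buffer, the Free(data, length, mode) override above can ignore the mode and dispatch on length again.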
diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h
index c699d91d68..8fc6eab046 100644
--- a/deps/v8/src/d8.h
+++ b/deps/v8/src/d8.h
@@ -149,28 +149,36 @@ class SourceGroup {
class ExternalizedContents {
public:
explicit ExternalizedContents(const ArrayBuffer::Contents& contents)
- : data_(contents.Data()), size_(contents.ByteLength()) {}
+ : base_(contents.AllocationBase()),
+ length_(contents.AllocationLength()),
+ mode_(contents.AllocationMode()) {}
explicit ExternalizedContents(const SharedArrayBuffer::Contents& contents)
- : data_(contents.Data()), size_(contents.ByteLength()) {}
+ : base_(contents.AllocationBase()),
+ length_(contents.AllocationLength()),
+ mode_(contents.AllocationMode()) {}
ExternalizedContents(ExternalizedContents&& other)
- : data_(other.data_), size_(other.size_) {
- other.data_ = nullptr;
- other.size_ = 0;
+ : base_(other.base_), length_(other.length_), mode_(other.mode_) {
+ other.base_ = nullptr;
+ other.length_ = 0;
+ other.mode_ = ArrayBuffer::Allocator::AllocationMode::kNormal;
}
ExternalizedContents& operator=(ExternalizedContents&& other) {
if (this != &other) {
- data_ = other.data_;
- size_ = other.size_;
- other.data_ = nullptr;
- other.size_ = 0;
+ base_ = other.base_;
+ length_ = other.length_;
+ mode_ = other.mode_;
+ other.base_ = nullptr;
+ other.length_ = 0;
+ other.mode_ = ArrayBuffer::Allocator::AllocationMode::kNormal;
}
return *this;
}
~ExternalizedContents();
private:
- void* data_;
- size_t size_;
+ void* base_;
+ size_t length_;
+ ArrayBuffer::Allocator::AllocationMode mode_;
DISALLOW_COPY_AND_ASSIGN(ExternalizedContents);
};
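ExternalizedContents now records the allocation base, length, and mode so its destructor can call the matching Free overload; moves transfer ownership and neutralize the source so only one destructor frees. A generic sketch of this move-only ownership pattern, with plain malloc/free standing in for the allocator:

#include <cstddef>
#include <cstdlib>
#include <utility>

// Move-only owner of an externalized buffer: the destructor frees only if
// it still holds a base pointer, mirroring ExternalizedContents.
class OwnedBuffer {
 public:
  OwnedBuffer(void* base, std::size_t length) : base_(base), length_(length) {}

  OwnedBuffer(OwnedBuffer&& other) noexcept
      : base_(other.base_), length_(other.length_) {
    other.base_ = nullptr;  // moved-from object must not free
    other.length_ = 0;
  }

  OwnedBuffer& operator=(OwnedBuffer&& other) noexcept {
    if (this != &other) {
      Release();  // drop any buffer we currently own
      base_ = std::exchange(other.base_, nullptr);
      length_ = std::exchange(other.length_, 0);
    }
    return *this;
  }

  OwnedBuffer(const OwnedBuffer&) = delete;
  OwnedBuffer& operator=(const OwnedBuffer&) = delete;

  ~OwnedBuffer() { Release(); }

 private:
  void Release() {
    if (base_ != nullptr) std::free(base_);  // stand-in for allocator->Free
  }
  void* base_;
  std::size_t length_;
};

int main() {
  OwnedBuffer a(std::malloc(64), 64);
  OwnedBuffer b = std::move(a);  // a no longer owns; only b frees
}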
@@ -280,6 +288,12 @@ class Worker {
class ShellOptions {
public:
+ enum CodeCacheOptions {
+ kNoProduceCache,
+ kProduceCache,
+ kProduceCacheAfterExecute
+ };
+
ShellOptions()
: script_executed(false),
send_idle_notification(false),
@@ -296,11 +310,13 @@ class ShellOptions {
num_isolates(1),
compile_options(v8::ScriptCompiler::kNoCompileOptions),
stress_background_compile(false),
+ code_cache_options(CodeCacheOptions::kNoProduceCache),
isolate_sources(nullptr),
icu_data_file(nullptr),
natives_blob(nullptr),
snapshot_blob(nullptr),
trace_enabled(false),
+ trace_path(nullptr),
trace_config(nullptr),
lcov_file(nullptr),
disable_in_process_stack_traces(false),
@@ -329,11 +345,13 @@ class ShellOptions {
int num_isolates;
v8::ScriptCompiler::CompileOptions compile_options;
bool stress_background_compile;
+ CodeCacheOptions code_cache_options;
SourceGroup* isolate_sources;
const char* icu_data_file;
const char* natives_blob;
const char* snapshot_blob;
bool trace_enabled;
+ const char* trace_path;
const char* trace_config;
const char* lcov_file;
bool disable_in_process_stack_traces;
@@ -344,9 +362,6 @@ class ShellOptions {
class Shell : public i::AllStatic {
public:
- static MaybeLocal<Script> CompileString(
- Isolate* isolate, Local<String> source, Local<Value> name,
- v8::ScriptCompiler::CompileOptions compile_options);
static bool ExecuteString(Isolate* isolate, Local<String> source,
Local<Value> name, bool print_result,
bool report_exceptions);
@@ -504,10 +519,18 @@ class Shell : public i::AllStatic {
int index);
static MaybeLocal<Module> FetchModuleTree(v8::Local<v8::Context> context,
const std::string& file_name);
+ static ScriptCompiler::CachedData* LookupCodeCache(Isolate* isolate,
+ Local<Value> name);
+ static void StoreInCodeCache(Isolate* isolate, Local<Value> name,
+ const ScriptCompiler::CachedData* data);
// We may have multiple isolates running concurrently, so the access to
// the isolate_status_ needs to be concurrency-safe.
static base::LazyMutex isolate_status_lock_;
static std::map<Isolate*, bool> isolate_status_;
+
+ static base::LazyMutex cached_code_mutex_;
+ static std::map<std::string, std::unique_ptr<ScriptCompiler::CachedData>>
+ cached_code_map_;
};
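
Editor's note: LookupCodeCache and StoreInCodeCache above are static and guarded by cached_code_mutex_, since multiple isolates may compile concurrently. A minimal sketch of how the lookup side might be backed in d8.cc; the ToSTLString helper and the copy-out behavior are assumptions, and the caller is presumed to take ownership of the returned CachedData:

// Hedged sketch: thread-safe lookup that hands out an owned copy.
// Assumes the usual d8.cc includes (<cstring> for memcpy, etc.).
ScriptCompiler::CachedData* Shell::LookupCodeCache(Isolate* isolate,
                                                   Local<Value> name) {
  base::LockGuard<base::Mutex> lock_guard(cached_code_mutex_.Pointer());
  CHECK(name->IsString());
  std::string key = ToSTLString(Local<String>::Cast(name));  // assumed helper
  auto entry = cached_code_map_.find(key);
  if (entry == cached_code_map_.end() || !entry->second) return nullptr;
  int length = entry->second->length;
  uint8_t* cache = new uint8_t[length];
  memcpy(cache, entry->second->data, length);
  // BufferOwned: the returned object deletes the copy when destroyed.
  return new ScriptCompiler::CachedData(
      cache, length, ScriptCompiler::CachedData::BufferOwned);
}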
diff --git a/deps/v8/src/debug/debug-coverage.cc b/deps/v8/src/debug/debug-coverage.cc
index d53a6fdc4e..3eae96aa11 100644
--- a/deps/v8/src/debug/debug-coverage.cc
+++ b/deps/v8/src/debug/debug-coverage.cc
@@ -344,6 +344,16 @@ bool IsBlockMode(debug::Coverage::Mode mode) {
}
}
+bool IsBinaryMode(debug::Coverage::Mode mode) {
+ switch (mode) {
+ case debug::Coverage::kBlockBinary:
+ case debug::Coverage::kPreciseBinary:
+ return true;
+ default:
+ return false;
+ }
+}
+
void CollectBlockCoverage(Isolate* isolate, CoverageFunction* function,
SharedFunctionInfo* info,
debug::Coverage::Mode mode) {
@@ -535,14 +545,29 @@ void Coverage::SelectMode(Isolate* isolate, debug::Coverage::Mode mode) {
case debug::Coverage::kPreciseBinary:
case debug::Coverage::kPreciseCount: {
HandleScope scope(isolate);
+
// Remove all optimized functions. Optimized and inlined functions do not
// increment the invocation count.

Deoptimizer::DeoptimizeAll(isolate);
- if (isolate->factory()
- ->feedback_vectors_for_profiling_tools()
- ->IsUndefined(isolate)) {
- isolate->InitializeVectorListFromHeap();
+
+ // Root all feedback vectors to avoid early collection.
+ isolate->MaybeInitializeVectorListFromHeap();
+
+ HeapIterator heap_iterator(isolate->heap());
+ while (HeapObject* o = heap_iterator.next()) {
+ if (IsBinaryMode(mode) && o->IsSharedFunctionInfo()) {
+ // If collecting binary coverage, reset
+ // SFI::has_reported_binary_coverage to avoid optimizing / inlining
+ // functions before they have reported coverage.
+ SharedFunctionInfo* shared = SharedFunctionInfo::cast(o);
+ shared->set_has_reported_binary_coverage(false);
+ } else if (o->IsFeedbackVector()) {
+ // In any case, clear any collected invocation counts.
+ FeedbackVector* vector = FeedbackVector::cast(o);
+ vector->clear_invocation_count();
+ }
}
+
break;
}
}
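
Editor's note: in the two binary modes, coverage per function is boolean and is reported at most once (tracked via SFI::has_reported_binary_coverage), while the count modes rely on the feedback vector's invocation counts; the heap walk above therefore resets both kinds of state whenever a precise mode is (re)selected. A hedged usage sketch against this version's internal debug-interface API (include path and exact collection call per this V8 version; they may differ elsewhere):

// Hedged sketch: select binary coverage, run code, then collect.
#include "src/debug/debug-interface.h"  // assumed include path

void CollectBinaryCoverage(v8::Isolate* isolate) {
  v8::debug::Coverage::SelectMode(isolate,
                                  v8::debug::Coverage::kPreciseBinary);
  // ... execute the scripts under measurement ...
  v8::debug::Coverage coverage =
      v8::debug::Coverage::CollectPrecise(isolate);
  (void)coverage;  // inspect per-script / per-function data as needed
}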
diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc
index b6e3f14ed1..33bc81e5f7 100644
--- a/deps/v8/src/debug/debug-evaluate.cc
+++ b/deps/v8/src/debug/debug-evaluate.cc
@@ -16,6 +16,7 @@
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecodes.h"
#include "src/isolate-inl.h"
+#include "src/snapshot/snapshot.h"
namespace v8 {
namespace internal {
@@ -58,13 +59,6 @@ MaybeHandle<Object> DebugEvaluate::Local(Isolate* isolate,
if (!it.is_javascript()) return isolate->factory()->undefined_value();
JavaScriptFrame* frame = it.javascript_frame();
- // Traverse the saved contexts chain to find the active context for the
- // selected frame.
- SaveContext* save =
- DebugFrameHelper::FindSavedContextForFrame(isolate, frame);
- SaveContext savex(isolate);
- isolate->set_context(*(save->context()));
-
// This is not very different from DebugEvaluate::Global, except that
// variables accessible by the function we are evaluating from are
// materialized and included on top of the native context. Changes to
@@ -284,7 +278,7 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(ToString) \
V(ToLength) \
V(ToNumber) \
- V(NumberToString) \
+ V(NumberToStringSkipCache) \
/* Type checks */ \
V(IsJSReceiver) \
V(IsSmi) \
@@ -349,7 +343,11 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(AllocateSeqOneByteString) \
V(AllocateSeqTwoByteString) \
V(ObjectCreate) \
+ V(ObjectEntries) \
+ V(ObjectEntriesSkipFastPath) \
V(ObjectHasOwnProperty) \
+ V(ObjectValues) \
+ V(ObjectValuesSkipFastPath) \
V(ArrayIndexOf) \
V(ArrayIncludes_Slow) \
V(ArrayIsArray) \
@@ -361,6 +359,7 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(ThrowRangeError) \
V(ToName) \
V(GetOwnPropertyDescriptor) \
+ V(StackGuard) \
/* Misc. */ \
V(Call) \
V(MaxSmi) \
@@ -522,6 +521,8 @@ bool BuiltinHasNoSideEffect(Builtins::Name id) {
case Builtins::kArrayPrototypeValues:
case Builtins::kArrayIncludes:
case Builtins::kArrayPrototypeEntries:
+ case Builtins::kArrayPrototypeFind:
+ case Builtins::kArrayPrototypeFindIndex:
case Builtins::kArrayPrototypeKeys:
case Builtins::kArrayForEach:
case Builtins::kArrayEvery:
@@ -751,16 +752,29 @@ bool DebugEvaluate::FunctionHasNoSideEffect(Handle<SharedFunctionInfo> info) {
? info->lazy_deserialization_builtin_id()
: info->code()->builtin_index();
DCHECK_NE(Builtins::kDeserializeLazy, builtin_index);
- if (builtin_index >= 0 && builtin_index < Builtins::builtin_count &&
+ if (Builtins::IsBuiltinId(builtin_index) &&
BuiltinHasNoSideEffect(static_cast<Builtins::Name>(builtin_index))) {
#ifdef DEBUG
- if (info->code()->builtin_index() == Builtins::kDeserializeLazy) {
- return true; // Target builtin is not yet deserialized.
+ Isolate* isolate = info->GetIsolate();
+ Code* code = isolate->builtins()->builtin(builtin_index);
+ if (code->builtin_index() == Builtins::kDeserializeLazy) {
+ // Target builtin is not yet deserialized. Deserialize it now.
+
+ DCHECK(Builtins::IsLazy(builtin_index));
+ DCHECK_EQ(Builtins::TFJ, Builtins::KindOf(builtin_index));
+
+ if (FLAG_trace_lazy_deserialization) {
+ PrintF("Lazy-deserializing builtin %s\n",
+ Builtins::name(builtin_index));
+ }
+
+ code = Snapshot::DeserializeBuiltin(isolate, builtin_index);
+ DCHECK_NE(Builtins::kDeserializeLazy, code->builtin_index());
}
// TODO(yangguo): Check builtin-to-builtin calls too.
int mode = RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE);
bool failed = false;
- for (RelocIterator it(info->code(), mode); !it.done(); it.next()) {
+ for (RelocIterator it(code, mode); !it.done(); it.next()) {
RelocInfo* rinfo = it.rinfo();
Address address = rinfo->target_external_reference();
const Runtime::Function* function = Runtime::FunctionForEntry(address);
diff --git a/deps/v8/src/debug/debug-frames.h b/deps/v8/src/debug/debug-frames.h
index 9b669ea096..6b4f8c23f6 100644
--- a/deps/v8/src/debug/debug-frames.h
+++ b/deps/v8/src/debug/debug-frames.h
@@ -9,15 +9,11 @@
#include "src/frames.h"
#include "src/isolate.h"
#include "src/objects.h"
+#include "src/wasm/wasm-interpreter.h"
namespace v8 {
namespace internal {
-// Forward declaration:
-namespace wasm {
-class InterpretedFrame;
-}
-
class FrameInspector {
public:
FrameInspector(StandardFrame* frame, int inlined_frame_index,
@@ -61,7 +57,7 @@ class FrameInspector {
StandardFrame* frame_;
std::unique_ptr<DeoptimizedFrameInfo> deoptimized_frame_;
- std::unique_ptr<wasm::InterpretedFrame> wasm_interpreted_frame_;
+ wasm::WasmInterpreter::FramePtr wasm_interpreted_frame_;
Isolate* isolate_;
Handle<Script> script_;
Handle<Object> receiver_;
diff --git a/deps/v8/src/debug/debug-type-profile.cc b/deps/v8/src/debug/debug-type-profile.cc
index c89849e350..6288c11b94 100644
--- a/deps/v8/src/debug/debug-type-profile.cc
+++ b/deps/v8/src/debug/debug-type-profile.cc
@@ -105,10 +105,7 @@ void TypeProfile::SelectMode(Isolate* isolate, debug::TypeProfile::Mode mode) {
}
} else {
DCHECK_EQ(debug::TypeProfile::Mode::kCollect, mode);
- if (isolate->factory()->feedback_vectors_for_profiling_tools()->IsUndefined(
- isolate)) {
- isolate->InitializeVectorListFromHeap();
- }
+ isolate->MaybeInitializeVectorListFromHeap();
}
isolate->set_type_profile_mode(mode);
}
diff --git a/deps/v8/src/debug/debug.cc b/deps/v8/src/debug/debug.cc
index 78cb102fa8..c087a0868c 100644
--- a/deps/v8/src/debug/debug.cc
+++ b/deps/v8/src/debug/debug.cc
@@ -1782,13 +1782,13 @@ void Debug::RunPromiseHook(PromiseHookType hook_type, Handle<JSPromise> promise,
if (*code == *BUILTIN_CODE(isolate_, AsyncFunctionPromiseCreate)) {
type = debug::kDebugAsyncFunctionPromiseCreated;
last_frame_was_promise_builtin = true;
- } else if (*code == *BUILTIN_CODE(isolate_, PromiseThen)) {
+ } else if (*code == *BUILTIN_CODE(isolate_, PromisePrototypeThen)) {
type = debug::kDebugPromiseThen;
last_frame_was_promise_builtin = true;
- } else if (*code == *BUILTIN_CODE(isolate_, PromiseCatch)) {
+ } else if (*code == *BUILTIN_CODE(isolate_, PromisePrototypeCatch)) {
type = debug::kDebugPromiseCatch;
last_frame_was_promise_builtin = true;
- } else if (*code == *BUILTIN_CODE(isolate_, PromiseFinally)) {
+ } else if (*code == *BUILTIN_CODE(isolate_, PromisePrototypeFinally)) {
type = debug::kDebugPromiseFinally;
last_frame_was_promise_builtin = true;
}
@@ -2147,7 +2147,7 @@ bool Debug::PerformSideEffectCheck(Handle<JSFunction> function) {
return false;
}
Deoptimizer::DeoptimizeFunction(*function);
- if (!function->shared()->HasNoSideEffect()) {
+ if (!SharedFunctionInfo::HasNoSideEffect(handle(function->shared()))) {
if (FLAG_trace_side_effect_free_debug_evaluate) {
PrintF("[debug-evaluate] Function %s failed side effect check.\n",
function->shared()->DebugName()->ToCString().get());
diff --git a/deps/v8/src/debug/liveedit.cc b/deps/v8/src/debug/liveedit.cc
index 9180608b21..a2b22d58d4 100644
--- a/deps/v8/src/debug/liveedit.cc
+++ b/deps/v8/src/debug/liveedit.cc
@@ -840,7 +840,7 @@ void LiveEdit::ReplaceFunctionCode(
}
shared_info->set_scope_info(new_shared_info->scope_info());
shared_info->set_outer_scope_info(new_shared_info->outer_scope_info());
- shared_info->DisableOptimization(kLiveEdit);
+ shared_info->DisableOptimization(BailoutReason::kLiveEdit);
// Update the type feedback vector, if needed.
Handle<FeedbackMetadata> new_feedback_metadata(
new_shared_info->feedback_metadata());
@@ -898,7 +898,7 @@ void LiveEdit::SetFunctionScript(Handle<JSValue> function_wrapper,
Isolate* isolate = function_wrapper->GetIsolate();
CHECK(script_handle->IsScript() || script_handle->IsUndefined(isolate));
SharedFunctionInfo::SetScript(shared_info, script_handle);
- shared_info->DisableOptimization(kLiveEdit);
+ shared_info->DisableOptimization(BailoutReason::kLiveEdit);
function_wrapper->GetIsolate()->compilation_cache()->Remove(shared_info);
}
@@ -1009,7 +1009,8 @@ static Handle<Script> CreateScriptCopy(Handle<Script> original) {
copy->set_column_offset(original->column_offset());
copy->set_type(original->type());
copy->set_context_data(original->context_data());
- copy->set_eval_from_shared(original->eval_from_shared());
+ copy->set_eval_from_shared_or_wrapped_arguments(
+ original->eval_from_shared_or_wrapped_arguments());
copy->set_eval_from_position(original->eval_from_position());
Handle<FixedArray> infos(isolate->factory()->NewFixedArray(
diff --git a/deps/v8/src/debug/mirrors.js b/deps/v8/src/debug/mirrors.js
index 8e9a5bf3da..15d5e64258 100644
--- a/deps/v8/src/debug/mirrors.js
+++ b/deps/v8/src/debug/mirrors.js
@@ -569,7 +569,7 @@ inherits(NumberMirror, ValueMirror);
NumberMirror.prototype.toText = function() {
- return %NumberToString(this.value_);
+ return '' + this.value_;
};
diff --git a/deps/v8/src/deoptimize-reason.h b/deps/v8/src/deoptimize-reason.h
index ddfe637293..3fabf555be 100644
--- a/deps/v8/src/deoptimize-reason.h
+++ b/deps/v8/src/deoptimize-reason.h
@@ -11,24 +11,16 @@ namespace v8 {
namespace internal {
#define DEOPTIMIZE_REASON_LIST(V) \
- V(AccessCheck, "Access check needed") \
- V(NoReason, "no reason") \
V(ArrayBufferWasNeutered, "array buffer was neutered") \
- V(ConstantGlobalVariableAssignment, "Constant global variable assignment") \
- V(ConversionOverflow, "conversion overflow") \
V(CowArrayElementsChanged, "copy-on-write array's elements changed") \
+ V(CouldNotGrowElements, "failed to grow elements store") \
+ V(DeoptimizeNow, "%_DeoptimizeNow") \
V(DivisionByZero, "division by zero") \
- V(ExpectedHeapNumber, "Expected heap number") \
- V(ExpectedSmi, "Expected smi") \
- V(ForcedDeoptToRuntime, "Forced deopt to runtime") \
V(Hole, "hole") \
V(InstanceMigrationFailed, "instance migration failed") \
V(InsufficientTypeFeedbackForCall, "Insufficient type feedback for call") \
- V(InsufficientTypeFeedbackForCallWithArguments, \
- "Insufficient type feedback for call with arguments") \
V(InsufficientTypeFeedbackForConstruct, \
"Insufficient type feedback for construct") \
- V(FastPathFailed, "Falling off the fast path") \
V(InsufficientTypeFeedbackForForIn, "Insufficient type feedback for for-in") \
V(InsufficientTypeFeedbackForBinaryOperation, \
"Insufficient type feedback for binary operation") \
@@ -40,48 +32,28 @@ namespace internal {
"Insufficient type feedback for generic keyed access") \
V(InsufficientTypeFeedbackForUnaryOperation, \
"Insufficient type feedback for unary operation") \
- V(KeyIsNegative, "key is negative") \
V(LostPrecision, "lost precision") \
V(LostPrecisionOrNaN, "lost precision or NaN") \
- V(MementoFound, "memento found") \
V(MinusZero, "minus zero") \
V(NaN, "NaN") \
- V(NegativeKeyEncountered, "Negative key encountered") \
- V(NegativeValue, "negative value") \
V(NoCache, "no cache") \
V(NotAHeapNumber, "not a heap number") \
- V(NotAHeapNumberUndefined, "not a heap number/undefined") \
V(NotAJavaScriptObject, "not a JavaScript object") \
V(NotANumberOrOddball, "not a Number or Oddball") \
V(NotASmi, "not a Smi") \
V(NotASymbol, "not a Symbol") \
V(OutOfBounds, "out of bounds") \
- V(OutsideOfRange, "Outside of range") \
V(Overflow, "overflow") \
- V(Proxy, "proxy") \
V(ReceiverNotAGlobalProxy, "receiver was not a global proxy") \
- V(ReceiverWasAGlobalObject, "receiver was a global object") \
V(Smi, "Smi") \
- V(TooManyArguments, "too many arguments") \
- V(TracingElementsTransitions, "Tracing elements transitions") \
- V(TypeMismatchBetweenFeedbackAndConstant, \
- "Type mismatch between feedback and constant") \
- V(UnexpectedCellContentsInConstantGlobalStore, \
- "Unexpected cell contents in constant global store") \
- V(UnexpectedCellContentsInGlobalStore, \
- "Unexpected cell contents in global store") \
- V(UnexpectedObject, "unexpected object") \
- V(UnknownMapInPolymorphicAccess, "Unknown map in polymorphic access") \
- V(UnknownMapInPolymorphicCall, "Unknown map in polymorphic call") \
- V(UnknownMapInPolymorphicElementAccess, \
- "Unknown map in polymorphic element access") \
- V(UnknownMap, "Unknown map") \
+ V(Unknown, "(unknown)") \
V(ValueMismatch, "value mismatch") \
+ V(WrongCallTarget, "wrong call target") \
+ V(WrongEnumIndices, "wrong enum indices") \
V(WrongInstanceType, "wrong instance type") \
V(WrongMap, "wrong map") \
V(WrongName, "wrong name") \
- V(UndefinedOrNullInForIn, "null or undefined in for-in") \
- V(UndefinedOrNullInToObject, "null or undefined in ToObject")
+ V(WrongValue, "wrong value")
enum class DeoptimizeReason : uint8_t {
#define DEOPTIMIZE_REASON(Name, message) k##Name,
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index ac6818ed0d..362bd12cb6 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -19,6 +19,8 @@
#include "src/tracing/trace-event.h"
#include "src/v8.h"
+// Has to be the last include (doesn't have include guards)
+#include "src/objects/object-macros.h"
namespace v8 {
namespace internal {
@@ -267,7 +269,7 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
RuntimeCallTimerScope runtimeTimer(isolate,
- &RuntimeCallStats::DeoptimizeCode);
+ RuntimeCallCounterId::kDeoptimizeCode);
TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
TRACE_EVENT0("v8", "V8.DeoptimizeCode");
if (FLAG_trace_deopt) {
@@ -288,7 +290,7 @@ void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
void Deoptimizer::DeoptimizeMarkedCode(Isolate* isolate) {
RuntimeCallTimerScope runtimeTimer(isolate,
- &RuntimeCallStats::DeoptimizeCode);
+ RuntimeCallCounterId::kDeoptimizeCode);
TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
TRACE_EVENT0("v8", "V8.DeoptimizeCode");
if (FLAG_trace_deopt) {
@@ -319,7 +321,7 @@ void Deoptimizer::MarkAllCodeForContext(Context* context) {
void Deoptimizer::DeoptimizeFunction(JSFunction* function, Code* code) {
Isolate* isolate = function->GetIsolate();
RuntimeCallTimerScope runtimeTimer(isolate,
- &RuntimeCallStats::DeoptimizeCode);
+ RuntimeCallCounterId::kDeoptimizeCode);
TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
TRACE_EVENT0("v8", "V8.DeoptimizeCode");
if (code == nullptr) code = function->code();
@@ -381,7 +383,6 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction* function,
function_(function),
bailout_id_(bailout_id),
bailout_type_(type),
- preserve_optimized_(false),
from_(from),
fp_to_sp_delta_(fp_to_sp_delta),
deoptimizing_throw_(false),
@@ -555,6 +556,10 @@ int LookupCatchHandler(TranslatedFrame* translated_frame, int* data_out) {
return -1;
}
+bool ShouldPadArguments(int arg_count) {
+ return kPadArguments && (arg_count % 2 != 0);
+}
+
} // namespace
// We rely on this function not causing a GC. It is called from generated code
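
Editor's note: ShouldPadArguments exists for targets where the stack must stay 16-byte aligned while a slot is 8 bytes (kPadArguments is set per architecture, e.g. on arm64): an odd number of argument slots gets one extra the_hole padding slot. A self-contained check of the predicate under that assumption:

// Standalone sketch; kPadArguments is assumed true, as on arm64.
constexpr bool kPadArguments = true;
constexpr bool ShouldPadArguments(int arg_count) {
  return kPadArguments && (arg_count % 2 != 0);
}
static_assert(ShouldPadArguments(3), "odd slot counts need one pad slot");
static_assert(!ShouldPadArguments(4), "even slot counts are aligned");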
@@ -600,7 +605,8 @@ void Deoptimizer::DoComputeOutputFrames() {
input_data->OptimizationId()->value(), bailout_id_, fp_to_sp_delta_,
caller_frame_top_);
if (bailout_type_ == EAGER || bailout_type_ == SOFT) {
- compiled_code_->PrintDeoptLocation(trace_scope_->file(), from_);
+ compiled_code_->PrintDeoptLocation(
+ trace_scope_->file(), " ;;; deoptimize at ", from_);
}
}
@@ -727,7 +733,8 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
}
// The 'fixed' part of the frame consists of the incoming parameters and
- // the part described by InterpreterFrameConstants.
+ // the part described by InterpreterFrameConstants. This will include
+ // argument padding, when needed.
unsigned fixed_frame_size = ComputeInterpretedFixedSize(shared);
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
@@ -752,12 +759,20 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
// Compute the incoming parameter translation.
unsigned output_offset = output_frame_size;
+
+ if (ShouldPadArguments(parameter_count)) {
+ output_offset -= kPointerSize;
+ WriteValueToOutput(isolate()->heap()->the_hole_value(), 0, frame_index,
+ output_offset, "padding ");
+ }
+
for (int i = 0; i < parameter_count; ++i) {
output_offset -= kPointerSize;
WriteTranslatedValueToOutput(&value_iterator, &input_index, frame_index,
output_offset);
}
+ DCHECK_EQ(output_offset, output_frame->GetLastArgumentSlotOffset());
if (trace_scope_ != nullptr) {
PrintF(trace_scope_->file(), " -------------------------\n");
}
@@ -977,6 +992,9 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(
unsigned height = translated_frame->height();
unsigned height_in_bytes = height * kPointerSize;
+ int parameter_count = height;
+ if (ShouldPadArguments(parameter_count)) height_in_bytes += kPointerSize;
+
TranslatedFrame::iterator function_iterator = value_iterator;
Object* function = value_iterator->GetRawValue();
value_iterator++;
@@ -990,7 +1008,6 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
// Allocate and store the output frame description.
- int parameter_count = height;
FrameDescription* output_frame = new (output_frame_size)
FrameDescription(output_frame_size, parameter_count);
@@ -1009,14 +1026,21 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(
}
output_frame->SetTop(top_address);
- // Compute the incoming parameter translation.
unsigned output_offset = output_frame_size;
+ if (ShouldPadArguments(parameter_count)) {
+ output_offset -= kPointerSize;
+ WriteValueToOutput(isolate()->heap()->the_hole_value(), 0, frame_index,
+ output_offset, "padding ");
+ }
+
+ // Compute the incoming parameter translation.
for (int i = 0; i < parameter_count; ++i) {
output_offset -= kPointerSize;
WriteTranslatedValueToOutput(&value_iterator, &input_index, frame_index,
output_offset);
}
+ DCHECK_EQ(output_offset, output_frame->GetLastArgumentSlotOffset());
// Read caller's PC from the previous frame.
output_offset -= kPCOnStackSize;
intptr_t value;
@@ -1080,6 +1104,10 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(
PrintF(trace_scope_->file(), "(%d)\n", height - 1);
}
+ output_offset -= kPointerSize;
+ WriteValueToOutput(isolate()->heap()->the_hole_value(), 0, frame_index,
+ output_offset, "padding ");
+
DCHECK_EQ(0, output_offset);
Builtins* builtins = isolate_->builtins();
@@ -1125,6 +1153,9 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
if (PadTopOfStackRegister()) height_in_bytes += kPointerSize;
}
+ int parameter_count = height;
+ if (ShouldPadArguments(parameter_count)) height_in_bytes += kPointerSize;
+
JSFunction* function = JSFunction::cast(value_iterator->GetRawValue());
value_iterator++;
input_index++;
@@ -1140,8 +1171,8 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
// Allocate and store the output frame description.
- FrameDescription* output_frame =
- new (output_frame_size) FrameDescription(output_frame_size);
+ FrameDescription* output_frame = new (output_frame_size)
+ FrameDescription(output_frame_size, parameter_count);
// Construct stub cannot be topmost.
DCHECK(frame_index > 0 && frame_index < output_count_);
@@ -1154,9 +1185,15 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
output_frame->SetTop(top_address);
- // Compute the incoming parameter translation.
- int parameter_count = height;
unsigned output_offset = output_frame_size;
+
+ if (ShouldPadArguments(parameter_count)) {
+ output_offset -= kPointerSize;
+ WriteValueToOutput(isolate()->heap()->the_hole_value(), 0, frame_index,
+ output_offset, "padding ");
+ }
+
+ // Compute the incoming parameter translation.
for (int i = 0; i < parameter_count; ++i) {
output_offset -= kPointerSize;
// The allocated receiver of a construct stub frame is passed as the
@@ -1167,6 +1204,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
(i == 0) ? reinterpret_cast<Address>(top_address) : nullptr);
}
+ DCHECK_EQ(output_offset, output_frame->GetLastArgumentSlotOffset());
// Read caller's PC from the previous frame.
output_offset -= kPCOnStackSize;
intptr_t callers_pc = output_[frame_index - 1]->GetPc();
@@ -1224,10 +1262,21 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
"constructor function ");
// The deopt info contains the implicit receiver or the new target at the
- // position of the receiver. Copy it to the top of stack.
+ // position of the receiver. Copy it to the top of stack, with the hole value
+ // as padding to maintain alignment.
output_offset -= kPointerSize;
- value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
+ WriteValueToOutput(isolate()->heap()->the_hole_value(), 0, frame_index,
+ output_offset, "padding");
+
+ output_offset -= kPointerSize;
+
+ if (ShouldPadArguments(parameter_count)) {
+ value = output_frame->GetFrameSlot(output_frame_size - 2 * kPointerSize);
+ } else {
+ value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
+ }
output_frame->SetFrameSlot(output_offset, value);
+
if (bailout_id == BailoutId::ConstructStubCreate()) {
DebugPrintOutputSlot(value, frame_index, output_offset, "new target\n");
} else {
@@ -1371,7 +1420,7 @@ void Deoptimizer::DoComputeBuiltinContinuation(
// parameter count.
int stack_param_count = height_in_words - register_parameter_count - 1;
if (must_handle_result) stack_param_count++;
- int output_frame_size =
+ unsigned output_frame_size =
kPointerSize * (stack_param_count + allocatable_register_count +
padding_slot_count) +
BuiltinContinuationFrameConstants::kFixedFrameSize;
@@ -1413,9 +1462,12 @@ void Deoptimizer::DoComputeBuiltinContinuation(
stack_param_count);
}
- unsigned output_frame_offset = output_frame_size;
- FrameDescription* output_frame =
- new (output_frame_size) FrameDescription(output_frame_size);
+ int translated_stack_parameters =
+ must_handle_result ? stack_param_count - 1 : stack_param_count;
+
+ if (ShouldPadArguments(stack_param_count)) output_frame_size += kPointerSize;
+ FrameDescription* output_frame = new (output_frame_size)
+ FrameDescription(output_frame_size, stack_param_count);
output_[frame_index] = output_frame;
// The top address of the frame is computed from the previous frame's top and
@@ -1446,8 +1498,12 @@ void Deoptimizer::DoComputeBuiltinContinuation(
intptr_t value;
- int translated_stack_parameters =
- must_handle_result ? stack_param_count - 1 : stack_param_count;
+ unsigned output_frame_offset = output_frame_size;
+ if (ShouldPadArguments(stack_param_count)) {
+ output_frame_offset -= kPointerSize;
+ WriteValueToOutput(isolate()->heap()->the_hole_value(), 0, frame_index,
+ output_frame_offset, "padding ");
+ }
for (int i = 0; i < translated_stack_parameters; ++i) {
output_frame_offset -= kPointerSize;
@@ -1462,6 +1518,8 @@ void Deoptimizer::DoComputeBuiltinContinuation(
"placeholder for return result on lazy deopt ");
}
+ DCHECK_EQ(output_frame_offset, output_frame->GetLastArgumentSlotOffset());
+
for (int i = 0; i < register_parameter_count; ++i) {
Object* object = value_iterator->GetRawValue();
int code = continuation_descriptor.GetRegisterParameter(i).code();
@@ -1612,13 +1670,6 @@ void Deoptimizer::DoComputeBuiltinContinuation(
output_frame->SetRegister(context_reg.code(), context_value);
}
- // TODO(6898): For eager deopts within builtin stub frames we currently skip
- // marking the underlying function as deoptimized. This is to avoid deopt
- // loops where we would just generate the same optimized code all over again.
- if (is_topmost && bailout_type_ != LAZY) {
- preserve_optimized_ = true;
- }
-
// Ensure the frame pointer register points to the callee's frame. The builtin
// will build its own frame once we continue to it.
Register fp_reg = JavaScriptFrame::fp_register();
@@ -1663,6 +1714,15 @@ void Deoptimizer::MaterializeHeapObjects() {
reinterpret_cast<intptr_t>(*value);
}
+ translated_state_.VerifyMaterializedObjects();
+
+ bool feedback_updated = translated_state_.DoUpdateFeedback();
+ if (trace_scope_ != nullptr && feedback_updated) {
+ PrintF(trace_scope_->file(), "Feedback updated");
+ compiled_code_->PrintDeoptLocation(trace_scope_->file(),
+ " from deoptimization at ", from_);
+ }
+
isolate_->materialized_object_store()->Remove(
reinterpret_cast<Address>(stack_fp_));
}
@@ -1747,14 +1807,6 @@ unsigned Deoptimizer::ComputeInputFrameSize() const {
}
// static
-unsigned Deoptimizer::ComputeJavascriptFixedSize(SharedFunctionInfo* shared) {
- // The fixed part of the frame consists of the return address, frame
- // pointer, function, context, and all the incoming arguments.
- return ComputeIncomingArgumentSize(shared) +
- StandardFrameConstants::kFixedFrameSize;
-}
-
-// static
unsigned Deoptimizer::ComputeInterpretedFixedSize(SharedFunctionInfo* shared) {
// The fixed part of the frame consists of the return address, frame
// pointer, function, context, bytecode offset and all the incoming arguments.
@@ -1764,7 +1816,9 @@ unsigned Deoptimizer::ComputeInterpretedFixedSize(SharedFunctionInfo* shared) {
// static
unsigned Deoptimizer::ComputeIncomingArgumentSize(SharedFunctionInfo* shared) {
- return (shared->internal_formal_parameter_count() + 1) * kPointerSize;
+ int parameter_slots = shared->internal_formal_parameter_count() + 1;
+ if (kPadArguments) parameter_slots = RoundUp(parameter_slots, 2);
+ return parameter_slots * kPointerSize;
}
void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
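
Editor's note: ComputeIncomingArgumentSize now rounds the parameter slots (formal parameters plus the receiver) up to an even count when padding is enabled; for example, two formal parameters plus the receiver occupy three slots, padded to four, i.e. 32 bytes with 8-byte pointers. A quick standalone check of the rounding:

// Standalone sketch of the even-slot rounding used above.
constexpr int RoundUpToEven(int slots) { return (slots + 1) & ~1; }
static_assert(RoundUpToEven(3) == 4, "2 params + receiver pad to 4 slots");
static_assert(RoundUpToEven(4) == 4, "even counts are unchanged");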
@@ -2001,6 +2055,11 @@ void Translation::StoreLiteral(int literal_id) {
buffer_->Add(literal_id);
}
+void Translation::AddUpdateFeedback(int vector_literal, int slot) {
+ buffer_->Add(UPDATE_FEEDBACK);
+ buffer_->Add(vector_literal);
+ buffer_->Add(slot);
+}
void Translation::StoreJSFrameFunction() {
StoreStackSlot((StandardFrameConstants::kCallerPCOffset -
@@ -2028,9 +2087,10 @@ int Translation::NumberOfOperandsFor(Opcode opcode) {
case DOUBLE_STACK_SLOT:
case LITERAL:
return 1;
- case BEGIN:
case ARGUMENTS_ADAPTOR_FRAME:
+ case UPDATE_FEEDBACK:
return 2;
+ case BEGIN:
case INTERPRETED_FRAME:
case CONSTRUCT_STUB_FRAME:
case BUILTIN_CONTINUATION_FRAME:
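
Editor's note: with this change BEGIN carries three operands, and at most one UPDATE_FEEDBACK record (vector literal index plus slot) follows it directly, which is what the TranslatedState::Init() changes further down decode. Schematically (illustrative operand values, not a real encoding):

// BEGIN             frame_count=2  js_frame_count=1  update_feedback_count=1
// UPDATE_FEEDBACK   vector_literal=5  slot=3      (present iff count == 1)
// INTERPRETED_FRAME ...                           (first of the two frames)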
@@ -2229,7 +2289,7 @@ DeoptimizedFrameInfo::DeoptimizedFrameInfo(TranslatedState* state,
Deoptimizer::DeoptInfo Deoptimizer::GetDeoptInfo(Code* code, Address pc) {
CHECK(code->instruction_start() <= pc && pc <= code->instruction_end());
SourcePosition last_position = SourcePosition::Unknown();
- DeoptimizeReason last_reason = DeoptimizeReason::kNoReason;
+ DeoptimizeReason last_reason = DeoptimizeReason::kUnknown;
int last_deopt_id = kNoDeoptimizationId;
int mask = RelocInfo::ModeMask(RelocInfo::DEOPT_REASON) |
RelocInfo::ModeMask(RelocInfo::DEOPT_ID) |
@@ -2385,9 +2445,8 @@ int TranslatedValue::object_index() const {
Object* TranslatedValue::GetRawValue() const {
// If we have a value, return it.
- Handle<Object> result_handle;
- if (value_.ToHandle(&result_handle)) {
- return *result_handle;
+ if (materialization_state() == kFinished) {
+ return *storage_;
}
// Otherwise, do a best effort to get the value without allocation.
@@ -2429,11 +2488,15 @@ Object* TranslatedValue::GetRawValue() const {
return isolate()->heap()->arguments_marker();
}
+void TranslatedValue::set_initialized_storage(Handle<Object> storage) {
+ DCHECK_EQ(kUninitialized, materialization_state());
+ storage_ = storage;
+ materialization_state_ = kFinished;
+}
Handle<Object> TranslatedValue::GetValue() {
- Handle<Object> result;
// If we already have a value, then get it.
- if (value_.ToHandle(&result)) return result;
+ if (materialization_state() == kFinished) return storage_;
// Otherwise we have to materialize.
switch (kind()) {
@@ -2444,12 +2507,27 @@ Handle<Object> TranslatedValue::GetValue() {
case TranslatedValue::kFloat:
case TranslatedValue::kDouble: {
MaterializeSimple();
- return value_.ToHandleChecked();
+ return storage_;
}
case TranslatedValue::kCapturedObject:
- case TranslatedValue::kDuplicatedObject:
- return container_->MaterializeObjectAt(object_index());
+ case TranslatedValue::kDuplicatedObject: {
+ // We need to materialize the object (or possibly even object graphs).
+ // To make the object verifier happy, we materialize in two steps.
+
+ // 1. Allocate storage for reachable objects. This makes sure that for
+ // each object we have allocated space on heap. The space will be
+ // a byte array that will be later initialized, or a fully
+ // initialized object if it is safe to allocate one that will
+ // pass the verifier.
+ container_->EnsureObjectAllocatedAt(this);
+
+ // 2. Initialize the objects. If we have allocated only byte arrays
+ // for some objects, we now overwrite the byte arrays with the
+ // correct object fields. Note that this phase does not allocate
+ // any new objects, so it does not trigger the object verifier.
+ return container_->InitializeObjectAt(this);
+ }
case TranslatedValue::kInvalid:
FATAL("unexpected case");
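
Editor's note: the two-phase scheme matters most for cyclic object graphs: if A references B and B references A, storage for both must exist before either is initialized, and because phase two never allocates, a GC-triggered heap verifier cannot observe a half-built object. A language-level illustration of the same allocate-then-initialize pattern:

// Standalone sketch: allocate every node first, then wire the cycle.
struct Node { Node* other = nullptr; };

void BuildCycle() {
  Node* a = new Node;  // phase 1: allocation only
  Node* b = new Node;
  a->other = b;        // phase 2: initialization, no allocation
  b->other = a;
  delete a;            // cleanup for the illustration
  delete b;
}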
@@ -2460,36 +2538,39 @@ Handle<Object> TranslatedValue::GetValue() {
return Handle<Object>::null();
}
-
void TranslatedValue::MaterializeSimple() {
// If we already have materialized, return.
- if (!value_.is_null()) return;
+ if (materialization_state() == kFinished) return;
Object* raw_value = GetRawValue();
if (raw_value != isolate()->heap()->arguments_marker()) {
// We can get the value without allocation, just return it here.
- value_ = Handle<Object>(raw_value, isolate());
+ set_initialized_storage(Handle<Object>(raw_value, isolate()));
return;
}
switch (kind()) {
case kInt32:
- value_ = Handle<Object>(isolate()->factory()->NewNumber(int32_value()));
+ set_initialized_storage(
+ Handle<Object>(isolate()->factory()->NewNumber(int32_value())));
return;
case kUInt32:
- value_ = Handle<Object>(isolate()->factory()->NewNumber(uint32_value()));
+ set_initialized_storage(
+ Handle<Object>(isolate()->factory()->NewNumber(uint32_value())));
return;
case kFloat: {
double scalar_value = float_value().get_scalar();
- value_ = Handle<Object>(isolate()->factory()->NewNumber(scalar_value));
+ set_initialized_storage(
+ Handle<Object>(isolate()->factory()->NewNumber(scalar_value)));
return;
}
case kDouble: {
double scalar_value = double_value().get_scalar();
- value_ = Handle<Object>(isolate()->factory()->NewNumber(scalar_value));
+ set_initialized_storage(
+ Handle<Object>(isolate()->factory()->NewNumber(scalar_value)));
return;
}
@@ -2551,7 +2632,7 @@ Float64 TranslatedState::GetDoubleSlot(Address fp, int slot_offset) {
void TranslatedValue::Handlify() {
if (kind() == kTagged) {
- value_ = Handle<Object>(raw_literal(), isolate());
+ set_initialized_storage(Handle<Object>(raw_literal(), isolate()));
raw_literal_ = nullptr;
}
}
@@ -2712,7 +2793,7 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
return TranslatedFrame::JavaScriptBuiltinContinuationFrame(
bailout_id, shared_info, height_with_context);
}
-
+ case Translation::UPDATE_FEEDBACK:
case Translation::BEGIN:
case Translation::DUPLICATED_OBJECT:
case Translation::ARGUMENTS_ELEMENTS:
@@ -2802,6 +2883,7 @@ void TranslatedState::CreateArgumentsElementsTranslatedValues(
PrintF(trace_file, "arguments elements object #%d (type = %d, length = %d)",
object_index, static_cast<uint8_t>(type), length);
}
+
object_positions_.push_back({frame_index, value_index});
frame.Add(TranslatedValue::NewDeferredObject(
this, length + FixedArray::kHeaderSize / kPointerSize, object_index));
@@ -2855,6 +2937,7 @@ int TranslatedState::CreateNextTranslatedValue(
case Translation::CONSTRUCT_STUB_FRAME:
case Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME:
case Translation::BUILTIN_CONTINUATION_FRAME:
+ case Translation::UPDATE_FEEDBACK:
// Peeled off before getting here.
break;
@@ -3117,8 +3200,7 @@ int TranslatedState::CreateNextTranslatedValue(
FATAL("We should never get here - unexpected deopt info.");
}
-TranslatedState::TranslatedState(const JavaScriptFrame* frame)
- : isolate_(nullptr), stack_frame_pointer_(nullptr) {
+TranslatedState::TranslatedState(const JavaScriptFrame* frame) {
int deopt_index = Safepoint::kNoDeoptimizationIndex;
DeoptimizationData* data =
static_cast<const OptimizedFrame*>(frame)->GetDeoptimizationData(
@@ -3131,9 +3213,6 @@ TranslatedState::TranslatedState(const JavaScriptFrame* frame)
frame->function()->shared()->internal_formal_parameter_count());
}
-TranslatedState::TranslatedState()
- : isolate_(nullptr), stack_frame_pointer_(nullptr) {}
-
void TranslatedState::Init(Address input_frame_pointer,
TranslationIterator* iterator,
FixedArray* literal_array, RegisterValues* registers,
@@ -3149,9 +3228,15 @@ void TranslatedState::Init(Address input_frame_pointer,
CHECK(opcode == Translation::BEGIN);
int count = iterator->Next();
+ frames_.reserve(count);
iterator->Next(); // Drop JS frames count.
+ int update_feedback_count = iterator->Next();
+ CHECK_GE(update_feedback_count, 0);
+ CHECK_LE(update_feedback_count, 1);
- frames_.reserve(count);
+ if (update_feedback_count == 1) {
+ ReadUpdateFeedback(iterator, literal_array, trace_file);
+ }
std::stack<int> nested_counts;
@@ -3209,563 +3294,472 @@ void TranslatedState::Init(Address input_frame_pointer,
void TranslatedState::Prepare(Address stack_frame_pointer) {
for (auto& frame : frames_) frame.Handlify();
+ if (feedback_vector_ != nullptr) {
+ feedback_vector_handle_ =
+ Handle<FeedbackVector>(feedback_vector_, isolate());
+ feedback_vector_ = nullptr;
+ }
stack_frame_pointer_ = stack_frame_pointer;
UpdateFromPreviouslyMaterializedObjects();
}
-class TranslatedState::CapturedObjectMaterializer {
- public:
- CapturedObjectMaterializer(TranslatedState* state, int frame_index,
- int field_count)
- : state_(state), frame_index_(frame_index), field_count_(field_count) {}
-
- // Ensure the properties never contain mutable heap numbers. This is necessary
- // because the deoptimizer generalizes all maps to tagged representation
- // fields (so mutable heap numbers are not allowed).
- static void EnsurePropertiesGeneralized(Handle<Object> properties_or_hash) {
- if (properties_or_hash->IsPropertyArray()) {
- Handle<PropertyArray> properties =
- Handle<PropertyArray>::cast(properties_or_hash);
- int length = properties->length();
- for (int i = 0; i < length; i++) {
- if (properties->get(i)->IsMutableHeapNumber()) {
- Handle<HeapObject> box(HeapObject::cast(properties->get(i)));
- box->set_map(properties->GetIsolate()->heap()->heap_number_map());
- }
+TranslatedValue* TranslatedState::GetValueByObjectIndex(int object_index) {
+ CHECK_LT(static_cast<size_t>(object_index), object_positions_.size());
+ TranslatedState::ObjectPosition pos = object_positions_[object_index];
+ return &(frames_[pos.frame_index_].values_[pos.value_index_]);
+}
+
+Handle<Object> TranslatedState::InitializeObjectAt(TranslatedValue* slot) {
+ slot = ResolveCapturedObject(slot);
+
+ DisallowHeapAllocation no_allocation;
+ if (slot->materialization_state() != TranslatedValue::kFinished) {
+ std::stack<int> worklist;
+ worklist.push(slot->object_index());
+ slot->mark_finished();
+
+ while (!worklist.empty()) {
+ int index = worklist.top();
+ worklist.pop();
+ InitializeCapturedObjectAt(index, &worklist, no_allocation);
+ }
+ }
+ return slot->GetStorage();
+}
+
+void TranslatedState::InitializeCapturedObjectAt(
+ int object_index, std::stack<int>* worklist,
+ const DisallowHeapAllocation& no_allocation) {
+ CHECK_LT(static_cast<size_t>(object_index), object_positions_.size());
+ TranslatedState::ObjectPosition pos = object_positions_[object_index];
+ int value_index = pos.value_index_;
+
+ TranslatedFrame* frame = &(frames_[pos.frame_index_]);
+ TranslatedValue* slot = &(frame->values_[value_index]);
+ value_index++;
+
+ CHECK_EQ(TranslatedValue::kFinished, slot->materialization_state());
+ CHECK_EQ(TranslatedValue::kCapturedObject, slot->kind());
+
+ // Ensure all fields are initialized.
+ int children_init_index = value_index;
+ for (int i = 0; i < slot->GetChildrenCount(); i++) {
+ // If the field is an object that has not been initialized yet, queue it
+ // for initialization (and mark it as such).
+ TranslatedValue* child_slot = frame->ValueAt(children_init_index);
+ if (child_slot->kind() == TranslatedValue::kCapturedObject ||
+ child_slot->kind() == TranslatedValue::kDuplicatedObject) {
+ child_slot = ResolveCapturedObject(child_slot);
+ if (child_slot->materialization_state() != TranslatedValue::kFinished) {
+ DCHECK_EQ(TranslatedValue::kAllocated,
+ child_slot->materialization_state());
+ worklist->push(child_slot->object_index());
+ child_slot->mark_finished();
}
}
+ SkipSlots(1, frame, &children_init_index);
}
- Handle<Object> FieldAt(int* value_index) {
- CHECK_GT(field_count_, 0);
- --field_count_;
- Handle<Object> object = state_->MaterializeAt(frame_index_, value_index);
- // This is a big hammer to make sure that the materialized objects do not
- // have property arrays with mutable heap numbers (mutable heap numbers are
- // bad because we generalize maps for all materialized objects).
- EnsurePropertiesGeneralized(object);
- return object;
+ // Read the map.
+ // The map should never be materialized, so let us check we already have
+ // an existing object here.
+ CHECK_EQ(frame->values_[value_index].kind(), TranslatedValue::kTagged);
+ Handle<Map> map = Handle<Map>::cast(frame->values_[value_index].GetValue());
+ CHECK(map->IsMap());
+ value_index++;
+
+ // Handle the special cases.
+ switch (map->instance_type()) {
+ case MUTABLE_HEAP_NUMBER_TYPE:
+ case FIXED_DOUBLE_ARRAY_TYPE:
+ return;
+
+ case FIXED_ARRAY_TYPE:
+ case HASH_TABLE_TYPE:
+ case PROPERTY_ARRAY_TYPE:
+ case CONTEXT_EXTENSION_TYPE:
+ InitializeObjectWithTaggedFieldsAt(frame, &value_index, slot, map,
+ no_allocation);
+ break;
+
+ default:
+ CHECK(map->IsJSObjectMap());
+ InitializeJSObjectAt(frame, &value_index, slot, map, no_allocation);
+ break;
}
+ CHECK_EQ(value_index, children_init_index);
+}
- ~CapturedObjectMaterializer() { CHECK_EQ(0, field_count_); }
+void TranslatedState::EnsureObjectAllocatedAt(TranslatedValue* slot) {
+ slot = ResolveCapturedObject(slot);
- private:
- TranslatedState* state_;
- int frame_index_;
- int field_count_;
+ if (slot->materialization_state() == TranslatedValue::kUninitialized) {
+ std::stack<int> worklist;
+ worklist.push(slot->object_index());
+ slot->mark_allocated();
+
+ while (!worklist.empty()) {
+ int index = worklist.top();
+ worklist.pop();
+ EnsureCapturedObjectAllocatedAt(index, &worklist);
+ }
+ }
+}
+
+void TranslatedState::MaterializeFixedDoubleArray(TranslatedFrame* frame,
+ int* value_index,
+ TranslatedValue* slot,
+ Handle<Map> map) {
+ int length = Smi::cast(frame->values_[*value_index].GetRawValue())->value();
+ (*value_index)++;
+ Handle<FixedDoubleArray> array = Handle<FixedDoubleArray>::cast(
+ isolate()->factory()->NewFixedDoubleArray(length));
+ CHECK_GT(length, 0);
+ for (int i = 0; i < length; i++) {
+ CHECK_NE(TranslatedValue::kCapturedObject,
+ frame->values_[*value_index].kind());
+ Handle<Object> value = frame->values_[*value_index].GetValue();
+ if (value->IsNumber()) {
+ array->set(i, value->Number());
+ } else {
+ CHECK(value.is_identical_to(isolate()->factory()->the_hole_value()));
+ array->set_the_hole(isolate(), i);
+ }
+ (*value_index)++;
+ }
+ slot->set_storage(array);
+}
+
+void TranslatedState::MaterializeMutableHeapNumber(TranslatedFrame* frame,
+ int* value_index,
+ TranslatedValue* slot) {
+ CHECK_NE(TranslatedValue::kCapturedObject,
+ frame->values_[*value_index].kind());
+ Handle<Object> value = frame->values_[*value_index].GetValue();
+ Handle<HeapNumber> box;
+ CHECK(value->IsNumber());
+ box = isolate()->factory()->NewHeapNumber(value->Number(), MUTABLE);
+ (*value_index)++;
+ slot->set_storage(box);
+}
+
+namespace {
+
+enum DoubleStorageKind : uint8_t {
+ kStoreTagged,
+ kStoreUnboxedDouble,
+ kStoreMutableHeapNumber,
};
-Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
- TranslatedValue* slot, int frame_index, int* value_index) {
- int length = slot->GetChildrenCount();
+} // namespace
- CapturedObjectMaterializer materializer(this, frame_index, length);
+void TranslatedState::SkipSlots(int slots_to_skip, TranslatedFrame* frame,
+ int* value_index) {
+ while (slots_to_skip > 0) {
+ TranslatedValue* slot = &(frame->values_[*value_index]);
+ (*value_index)++;
+ slots_to_skip--;
- Handle<Object> result;
- if (slot->value_.ToHandle(&result)) {
- // This has been previously materialized, return the previous value.
- // We still need to skip all the nested objects.
- for (int i = 0; i < length; i++) {
- materializer.FieldAt(value_index);
+ if (slot->kind() == TranslatedValue::kCapturedObject) {
+ slots_to_skip += slot->GetChildrenCount();
}
-
- return result;
}
+}
- Handle<Object> map_object = materializer.FieldAt(value_index);
- Handle<Map> map = Map::GeneralizeAllFields(Handle<Map>::cast(map_object));
+void TranslatedState::EnsureCapturedObjectAllocatedAt(
+ int object_index, std::stack<int>* worklist) {
+ CHECK_LT(static_cast<size_t>(object_index), object_positions_.size());
+ TranslatedState::ObjectPosition pos = object_positions_[object_index];
+ int value_index = pos.value_index_;
+
+ TranslatedFrame* frame = &(frames_[pos.frame_index_]);
+ TranslatedValue* slot = &(frame->values_[value_index]);
+ value_index++;
+
+ CHECK_EQ(TranslatedValue::kAllocated, slot->materialization_state());
+ CHECK_EQ(TranslatedValue::kCapturedObject, slot->kind());
+
+ // Read the map.
+ // The map should never be materialized, so let us check we already have
+ // an existing object here.
+ CHECK_EQ(frame->values_[value_index].kind(), TranslatedValue::kTagged);
+ Handle<Map> map = Handle<Map>::cast(frame->values_[value_index].GetValue());
+ CHECK(map->IsMap());
+ value_index++;
+
+ // Handle the special cases.
switch (map->instance_type()) {
+ case FIXED_DOUBLE_ARRAY_TYPE:
+ // Materialize (i.e. allocate & initialize) the array and return since
+ // there is no need to process the children.
+ return MaterializeFixedDoubleArray(frame, &value_index, slot, map);
+
case MUTABLE_HEAP_NUMBER_TYPE:
- case HEAP_NUMBER_TYPE: {
- // Reuse the HeapNumber value directly as it is already properly
- // tagged and skip materializing the HeapNumber explicitly.
- Handle<Object> object = materializer.FieldAt(value_index);
- slot->value_ = object;
- // On 32-bit architectures, there is an extra slot there because
- // the escape analysis calculates the number of slots as
- // object-size/pointer-size. To account for this, we read out
- // any extra slots.
- for (int i = 0; i < length - 2; i++) {
- materializer.FieldAt(value_index);
- }
- return object;
- }
- case JS_OBJECT_TYPE:
- case JS_ERROR_TYPE:
- case JS_ARGUMENTS_TYPE: {
- Handle<JSObject> object =
- isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED);
- slot->value_ = object;
- Handle<Object> properties = materializer.FieldAt(value_index);
- Handle<Object> elements = materializer.FieldAt(value_index);
- object->set_raw_properties_or_hash(*properties);
- object->set_elements(FixedArrayBase::cast(*elements));
- int in_object_properties = map->GetInObjectProperties();
- for (int i = 0; i < in_object_properties; ++i) {
- Handle<Object> value = materializer.FieldAt(value_index);
- FieldIndex index = FieldIndex::ForPropertyIndex(object->map(), i);
- object->FastPropertyAtPut(index, *value);
- }
- return object;
- }
- case JS_SET_KEY_VALUE_ITERATOR_TYPE:
- case JS_SET_VALUE_ITERATOR_TYPE: {
- Handle<JSSetIterator> object = Handle<JSSetIterator>::cast(
- isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED));
- slot->value_ = object;
- Handle<Object> properties = materializer.FieldAt(value_index);
- Handle<Object> elements = materializer.FieldAt(value_index);
- Handle<Object> table = materializer.FieldAt(value_index);
- Handle<Object> index = materializer.FieldAt(value_index);
- object->set_raw_properties_or_hash(*properties);
- object->set_elements(FixedArrayBase::cast(*elements));
- object->set_table(*table);
- object->set_index(*index);
- return object;
- }
- case JS_MAP_KEY_ITERATOR_TYPE:
- case JS_MAP_KEY_VALUE_ITERATOR_TYPE:
- case JS_MAP_VALUE_ITERATOR_TYPE: {
- Handle<JSMapIterator> object = Handle<JSMapIterator>::cast(
- isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED));
- slot->value_ = object;
- Handle<Object> properties = materializer.FieldAt(value_index);
- Handle<Object> elements = materializer.FieldAt(value_index);
- Handle<Object> table = materializer.FieldAt(value_index);
- Handle<Object> index = materializer.FieldAt(value_index);
- object->set_raw_properties_or_hash(*properties);
- object->set_elements(FixedArrayBase::cast(*elements));
- object->set_table(*table);
- object->set_index(*index);
- return object;
- }
-#define ARRAY_ITERATOR_CASE(type) case type:
- ARRAY_ITERATOR_TYPE_LIST(ARRAY_ITERATOR_CASE)
-#undef ARRAY_ITERATOR_CASE
- {
- Handle<JSArrayIterator> object = Handle<JSArrayIterator>::cast(
- isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED));
- slot->value_ = object;
- // Initialize the index to zero to make the heap verifier happy.
- object->set_index(Smi::FromInt(0));
- Handle<Object> properties = materializer.FieldAt(value_index);
- Handle<Object> elements = materializer.FieldAt(value_index);
- Handle<Object> iterated_object = materializer.FieldAt(value_index);
- Handle<Object> next_index = materializer.FieldAt(value_index);
- Handle<Object> iterated_object_map = materializer.FieldAt(value_index);
- object->set_raw_properties_or_hash(*properties);
- object->set_elements(FixedArrayBase::cast(*elements));
- object->set_object(*iterated_object);
- object->set_index(*next_index);
- object->set_object_map(*iterated_object_map);
- return object;
- }
- case JS_STRING_ITERATOR_TYPE: {
- Handle<JSStringIterator> object = Handle<JSStringIterator>::cast(
- isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED));
- slot->value_ = object;
- // Initialize the index to zero to make the heap verifier happy.
- object->set_index(0);
- Handle<Object> properties = materializer.FieldAt(value_index);
- Handle<Object> elements = materializer.FieldAt(value_index);
- Handle<Object> iterated_string = materializer.FieldAt(value_index);
- Handle<Object> next_index = materializer.FieldAt(value_index);
- object->set_raw_properties_or_hash(*properties);
- object->set_elements(FixedArrayBase::cast(*elements));
- CHECK(iterated_string->IsString());
- object->set_string(String::cast(*iterated_string));
- CHECK(next_index->IsSmi());
- object->set_index(Smi::ToInt(*next_index));
- return object;
- }
- case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE: {
- Handle<JSAsyncFromSyncIterator> object =
- Handle<JSAsyncFromSyncIterator>::cast(
- isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED));
- slot->value_ = object;
- Handle<Object> properties = materializer.FieldAt(value_index);
- Handle<Object> elements = materializer.FieldAt(value_index);
- Handle<Object> sync_iterator = materializer.FieldAt(value_index);
- object->set_raw_properties_or_hash(*properties);
- object->set_elements(FixedArrayBase::cast(*elements));
- object->set_sync_iterator(JSReceiver::cast(*sync_iterator));
- return object;
- }
- case JS_ARRAY_TYPE: {
- Handle<JSArray> object = Handle<JSArray>::cast(
- isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED));
- slot->value_ = object;
- Handle<Object> properties = materializer.FieldAt(value_index);
- Handle<Object> elements = materializer.FieldAt(value_index);
- Handle<Object> array_length = materializer.FieldAt(value_index);
- object->set_raw_properties_or_hash(*properties);
- object->set_elements(FixedArrayBase::cast(*elements));
- object->set_length(*array_length);
- int in_object_properties = map->GetInObjectProperties();
- for (int i = 0; i < in_object_properties; ++i) {
- Handle<Object> value = materializer.FieldAt(value_index);
- FieldIndex index = FieldIndex::ForPropertyIndex(object->map(), i);
- object->FastPropertyAtPut(index, *value);
- }
- return object;
- }
- case JS_BOUND_FUNCTION_TYPE: {
- Handle<JSBoundFunction> object = Handle<JSBoundFunction>::cast(
- isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED));
- slot->value_ = object;
- Handle<Object> properties = materializer.FieldAt(value_index);
- Handle<Object> elements = materializer.FieldAt(value_index);
- Handle<Object> bound_target_function = materializer.FieldAt(value_index);
- Handle<Object> bound_this = materializer.FieldAt(value_index);
- Handle<Object> bound_arguments = materializer.FieldAt(value_index);
- object->set_raw_properties_or_hash(*properties);
- object->set_elements(FixedArrayBase::cast(*elements));
- object->set_bound_target_function(
- JSReceiver::cast(*bound_target_function));
- object->set_bound_this(*bound_this);
- object->set_bound_arguments(FixedArray::cast(*bound_arguments));
- return object;
- }
- case JS_FUNCTION_TYPE: {
- Handle<JSFunction> object = isolate_->factory()->NewFunction(
- map, handle(isolate_->object_function()->shared()),
- handle(isolate_->context()), NOT_TENURED);
- slot->value_ = object;
- // We temporarily allocated a JSFunction for the {Object} function
- // within the current context, to break cycles in the object graph.
- // The correct function and context will be set below once available.
- Handle<Object> properties = materializer.FieldAt(value_index);
- Handle<Object> elements = materializer.FieldAt(value_index);
- Handle<Object> shared = materializer.FieldAt(value_index);
- Handle<Object> context = materializer.FieldAt(value_index);
- Handle<Object> vector_cell = materializer.FieldAt(value_index);
- Handle<Object> code = materializer.FieldAt(value_index);
- bool has_prototype_slot = map->has_prototype_slot();
- Handle<Object> prototype;
- if (has_prototype_slot) {
- prototype = materializer.FieldAt(value_index);
- }
- object->set_map(*map);
- object->set_raw_properties_or_hash(*properties);
- object->set_elements(FixedArrayBase::cast(*elements));
- object->set_shared(SharedFunctionInfo::cast(*shared));
- object->set_context(Context::cast(*context));
- object->set_feedback_vector_cell(Cell::cast(*vector_cell));
- object->set_code(Code::cast(*code));
- if (has_prototype_slot) {
- object->set_prototype_or_initial_map(*prototype);
- }
- int in_object_properties = map->GetInObjectProperties();
- for (int i = 0; i < in_object_properties; ++i) {
- Handle<Object> value = materializer.FieldAt(value_index);
- FieldIndex index = FieldIndex::ForPropertyIndex(object->map(), i);
- object->FastPropertyAtPut(index, *value);
- }
- return object;
- }
- case JS_ASYNC_GENERATOR_OBJECT_TYPE:
- case JS_GENERATOR_OBJECT_TYPE: {
- Handle<JSGeneratorObject> object = Handle<JSGeneratorObject>::cast(
- isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED));
- slot->value_ = object;
- Handle<Object> properties = materializer.FieldAt(value_index);
- Handle<Object> elements = materializer.FieldAt(value_index);
- Handle<Object> function = materializer.FieldAt(value_index);
- Handle<Object> context = materializer.FieldAt(value_index);
- Handle<Object> receiver = materializer.FieldAt(value_index);
- Handle<Object> input_or_debug_pos = materializer.FieldAt(value_index);
- Handle<Object> resume_mode = materializer.FieldAt(value_index);
- Handle<Object> continuation_offset = materializer.FieldAt(value_index);
- Handle<Object> register_file = materializer.FieldAt(value_index);
- object->set_raw_properties_or_hash(*properties);
- object->set_elements(FixedArrayBase::cast(*elements));
- object->set_function(JSFunction::cast(*function));
- object->set_context(Context::cast(*context));
- object->set_receiver(*receiver);
- object->set_input_or_debug_pos(*input_or_debug_pos);
- object->set_resume_mode(Smi::ToInt(*resume_mode));
- object->set_continuation(Smi::ToInt(*continuation_offset));
- object->set_register_file(FixedArray::cast(*register_file));
-
- if (object->IsJSAsyncGeneratorObject()) {
- auto generator = Handle<JSAsyncGeneratorObject>::cast(object);
- Handle<Object> queue = materializer.FieldAt(value_index);
- Handle<Object> awaited_promise = materializer.FieldAt(value_index);
- generator->set_queue(HeapObject::cast(*queue));
- generator->set_awaited_promise(HeapObject::cast(*awaited_promise));
+ // Materialize (i.e. allocate & initialize) the heap number and return.
+ // There is no need to process the children.
+ return MaterializeMutableHeapNumber(frame, &value_index, slot);
+
+ case FIXED_ARRAY_TYPE:
+ case HASH_TABLE_TYPE: {
+ // Check we have the right size.
+ int array_length =
+ Smi::cast(frame->values_[value_index].GetRawValue())->value();
+
+ int instance_size = FixedArray::SizeFor(array_length);
+ CHECK_EQ(instance_size, slot->GetChildrenCount() * kPointerSize);
+
+ // Canonicalize empty fixed array.
+ if (*map == isolate()->heap()->empty_fixed_array()->map() &&
+ array_length == 0) {
+ slot->set_storage(isolate()->factory()->empty_fixed_array());
+ } else {
+ slot->set_storage(AllocateStorageFor(slot));
}
- int in_object_properties = map->GetInObjectProperties();
- for (int i = 0; i < in_object_properties; ++i) {
- Handle<Object> value = materializer.FieldAt(value_index);
- FieldIndex index = FieldIndex::ForPropertyIndex(object->map(), i);
- object->FastPropertyAtPut(index, *value);
- }
- return object;
- }
- case CONS_STRING_TYPE: {
- Handle<ConsString> object = Handle<ConsString>::cast(
- isolate_->factory()
- ->NewConsString(isolate_->factory()->undefined_string(),
- isolate_->factory()->undefined_string())
- .ToHandleChecked());
- slot->value_ = object;
- Handle<Object> hash = materializer.FieldAt(value_index);
- Handle<Object> string_length = materializer.FieldAt(value_index);
- Handle<Object> first = materializer.FieldAt(value_index);
- Handle<Object> second = materializer.FieldAt(value_index);
- object->set_map(*map);
- object->set_length(Smi::ToInt(*string_length));
- object->set_first(String::cast(*first));
- object->set_second(String::cast(*second));
- CHECK(hash->IsNumber()); // The {Name::kEmptyHashField} value.
- return object;
+ // Make sure all the remaining children (after the map) are allocated.
+ return EnsureChildrenAllocated(slot->GetChildrenCount() - 1, frame,
+ &value_index, worklist);
}
- case CONTEXT_EXTENSION_TYPE: {
- Handle<ContextExtension> object =
- isolate_->factory()->NewContextExtension(
- isolate_->factory()->NewScopeInfo(1),
- isolate_->factory()->undefined_value());
- slot->value_ = object;
- Handle<Object> scope_info = materializer.FieldAt(value_index);
- Handle<Object> extension = materializer.FieldAt(value_index);
- object->set_scope_info(ScopeInfo::cast(*scope_info));
- object->set_extension(*extension);
- return object;
+
+ case PROPERTY_ARRAY_TYPE: {
+ // Check we have the right size.
+ int length_or_hash =
+ Smi::cast(frame->values_[value_index].GetRawValue())->value();
+ int array_length = PropertyArray::LengthField::decode(length_or_hash);
+ int instance_size = PropertyArray::SizeFor(array_length);
+ CHECK_EQ(instance_size, slot->GetChildrenCount() * kPointerSize);
+
+ slot->set_storage(AllocateStorageFor(slot));
+ // Make sure all the remaining children (after the map) are allocated.
+ return EnsureChildrenAllocated(slot->GetChildrenCount() - 1, frame,
+ &value_index, worklist);
}
- case HASH_TABLE_TYPE:
- case FIXED_ARRAY_TYPE: {
- Handle<Object> lengthObject = materializer.FieldAt(value_index);
- int32_t array_length = 0;
- CHECK(lengthObject->ToInt32(&array_length));
- Handle<FixedArray> object =
- isolate_->factory()->NewFixedArray(array_length);
- // We need to set the map, because the fixed array we are
- // materializing could be a context or an arguments object,
- // in which case we must retain that information.
- object->set_map(*map);
- slot->value_ = object;
- for (int i = 0; i < array_length; ++i) {
- Handle<Object> value = materializer.FieldAt(value_index);
- object->set(i, *value);
- }
- return object;
+
+ case CONTEXT_EXTENSION_TYPE: {
+ CHECK_EQ(map->instance_size(), slot->GetChildrenCount() * kPointerSize);
+ slot->set_storage(AllocateStorageFor(slot));
+ // Make sure all the remaining children (after the map) are allocated.
+ return EnsureChildrenAllocated(slot->GetChildrenCount() - 1, frame,
+ &value_index, worklist);
}
- case PROPERTY_ARRAY_TYPE: {
- DCHECK_EQ(*map, isolate_->heap()->property_array_map());
- Handle<Object> lengthObject = materializer.FieldAt(value_index);
- int32_t array_length = 0;
- CHECK(lengthObject->ToInt32(&array_length));
- Handle<PropertyArray> object =
- isolate_->factory()->NewPropertyArray(array_length);
- slot->value_ = object;
- for (int i = 0; i < array_length; ++i) {
- Handle<Object> value = materializer.FieldAt(value_index);
- object->set(i, *value);
- }
- return object;
- }
- case FIXED_DOUBLE_ARRAY_TYPE: {
- DCHECK_EQ(*map, isolate_->heap()->fixed_double_array_map());
- Handle<Object> lengthObject = materializer.FieldAt(value_index);
- int32_t array_length = 0;
- CHECK(lengthObject->ToInt32(&array_length));
- Handle<FixedArrayBase> object =
- isolate_->factory()->NewFixedDoubleArray(array_length);
- slot->value_ = object;
- if (array_length > 0) {
- Handle<FixedDoubleArray> double_array =
- Handle<FixedDoubleArray>::cast(object);
- for (int i = 0; i < array_length; ++i) {
- Handle<Object> value = materializer.FieldAt(value_index);
- if (value.is_identical_to(isolate_->factory()->the_hole_value())) {
- double_array->set_the_hole(isolate_, i);
- } else {
- CHECK(value->IsNumber());
- double_array->set(i, value->Number());
- }
- }
+
+ default:
+ CHECK(map->IsJSObjectMap());
+ EnsureJSObjectAllocated(slot, map);
+ TranslatedValue* properties_slot = &(frame->values_[value_index]);
+ value_index++;
+ if (properties_slot->kind() == TranslatedValue::kCapturedObject) {
+ // If we are materializing the property array, make sure we put
+ // the mutable heap numbers at the right places.
+ EnsurePropertiesAllocatedAndMarked(properties_slot, map);
+ EnsureChildrenAllocated(properties_slot->GetChildrenCount(), frame,
+ &value_index, worklist);
}
- return object;
- }
- case JS_REGEXP_TYPE: {
- Handle<JSRegExp> object = Handle<JSRegExp>::cast(
- isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED));
- slot->value_ = object;
- Handle<Object> properties = materializer.FieldAt(value_index);
- Handle<Object> elements = materializer.FieldAt(value_index);
- Handle<Object> data = materializer.FieldAt(value_index);
- Handle<Object> source = materializer.FieldAt(value_index);
- Handle<Object> flags = materializer.FieldAt(value_index);
- Handle<Object> last_index = materializer.FieldAt(value_index);
- object->set_raw_properties_or_hash(*properties);
- object->set_elements(FixedArrayBase::cast(*elements));
- object->set_data(*data);
- object->set_source(*source);
- object->set_flags(*flags);
- object->set_last_index(*last_index);
- return object;
- }
- case STRING_TYPE:
- case ONE_BYTE_STRING_TYPE:
- case CONS_ONE_BYTE_STRING_TYPE:
- case SLICED_STRING_TYPE:
- case SLICED_ONE_BYTE_STRING_TYPE:
- case EXTERNAL_STRING_TYPE:
- case EXTERNAL_ONE_BYTE_STRING_TYPE:
- case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
- case SHORT_EXTERNAL_STRING_TYPE:
- case SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE:
- case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
- case THIN_STRING_TYPE:
- case THIN_ONE_BYTE_STRING_TYPE:
- case INTERNALIZED_STRING_TYPE:
- case ONE_BYTE_INTERNALIZED_STRING_TYPE:
- case EXTERNAL_INTERNALIZED_STRING_TYPE:
- case EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE:
- case EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
- case SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE:
- case SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE:
- case SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
- case SYMBOL_TYPE:
- case ODDBALL_TYPE:
- case JS_GLOBAL_OBJECT_TYPE:
- case JS_GLOBAL_PROXY_TYPE:
- case JS_API_OBJECT_TYPE:
- case JS_SPECIAL_API_OBJECT_TYPE:
- case JS_VALUE_TYPE:
- case JS_MESSAGE_OBJECT_TYPE:
- case JS_DATE_TYPE:
- case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
- case JS_MODULE_NAMESPACE_TYPE:
- case JS_ARRAY_BUFFER_TYPE:
- case JS_TYPED_ARRAY_TYPE:
- case JS_DATA_VIEW_TYPE:
- case JS_SET_TYPE:
- case JS_MAP_TYPE:
- case JS_WEAK_MAP_TYPE:
- case JS_WEAK_SET_TYPE:
- case JS_PROMISE_TYPE:
- case JS_PROXY_TYPE:
- case MAP_TYPE:
- case ALLOCATION_SITE_TYPE:
- case ACCESSOR_INFO_TYPE:
- case SHARED_FUNCTION_INFO_TYPE:
- case FUNCTION_TEMPLATE_INFO_TYPE:
- case ACCESSOR_PAIR_TYPE:
- case BYTE_ARRAY_TYPE:
- case BYTECODE_ARRAY_TYPE:
- case DESCRIPTOR_ARRAY_TYPE:
- case TRANSITION_ARRAY_TYPE:
- case FEEDBACK_VECTOR_TYPE:
- case FOREIGN_TYPE:
- case SCRIPT_TYPE:
- case CODE_TYPE:
- case PROPERTY_CELL_TYPE:
- case BIGINT_TYPE:
- case MODULE_TYPE:
- case MODULE_INFO_ENTRY_TYPE:
- case FREE_SPACE_TYPE:
-#define FIXED_TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case FIXED_##TYPE##_ARRAY_TYPE:
- TYPED_ARRAYS(FIXED_TYPED_ARRAY_CASE)
-#undef FIXED_TYPED_ARRAY_CASE
- case FILLER_TYPE:
- case ACCESS_CHECK_INFO_TYPE:
- case INTERCEPTOR_INFO_TYPE:
- case OBJECT_TEMPLATE_INFO_TYPE:
- case ALLOCATION_MEMENTO_TYPE:
- case ALIASED_ARGUMENTS_ENTRY_TYPE:
- case PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE:
- case PROMISE_REACTION_JOB_INFO_TYPE:
- case DEBUG_INFO_TYPE:
- case STACK_FRAME_INFO_TYPE:
- case CELL_TYPE:
- case WEAK_CELL_TYPE:
- case SMALL_ORDERED_HASH_MAP_TYPE:
- case SMALL_ORDERED_HASH_SET_TYPE:
- case CODE_DATA_CONTAINER_TYPE:
- case PROTOTYPE_INFO_TYPE:
- case TUPLE2_TYPE:
- case TUPLE3_TYPE:
- case ASYNC_GENERATOR_REQUEST_TYPE:
- case WASM_MODULE_TYPE:
- case WASM_INSTANCE_TYPE:
- case WASM_MEMORY_TYPE:
- case WASM_TABLE_TYPE:
- OFStream os(stderr);
- os << "[couldn't handle instance type " << map->instance_type() << "]"
- << std::endl;
- UNREACHABLE();
- break;
+ // Make sure all the remaining children (after the map and properties) are
+ // allocated.
+ return EnsureChildrenAllocated(slot->GetChildrenCount() - 2, frame,
+ &value_index, worklist);
}
UNREACHABLE();
}
-Handle<Object> TranslatedState::MaterializeAt(int frame_index,
- int* value_index) {
- CHECK_LT(static_cast<size_t>(frame_index), frames().size());
- TranslatedFrame* frame = &(frames_[frame_index]);
- CHECK_LT(static_cast<size_t>(*value_index), frame->values_.size());
+void TranslatedState::EnsureChildrenAllocated(int count, TranslatedFrame* frame,
+ int* value_index,
+ std::stack<int>* worklist) {
+ // Ensure all children are allocated.
+ for (int i = 0; i < count; i++) {
+ // If the field is an object that has not been allocated yet, queue it
+ // for initialization (and mark it as such).
+ TranslatedValue* child_slot = frame->ValueAt(*value_index);
+ if (child_slot->kind() == TranslatedValue::kCapturedObject ||
+ child_slot->kind() == TranslatedValue::kDuplicatedObject) {
+ child_slot = ResolveCapturedObject(child_slot);
+ if (child_slot->materialization_state() ==
+ TranslatedValue::kUninitialized) {
+ worklist->push(child_slot->object_index());
+ child_slot->mark_allocated();
+ }
+ } else {
+ // Make sure the simple values (heap numbers, etc.) are properly
+ // initialized.
+ child_slot->MaterializeSimple();
+ }
+ SkipSlots(1, frame, value_index);
+ }
+}
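
The allocation pass above walks the captured-object graph iteratively: a child that is itself a captured (or duplicated) object is pushed onto the worklist and marked allocated, which both avoids revisiting nodes and lets cycles between captured objects resolve. A minimal, self-contained sketch of that traversal, with the V8 types replaced by a hypothetical Node struct (illustrative only):

    #include <stack>
    #include <vector>

    struct Node {
      bool allocated = false;        // stands in for materialization_state()
      std::vector<Node*> children;   // stands in for the translated values
    };

    void EnsureAllocated(Node* root, std::stack<Node*>* worklist) {
      root->allocated = true;
      worklist->push(root);
      while (!worklist->empty()) {
        Node* current = worklist->top();
        worklist->pop();
        for (Node* child : current->children) {
          if (!child->allocated) {   // visit each object exactly once
            child->allocated = true;
            worklist->push(child);
          }
        }
      }
    }
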
- TranslatedValue* slot = &(frame->values_[*value_index]);
- (*value_index)++;
+void TranslatedState::EnsurePropertiesAllocatedAndMarked(
+ TranslatedValue* properties_slot, Handle<Map> map) {
+ CHECK_EQ(TranslatedValue::kUninitialized,
+ properties_slot->materialization_state());
- switch (slot->kind()) {
- case TranslatedValue::kTagged:
- case TranslatedValue::kInt32:
- case TranslatedValue::kUInt32:
- case TranslatedValue::kBoolBit:
- case TranslatedValue::kFloat:
- case TranslatedValue::kDouble: {
- slot->MaterializeSimple();
- Handle<Object> value = slot->GetValue();
- if (value->IsMutableHeapNumber()) {
- HeapNumber::cast(*value)->set_map(isolate()->heap()->heap_number_map());
- }
- return value;
+ Handle<ByteArray> object_storage = AllocateStorageFor(properties_slot);
+ properties_slot->mark_allocated();
+ properties_slot->set_storage(object_storage);
+
+ // Set markers for the double properties.
+ Handle<DescriptorArray> descriptors(map->instance_descriptors());
+ int field_count = map->NumberOfOwnDescriptors();
+ for (int i = 0; i < field_count; i++) {
+ FieldIndex index = FieldIndex::ForDescriptor(*map, i);
+ if (descriptors->GetDetails(i).representation().IsDouble() &&
+ !index.is_inobject()) {
+ CHECK(!map->IsUnboxedDoubleField(index));
+ int outobject_index = index.outobject_array_index();
+ int array_index = outobject_index * kPointerSize;
+ object_storage->set(array_index, kStoreMutableHeapNumber);
}
+ }
+}
+
+Handle<ByteArray> TranslatedState::AllocateStorageFor(TranslatedValue* slot) {
+ int allocate_size =
+ ByteArray::LengthFor(slot->GetChildrenCount() * kPointerSize);
+ // It is important to allocate all the objects tenured so that the marker
+ // does not visit them.
+ Handle<ByteArray> object_storage =
+ isolate()->factory()->NewByteArray(allocate_size, TENURED);
+ for (int i = 0; i < object_storage->length(); i++) {
+ object_storage->set(i, kStoreTagged);
+ }
+ return object_storage;
+}
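
AllocateStorageFor stages every captured object as a tenured ByteArray with one marker byte per pointer-sized slot, defaulting to kStoreTagged; EnsureJSObjectAllocated and EnsurePropertiesAllocatedAndMarked then overwrite just the slots that hold doubles so the later initialization pass knows how to write each field. A hedged sketch of the idea using plain standard-library types (the marker names mirror the patch, everything else is illustrative):

    #include <cstdint>
    #include <vector>

    enum StorageMarker : uint8_t {
      kStoreTagged,            // ordinary tagged pointer store
      kStoreUnboxedDouble,     // raw double written in place
      kStoreMutableHeapNumber  // pointer to a boxed, mutable number
    };

    // One marker per field; everything starts out as a plain tagged store.
    std::vector<uint8_t> AllocateMarkers(int field_count) {
      return std::vector<uint8_t>(field_count, kStoreTagged);
    }
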
- case TranslatedValue::kCapturedObject: {
- // The map must be a tagged object.
- CHECK_EQ(frame->values_[*value_index].kind(), TranslatedValue::kTagged);
- CHECK(frame->values_[*value_index].GetValue()->IsMap());
- return MaterializeCapturedObjectAt(slot, frame_index, value_index);
+void TranslatedState::EnsureJSObjectAllocated(TranslatedValue* slot,
+ Handle<Map> map) {
+ CHECK_EQ(map->instance_size(), slot->GetChildrenCount() * kPointerSize);
+
+ Handle<ByteArray> object_storage = AllocateStorageFor(slot);
+ // Now we handle the interesting (JSObject) case.
+ Handle<DescriptorArray> descriptors(map->instance_descriptors());
+ int field_count = map->NumberOfOwnDescriptors();
+
+ // Set markers for the double properties.
+ for (int i = 0; i < field_count; i++) {
+ FieldIndex index = FieldIndex::ForDescriptor(*map, i);
+ if (descriptors->GetDetails(i).representation().IsDouble() &&
+ index.is_inobject()) {
+ CHECK_GE(index.index(), FixedArray::kHeaderSize / kPointerSize);
+ int array_index = index.index() * kPointerSize - FixedArray::kHeaderSize;
+ uint8_t marker = map->IsUnboxedDoubleField(index)
+ ? kStoreUnboxedDouble
+ : kStoreMutableHeapNumber;
+ object_storage->set(array_index, marker);
}
- case TranslatedValue::kDuplicatedObject: {
- int object_index = slot->object_index();
- TranslatedState::ObjectPosition pos = object_positions_[object_index];
+ }
+ slot->set_storage(object_storage);
+}
+
+Handle<Object> TranslatedState::GetValueAndAdvance(TranslatedFrame* frame,
+ int* value_index) {
+ TranslatedValue* slot = frame->ValueAt(*value_index);
+ SkipSlots(1, frame, value_index);
+ if (slot->kind() == TranslatedValue::kDuplicatedObject) {
+ slot = ResolveCapturedObject(slot);
+ }
+ CHECK_NE(TranslatedValue::kUninitialized, slot->materialization_state());
+ return slot->GetStorage();
+}
- // Make sure the duplicate is referring to a previous object.
- CHECK(pos.frame_index_ < frame_index ||
- (pos.frame_index_ == frame_index &&
- pos.value_index_ < *value_index - 1));
+void TranslatedState::InitializeJSObjectAt(
+ TranslatedFrame* frame, int* value_index, TranslatedValue* slot,
+ Handle<Map> map, const DisallowHeapAllocation& no_allocation) {
+ Handle<HeapObject> object_storage = Handle<HeapObject>::cast(slot->storage_);
+ DCHECK_EQ(TranslatedValue::kCapturedObject, slot->kind());
- Handle<Object> object =
- frames_[pos.frame_index_].values_[pos.value_index_].GetValue();
+ // The object should have at least a map and some payload.
+ CHECK_GE(slot->GetChildrenCount(), 2);
- // The object should have a (non-sentinel) value.
- CHECK(!object.is_null() &&
- !object.is_identical_to(isolate_->factory()->arguments_marker()));
+ // Notify the concurrent marker about the layout change.
+ isolate()->heap()->NotifyObjectLayoutChange(
+ *object_storage, slot->GetChildrenCount() * kPointerSize, no_allocation);
- slot->value_ = object;
- return object;
+ // Fill the property array field.
+ {
+ Handle<Object> properties = GetValueAndAdvance(frame, value_index);
+ WRITE_FIELD(*object_storage, JSObject::kPropertiesOrHashOffset,
+ *properties);
+ WRITE_BARRIER(isolate()->heap(), *object_storage,
+ JSObject::kPropertiesOrHashOffset, *properties);
+ }
+
+ // For all the other fields we first look at the fixed array and check the
+ // marker to see if we store an unboxed double.
+ DCHECK_EQ(kPointerSize, JSObject::kPropertiesOrHashOffset);
+ for (int i = 2; i < slot->GetChildrenCount(); i++) {
+ // Initialize and extract the value from its slot.
+ Handle<Object> field_value = GetValueAndAdvance(frame, value_index);
+
+ // Read out the marker and ensure the field is consistent with
+ // what the markers in the storage say (note that all heap numbers
+ // should be fully initialized by now).
+ int offset = i * kPointerSize;
+ uint8_t marker = READ_UINT8_FIELD(*object_storage, offset);
+ if (marker == kStoreUnboxedDouble) {
+ double double_field_value;
+ if (field_value->IsSmi()) {
+ double_field_value = Smi::cast(*field_value)->value();
+ } else {
+ CHECK(field_value->IsHeapNumber());
+ double_field_value = HeapNumber::cast(*field_value)->value();
+ }
+ WRITE_DOUBLE_FIELD(*object_storage, offset, double_field_value);
+ } else if (marker == kStoreMutableHeapNumber) {
+ CHECK(field_value->IsMutableHeapNumber());
+ WRITE_FIELD(*object_storage, offset, *field_value);
+ WRITE_BARRIER(isolate()->heap(), *object_storage, offset, *field_value);
+ } else {
+ CHECK_EQ(kStoreTagged, marker);
+ WRITE_FIELD(*object_storage, offset, *field_value);
+ WRITE_BARRIER(isolate()->heap(), *object_storage, offset, *field_value);
}
+ }
+ object_storage->synchronized_set_map(*map);
+}
- case TranslatedValue::kInvalid:
- UNREACHABLE();
- break;
+void TranslatedState::InitializeObjectWithTaggedFieldsAt(
+ TranslatedFrame* frame, int* value_index, TranslatedValue* slot,
+ Handle<Map> map, const DisallowHeapAllocation& no_allocation) {
+ Handle<HeapObject> object_storage = Handle<HeapObject>::cast(slot->storage_);
+
+ // Skip the writes if we already have the canonical empty fixed array.
+ if (*object_storage == isolate()->heap()->empty_fixed_array()) {
+ CHECK_EQ(2, slot->GetChildrenCount());
+ Handle<Object> length_value = GetValueAndAdvance(frame, value_index);
+ CHECK_EQ(*length_value, Smi::FromInt(0));
+ return;
}
- FATAL("We should never get here - unexpected deopt slot kind.");
- return Handle<Object>::null();
+ // Notify the concurrent marker about the layout change.
+ isolate()->heap()->NotifyObjectLayoutChange(
+ *object_storage, slot->GetChildrenCount() * kPointerSize, no_allocation);
+
+ // Write the fields to the object.
+ for (int i = 1; i < slot->GetChildrenCount(); i++) {
+ Handle<Object> field_value = GetValueAndAdvance(frame, value_index);
+ int offset = i * kPointerSize;
+ uint8_t marker = READ_UINT8_FIELD(*object_storage, offset);
+ if (i > 1 && marker == kStoreMutableHeapNumber) {
+ CHECK(field_value->IsMutableHeapNumber());
+ } else {
+ CHECK(marker == kStoreTagged || i == 1);
+ CHECK(!field_value->IsMutableHeapNumber());
+ }
+
+ WRITE_FIELD(*object_storage, offset, *field_value);
+ WRITE_BARRIER(isolate()->heap(), *object_storage, offset, *field_value);
+ }
+
+ object_storage->synchronized_set_map(*map);
}
-Handle<Object> TranslatedState::MaterializeObjectAt(int object_index) {
- CHECK_LT(static_cast<size_t>(object_index), object_positions_.size());
- TranslatedState::ObjectPosition pos = object_positions_[object_index];
- return MaterializeAt(pos.frame_index_, &(pos.value_index_));
+TranslatedValue* TranslatedState::ResolveCapturedObject(TranslatedValue* slot) {
+ while (slot->kind() == TranslatedValue::kDuplicatedObject) {
+ slot = GetValueByObjectIndex(slot->object_index());
+ }
+ CHECK_EQ(TranslatedValue::kCapturedObject, slot->kind());
+ return slot;
}
TranslatedFrame* TranslatedState::GetFrameFromJSFrameIndex(int jsframe_index) {
@@ -3818,7 +3812,7 @@ void TranslatedState::StoreMaterializedValuesAndDeopt(JavaScriptFrame* frame) {
bool new_store = false;
if (previously_materialized_objects.is_null()) {
previously_materialized_objects =
- isolate_->factory()->NewFixedArray(length);
+ isolate_->factory()->NewFixedArray(length, TENURED);
for (int i = 0; i < length; i++) {
previously_materialized_objects->set(i, *marker);
}
@@ -3835,6 +3829,10 @@ void TranslatedState::StoreMaterializedValuesAndDeopt(JavaScriptFrame* frame) {
CHECK(value_info->IsMaterializedObject());
+ // Skip duplicate objects (i.e., those that point to some
+ // other object id).
+ if (value_info->object_index() != i) continue;
+
Handle<Object> value(value_info->GetRawValue(), isolate_);
if (!value.is_identical_to(marker)) {
@@ -3878,11 +3876,57 @@ void TranslatedState::UpdateFromPreviouslyMaterializedObjects() {
&(frames_[pos.frame_index_].values_[pos.value_index_]);
CHECK(value_info->IsMaterializedObject());
- value_info->value_ =
- Handle<Object>(previously_materialized_objects->get(i), isolate_);
+ if (value_info->kind() == TranslatedValue::kCapturedObject) {
+ value_info->set_initialized_storage(
+ Handle<Object>(previously_materialized_objects->get(i), isolate_));
+ }
+ }
+ }
+}
+
+void TranslatedState::VerifyMaterializedObjects() {
+#if VERIFY_HEAP
+ int length = static_cast<int>(object_positions_.size());
+ for (int i = 0; i < length; i++) {
+ TranslatedValue* slot = GetValueByObjectIndex(i);
+ if (slot->kind() == TranslatedValue::kCapturedObject) {
+ CHECK_EQ(slot, GetValueByObjectIndex(slot->object_index()));
+ if (slot->materialization_state() == TranslatedValue::kFinished) {
+ slot->GetStorage()->ObjectVerify();
+ } else {
+ CHECK_EQ(slot->materialization_state(),
+ TranslatedValue::kUninitialized);
+ }
}
}
+#endif
+}
+
+bool TranslatedState::DoUpdateFeedback() {
+ if (!feedback_vector_handle_.is_null()) {
+ CHECK(!feedback_slot_.IsInvalid());
+ isolate()->CountUsage(v8::Isolate::kDeoptimizerDisableSpeculation);
+ CallICNexus nexus(feedback_vector_handle_, feedback_slot_);
+ nexus.SetSpeculationMode(SpeculationMode::kDisallowSpeculation);
+ return true;
+ }
+ return false;
+}
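
DoUpdateFeedback closes the speculation loop: when a deopt carries an UPDATE_FEEDBACK record, the call IC slot is flipped to kDisallowSpeculation so the next optimized compile does not repeat the failed guess. A minimal sketch of that round-trip, using hypothetical stand-ins for the feedback types:

    enum class SpeculationMode { kAllowSpeculation, kDisallowSpeculation };

    struct CallFeedback {  // stand-in for a CallIC feedback slot
      SpeculationMode mode = SpeculationMode::kAllowSpeculation;
    };

    void OnSpeculationFailure(CallFeedback* feedback) {
      // Recorded at deopt time; consulted by the next compilation.
      feedback->mode = SpeculationMode::kDisallowSpeculation;
    }
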
+
+void TranslatedState::ReadUpdateFeedback(TranslationIterator* iterator,
+ FixedArray* literal_array,
+ FILE* trace_file) {
+ CHECK_EQ(Translation::UPDATE_FEEDBACK, iterator->Next());
+ feedback_vector_ = FeedbackVector::cast(literal_array->get(iterator->Next()));
+ feedback_slot_ = FeedbackSlot(iterator->Next());
+ if (trace_file != nullptr) {
+ PrintF(trace_file, " reading FeedbackVector (slot %d)\n",
+ feedback_slot_.ToInt());
+ }
}
} // namespace internal
} // namespace v8
+
+// Undefine the heap manipulation macros.
+#include "src/objects/object-macros-undef.h"
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index b8ab648b9c..e72878654d 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -5,6 +5,7 @@
#ifndef V8_DEOPTIMIZER_H_
#define V8_DEOPTIMIZER_H_
+#include <stack>
#include <vector>
#include "src/allocation.h"
@@ -31,6 +32,9 @@ class TranslatedValue {
// Returns heap()->arguments_marker() if allocation would be
// necessary to get the value.
Object* GetRawValue() const;
+
+ // Getter for the value, takes care of materializing the subgraph
+ // reachable from this value.
Handle<Object> GetValue();
bool IsMaterializedObject() const;
@@ -40,7 +44,7 @@ class TranslatedValue {
friend class TranslatedState;
friend class TranslatedFrame;
- enum Kind {
+ enum Kind : uint8_t {
kInvalid,
kTagged,
kInt32,
@@ -56,9 +60,20 @@ class TranslatedValue {
kDuplicatedObject // Duplicated object of a deferred object.
};
+ enum MaterializationState : uint8_t {
+ kUninitialized,
+ kAllocated, // Storage for the object has been allocated (or
+ // enqueued for allocation).
+ kFinished, // The object has been initialized (or enqueued for
+ // initialization).
+ };
+
TranslatedValue(TranslatedState* container, Kind kind)
: kind_(kind), container_(container) {}
Kind kind() const { return kind_; }
+ MaterializationState materialization_state() const {
+ return materialization_state_;
+ }
void Handlify();
int GetChildrenCount() const;
@@ -76,15 +91,25 @@ class TranslatedValue {
Isolate* isolate() const;
void MaterializeSimple();
+ void set_storage(Handle<HeapObject> storage) { storage_ = storage; }
+ void set_initialized_storage(Handle<Object> storage);
+ void mark_finished() { materialization_state_ = kFinished; }
+ void mark_allocated() { materialization_state_ = kAllocated; }
+
+ Handle<Object> GetStorage() {
+ DCHECK_NE(kUninitialized, materialization_state());
+ return storage_;
+ }
+
Kind kind_;
+ MaterializationState materialization_state_ = kUninitialized;
TranslatedState* container_; // This is only needed for materialization of
// objects and constructing handles (to get
// to the isolate).
- MaybeHandle<Object> value_; // Before handlification, this is always null,
- // after materialization it is never null,
- // in between it is only null if the value needs
- // to be materialized.
+ Handle<Object> storage_; // Contains the materialized value or the
+ // byte-array that will later be morphed into
+ // the materialized object.
struct MaterializedObjectInfo {
int id_;
@@ -211,6 +236,7 @@ class TranslatedFrame {
height_(height) {}
void Add(const TranslatedValue& value) { values_.push_back(value); }
+ TranslatedValue* ValueAt(int index) { return &(values_[index]); }
void Handlify();
Kind kind_;
@@ -242,7 +268,7 @@ class TranslatedFrame {
class TranslatedState {
public:
- TranslatedState();
+ TranslatedState() {}
explicit TranslatedState(const JavaScriptFrame* frame);
void Prepare(Address stack_frame_pointer);
@@ -270,6 +296,9 @@ class TranslatedState {
FixedArray* literal_array, RegisterValues* registers,
FILE* trace_file, int parameter_count);
+ void VerifyMaterializedObjects();
+ bool DoUpdateFeedback();
+
private:
friend TranslatedValue;
@@ -288,19 +317,47 @@ class TranslatedState {
FILE* trace_file);
void UpdateFromPreviouslyMaterializedObjects();
- Handle<Object> MaterializeAt(int frame_index, int* value_index);
- Handle<Object> MaterializeObjectAt(int object_index);
- class CapturedObjectMaterializer;
- Handle<Object> MaterializeCapturedObjectAt(TranslatedValue* slot,
- int frame_index, int* value_index);
+ void MaterializeFixedDoubleArray(TranslatedFrame* frame, int* value_index,
+ TranslatedValue* slot, Handle<Map> map);
+ void MaterializeMutableHeapNumber(TranslatedFrame* frame, int* value_index,
+ TranslatedValue* slot);
+
+ void EnsureObjectAllocatedAt(TranslatedValue* slot);
+
+ void SkipSlots(int slots_to_skip, TranslatedFrame* frame, int* value_index);
+
+ Handle<ByteArray> AllocateStorageFor(TranslatedValue* slot);
+ void EnsureJSObjectAllocated(TranslatedValue* slot, Handle<Map> map);
+ void EnsurePropertiesAllocatedAndMarked(TranslatedValue* properties_slot,
+ Handle<Map> map);
+ void EnsureChildrenAllocated(int count, TranslatedFrame* frame,
+ int* value_index, std::stack<int>* worklist);
+ void EnsureCapturedObjectAllocatedAt(int object_index,
+ std::stack<int>* worklist);
+ Handle<Object> InitializeObjectAt(TranslatedValue* slot);
+ void InitializeCapturedObjectAt(int object_index, std::stack<int>* worklist,
+ const DisallowHeapAllocation& no_allocation);
+ void InitializeJSObjectAt(TranslatedFrame* frame, int* value_index,
+ TranslatedValue* slot, Handle<Map> map,
+ const DisallowHeapAllocation& no_allocation);
+ void InitializeObjectWithTaggedFieldsAt(
+ TranslatedFrame* frame, int* value_index, TranslatedValue* slot,
+ Handle<Map> map, const DisallowHeapAllocation& no_allocation);
+
+ void ReadUpdateFeedback(TranslationIterator* iterator,
+ FixedArray* literal_array, FILE* trace_file);
+
+ TranslatedValue* ResolveCapturedObject(TranslatedValue* slot);
+ TranslatedValue* GetValueByObjectIndex(int object_index);
+ Handle<Object> GetValueAndAdvance(TranslatedFrame* frame, int* value_index);
static uint32_t GetUInt32Slot(Address fp, int slot_index);
static Float32 GetFloatSlot(Address fp, int slot_index);
static Float64 GetDoubleSlot(Address fp, int slot_index);
std::vector<TranslatedFrame> frames_;
- Isolate* isolate_;
- Address stack_frame_pointer_;
+ Isolate* isolate_ = nullptr;
+ Address stack_frame_pointer_ = nullptr;
int formal_parameter_count_;
struct ObjectPosition {
@@ -308,6 +365,9 @@ class TranslatedState {
int value_index_;
};
std::deque<ObjectPosition> object_positions_;
+ Handle<FeedbackVector> feedback_vector_handle_;
+ FeedbackVector* feedback_vector_ = nullptr;
+ FeedbackSlot feedback_slot_;
};
@@ -366,7 +426,6 @@ class Deoptimizer : public Malloced {
Handle<JSFunction> function() const;
Handle<Code> compiled_code() const;
BailoutType bailout_type() const { return bailout_type_; }
- bool preserve_optimized() const { return preserve_optimized_; }
// Number of created JS frames. Not all created frames are necessarily JS.
int jsframe_count() const { return jsframe_count_; }
@@ -488,7 +547,6 @@ class Deoptimizer : public Malloced {
unsigned ComputeInputFrameAboveFpFixedSize() const;
unsigned ComputeInputFrameSize() const;
- static unsigned ComputeJavascriptFixedSize(SharedFunctionInfo* shared);
static unsigned ComputeInterpretedFixedSize(SharedFunctionInfo* shared);
static unsigned ComputeIncomingArgumentSize(SharedFunctionInfo* shared);
@@ -517,7 +575,6 @@ class Deoptimizer : public Malloced {
Code* compiled_code_;
unsigned bailout_id_;
BailoutType bailout_type_;
- bool preserve_optimized_;
Address from_;
int fp_to_sp_delta_;
bool deoptimizing_throw_;
@@ -642,9 +699,15 @@ class FrameDescription {
return *GetFrameSlotPointer(offset);
}
+ unsigned GetLastArgumentSlotOffset() {
+ int parameter_slots = parameter_count();
+ if (kPadArguments) parameter_slots = RoundUp(parameter_slots, 2);
+ return GetFrameSize() - parameter_slots * kPointerSize;
+ }
+
Address GetFramePointerAddress() {
- int fp_offset = GetFrameSize() - parameter_count() * kPointerSize -
- StandardFrameConstants::kCallerSPOffset;
+ int fp_offset =
+ GetLastArgumentSlotOffset() - StandardFrameConstants::kCallerSPOffset;
return reinterpret_cast<Address>(GetFrameSlotPointer(fp_offset));
}
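
GetLastArgumentSlotOffset accounts for targets where kPadArguments is set (arm64 keeps the stack pointer 16-byte aligned), so an odd parameter count reserves one extra slot. A worked example under the assumption of 8-byte slots:

    constexpr int kSlotSize = 8;  // assumed pointer size for the example

    constexpr unsigned LastArgumentSlotOffset(unsigned frame_size,
                                              int parameter_count,
                                              bool pad_arguments) {
      int slots = pad_arguments ? (parameter_count + 1) & ~1 : parameter_count;
      return frame_size - slots * kSlotSize;
    }

    // 3 parameters pad to 4 slots: 96 - 4 * 8 == 64.
    static_assert(LastArgumentSlotOffset(96, 3, true) == 64, "padded");
    static_assert(LastArgumentSlotOffset(96, 3, false) == 72, "unpadded");
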
@@ -826,7 +889,8 @@ class TranslationIterator BASE_EMBEDDED {
V(BOOL_STACK_SLOT) \
V(FLOAT_STACK_SLOT) \
V(DOUBLE_STACK_SLOT) \
- V(LITERAL)
+ V(LITERAL) \
+ V(UPDATE_FEEDBACK)
class Translation BASE_EMBEDDED {
public:
@@ -838,13 +902,12 @@ class Translation BASE_EMBEDDED {
#undef DECLARE_TRANSLATION_OPCODE_ENUM
Translation(TranslationBuffer* buffer, int frame_count, int jsframe_count,
- Zone* zone)
- : buffer_(buffer),
- index_(buffer->CurrentIndex()),
- zone_(zone) {
+ int update_feedback_count, Zone* zone)
+ : buffer_(buffer), index_(buffer->CurrentIndex()), zone_(zone) {
buffer_->Add(BEGIN);
buffer_->Add(frame_count);
buffer_->Add(jsframe_count);
+ buffer_->Add(update_feedback_count);
}
int index() const { return index_; }
@@ -862,6 +925,7 @@ class Translation BASE_EMBEDDED {
void ArgumentsElements(CreateArgumentsType type);
void ArgumentsLength(CreateArgumentsType type);
void BeginCapturedObject(int length);
+ void AddUpdateFeedback(int vector_literal, int slot);
void DuplicateObject(int object_index);
void StoreRegister(Register reg);
void StoreInt32Register(Register reg);
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index 603f0bbe03..a26517b432 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -17,6 +17,8 @@
#include "src/objects-inl.h"
#include "src/snapshot/serializer-common.h"
#include "src/string-stream.h"
+#include "src/wasm/wasm-code-manager.h"
+#include "src/wasm/wasm-engine.h"
namespace v8 {
namespace internal {
@@ -37,21 +39,29 @@ class V8NameConverter: public disasm::NameConverter {
const char* V8NameConverter::NameOfAddress(byte* pc) const {
- const char* name =
- code_ == nullptr ? nullptr : code_->GetIsolate()->builtins()->Lookup(pc);
+ if (code_ != nullptr) {
+ Isolate* isolate = code_->GetIsolate();
+ const char* name = isolate->builtins()->Lookup(pc);
- if (name != nullptr) {
- SNPrintF(v8_buffer_, "%p (%s)", static_cast<void*>(pc), name);
- return v8_buffer_.start();
- }
+ if (name != nullptr) {
+ SNPrintF(v8_buffer_, "%p (%s)", static_cast<void*>(pc), name);
+ return v8_buffer_.start();
+ }
- if (code_ != nullptr) {
int offs = static_cast<int>(pc - code_->instruction_start());
// print as code offset, if it seems reasonable
if (0 <= offs && offs < code_->instruction_size()) {
SNPrintF(v8_buffer_, "%p <+0x%x>", static_cast<void*>(pc), offs);
return v8_buffer_.start();
}
+
+ wasm::WasmCode* wasm_code =
+ isolate->wasm_engine()->code_manager()->LookupCode(pc);
+ if (wasm_code != nullptr) {
+ SNPrintF(v8_buffer_, "%p (%s)", static_cast<void*>(pc),
+ GetWasmCodeKindAsString(wasm_code->kind()));
+ return v8_buffer_.start();
+ }
}
return disasm::NameConverter::NameOfAddress(pc);
@@ -155,7 +165,8 @@ static void PrintRelocInfo(StringBuilder* out, Isolate* isolate,
}
static int DecodeIt(Isolate* isolate, std::ostream* os,
- const V8NameConverter& converter, byte* begin, byte* end) {
+ const V8NameConverter& converter, byte* begin, byte* end,
+ void* current_pc) {
SealHandleScope shs(isolate);
DisallowHeapAllocation no_alloc;
ExternalReferenceEncoder ref_encoder(isolate);
@@ -232,6 +243,10 @@ static int DecodeIt(Isolate* isolate, std::ostream* os,
}
// Instruction address and instruction offset.
+ if (FLAG_log_colour && prev_pc == current_pc) {
+ // If this is the given "current" pc, make it yellow and bold.
+ out.AddFormatted("\033[33;1m");
+ }
out.AddFormatted("%p %4" V8PRIxPTRDIFF " ", static_cast<void*>(prev_pc),
prev_pc - begin);
@@ -269,6 +284,10 @@ static int DecodeIt(Isolate* isolate, std::ostream* os,
}
}
+ if (FLAG_log_colour && prev_pc == current_pc) {
+ out.AddFormatted("\033[m");
+ }
+
DumpBuffer(os, &out);
}
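
The highlighting relies on standard ANSI SGR escape sequences: "\033[33;1m" selects bold yellow and "\033[m" resets all attributes, so under FLAG_log_colour only the line whose address equals current_pc is emphasized. For illustration (a plain printf, not the V8 output path):

    #include <cstdio>

    void PrintHighlighted(const void* pc) {
      std::printf("\033[33;1m%p  <-- current pc\033[m\n", pc);
    }
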
@@ -287,17 +306,16 @@ static int DecodeIt(Isolate* isolate, std::ostream* os,
return static_cast<int>(pc - begin);
}
-
int Disassembler::Decode(Isolate* isolate, std::ostream* os, byte* begin,
- byte* end, Code* code) {
+ byte* end, Code* code, void* current_pc) {
V8NameConverter v8NameConverter(code);
- return DecodeIt(isolate, os, v8NameConverter, begin, end);
+ return DecodeIt(isolate, os, v8NameConverter, begin, end, current_pc);
}
#else // ENABLE_DISASSEMBLER
int Disassembler::Decode(Isolate* isolate, std::ostream* os, byte* begin,
- byte* end, Code* code) {
+ byte* end, Code* code, void* current_pc) {
return 0;
}
diff --git a/deps/v8/src/disassembler.h b/deps/v8/src/disassembler.h
index c0df0e6586..51ed0bf196 100644
--- a/deps/v8/src/disassembler.h
+++ b/deps/v8/src/disassembler.h
@@ -17,7 +17,7 @@ class Disassembler : public AllStatic {
// instruction could be decoded.
// the code object is used for name resolution and may be null.
static int Decode(Isolate* isolate, std::ostream* os, byte* begin, byte* end,
- Code* code = nullptr);
+ Code* code = nullptr, void* current_pc = nullptr);
};
} // namespace internal
diff --git a/deps/v8/src/eh-frame.cc b/deps/v8/src/eh-frame.cc
index 3dbfa46507..f0e413cf33 100644
--- a/deps/v8/src/eh-frame.cc
+++ b/deps/v8/src/eh-frame.cc
@@ -367,7 +367,7 @@ void EhFrameWriter::GetEhFrame(CodeDesc* desc) {
void EhFrameWriter::WriteULeb128(uint32_t value) {
do {
- byte chunk = value & 0x7f;
+ byte chunk = value & 0x7F;
value >>= 7;
if (value != 0) chunk |= 0x80;
WriteByte(chunk);
@@ -378,7 +378,7 @@ void EhFrameWriter::WriteSLeb128(int32_t value) {
static const int kSignBitMask = 0x40;
bool done;
do {
- byte chunk = value & 0x7f;
+ byte chunk = value & 0x7F;
value >>= 7;
done = ((value == 0) && ((chunk & kSignBitMask) == 0)) ||
((value == -1) && ((chunk & kSignBitMask) != 0));
@@ -412,7 +412,7 @@ uint32_t EhFrameIterator::DecodeULeb128(const byte* encoded,
do {
DCHECK_LT(shift, 8 * static_cast<int>(sizeof(result)));
- result |= (*current & 0x7f) << shift;
+ result |= (*current & 0x7F) << shift;
shift += 7;
} while (*current++ >= 128);
@@ -434,7 +434,7 @@ int32_t EhFrameIterator::DecodeSLeb128(const byte* encoded, int* encoded_size) {
do {
chunk = *current++;
DCHECK_LT(shift, 8 * static_cast<int>(sizeof(result)));
- result |= (chunk & 0x7f) << shift;
+ result |= (chunk & 0x7F) << shift;
shift += 7;
} while (chunk >= 128);
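
The writer above emits unsigned LEB128: seven payload bits per byte, with the continuation bit 0x80 set on every byte except the last (the hex-literal change in this hunk is purely stylistic, 0x7f to 0x7F). A self-contained encoder matching that loop, with the classic DWARF example 624485 encoding to 0xE5 0x8E 0x26:

    #include <cstdint>
    #include <vector>

    std::vector<uint8_t> EncodeULeb128(uint32_t value) {
      std::vector<uint8_t> out;
      do {
        uint8_t chunk = value & 0x7F;   // low seven payload bits
        value >>= 7;
        if (value != 0) chunk |= 0x80;  // continuation bit
        out.push_back(chunk);
      } while (value != 0);
      return out;
    }
    // EncodeULeb128(624485) == {0xE5, 0x8E, 0x26}
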
@@ -478,7 +478,7 @@ void EhFrameDisassembler::DumpDwarfDirectives(std::ostream& stream, // NOLINT
byte bytecode = eh_frame_iterator.GetNextByte();
- if (((bytecode >> EhFrameConstants::kLocationMaskSize) & 0xff) ==
+ if (((bytecode >> EhFrameConstants::kLocationMaskSize) & 0xFF) ==
EhFrameConstants::kLocationTag) {
int value = (bytecode & EhFrameConstants::kLocationMask) *
EhFrameConstants::kCodeAlignmentFactor;
@@ -488,7 +488,7 @@ void EhFrameDisassembler::DumpDwarfDirectives(std::ostream& stream, // NOLINT
continue;
}
- if (((bytecode >> EhFrameConstants::kSavedRegisterMaskSize) & 0xff) ==
+ if (((bytecode >> EhFrameConstants::kSavedRegisterMaskSize) & 0xFF) ==
EhFrameConstants::kSavedRegisterTag) {
int32_t decoded_offset = eh_frame_iterator.GetNextULeb128();
stream << "| " << DwarfRegisterCodeToString(
@@ -499,7 +499,7 @@ void EhFrameDisassembler::DumpDwarfDirectives(std::ostream& stream, // NOLINT
continue;
}
- if (((bytecode >> EhFrameConstants::kFollowInitialRuleMaskSize) & 0xff) ==
+ if (((bytecode >> EhFrameConstants::kFollowInitialRuleMaskSize) & 0xFF) ==
EhFrameConstants::kFollowInitialRuleTag) {
stream << "| " << DwarfRegisterCodeToString(
bytecode & EhFrameConstants::kLocationMask)
diff --git a/deps/v8/src/elements-kind.h b/deps/v8/src/elements-kind.h
index 838fa47769..b03f9340f3 100644
--- a/deps/v8/src/elements-kind.h
+++ b/deps/v8/src/elements-kind.h
@@ -191,6 +191,43 @@ inline ElementsKind GetHoleyElementsKind(ElementsKind packed_kind) {
return packed_kind;
}
+inline bool UnionElementsKindUptoPackedness(ElementsKind* a_out,
+ ElementsKind b) {
+ // Assert that the union of two ElementKinds can be computed via std::max.
+ static_assert(PACKED_SMI_ELEMENTS < HOLEY_SMI_ELEMENTS,
+ "ElementsKind union not computable via std::max.");
+ static_assert(PACKED_ELEMENTS < HOLEY_ELEMENTS,
+ "ElementsKind union not computable via std::max.");
+ static_assert(PACKED_DOUBLE_ELEMENTS < HOLEY_DOUBLE_ELEMENTS,
+ "ElementsKind union not computable via std::max.");
+ ElementsKind a = *a_out;
+ switch (a) {
+ case HOLEY_SMI_ELEMENTS:
+ case PACKED_SMI_ELEMENTS:
+ if (b == PACKED_SMI_ELEMENTS || b == HOLEY_SMI_ELEMENTS) {
+ *a_out = std::max(a, b);
+ return true;
+ }
+ break;
+ case PACKED_ELEMENTS:
+ case HOLEY_ELEMENTS:
+ if (b == PACKED_ELEMENTS || b == HOLEY_ELEMENTS) {
+ *a_out = std::max(a, b);
+ return true;
+ }
+ break;
+ case PACKED_DOUBLE_ELEMENTS:
+ case HOLEY_DOUBLE_ELEMENTS:
+ if (b == PACKED_DOUBLE_ELEMENTS || b == HOLEY_DOUBLE_ELEMENTS) {
+ *a_out = std::max(a, b);
+ return true;
+ }
+ break;
+ default:
+ break;
+ }
+ return false;
+}
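
UnionElementsKindUptoPackedness only unifies kinds within one category (smi, object, double); across categories it returns false and leaves *a_out untouched, and within a category the static_asserts guarantee std::max picks the holey variant. A hedged usage sketch, assuming elements-kind.h is included:

    void Example() {
      ElementsKind kind = PACKED_SMI_ELEMENTS;
      bool ok = UnionElementsKindUptoPackedness(&kind, HOLEY_SMI_ELEMENTS);
      // ok == true, kind == HOLEY_SMI_ELEMENTS (holey absorbs packed).
      ok = UnionElementsKindUptoPackedness(&kind, PACKED_DOUBLE_ELEMENTS);
      // ok == false and kind is left unchanged: smi and double categories
      // do not unify up to packedness.
      (void)ok;
    }
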
inline ElementsKind FastSmiToObjectElementsKind(ElementsKind from_kind) {
DCHECK(IsSmiElementsKind(from_kind));
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index 22bf8012dd..499af83078 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -12,6 +12,7 @@
#include "src/messages.h"
#include "src/objects-inl.h"
#include "src/utils.h"
+#include "src/zone/zone.h"
// Each concrete ElementsAccessor can handle exactly one ElementsKind,
// several abstract ElementsAccessor classes are used to allow sharing
@@ -519,6 +520,21 @@ static Maybe<int64_t> IndexOfValueSlowPath(Isolate* isolate,
return Just<int64_t>(-1);
}
+// The InternalElementsAccessor is a helper class to expose otherwise protected
+// methods to its subclasses. Namely, we don't want to publicly expose methods
+// that take an entry (instead of an index) as an argument.
+class InternalElementsAccessor : public ElementsAccessor {
+ public:
+ explicit InternalElementsAccessor(const char* name)
+ : ElementsAccessor(name) {}
+
+ virtual uint32_t GetEntryForIndex(Isolate* isolate, JSObject* holder,
+ FixedArrayBase* backing_store,
+ uint32_t index) = 0;
+
+ virtual PropertyDetails GetDetails(JSObject* holder, uint32_t entry) = 0;
+};
+
// Base class for element handler implementations. Contains
// the common logic for objects with different ElementsKinds.
// Subclasses must specialize method for which the element
@@ -537,10 +553,10 @@ static Maybe<int64_t> IndexOfValueSlowPath(Isolate* isolate,
// CRTP to guarantee aggressive compile time optimizations (i.e. inlining and
// specialization of SomeElementsAccessor methods).
template <typename Subclass, typename ElementsTraitsParam>
-class ElementsAccessorBase : public ElementsAccessor {
+class ElementsAccessorBase : public InternalElementsAccessor {
public:
explicit ElementsAccessorBase(const char* name)
- : ElementsAccessor(name) { }
+ : InternalElementsAccessor(name) {}
typedef ElementsTraitsParam ElementsTraits;
typedef typename ElementsTraitsParam::BackingStore BackingStore;
@@ -1052,35 +1068,65 @@ class ElementsAccessorBase : public ElementsAccessor {
Isolate* isolate, Handle<JSObject> object,
Handle<FixedArray> values_or_entries, bool get_entries, int* nof_items,
PropertyFilter filter) {
- int count = 0;
+ DCHECK_EQ(*nof_items, 0);
KeyAccumulator accumulator(isolate, KeyCollectionMode::kOwnOnly,
ALL_PROPERTIES);
Subclass::CollectElementIndicesImpl(
object, handle(object->elements(), isolate), &accumulator);
Handle<FixedArray> keys = accumulator.GetKeys();
- for (int i = 0; i < keys->length(); ++i) {
+ int count = 0;
+ int i = 0;
+ ElementsKind original_elements_kind = object->GetElementsKind();
+
+ for (; i < keys->length(); ++i) {
Handle<Object> key(keys->get(i), isolate);
- Handle<Object> value;
uint32_t index;
if (!key->ToUint32(&index)) continue;
+ DCHECK_EQ(object->GetElementsKind(), original_elements_kind);
uint32_t entry = Subclass::GetEntryForIndexImpl(
isolate, *object, object->elements(), index, filter);
if (entry == kMaxUInt32) continue;
-
PropertyDetails details = Subclass::GetDetailsImpl(*object, entry);
+ Handle<Object> value;
if (details.kind() == kData) {
value = Subclass::GetImpl(isolate, object->elements(), entry);
} else {
+ // This might modify the elements and/or change the elements kind.
LookupIterator it(isolate, object, index, LookupIterator::OWN);
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, value, Object::GetProperty(&it), Nothing<bool>());
}
- if (get_entries) {
- value = MakeEntryPair(isolate, index, value);
+ if (get_entries) value = MakeEntryPair(isolate, index, value);
+ values_or_entries->set(count++, *value);
+ if (object->GetElementsKind() != original_elements_kind) break;
+ }
+
+ // Slow path caused by changes in elements kind during iteration.
+ for (; i < keys->length(); i++) {
+ Handle<Object> key(keys->get(i), isolate);
+ uint32_t index;
+ if (!key->ToUint32(&index)) continue;
+
+ if (filter & ONLY_ENUMERABLE) {
+ InternalElementsAccessor* accessor =
+ reinterpret_cast<InternalElementsAccessor*>(
+ object->GetElementsAccessor());
+ uint32_t entry = accessor->GetEntryForIndex(isolate, *object,
+ object->elements(), index);
+ if (entry == kMaxUInt32) continue;
+ PropertyDetails details = accessor->GetDetails(*object, entry);
+ if (!details.IsEnumerable()) continue;
}
+
+ Handle<Object> value;
+ LookupIterator it(isolate, object, index, LookupIterator::OWN);
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, value, Object::GetProperty(&it),
+ Nothing<bool>());
+
+ if (get_entries) value = MakeEntryPair(isolate, index, value);
values_or_entries->set(count++, *value);
}
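
Because a getter run through Object::GetProperty can transition the receiver's elements kind, the fast loop above re-checks the kind after every user-visible call and bails to the fully generic loop on the first change. A hedged, generic reduction of that control shape (the callbacks are hypothetical stand-ins; the resumption detail is this sketch's own):

    template <typename FastFn, typename SlowFn, typename KindChangedFn>
    void IterateWithFallback(int n, FastFn fast, SlowFn slow,
                             KindChangedFn kind_changed) {
      int i = 0;
      for (; i < n; ++i) {
        fast(i);                // kind-specialized access; may run JS
        if (kind_changed()) {   // representation changed under us:
          ++i;                  // element i is already collected,
          break;                // finish the remainder generically
        }
      }
      for (; i < n; ++i) slow(i);  // fully generic remainder
    }
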
@@ -1710,12 +1756,14 @@ class DictionaryElementsAccessor
return result;
}
}
-
+ ElementsKind original_elements_kind = receiver->GetElementsKind();
+ USE(original_elements_kind);
Handle<NumberDictionary> dictionary(
NumberDictionary::cast(receiver->elements()), isolate);
// Iterate through entire range, as accessing elements out of order is
// observable
for (uint32_t k = start_from; k < length; ++k) {
+ DCHECK_EQ(receiver->GetElementsKind(), original_elements_kind);
int entry = dictionary->FindEntry(isolate, k);
if (entry == NumberDictionary::kNotFound) {
if (search_for_hole) return Just(true);
@@ -1780,15 +1828,16 @@ class DictionaryElementsAccessor
uint32_t start_from, uint32_t length) {
DCHECK(JSObject::PrototypeHasNoElements(isolate, *receiver));
+ ElementsKind original_elements_kind = receiver->GetElementsKind();
+ USE(original_elements_kind);
Handle<NumberDictionary> dictionary(
NumberDictionary::cast(receiver->elements()), isolate);
// Iterate through entire range, as accessing elements out of order is
// observable.
for (uint32_t k = start_from; k < length; ++k) {
+ DCHECK_EQ(receiver->GetElementsKind(), original_elements_kind);
int entry = dictionary->FindEntry(isolate, k);
- if (entry == NumberDictionary::kNotFound) {
- continue;
- }
+ if (entry == NumberDictionary::kNotFound) continue;
PropertyDetails details = GetDetailsImpl(*dictionary, entry);
switch (details.kind()) {
@@ -3195,13 +3244,16 @@ class TypedElementsAccessor
}
template <typename SourceTraits>
- static void CopyBetweenBackingStores(FixedTypedArrayBase* source,
+ static void CopyBetweenBackingStores(void* source_data_ptr,
BackingStore* dest, size_t length,
uint32_t offset) {
- FixedTypedArray<SourceTraits>* source_fta =
- FixedTypedArray<SourceTraits>::cast(source);
+ DisallowHeapAllocation no_gc;
for (uint32_t i = 0; i < length; i++) {
- typename SourceTraits::ElementType elem = source_fta->get_scalar(i);
+ // We use scalar accessors to avoid boxing/unboxing, so there are no
+ // allocations.
+ typename SourceTraits::ElementType elem =
+ FixedTypedArray<SourceTraits>::get_scalar_from_data_ptr(
+ source_data_ptr, i);
dest->set(offset + i, dest->from(elem));
}
}
@@ -3232,15 +3284,10 @@ class TypedElementsAccessor
bool both_are_simple = HasSimpleRepresentation(source_type) &&
HasSimpleRepresentation(destination_type);
- // We assume the source and destination don't overlap, even though they
- // can share the same buffer. This is always true for newly allocated
- // TypedArrays.
uint8_t* source_data = static_cast<uint8_t*>(source_elements->DataPtr());
uint8_t* dest_data = static_cast<uint8_t*>(destination_elements->DataPtr());
size_t source_byte_length = NumberToSize(source->byte_length());
size_t dest_byte_length = NumberToSize(destination->byte_length());
- CHECK(dest_data + dest_byte_length <= source_data ||
- source_data + source_byte_length <= dest_data);
// We can simply copy the backing store if the types are the same, or if
// we are converting e.g. Uint8 <-> Int8, as the binary representation
@@ -3248,16 +3295,25 @@ class TypedElementsAccessor
// which have special conversion operations.
if (same_type || (same_size && both_are_simple)) {
size_t element_size = source->element_size();
- std::memcpy(dest_data + offset * element_size, source_data,
- length * element_size);
+ std::memmove(dest_data + offset * element_size, source_data,
+ length * element_size);
} else {
- // We use scalar accessors below to avoid boxing/unboxing, so there are
- // no allocations.
+ Isolate* isolate = source->GetIsolate();
+ Zone zone(isolate->allocator(), ZONE_NAME);
+
+ // If the typed arrays overlap, clone the source.
+ if (dest_data + dest_byte_length > source_data &&
+ source_data + source_byte_length > dest_data) {
+ uint8_t* temp_data = zone.NewArray<uint8_t>(source_byte_length);
+ std::memcpy(temp_data, source_data, source_byte_length);
+ source_data = temp_data;
+ }
+
switch (source->GetElementsKind()) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case TYPE##_ELEMENTS: \
- CopyBetweenBackingStores<Type##ArrayTraits>( \
- source_elements, destination_elements, length, offset); \
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case TYPE##_ELEMENTS: \
+ CopyBetweenBackingStores<Type##ArrayTraits>( \
+ source_data, destination_elements, length, offset); \
break;
TYPED_ARRAYS(TYPED_ARRAY_CASE)
default:
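
Two fixes land together in this hunk: same-type copies switch from memcpy to memmove, and converting copies clone the source into a temporary zone buffer when the ranges overlap, which can happen whenever both typed arrays view the same ArrayBuffer (the old CHECK asserting non-overlap is removed). A standalone illustration of why the overlap matters, since memcpy on overlapping ranges is undefined behavior:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Shift a buffer left by one byte, e.g. ta.set(ta.subarray(1)) on a
    // typed array viewing its own buffer. The regions overlap, so memmove
    // (or a copy through a temporary) is required; memcpy would be UB.
    void ShiftLeft(uint8_t* data, size_t len) {
      if (len < 2) return;  // nothing to move
      std::memmove(data, data + 1, len - 1);
    }
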
@@ -3273,7 +3329,7 @@ class TypedElementsAccessor
DisallowHeapAllocation no_gc;
DisallowJavascriptExecution no_js(isolate);
-#if defined(DEBUG) || defined(ENABLE_SLOWFAST_SWITCH)
+#ifdef V8_ENABLE_FORCE_SLOW_PATH
if (isolate->force_slow_path()) return true;
#endif
@@ -3698,12 +3754,13 @@ class SloppyArgumentsElementsAccessor
Handle<Object> value,
uint32_t start_from, uint32_t length) {
DCHECK(JSObject::PrototypeHasNoElements(isolate, *object));
- Handle<Map> original_map = handle(object->map(), isolate);
+ Handle<Map> original_map(object->map(), isolate);
Handle<SloppyArgumentsElements> elements(
SloppyArgumentsElements::cast(object->elements()), isolate);
bool search_for_hole = value->IsUndefined(isolate);
for (uint32_t k = start_from; k < length; ++k) {
+ DCHECK_EQ(object->map(), *original_map);
uint32_t entry =
GetEntryForIndexImpl(isolate, *object, *elements, k, ALL_PROPERTIES);
if (entry == kMaxUInt32) {
@@ -3739,11 +3796,12 @@ class SloppyArgumentsElementsAccessor
Handle<Object> value,
uint32_t start_from, uint32_t length) {
DCHECK(JSObject::PrototypeHasNoElements(isolate, *object));
- Handle<Map> original_map = handle(object->map(), isolate);
+ Handle<Map> original_map(object->map(), isolate);
Handle<SloppyArgumentsElements> elements(
SloppyArgumentsElements::cast(object->elements()), isolate);
for (uint32_t k = start_from; k < length; ++k) {
+ DCHECK_EQ(object->map(), *original_map);
uint32_t entry =
GetEntryForIndexImpl(isolate, *object, *elements, k, ALL_PROPERTIES);
if (entry == kMaxUInt32) {
diff --git a/deps/v8/src/elements.h b/deps/v8/src/elements.h
index 348af6d8ea..de5aa0d878 100644
--- a/deps/v8/src/elements.h
+++ b/deps/v8/src/elements.h
@@ -54,9 +54,10 @@ class ElementsAccessor {
// typed array elements.
virtual bool HasEntry(JSObject* holder, uint32_t entry) = 0;
+ // TODO(cbruni): HasEntry and Get should not be exposed publicly with the
+ // entry parameter.
virtual Handle<Object> Get(Handle<JSObject> holder, uint32_t entry) = 0;
- virtual PropertyDetails GetDetails(JSObject* holder, uint32_t entry) = 0;
virtual bool HasAccessors(JSObject* holder) = 0;
virtual uint32_t NumberOfElements(JSObject* holder) = 0;
@@ -67,8 +68,6 @@ class ElementsAccessor {
// element that is non-deletable.
virtual void SetLength(Handle<JSArray> holder, uint32_t new_length) = 0;
- // Deletes an element in an object.
- virtual void Delete(Handle<JSObject> holder, uint32_t entry) = 0;
// If kCopyToEnd is specified as the copy_size to CopyElements, it copies all
// of elements from source after source_start to the destination array.
@@ -126,11 +125,6 @@ class ElementsAccessor {
virtual void Set(Handle<JSObject> holder, uint32_t entry, Object* value) = 0;
- virtual void Reconfigure(Handle<JSObject> object,
- Handle<FixedArrayBase> backing_store, uint32_t entry,
- Handle<Object> value,
- PropertyAttributes attributes) = 0;
-
virtual void Add(Handle<JSObject> object, uint32_t index,
Handle<Object> value, PropertyAttributes attributes,
uint32_t new_capacity) = 0;
@@ -214,6 +208,15 @@ class ElementsAccessor {
FixedArrayBase* backing_store,
uint32_t index) = 0;
+ virtual PropertyDetails GetDetails(JSObject* holder, uint32_t entry) = 0;
+ virtual void Reconfigure(Handle<JSObject> object,
+ Handle<FixedArrayBase> backing_store, uint32_t entry,
+ Handle<Object> value,
+ PropertyAttributes attributes) = 0;
+
+ // Deletes an element in an object.
+ virtual void Delete(Handle<JSObject> holder, uint32_t entry) = 0;
+
// NOTE: this method violates the handlified function signature convention:
// raw pointer parameter |source_holder| in the function that allocates.
// This is done intentionally to avoid ArrayConcat() builtin performance
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index ee4bd55534..edd329f5da 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -55,7 +55,8 @@ namespace {
MUST_USE_RESULT MaybeHandle<Object> Invoke(
Isolate* isolate, bool is_construct, Handle<Object> target,
Handle<Object> receiver, int argc, Handle<Object> args[],
- Handle<Object> new_target, Execution::MessageHandling message_handling) {
+ Handle<Object> new_target, Execution::MessageHandling message_handling,
+ Execution::Target execution_target) {
DCHECK(!receiver->IsJSGlobalObject());
#ifdef USE_SIMULATOR
@@ -113,20 +114,30 @@ MUST_USE_RESULT MaybeHandle<Object> Invoke(
// Placeholder for return value.
Object* value = nullptr;
- typedef Object* (*JSEntryFunction)(Object* new_target, Object* target,
- Object* receiver, int argc,
- Object*** args);
-
- Handle<Code> code = is_construct
- ? isolate->factory()->js_construct_entry_code()
- : isolate->factory()->js_entry_code();
+ using JSEntryFunction =
+ GeneratedCode<Object*(Object * new_target, Object * target,
+ Object * receiver, int argc, Object*** args)>;
+
+ Handle<Code> code;
+ switch (execution_target) {
+ case Execution::Target::kCallable:
+ code = is_construct ? isolate->factory()->js_construct_entry_code()
+ : isolate->factory()->js_entry_code();
+ break;
+ case Execution::Target::kRunMicrotasks:
+ code = isolate->factory()->js_run_microtasks_entry_code();
+ break;
+ default:
+ UNREACHABLE();
+ }
{
// Save and restore context around invocation and block the
// allocation of handles without explicit handle scopes.
SaveContext save(isolate);
SealHandleScope shs(isolate);
- JSEntryFunction stub_entry = FUNCTION_CAST<JSEntryFunction>(code->entry());
+ JSEntryFunction stub_entry =
+ JSEntryFunction::FromAddress(isolate, code->entry());
if (FLAG_clear_exceptions_on_js_entry) isolate->clear_pending_exception();
@@ -138,9 +149,8 @@ MUST_USE_RESULT MaybeHandle<Object> Invoke(
if (FLAG_profile_deserialization && target->IsJSFunction()) {
PrintDeserializedCodeInfo(Handle<JSFunction>::cast(target));
}
- RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::JS_Execution);
- value = CALL_GENERATED_CODE(isolate, stub_entry, orig_func, func, recv,
- argc, argv);
+ RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::kJS_Execution);
+ value = stub_entry.Call(orig_func, func, recv, argc, argv);
}
#ifdef VERIFY_HEAP
@@ -167,7 +177,8 @@ MUST_USE_RESULT MaybeHandle<Object> Invoke(
MaybeHandle<Object> CallInternal(Isolate* isolate, Handle<Object> callable,
Handle<Object> receiver, int argc,
Handle<Object> argv[],
- Execution::MessageHandling message_handling) {
+ Execution::MessageHandling message_handling,
+ Execution::Target target) {
// Convert calls on global objects to be calls on the global
// receiver instead to avoid having a 'this' pointer which refers
// directly to a global object.
@@ -176,7 +187,8 @@ MaybeHandle<Object> CallInternal(Isolate* isolate, Handle<Object> callable,
handle(Handle<JSGlobalObject>::cast(receiver)->global_proxy(), isolate);
}
return Invoke(isolate, false, callable, receiver, argc, argv,
- isolate->factory()->undefined_value(), message_handling);
+ isolate->factory()->undefined_value(), message_handling,
+ target);
}
} // namespace
@@ -186,7 +198,7 @@ MaybeHandle<Object> Execution::Call(Isolate* isolate, Handle<Object> callable,
Handle<Object> receiver, int argc,
Handle<Object> argv[]) {
return CallInternal(isolate, callable, receiver, argc, argv,
- MessageHandling::kReport);
+ MessageHandling::kReport, Execution::Target::kCallable);
}
@@ -203,15 +215,13 @@ MaybeHandle<Object> Execution::New(Isolate* isolate, Handle<Object> constructor,
Handle<Object> argv[]) {
return Invoke(isolate, true, constructor,
isolate->factory()->undefined_value(), argc, argv, new_target,
- MessageHandling::kReport);
+ MessageHandling::kReport, Execution::Target::kCallable);
}
-MaybeHandle<Object> Execution::TryCall(Isolate* isolate,
- Handle<Object> callable,
- Handle<Object> receiver, int argc,
- Handle<Object> args[],
- MessageHandling message_handling,
- MaybeHandle<Object>* exception_out) {
+MaybeHandle<Object> Execution::TryCall(
+ Isolate* isolate, Handle<Object> callable, Handle<Object> receiver,
+ int argc, Handle<Object> args[], MessageHandling message_handling,
+ MaybeHandle<Object>* exception_out, Target target) {
bool is_termination = false;
MaybeHandle<Object> maybe_result;
if (exception_out != nullptr) *exception_out = MaybeHandle<Object>();
@@ -226,8 +236,8 @@ MaybeHandle<Object> Execution::TryCall(Isolate* isolate,
catcher.SetVerbose(false);
catcher.SetCaptureMessage(false);
- maybe_result =
- CallInternal(isolate, callable, receiver, argc, args, message_handling);
+ maybe_result = CallInternal(isolate, callable, receiver, argc, args,
+ message_handling, target);
if (maybe_result.is_null()) {
DCHECK(isolate->has_pending_exception());
@@ -253,6 +263,13 @@ MaybeHandle<Object> Execution::TryCall(Isolate* isolate,
return maybe_result;
}
+MaybeHandle<Object> Execution::RunMicrotasks(
+ Isolate* isolate, MessageHandling message_handling,
+ MaybeHandle<Object>* exception_out) {
+ auto undefined = isolate->factory()->undefined_value();
+ return TryCall(isolate, undefined, undefined, 0, {}, message_handling,
+ exception_out, Target::kRunMicrotasks);
+}
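
The new Execution::Target::kRunMicrotasks routes the call through the dedicated js_run_microtasks_entry_code() stub selected in Invoke rather than through a callable. A hedged usage sketch against the declaration added to execution.h below (error handling elided):

    MaybeHandle<Object> exception;
    MaybeHandle<Object> result = Execution::RunMicrotasks(
        isolate, Execution::MessageHandling::kKeepPending, &exception);
    if (result.is_null()) {
      // A microtask threw (or execution was terminated); any caught
      // exception is reported back through |exception|.
    }
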
void StackGuard::SetStackLimit(uintptr_t limit) {
ExecutionAccess access(isolate_);
diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h
index eeebfadde2..7dd920a446 100644
--- a/deps/v8/src/execution.h
+++ b/deps/v8/src/execution.h
@@ -8,6 +8,7 @@
#include "src/allocation.h"
#include "src/base/atomicops.h"
#include "src/globals.h"
+#include "src/objects/code.h"
#include "src/utils.h"
namespace v8 {
@@ -20,6 +21,7 @@ class Execution final : public AllStatic {
public:
// Whether to report pending messages, or keep them pending on the isolate.
enum class MessageHandling { kReport, kKeepPending };
+ enum class Target { kCallable, kRunMicrotasks };
// Call a function, the caller supplies a receiver and an array
// of arguments.
@@ -54,7 +56,12 @@ class Execution final : public AllStatic {
Handle<Object> receiver, int argc,
Handle<Object> argv[],
MessageHandling message_handling,
- MaybeHandle<Object>* exception_out);
+ MaybeHandle<Object>* exception_out,
+ Target target = Target::kCallable);
+ // Convenience method for performing RunMicrotasks
+ static MaybeHandle<Object> RunMicrotasks(Isolate* isolate,
+ MessageHandling message_handling,
+ MaybeHandle<Object>* exception_out);
};
@@ -162,8 +169,8 @@ class V8_EXPORT_PRIVATE StackGuard final {
void DisableInterrupts();
#if V8_TARGET_ARCH_64_BIT
- static const uintptr_t kInterruptLimit = V8_UINT64_C(0xfffffffffffffffe);
- static const uintptr_t kIllegalLimit = V8_UINT64_C(0xfffffffffffffff8);
+ static const uintptr_t kInterruptLimit = uintptr_t{0xfffffffffffffffe};
+ static const uintptr_t kIllegalLimit = uintptr_t{0xfffffffffffffff8};
#else
static const uintptr_t kInterruptLimit = 0xfffffffe;
static const uintptr_t kIllegalLimit = 0xfffffff8;
diff --git a/deps/v8/src/extensions/statistics-extension.cc b/deps/v8/src/extensions/statistics-extension.cc
index f0b5d72387..aa9d5c4364 100644
--- a/deps/v8/src/extensions/statistics-extension.cc
+++ b/deps/v8/src/extensions/statistics-extension.cc
@@ -86,24 +86,6 @@ void StatisticsExtension::GetCounters(
STATS_COUNTER_LIST_1(ADD_COUNTER) STATS_COUNTER_LIST_2(ADD_COUNTER)
#undef ADD_COUNTER
-#define ADD_COUNTER(name) \
- { counters->count_of_##name(), "count_of_" #name } \
- , {counters->size_of_##name(), "size_of_" #name},
-
- INSTANCE_TYPE_LIST(ADD_COUNTER)
-#undef ADD_COUNTER
-#define ADD_COUNTER(name) \
- { counters->count_of_CODE_TYPE_##name(), "count_of_CODE_TYPE_" #name } \
- , {counters->size_of_CODE_TYPE_##name(), "size_of_CODE_TYPE_" #name},
-
- CODE_KIND_LIST(ADD_COUNTER)
-#undef ADD_COUNTER
-#define ADD_COUNTER(name) \
- { counters->count_of_FIXED_ARRAY_##name(), "count_of_FIXED_ARRAY_" #name } \
- , {counters->size_of_FIXED_ARRAY_##name(), "size_of_FIXED_ARRAY_" #name},
-
- FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADD_COUNTER)
-#undef ADD_COUNTER
}; // End counter_list array.
for (size_t i = 0; i < arraysize(counter_list); i++) {
diff --git a/deps/v8/src/external-reference-table.cc b/deps/v8/src/external-reference-table.cc
index 123f9c2fd2..52157b5034 100644
--- a/deps/v8/src/external-reference-table.cc
+++ b/deps/v8/src/external-reference-table.cc
@@ -92,6 +92,10 @@ void ExternalReferenceTable::AddReferences(Isolate* isolate) {
"LDoubleConstant::one_half");
Add(ExternalReference::isolate_address(isolate).address(), "isolate");
Add(ExternalReference::builtins_address(isolate).address(), "builtins");
+ Add(ExternalReference::handle_scope_implementer_address(isolate).address(),
+ "Isolate::handle_scope_implementer_address");
+ Add(ExternalReference::pending_microtask_count_address(isolate).address(),
+ "Isolate::pending_microtask_count_address()");
Add(ExternalReference::interpreter_dispatch_table_address(isolate).address(),
"Interpreter::dispatch_table_address");
Add(ExternalReference::bytecode_size_table_address(isolate).address(),
@@ -341,6 +345,10 @@ void ExternalReferenceTable::AddReferences(Isolate* isolate) {
"IncrementalMarking::RecordWrite");
Add(ExternalReference::store_buffer_overflow_function(isolate).address(),
"StoreBuffer::StoreBufferOverflow");
+
+ Add(ExternalReference::invalidate_prototype_chains_function(isolate)
+ .address(),
+ "JSObject::InvalidatePrototypeChains()");
}
void ExternalReferenceTable::AddBuiltins(Isolate* isolate) {
diff --git a/deps/v8/src/factory-inl.h b/deps/v8/src/factory-inl.h
index 02cdef3a15..ace5c35472 100644
--- a/deps/v8/src/factory-inl.h
+++ b/deps/v8/src/factory-inl.h
@@ -29,6 +29,14 @@ ROOT_LIST(ROOT_ACCESSOR)
STRUCT_LIST(STRUCT_MAP_ACCESSOR)
#undef STRUCT_MAP_ACCESSOR
+#define DATA_HANDLER_MAP_ACCESSOR(NAME, Name, Size, name) \
+ Handle<Map> Factory::name##_map() { \
+ return Handle<Map>(bit_cast<Map**>( \
+ &isolate()->heap()->roots_[Heap::k##Name##Size##MapRootIndex])); \
+ }
+DATA_HANDLER_LIST(DATA_HANDLER_MAP_ACCESSOR)
+#undef DATA_HANDLER_MAP_ACCESSOR
+
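Note: assuming DATA_HANDLER_LIST carries entries of the shape V(LOAD_HANDLER, LoadHandler, 1, load_handler1) (the list itself is not part of this diff, so the entry shape is an assumption), one expansion of the accessor macro reads:

    Handle<Map> Factory::load_handler1_map() {
      return Handle<Map>(bit_cast<Map**>(
          &isolate()->heap()->roots_[Heap::kLoadHandler1MapRootIndex]));
    }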
#define STRING_ACCESSOR(name, str) \
Handle<String> Factory::name() { \
return Handle<String>(bit_cast<String**>( \
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index ea3936e232..fab539bf8b 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -337,14 +337,6 @@ Handle<AccessorPair> Factory::NewAccessorPair() {
}
-Handle<TypeFeedbackInfo> Factory::NewTypeFeedbackInfo() {
- Handle<TypeFeedbackInfo> info =
- Handle<TypeFeedbackInfo>::cast(NewStruct(TUPLE3_TYPE, TENURED));
- info->initialize_storage();
- return info;
-}
-
-
// Internalized strings are created in the old generation (data space).
Handle<String> Factory::InternalizeUtf8String(Vector<const char> string) {
Utf8StringKey key(string, isolate()->heap()->HashSeed());
@@ -1003,6 +995,7 @@ Handle<Context> Factory::NewNativeContext() {
context->set_math_random_index(Smi::kZero);
Handle<WeakCell> weak_cell = NewWeakCell(context);
context->set_self_weak_cell(*weak_cell);
+ context->set_serialized_objects(*empty_fixed_array());
DCHECK(context->IsNativeContext());
return context;
}
@@ -1184,7 +1177,7 @@ Handle<Script> Factory::NewScript(Handle<String> source) {
script->set_type(Script::TYPE_NORMAL);
script->set_wrapper(heap->undefined_value());
script->set_line_ends(heap->undefined_value());
- script->set_eval_from_shared(heap->undefined_value());
+ script->set_eval_from_shared_or_wrapped_arguments(heap->undefined_value());
script->set_eval_from_position(0);
script->set_shared_function_infos(*empty_fixed_array(), SKIP_WRITE_BARRIER);
script->set_flags(0);
@@ -1881,7 +1874,7 @@ Handle<JSGlobalObject> Factory::NewJSGlobalObject(
// Create a new map for the global object.
Handle<Map> new_map = Map::CopyDropDescriptors(map);
new_map->set_may_have_interesting_symbols(true);
- new_map->set_dictionary_map(true);
+ new_map->set_is_dictionary_map(true);
// Set up the global object as a normalized object.
global->set_global_dictionary(*dictionary);
@@ -1985,6 +1978,18 @@ void Factory::NewJSArrayStorage(Handle<JSArray> array,
array->set_length(Smi::FromInt(length));
}
+Handle<JSWeakMap> Factory::NewJSWeakMap() {
+ Context* native_context = isolate()->raw_native_context();
+ Handle<Map> map(native_context->js_weak_map_fun()->initial_map());
+ Handle<JSWeakMap> weakmap(JSWeakMap::cast(*NewJSObjectFromMap(map)));
+ {
+ // Do not leak handles for the hash table; that would make its entries strong.
+ HandleScope scope(isolate());
+ JSWeakCollection::Initialize(weakmap, isolate());
+ }
+ return weakmap;
+}
+
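Note: the inner HandleScope matters because JSWeakCollection::Initialize allocates the backing hash table; a handle to that table escaping this function would act as a strong root and defeat the weak semantics. Callers hold only the weakmap itself, e.g. (sketch):

    // The weakmap handle lives in the caller's scope; any temporaries
    // created by Initialize() died with the inner scope above.
    Handle<JSWeakMap> table = isolate->factory()->NewJSWeakMap();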
Handle<JSModuleNamespace> Factory::NewJSModuleNamespace() {
Handle<Map> map = isolate()->js_module_namespace_map();
Handle<JSModuleNamespace> module_namespace(
@@ -2775,6 +2780,46 @@ Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<Context> native_context,
return map;
}
+Handle<LoadHandler> Factory::NewLoadHandler(int data_count) {
+ Handle<Map> map;
+ switch (data_count) {
+ case 1:
+ map = load_handler1_map();
+ break;
+ case 2:
+ map = load_handler2_map();
+ break;
+ case 3:
+ map = load_handler3_map();
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ return New<LoadHandler>(map, OLD_SPACE);
+}
+
+Handle<StoreHandler> Factory::NewStoreHandler(int data_count) {
+ Handle<Map> map;
+ switch (data_count) {
+ case 0:
+ map = store_handler0_map();
+ break;
+ case 1:
+ map = store_handler1_map();
+ break;
+ case 2:
+ map = store_handler2_map();
+ break;
+ case 3:
+ map = store_handler3_map();
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ return New<StoreHandler>(map, OLD_SPACE);
+}
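Note: both factory methods map a handler's data-slot count onto a pre-created map, fixing the object size at allocation time; counts outside the listed ranges hit UNREACHABLE(). A hedged call-site sketch:

    Handle<LoadHandler> load = factory->NewLoadHandler(1);    // load_handler1_map()
    Handle<StoreHandler> store = factory->NewStoreHandler(0); // store_handler0_map()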
void Factory::SetRegExpAtomData(Handle<JSRegExp> regexp,
JSRegExp::Type type,
@@ -2866,7 +2911,7 @@ Handle<Map> Factory::CreateSloppyFunctionMap(
TERMINAL_FAST_ELEMENTS_KIND, inobject_properties_count);
map->set_has_prototype_slot(has_prototype);
map->set_is_constructor(has_prototype);
- map->set_is_callable();
+ map->set_is_callable(true);
Handle<JSFunction> empty_function;
if (maybe_empty_function.ToHandle(&empty_function)) {
Map::SetPrototype(map, empty_function);
@@ -2945,7 +2990,7 @@ Handle<Map> Factory::CreateStrictFunctionMap(
TERMINAL_FAST_ELEMENTS_KIND, inobject_properties_count);
map->set_has_prototype_slot(has_prototype);
map->set_is_constructor(has_prototype);
- map->set_is_callable();
+ map->set_is_callable(true);
Map::SetPrototype(map, empty_function);
//
@@ -3010,7 +3055,7 @@ Handle<Map> Factory::CreateClassFunctionMap(Handle<JSFunction> empty_function) {
map->set_has_prototype_slot(true);
map->set_is_constructor(true);
map->set_is_prototype_map(true);
- map->set_is_callable();
+ map->set_is_callable(true);
Map::SetPrototype(map, empty_function);
//
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index cb76aab3b7..f0e9d63885 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -7,8 +7,10 @@
#include "src/feedback-vector.h"
#include "src/globals.h"
+#include "src/ic/handler-configuration.h"
#include "src/isolate.h"
#include "src/messages.h"
+#include "src/objects/data-handler.h"
#include "src/objects/descriptor-array.h"
#include "src/objects/dictionary.h"
#include "src/objects/js-array.h"
@@ -29,7 +31,12 @@ class ConstantElementsPair;
class CoverageInfo;
class DebugInfo;
class FreshlyAllocatedBigInt;
+class JSMap;
+class JSMapIterator;
class JSModuleNamespace;
+class JSSet;
+class JSSetIterator;
+class JSWeakMap;
class NewFunctionArgs;
struct SourceRange;
class PreParsedScopeData;
@@ -164,9 +171,6 @@ class V8_EXPORT_PRIVATE Factory final {
// Create a pre-tenured empty AccessorPair.
Handle<AccessorPair> NewAccessorPair();
- // Create an empty TypeFeedbackInfo.
- Handle<TypeFeedbackInfo> NewTypeFeedbackInfo();
-
// Finds the internalized copy for string in the string table.
// If not found, a new string is added to the table and returned.
Handle<String> InternalizeUtf8String(Vector<const char> str);
@@ -552,6 +556,8 @@ class V8_EXPORT_PRIVATE Factory final {
int capacity,
ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS);
+ Handle<JSWeakMap> NewJSWeakMap();
+
Handle<JSGeneratorObject> NewJSGeneratorObject(Handle<JSFunction> function);
Handle<JSModuleNamespace> NewJSModuleNamespace();
@@ -734,6 +740,11 @@ class V8_EXPORT_PRIVATE Factory final {
STRUCT_LIST(STRUCT_MAP_ACCESSOR)
#undef STRUCT_MAP_ACCESSOR
+#define DATA_HANDLER_MAP_ACCESSOR(NAME, Name, Size, name) \
+ inline Handle<Map> name##_map();
+ DATA_HANDLER_LIST(DATA_HANDLER_MAP_ACCESSOR)
+#undef DATA_HANDLER_MAP_ACCESSOR
+
#define STRING_ACCESSOR(name, str) inline Handle<String> name();
INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
#undef STRING_ACCESSOR
@@ -805,6 +816,9 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<Map> ObjectLiteralMapFromCache(Handle<Context> native_context,
int number_of_properties);
+ Handle<LoadHandler> NewLoadHandler(int data_count);
+ Handle<StoreHandler> NewStoreHandler(int data_count);
+
Handle<RegExpMatchInfo> NewRegExpMatchInfo();
// Creates a new FixedArray that holds the data associated with the
diff --git a/deps/v8/src/fast-dtoa.cc b/deps/v8/src/fast-dtoa.cc
index 7c8438e62f..9572c7026d 100644
--- a/deps/v8/src/fast-dtoa.cc
+++ b/deps/v8/src/fast-dtoa.cc
@@ -316,7 +316,6 @@ static void BiggestPowerTen(uint32_t number,
}
}
-
// Generates the digits of input number w.
// w is a floating-point number (DiyFp), consisting of a significand and an
// exponent. Its exponent is bounded by kMinimalTargetExponent and
@@ -345,15 +344,15 @@ static void BiggestPowerTen(uint32_t number,
// then false is returned. This happens rarely (~0.5%).
//
// Say, for the sake of example, that
-// w.e() == -48, and w.f() == 0x1234567890abcdef
+// w.e() == -48, and w.f() == 0x1234567890ABCDEF
// w's value can be computed by w.f() * 2^w.e()
// We can obtain w's integral digits by simply shifting w.f() by -w.e().
// -> w's integral part is 0x1234
-// w's fractional part is therefore 0x567890abcdef.
+// w's fractional part is therefore 0x567890ABCDEF.
// Printing w's integral part is easy (simply print 0x1234 in decimal).
// In order to print its fraction we repeatedly multiply the fraction by 10 and
// get each digit. For example, the first digit after the point would be computed by
-// (0x567890abcdef * 10) >> 48. -> 3
+// (0x567890ABCDEF * 10) >> 48. -> 3
// The whole thing becomes slightly more complicated because we want to stop
// once we have enough digits. That is, once the digits inside the buffer
// represent 'w' we can stop. Everything inside the interval low - high
diff --git a/deps/v8/src/feedback-vector-inl.h b/deps/v8/src/feedback-vector-inl.h
index e14381f2ab..888fa01854 100644
--- a/deps/v8/src/feedback-vector-inl.h
+++ b/deps/v8/src/feedback-vector-inl.h
@@ -117,7 +117,8 @@ bool FeedbackVector::has_optimized_code() const {
}
bool FeedbackVector::has_optimization_marker() const {
- return optimization_marker() != OptimizationMarker::kNone;
+ return optimization_marker() != OptimizationMarker::kLogFirstExecution &&
+ optimization_marker() != OptimizationMarker::kNone;
}
// Conversion from an integer index to either a slot or an ic slot.
@@ -171,9 +172,7 @@ BinaryOperationHint BinaryOperationHintFromFeedback(int type_feedback) {
case BinaryOperationFeedback::kString:
return BinaryOperationHint::kString;
case BinaryOperationFeedback::kBigInt:
- // TODO(jarin/jkummerow/neis): Support BigInts in TF.
- // Fall through for now.
- case BinaryOperationFeedback::kAny:
+ return BinaryOperationHint::kBigInt;
default:
return BinaryOperationHint::kAny;
}
@@ -197,6 +196,8 @@ CompareOperationHint CompareOperationHintFromFeedback(int type_feedback) {
return CompareOperationHint::kString;
case CompareOperationFeedback::kSymbol:
return CompareOperationHint::kSymbol;
+ case CompareOperationFeedback::kBigInt:
+ return CompareOperationHint::kBigInt;
case CompareOperationFeedback::kReceiver:
return CompareOperationHint::kReceiver;
default:
diff --git a/deps/v8/src/feedback-vector.cc b/deps/v8/src/feedback-vector.cc
index 0572b85395..c3bdd82616 100644
--- a/deps/v8/src/feedback-vector.cc
+++ b/deps/v8/src/feedback-vector.cc
@@ -234,7 +234,9 @@ Handle<FeedbackVector> FeedbackVector::New(Isolate* isolate,
DCHECK_EQ(vector->shared_function_info(), *shared);
DCHECK_EQ(vector->optimized_code_cell(),
- Smi::FromEnum(OptimizationMarker::kNone));
+ Smi::FromEnum(FLAG_log_function_events
+ ? OptimizationMarker::kLogFirstExecution
+ : OptimizationMarker::kNone));
DCHECK_EQ(vector->invocation_count(), 0);
DCHECK_EQ(vector->profiler_ticks(), 0);
DCHECK_EQ(vector->deopt_count(), 0);
@@ -253,6 +255,8 @@ Handle<FeedbackVector> FeedbackVector::New(Isolate* isolate,
switch (kind) {
case FeedbackSlotKind::kLoadGlobalInsideTypeof:
case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
+ case FeedbackSlotKind::kStoreGlobalSloppy:
+ case FeedbackSlotKind::kStoreGlobalStrict:
vector->set(index, isolate->heap()->empty_weak_cell(),
SKIP_WRITE_BARRIER);
break;
@@ -278,8 +282,6 @@ Handle<FeedbackVector> FeedbackVector::New(Isolate* isolate,
case FeedbackSlotKind::kStoreNamedSloppy:
case FeedbackSlotKind::kStoreNamedStrict:
case FeedbackSlotKind::kStoreOwnNamed:
- case FeedbackSlotKind::kStoreGlobalSloppy:
- case FeedbackSlotKind::kStoreGlobalStrict:
case FeedbackSlotKind::kStoreKeyedSloppy:
case FeedbackSlotKind::kStoreKeyedStrict:
case FeedbackSlotKind::kStoreDataPropertyInLiteral:
@@ -341,12 +343,18 @@ void FeedbackVector::SetOptimizedCode(Handle<FeedbackVector> vector,
vector->set_optimized_code_cell(*cell);
}
-void FeedbackVector::SetOptimizationMarker(OptimizationMarker marker) {
- set_optimized_code_cell(Smi::FromEnum(marker));
+void FeedbackVector::ClearOptimizedCode() {
+ DCHECK(has_optimized_code());
+ SetOptimizationMarker(OptimizationMarker::kNone);
}
-void FeedbackVector::ClearOptimizedCode() {
- set_optimized_code_cell(Smi::FromEnum(OptimizationMarker::kNone));
+void FeedbackVector::ClearOptimizationMarker() {
+ DCHECK(!has_optimized_code());
+ SetOptimizationMarker(OptimizationMarker::kNone);
+}
+
+void FeedbackVector::SetOptimizationMarker(OptimizationMarker marker) {
+ set_optimized_code_cell(Smi::FromEnum(marker));
}
void FeedbackVector::EvictOptimizedCodeMarkedForDeoptimization(
@@ -356,7 +364,7 @@ void FeedbackVector::EvictOptimizedCodeMarkedForDeoptimization(
WeakCell* cell = WeakCell::cast(slot);
if (cell->cleared()) {
- ClearOptimizedCode();
+ ClearOptimizationMarker();
return;
}
@@ -424,10 +432,17 @@ bool FeedbackVector::ClearSlots(Isolate* isolate) {
}
case FeedbackSlotKind::kStoreNamedSloppy:
case FeedbackSlotKind::kStoreNamedStrict:
- case FeedbackSlotKind::kStoreOwnNamed:
+ case FeedbackSlotKind::kStoreOwnNamed: {
+ StoreICNexus nexus(this, slot);
+ if (!nexus.IsCleared()) {
+ nexus.Clear();
+ feedback_updated = true;
+ }
+ break;
+ }
case FeedbackSlotKind::kStoreGlobalSloppy:
case FeedbackSlotKind::kStoreGlobalStrict: {
- StoreICNexus nexus(this, slot);
+ StoreGlobalICNexus nexus(this, slot);
if (!nexus.IsCleared()) {
nexus.Clear();
feedback_updated = true;
@@ -564,18 +579,6 @@ InlineCacheState LoadICNexus::StateFromFeedback() const {
return UNINITIALIZED;
}
-InlineCacheState LoadGlobalICNexus::StateFromFeedback() const {
- Isolate* isolate = GetIsolate();
- Object* feedback = GetFeedback();
-
- Object* extra = GetFeedbackExtra();
- if (!WeakCell::cast(feedback)->cleared() ||
- extra != *FeedbackVector::UninitializedSentinel(isolate)) {
- return MONOMORPHIC;
- }
- return UNINITIALIZED;
-}
-
InlineCacheState KeyedLoadICNexus::StateFromFeedback() const {
Isolate* isolate = GetIsolate();
Object* feedback = GetFeedback();
@@ -602,6 +605,56 @@ InlineCacheState KeyedLoadICNexus::StateFromFeedback() const {
return UNINITIALIZED;
}
+void GlobalICNexus::ConfigureUninitialized() {
+ Isolate* isolate = GetIsolate();
+ SetFeedback(isolate->heap()->empty_weak_cell(), SKIP_WRITE_BARRIER);
+ SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(isolate),
+ SKIP_WRITE_BARRIER);
+}
+
+void GlobalICNexus::ConfigurePropertyCellMode(Handle<PropertyCell> cell) {
+ Isolate* isolate = GetIsolate();
+ SetFeedback(*isolate->factory()->NewWeakCell(cell));
+ SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(isolate),
+ SKIP_WRITE_BARRIER);
+}
+
+bool GlobalICNexus::ConfigureLexicalVarMode(int script_context_index,
+ int context_slot_index) {
+ DCHECK_LE(0, script_context_index);
+ DCHECK_LE(0, context_slot_index);
+ if (!ContextIndexBits::is_valid(script_context_index) ||
+ !SlotIndexBits::is_valid(context_slot_index)) {
+ return false;
+ }
+ int config = ContextIndexBits::encode(script_context_index) |
+ SlotIndexBits::encode(context_slot_index);
+
+ SetFeedback(Smi::FromInt(config));
+ Isolate* isolate = GetIsolate();
+ SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(isolate),
+ SKIP_WRITE_BARRIER);
+ return true;
+}
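Note: given the bit fields declared in feedback-vector.h below (ContextIndexBits: 12 bits starting at bit 0; SlotIndexBits: the next 19 bits), the encoding works out as follows for script_context_index 2 and context_slot_index 7:

    // ContextIndexBits::encode(2) == 2
    // SlotIndexBits::encode(7)    == 7 << 12 == 28672
    // config                      == 2 | 28672 == 28674
    // 12 + 19 == 31 bits, so a valid config always fits the smi
    // payload (kSmiValueSize >= 31), which the STATIC_ASSERT checks.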
+
+void GlobalICNexus::ConfigureHandlerMode(Handle<Object> handler) {
+ SetFeedback(GetIsolate()->heap()->empty_weak_cell());
+ SetFeedbackExtra(*handler);
+}
+
+InlineCacheState GlobalICNexus::StateFromFeedback() const {
+ Isolate* isolate = GetIsolate();
+ Object* feedback = GetFeedback();
+ if (feedback->IsSmi()) return MONOMORPHIC;
+
+ Object* extra = GetFeedbackExtra();
+ if (!WeakCell::cast(feedback)->cleared() ||
+ extra != *FeedbackVector::UninitializedSentinel(isolate)) {
+ return MONOMORPHIC;
+ }
+ return UNINITIALIZED;
+}
+
InlineCacheState StoreICNexus::StateFromFeedback() const {
Isolate* isolate = GetIsolate();
Object* feedback = GetFeedback();
@@ -667,16 +720,31 @@ InlineCacheState CallICNexus::StateFromFeedback() const {
return UNINITIALIZED;
}
-int CallICNexus::ExtractCallCount() {
+int CallICNexus::GetCallCount() {
Object* call_count = GetFeedbackExtra();
CHECK(call_count->IsSmi());
- int value = Smi::ToInt(call_count);
- return value;
+ uint32_t value = static_cast<uint32_t>(Smi::ToInt(call_count));
+ return CallCountField::decode(value);
}
+void CallICNexus::SetSpeculationMode(SpeculationMode mode) {
+ Object* call_count = GetFeedbackExtra();
+ CHECK(call_count->IsSmi());
+ uint32_t value = static_cast<uint32_t>(Smi::ToInt(call_count));
+ int result = static_cast<int>(CallCountField::decode(value) |
+ SpeculationModeField::encode(mode));
+ SetFeedbackExtra(Smi::FromInt(result), SKIP_WRITE_BARRIER);
+}
+
+SpeculationMode CallICNexus::GetSpeculationMode() {
+ Object* call_count = GetFeedbackExtra();
+ CHECK(call_count->IsSmi());
+ uint32_t value = static_cast<uint32_t>(Smi::ToInt(call_count));
+ return SpeculationModeField::decode(value);
+}
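Note: the feedback-extra smi now packs two fields, declared further down in feedback-vector.h: SpeculationModeField in bit 0 and CallCountField in bits 1-31. A worked encoding, assuming the stored speculation-mode enum value is 1:

    // CallCountField::encode(5)          == 5 << 1  == 10
    // SpeculationModeField::encode(mode) == 1          (assumed enum value)
    // packed smi payload                 == 10 | 1  == 11
    // CallCountField::decode(11)         == 11 >> 1 == 5
    // SpeculationModeField::decode(11)   == 11 & 1  == 1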
float CallICNexus::ComputeCallFrequency() {
double const invocation_count = vector()->invocation_count();
- double const call_count = ExtractCallCount();
+ double const call_count = GetCallCount();
if (invocation_count == 0) {
// Prevent division by 0.
return 0.0f;
@@ -691,25 +759,6 @@ void CallICNexus::ConfigureUninitialized() {
SetFeedbackExtra(Smi::kZero, SKIP_WRITE_BARRIER);
}
-void LoadGlobalICNexus::ConfigureUninitialized() {
- Isolate* isolate = GetIsolate();
- SetFeedback(isolate->heap()->empty_weak_cell(), SKIP_WRITE_BARRIER);
- SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(isolate),
- SKIP_WRITE_BARRIER);
-}
-
-void LoadGlobalICNexus::ConfigurePropertyCellMode(Handle<PropertyCell> cell) {
- Isolate* isolate = GetIsolate();
- SetFeedback(*isolate->factory()->NewWeakCell(cell));
- SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(isolate),
- SKIP_WRITE_BARRIER);
-}
-
-void LoadGlobalICNexus::ConfigureHandlerMode(Handle<Object> handler) {
- SetFeedback(GetIsolate()->heap()->empty_weak_cell());
- SetFeedbackExtra(*handler);
-}
-
void FeedbackNexus::ConfigureMonomorphic(Handle<Name> name,
Handle<Map> receiver_map,
Handle<Object> handler) {
@@ -896,14 +945,10 @@ KeyedAccessStoreMode KeyedStoreICNexus::GetKeyedAccessStoreMode() const {
for (const Handle<Object>& maybe_code_handler : handlers) {
// The first handler that isn't the slow handler will have the bits we need.
Handle<Code> handler;
- if (maybe_code_handler->IsTuple3()) {
- // Elements transition.
- Handle<Tuple3> data_handler = Handle<Tuple3>::cast(maybe_code_handler);
- handler = handle(Code::cast(data_handler->value2()));
- } else if (maybe_code_handler->IsTuple2()) {
- // Element store with prototype chain check.
- Handle<Tuple2> data_handler = Handle<Tuple2>::cast(maybe_code_handler);
- handler = handle(Code::cast(data_handler->value2()));
+ if (maybe_code_handler->IsStoreHandler()) {
+ Handle<StoreHandler> data_handler =
+ Handle<StoreHandler>::cast(maybe_code_handler);
+ handler = handle(Code::cast(data_handler->smi_handler()));
} else if (maybe_code_handler->IsSmi()) {
// Skip proxy handlers.
DCHECK_EQ(*maybe_code_handler, *StoreHandler::StoreProxy(GetIsolate()));
diff --git a/deps/v8/src/feedback-vector.h b/deps/v8/src/feedback-vector.h
index fdcf9ff01a..9f8096d138 100644
--- a/deps/v8/src/feedback-vector.h
+++ b/deps/v8/src/feedback-vector.h
@@ -10,6 +10,7 @@
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/elements-kind.h"
+#include "src/globals.h"
#include "src/objects/map.h"
#include "src/objects/name.h"
#include "src/objects/object-macros.h"
@@ -89,6 +90,10 @@ inline bool IsKeyedStoreICKind(FeedbackSlotKind kind) {
kind == FeedbackSlotKind::kStoreKeyedStrict;
}
+inline bool IsGlobalICKind(FeedbackSlotKind kind) {
+ return IsLoadGlobalICKind(kind) || IsStoreGlobalICKind(kind);
+}
+
inline bool IsTypeProfileKind(FeedbackSlotKind kind) {
return kind == FeedbackSlotKind::kTypeProfile;
}
@@ -174,6 +179,9 @@ class FeedbackVector : public HeapObject {
Handle<Code> code);
void SetOptimizationMarker(OptimizationMarker marker);
+ // Clears the optimization marker in the feedback vector.
+ void ClearOptimizationMarker();
+
// Conversion from a slot to an integer index to the underlying array.
static int GetIndex(FeedbackSlot slot) { return slot.ToInt(); }
@@ -204,6 +212,7 @@ class FeedbackVector : public HeapObject {
bool Name(FeedbackSlot slot) const { return Name##Kind(GetKind(slot)); }
DEFINE_SLOT_KIND_PREDICATE(IsCallIC)
+ DEFINE_SLOT_KIND_PREDICATE(IsGlobalIC)
DEFINE_SLOT_KIND_PREDICATE(IsLoadIC)
DEFINE_SLOT_KIND_PREDICATE(IsLoadGlobalIC)
DEFINE_SLOT_KIND_PREDICATE(IsKeyedLoadIC)
@@ -644,11 +653,16 @@ class CallICNexus final : public FeedbackNexus {
return length == 0;
}
- int ExtractCallCount();
+ int GetCallCount();
+ void SetSpeculationMode(SpeculationMode mode);
+ SpeculationMode GetSpeculationMode();
// Compute the call frequency based on the call count and the invocation
// count (taken from the type feedback vector).
float ComputeCallFrequency();
+
+ typedef BitField<SpeculationMode, 0, 1> SpeculationModeField;
+ typedef BitField<uint32_t, 1, 31> CallCountField;
};
class LoadICNexus : public FeedbackNexus {
@@ -667,35 +681,6 @@ class LoadICNexus : public FeedbackNexus {
InlineCacheState StateFromFeedback() const override;
};
-class LoadGlobalICNexus : public FeedbackNexus {
- public:
- LoadGlobalICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK(vector->IsLoadGlobalIC(slot));
- }
- LoadGlobalICNexus(FeedbackVector* vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK(vector->IsLoadGlobalIC(slot));
- }
-
- int ExtractMaps(MapHandles* maps) const final {
- // LoadGlobalICs don't record map feedback.
- return 0;
- }
- MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const final {
- return MaybeHandle<Code>();
- }
- bool FindHandlers(ObjectHandles* code_list, int length = -1) const final {
- return length == 0;
- }
-
- void ConfigureUninitialized() override;
- void ConfigurePropertyCellMode(Handle<PropertyCell> cell);
- void ConfigureHandlerMode(Handle<Object> handler);
-
- InlineCacheState StateFromFeedback() const override;
-};
-
class KeyedLoadICNexus : public FeedbackNexus {
public:
KeyedLoadICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
@@ -719,13 +704,11 @@ class StoreICNexus : public FeedbackNexus {
public:
StoreICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
: FeedbackNexus(vector, slot) {
- DCHECK(vector->IsStoreIC(slot) || vector->IsStoreOwnIC(slot) ||
- vector->IsStoreGlobalIC(slot));
+ DCHECK(vector->IsStoreIC(slot) || vector->IsStoreOwnIC(slot));
}
StoreICNexus(FeedbackVector* vector, FeedbackSlot slot)
: FeedbackNexus(vector, slot) {
- DCHECK(vector->IsStoreIC(slot) || vector->IsStoreOwnIC(slot) ||
- vector->IsStoreGlobalIC(slot));
+ DCHECK(vector->IsStoreIC(slot) || vector->IsStoreOwnIC(slot));
}
void Clear() override { ConfigurePremonomorphic(); }
@@ -733,6 +716,74 @@ class StoreICNexus : public FeedbackNexus {
InlineCacheState StateFromFeedback() const override;
};
+// Base class for LoadGlobalICNexus and StoreGlobalICNexus.
+class GlobalICNexus : public FeedbackNexus {
+ public:
+ GlobalICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
+ : FeedbackNexus(vector, slot) {
+ DCHECK(vector->IsGlobalIC(slot));
+ }
+ GlobalICNexus(FeedbackVector* vector, FeedbackSlot slot)
+ : FeedbackNexus(vector, slot) {
+ DCHECK(vector->IsGlobalIC(slot));
+ }
+
+ int ExtractMaps(MapHandles* maps) const final {
+ // Load/StoreGlobalICs don't record map feedback.
+ return 0;
+ }
+ MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const final {
+ return MaybeHandle<Code>();
+ }
+ bool FindHandlers(ObjectHandles* code_list, int length = -1) const final {
+ return length == 0;
+ }
+
+ void ConfigureUninitialized() override;
+ void ConfigurePropertyCellMode(Handle<PropertyCell> cell);
+ // Returns false if the given combination of indices is not allowed.
+ bool ConfigureLexicalVarMode(int script_context_index,
+ int context_slot_index);
+ void ConfigureHandlerMode(Handle<Object> handler);
+
+ InlineCacheState StateFromFeedback() const override;
+
+// Bit positions in a smi that encodes lexical environment variable access.
+#define LEXICAL_MODE_BIT_FIELDS(V, _) \
+ V(ContextIndexBits, unsigned, 12, _) \
+ V(SlotIndexBits, unsigned, 19, _)
+
+ DEFINE_BIT_FIELDS(LEXICAL_MODE_BIT_FIELDS)
+#undef LEXICAL_MODE_BIT_FIELDS
+
+ // Make sure we don't overflow the smi.
+ STATIC_ASSERT(LEXICAL_MODE_BIT_FIELDS_Ranges::kBitsCount <= kSmiValueSize);
+};
+
+class LoadGlobalICNexus : public GlobalICNexus {
+ public:
+ LoadGlobalICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
+ : GlobalICNexus(vector, slot) {
+ DCHECK(vector->IsLoadGlobalIC(slot));
+ }
+ LoadGlobalICNexus(FeedbackVector* vector, FeedbackSlot slot)
+ : GlobalICNexus(vector, slot) {
+ DCHECK(vector->IsLoadGlobalIC(slot));
+ }
+};
+
+class StoreGlobalICNexus : public GlobalICNexus {
+ public:
+ StoreGlobalICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
+ : GlobalICNexus(vector, slot) {
+ DCHECK(vector->IsStoreGlobalIC(slot));
+ }
+ StoreGlobalICNexus(FeedbackVector* vector, FeedbackSlot slot)
+ : GlobalICNexus(vector, slot) {
+ DCHECK(vector->IsStoreGlobalIC(slot));
+ }
+};
+
// TODO(ishell): Currently we use StoreOwnIC only for storing properties that
// already exist in the boilerplate, and therefore we can use StoreIC.
typedef StoreICNexus StoreOwnICNexus;
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index 92635703bb..e40e182dad 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -26,7 +26,7 @@
#define FLAG_FULL(ftype, ctype, nam, def, cmt) \
V8_EXPORT_PRIVATE extern ctype FLAG_##nam;
#define FLAG_READONLY(ftype, ctype, nam, def, cmt) \
- static ctype const FLAG_##nam = def;
+ static constexpr ctype FLAG_##nam = def;
// We want to supply the actual storage and value for the flag variable in the
// .cc file. We only do this for writable flags.
@@ -44,7 +44,7 @@
// for MODE_META, so there is no impact on the flags interface.
#elif defined(FLAG_MODE_DEFINE_DEFAULTS)
#define FLAG_FULL(ftype, ctype, nam, def, cmt) \
- static ctype const FLAGDEFAULT_##nam = def;
+ static constexpr ctype FLAGDEFAULT_##nam = def;
// We want to write entries into our meta data table, for internal parsing and
// printing / etc in the flag parser code. We only do this for writable flags.
@@ -161,7 +161,6 @@ struct MaybeBoolFlag {
#define DEFINE_INT(nam, def, cmt) FLAG(INT, int, nam, def, cmt)
#define DEFINE_UINT(nam, def, cmt) FLAG(UINT, unsigned int, nam, def, cmt)
#define DEFINE_FLOAT(nam, def, cmt) FLAG(FLOAT, double, nam, def, cmt)
-#define DEFINE_SIZE_T(nam, def, cmt) FLAG(SIZE_T, size_t, nam, def, cmt)
#define DEFINE_STRING(nam, def, cmt) FLAG(STRING, const char*, nam, def, cmt)
#define DEFINE_ARGS(nam, cmt) \
FLAG(ARGS, JSArguments, nam, {0 COMMA nullptr}, cmt)
@@ -169,11 +168,16 @@ struct MaybeBoolFlag {
#define DEFINE_ALIAS_BOOL(alias, nam) FLAG_ALIAS(BOOL, bool, alias, nam)
#define DEFINE_ALIAS_INT(alias, nam) FLAG_ALIAS(INT, int, alias, nam)
#define DEFINE_ALIAS_FLOAT(alias, nam) FLAG_ALIAS(FLOAT, double, alias, nam)
-#define DEFINE_ALIAS_SIZE_T(alias, nam) FLAG_ALIAS(SIZE_T, size_t, alias, nam)
#define DEFINE_ALIAS_STRING(alias, nam) \
FLAG_ALIAS(STRING, const char*, alias, nam)
#define DEFINE_ALIAS_ARGS(alias, nam) FLAG_ALIAS(ARGS, JSArguments, alias, nam)
+#ifdef DEBUG
+#define DEFINE_DEBUG_BOOL DEFINE_BOOL
+#else
+#define DEFINE_DEBUG_BOOL DEFINE_BOOL_READONLY
+#endif
+
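Note: the intent of DEFINE_DEBUG_BOOL, given the FLAG_READONLY definition earlier in this file: in debug builds the flag is an ordinary mutable bool, while release builds collapse it to a constexpr false so that guarded tracing code is dead-code-eliminated. A sketch of both expansions for one flag (the exact expansion shapes are inferred from the macros above):

    // DEBUG build (DEFINE_BOOL):
    //   bool FLAG_trace_wasm_decoder = false;  // settable via the command line
    // Release build (DEFINE_BOOL_READONLY -> FLAG_READONLY):
    //   static constexpr bool FLAG_trace_wasm_decoder = false;
    // so `if (FLAG_trace_wasm_decoder) { ... }` folds away entirely.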
//
// Flags in all modes.
//
@@ -192,7 +196,10 @@ DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features")
DEFINE_IMPLICATION(es_staging, harmony)
// Enabling import.meta requires to also enable import()
DEFINE_IMPLICATION(harmony_import_meta, harmony_dynamic_import)
+
DEFINE_IMPLICATION(harmony_class_fields, harmony_public_fields)
+DEFINE_IMPLICATION(harmony_class_fields, harmony_static_fields)
+DEFINE_IMPLICATION(harmony_class_fields, harmony_private_fields)
// Features that are still work in progress (behind individual flags).
#define HARMONY_INPROGRESS(V) \
@@ -201,8 +208,9 @@ DEFINE_IMPLICATION(harmony_class_fields, harmony_public_fields)
V(harmony_function_sent, "harmony function.sent") \
V(harmony_do_expressions, "harmony do-expressions") \
V(harmony_class_fields, "harmony fields in class literals") \
- V(harmony_public_fields, "harmony public fields in class literals") \
- V(harmony_bigint, "harmony arbitrary precision integers")
+ V(harmony_static_fields, "harmony static fields in class literals") \
+ V(harmony_bigint, "harmony arbitrary precision integers") \
+ V(harmony_private_fields, "harmony private fields in class literals")
// Features that are complete (but still behind --harmony/es-staging flag).
#define HARMONY_STAGED(V) \
@@ -210,14 +218,15 @@ DEFINE_IMPLICATION(harmony_class_fields, harmony_public_fields)
V(harmony_restrict_constructor_return, \
"harmony disallow non undefined primitive return value from class " \
"constructor") \
- V(harmony_dynamic_import, "harmony dynamic import")
+ V(harmony_dynamic_import, "harmony dynamic import") \
+ V(harmony_public_fields, "harmony public fields in class literals") \
+ V(harmony_optional_catch_binding, "allow omitting binding in catch blocks")
// Features that are shipping (turned on by default, but internal flag remains).
#define HARMONY_SHIPPING_BASE(V) \
V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
V(harmony_regexp_named_captures, "harmony regexp named captures") \
V(harmony_regexp_property, "harmony Unicode regexp property classes") \
- V(harmony_async_iteration, "harmony async iteration") \
V(harmony_promise_finally, "harmony Promise.prototype.finally")
#ifdef V8_INTL_SUPPORT
@@ -267,7 +276,7 @@ DEFINE_BOOL(future, FUTURE_BOOL,
"Implies all staged features that we want to ship in the "
"not-too-far future")
-DEFINE_IMPLICATION(future, preparser_scope_analysis)
+DEFINE_IMPLICATION(future, background_compile)
DEFINE_IMPLICATION(future, write_protect_code_memory)
// Flags for experimental implementation features.
@@ -449,7 +458,6 @@ DEFINE_BOOL(turbo_jt, true, "enable jump threading in TurboFan")
DEFINE_BOOL(turbo_loop_peeling, true, "Turbofan loop peeling")
DEFINE_BOOL(turbo_loop_variable, true, "Turbofan loop variable optimization")
DEFINE_BOOL(turbo_cf_optimization, true, "optimize control flow in TurboFan")
-DEFINE_BOOL(turbo_frame_elision, true, "elide frames in TurboFan")
DEFINE_BOOL(turbo_escape, true, "enable escape analysis")
DEFINE_BOOL(turbo_instruction_scheduling, false,
"enable instruction scheduling in TurboFan")
@@ -458,12 +466,8 @@ DEFINE_BOOL(turbo_stress_instruction_scheduling, false,
DEFINE_BOOL(turbo_store_elimination, true,
"enable store-store elimination in TurboFan")
DEFINE_BOOL(trace_store_elimination, false, "trace store elimination")
-DEFINE_BOOL(turbo_experimental, false,
- "enable crashing features, for testing purposes only")
DEFINE_BOOL(turbo_rewrite_far_jumps, true,
"rewrite far to near jumps (ia32,x64)")
-// TODO(rmcilroy): Remove extra_masking once the finch experiment is removed.
-DEFINE_BOOL(extra_masking, false, "obsolete - has no effect")
#ifdef DISABLE_UNTRUSTED_CODE_MITIGATIONS
#define V8_DEFAULT_UNTRUSTED_CODE_MITIGATIONS false
@@ -489,14 +493,18 @@ DEFINE_BOOL(wasm_disable_structured_cloning, false,
"disable wasm structured cloning")
DEFINE_INT(wasm_num_compilation_tasks, 10,
"number of parallel compilation tasks for wasm")
-DEFINE_BOOL(wasm_trace_native_heap, false, "trace wasm native heap events")
-DEFINE_BOOL(wasm_jit_to_native, false,
+DEFINE_DEBUG_BOOL(wasm_trace_native_heap, false,
+ "trace wasm native heap events")
+DEFINE_BOOL(wasm_jit_to_native, true,
"JIT wasm code to native (not JS GC) memory")
+DEFINE_BOOL(wasm_write_protect_code_memory, false,
+ "write protect code memory on the wasm native heap")
+DEFINE_IMPLICATION(future, wasm_jit_to_native)
DEFINE_BOOL(wasm_trace_serialization, false,
"trace serialization/deserialization")
DEFINE_BOOL(wasm_async_compilation, true,
"enable actual asynchronous compilation for WebAssembly.compile")
-DEFINE_BOOL(wasm_stream_compilation, false,
+DEFINE_BOOL(wasm_stream_compilation, true,
"enable streaming compilation for WebAssembly")
DEFINE_IMPLICATION(wasm_stream_compilation, wasm_async_compilation)
DEFINE_BOOL(wasm_test_streaming, false,
@@ -508,21 +516,24 @@ DEFINE_UINT(wasm_max_mem_pages, v8::internal::wasm::kV8MaxWasmMemoryPages,
"maximum memory size of a wasm instance")
DEFINE_UINT(wasm_max_table_size, v8::internal::wasm::kV8MaxWasmTableSize,
"maximum table size of a wasm instance")
-DEFINE_BOOL(trace_wasm_decoder, false, "trace decoding of wasm code")
-DEFINE_BOOL(trace_wasm_decode_time, false, "trace decoding time of wasm code")
-DEFINE_BOOL(trace_wasm_compiler, false, "trace compiling of wasm code")
-DEFINE_BOOL(trace_wasm_interpreter, false, "trace interpretation of wasm code")
-DEFINE_BOOL(trace_wasm_streaming, false,
- "trace streaming compilation of wasm code")
+DEFINE_DEBUG_BOOL(trace_wasm_decoder, false, "trace decoding of wasm code")
+DEFINE_DEBUG_BOOL(trace_wasm_decode_time, false,
+ "trace decoding time of wasm code")
+DEFINE_DEBUG_BOOL(trace_wasm_compiler, false, "trace compiling of wasm code")
+DEFINE_DEBUG_BOOL(trace_wasm_interpreter, false,
+ "trace interpretation of wasm code")
+DEFINE_DEBUG_BOOL(trace_wasm_streaming, false,
+ "trace streaming compilation of wasm code")
DEFINE_INT(trace_wasm_ast_start, 0,
"start function for wasm AST trace (inclusive)")
DEFINE_INT(trace_wasm_ast_end, 0, "end function for wasm AST trace (exclusive)")
DEFINE_BOOL(liftoff, false,
"enable liftoff, the experimental wasm baseline compiler")
-DEFINE_BOOL(trace_liftoff, false, "trace liftoff, the wasm baseline compiler")
+DEFINE_DEBUG_BOOL(trace_liftoff, false,
+ "trace liftoff, the wasm baseline compiler")
DEFINE_UINT(skip_compiling_wasm_funcs, 0, "start compiling at function N")
-DEFINE_BOOL(wasm_break_on_decoder_error, false,
- "debug break when wasm decoder encounters an error")
+DEFINE_DEBUG_BOOL(wasm_break_on_decoder_error, false,
+ "debug break when wasm decoder encounters an error")
DEFINE_BOOL(wasm_trace_memory, false,
"print all memory updates performed in wasm code")
@@ -535,7 +546,7 @@ DEFINE_BOOL(trace_asm_scanner, false,
DEFINE_BOOL(trace_asm_parser, false, "verbose logging of asm.js parse failures")
DEFINE_BOOL(stress_validate_asm, false, "try to validate everything as asm.js")
-DEFINE_BOOL(dump_wasm_module, false, "dump wasm module bytes")
+DEFINE_DEBUG_BOOL(dump_wasm_module, false, "dump wasm module bytes")
DEFINE_STRING(dump_wasm_module_path, nullptr,
"directory to dump wasm modules to")
@@ -547,6 +558,8 @@ DEFINE_BOOL(experimental_wasm_mv, false,
"enable prototype multi-value support for wasm")
DEFINE_BOOL(experimental_wasm_threads, false,
"enable prototype threads for wasm")
+DEFINE_BOOL(experimental_wasm_sat_f2i_conversions, false,
+ "enable non-trapping float-to-int conversions for wasm")
DEFINE_BOOL(wasm_opt, false, "enable wasm optimization")
DEFINE_BOOL(wasm_no_bounds_checks, false,
@@ -557,8 +570,9 @@ DEFINE_BOOL(wasm_no_stack_checks, false,
DEFINE_BOOL(wasm_trap_handler, false,
"use signal handlers to catch out of bounds memory access in wasm"
" (experimental, currently Linux x86_64 only)")
-DEFINE_BOOL(wasm_code_fuzzer_gen_test, false,
- "Generate a test case when running the wasm-code fuzzer")
+DEFINE_BOOL(wasm_fuzzer_gen_test, false,
+ "Generate a test case when running a wasm fuzzer")
+DEFINE_IMPLICATION(wasm_fuzzer_gen_test, single_threaded)
DEFINE_BOOL(print_wasm_code, false, "Print WebAssembly code")
DEFINE_BOOL(wasm_interpret_all, false,
"Execute all wasm code in the wasm interpreter")
@@ -567,8 +581,8 @@ DEFINE_BOOL(asm_wasm_lazy_compilation, false,
DEFINE_IMPLICATION(validate_asm, asm_wasm_lazy_compilation)
DEFINE_BOOL(wasm_lazy_compilation, false,
"enable lazy compilation for all wasm modules")
-DEFINE_BOOL(trace_wasm_lazy_compilation, false,
- "trace lazy compilation of wasm functions")
+DEFINE_DEBUG_BOOL(trace_wasm_lazy_compilation, false,
+ "trace lazy compilation of wasm functions")
// wasm-interpret-all resets {asm-,}wasm-lazy-compilation.
DEFINE_NEG_IMPLICATION(wasm_interpret_all, asm_wasm_lazy_compilation)
DEFINE_NEG_IMPLICATION(wasm_interpret_all, wasm_lazy_compilation)
@@ -577,24 +591,27 @@ DEFINE_NEG_IMPLICATION(wasm_interpret_all, wasm_lazy_compilation)
DEFINE_INT(frame_count, 1, "number of stack frames inspected by the profiler")
DEFINE_INT(type_info_threshold, 25,
"percentage of ICs that must have type info to allow optimization")
-DEFINE_INT(generic_ic_threshold, 30,
- "max percentage of megamorphic/generic ICs to allow optimization")
-DEFINE_INT(self_opt_count, 130, "call count before self-optimization")
+
+DEFINE_INT(stress_sampling_allocation_profiler, 0,
+ "Enables sampling allocation profiler with X as a sample interval")
// Garbage collections flags.
-DEFINE_SIZE_T(min_semi_space_size, 0,
- "min size of a semi-space (in MBytes), the new space consists of "
- "two semi-spaces")
-DEFINE_SIZE_T(max_semi_space_size, 0,
- "max size of a semi-space (in MBytes), the new space consists of "
- "two semi-spaces")
+DEFINE_INT(min_semi_space_size, 0,
+ "min size of a semi-space (in MBytes), the new space consists of "
+ "two semi-spaces")
+DEFINE_INT(max_semi_space_size, 0,
+ "max size of a semi-space (in MBytes), the new space consists of "
+ "two semi-spaces")
DEFINE_INT(semi_space_growth_factor, 2, "factor by which to grow the new space")
DEFINE_BOOL(experimental_new_space_growth_heuristic, false,
"Grow the new space based on the percentage of survivors instead "
"of their absolute value.")
-DEFINE_SIZE_T(max_old_space_size, 0, "max size of the old space (in Mbytes)")
-DEFINE_SIZE_T(initial_old_space_size, 0, "initial old space size (in Mbytes)")
+DEFINE_INT(max_old_space_size, 0, "max size of the old space (in Mbytes)")
+DEFINE_INT(initial_old_space_size, 0, "initial old space size (in Mbytes)")
DEFINE_BOOL(gc_global, false, "always perform global GCs")
+DEFINE_INT(random_gc_interval, 0,
+ "Collect garbage after random(0, X) allocations. It overrides "
+ "gc_interval.")
DEFINE_INT(gc_interval, -1, "garbage collect after <n> allocations")
DEFINE_INT(retain_maps_for_n_gc, 2,
"keeps maps alive for <n> old space garbage collections")
@@ -649,10 +666,13 @@ DEFINE_BOOL(parallel_pointer_update, true,
"use parallel pointer update during compaction")
DEFINE_BOOL(trace_incremental_marking, false,
"trace progress of the incremental marking")
+DEFINE_BOOL(trace_stress_marking, false, "trace stress marking progress")
+DEFINE_BOOL(trace_stress_scavenge, false, "trace stress scavenge progress")
DEFINE_BOOL(track_gc_object_stats, false,
"track object counts and memory usage")
DEFINE_BOOL(trace_gc_object_stats, false,
"trace object counts and memory usage")
+DEFINE_BOOL(trace_zone_stats, false, "trace zone memory usage")
DEFINE_BOOL(track_retaining_path, false,
"enable support for tracking retaining path")
DEFINE_BOOL(concurrent_array_buffer_freeing, true,
@@ -698,9 +718,19 @@ DEFINE_BOOL(stress_compaction_random, false,
"evacuation candidates. It overrides stress_compaction.")
DEFINE_BOOL(stress_incremental_marking, false,
"force incremental marking for small heaps and run it more often")
+
+DEFINE_BOOL(fuzzer_gc_analysis, false,
+ "prints number of allocations and enables analysis mode for gc "
+ "fuzz testing, e.g. --stress-marking, --stress-scavenge")
DEFINE_INT(stress_marking, 0,
"force marking at random points between 0 and X (inclusive) percent "
"of the regular marking start limit")
+DEFINE_INT(stress_scavenge, 0,
+ "force scavenge at random points between 0 and X (inclusive) "
+ "percent of the new space capacity")
+DEFINE_IMPLICATION(fuzzer_gc_analysis, stress_marking)
+DEFINE_IMPLICATION(fuzzer_gc_analysis, stress_scavenge)
+
DEFINE_BOOL(manual_evacuation_candidates_selection, false,
"Test mode only flag. It allows an unit test to select evacuation "
"candidates pages (requires --stress_compaction).")
@@ -780,8 +810,6 @@ DEFINE_BOOL(force_slow_path, false, "always take the slow path for builtins")
DEFINE_BOOL(inline_new, true, "use fast inline allocation")
// codegen-ia32.cc / codegen-arm.cc
-DEFINE_BOOL(trace_codegen, false,
- "print name of functions for which code is generated")
DEFINE_BOOL(trace, false, "trace function calls")
// codegen.cc
@@ -897,12 +925,11 @@ DEFINE_BOOL(trace_prototype_users, false,
DEFINE_BOOL(use_verbose_printer, true, "allows verbose printing")
DEFINE_BOOL(trace_for_in_enumerate, false, "Trace for-in enumerate slow-paths")
DEFINE_BOOL(trace_maps, false, "trace map creation")
+DEFINE_BOOL(trace_maps_details, true, "also log map details")
DEFINE_IMPLICATION(trace_maps, log_code)
// parser.cc
DEFINE_BOOL(allow_natives_syntax, false, "allow natives syntax")
-DEFINE_BOOL(trace_parse, false, "trace parsing and preparsing")
-DEFINE_BOOL(trace_preparse, false, "trace preparsing decisions")
DEFINE_BOOL(lazy_inner_functions, true, "enable lazy parsing inner functions")
DEFINE_BOOL(aggressive_lazy_inner_functions, false,
"even lazier inner function parsing")
@@ -932,8 +959,6 @@ DEFINE_INT(sim_stack_alignment, 8,
DEFINE_INT(sim_stack_size, 2 * MB / KB,
"Stack size of the ARM64, MIPS64 and PPC64 simulator "
"in kBytes (default is 2 MB)")
-DEFINE_BOOL(log_regs_modified, true,
- "When logging register values, only print modified registers.")
DEFINE_BOOL(log_colour, ENABLE_LOG_COLOUR,
"When logging, try to use coloured output.")
DEFINE_BOOL(ignore_asm_unimplemented_break, false,
@@ -964,7 +989,7 @@ DEFINE_INT(random_seed, 0,
"(0, the default, means to use system random).")
DEFINE_INT(fuzzer_random_seed, 0,
"Default seed for initializing fuzzer random generator "
- "(0, the default, means to use system random).")
+ "(0, the default, means to use v8's random number generator seed).")
DEFINE_BOOL(trace_rail, false, "trace RAIL mode")
DEFINE_BOOL(print_all_exceptions, false,
"print exception object and stack trace on each thrown exception")
@@ -1071,7 +1096,6 @@ DEFINE_BOOL(trace_contexts, false, "trace contexts operations")
// heap.cc
DEFINE_BOOL(gc_verbose, false, "print stuff during garbage collection")
-DEFINE_BOOL(heap_stats, false, "report heap statistics before and after GC")
DEFINE_BOOL(code_stats, false, "report code statistics after GC")
DEFINE_BOOL(print_handles, false, "report handles after GC")
DEFINE_BOOL(check_handle_count, false,
@@ -1094,8 +1118,6 @@ DEFINE_BOOL(trace_lazy, false, "trace lazy compilation")
DEFINE_BOOL(collect_heap_spill_statistics, false,
"report heap spill statistics along with heap_stats "
"(requires heap_stats)")
-DEFINE_BOOL(trace_live_bytes, false,
- "trace incrementing and resetting of live bytes")
DEFINE_BOOL(trace_isolates, false, "trace isolate state changes")
// Regexp
@@ -1110,8 +1132,8 @@ DEFINE_BOOL(trace_regexp_parser, false, "trace regexp parsing")
DEFINE_BOOL(print_break_location, false, "print source location on debug break")
// wasm instance management
-DEFINE_BOOL(trace_wasm_instances, false,
- "trace creation and collection of wasm instances")
+DEFINE_DEBUG_BOOL(trace_wasm_instances, false,
+ "trace creation and collection of wasm instances")
//
// Logging and profiling flags
@@ -1126,8 +1148,6 @@ DEFINE_BOOL(log_all, false, "Log all events to the log file.")
DEFINE_BOOL(log_api, false, "Log API events to the log file.")
DEFINE_BOOL(log_code, false,
"Log code events to the log file without profiling.")
-DEFINE_BOOL(log_gc, false,
- "Log heap samples on garbage collection for the hp2ps tool.")
DEFINE_BOOL(log_handles, false, "Log global handle events.")
DEFINE_BOOL(log_suspect, false, "Log suspect operations.")
DEFINE_BOOL(log_source_code, false, "Log source code.")
@@ -1242,9 +1262,6 @@ DEFINE_IMPLICATION(print_all_code, print_code_verbose)
DEFINE_IMPLICATION(print_all_code, print_builtin_code)
DEFINE_IMPLICATION(print_all_code, print_code_stubs)
DEFINE_IMPLICATION(print_all_code, code_comments)
-#ifdef DEBUG
-DEFINE_IMPLICATION(print_all_code, trace_codegen)
-#endif
#endif
#undef FLAG
@@ -1317,6 +1334,7 @@ DEFINE_IMPLICATION(unbox_double_fields, track_double_fields)
#undef DEFINE_BOOL
#undef DEFINE_MAYBE_BOOL
+#undef DEFINE_DEBUG_BOOL
#undef DEFINE_INT
#undef DEFINE_STRING
#undef DEFINE_FLOAT
diff --git a/deps/v8/src/flags.cc b/deps/v8/src/flags.cc
index a51a4e7d71..693e514e94 100644
--- a/deps/v8/src/flags.cc
+++ b/deps/v8/src/flags.cc
@@ -5,7 +5,6 @@
#include "src/flags.h"
#include <cctype>
-#include <cerrno>
#include <cstdlib>
#include <sstream>
@@ -40,7 +39,6 @@ struct Flag {
TYPE_INT,
TYPE_UINT,
TYPE_FLOAT,
- TYPE_SIZE_T,
TYPE_STRING,
TYPE_ARGS
};
@@ -83,11 +81,6 @@ struct Flag {
return reinterpret_cast<double*>(valptr_);
}
- size_t* size_t_variable() const {
- DCHECK(type_ == TYPE_SIZE_T);
- return reinterpret_cast<size_t*>(valptr_);
- }
-
const char* string_value() const {
DCHECK(type_ == TYPE_STRING);
return *reinterpret_cast<const char**>(valptr_);
@@ -126,11 +119,6 @@ struct Flag {
return *reinterpret_cast<const double*>(defptr_);
}
- size_t size_t_default() const {
- DCHECK(type_ == TYPE_SIZE_T);
- return *reinterpret_cast<const size_t*>(defptr_);
- }
-
const char* string_default() const {
DCHECK(type_ == TYPE_STRING);
return *reinterpret_cast<const char* const *>(defptr_);
@@ -154,8 +142,6 @@ struct Flag {
return *uint_variable() == uint_default();
case TYPE_FLOAT:
return *float_variable() == float_default();
- case TYPE_SIZE_T:
- return *size_t_variable() == size_t_default();
case TYPE_STRING: {
const char* str1 = string_value();
const char* str2 = string_default();
@@ -187,9 +173,6 @@ struct Flag {
case TYPE_FLOAT:
*float_variable() = float_default();
break;
- case TYPE_SIZE_T:
- *size_t_variable() = size_t_default();
- break;
case TYPE_STRING:
set_string_value(string_default(), false);
break;
@@ -218,8 +201,6 @@ static const char* Type2String(Flag::FlagType type) {
case Flag::TYPE_UINT:
return "uint";
case Flag::TYPE_FLOAT: return "float";
- case Flag::TYPE_SIZE_T:
- return "size_t";
case Flag::TYPE_STRING: return "string";
case Flag::TYPE_ARGS: return "arguments";
}
@@ -246,9 +227,6 @@ std::ostream& operator<<(std::ostream& os, const Flag& flag) { // NOLINT
case Flag::TYPE_FLOAT:
os << *flag.float_variable();
break;
- case Flag::TYPE_SIZE_T:
- os << *flag.size_t_variable();
- break;
case Flag::TYPE_STRING: {
const char* str = flag.string_value();
os << (str ? str : "nullptr");
@@ -380,27 +358,6 @@ static Flag* FindFlag(const char* name) {
return nullptr;
}
-template <typename T>
-bool TryParseUnsigned(Flag* flag, const char* arg, const char* value,
- char** endp, T* out_val) {
- // We do not use strtoul because it accepts negative numbers.
- // Rejects values >= 2**63 when T is 64 bits wide but that
- // seems like an acceptable trade-off.
- uint64_t max = static_cast<uint64_t>(std::numeric_limits<T>::max());
- errno = 0;
- int64_t val = static_cast<int64_t>(strtoll(value, endp, 10));
- if (val < 0 || static_cast<uint64_t>(val) > max || errno != 0) {
- PrintF(stderr,
- "Error: Value for flag %s of type %s is out of bounds "
- "[0-%" PRIu64
- "]\n"
- "Try --help for options\n",
- arg, Type2String(flag->type()), max);
- return false;
- }
- *out_val = static_cast<T>(val);
- return true;
-}
// static
int FlagList::SetFlagsFromCommandLine(int* argc,
@@ -465,21 +422,27 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
case Flag::TYPE_INT:
*flag->int_variable() = static_cast<int>(strtol(value, &endp, 10));
break;
- case Flag::TYPE_UINT:
- if (!TryParseUnsigned(flag, arg, value, &endp,
- flag->uint_variable())) {
+ case Flag::TYPE_UINT: {
+ // We do not use strtoul because it accepts negative numbers.
+ int64_t val = static_cast<int64_t>(strtoll(value, &endp, 10));
+ if (val < 0 || val > std::numeric_limits<unsigned int>::max()) {
+ PrintF(stderr,
+ "Error: Value for flag %s of type %s is out of bounds "
+ "[0-%" PRIu64
+ "]\n"
+ "Try --help for options\n",
+ arg, Type2String(flag->type()),
+ static_cast<uint64_t>(
+ std::numeric_limits<unsigned int>::max()));
return_code = j;
+ break;
}
+ *flag->uint_variable() = static_cast<unsigned int>(val);
break;
+ }
case Flag::TYPE_FLOAT:
*flag->float_variable() = strtod(value, &endp);
break;
- case Flag::TYPE_SIZE_T:
- if (!TryParseUnsigned(flag, arg, value, &endp,
- flag->size_t_variable())) {
- return_code = j;
- }
- break;
case Flag::TYPE_STRING:
flag->set_string_value(value ? StrDup(value) : nullptr, true);
break;
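Note: with the templated TryParseUnsigned helper gone, the bounds check lives inline in the one remaining unsigned case. The same idea as a self-contained sketch (the helper name is illustrative, not from the source):

    #include <cstdint>
    #include <cstdlib>
    #include <limits>

    // Parse a non-negative decimal into an unsigned int. strtoll is used
    // deliberately: strtoul would accept "-1" by wrapping it.
    bool ParseUintFlag(const char* value, unsigned int* out) {
      char* endp = nullptr;
      int64_t val = static_cast<int64_t>(strtoll(value, &endp, 10));
      if (endp == value) return false;  // no digits consumed
      if (val < 0 || val > std::numeric_limits<unsigned int>::max())
        return false;                   // outside [0, UINT_MAX]
      *out = static_cast<unsigned int>(val);
      return true;
    }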
@@ -619,10 +582,13 @@ void FlagList::PrintHelp() {
" run the new debugging shell\n\n"
"Options:\n";
- for (size_t i = 0; i < num_flags; ++i) {
- Flag* f = &flags[i];
- os << " --" << f->name() << " (" << f->comment() << ")\n"
- << " type: " << Type2String(f->type()) << " default: " << *f
+ for (const Flag& f : flags) {
+ os << " --";
+ for (const char* c = f.name(); *c != '\0'; ++c) {
+ os << NormalizeChar(*c);
+ }
+ os << " (" << f.comment() << ")\n"
+ << " type: " << Type2String(f.type()) << " default: " << f
<< "\n";
}
}
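Note: PrintHelp now renders flag names one character at a time through NormalizeChar. That helper is defined elsewhere in flags.cc and is not part of this hunk; assuming it performs the usual underscore-to-dash mapping (V8 accepts both spellings on the command line), it amounts to:

    // Assumed shape of the helper (not shown in this diff):
    static char NormalizeChar(char ch) { return ch == '_' ? '-' : ch; }
    // e.g. "--dump_wasm_module" prints as "--dump-wasm-module".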
diff --git a/deps/v8/src/frame-constants.h b/deps/v8/src/frame-constants.h
index 8d2d1f8cc4..f042855657 100644
--- a/deps/v8/src/frame-constants.h
+++ b/deps/v8/src/frame-constants.h
@@ -217,7 +217,8 @@ class ArgumentsAdaptorFrameConstants : public TypedFrameConstants {
// FP-relative.
static const int kFunctionOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
static const int kLengthOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
- DEFINE_TYPED_FRAME_SIZES(2);
+ static const int kPaddingOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(2);
+ DEFINE_TYPED_FRAME_SIZES(3);
};
class BuiltinFrameConstants : public TypedFrameConstants {
@@ -241,9 +242,10 @@ class ConstructFrameConstants : public TypedFrameConstants {
static const int kContextOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
static const int kLengthOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
static const int kConstructorOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(2);
+ static const int kPaddingOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(3);
static const int kNewTargetOrImplicitReceiverOffset =
- TYPED_FRAME_PUSHED_VALUE_OFFSET(3);
- DEFINE_TYPED_FRAME_SIZES(4);
+ TYPED_FRAME_PUSHED_VALUE_OFFSET(4);
+ DEFINE_TYPED_FRAME_SIZES(5);
};
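Note: both the arguments-adaptor and construct frames gain an explicit padding slot, presumably to keep the pushed part of the frame a multiple of two pointers (16-byte stack alignment, which arm64 enforces in hardware). The resulting construct-frame layout, slot by slot:

    // FP-relative typed slots after this change:
    //   slot 0: context
    //   slot 1: argument count (length)
    //   slot 2: constructor
    //   slot 3: padding                          <- new
    //   slot 4: new.target / implicit receiver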
class BuiltinContinuationFrameConstants : public TypedFrameConstants {
diff --git a/deps/v8/src/frames-inl.h b/deps/v8/src/frames-inl.h
index 3438c1dfb0..f5a14471ba 100644
--- a/deps/v8/src/frames-inl.h
+++ b/deps/v8/src/frames-inl.h
@@ -50,18 +50,22 @@ inline Address* StackFrame::ResolveReturnAddressLocation(Address* pc_address) {
}
}
+inline NativeFrame::NativeFrame(StackFrameIteratorBase* iterator)
+ : StackFrame(iterator) {}
-inline EntryFrame::EntryFrame(StackFrameIteratorBase* iterator)
- : StackFrame(iterator) {
+inline Address NativeFrame::GetCallerStackPointer() const {
+ return fp() + CommonFrameConstants::kCallerSPOffset;
}
+inline EntryFrame::EntryFrame(StackFrameIteratorBase* iterator)
+ : StackFrame(iterator) {}
+
inline ConstructEntryFrame::ConstructEntryFrame(
StackFrameIteratorBase* iterator)
: EntryFrame(iterator) {}
inline ExitFrame::ExitFrame(StackFrameIteratorBase* iterator)
- : StackFrame(iterator) {
-}
+ : StackFrame(iterator) {}
inline BuiltinExitFrame::BuiltinExitFrame(StackFrameIteratorBase* iterator)
: ExitFrame(iterator) {}
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index 23713197f5..d5a04ad933 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -16,7 +16,8 @@
#include "src/string-stream.h"
#include "src/visitors.h"
#include "src/vm-state-inl.h"
-#include "src/wasm/wasm-heap.h"
+#include "src/wasm/wasm-code-manager.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/zone/zone-containers.h"
@@ -424,7 +425,7 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
Memory::Object_at(state->fp + StandardFrameConstants::kFunctionOffset);
if (!StackFrame::IsTypeMarker(marker)) {
if (maybe_function->IsSmi()) {
- return NONE;
+ return NATIVE;
} else if (IsInterpreterFramePc(iterator->isolate(),
*(state->pc_address))) {
return INTERPRETED;
@@ -439,19 +440,19 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
// than checking the flag, then getting the code, and then, if both are true
// (non-null, respectivelly), going down the wasm_code path.
wasm::WasmCode* wasm_code =
- iterator->isolate()->wasm_code_manager()->LookupCode(pc);
+ iterator->isolate()->wasm_engine()->code_manager()->LookupCode(pc);
if (wasm_code != nullptr) {
switch (wasm_code->kind()) {
- case wasm::WasmCode::InterpreterStub:
+ case wasm::WasmCode::kInterpreterStub:
return WASM_INTERPRETER_ENTRY;
- case wasm::WasmCode::Function:
- case wasm::WasmCode::CopiedStub:
+ case wasm::WasmCode::kFunction:
+ case wasm::WasmCode::kCopiedStub:
return WASM_COMPILED;
- case wasm::WasmCode::LazyStub:
+ case wasm::WasmCode::kLazyStub:
if (StackFrame::IsTypeMarker(marker)) break;
return BUILTIN;
- case wasm::WasmCode::WasmToJsWrapper:
- case wasm::WasmCode::WasmToWasmWrapper:
+ case wasm::WasmCode::kWasmToJsWrapper:
+ case wasm::WasmCode::kWasmToWasmWrapper:
return WASM_TO_JS;
default:
UNREACHABLE();
@@ -491,7 +492,7 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
break;
}
} else {
- return NONE;
+ return NATIVE;
}
}
}
@@ -519,7 +520,7 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
// interpreted frames, should never have a StackFrame::Type
// marker. If we find one, we're likely being called from the
// profiler in a bogus stack frame.
- return NONE;
+ return NATIVE;
}
}
@@ -541,6 +542,14 @@ Address StackFrame::UnpaddedFP() const {
return fp();
}
+void NativeFrame::ComputeCallerState(State* state) const {
+ state->sp = caller_sp();
+ state->fp = Memory::Address_at(fp() + CommonFrameConstants::kCallerFPOffset);
+ state->pc_address = ResolveReturnAddressLocation(
+ reinterpret_cast<Address*>(fp() + CommonFrameConstants::kCallerPCOffset));
+ state->callee_pc_address = nullptr;
+ state->constant_pool_address = nullptr;
+}
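Note: a NATIVE frame (the type ComputeType now reports instead of NONE for C++ frames interleaved on the stack) carries no V8-specific metadata, so walking past one uses only the generic linkage slots:

    // Generic frame linkage used above (offsets are FP-relative):
    //   fp + kCallerFPOffset -> caller's frame pointer
    //   fp + kCallerPCOffset -> return address in the caller
    //   caller_sp()          -> caller's stack pointer
    // No callee PC or constant pool can be recovered from a native
    // frame, hence the nullptr fields.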
Code* EntryFrame::unchecked_code() const {
return isolate()->heap()->js_entry_code();
@@ -777,7 +786,7 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
Address inner_pointer = pc();
const wasm::WasmCode* wasm_code =
FLAG_wasm_jit_to_native
- ? isolate()->wasm_code_manager()->LookupCode(inner_pointer)
+ ? isolate()->wasm_engine()->code_manager()->LookupCode(inner_pointer)
: nullptr;
SafepointEntry safepoint_entry;
uint32_t stack_slots;
@@ -789,7 +798,7 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
wasm_code->stack_slots());
safepoint_entry = table.FindEntry(inner_pointer);
stack_slots = wasm_code->stack_slots();
- has_tagged_params = wasm_code->kind() != wasm::WasmCode::Function;
+ has_tagged_params = wasm_code->kind() != wasm::WasmCode::kFunction;
} else {
InnerPointerToCodeCache::InnerPointerToCodeCacheEntry* entry =
isolate()->inner_pointer_to_code_cache()->GetCacheEntry(inner_pointer);
@@ -840,6 +849,7 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
// in the place on the stack that one finds the frame type.
UNREACHABLE();
break;
+ case NATIVE:
case NONE:
case NUMBER_OF_TYPES:
case MANUAL:
@@ -1236,26 +1246,26 @@ WASM_SUMMARY_DISPATCH(int, byte_offset)
#undef WASM_SUMMARY_DISPATCH
int FrameSummary::WasmFrameSummary::SourcePosition() const {
- Handle<WasmCompiledModule> compiled_module(wasm_instance()->compiled_module(),
- isolate());
- return WasmCompiledModule::GetSourcePosition(compiled_module,
- function_index(), byte_offset(),
- at_to_number_conversion());
+ Handle<WasmSharedModuleData> shared(
+ wasm_instance()->compiled_module()->shared(), isolate());
+ return WasmSharedModuleData::GetSourcePosition(
+ shared, function_index(), byte_offset(), at_to_number_conversion());
}
Handle<Script> FrameSummary::WasmFrameSummary::script() const {
- return handle(wasm_instance()->compiled_module()->script());
+ return handle(wasm_instance()->compiled_module()->shared()->script());
}
Handle<String> FrameSummary::WasmFrameSummary::FunctionName() const {
- Handle<WasmCompiledModule> compiled_module(
- wasm_instance()->compiled_module());
- return WasmCompiledModule::GetFunctionName(compiled_module->GetIsolate(),
- compiled_module, function_index());
+ Handle<WasmSharedModuleData> shared(
+ wasm_instance()->compiled_module()->shared(), isolate());
+ return WasmSharedModuleData::GetFunctionName(isolate(), shared,
+ function_index());
}
Handle<Context> FrameSummary::WasmFrameSummary::native_context() const {
- return wasm_instance()->compiled_module()->native_context();
+ return handle(wasm_instance()->compiled_module()->native_context(),
+ isolate());
}
FrameSummary::WasmCompiledFrameSummary::WasmCompiledFrameSummary(
@@ -1535,6 +1545,7 @@ void OptimizedFrame::GetFunctions(
DCHECK_EQ(Translation::BEGIN, opcode);
it.Next(); // Skip frame count.
int jsframe_count = it.Next();
+ it.Next(); // Skip update feedback count.
// We insert the frames in reverse order because the frames
// in the deoptimization translation are ordered bottom-to-top.
@@ -1691,11 +1702,18 @@ void WasmCompiledFrame::Print(StringStream* accumulator, PrintMode mode,
accumulator->Add("WASM [");
Script* script = this->script();
accumulator->PrintName(script->name());
- int pc = static_cast<int>(this->pc() - LookupCode()->instruction_start());
- Object* instance = this->wasm_instance();
+ Address instruction_start = FLAG_wasm_jit_to_native
+ ? isolate()
+ ->wasm_engine()
+ ->code_manager()
+ ->LookupCode(pc())
+ ->instructions()
+ .start()
+ : LookupCode()->instruction_start();
+ int pc = static_cast<int>(this->pc() - instruction_start);
+ WasmSharedModuleData* shared = wasm_instance()->compiled_module()->shared();
Vector<const uint8_t> raw_func_name =
- WasmInstanceObject::cast(instance)->compiled_module()->GetRawFunctionName(
- this->function_index());
+ shared->GetRawFunctionName(this->function_index());
const int kMaxPrintedFunctionName = 64;
char func_name[kMaxPrintedFunctionName + 1];
int func_name_len = std::min(kMaxPrintedFunctionName, raw_func_name.length());
@@ -1718,11 +1736,18 @@ Address WasmCompiledFrame::GetCallerStackPointer() const {
return fp() + ExitFrameConstants::kCallerSPOffset;
}
+WasmCodeWrapper WasmCompiledFrame::wasm_code() const {
+ return FLAG_wasm_jit_to_native
+ ? WasmCodeWrapper(
+ isolate()->wasm_engine()->code_manager()->LookupCode(pc()))
+ : WasmCodeWrapper(Handle<Code>(LookupCode(), isolate()));
+}
+
WasmInstanceObject* WasmCompiledFrame::wasm_instance() const {
WasmInstanceObject* obj =
FLAG_wasm_jit_to_native
? WasmInstanceObject::GetOwningInstance(
- isolate()->wasm_code_manager()->LookupCode(pc()))
+ isolate()->wasm_engine()->code_manager()->LookupCode(pc()))
: WasmInstanceObject::GetOwningInstanceGC(LookupCode());
// This is a live stack frame; it must have a live instance.
DCHECK_NOT_NULL(obj);
@@ -1734,7 +1759,7 @@ uint32_t WasmCompiledFrame::function_index() const {
}
Script* WasmCompiledFrame::script() const {
- return wasm_instance()->compiled_module()->script();
+ return wasm_instance()->compiled_module()->shared()->script();
}
int WasmCompiledFrame::position() const {
@@ -1743,25 +1768,9 @@ int WasmCompiledFrame::position() const {
void WasmCompiledFrame::Summarize(std::vector<FrameSummary>* functions) const {
DCHECK(functions->empty());
- WasmCodeWrapper code;
- Handle<WasmInstanceObject> instance;
- int offset = -1;
- if (FLAG_wasm_jit_to_native) {
- code = WasmCodeWrapper(isolate()->wasm_code_manager()->LookupCode(pc()));
- offset =
- static_cast<int>(pc() - code.GetWasmCode()->instructions().start());
- instance = Handle<WasmInstanceObject>(
- WasmInstanceObject::cast(code.GetWasmCode()
- ->owner()
- ->compiled_module()
- ->weak_owning_instance()
- ->value()),
- isolate());
- } else {
- code = WasmCodeWrapper(Handle<Code>(LookupCode(), isolate()));
- offset = static_cast<int>(pc() - code.GetCode()->instruction_start());
- instance = Handle<WasmInstanceObject>(wasm_instance(), isolate());
- }
+ WasmCodeWrapper code = wasm_code();
+ int offset = static_cast<int>(pc() - code.instructions().start());
+ Handle<WasmInstanceObject> instance = code.wasm_instance();
FrameSummary::WasmCompiledFrameSummary summary(
isolate(), instance, code, offset, at_to_number_conversion());
functions->push_back(summary);
@@ -1774,9 +1783,10 @@ bool WasmCompiledFrame::at_to_number_conversion() const {
int pos = -1;
if (FLAG_wasm_jit_to_native) {
wasm::WasmCode* code =
- callee_pc ? isolate()->wasm_code_manager()->LookupCode(callee_pc)
- : nullptr;
- if (!code || code->kind() != wasm::WasmCode::WasmToJsWrapper) return false;
+ callee_pc
+ ? isolate()->wasm_engine()->code_manager()->LookupCode(callee_pc)
+ : nullptr;
+ if (!code || code->kind() != wasm::WasmCode::kWasmToJsWrapper) return false;
int offset = static_cast<int>(callee_pc - code->instructions().start());
pos = FrameSummary::WasmCompiledFrameSummary::GetWasmSourcePosition(code,
offset);
@@ -1800,11 +1810,11 @@ int WasmCompiledFrame::LookupExceptionHandlerInTable(int* stack_slots) {
*stack_slots = code->stack_slots();
return table->LookupReturn(pc_offset);
}
- wasm::WasmCode* code = isolate()->wasm_code_manager()->LookupCode(pc());
+ wasm::WasmCode* code =
+ isolate()->wasm_engine()->code_manager()->LookupCode(pc());
if (!code->IsAnonymous()) {
Object* table_entry =
- code->owner()->compiled_module()->ptr_to_handler_table()->get(
- code->index());
+ code->owner()->compiled_module()->handler_table()->get(code->index());
if (table_entry->IsHandlerTable()) {
HandlerTable* table = HandlerTable::cast(table_entry);
int pc_offset = static_cast<int>(pc() - code->instructions().start());
@@ -1854,7 +1864,7 @@ WasmInstanceObject* WasmInterpreterEntryFrame::wasm_instance() const {
WasmInstanceObject* ret =
FLAG_wasm_jit_to_native
? WasmInstanceObject::GetOwningInstance(
- isolate()->wasm_code_manager()->LookupCode(pc()))
+ isolate()->wasm_engine()->code_manager()->LookupCode(pc()))
: WasmInstanceObject::GetOwningInstanceGC(LookupCode());
// This is a live stack frame; it must have a live wasm instance.
DCHECK_NOT_NULL(ret);
@@ -1862,7 +1872,7 @@ WasmInstanceObject* WasmInterpreterEntryFrame::wasm_instance() const {
}
Script* WasmInterpreterEntryFrame::script() const {
- return wasm_instance()->compiled_module()->script();
+ return wasm_instance()->compiled_module()->shared()->script();
}
int WasmInterpreterEntryFrame::position() const {
@@ -1870,7 +1880,7 @@ int WasmInterpreterEntryFrame::position() const {
}
Object* WasmInterpreterEntryFrame::context() const {
- return wasm_instance()->compiled_module()->ptr_to_native_context();
+ return wasm_instance()->compiled_module()->native_context();
}
Address WasmInterpreterEntryFrame::GetCallerStackPointer() const {
@@ -2081,10 +2091,11 @@ void JavaScriptFrame::Iterate(RootVisitor* v) const {
void InternalFrame::Iterate(RootVisitor* v) const {
wasm::WasmCode* wasm_code =
- FLAG_wasm_jit_to_native ? isolate()->wasm_code_manager()->LookupCode(pc())
- : nullptr;
+ FLAG_wasm_jit_to_native
+ ? isolate()->wasm_engine()->code_manager()->LookupCode(pc())
+ : nullptr;
if (wasm_code != nullptr) {
- DCHECK(wasm_code->kind() == wasm::WasmCode::LazyStub);
+ DCHECK(wasm_code->kind() == wasm::WasmCode::kLazyStub);
} else {
Code* code = LookupCode();
IteratePc(v, pc_address(), constant_pool_address(), code);
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index e21d62764b..0c988770f6 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -64,8 +64,9 @@ class InnerPointerToCodeCache {
class StackHandlerConstants : public AllStatic {
public:
static const int kNextOffset = 0 * kPointerSize;
+ static const int kPaddingOffset = 1 * kPointerSize;
- static const int kSize = kNextOffset + kPointerSize;
+ static const int kSize = kPaddingOffset + kPointerSize;
static const int kSlotCount = kSize >> kPointerSizeLog2;
};
@@ -104,7 +105,8 @@ class StackHandler BASE_EMBEDDED {
V(CONSTRUCT, ConstructFrame) \
V(ARGUMENTS_ADAPTOR, ArgumentsAdaptorFrame) \
V(BUILTIN, BuiltinFrame) \
- V(BUILTIN_EXIT, BuiltinExitFrame)
+ V(BUILTIN_EXIT, BuiltinExitFrame) \
+ V(NATIVE, NativeFrame)
// Abstract base class for all stack frames.
class StackFrame BASE_EMBEDDED {
@@ -180,8 +182,7 @@ class StackFrame BASE_EMBEDDED {
// and should be converted back to a stack frame type using MarkerToType.
// Otherwise, the value is a tagged function pointer.
static bool IsTypeMarker(intptr_t function_or_marker) {
- bool is_marker = ((function_or_marker & kSmiTagMask) == kSmiTag);
- return is_marker;
+ return (function_or_marker & kSmiTagMask) == kSmiTag;
}
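The simplified predicate works because of V8's pointer tagging: small integers carry a zero low bit, heap object pointers a one. A self-contained sketch of the same check, with the tag constants restated here as assumptions:

    #include <cassert>
    #include <cstdint>

    constexpr intptr_t kSmiTag = 0;      // assumed: smis carry a zero tag bit
    constexpr intptr_t kSmiTagMask = 1;  // assumed: the tag lives in the low bit

    bool IsTypeMarker(intptr_t function_or_marker) {
      return (function_or_marker & kSmiTagMask) == kSmiTag;
    }

    int main() {
      assert(IsTypeMarker(0x10));   // even word: smi-tagged type marker
      assert(!IsTypeMarker(0x11));  // odd word: heap object pointer
    }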
// Copy constructor; it breaks the connection to host iterator
@@ -328,6 +329,25 @@ class StackFrame BASE_EMBEDDED {
friend class SafeStackFrameIterator;
};
+class NativeFrame : public StackFrame {
+ public:
+ Type type() const override { return NATIVE; }
+
+ Code* unchecked_code() const override { return nullptr; }
+
+ // Garbage collection support.
+ void Iterate(RootVisitor* v) const override {}
+
+ protected:
+ inline explicit NativeFrame(StackFrameIteratorBase* iterator);
+
+ Address GetCallerStackPointer() const override;
+
+ private:
+ void ComputeCallerState(State* state) const override;
+
+ friend class StackFrameIteratorBase;
+};
// Entry frames are used to enter JavaScript execution from C.
class EntryFrame: public StackFrame {
@@ -949,6 +969,7 @@ class WasmCompiledFrame final : public StandardFrame {
// Accessors.
WasmInstanceObject* wasm_instance() const;
+ WasmCodeWrapper wasm_code() const;
uint32_t function_index() const;
Script* script() const override;
int position() const override;
diff --git a/deps/v8/src/gdb-jit.cc b/deps/v8/src/gdb-jit.cc
index fc35100a30..de56faa4fd 100644
--- a/deps/v8/src/gdb-jit.cc
+++ b/deps/v8/src/gdb-jit.cc
@@ -222,7 +222,7 @@ class MachOSection : public DebugSectionBase<MachOSectionHeader> {
public:
enum Type {
S_REGULAR = 0x0u,
- S_ATTR_COALESCED = 0xbu,
+ S_ATTR_COALESCED = 0xBu,
S_ATTR_SOME_INSTRUCTIONS = 0x400u,
S_ATTR_DEBUG = 0x02000000u,
S_ATTR_PURE_INSTRUCTIONS = 0x80000000u
@@ -297,9 +297,9 @@ class ELFSection : public DebugSectionBase<ELFSectionHeader> {
TYPE_DYNSYM = 11,
TYPE_LOPROC = 0x70000000,
TYPE_X86_64_UNWIND = 0x70000001,
- TYPE_HIPROC = 0x7fffffff,
+ TYPE_HIPROC = 0x7FFFFFFF,
TYPE_LOUSER = 0x80000000,
- TYPE_HIUSER = 0xffffffff
+ TYPE_HIUSER = 0xFFFFFFFF
};
enum Flags {
@@ -308,9 +308,7 @@ class ELFSection : public DebugSectionBase<ELFSectionHeader> {
FLAG_EXEC = 4
};
- enum SpecialIndexes {
- INDEX_ABSOLUTE = 0xfff1
- };
+ enum SpecialIndexes { INDEX_ABSOLUTE = 0xFFF1 };
ELFSection(const char* name, Type type, uintptr_t align)
: name_(name), type_(type), align_(align) { }
@@ -650,20 +648,20 @@ class ELF BASE_EMBEDDED {
Writer::Slot<ELFHeader> header = w->CreateSlotHere<ELFHeader>();
#if (V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || \
(V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT))
- const uint8_t ident[16] =
- { 0x7f, 'E', 'L', 'F', 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ const uint8_t ident[16] = {0x7F, 'E', 'L', 'F', 1, 1, 1, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0};
#elif(V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_64_BIT) || \
(V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN)
- const uint8_t ident[16] =
- { 0x7f, 'E', 'L', 'F', 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ const uint8_t ident[16] = {0x7F, 'E', 'L', 'F', 2, 1, 1, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0};
#elif V8_TARGET_ARCH_PPC64 && V8_TARGET_BIG_ENDIAN && V8_OS_LINUX
- const uint8_t ident[16] = {0x7f, 'E', 'L', 'F', 2, 2, 1, 0,
+ const uint8_t ident[16] = {0x7F, 'E', 'L', 'F', 2, 2, 1, 0,
0, 0, 0, 0, 0, 0, 0, 0};
#elif V8_TARGET_ARCH_S390X
- const uint8_t ident[16] = {0x7f, 'E', 'L', 'F', 2, 2, 1, 3,
+ const uint8_t ident[16] = {0x7F, 'E', 'L', 'F', 2, 2, 1, 3,
0, 0, 0, 0, 0, 0, 0, 0};
#elif V8_TARGET_ARCH_S390
- const uint8_t ident[16] = {0x7f, 'E', 'L', 'F', 1, 2, 1, 3,
+ const uint8_t ident[16] = {0x7F, 'E', 'L', 'F', 1, 2, 1, 3,
0, 0, 0, 0, 0, 0, 0, 0};
#else
#error Unsupported target architecture.
@@ -1089,12 +1087,12 @@ class DebugInfoSection : public DebugSection {
DW_OP_reg7 = 0x57,
DW_OP_reg8 = 0x58,
DW_OP_reg9 = 0x59,
- DW_OP_reg10 = 0x5a,
- DW_OP_reg11 = 0x5b,
- DW_OP_reg12 = 0x5c,
- DW_OP_reg13 = 0x5d,
- DW_OP_reg14 = 0x5e,
- DW_OP_reg15 = 0x5f,
+ DW_OP_reg10 = 0x5A,
+ DW_OP_reg11 = 0x5B,
+ DW_OP_reg12 = 0x5C,
+ DW_OP_reg13 = 0x5D,
+ DW_OP_reg14 = 0x5E,
+ DW_OP_reg15 = 0x5F,
DW_OP_reg16 = 0x60,
DW_OP_reg17 = 0x61,
DW_OP_reg18 = 0x62,
@@ -1105,12 +1103,12 @@ class DebugInfoSection : public DebugSection {
DW_OP_reg23 = 0x67,
DW_OP_reg24 = 0x68,
DW_OP_reg25 = 0x69,
- DW_OP_reg26 = 0x6a,
- DW_OP_reg27 = 0x6b,
- DW_OP_reg28 = 0x6c,
- DW_OP_reg29 = 0x6d,
- DW_OP_reg30 = 0x6e,
- DW_OP_reg31 = 0x6f,
+ DW_OP_reg26 = 0x6A,
+ DW_OP_reg27 = 0x6B,
+ DW_OP_reg28 = 0x6C,
+ DW_OP_reg29 = 0x6D,
+ DW_OP_reg30 = 0x6E,
+ DW_OP_reg31 = 0x6F,
DW_OP_fbreg = 0x91 // 1 param: SLEB128 offset
};
@@ -1286,11 +1284,11 @@ class DebugAbbrevSection : public DebugSection {
// DWARF2 standard, figure 14.
enum DWARF2Tags {
DW_TAG_FORMAL_PARAMETER = 0x05,
- DW_TAG_POINTER_TYPE = 0xf,
+ DW_TAG_POINTER_TYPE = 0xF,
DW_TAG_COMPILE_UNIT = 0x11,
DW_TAG_STRUCTURE_TYPE = 0x13,
DW_TAG_BASE_TYPE = 0x24,
- DW_TAG_SUBPROGRAM = 0x2e,
+ DW_TAG_SUBPROGRAM = 0x2E,
DW_TAG_VARIABLE = 0x34
};
@@ -1304,11 +1302,11 @@ class DebugAbbrevSection : public DebugSection {
enum DWARF2Attribute {
DW_AT_LOCATION = 0x2,
DW_AT_NAME = 0x3,
- DW_AT_BYTE_SIZE = 0xb,
+ DW_AT_BYTE_SIZE = 0xB,
DW_AT_STMT_LIST = 0x10,
DW_AT_LOW_PC = 0x11,
DW_AT_HIGH_PC = 0x12,
- DW_AT_ENCODING = 0x3e,
+ DW_AT_ENCODING = 0x3E,
DW_AT_FRAME_BASE = 0x40,
DW_AT_TYPE = 0x49
};
@@ -1320,8 +1318,8 @@ class DebugAbbrevSection : public DebugSection {
DW_FORM_STRING = 0x8,
DW_FORM_DATA4 = 0x6,
DW_FORM_BLOCK = 0x9,
- DW_FORM_DATA1 = 0xb,
- DW_FORM_FLAG = 0xc,
+ DW_FORM_DATA1 = 0xB,
+ DW_FORM_FLAG = 0xC,
DW_FORM_REF4 = 0x13
};
diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc
index c101877a6f..7845d71fb1 100644
--- a/deps/v8/src/global-handles.cc
+++ b/deps/v8/src/global-handles.cc
@@ -262,7 +262,7 @@ class GlobalHandles::Node {
}
// Zap with something dangerous.
- *location() = reinterpret_cast<Object*>(0x6057ca11);
+ *location() = reinterpret_cast<Object*>(0x6057CA11);
typedef v8::WeakCallbackInfo<void> Data;
auto callback = reinterpret_cast<Data::Callback>(weak_callback_);
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index 8f5253016f..bc28181db1 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -98,7 +98,7 @@ namespace internal {
#endif
// Minimum stack size in KB required by compilers.
-const int kStackSpaceRequiredForCompilation = 40;
+constexpr int kStackSpaceRequiredForCompilation = 40;
// Determine whether double field unboxing feature is enabled.
#if V8_TARGET_ARCH_64_BIT
@@ -131,105 +131,106 @@ typedef byte* Address;
// -----------------------------------------------------------------------------
// Constants
-const int KB = 1024;
-const int MB = KB * KB;
-const int GB = KB * KB * KB;
-const int kMaxInt = 0x7FFFFFFF;
-const int kMinInt = -kMaxInt - 1;
-const int kMaxInt8 = (1 << 7) - 1;
-const int kMinInt8 = -(1 << 7);
-const int kMaxUInt8 = (1 << 8) - 1;
-const int kMinUInt8 = 0;
-const int kMaxInt16 = (1 << 15) - 1;
-const int kMinInt16 = -(1 << 15);
-const int kMaxUInt16 = (1 << 16) - 1;
-const int kMinUInt16 = 0;
-
-const uint32_t kMaxUInt32 = 0xFFFFFFFFu;
-const int kMinUInt32 = 0;
-
-const int kUInt8Size = sizeof(uint8_t);
-const int kCharSize = sizeof(char);
-const int kShortSize = sizeof(short); // NOLINT
-const int kUInt16Size = sizeof(uint16_t);
-const int kIntSize = sizeof(int);
-const int kInt32Size = sizeof(int32_t);
-const int kInt64Size = sizeof(int64_t);
-const int kUInt32Size = sizeof(uint32_t);
-const int kSizetSize = sizeof(size_t);
-const int kFloatSize = sizeof(float);
-const int kDoubleSize = sizeof(double);
-const int kIntptrSize = sizeof(intptr_t);
-const int kUIntptrSize = sizeof(uintptr_t);
-const int kPointerSize = sizeof(void*);
+constexpr int KB = 1024;
+constexpr int MB = KB * KB;
+constexpr int GB = KB * KB * KB;
+constexpr int kMaxInt = 0x7FFFFFFF;
+constexpr int kMinInt = -kMaxInt - 1;
+constexpr int kMaxInt8 = (1 << 7) - 1;
+constexpr int kMinInt8 = -(1 << 7);
+constexpr int kMaxUInt8 = (1 << 8) - 1;
+constexpr int kMinUInt8 = 0;
+constexpr int kMaxInt16 = (1 << 15) - 1;
+constexpr int kMinInt16 = -(1 << 15);
+constexpr int kMaxUInt16 = (1 << 16) - 1;
+constexpr int kMinUInt16 = 0;
+
+constexpr uint32_t kMaxUInt32 = 0xFFFFFFFFu;
+constexpr int kMinUInt32 = 0;
+
+constexpr int kUInt8Size = sizeof(uint8_t);
+constexpr int kCharSize = sizeof(char);
+constexpr int kShortSize = sizeof(short); // NOLINT
+constexpr int kUInt16Size = sizeof(uint16_t);
+constexpr int kIntSize = sizeof(int);
+constexpr int kInt32Size = sizeof(int32_t);
+constexpr int kInt64Size = sizeof(int64_t);
+constexpr int kUInt32Size = sizeof(uint32_t);
+constexpr int kSizetSize = sizeof(size_t);
+constexpr int kFloatSize = sizeof(float);
+constexpr int kDoubleSize = sizeof(double);
+constexpr int kIntptrSize = sizeof(intptr_t);
+constexpr int kUIntptrSize = sizeof(uintptr_t);
+constexpr int kPointerSize = sizeof(void*);
#if V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
-const int kRegisterSize = kPointerSize + kPointerSize;
+constexpr int kRegisterSize = kPointerSize + kPointerSize;
#else
-const int kRegisterSize = kPointerSize;
+constexpr int kRegisterSize = kPointerSize;
#endif
-const int kPCOnStackSize = kRegisterSize;
-const int kFPOnStackSize = kRegisterSize;
+constexpr int kPCOnStackSize = kRegisterSize;
+constexpr int kFPOnStackSize = kRegisterSize;
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
-const int kElidedFrameSlots = kPCOnStackSize / kPointerSize;
+constexpr int kElidedFrameSlots = kPCOnStackSize / kPointerSize;
#else
-const int kElidedFrameSlots = 0;
+constexpr int kElidedFrameSlots = 0;
#endif
-const int kDoubleSizeLog2 = 3;
-const size_t kMaxWasmCodeMemory = 256 * MB;
+constexpr int kDoubleSizeLog2 = 3;
+constexpr size_t kMaxWasmCodeMemory = 256 * MB;
#if V8_HOST_ARCH_64_BIT
-const int kPointerSizeLog2 = 3;
-const intptr_t kIntptrSignBit = V8_INT64_C(0x8000000000000000);
-const uintptr_t kUintptrAllBitsSet = V8_UINT64_C(0xFFFFFFFFFFFFFFFF);
-const bool kRequiresCodeRange = true;
+constexpr int kPointerSizeLog2 = 3;
+constexpr intptr_t kIntptrSignBit =
+ static_cast<intptr_t>(uintptr_t{0x8000000000000000});
+constexpr uintptr_t kUintptrAllBitsSet = uintptr_t{0xFFFFFFFFFFFFFFFF};
+constexpr bool kRequiresCodeRange = true;
#if V8_TARGET_ARCH_MIPS64
// To use pseudo-relative jumps such as j/jal instructions, which have a
// 28-bit encoded immediate, the addresses have to lie within a 256MB-aligned
// region. Used only for large object space.
-const size_t kMaximalCodeRangeSize = 256 * MB;
-const size_t kCodeRangeAreaAlignment = 256 * MB;
+constexpr size_t kMaximalCodeRangeSize = 256 * MB;
+constexpr size_t kCodeRangeAreaAlignment = 256 * MB;
#elif V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC && V8_OS_LINUX
-const size_t kMaximalCodeRangeSize = 512 * MB;
-const size_t kCodeRangeAreaAlignment = 64 * KB; // OS page on PPC Linux
+constexpr size_t kMaximalCodeRangeSize = 512 * MB;
+constexpr size_t kCodeRangeAreaAlignment = 64 * KB; // OS page on PPC Linux
#else
-const size_t kMaximalCodeRangeSize = 512 * MB;
-const size_t kCodeRangeAreaAlignment = 4 * KB; // OS page.
+constexpr size_t kMaximalCodeRangeSize = 512 * MB;
+constexpr size_t kCodeRangeAreaAlignment = 4 * KB; // OS page.
#endif
#if V8_OS_WIN
-const size_t kMinimumCodeRangeSize = 4 * MB;
-const size_t kReservedCodeRangePages = 1;
+constexpr size_t kMinimumCodeRangeSize = 4 * MB;
+constexpr size_t kReservedCodeRangePages = 1;
#else
-const size_t kMinimumCodeRangeSize = 3 * MB;
-const size_t kReservedCodeRangePages = 0;
+constexpr size_t kMinimumCodeRangeSize = 3 * MB;
+constexpr size_t kReservedCodeRangePages = 0;
#endif
#else
-const int kPointerSizeLog2 = 2;
-const intptr_t kIntptrSignBit = 0x80000000;
-const uintptr_t kUintptrAllBitsSet = 0xFFFFFFFFu;
+constexpr int kPointerSizeLog2 = 2;
+constexpr intptr_t kIntptrSignBit = 0x80000000;
+constexpr uintptr_t kUintptrAllBitsSet = 0xFFFFFFFFu;
#if V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
// x32 port also requires code range.
-const bool kRequiresCodeRange = true;
-const size_t kMaximalCodeRangeSize = 256 * MB;
-const size_t kMinimumCodeRangeSize = 3 * MB;
-const size_t kCodeRangeAreaAlignment = 4 * KB; // OS page.
+constexpr bool kRequiresCodeRange = true;
+constexpr size_t kMaximalCodeRangeSize = 256 * MB;
+constexpr size_t kMinimumCodeRangeSize = 3 * MB;
+constexpr size_t kCodeRangeAreaAlignment = 4 * KB; // OS page.
#elif V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC && V8_OS_LINUX
-const bool kRequiresCodeRange = false;
-const size_t kMaximalCodeRangeSize = 0 * MB;
-const size_t kMinimumCodeRangeSize = 0 * MB;
-const size_t kCodeRangeAreaAlignment = 64 * KB; // OS page on PPC Linux
+constexpr bool kRequiresCodeRange = false;
+constexpr size_t kMaximalCodeRangeSize = 0 * MB;
+constexpr size_t kMinimumCodeRangeSize = 0 * MB;
+constexpr size_t kCodeRangeAreaAlignment = 64 * KB; // OS page on PPC Linux
#else
-const bool kRequiresCodeRange = false;
-const size_t kMaximalCodeRangeSize = 0 * MB;
-const size_t kMinimumCodeRangeSize = 0 * MB;
-const size_t kCodeRangeAreaAlignment = 4 * KB; // OS page.
+constexpr bool kRequiresCodeRange = false;
+constexpr size_t kMaximalCodeRangeSize = 0 * MB;
+constexpr size_t kMinimumCodeRangeSize = 0 * MB;
+constexpr size_t kCodeRangeAreaAlignment = 4 * KB; // OS page.
#endif
-const size_t kReservedCodeRangePages = 0;
+constexpr size_t kReservedCodeRangePages = 0;
#endif
// Trigger an incremental GCs once the external memory reaches this limit.
-const int kExternalAllocationSoftLimit = 64 * MB;
+constexpr int kExternalAllocationSoftLimit = 64 * MB;
// Maximum object size that gets allocated into regular pages. Objects larger
// than that size are allocated in large object space and are never moved in
@@ -238,39 +239,39 @@ const int kExternalAllocationSoftLimit = 64 * MB;
// account.
//
// Current value: Page::kAllocatableMemory (on 32-bit arch) - 512 (slack).
-const int kMaxRegularHeapObjectSize = 507136;
+constexpr int kMaxRegularHeapObjectSize = 507136;
STATIC_ASSERT(kPointerSize == (1 << kPointerSizeLog2));
-const int kBitsPerByte = 8;
-const int kBitsPerByteLog2 = 3;
-const int kBitsPerPointer = kPointerSize * kBitsPerByte;
-const int kBitsPerInt = kIntSize * kBitsPerByte;
+constexpr int kBitsPerByte = 8;
+constexpr int kBitsPerByteLog2 = 3;
+constexpr int kBitsPerPointer = kPointerSize * kBitsPerByte;
+constexpr int kBitsPerInt = kIntSize * kBitsPerByte;
// IEEE 754 single precision floating point number bit layout.
-const uint32_t kBinary32SignMask = 0x80000000u;
-const uint32_t kBinary32ExponentMask = 0x7f800000u;
-const uint32_t kBinary32MantissaMask = 0x007fffffu;
-const int kBinary32ExponentBias = 127;
-const int kBinary32MaxExponent = 0xFE;
-const int kBinary32MinExponent = 0x01;
-const int kBinary32MantissaBits = 23;
-const int kBinary32ExponentShift = 23;
+constexpr uint32_t kBinary32SignMask = 0x80000000u;
+constexpr uint32_t kBinary32ExponentMask = 0x7f800000u;
+constexpr uint32_t kBinary32MantissaMask = 0x007fffffu;
+constexpr int kBinary32ExponentBias = 127;
+constexpr int kBinary32MaxExponent = 0xFE;
+constexpr int kBinary32MinExponent = 0x01;
+constexpr int kBinary32MantissaBits = 23;
+constexpr int kBinary32ExponentShift = 23;
// Quiet NaNs have bits 51 to 62 set, possibly the sign bit, and no
// other bits set.
-const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
+constexpr uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
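The shifted mask covers bits 51 through 62, which is exactly the bit pattern of the default quiet NaN, 0x7FF8000000000000. A small check of that arithmetic, assuming IEEE 754 doubles:

    #include <cassert>
    #include <cstdint>
    #include <cstring>
    #include <limits>

    constexpr uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;

    int main() {
      double qnan = std::numeric_limits<double>::quiet_NaN();
      uint64_t bits;
      std::memcpy(&bits, &qnan, sizeof bits);
      // On IEEE 754 platforms the default quiet NaN sets every mask bit.
      assert((bits & kQuietNaNMask) == kQuietNaNMask);
    }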
// Latin1/UTF-16 constants
// Code-point values in Unicode 4.0 are 21 bits wide.
// Code units in UTF-16 are 16 bits wide.
typedef uint16_t uc16;
typedef int32_t uc32;
-const int kOneByteSize = kCharSize;
-const int kUC16Size = sizeof(uc16); // NOLINT
+constexpr int kOneByteSize = kCharSize;
+constexpr int kUC16Size = sizeof(uc16); // NOLINT
// 128 bit SIMD value size.
-const int kSimd128Size = 16;
+constexpr int kSimd128Size = 16;
// Round up n to be a multiple of sz, where sz is a power of 2.
#define ROUND_UP(n, sz) (((n) + ((sz) - 1)) & ~((sz) - 1))
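The macro relies on sz being a power of two: adding sz - 1 and masking off the low bits rounds n up to the next multiple. Two worked cases as compile-time checks:

    #define ROUND_UP(n, sz) (((n) + ((sz) - 1)) & ~((sz) - 1))

    static_assert(ROUND_UP(13, 8) == 16, "rounds up to the next multiple");
    static_assert(ROUND_UP(16, 8) == 16, "aligned values are unchanged");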
@@ -354,10 +355,10 @@ inline LanguageMode stricter_language_mode(LanguageMode mode1,
enum TypeofMode : int { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
// This constant is used as an undefined value when passing source positions.
-const int kNoSourcePosition = -1;
+constexpr int kNoSourcePosition = -1;
// This constant is used to indicate missing deoptimization information.
-const int kNoDeoptimizationId = -1;
+constexpr int kNoDeoptimizationId = -1;
// Deoptimize bailout kind.
enum class DeoptimizeKind : uint8_t { kEager, kSoft, kLazy };
@@ -392,63 +393,53 @@ inline std::ostream& operator<<(std::ostream& os,
}
// Mask for the sign bit in a smi.
-const intptr_t kSmiSignMask = kIntptrSignBit;
+constexpr intptr_t kSmiSignMask = kIntptrSignBit;
-const int kObjectAlignmentBits = kPointerSizeLog2;
-const intptr_t kObjectAlignment = 1 << kObjectAlignmentBits;
-const intptr_t kObjectAlignmentMask = kObjectAlignment - 1;
+constexpr int kObjectAlignmentBits = kPointerSizeLog2;
+constexpr intptr_t kObjectAlignment = 1 << kObjectAlignmentBits;
+constexpr intptr_t kObjectAlignmentMask = kObjectAlignment - 1;
// Desired alignment for pointers.
-const intptr_t kPointerAlignment = (1 << kPointerSizeLog2);
-const intptr_t kPointerAlignmentMask = kPointerAlignment - 1;
+constexpr intptr_t kPointerAlignment = (1 << kPointerSizeLog2);
+constexpr intptr_t kPointerAlignmentMask = kPointerAlignment - 1;
// Desired alignment for double values.
-const intptr_t kDoubleAlignment = 8;
-const intptr_t kDoubleAlignmentMask = kDoubleAlignment - 1;
+constexpr intptr_t kDoubleAlignment = 8;
+constexpr intptr_t kDoubleAlignmentMask = kDoubleAlignment - 1;
// Desired alignment for generated code is 32 bytes (to improve cache line
// utilization).
-const int kCodeAlignmentBits = 5;
-const intptr_t kCodeAlignment = 1 << kCodeAlignmentBits;
-const intptr_t kCodeAlignmentMask = kCodeAlignment - 1;
-
-// The owner field of a page is tagged with the page header tag. We need that
-// to find out if a slot is part of a large object. If we mask out the lower
-// 0xfffff bits (1M pages), go to the owner offset, and see that this field
-// is tagged with the page header tag, we can just look up the owner.
-// Otherwise, we know that we are somewhere (not within the first 1M) in a
-// large object.
-const int kPageHeaderTag = 3;
-const int kPageHeaderTagSize = 2;
-const intptr_t kPageHeaderTagMask = (1 << kPageHeaderTagSize) - 1;
+constexpr int kCodeAlignmentBits = 5;
+constexpr intptr_t kCodeAlignment = 1 << kCodeAlignmentBits;
+constexpr intptr_t kCodeAlignmentMask = kCodeAlignment - 1;
+// Weak references are tagged using the second bit in a pointer.
+constexpr int kWeakReferenceTag = 3;
+constexpr int kWeakReferenceTagSize = 2;
+constexpr intptr_t kWeakReferenceTagMask = (1 << kWeakReferenceTagSize) - 1;
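A hypothetical sketch of how such a low-bit tag is typically tested and stripped; the helper names here are illustrative, not V8 API:

    #include <cstdint>

    constexpr intptr_t kWeakReferenceTag = 3;
    constexpr int kWeakReferenceTagSize = 2;
    constexpr intptr_t kWeakReferenceTagMask = (1 << kWeakReferenceTagSize) - 1;

    // Illustrative helpers: a value is a weak reference when its two low
    // bits carry the weak tag; stripping the tag recovers the pointer bits.
    bool IsWeakReference(intptr_t value) {
      return (value & kWeakReferenceTagMask) == kWeakReferenceTag;
    }

    intptr_t StripWeakTag(intptr_t value) {
      return value & ~kWeakReferenceTagMask;
    }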
// Zap-value: The value used for zapping dead objects.
// Should be a recognizable hex value tagged as a failure.
#ifdef V8_HOST_ARCH_64_BIT
-const Address kZapValue =
- reinterpret_cast<Address>(V8_UINT64_C(0xdeadbeedbeadbeef));
-const Address kHandleZapValue =
- reinterpret_cast<Address>(V8_UINT64_C(0x1baddead0baddeaf));
-const Address kGlobalHandleZapValue =
- reinterpret_cast<Address>(V8_UINT64_C(0x1baffed00baffedf));
-const Address kFromSpaceZapValue =
- reinterpret_cast<Address>(V8_UINT64_C(0x1beefdad0beefdaf));
-const uint64_t kDebugZapValue = V8_UINT64_C(0xbadbaddbbadbaddb);
-const uint64_t kSlotsZapValue = V8_UINT64_C(0xbeefdeadbeefdeef);
-const uint64_t kFreeListZapValue = 0xfeed1eaffeed1eaf;
+constexpr uint64_t kZapValue = uint64_t{0xdeadbeedbeadbeef};
+constexpr uint64_t kHandleZapValue = uint64_t{0x1baddead0baddeaf};
+constexpr uint64_t kGlobalHandleZapValue = uint64_t{0x1baffed00baffedf};
+constexpr uint64_t kFromSpaceZapValue = uint64_t{0x1beefdad0beefdaf};
+constexpr uint64_t kDebugZapValue = uint64_t{0xbadbaddbbadbaddb};
+constexpr uint64_t kSlotsZapValue = uint64_t{0xbeefdeadbeefdeef};
+constexpr uint64_t kFreeListZapValue = 0xfeed1eaffeed1eaf;
#else
-const Address kZapValue = reinterpret_cast<Address>(0xdeadbeef);
-const Address kHandleZapValue = reinterpret_cast<Address>(0xbaddeaf);
-const Address kGlobalHandleZapValue = reinterpret_cast<Address>(0xbaffedf);
-const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdaf);
-const uint32_t kSlotsZapValue = 0xbeefdeef;
-const uint32_t kDebugZapValue = 0xbadbaddb;
-const uint32_t kFreeListZapValue = 0xfeed1eaf;
+constexpr uint32_t kZapValue = 0xdeadbeef;
+constexpr uint32_t kHandleZapValue = 0xbaddeaf;
+constexpr uint32_t kGlobalHandleZapValue = 0xbaffedf;
+constexpr uint32_t kFromSpaceZapValue = 0xbeefdaf;
+constexpr uint32_t kSlotsZapValue = 0xbeefdeef;
+constexpr uint32_t kDebugZapValue = 0xbadbaddb;
+constexpr uint32_t kFreeListZapValue = 0xfeed1eaf;
#endif
-const int kCodeZapValue = 0xbadc0de;
-const uint32_t kPhantomReferenceZap = 0xca11bac;
+constexpr int kCodeZapValue = 0xbadc0de;
+constexpr uint32_t kPhantomReferenceZap = 0xca11bac;
// On Intel architecture, cache line size is 64 bytes.
// On ARM it may be less (32 bytes), but as far as this constant is
@@ -457,8 +448,7 @@ const uint32_t kPhantomReferenceZap = 0xca11bac;
// Constants relevant to double precision floating point numbers.
// If looking only at the top 32 bits, the QNaN mask is bits 19 to 30.
-const uint32_t kQuietNaNHighBitsMask = 0xfff << (51 - 32);
-
+constexpr uint32_t kQuietNaNHighBitsMask = 0xfff << (51 - 32);
// -----------------------------------------------------------------------------
// Forward declarations for frequently used classes
@@ -542,8 +532,7 @@ enum AllocationSpace {
FIRST_PAGED_SPACE = OLD_SPACE,
LAST_PAGED_SPACE = MAP_SPACE
};
-const int kSpaceTagSize = 3;
-const int kSpaceTagMask = (1 << kSpaceTagSize) - 1;
+constexpr int kSpaceTagSize = 3;
enum AllocationAlignment { kWordAligned, kDoubleAligned, kDoubleUnaligned };
@@ -628,7 +617,7 @@ enum VisitMode {
VISIT_ALL_IN_SCAVENGE,
VISIT_ALL_IN_SWEEP_NEWSPACE,
VISIT_ONLY_STRONG,
- VISIT_ONLY_STRONG_FOR_SERIALIZATION,
+ VISIT_FOR_SERIALIZATION,
};
// Flag indicating whether code is built into the VM (one of the natives files).
@@ -750,12 +739,12 @@ union IeeeDoubleBigEndianArchType {
#if V8_TARGET_LITTLE_ENDIAN
typedef IeeeDoubleLittleEndianArchType IeeeDoubleArchType;
-const int kIeeeDoubleMantissaWordOffset = 0;
-const int kIeeeDoubleExponentWordOffset = 4;
+constexpr int kIeeeDoubleMantissaWordOffset = 0;
+constexpr int kIeeeDoubleExponentWordOffset = 4;
#else
typedef IeeeDoubleBigEndianArchType IeeeDoubleArchType;
-const int kIeeeDoubleMantissaWordOffset = 4;
-const int kIeeeDoubleExponentWordOffset = 0;
+constexpr int kIeeeDoubleMantissaWordOffset = 4;
+constexpr int kIeeeDoubleExponentWordOffset = 0;
#endif
// AccessorCallback
@@ -930,20 +919,18 @@ enum AllocationSiteMode {
(!defined(USE_SIMULATOR) || !defined(_MIPS_TARGET_SIMULATOR))) || \
(V8_TARGET_ARCH_MIPS64 && !defined(_MIPS_ARCH_MIPS64R6) && \
(!defined(USE_SIMULATOR) || !defined(_MIPS_TARGET_SIMULATOR)))
-const uint32_t kHoleNanUpper32 = 0xFFFF7FFF;
-const uint32_t kHoleNanLower32 = 0xFFFF7FFF;
+constexpr uint32_t kHoleNanUpper32 = 0xFFFF7FFF;
+constexpr uint32_t kHoleNanLower32 = 0xFFFF7FFF;
#else
-const uint32_t kHoleNanUpper32 = 0xFFF7FFFF;
-const uint32_t kHoleNanLower32 = 0xFFF7FFFF;
+constexpr uint32_t kHoleNanUpper32 = 0xFFF7FFFF;
+constexpr uint32_t kHoleNanLower32 = 0xFFF7FFFF;
#endif
-const uint64_t kHoleNanInt64 =
+constexpr uint64_t kHoleNanInt64 =
(static_cast<uint64_t>(kHoleNanUpper32) << 32) | kHoleNanLower32;
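Concatenating the two halves gives the canonical hole-NaN bit pattern. For the non-ARM/MIPS values from the #else branch above, that works out as follows (a compile-time restatement of the constants):

    #include <cstdint>

    constexpr uint32_t kHoleNanUpper32 = 0xFFF7FFFF;  // #else-branch value
    constexpr uint32_t kHoleNanLower32 = 0xFFF7FFFF;
    constexpr uint64_t kHoleNanInt64 =
        (static_cast<uint64_t>(kHoleNanUpper32) << 32) | kHoleNanLower32;
    static_assert(kHoleNanInt64 == 0xFFF7FFFFFFF7FFFFull, "hole NaN pattern");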
-
// ES6 section 20.1.2.6 Number.MAX_SAFE_INTEGER
-const double kMaxSafeInteger = 9007199254740991.0; // 2^53-1
-
+constexpr double kMaxSafeInteger = 9007199254740991.0; // 2^53-1
// The order of this enum has to be kept in sync with the predicates below.
enum VariableMode : uint8_t {
@@ -1094,7 +1081,6 @@ enum FunctionKind : uint16_t {
kArrowFunction = 1 << 0,
kGeneratorFunction = 1 << 1,
kConciseMethod = 1 << 2,
- kConciseGeneratorMethod = kGeneratorFunction | kConciseMethod,
kDefaultConstructor = 1 << 3,
kDerivedConstructor = 1 << 4,
kBaseConstructor = 1 << 5,
@@ -1102,6 +1088,10 @@ enum FunctionKind : uint16_t {
kSetterFunction = 1 << 7,
kAsyncFunction = 1 << 8,
kModule = 1 << 9,
+ kClassFieldsInitializerFunction = 1 << 10 | kConciseMethod,
+ kLastFunctionKind = kClassFieldsInitializerFunction,
+
+ kConciseGeneratorMethod = kGeneratorFunction | kConciseMethod,
kAccessorFunction = kGetterFunction | kSetterFunction,
kDefaultBaseConstructor = kDefaultConstructor | kBaseConstructor,
kDefaultDerivedConstructor = kDefaultConstructor | kDerivedConstructor,
@@ -1133,7 +1123,8 @@ inline bool IsValidFunctionKind(FunctionKind kind) {
kind == FunctionKind::kAsyncArrowFunction ||
kind == FunctionKind::kAsyncConciseMethod ||
kind == FunctionKind::kAsyncConciseGeneratorMethod ||
- kind == FunctionKind::kAsyncGeneratorFunction;
+ kind == FunctionKind::kAsyncGeneratorFunction ||
+ kind == FunctionKind::kClassFieldsInitializerFunction;
}
@@ -1211,6 +1202,11 @@ inline bool IsClassConstructor(FunctionKind kind) {
return (kind & FunctionKind::kClassConstructor) != 0;
}
+inline bool IsClassFieldsInitializerFunction(FunctionKind kind) {
+ DCHECK(IsValidFunctionKind(kind));
+ return kind == FunctionKind::kClassFieldsInitializerFunction;
+}
+
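Because kClassFieldsInitializerFunction is defined as 1 << 10 | kConciseMethod, the new kind still satisfies the concise-method predicate. A quick compile-time restatement of that bit arithmetic (values copied from the enum above):

    // kConciseMethod == 1 << 2; the class-fields initializer kind ORs an
    // extra bit on top of it, so the concise-method bit remains set.
    static_assert((((1 << 10) | (1 << 2)) & (1 << 2)) != 0,
                  "the initializer kind still reads as a concise method");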
inline bool IsConstructable(FunctionKind kind) {
if (IsAccessorFunction(kind)) return false;
if (IsConciseMethod(kind)) return false;
@@ -1253,14 +1249,17 @@ inline uint32_t ObjectHash(Address address) {
// Type feedback is encoded in such a way that, we can combine the feedback
// at different points by performing an 'OR' operation. Type feedback moves
// to a more generic type when we combine feedback.
-// kSignedSmall -> kSignedSmallInputs -> kNumber -> kNumberOrOddball -> kAny
-// kString -> kAny
-// kBigInt -> kAny
-// TODO(mythria): Remove kNumber type when crankshaft can handle Oddballs
-// similar to Numbers. We don't need kNumber feedback for Turbofan. Extra
-// information about Number might reduce few instructions but causes more
-// deopts. We collect Number only because crankshaft does not handle all
-// cases of oddballs.
+//
+// kSignedSmall -> kSignedSmallInputs -> kNumber -> kNumberOrOddball -> kAny
+// kString -> kAny
+// kBigInt -> kAny
+//
+// Technically we wouldn't need the separation between the kNumber and the
+// kNumberOrOddball values here, since for binary operations we always
+// truncate oddballs to numbers. In practice, though, always handling both
+// numbers and oddballs would make TurboFan generate quite a lot of unused
+// code, even though in 99% of the use sites only numbers show up.
class BinaryOperationFeedback {
public:
enum {
@@ -1278,11 +1277,15 @@ class BinaryOperationFeedback {
// Type feedback is encoded in such a way that, we can combine the feedback
// at different points by performing an 'OR' operation. Type feedback moves
// to a more generic type when we combine feedback.
-// kSignedSmall -> kNumber -> kAny
-// kInternalizedString -> kString -> kAny
-// kSymbol -> kAny
-// kReceiver -> kAny
-// TODO(epertoso): consider unifying this with BinaryOperationFeedback.
+//
+// kSignedSmall -> kNumber -> kNumberOrOddball -> kAny
+// kInternalizedString -> kString -> kAny
+// kSymbol -> kAny
+// kBigInt -> kAny
+// kReceiver -> kAny
+//
+// This is kept distinct from BinaryOperationFeedback on purpose, because
+// both the feedback that matters and the way it is consumed differ greatly.
class CompareOperationFeedback {
public:
enum {
@@ -1293,6 +1296,7 @@ class CompareOperationFeedback {
kInternalizedString = 0x8,
kString = 0x18,
kSymbol = 0x20,
+ kBigInt = 0x30,
kReceiver = 0x40,
kAny = 0xff
};
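Both feedback classes lean on the same invariant spelled out in the comments above: combining two feedback values with bitwise OR can only move up the lattice, never down. A minimal sketch of that combination rule, with illustrative subset-encoded values in the spirit of the enum above:

    #include <cstdint>

    // Illustrative encoding: a more generic type includes the bits of every
    // type below it, so OR-ing two feedback values can only generalize.
    enum Feedback : uint8_t {
      kNone = 0x0,
      kSignedSmall = 0x1,
      kNumber = 0x3,
      kNumberOrOddball = 0x7,
      kAny = 0xFF,
    };

    inline Feedback Combine(Feedback a, Feedback b) {
      return static_cast<Feedback>(a | b);
    }

    static_assert((kSignedSmall | kNumber) == kNumber, "stays at kNumber");
    static_assert((kNumber | kNumberOrOddball) == kNumberOrOddball,
                  "only ever generalizes");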
@@ -1419,6 +1423,7 @@ inline std::ostream& operator<<(std::ostream& os,
}
enum class OptimizationMarker {
+ kLogFirstExecution,
kNone,
kCompileOptimized,
kCompileOptimizedConcurrent,
@@ -1428,6 +1433,8 @@ enum class OptimizationMarker {
inline std::ostream& operator<<(std::ostream& os,
const OptimizationMarker& marker) {
switch (marker) {
+ case OptimizationMarker::kLogFirstExecution:
+ return os << "OptimizationMarker::kLogFirstExecution";
case OptimizationMarker::kNone:
return os << "OptimizationMarker::kNone";
case OptimizationMarker::kCompileOptimized:
@@ -1441,6 +1448,20 @@ inline std::ostream& operator<<(std::ostream& os,
return os;
}
+enum class SpeculationMode { kAllowSpeculation, kDisallowSpeculation };
+
+inline std::ostream& operator<<(std::ostream& os,
+ SpeculationMode speculation_mode) {
+ switch (speculation_mode) {
+ case SpeculationMode::kAllowSpeculation:
+ return os << "SpeculationMode::kAllowSpeculation";
+ case SpeculationMode::kDisallowSpeculation:
+ return os << "SpeculationMode::kDisallowSpeculation";
+ }
+ UNREACHABLE();
+ return os;
+}
+
enum class ConcurrencyMode { kNotConcurrent, kConcurrent };
#define FOR_EACH_ISOLATE_ADDRESS_NAME(C) \
@@ -1455,7 +1476,9 @@ enum class ConcurrencyMode { kNotConcurrent, kConcurrent };
C(PendingHandlerFP, pending_handler_fp) \
C(PendingHandlerSP, pending_handler_sp) \
C(ExternalCaughtException, external_caught_exception) \
- C(JSEntrySP, js_entry_sp)
+ C(JSEntrySP, js_entry_sp) \
+ C(MicrotaskQueueBailoutIndex, microtask_queue_bailout_index) \
+ C(MicrotaskQueueBailoutCount, microtask_queue_bailout_count)
enum IsolateAddressId {
#define DECLARE_ENUM(CamelName, hacker_name) k##CamelName##Address,
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index 7f403bcdb5..ef4d4b155a 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -110,7 +110,7 @@ void HandleScope::DeleteExtensions(Isolate* isolate) {
void HandleScope::ZapRange(Object** start, Object** end) {
DCHECK_LE(end - start, kHandleBlockSize);
for (Object** p = start; p != end; p++) {
- *reinterpret_cast<Address*>(p) = kHandleZapValue;
+ *reinterpret_cast<Address*>(p) = reinterpret_cast<Address>(kHandleZapValue);
}
}
#endif
diff --git a/deps/v8/src/heap-symbols.h b/deps/v8/src/heap-symbols.h
index 62dc9007ad..e747ba2720 100644
--- a/deps/v8/src/heap-symbols.h
+++ b/deps/v8/src/heap-symbols.h
@@ -146,6 +146,7 @@
V(promise_string, "promise") \
V(proto_string, "__proto__") \
V(prototype_string, "prototype") \
+ V(proxy_string, "proxy") \
V(Proxy_string, "Proxy") \
V(query_colon_string, "(?:)") \
V(RangeError_string, "RangeError") \
@@ -155,6 +156,7 @@
V(reject_string, "reject") \
V(resolve_string, "resolve") \
V(return_string, "return") \
+ V(revoke_string, "revoke") \
V(script_string, "script") \
V(second_string, "second") \
V(setPrototypeOf_string, "setPrototypeOf") \
@@ -356,4 +358,17 @@
F(SCAVENGER_SCAVENGE_ROOTS) \
F(SCAVENGER_SCAVENGE_WEAK)
+#define TRACER_BACKGROUND_SCOPES(F) \
+ F(BACKGROUND_ARRAY_BUFFER_FREE) \
+ F(BACKGROUND_STORE_BUFFER) \
+ F(BACKGROUND_UNMAPPER) \
+ F(MC_BACKGROUND_EVACUATE_COPY) \
+ F(MC_BACKGROUND_EVACUATE_UPDATE_POINTERS) \
+ F(MC_BACKGROUND_MARKING) \
+ F(MC_BACKGROUND_SWEEPING) \
+ F(MINOR_MC_BACKGROUND_EVACUATE_COPY) \
+ F(MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS) \
+ F(MINOR_MC_BACKGROUND_MARKING) \
+ F(SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL)
+
#endif // V8_HEAP_SYMBOLS_H_
diff --git a/deps/v8/src/heap/array-buffer-collector.cc b/deps/v8/src/heap/array-buffer-collector.cc
index 1f41ffb2eb..cf0297bb2a 100644
--- a/deps/v8/src/heap/array-buffer-collector.cc
+++ b/deps/v8/src/heap/array-buffer-collector.cc
@@ -36,6 +36,9 @@ class ArrayBufferCollector::FreeingTask final : public CancelableTask {
private:
void RunInternal() final {
+ TRACE_BACKGROUND_GC(
+ heap_->tracer(),
+ GCTracer::BackgroundScope::BACKGROUND_ARRAY_BUFFER_FREE);
heap_->array_buffer_collector()->FreeAllocations();
}
diff --git a/deps/v8/src/heap/concurrent-marking.cc b/deps/v8/src/heap/concurrent-marking.cc
index 9634db951a..44ab099ba8 100644
--- a/deps/v8/src/heap/concurrent-marking.cc
+++ b/deps/v8/src/heap/concurrent-marking.cc
@@ -7,6 +7,7 @@
#include <stack>
#include <unordered_map>
+#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/mark-compact-inl.h"
@@ -186,9 +187,14 @@ class ConcurrentMarkingVisitor final
// ===========================================================================
int VisitFixedArray(Map* map, FixedArray* object) {
- int length = object->synchronized_length();
- int size = FixedArray::SizeFor(length);
+ // The synchronized_length() function checks that the length is a Smi.
+ // This is not necessarily the case if the array is being left-trimmed.
+ Object* length = object->unchecked_synchronized_length();
if (!ShouldVisit(object)) return 0;
+ // The cached length must be the actual length as the array is not black.
+ // Left trimming marks the array black before over-writing the length.
+ DCHECK(length->IsSmi());
+ int size = FixedArray::SizeFor(Smi::ToInt(length));
VisitMapPointer(object, object->map_slot());
FixedArray::BodyDescriptor::IterateBody(object, size, this);
return size;
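The ordering here is the read-then-color-check protocol the comments describe: read the (possibly stale) length first, and only trust it after confirming the array was not yet black, since left trimming blackens the array before touching the length. A hedged model of that protocol with plain atomics, not V8's actual object layout:

    #include <atomic>

    // Assumed model: the left-trimming thread stores `black = true` and only
    // then overwrites `length`; seq_cst is used throughout for simplicity.
    struct Array {
      std::atomic<int> length;
      std::atomic<bool> black;
    };

    int SizeToVisit(Array* a) {
      int length = a->length.load();  // read the length first
      if (a->black.load()) return 0;  // black: already visited, bail out
      // Not black implies the trim (which blackens first) hadn't completed,
      // so the length read above is still the real one.
      return length;
    }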
@@ -381,6 +387,12 @@ SeqTwoByteString* ConcurrentMarkingVisitor::Cast(HeapObject* object) {
return reinterpret_cast<SeqTwoByteString*>(object);
}
+// Fixed array can become a free space during left trimming.
+template <>
+FixedArray* ConcurrentMarkingVisitor::Cast(HeapObject* object) {
+ return reinterpret_cast<FixedArray*>(object);
+}
+
class ConcurrentMarking::Task : public CancelableTask {
public:
Task(Isolate* isolate, ConcurrentMarking* concurrent_marking,
@@ -427,6 +439,8 @@ ConcurrentMarking::ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
}
void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
+ TRACE_BACKGROUND_GC(heap_->tracer(),
+ GCTracer::BackgroundScope::MC_BACKGROUND_MARKING);
size_t kBytesUntilInterruptCheck = 64 * KB;
int kObjectsUntilInterrupCheck = 1000;
LiveBytesMap* live_bytes = nullptr;
@@ -500,6 +514,7 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
}
void ConcurrentMarking::ScheduleTasks() {
+ DCHECK(heap_->use_tasks());
if (!FLAG_concurrent_marking) return;
base::LockGuard<base::Mutex> guard(&pending_lock_);
if (task_count_ == 0) {
@@ -528,7 +543,7 @@ void ConcurrentMarking::ScheduleTasks() {
}
void ConcurrentMarking::RescheduleTasksIfNeeded() {
- if (!FLAG_concurrent_marking) return;
+ if (!FLAG_concurrent_marking || !heap_->use_tasks()) return;
{
base::LockGuard<base::Mutex> guard(&pending_lock_);
if (pending_task_count_ > 0) return;
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index b9832d5433..30abe44ca6 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -6,6 +6,7 @@
#include <cstdarg>
+#include "src/base/atomic-utils.h"
#include "src/counters.h"
#include "src/heap/heap-inl.h"
#include "src/isolate.h"
@@ -15,8 +16,8 @@ namespace internal {
static size_t CountTotalHolesSize(Heap* heap) {
size_t holes_size = 0;
- OldSpaces spaces(heap);
- for (OldSpace* space = spaces.next(); space != nullptr;
+ PagedSpaces spaces(heap);
+ for (PagedSpace* space = spaces.next(); space != nullptr;
space = spaces.next()) {
DCHECK_GE(holes_size + space->Waste() + space->Available(), holes_size);
holes_size += space->Waste() + space->Available();
@@ -24,9 +25,11 @@ static size_t CountTotalHolesSize(Heap* heap) {
return holes_size;
}
-RuntimeCallStats::CounterId GCTracer::RCSCounterFromScope(Scope::ScopeId id) {
- return RuntimeCallStats::counters[kFirstGCIndexInRuntimeCallStats +
- static_cast<int>(id)];
+RuntimeCallCounterId GCTracer::RCSCounterFromScope(Scope::ScopeId id) {
+ STATIC_ASSERT(Scope::FIRST_SCOPE == Scope::MC_INCREMENTAL);
+ return static_cast<RuntimeCallCounterId>(
+ static_cast<int>(RuntimeCallCounterId::kGC_MC_INCREMENTAL) +
+ static_cast<int>(id));
}
GCTracer::Scope::Scope(GCTracer* tracer, ScopeId scope)
@@ -35,8 +38,7 @@ GCTracer::Scope::Scope(GCTracer* tracer, ScopeId scope)
// TODO(cbruni): remove once we fully moved to a trace-based system.
if (V8_LIKELY(!FLAG_runtime_stats)) return;
runtime_stats_ = tracer_->heap_->isolate()->counters()->runtime_call_stats();
- RuntimeCallStats::Enter(runtime_stats_, &timer_,
- GCTracer::RCSCounterFromScope(scope));
+ runtime_stats_->Enter(&timer_, GCTracer::RCSCounterFromScope(scope));
}
GCTracer::Scope::~Scope() {
@@ -44,7 +46,28 @@ GCTracer::Scope::~Scope() {
scope_, tracer_->heap_->MonotonicallyIncreasingTimeInMs() - start_time_);
// TODO(cbruni): remove once we fully moved to a trace-based system.
if (V8_LIKELY(runtime_stats_ == nullptr)) return;
- RuntimeCallStats::Leave(runtime_stats_, &timer_);
+ runtime_stats_->Leave(&timer_);
+}
+
+GCTracer::BackgroundScope::BackgroundScope(GCTracer* tracer, ScopeId scope)
+ : tracer_(tracer), scope_(scope), runtime_stats_enabled_(false) {
+ start_time_ = tracer_->heap_->MonotonicallyIncreasingTimeInMs();
+ // TODO(cbruni): remove once we fully moved to a trace-based system.
+ if (V8_LIKELY(!base::AsAtomic32::Relaxed_Load(&FLAG_runtime_stats))) return;
+ timer_.Start(&counter_, nullptr);
+ runtime_stats_enabled_ = true;
+}
+
+GCTracer::BackgroundScope::~BackgroundScope() {
+ double duration_ms =
+ tracer_->heap_->MonotonicallyIncreasingTimeInMs() - start_time_;
+ // TODO(cbruni): remove once we fully moved to a trace-based system.
+ if (V8_LIKELY(!runtime_stats_enabled_)) {
+ tracer_->AddBackgroundScopeSample(scope_, duration_ms, nullptr);
+ } else {
+ timer_.Stop();
+ tracer_->AddBackgroundScopeSample(scope_, duration_ms, &counter_);
+ }
}
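BackgroundScope is the classic RAII timing pattern: the constructor samples a clock, the destructor attributes the elapsed time to a counter. A generic, self-contained version of the idiom, with std::chrono standing in for V8's monotonic clock:

    #include <chrono>

    class ScopedTimerMs {
      using Clock = std::chrono::steady_clock;

     public:
      // Accumulates this scope's wall time, in ms, into *sink on destruction.
      explicit ScopedTimerMs(double* sink) : sink_(sink), start_(Clock::now()) {}
      ~ScopedTimerMs() {
        std::chrono::duration<double, std::milli> elapsed = Clock::now() - start_;
        *sink_ += elapsed.count();
      }

     private:
      double* sink_;
      Clock::time_point start_;
    };

Used as `double total_ms = 0; { ScopedTimerMs t(&total_ms); DoWork(); }`, mirroring how TRACE_BACKGROUND_GC drops a BackgroundScope on the stack for the duration of a task.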
const char* GCTracer::Scope::Name(ScopeId id) {
@@ -53,11 +76,27 @@ const char* GCTracer::Scope::Name(ScopeId id) {
return "V8.GC_" #scope;
switch (id) {
TRACER_SCOPES(CASE)
+ TRACER_BACKGROUND_SCOPES(CASE)
case Scope::NUMBER_OF_SCOPES:
break;
}
#undef CASE
- return "(unknown)";
+ UNREACHABLE();
+ return nullptr;
+}
+
+const char* GCTracer::BackgroundScope::Name(ScopeId id) {
+#define CASE(scope) \
+ case BackgroundScope::scope: \
+ return "V8.GC_" #scope;
+ switch (id) {
+ TRACER_BACKGROUND_SCOPES(CASE)
+ case BackgroundScope::NUMBER_OF_SCOPES:
+ break;
+ }
+#undef CASE
+ UNREACHABLE();
+ return nullptr;
}
GCTracer::Event::Event(Type type, GarbageCollectionReason gc_reason,
@@ -120,9 +159,11 @@ GCTracer::GCTracer(Heap* heap)
// We assume that MC_INCREMENTAL is the first scope so that we can properly
// map it to RuntimeCallStats.
STATIC_ASSERT(0 == Scope::MC_INCREMENTAL);
- CHECK(&RuntimeCallStats::GC_MC_INCREMENTAL ==
- RuntimeCallStats::counters[GCTracer::kFirstGCIndexInRuntimeCallStats]);
current_.end_time = heap_->MonotonicallyIncreasingTimeInMs();
+ for (int i = 0; i < BackgroundScope::NUMBER_OF_SCOPES; i++) {
+ background_counter_[i].total_duration_ms = 0;
+ background_counter_[i].runtime_call_counter = RuntimeCallCounter(nullptr);
+ }
}
void GCTracer::ResetForTesting() {
@@ -147,6 +188,11 @@ void GCTracer::ResetForTesting() {
recorded_context_disposal_times_.Reset();
recorded_survival_ratios_.Reset();
start_counter_ = 0;
+ base::LockGuard<base::Mutex> guard(&background_counter_mutex_);
+ for (int i = 0; i < BackgroundScope::NUMBER_OF_SCOPES; i++) {
+ background_counter_[i].total_duration_ms = 0;
+ background_counter_[i].runtime_call_counter.Reset();
+ }
}
void GCTracer::NotifyYoungGenerationHandling(
@@ -267,6 +313,7 @@ void GCTracer::Stop(GarbageCollector collector) {
MakeBytesAndDuration(current_.new_space_object_size, duration));
recorded_minor_gcs_survived_.Push(MakeBytesAndDuration(
current_.survived_new_space_object_size, duration));
+ FetchBackgroundMinorGCCounters();
break;
case Event::INCREMENTAL_MARK_COMPACTOR:
current_.incremental_marking_bytes = incremental_marking_bytes_;
@@ -281,6 +328,7 @@ void GCTracer::Stop(GarbageCollector collector) {
MakeBytesAndDuration(current_.start_object_size, duration));
ResetIncrementalMarkingCounters();
combined_mark_compact_speed_cache_ = 0.0;
+ FetchBackgroundMarkCompactCounters();
break;
case Event::MARK_COMPACTOR:
DCHECK_EQ(0u, current_.incremental_marking_bytes);
@@ -289,10 +337,12 @@ void GCTracer::Stop(GarbageCollector collector) {
MakeBytesAndDuration(current_.start_object_size, duration));
ResetIncrementalMarkingCounters();
combined_mark_compact_speed_cache_ = 0.0;
+ FetchBackgroundMarkCompactCounters();
break;
case Event::START:
UNREACHABLE();
}
+ FetchBackgroundGeneralCounters();
heap_->UpdateTotalGCTime(duration);
@@ -467,6 +517,10 @@ void GCTracer::PrintNVP() const {
"scavenge.weak_global_handles.identify=%.2f "
"scavenge.weak_global_handles.process=%.2f "
"scavenge.parallel=%.2f "
+ "background.scavenge.parallel=%.2f "
+ "background.array_buffer_free=%.2f "
+ "background.store_buffer=%.2f "
+ "background.unmapper=%.2f "
"incremental.steps_count=%d "
"incremental.steps_took=%.1f "
"scavenge_throughput=%.f "
@@ -493,7 +547,6 @@ void GCTracer::PrintNVP() const {
"semi_space_copy_rate=%.1f%% "
"new_space_allocation_throughput=%.1f "
"unmapper_chunks=%d "
- "unmapper_delayed_chunks=%d "
"context_disposal_rate=%.1f\n",
duration, spent_in_mutator, current_.TypeName(true),
current_.reduce_memory, current_.scopes[Scope::HEAP_PROLOGUE],
@@ -512,6 +565,10 @@ void GCTracer::PrintNVP() const {
current_
.scopes[Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_PROCESS],
current_.scopes[Scope::SCAVENGER_SCAVENGE_PARALLEL],
+ current_.scopes[Scope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL],
+ current_.scopes[Scope::BACKGROUND_ARRAY_BUFFER_FREE],
+ current_.scopes[Scope::BACKGROUND_STORE_BUFFER],
+ current_.scopes[Scope::BACKGROUND_UNMAPPER],
current_.incremental_marking_scopes[GCTracer::Scope::MC_INCREMENTAL]
.steps,
current_.scopes[Scope::MC_INCREMENTAL],
@@ -526,7 +583,6 @@ void GCTracer::PrintNVP() const {
heap_->semi_space_copied_rate_,
NewSpaceAllocationThroughputInBytesPerMillisecond(),
heap_->memory_allocator()->unmapper()->NumberOfChunks(),
- heap_->memory_allocator()->unmapper()->NumberOfDelayedChunks(),
ContextDisposalRateInMilliseconds());
break;
case Event::MINOR_MARK_COMPACTOR:
@@ -550,6 +606,12 @@ void GCTracer::PrintNVP() const {
"evacuate.update_pointers=%.2f "
"evacuate.update_pointers.to_new_roots=%.2f "
"evacuate.update_pointers.slots=%.2f "
+ "background.mark=%.2f "
+ "background.evacuate.copy=%.2f "
+ "background.evacuate.update_pointers=%.2f "
+ "background.array_buffer_free=%.2f "
+ "background.store_buffer=%.2f "
+ "background.unmapper=%.2f "
"update_marking_deque=%.2f "
"reset_liveness=%.2f\n",
duration, spent_in_mutator, "mmc", current_.reduce_memory,
@@ -569,6 +631,12 @@ void GCTracer::PrintNVP() const {
current_
.scopes[Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS],
current_.scopes[Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS],
+ current_.scopes[Scope::MINOR_MC_BACKGROUND_MARKING],
+ current_.scopes[Scope::MINOR_MC_BACKGROUND_EVACUATE_COPY],
+ current_.scopes[Scope::MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS],
+ current_.scopes[Scope::BACKGROUND_ARRAY_BUFFER_FREE],
+ current_.scopes[Scope::BACKGROUND_STORE_BUFFER],
+ current_.scopes[Scope::BACKGROUND_UNMAPPER],
current_.scopes[Scope::MINOR_MC_MARKING_DEQUE],
current_.scopes[Scope::MINOR_MC_RESET_LIVENESS]);
break;
@@ -640,6 +708,13 @@ void GCTracer::PrintNVP() const {
"incremental_steps_count=%d "
"incremental_marking_throughput=%.f "
"incremental_walltime_duration=%.f "
+ "background.mark=%.1f "
+ "background.sweep=%.1f "
+ "background.evacuate.copy=%.1f "
+ "background.evacuate.update_pointers=%.1f "
+ "background.array_buffer_free=%.2f "
+ "background.store_buffer=%.2f "
+ "background.unmapper=%.1f "
"total_size_before=%" PRIuS
" "
"total_size_after=%" PRIuS
@@ -663,7 +738,6 @@ void GCTracer::PrintNVP() const {
"semi_space_copy_rate=%.1f%% "
"new_space_allocation_throughput=%.1f "
"unmapper_chunks=%d "
- "unmapper_delayed_chunks=%d "
"context_disposal_rate=%.1f "
"compaction_speed=%.f\n",
duration, spent_in_mutator, current_.TypeName(true),
@@ -732,10 +806,17 @@ void GCTracer::PrintNVP() const {
.longest_step,
current_.incremental_marking_scopes[Scope::MC_INCREMENTAL].steps,
IncrementalMarkingSpeedInBytesPerMillisecond(),
- incremental_walltime_duration, current_.start_object_size,
- current_.end_object_size, current_.start_holes_size,
- current_.end_holes_size, allocated_since_last_gc,
- heap_->promoted_objects_size(),
+ incremental_walltime_duration,
+ current_.scopes[Scope::MC_BACKGROUND_MARKING],
+ current_.scopes[Scope::MC_BACKGROUND_SWEEPING],
+ current_.scopes[Scope::MC_BACKGROUND_EVACUATE_COPY],
+ current_.scopes[Scope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS],
+ current_.scopes[Scope::BACKGROUND_ARRAY_BUFFER_FREE],
+ current_.scopes[Scope::BACKGROUND_STORE_BUFFER],
+ current_.scopes[Scope::BACKGROUND_UNMAPPER],
+ current_.start_object_size, current_.end_object_size,
+ current_.start_holes_size, current_.end_holes_size,
+ allocated_since_last_gc, heap_->promoted_objects_size(),
heap_->semi_space_copied_object_size(),
heap_->nodes_died_in_new_space_, heap_->nodes_copied_in_new_space_,
heap_->nodes_promoted_, heap_->promotion_ratio_,
@@ -743,7 +824,6 @@ void GCTracer::PrintNVP() const {
heap_->semi_space_copied_rate_,
NewSpaceAllocationThroughputInBytesPerMillisecond(),
heap_->memory_allocator()->unmapper()->NumberOfChunks(),
- heap_->memory_allocator()->unmapper()->NumberOfDelayedChunks(),
ContextDisposalRateInMilliseconds(),
CompactionSpeedInBytesPerMillisecond());
break;
@@ -899,5 +979,72 @@ void GCTracer::NotifyIncrementalMarkingStart() {
incremental_marking_start_time_ = heap_->MonotonicallyIncreasingTimeInMs();
}
+void GCTracer::FetchBackgroundMarkCompactCounters() {
+ FetchBackgroundCounters(Scope::FIRST_MC_BACKGROUND_SCOPE,
+ Scope::LAST_MC_BACKGROUND_SCOPE,
+ BackgroundScope::FIRST_MC_BACKGROUND_SCOPE,
+ BackgroundScope::LAST_MC_BACKGROUND_SCOPE);
+ heap_->isolate()->counters()->background_marking()->AddSample(
+ static_cast<int>(current_.scopes[Scope::MC_BACKGROUND_MARKING]));
+ heap_->isolate()->counters()->background_sweeping()->AddSample(
+ static_cast<int>(current_.scopes[Scope::MC_BACKGROUND_SWEEPING]));
+}
+
+void GCTracer::FetchBackgroundMinorGCCounters() {
+ FetchBackgroundCounters(Scope::FIRST_MINOR_GC_BACKGROUND_SCOPE,
+ Scope::LAST_MINOR_GC_BACKGROUND_SCOPE,
+ BackgroundScope::FIRST_MINOR_GC_BACKGROUND_SCOPE,
+ BackgroundScope::LAST_MINOR_GC_BACKGROUND_SCOPE);
+ heap_->isolate()->counters()->background_scavenger()->AddSample(
+ static_cast<int>(
+ current_.scopes[Scope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL]));
+}
+
+void GCTracer::FetchBackgroundGeneralCounters() {
+ FetchBackgroundCounters(Scope::FIRST_GENERAL_BACKGROUND_SCOPE,
+ Scope::LAST_GENERAL_BACKGROUND_SCOPE,
+ BackgroundScope::FIRST_GENERAL_BACKGROUND_SCOPE,
+ BackgroundScope::LAST_GENERAL_BACKGROUND_SCOPE);
+}
+
+void GCTracer::FetchBackgroundCounters(int first_global_scope,
+ int last_global_scope,
+ int first_background_scope,
+ int last_background_scope) {
+ DCHECK_EQ(last_global_scope - first_global_scope,
+ last_background_scope - first_background_scope);
+ base::LockGuard<base::Mutex> guard(&background_counter_mutex_);
+ int background_mc_scopes = last_background_scope - first_background_scope + 1;
+ for (int i = 0; i < background_mc_scopes; i++) {
+ current_.scopes[first_global_scope + i] +=
+ background_counter_[first_background_scope + i].total_duration_ms;
+ background_counter_[first_background_scope + i].total_duration_ms = 0;
+ }
+ if (V8_LIKELY(!FLAG_runtime_stats)) return;
+ RuntimeCallStats* runtime_stats =
+ heap_->isolate()->counters()->runtime_call_stats();
+ if (!runtime_stats) return;
+ for (int i = 0; i < background_mc_scopes; i++) {
+ runtime_stats
+ ->GetCounter(GCTracer::RCSCounterFromScope(
+ static_cast<Scope::ScopeId>(first_global_scope + i)))
+ ->Add(&background_counter_[first_background_scope + i]
+ .runtime_call_counter);
+ background_counter_[first_background_scope + i]
+ .runtime_call_counter.Reset();
+ }
+}
+
+void GCTracer::AddBackgroundScopeSample(
+ BackgroundScope::ScopeId scope, double duration,
+ RuntimeCallCounter* runtime_call_counter) {
+ base::LockGuard<base::Mutex> guard(&background_counter_mutex_);
+ BackgroundCounter& counter = background_counter_[scope];
+ counter.total_duration_ms += duration;
+ if (runtime_call_counter) {
+ counter.runtime_call_counter.Add(runtime_call_counter);
+ }
+}
+
} // namespace internal
} // namespace v8
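The handoff implemented above is worth spelling out: background GC tasks record scope durations into background_counter_ under background_counter_mutex_, and the main thread later folds those pending totals into the per-cycle current_.scopes and resets them. A minimal standalone sketch of the same pattern, with illustrative names (BackgroundTally, kNumScopes) rather than V8's classes:

    #include <array>
    #include <mutex>

    class BackgroundTally {
     public:
      static constexpr int kNumScopes = 4;

      // Called from background tasks (cf. GCTracer::AddBackgroundScopeSample).
      void AddSample(int scope, double duration_ms) {
        std::lock_guard<std::mutex> guard(mutex_);
        pending_[scope] += duration_ms;
      }

      // Called on the main thread after a GC cycle
      // (cf. GCTracer::FetchBackgroundCounters): fold pending background time
      // into the foreground totals for scopes [first, last] and reset it.
      void Fetch(int first, int last, std::array<double, kNumScopes>* totals) {
        std::lock_guard<std::mutex> guard(mutex_);
        for (int i = first; i <= last; i++) {
          (*totals)[i] += pending_[i];
          pending_[i] = 0.0;
        }
      }

     private:
      std::mutex mutex_;
      std::array<double, kNumScopes> pending_ = {};
    };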
diff --git a/deps/v8/src/heap/gc-tracer.h b/deps/v8/src/heap/gc-tracer.h
index 2a0c47692e..fb0f001e3d 100644
--- a/deps/v8/src/heap/gc-tracer.h
+++ b/deps/v8/src/heap/gc-tracer.h
@@ -31,6 +31,11 @@ enum ScavengeSpeedMode { kForAllObjects, kForSurvivedObjects };
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), \
GCTracer::Scope::Name(gc_tracer_scope_id))
+#define TRACE_BACKGROUND_GC(tracer, scope_id) \
+ GCTracer::BackgroundScope background_scope(tracer, scope_id); \
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), \
+ GCTracer::BackgroundScope::Name(scope_id))
+
// GCTracer collects and prints ONE line after each garbage collector
// invocation IFF --trace_gc is used.
class V8_EXPORT_PRIVATE GCTracer {
@@ -61,7 +66,7 @@ class V8_EXPORT_PRIVATE GCTracer {
public:
enum ScopeId {
#define DEFINE_SCOPE(scope) scope,
- TRACER_SCOPES(DEFINE_SCOPE)
+ TRACER_SCOPES(DEFINE_SCOPE) TRACER_BACKGROUND_SCOPES(DEFINE_SCOPE)
#undef DEFINE_SCOPE
NUMBER_OF_SCOPES,
@@ -69,7 +74,13 @@ class V8_EXPORT_PRIVATE GCTracer {
LAST_INCREMENTAL_SCOPE = MC_INCREMENTAL_EXTERNAL_PROLOGUE,
FIRST_SCOPE = MC_INCREMENTAL,
NUMBER_OF_INCREMENTAL_SCOPES =
- LAST_INCREMENTAL_SCOPE - FIRST_INCREMENTAL_SCOPE + 1
+ LAST_INCREMENTAL_SCOPE - FIRST_INCREMENTAL_SCOPE + 1,
+ FIRST_GENERAL_BACKGROUND_SCOPE = BACKGROUND_ARRAY_BUFFER_FREE,
+ LAST_GENERAL_BACKGROUND_SCOPE = BACKGROUND_UNMAPPER,
+ FIRST_MC_BACKGROUND_SCOPE = MC_BACKGROUND_EVACUATE_COPY,
+ LAST_MC_BACKGROUND_SCOPE = MC_BACKGROUND_SWEEPING,
+ FIRST_MINOR_GC_BACKGROUND_SCOPE = MINOR_MC_BACKGROUND_EVACUATE_COPY,
+ LAST_MINOR_GC_BACKGROUND_SCOPE = SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL
};
Scope(GCTracer* tracer, ScopeId scope);
@@ -86,6 +97,34 @@ class V8_EXPORT_PRIVATE GCTracer {
DISALLOW_COPY_AND_ASSIGN(Scope);
};
+ class V8_EXPORT_PRIVATE BackgroundScope {
+ public:
+ enum ScopeId {
+#define DEFINE_SCOPE(scope) scope,
+ TRACER_BACKGROUND_SCOPES(DEFINE_SCOPE)
+#undef DEFINE_SCOPE
+ NUMBER_OF_SCOPES,
+ FIRST_GENERAL_BACKGROUND_SCOPE = BACKGROUND_ARRAY_BUFFER_FREE,
+ LAST_GENERAL_BACKGROUND_SCOPE = BACKGROUND_UNMAPPER,
+ FIRST_MC_BACKGROUND_SCOPE = MC_BACKGROUND_EVACUATE_COPY,
+ LAST_MC_BACKGROUND_SCOPE = MC_BACKGROUND_SWEEPING,
+ FIRST_MINOR_GC_BACKGROUND_SCOPE = MINOR_MC_BACKGROUND_EVACUATE_COPY,
+ LAST_MINOR_GC_BACKGROUND_SCOPE = SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL
+ };
+ BackgroundScope(GCTracer* tracer, ScopeId scope);
+ ~BackgroundScope();
+
+ static const char* Name(ScopeId id);
+
+ private:
+ GCTracer* tracer_;
+ ScopeId scope_;
+ double start_time_;
+ RuntimeCallTimer timer_;
+ RuntimeCallCounter counter_;
+ bool runtime_stats_enabled_;
+ DISALLOW_COPY_AND_ASSIGN(BackgroundScope);
+ };
class Event {
public:
@@ -159,9 +198,8 @@ class V8_EXPORT_PRIVATE GCTracer {
};
static const int kThroughputTimeFrameMs = 5000;
- static const int kFirstGCIndexInRuntimeCallStats = 0;
- static RuntimeCallStats::CounterId RCSCounterFromScope(Scope::ScopeId id);
+ static RuntimeCallCounterId RCSCounterFromScope(Scope::ScopeId id);
explicit GCTracer(Heap* heap);
@@ -273,9 +311,16 @@ class V8_EXPORT_PRIVATE GCTracer {
}
}
+ void AddBackgroundScopeSample(BackgroundScope::ScopeId scope, double duration,
+ RuntimeCallCounter* runtime_call_counter);
+
private:
FRIEND_TEST(GCTracer, AverageSpeed);
FRIEND_TEST(GCTracerTest, AllocationThroughput);
+ FRIEND_TEST(GCTracerTest, BackgroundScavengerScope);
+ FRIEND_TEST(GCTracerTest, BackgroundMinorMCScope);
+ FRIEND_TEST(GCTracerTest, BackgroundMajorMCScope);
+ FRIEND_TEST(GCTracerTest, MultithreadedBackgroundScope);
FRIEND_TEST(GCTracerTest, NewSpaceAllocationThroughput);
FRIEND_TEST(GCTracerTest, NewSpaceAllocationThroughputWithProvidedTime);
FRIEND_TEST(GCTracerTest, OldGenerationAllocationThroughputWithProvidedTime);
@@ -284,6 +329,11 @@ class V8_EXPORT_PRIVATE GCTracer {
FRIEND_TEST(GCTracerTest, IncrementalScope);
FRIEND_TEST(GCTracerTest, IncrementalMarkingSpeed);
+ struct BackgroundCounter {
+ double total_duration_ms;
+ RuntimeCallCounter runtime_call_counter;
+ };
+
// Returns the average speed of the events in the buffer.
// If the buffer is empty, the result is 0.
// Otherwise, the result is between 1 byte/ms and 1 GB/ms.
@@ -315,6 +365,13 @@ class V8_EXPORT_PRIVATE GCTracer {
current_.scopes[Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE];
}
+ void FetchBackgroundCounters(int first_global_scope, int last_global_scope,
+ int first_background_scope,
+ int last_background_scope);
+ void FetchBackgroundMinorGCCounters();
+ void FetchBackgroundMarkCompactCounters();
+ void FetchBackgroundGeneralCounters();
+
// Pointer to the heap that owns this tracer.
Heap* heap_;
@@ -368,8 +425,12 @@ class V8_EXPORT_PRIVATE GCTracer {
base::RingBuffer<double> recorded_context_disposal_times_;
base::RingBuffer<double> recorded_survival_ratios_;
+ base::Mutex background_counter_mutex_;
+ BackgroundCounter background_counter_[BackgroundScope::NUMBER_OF_SCOPES];
+
DISALLOW_COPY_AND_ASSIGN(GCTracer);
};
+
} // namespace internal
} // namespace v8
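The TRACE_BACKGROUND_GC macro above expands to an RAII object, so a background task brackets its work with a single line and the sample is reported from the destructor even on early returns. A standalone sketch of that idiom (ScopedSample and Tally are illustrative names, not V8 API):

    #include <chrono>

    struct Tally {
      double total_ms = 0.0;
      void AddSample(double ms) { total_ms += ms; }  // cf. AddBackgroundScopeSample
    };

    class ScopedSample {
     public:
      explicit ScopedSample(Tally* tally)
          : tally_(tally), start_(std::chrono::steady_clock::now()) {}
      // The destructor reports the elapsed time, mirroring ~BackgroundScope().
      ~ScopedSample() {
        std::chrono::duration<double, std::milli> elapsed =
            std::chrono::steady_clock::now() - start_;
        tally_->AddSample(elapsed.count());
      }

     private:
      Tally* tally_;
      std::chrono::steady_clock::time_point start_;
    };

    void RunInParallel(Tally* tally) {
      ScopedSample sample(tally);  // cf. TRACE_BACKGROUND_GC(tracer, scope_id)
      // ... parallel scavenging work ...
    }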
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index bf9eb2874f..f4e5c1fe13 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -52,6 +52,13 @@ ROOT_LIST(ROOT_ACCESSOR)
STRUCT_LIST(STRUCT_MAP_ACCESSOR)
#undef STRUCT_MAP_ACCESSOR
+#define DATA_HANDLER_MAP_ACCESSOR(NAME, Name, Size, name) \
+ Map* Heap::name##_map() { \
+ return Map::cast(roots_[k##Name##Size##MapRootIndex]); \
+ }
+DATA_HANDLER_LIST(DATA_HANDLER_MAP_ACCESSOR)
+#undef DATA_HANDLER_MAP_ACCESSOR
+
#define STRING_ACCESSOR(name, str) \
String* Heap::name() { return String::cast(roots_[k##name##RootIndex]); }
INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
@@ -243,11 +250,14 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
DCHECK(AllowHandleAllocation::IsAllowed());
DCHECK(AllowHeapAllocation::IsAllowed());
DCHECK(gc_state_ == NOT_IN_GC);
-#ifdef DEBUG
- if (FLAG_gc_interval >= 0 && !always_allocate() &&
- Heap::allocation_timeout_-- <= 0) {
- return AllocationResult::Retry(space);
+#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
+ if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
+ if (!always_allocate() && Heap::allocation_timeout_-- <= 0) {
+ return AllocationResult::Retry(space);
+ }
}
+#endif
+#ifdef DEBUG
isolate_->counters()->objs_since_last_full()->Increment();
isolate_->counters()->objs_since_last_young()->Increment();
#endif
@@ -314,10 +324,10 @@ void Heap::OnAllocationEvent(HeapObject* object, int size_in_bytes) {
if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
PrintAllocationsHash();
}
- }
-
- if (FLAG_trace_allocation_stack_interval > 0) {
- if (!FLAG_verify_predictable) ++allocations_count_;
+ } else if (FLAG_fuzzer_gc_analysis) {
+ ++allocations_count_;
+ } else if (FLAG_trace_allocation_stack_interval > 0) {
+ ++allocations_count_;
if (allocations_count_ % FLAG_trace_allocation_stack_interval == 0) {
isolate()->PrintStack(stdout, Isolate::kPrintStackConcise);
}
@@ -349,10 +359,11 @@ void Heap::OnMoveEvent(HeapObject* target, HeapObject* source,
if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
PrintAllocationsHash();
}
+ } else if (FLAG_fuzzer_gc_analysis) {
+ ++allocations_count_;
}
}
-
void Heap::UpdateAllocationsHash(HeapObject* object) {
Address object_address = object->address();
MemoryChunk* memory_chunk = MemoryChunk::FromAddress(object_address);
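The hunk above moves the allocation-timeout countdown from a DEBUG-only feature to one gated on V8_ENABLE_ALLOCATION_TIMEOUT, armed for either --gc-interval or the --random-gc-interval fuzzing flag (the re-arming logic, NextAllocationTimeout, appears in the heap.cc hunks below). The mechanism reduces to the following standalone sketch; TimeoutHeap and its members are illustrative names:

    #include <algorithm>
    #include <random>

    struct TimeoutHeap {
      int gc_interval = -1;        // cf. --gc-interval
      int random_gc_interval = 0;  // cf. --random-gc-interval
      int allocation_timeout = 0;
      std::mt19937 rng{42};        // cf. Isolate::fuzzer_rng()

      // Returns false when the countdown expires, i.e. the caller must GC
      // first (cf. AllocationResult::Retry in Heap::AllocateRaw).
      bool AllocateRaw() {
        if (random_gc_interval > 0 || gc_interval >= 0) {
          if (allocation_timeout-- <= 0) return false;
        }
        return true;  // bump-pointer allocation would proceed here
      }

      // cf. Heap::CollectGarbage: re-arm, allowing at least a few allocations.
      void CollectGarbage() {
        if (random_gc_interval > 0 || gc_interval >= 0) {
          allocation_timeout =
              std::max(6, NextAllocationTimeout(allocation_timeout));
        }
      }

      int NextAllocationTimeout(int current_timeout) {
        if (random_gc_interval > 0) {
          // Only re-randomize once the previous countdown actually expired.
          if (current_timeout <= 0) {
            return std::uniform_int_distribution<int>(0, random_gc_interval)(rng);
          }
          return current_timeout;
        }
        return gc_interval;
      }
    };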
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index 5aed117903..49c2eccb2d 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -43,9 +43,11 @@
#include "src/heap/scavenge-job.h"
#include "src/heap/scavenger-inl.h"
#include "src/heap/store-buffer.h"
+#include "src/heap/stress-marking-observer.h"
+#include "src/heap/stress-scavenge-observer.h"
#include "src/heap/sweeper.h"
#include "src/interpreter/interpreter.h"
-#include "src/objects/object-macros.h"
+#include "src/objects/data-handler.h"
#include "src/objects/shared-function-info.h"
#include "src/regexp/jsregexp.h"
#include "src/runtime-profiler.h"
@@ -60,6 +62,9 @@
#include "src/v8.h"
#include "src/vm-state-inl.h"
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
namespace v8 {
namespace internal {
@@ -89,14 +94,12 @@ void Heap::SetInterpreterEntryReturnPCOffset(int pc_offset) {
set_interpreter_entry_return_pc_offset(Smi::FromInt(pc_offset));
}
-void Heap::SetSerializedTemplates(FixedArray* templates) {
- DCHECK_EQ(empty_fixed_array(), serialized_templates());
+void Heap::SetSerializedObjects(FixedArray* objects) {
DCHECK(isolate()->serializer_enabled());
- set_serialized_templates(templates);
+ set_serialized_objects(objects);
}
void Heap::SetSerializedGlobalProxySizes(FixedArray* sizes) {
- DCHECK_EQ(empty_fixed_array(), serialized_global_proxy_sizes());
DCHECK(isolate()->serializer_enabled());
set_serialized_global_proxy_sizes(sizes);
}
@@ -172,13 +175,13 @@ Heap::Heap()
gc_post_processing_depth_(0),
allocations_count_(0),
raw_allocations_hash_(0),
+ stress_marking_observer_(nullptr),
+ stress_scavenge_observer_(nullptr),
+ max_marking_limit_reached_(0.0),
ms_count_(0),
gc_count_(0),
mmap_region_base_(0),
remembered_unmapped_pages_index_(0),
-#ifdef DEBUG
- allocation_timeout_(0),
-#endif // DEBUG
old_generation_allocation_limit_(initial_old_generation_size_),
inline_allocation_disabled_(false),
tracer_(nullptr),
@@ -227,7 +230,12 @@ Heap::Heap()
use_tasks_(true),
force_oom_(false),
delay_sweeper_tasks_for_testing_(false),
- pending_layout_change_object_(nullptr) {
+ pending_layout_change_object_(nullptr)
+#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
+ ,
+ allocation_timeout_(0)
+#endif // V8_ENABLE_ALLOCATION_TIMEOUT
+{
// Ensure old_generation_size_ is a multiple of kPageSize.
DCHECK_EQ(0, max_old_generation_size_ & (Page::kPageSize - 1));
@@ -296,9 +304,9 @@ size_t Heap::Available() {
if (!HasBeenSetUp()) return 0;
size_t total = 0;
- AllSpaces spaces(this);
- for (Space* space = spaces.next(); space != nullptr; space = spaces.next()) {
- total += space->Available();
+
+ for (SpaceIterator it(this); it.has_next();) {
+ total += it.next()->Available();
}
return total;
}
@@ -356,30 +364,6 @@ void Heap::SetGCState(HeapState state) {
gc_state_ = state;
}
-// TODO(1238405): Combine the infrastructure for --heap-stats and
-// --log-gc to avoid the complicated preprocessor and flag testing.
-void Heap::ReportStatisticsBeforeGC() {
-// Heap::ReportHeapStatistics will also log NewSpace statistics when
-// compiled --log-gc is set. The following logic is used to avoid
-// double logging.
-#ifdef DEBUG
- if (FLAG_heap_stats || FLAG_log_gc) new_space_->CollectStatistics();
- if (FLAG_heap_stats) {
- ReportHeapStatistics("Before GC");
- } else if (FLAG_log_gc) {
- new_space_->ReportStatistics();
- }
- if (FLAG_heap_stats || FLAG_log_gc) new_space_->ClearHistograms();
-#else
- if (FLAG_log_gc) {
- new_space_->CollectStatistics();
- new_space_->ReportStatistics();
- new_space_->ClearHistograms();
- }
-#endif // DEBUG
-}
-
-
void Heap::PrintShortHeapStatistics() {
if (!FLAG_trace_gc_verbose) return;
PrintIsolate(isolate_, "Memory allocator, used: %6" PRIuS
@@ -437,21 +421,7 @@ void Heap::PrintShortHeapStatistics() {
total_gc_time_ms_);
}
-// TODO(1238405): Combine the infrastructure for --heap-stats and
-// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsAfterGC() {
-// Similar to the before GC, we use some complicated logic to ensure that
-// NewSpace statistics are logged exactly once when --log-gc is turned on.
-#if defined(DEBUG)
- if (FLAG_heap_stats) {
- new_space_->CollectStatistics();
- ReportHeapStatistics("After GC");
- } else if (FLAG_log_gc) {
- new_space_->ReportStatistics();
- }
-#else
- if (FLAG_log_gc) new_space_->ReportStatistics();
-#endif // DEBUG
for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
++i) {
int count = deferred_counters_[i];
@@ -630,8 +600,6 @@ void Heap::GarbageCollectionPrologue() {
DCHECK(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
if (FLAG_gc_verbose) Print();
-
- ReportStatisticsBeforeGC();
#endif // DEBUG
if (new_space_->IsAtMaximumCapacity()) {
@@ -650,9 +618,9 @@ void Heap::GarbageCollectionPrologue() {
size_t Heap::SizeOfObjects() {
size_t total = 0;
- AllSpaces spaces(this);
- for (Space* space = spaces.next(); space != nullptr; space = spaces.next()) {
- total += space->SizeOfObjects();
+
+ for (SpaceIterator it(this); it.has_next();) {
+ total += it.next()->SizeOfObjects();
}
return total;
}
@@ -712,6 +680,34 @@ void Heap::MergeAllocationSitePretenuringFeedback(
}
}
+void Heap::AddAllocationObserversToAllSpaces(
+ AllocationObserver* observer, AllocationObserver* new_space_observer) {
+ DCHECK(observer && new_space_observer);
+
+ for (SpaceIterator it(this); it.has_next();) {
+ Space* space = it.next();
+ if (space == new_space()) {
+ space->AddAllocationObserver(new_space_observer);
+ } else {
+ space->AddAllocationObserver(observer);
+ }
+ }
+}
+
+void Heap::RemoveAllocationObserversFromAllSpaces(
+ AllocationObserver* observer, AllocationObserver* new_space_observer) {
+ DCHECK(observer && new_space_observer);
+
+ for (SpaceIterator it(this); it.has_next();) {
+ Space* space = it.next();
+ if (space == new_space()) {
+ space->RemoveAllocationObserver(new_space_observer);
+ } else {
+ space->RemoveAllocationObserver(observer);
+ }
+ }
+}
+
class Heap::SkipStoreBufferScope {
public:
explicit SkipStoreBufferScope(StoreBuffer* store_buffer)
@@ -1027,7 +1023,10 @@ class GCCallbacksScope {
void Heap::HandleGCRequest() {
- if (HighMemoryPressure()) {
+ if (FLAG_stress_scavenge > 0 && stress_scavenge_observer_->HasRequestedGC()) {
+ CollectAllGarbage(NEW_SPACE, GarbageCollectionReason::kTesting);
+ stress_scavenge_observer_->RequestedGCDone();
+ } else if (HighMemoryPressure()) {
incremental_marking()->reset_request_type();
CheckMemoryPressure();
} else if (incremental_marking()->request_type() ==
@@ -1129,7 +1128,7 @@ void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
InvokeOutOfMemoryCallback();
}
RuntimeCallTimerScope runtime_timer(
- isolate(), &RuntimeCallStats::GC_Custom_AllAvailableGarbage);
+ isolate(), RuntimeCallCounterId::kGC_Custom_AllAvailableGarbage);
if (isolate()->concurrent_recompilation_enabled()) {
// The optimizing compiler may be unnecessarily holding on to memory.
DisallowHeapAllocation no_recursive_gc;
@@ -1217,13 +1216,14 @@ bool Heap::CollectGarbage(AllocationSpace space,
const char* collector_reason = nullptr;
GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
-#ifdef DEBUG
- // Reset the allocation timeout to the GC interval, but make sure to
- // allow at least a few allocations after a collection. The reason
- // for this is that we have a lot of allocation sequences and we
- // assume that a garbage collection will allow the subsequent
- // allocation attempts to go through.
- allocation_timeout_ = Max(6, FLAG_gc_interval);
+#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
+ // Reset the allocation timeout, but make sure to allow at least a few
+ // allocations after a collection. The reason for this is that we have a lot
+ // of allocation sequences and we assume that a garbage collection will allow
+ // the subsequent allocation attempts to go through.
+ if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
+ allocation_timeout_ = Max(6, NextAllocationTimeout(allocation_timeout_));
+ }
#endif
EnsureFillerObjectAtTop();
@@ -1680,8 +1680,8 @@ bool Heap::PerformGarbageCollection(
void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
- RuntimeCallTimerScope runtime_timer(isolate(),
- &RuntimeCallStats::GCPrologueCallback);
+ RuntimeCallTimerScope runtime_timer(
+ isolate(), RuntimeCallCounterId::kGCPrologueCallback);
for (const GCCallbackTuple& info : gc_prologue_callbacks_) {
if (gc_type & info.gc_type) {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
@@ -1691,8 +1691,8 @@ void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
}
void Heap::CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags) {
- RuntimeCallTimerScope runtime_timer(isolate(),
- &RuntimeCallStats::GCEpilogueCallback);
+ RuntimeCallTimerScope runtime_timer(
+ isolate(), RuntimeCallCounterId::kGCEpilogueCallback);
for (const GCCallbackTuple& info : gc_epilogue_callbacks_) {
if (gc_type & info.gc_type) {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
@@ -1741,6 +1741,8 @@ void Heap::MinorMarkCompact() {
PauseAllocationObserversScope pause_observers(this);
IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
incremental_marking());
+ CodeSpaceMemoryModificationScope code_modification(this);

+ ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
minor_mark_compact_collector()->CollectGarbage();
@@ -1826,7 +1828,7 @@ void Heap::EvacuateYoungGeneration() {
DCHECK(CanExpandOldGeneration(new_space()->Size()));
}
- mark_compact_collector()->sweeper()->EnsureNewSpaceCompleted();
+ mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
SetGCState(SCAVENGE);
LOG(isolate_, ResourceEvent("scavenge", "begin"));
@@ -1845,7 +1847,7 @@ void Heap::EvacuateYoungGeneration() {
if (!new_space()->Rebalance()) {
FatalProcessOutOfMemory("NewSpace::Rebalance");
}
- new_space()->ResetAllocationInfo();
+ new_space()->ResetLinearAllocationArea();
new_space()->set_age_mark(new_space()->top());
// Fix up special trackers.
@@ -1887,6 +1889,9 @@ class ScavengingTask final : public ItemParallelJob::Task {
barrier_(barrier) {}
void RunInParallel() final {
+ TRACE_BACKGROUND_GC(
+ heap_->tracer(),
+ GCTracer::BackgroundScope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL);
double scavenging_time = 0.0;
{
barrier_->Start();
@@ -1942,14 +1947,8 @@ void Heap::Scavenge() {
IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
incremental_marking());
- if (mark_compact_collector()->sweeper()->sweeping_in_progress() &&
- memory_allocator_->unmapper()->NumberOfDelayedChunks() >
- static_cast<int>(new_space_->MaximumCapacity() / Page::kPageSize)) {
- mark_compact_collector()->EnsureSweepingCompleted();
- }
- // TODO(mlippautz): Untangle the dependency of the unmapper from the sweeper.
- mark_compact_collector()->sweeper()->EnsureNewSpaceCompleted();
+ mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
SetGCState(SCAVENGE);
@@ -1959,7 +1958,7 @@ void Heap::Scavenge() {
// Flip the semispaces. After flipping, to space is empty, from space has
// live objects.
new_space_->Flip();
- new_space_->ResetAllocationInfo();
+ new_space_->ResetLinearAllocationArea();
ItemParallelJob job(isolate()->cancelable_task_manager(),
&parallel_scavenge_semaphore_);
@@ -2415,8 +2414,8 @@ AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
map->set_bit_field(0);
map->set_bit_field2(0);
int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
- Map::OwnsDescriptors::encode(true) |
- Map::ConstructionCounter::encode(Map::kNoSlackTracking);
+ Map::OwnsDescriptorsBit::encode(true) |
+ Map::ConstructionCounterBits::encode(Map::kNoSlackTracking);
map->set_bit_field3(bit_field3);
map->set_weak_cell_cache(Smi::kZero);
map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
@@ -2464,14 +2463,14 @@ AllocationResult Heap::AllocateMap(InstanceType instance_type,
// |layout_descriptor| are set.
map->set_visitor_id(Map::GetVisitorId(map));
map->set_bit_field(0);
- map->set_bit_field2(1 << Map::kIsExtensible);
+ map->set_bit_field2(Map::IsExtensibleBit::kMask);
int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
- Map::OwnsDescriptors::encode(true) |
- Map::ConstructionCounter::encode(Map::kNoSlackTracking);
+ Map::OwnsDescriptorsBit::encode(true) |
+ Map::ConstructionCounterBits::encode(Map::kNoSlackTracking);
map->set_bit_field3(bit_field3);
map->set_elements_kind(elements_kind);
map->set_new_target_is_base(true);
-
+ if (FLAG_trace_maps) LOG(isolate(), MapCreate(map));
return map;
}
@@ -2608,6 +2607,10 @@ void Heap::CreateJSConstructEntryStub() {
set_js_construct_entry_code(*stub.GetCode());
}
+void Heap::CreateJSRunMicrotasksEntryStub() {
+ JSEntryStub stub(isolate(), JSEntryStub::SpecialTarget::kRunMicrotasks);
+ set_js_run_microtasks_entry_code(*stub.GetCode());
+}
void Heap::CreateFixedStubs() {
// Here we create roots for fixed stubs. They are needed at GC
@@ -2639,6 +2642,7 @@ void Heap::CreateFixedStubs() {
// To work around the problem, make separate functions without inlining.
Heap::CreateJSEntryStub();
Heap::CreateJSConstructEntryStub();
+ Heap::CreateJSRunMicrotasksEntryStub();
}
bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
@@ -2656,7 +2660,7 @@ bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
case kFeedbackVectorsForProfilingToolsRootIndex:
case kNoScriptSharedFunctionInfosRootIndex:
case kWeakStackTraceListRootIndex:
- case kSerializedTemplatesRootIndex:
+ case kSerializedObjectsRootIndex:
case kSerializedGlobalProxySizesRootIndex:
case kPublicSymbolTableRootIndex:
case kApiSymbolTableRootIndex:
@@ -3984,7 +3988,9 @@ AllocationResult Heap::AllocateFeedbackVector(SharedFunctionInfo* shared,
result->set_map_after_allocation(feedback_vector_map(), SKIP_WRITE_BARRIER);
FeedbackVector* vector = FeedbackVector::cast(result);
vector->set_shared_function_info(shared);
- vector->set_optimized_code_cell(Smi::FromEnum(OptimizationMarker::kNone));
+ vector->set_optimized_code_cell(Smi::FromEnum(
+ FLAG_log_function_events ? OptimizationMarker::kLogFirstExecution
+ : OptimizationMarker::kNone));
vector->set_length(length);
vector->set_invocation_count(0);
vector->set_profiler_ticks(0);
@@ -4546,9 +4552,9 @@ void Heap::CollectCodeStatistics() {
void Heap::Print() {
if (!HasBeenSetUp()) return;
isolate()->PrintStack(stdout);
- AllSpaces spaces(this);
- for (Space* space = spaces.next(); space != nullptr; space = spaces.next()) {
- space->Print();
+
+ for (SpaceIterator it(this); it.has_next();) {
+ it.next()->Print();
}
}
@@ -4559,37 +4565,6 @@ void Heap::ReportCodeStatistics(const char* title) {
CodeStatistics::ReportCodeStatistics(isolate());
}
-
-// This function expects that NewSpace's allocated objects histogram is
-// populated (via a call to CollectStatistics or else as a side effect of a
-// just-completed scavenge collection).
-void Heap::ReportHeapStatistics(const char* title) {
- USE(title);
- PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n", title,
- gc_count_);
- PrintF("old_generation_allocation_limit_ %" PRIuS "\n",
- old_generation_allocation_limit_);
-
- PrintF("\n");
- PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
- isolate_->global_handles()->PrintStats();
- PrintF("\n");
-
- PrintF("Heap statistics : ");
- memory_allocator()->ReportStatistics();
- PrintF("To space : ");
- new_space_->ReportStatistics();
- PrintF("Old space : ");
- old_space_->ReportStatistics();
- PrintF("Code space : ");
- code_space_->ReportStatistics();
- PrintF("Map space : ");
- map_space_->ReportStatistics();
- PrintF("Large object space : ");
- lo_space_->ReportStatistics();
- PrintF(">>>>>> ========================================= >>>>>>\n");
-}
-
#endif // DEBUG
const char* Heap::GarbageCollectionReasonToString(
@@ -4887,7 +4862,8 @@ void Heap::ZapFromSpace() {
PageRange(new_space_->FromSpaceStart(), new_space_->FromSpaceEnd())) {
for (Address cursor = page->area_start(), limit = page->area_end();
cursor < limit; cursor += kPointerSize) {
- Memory::Address_at(cursor) = kFromSpaceZapValue;
+ Memory::Address_at(cursor) =
+ reinterpret_cast<Address>(kFromSpaceZapValue);
}
}
}
@@ -4904,8 +4880,11 @@ void Heap::IterateWeakRoots(RootVisitor* v, VisitMode mode) {
v->VisitRootPointer(Root::kStringTable, reinterpret_cast<Object**>(
&roots_[kStringTableRootIndex]));
v->Synchronize(VisitorSynchronization::kStringTable);
- if (!isMinorGC && mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
+ if (!isMinorGC && mode != VISIT_ALL_IN_SWEEP_NEWSPACE &&
+ mode != VISIT_FOR_SERIALIZATION) {
// Scavenge collections have special processing for this.
+ // Do not visit for serialization, since the external string table will
+ // be populated from scratch upon deserialization.
external_string_table_.IterateAll(v);
}
v->Synchronize(VisitorSynchronization::kExternalStringsTable);
@@ -5007,7 +4986,10 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
// Iterate over global handles.
switch (mode) {
- case VISIT_ONLY_STRONG_FOR_SERIALIZATION:
+ case VISIT_FOR_SERIALIZATION:
+ // Global handles are not iterated by the serializer. Values referenced by
+ // global handles need to be added manually.
+ break;
case VISIT_ONLY_STRONG:
isolate_->global_handles()->IterateStrongRoots(v);
break;
@@ -5027,11 +5009,14 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
}
v->Synchronize(VisitorSynchronization::kGlobalHandles);
- // Iterate over eternal handles.
- if (isMinorGC) {
- isolate_->eternal_handles()->IterateNewSpaceRoots(v);
- } else {
- isolate_->eternal_handles()->IterateAllRoots(v);
+ // Iterate over eternal handles. Eternal handles are not iterated by the
+ // serializer. Values referenced by eternal handles need to be added manually.
+ if (mode != VISIT_FOR_SERIALIZATION) {
+ if (isMinorGC) {
+ isolate_->eternal_handles()->IterateNewSpaceRoots(v);
+ } else {
+ isolate_->eternal_handles()->IterateAllRoots(v);
+ }
}
v->Synchronize(VisitorSynchronization::kEternalHandles);
@@ -5046,13 +5031,11 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
v->Synchronize(VisitorSynchronization::kStrongRoots);
// Iterate over the partial snapshot cache unless serializing.
- if (mode != VISIT_ONLY_STRONG_FOR_SERIALIZATION) {
+ if (mode != VISIT_FOR_SERIALIZATION) {
SerializerDeserializer::Iterate(isolate_, v);
+ // We don't do a v->Synchronize call here because the serializer and the
+ // deserializer are deliberately out of sync at this point.
}
- // We don't do a v->Synchronize call here, because in debug mode that will
- // output a flag to the snapshot. However at this point the serializer and
- // deserializer are deliberately a little unsynchronized (see above) so the
- // checking of the sync flag in the snapshot would fail.
}
@@ -5095,8 +5078,8 @@ bool Heap::ConfigureHeap(size_t max_semi_space_size_in_kb,
// The new space size must be a power of two to support single-bit testing
// for containment.
- max_semi_space_size_ = static_cast<size_t>(base::bits::RoundUpToPowerOfTwo64(
- static_cast<uint64_t>(max_semi_space_size_)));
+ max_semi_space_size_ = base::bits::RoundUpToPowerOfTwo32(
+ static_cast<uint32_t>(max_semi_space_size_));
if (max_semi_space_size_ == kMaxSemiSpaceSizeInKB * KB) {
// Start with at least 1*MB semi-space on machines with a lot of memory.
@@ -5470,13 +5453,20 @@ Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
if (bytes_to_limit > 0) {
double current_percent = (gained_since_last_gc / bytes_to_limit) * 100.0;
- if (FLAG_trace_incremental_marking) {
+ if (FLAG_trace_stress_marking) {
isolate()->PrintWithTimestamp(
"[IncrementalMarking] %.2lf%% of the memory limit reached\n",
current_percent);
}
- if (static_cast<int>(current_percent) >= stress_marking_percentage_) {
+ if (FLAG_fuzzer_gc_analysis) {
+ // Skips values >=100% since they already trigger marking.
+ if (current_percent < 100.0) {
+ max_marking_limit_reached_ =
+ std::max(max_marking_limit_reached_, current_percent);
+ }
+ } else if (static_cast<int>(current_percent) >=
+ stress_marking_percentage_) {
stress_marking_percentage_ = NextStressMarkingLimit();
return IncrementalMarkingLimit::kHardLimit;
}
@@ -5521,13 +5511,13 @@ void Heap::DisableInlineAllocation() {
CodeSpaceMemoryModificationScope modification_scope(this);
for (PagedSpace* space = spaces.next(); space != nullptr;
space = spaces.next()) {
- space->EmptyAllocationInfo();
+ space->FreeLinearAllocationArea();
}
}
bool Heap::SetUp() {
-#ifdef DEBUG
- allocation_timeout_ = FLAG_gc_interval;
+#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
+ allocation_timeout_ = NextAllocationTimeout();
#endif
// Initialize heap spaces and initial maps and objects. Whenever something
@@ -5543,7 +5533,7 @@ bool Heap::SetUp() {
}
mmap_region_base_ =
- reinterpret_cast<uintptr_t>(base::OS::GetRandomMmapAddr()) &
+ reinterpret_cast<uintptr_t>(v8::internal::GetRandomMmapAddr()) &
~kMmapRegionMask;
// Set up memory allocator.
@@ -5632,6 +5622,13 @@ bool Heap::SetUp() {
if (FLAG_stress_marking > 0) {
stress_marking_percentage_ = NextStressMarkingLimit();
+ stress_marking_observer_ = new StressMarkingObserver(*this);
+ AddAllocationObserversToAllSpaces(stress_marking_observer_,
+ stress_marking_observer_);
+ }
+ if (FLAG_stress_scavenge > 0) {
+ stress_scavenge_observer_ = new StressScavengeObserver(*this);
+ new_space()->AddAllocationObserver(stress_scavenge_observer_);
}
write_protect_code_memory_ = FLAG_write_protect_code_memory;
@@ -5667,11 +5664,34 @@ void Heap::ClearStackLimits() {
roots_[kRealStackLimitRootIndex] = Smi::kZero;
}
+int Heap::NextAllocationTimeout(int current_timeout) {
+ if (FLAG_random_gc_interval > 0) {
+ // If the current timeout hasn't reached 0, the GC was caused by something
+ // other than the --stress-atomic-gc flag, so we don't update the timeout.
+ if (current_timeout <= 0) {
+ return isolate()->fuzzer_rng()->NextInt(FLAG_random_gc_interval + 1);
+ } else {
+ return current_timeout;
+ }
+ }
+ return FLAG_gc_interval;
+}
+
void Heap::PrintAllocationsHash() {
uint32_t hash = StringHasher::GetHashCore(raw_allocations_hash_);
PrintF("\n### Allocations = %u, hash = 0x%08x\n", allocations_count(), hash);
}
+void Heap::PrintMaxMarkingLimitReached() {
+ PrintF("\n### Maximum marking limit reached = %.02lf\n",
+ max_marking_limit_reached_);
+}
+
+void Heap::PrintMaxNewSpaceSizeReached() {
+ PrintF("\n### Maximum new space size reached = %.02lf\n",
+ stress_scavenge_observer_->MaxNewSpaceSizeReached());
+}
+
int Heap::NextStressMarkingLimit() {
return isolate()->fuzzer_rng()->NextInt(FLAG_stress_marking + 1);
}
@@ -5734,14 +5754,35 @@ void Heap::TearDown() {
UpdateMaximumCommitted();
- if (FLAG_verify_predictable) {
+ if (FLAG_verify_predictable || FLAG_fuzzer_gc_analysis) {
PrintAllocationsHash();
}
+ if (FLAG_fuzzer_gc_analysis) {
+ if (FLAG_stress_marking > 0) {
+ PrintMaxMarkingLimitReached();
+ }
+ if (FLAG_stress_scavenge > 0) {
+ PrintMaxNewSpaceSizeReached();
+ }
+ }
+
new_space()->RemoveAllocationObserver(idle_scavenge_observer_);
delete idle_scavenge_observer_;
idle_scavenge_observer_ = nullptr;
+ if (FLAG_stress_marking > 0) {
+ RemoveAllocationObserversFromAllSpaces(stress_marking_observer_,
+ stress_marking_observer_);
+ delete stress_marking_observer_;
+ stress_marking_observer_ = nullptr;
+ }
+ if (FLAG_stress_scavenge > 0) {
+ new_space()->RemoveAllocationObserver(stress_scavenge_observer_);
+ delete stress_scavenge_observer_;
+ stress_scavenge_observer_ = nullptr;
+ }
+
if (mark_compact_collector_ != nullptr) {
mark_compact_collector_->TearDown();
delete mark_compact_collector_;
@@ -6086,22 +6127,6 @@ void Heap::RecordWritesIntoCode(Code* code) {
}
}
-Space* AllSpaces::next() {
- switch (counter_++) {
- case NEW_SPACE:
- return heap_->new_space();
- case OLD_SPACE:
- return heap_->old_space();
- case CODE_SPACE:
- return heap_->code_space();
- case MAP_SPACE:
- return heap_->map_space();
- case LO_SPACE:
- return heap_->lo_space();
- default:
- return nullptr;
- }
-}
PagedSpace* PagedSpaces::next() {
switch (counter_++) {
@@ -6116,18 +6141,6 @@ PagedSpace* PagedSpaces::next() {
}
}
-
-OldSpace* OldSpaces::next() {
- switch (counter_++) {
- case OLD_SPACE:
- return heap_->old_space();
- case CODE_SPACE:
- return heap_->code_space();
- default:
- return nullptr;
- }
-}
-
SpaceIterator::SpaceIterator(Heap* heap)
: heap_(heap), current_space_(FIRST_SPACE - 1) {}
@@ -6380,9 +6393,9 @@ void Heap::RememberUnmappedPage(Address page, bool compacted) {
uintptr_t p = reinterpret_cast<uintptr_t>(page);
// Tag the page pointer to make it findable in the dump file.
if (compacted) {
- p ^= 0xc1ead & (Page::kPageSize - 1); // Cleared.
+ p ^= 0xC1EAD & (Page::kPageSize - 1); // Cleared.
} else {
- p ^= 0x1d1ed & (Page::kPageSize - 1); // I died.
+ p ^= 0x1D1ED & (Page::kPageSize - 1); // I died.
}
remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
reinterpret_cast<Address>(p);
@@ -6485,6 +6498,22 @@ bool Heap::GetObjectTypeName(size_t index, const char** object_type,
return false;
}
+size_t Heap::NumberOfNativeContexts() {
+ int result = 0;
+ Object* context = native_contexts_list();
+ while (!context->IsUndefined(isolate())) {
+ ++result;
+ Context* native_context = Context::cast(context);
+ context = native_context->next_context_link();
+ }
+ return result;
+}
+
+size_t Heap::NumberOfDetachedContexts() {
+ // The detached_contexts() array has two entries per detached context.
+ return detached_contexts()->length() / 2;
+}
+
const char* AllocationSpaceName(AllocationSpace space) {
switch (space) {
case NEW_SPACE:
@@ -6574,6 +6603,17 @@ void Heap::CreateObjectStats() {
}
}
+void AllocationObserver::AllocationStep(int bytes_allocated,
+ Address soon_object, size_t size) {
+ DCHECK_GE(bytes_allocated, 0);
+ bytes_to_next_step_ -= bytes_allocated;
+ if (bytes_to_next_step_ <= 0) {
+ Step(static_cast<int>(step_size_ - bytes_to_next_step_), soon_object, size);
+ step_size_ = GetNextStepSize();
+ bytes_to_next_step_ = step_size_;
+ }
+}
+
namespace {
Map* GcSafeMapOfCodeSpaceObject(HeapObject* object) {
@@ -6610,15 +6650,13 @@ Code* Heap::GcSafeFindCodeForInnerPointer(Address inner_pointer) {
return GcSafeCastToCode(this, large_page->GetObject(), inner_pointer);
}
- if (!code_space()->Contains(inner_pointer)) {
- return nullptr;
- }
+ DCHECK(code_space()->Contains(inner_pointer));
// Iterate through the page until we reach the end or find an object starting
// after the inner pointer.
Page* page = Page::FromAddress(inner_pointer);
DCHECK_EQ(page->owner(), code_space());
- mark_compact_collector()->sweeper()->SweepOrWaitUntilSweepingCompleted(page);
+ mark_compact_collector()->sweeper()->EnsurePageIsIterable(page);
Address addr = page->skip_list()->StartFor(inner_pointer);
Address top = code_space()->top();
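One of the smaller additions in this file, NumberOfNativeContexts, simply walks the intrusive weak list that chains native contexts together. A standalone sketch of that walk (Node stands in for Context, head for native_contexts_list(), and nullptr for the list terminator):

    #include <cstddef>

    struct Node {
      Node* next_context_link;  // cf. Context::next_context_link()
    };

    // cf. Heap::NumberOfNativeContexts: count entries until the terminator.
    size_t NumberOfNativeContexts(Node* head) {
      size_t result = 0;
      for (Node* context = head; context != nullptr;
           context = context->next_context_link) {
        ++result;
      }
      return result;
    }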
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index 72d74c9715..3d8234f392 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -42,6 +42,7 @@ class BytecodeArray;
class CodeDataContainer;
class DeoptimizationData;
class HandlerTable;
+class IncrementalMarking;
class JSArrayBuffer;
using v8::MemoryPressureLevel;
@@ -105,6 +106,7 @@ using v8::MemoryPressureLevel;
V(Map, script_context_table_map, ScriptContextTableMap) \
/* Maps */ \
V(Map, descriptor_array_map, DescriptorArrayMap) \
+ V(Map, array_list_map, ArrayListMap) \
V(Map, fixed_double_array_map, FixedDoubleArrayMap) \
V(Map, mutable_heap_number_map, MutableHeapNumberMap) \
V(Map, ordered_hash_map_map, OrderedHashMapMap) \
@@ -245,7 +247,7 @@ using v8::MemoryPressureLevel;
FeedbackVectorsForProfilingTools) \
V(Object, weak_stack_trace_list, WeakStackTraceList) \
V(Object, noscript_shared_function_infos, NoScriptSharedFunctionInfos) \
- V(FixedArray, serialized_templates, SerializedTemplates) \
+ V(FixedArray, serialized_objects, SerializedObjects) \
V(FixedArray, serialized_global_proxy_sizes, SerializedGlobalProxySizes) \
V(TemplateList, message_listeners, MessageListeners) \
/* DeserializeLazy handlers for lazy bytecode deserialization */ \
@@ -255,7 +257,8 @@ using v8::MemoryPressureLevel;
DeserializeLazyHandlerExtraWide) \
/* JS Entries */ \
V(Code, js_entry_code, JsEntryCode) \
- V(Code, js_construct_entry_code, JsConstructEntryCode)
+ V(Code, js_construct_entry_code, JsConstructEntryCode) \
+ V(Code, js_run_microtasks_entry_code, JsRunMicrotasksEntryCode)
// Entries in this list are limited to Smis and are not visited during GC.
#define SMI_ROOT_LIST(V) \
@@ -411,10 +414,11 @@ class ObjectStats;
class Page;
class PagedSpace;
class RootVisitor;
-class Scavenger;
class ScavengeJob;
+class Scavenger;
class Space;
class StoreBuffer;
+class StressScavengeObserver;
class TracePossibleWrapperReporter;
class WeakObjectRetainer;
@@ -523,78 +527,51 @@ struct CommentStatistic {
};
#endif
-class NumberAndSizeInfo BASE_EMBEDDED {
+class Heap {
public:
- NumberAndSizeInfo() : number_(0), bytes_(0) {}
+ // Declare all the root indices. This defines the root list order.
+ // clang-format off
+ enum RootListIndex {
+#define DECL(type, name, camel_name) k##camel_name##RootIndex,
+ STRONG_ROOT_LIST(DECL)
+#undef DECL
- int number() const { return number_; }
- void increment_number(int num) { number_ += num; }
+#define DECL(name, str) k##name##RootIndex,
+ INTERNALIZED_STRING_LIST(DECL)
+#undef DECL
- int bytes() const { return bytes_; }
- void increment_bytes(int size) { bytes_ += size; }
+#define DECL(name) k##name##RootIndex,
+ PRIVATE_SYMBOL_LIST(DECL)
+#undef DECL
- void clear() {
- number_ = 0;
- bytes_ = 0;
- }
+#define DECL(name, description) k##name##RootIndex,
+ PUBLIC_SYMBOL_LIST(DECL)
+ WELL_KNOWN_SYMBOL_LIST(DECL)
+#undef DECL
- private:
- int number_;
- int bytes_;
-};
+#define DECL(accessor_name, AccessorName) k##AccessorName##AccessorRootIndex,
+ ACCESSOR_INFO_LIST(DECL)
+#undef DECL
-// HistogramInfo class for recording a single "bar" of a histogram. This
-// class is used for collecting statistics to print to the log file.
-class HistogramInfo : public NumberAndSizeInfo {
- public:
- HistogramInfo() : NumberAndSizeInfo(), name_(nullptr) {}
+#define DECL(NAME, Name, name) k##Name##MapRootIndex,
+ STRUCT_LIST(DECL)
+#undef DECL
- const char* name() { return name_; }
- void set_name(const char* name) { name_ = name; }
+#define DECL(NAME, Name, Size, name) k##Name##Size##MapRootIndex,
+ DATA_HANDLER_LIST(DECL)
+#undef DECL
- private:
- const char* name_;
-};
+ kStringTableRootIndex,
-class Heap {
- public:
- // Declare all the root indices. This defines the root list order.
- enum RootListIndex {
-#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
- STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
-#undef ROOT_INDEX_DECLARATION
-
-#define STRING_INDEX_DECLARATION(name, str) k##name##RootIndex,
- INTERNALIZED_STRING_LIST(STRING_INDEX_DECLARATION)
-#undef STRING_DECLARATION
-
-#define SYMBOL_INDEX_DECLARATION(name) k##name##RootIndex,
- PRIVATE_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
-#undef SYMBOL_INDEX_DECLARATION
-
-#define SYMBOL_INDEX_DECLARATION(name, description) k##name##RootIndex,
- PUBLIC_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
- WELL_KNOWN_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
-#undef SYMBOL_INDEX_DECLARATION
-
-#define ACCESSOR_INDEX_DECLARATION(accessor_name, AccessorName) \
- k##AccessorName##AccessorRootIndex,
- ACCESSOR_INFO_LIST(ACCESSOR_INDEX_DECLARATION)
-#undef ACCESSOR_INDEX_DECLARATION
-
-// Utility type maps
-#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
- STRUCT_LIST(DECLARE_STRUCT_MAP)
-#undef DECLARE_STRUCT_MAP
- kStringTableRootIndex,
-
-#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
- SMI_ROOT_LIST(ROOT_INDEX_DECLARATION)
-#undef ROOT_INDEX_DECLARATION
- kRootListLength,
+#define DECL(type, name, camel_name) k##camel_name##RootIndex,
+ SMI_ROOT_LIST(DECL)
+#undef DECL
+
+ kRootListLength,
kStrongRootListLength = kStringTableRootIndex,
kSmiRootsStart = kStringTableRootIndex + 1
};
+ // clang-format on
enum FindMementoMode { kForRuntime, kForGC };
@@ -626,15 +603,15 @@ class Heap {
#endif
// Semi-space size needs to be a multiple of page size.
- static const size_t kMinSemiSpaceSizeInKB =
+ static const int kMinSemiSpaceSizeInKB =
1 * kPointerMultiplier * ((1 << kPageSizeBits) / KB);
- static const size_t kMaxSemiSpaceSizeInKB =
+ static const int kMaxSemiSpaceSizeInKB =
16 * kPointerMultiplier * ((1 << kPageSizeBits) / KB);
// The old space size has to be a multiple of Page::kPageSize.
// Sizes are in MB.
- static const size_t kMinOldGenerationSize = 128 * kPointerMultiplier;
- static const size_t kMaxOldGenerationSize = 1024 * kPointerMultiplier;
+ static const int kMinOldGenerationSize = 128 * kPointerMultiplier;
+ static const int kMaxOldGenerationSize = 1024 * kPointerMultiplier;
static const int kTraceRingBufferSize = 512;
static const int kStacktraceBufferSize = 512;
@@ -875,7 +852,7 @@ class Heap {
inline int NextScriptId();
inline int GetNextTemplateSerialNumber();
- void SetSerializedTemplates(FixedArray* templates);
+ void SetSerializedObjects(FixedArray* objects);
void SetSerializedGlobalProxySizes(FixedArray* sizes);
// For post mortem debugging.
@@ -1045,6 +1022,11 @@ class Heap {
STRUCT_LIST(STRUCT_MAP_ACCESSOR)
#undef STRUCT_MAP_ACCESSOR
+#define DATA_HANDLER_MAP_ACCESSOR(NAME, Name, Size, name) \
+ inline Map* name##_map();
+ DATA_HANDLER_LIST(DATA_HANDLER_MAP_ACCESSOR)
+#undef DATA_HANDLER_MAP_ACCESSOR
+
#define STRING_ACCESSOR(name, str) inline String* name();
INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
#undef STRING_ACCESSOR
@@ -1352,6 +1334,12 @@ class Heap {
bool GetObjectTypeName(size_t index, const char** object_type,
const char** object_sub_type);
+ // The total number of native contexts object on the heap.
+ size_t NumberOfNativeContexts();
+ // The total number of native contexts that were detached but were not
+ // garbage collected yet.
+ size_t NumberOfDetachedContexts();
+
// ===========================================================================
// Code statistics. ==========================================================
// ===========================================================================
@@ -1372,10 +1360,10 @@ class Heap {
size_t MaxOldGenerationSize() { return max_old_generation_size_; }
static size_t ComputeMaxOldGenerationSize(uint64_t physical_memory) {
- const size_t old_space_physical_memory_factor = 4;
- size_t computed_size = static_cast<size_t>(
- physical_memory / i::MB / old_space_physical_memory_factor *
- kPointerMultiplier);
+ const int old_space_physical_memory_factor = 4;
+ int computed_size =
+ static_cast<int>(physical_memory / i::MB /
+ old_space_physical_memory_factor * kPointerMultiplier);
return Max(Min(computed_size, kMaxOldGenerationSize),
kMinOldGenerationSize);
}
@@ -1387,11 +1375,11 @@ class Heap {
uint64_t capped_physical_memory =
Max(Min(physical_memory, max_physical_memory), min_physical_memory);
// linearly scale max semi-space size: (X-A)/(B-A)*(D-C)+C
- size_t semi_space_size_in_kb =
- static_cast<size_t>(((capped_physical_memory - min_physical_memory) *
- (kMaxSemiSpaceSizeInKB - kMinSemiSpaceSizeInKB)) /
- (max_physical_memory - min_physical_memory) +
- kMinSemiSpaceSizeInKB);
+ int semi_space_size_in_kb =
+ static_cast<int>(((capped_physical_memory - min_physical_memory) *
+ (kMaxSemiSpaceSizeInKB - kMinSemiSpaceSizeInKB)) /
+ (max_physical_memory - min_physical_memory) +
+ kMinSemiSpaceSizeInKB);
return RoundUp(semi_space_size_in_kb, (1 << kPageSizeBits) / KB);
}
@@ -1571,6 +1559,19 @@ class Heap {
const PretenuringFeedbackMap& local_pretenuring_feedback);
// ===========================================================================
+ // Allocation tracking. ======================================================
+ // ===========================================================================
+
+ // Adds {new_space_observer} to new space and {observer} to all other spaces.
+ void AddAllocationObserversToAllSpaces(
+ AllocationObserver* observer, AllocationObserver* new_space_observer);
+
+ // Removes {new_space_observer} from new space and {observer} from all
+ // other spaces.
+ void RemoveAllocationObserversFromAllSpaces(
+ AllocationObserver* observer, AllocationObserver* new_space_observer);
+
+ // ===========================================================================
// Retaining path tracking. ==================================================
// ===========================================================================
@@ -1599,21 +1600,22 @@ class Heap {
void VerifyRememberedSetFor(HeapObject* object);
#endif
+#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
+ void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; }
+#endif
+
#ifdef DEBUG
void VerifyCountersAfterSweeping();
void VerifyCountersBeforeConcurrentSweeping();
- void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; }
-
void Print();
void PrintHandles();
- // Report heap statistics.
- void ReportHeapStatistics(const char* title);
+ // Report code statistics.
void ReportCodeStatistics(const char* title);
#endif
void* GetRandomMmapAddr() {
- void* result = base::OS::GetRandomMmapAddr();
+ void* result = v8::internal::GetRandomMmapAddr();
#if V8_TARGET_ARCH_X64
#if V8_OS_MACOSX
// The Darwin kernel [as of macOS 10.12.5] does not clean up page
@@ -1624,7 +1626,7 @@ class Heap {
// killed. Confine the hint to a 32-bit section of the virtual address
// space. See crbug.com/700928.
uintptr_t offset =
- reinterpret_cast<uintptr_t>(base::OS::GetRandomMmapAddr()) &
+ reinterpret_cast<uintptr_t>(v8::internal::GetRandomMmapAddr()) &
kMmapRegionMask;
result = reinterpret_cast<void*>(mmap_region_base_ + offset);
#endif // V8_OS_MACOSX
@@ -1816,6 +1818,7 @@ class Heap {
// because of a gcc-4.4 bug that assigns wrong vtable entries.
NO_INLINE(void CreateJSEntryStub());
NO_INLINE(void CreateJSConstructEntryStub());
+ NO_INLINE(void CreateJSRunMicrotasksEntryStub());
void CreateFixedStubs();
@@ -1837,8 +1840,7 @@ class Heap {
// the old space.
void EvaluateOldSpaceLocalPretenuring(uint64_t size_of_objects_before_gc);
- // Record statistics before and after garbage collection.
- void ReportStatisticsBeforeGC();
+ // Record statistics after garbage collection.
void ReportStatisticsAfterGC();
// Creates and installs the full-sized number string cache.
@@ -1865,10 +1867,14 @@ class Heap {
GCIdleTimeHeapState heap_state, double start_ms,
double deadline_in_ms);
+ int NextAllocationTimeout(int current_timeout = 0);
inline void UpdateAllocationsHash(HeapObject* object);
inline void UpdateAllocationsHash(uint32_t value);
void PrintAllocationsHash();
+ void PrintMaxMarkingLimitReached();
+ void PrintMaxNewSpaceSizeReached();
+
int NextStressMarkingLimit();
void AddToRingBuffer(const char* string);
@@ -2387,6 +2393,17 @@ class Heap {
// is reached.
int stress_marking_percentage_;
+ // Observer that triggers more frequent checks of whether the incremental
+ // marking limit has been reached.
+ AllocationObserver* stress_marking_observer_;
+
+ // Observer that can cause early scavenge start.
+ StressScavengeObserver* stress_scavenge_observer_;
+
+ // The maximum percentage of the marking limit reached without causing
+ // marking. This is tracked when specifying --fuzzer-gc-analysis.
+ double max_marking_limit_reached_;
+
// How many mark-sweep collections happened.
unsigned int ms_count_;
@@ -2400,13 +2417,6 @@ class Heap {
int remembered_unmapped_pages_index_;
Address remembered_unmapped_pages_[kRememberedUnmappedPages];
-#ifdef DEBUG
- // If the --gc-interval flag is set to a positive value, this
- // variable holds the value indicating the number of allocations
- // remain until the next failure and garbage collection.
- int allocation_timeout_;
-#endif // DEBUG
-
// Limit that triggers a global GC on the next (normally caused) GC. This
// is checked when we have already decided to do a GC to help determine
// which collector to invoke, before expanding a paged space in the old
@@ -2552,6 +2562,13 @@ class Heap {
HeapObject* pending_layout_change_object_;
+#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
+ // If the --gc-interval flag is set to a positive value, this variable
+ // holds the number of allocations remaining until the next failure and
+ // garbage collection.
+ int allocation_timeout_;
+#endif // V8_ENABLE_ALLOCATION_TIMEOUT
+
std::map<HeapObject*, HeapObject*> retainer_;
std::map<HeapObject*, Root> retaining_root_;
// If an object is retained by an ephemeron, then the retaining key of the
@@ -2685,37 +2702,10 @@ class VerifySmisVisitor : public RootVisitor {
void VisitRootPointers(Root root, Object** start, Object** end) override;
};
-
-// Space iterator for iterating over all spaces of the heap. Returns each space
-// in turn, and null when it is done.
-class AllSpaces BASE_EMBEDDED {
- public:
- explicit AllSpaces(Heap* heap) : heap_(heap), counter_(FIRST_SPACE) {}
- Space* next();
-
- private:
- Heap* heap_;
- int counter_;
-};
-
-
-// Space iterator for iterating over all old spaces of the heap: Old space
-// and code space. Returns each space in turn, and null when it is done.
-class V8_EXPORT_PRIVATE OldSpaces BASE_EMBEDDED {
- public:
- explicit OldSpaces(Heap* heap) : heap_(heap), counter_(OLD_SPACE) {}
- OldSpace* next();
-
- private:
- Heap* heap_;
- int counter_;
-};
-
-
// Space iterator for iterating over all the paged spaces of the heap: Map
// space, old space, code space and cell space. Returns
// each space in turn, and null when it is done.
-class PagedSpaces BASE_EMBEDDED {
+class V8_EXPORT_PRIVATE PagedSpaces BASE_EMBEDDED {
public:
explicit PagedSpaces(Heap* heap) : heap_(heap), counter_(OLD_SPACE) {}
PagedSpace* next();
@@ -2800,15 +2790,7 @@ class AllocationObserver {
// Called each time the observed space does an allocation step. This may
// happen more frequently than the step_size we are monitoring (e.g. when
// there are multiple observers, or when a page or space boundary is
// encountered).
- void AllocationStep(int bytes_allocated, Address soon_object, size_t size) {
- bytes_to_next_step_ -= bytes_allocated;
- if (bytes_to_next_step_ <= 0) {
- Step(static_cast<int>(step_size_ - bytes_to_next_step_), soon_object,
- size);
- step_size_ = GetNextStepSize();
- bytes_to_next_step_ = step_size_;
- }
- }
+ void AllocationStep(int bytes_allocated, Address soon_object, size_t size);
protected:
intptr_t step_size() const { return step_size_; }
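AllocationStep, declared above and now defined out of line in the heap.cc hunk earlier, is a countdown with carry-over: the overshoot past zero is included in the byte count handed to Step(), and the counter is then re-armed. A standalone sketch with illustrative names:

    #include <cstdio>

    class Observer {
     public:
      explicit Observer(long step_size)
          : step_size_(step_size), bytes_to_next_step_(step_size) {}

      // cf. AllocationObserver::AllocationStep.
      void AllocationStep(int bytes_allocated) {
        bytes_to_next_step_ -= bytes_allocated;
        if (bytes_to_next_step_ <= 0) {
          // step_size_ - bytes_to_next_step_ is the exact number of bytes
          // allocated since the last Step(), including the overshoot.
          Step(static_cast<int>(step_size_ - bytes_to_next_step_));
          bytes_to_next_step_ = step_size_;  // V8 may pick a fresh size here
        }
      }

     private:
      void Step(int bytes_since_last) {
        std::printf("observer fired after %d bytes\n", bytes_since_last);
      }

      long step_size_;
      long bytes_to_next_step_;
    };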
diff --git a/deps/v8/src/heap/incremental-marking-job.cc b/deps/v8/src/heap/incremental-marking-job.cc
index 8acbd31ec7..fa6082ae7c 100644
--- a/deps/v8/src/heap/incremental-marking-job.cc
+++ b/deps/v8/src/heap/incremental-marking-job.cc
@@ -21,7 +21,7 @@ void IncrementalMarkingJob::Start(Heap* heap) {
}
void IncrementalMarkingJob::ScheduleTask(Heap* heap) {
- if (!task_pending_) {
+ if (!task_pending_ && heap->use_tasks()) {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap->isolate());
task_pending_ = true;
auto task = new Task(heap->isolate(), this);
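The extra use_tasks() check above keeps the job from posting new tasks once the heap is shutting down; together with task_pending_ it forms a simple at-most-one-task-in-flight guard. A standalone sketch (JobSketch is an illustrative name):

    struct JobSketch {
      bool task_pending = false;

      // cf. IncrementalMarkingJob::ScheduleTask: post at most one task, and
      // none at all once the heap no longer runs tasks (teardown).
      void ScheduleTask(bool use_tasks) {
        if (!task_pending && use_tasks) {
          task_pending = true;
          // ... post a marking Task to the foreground task runner ...
        }
      }

      // Called by the task itself when it runs.
      void TaskDidRun() { task_pending = false; }
    };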
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index a046dff4b0..4868adc26e 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -34,7 +34,8 @@ void IncrementalMarking::Observer::Step(int bytes_allocated, Address addr,
Heap* heap = incremental_marking_.heap();
VMState<GC> state(heap->isolate());
RuntimeCallTimerScope runtime_timer(
- heap->isolate(), &RuntimeCallStats::GC_Custom_IncrementalMarkingObserver);
+ heap->isolate(),
+ RuntimeCallCounterId::kGC_Custom_IncrementalMarkingObserver);
incremental_marking_.AdvanceIncrementalMarkingOnAllocation();
if (incremental_marking_.black_allocation() && addr != nullptr) {
// AdvanceIncrementalMarkingOnAllocation can start black allocation.
@@ -363,16 +364,8 @@ void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
SetState(SWEEPING);
}
- SpaceIterator it(heap_);
- while (it.has_next()) {
- Space* space = it.next();
- if (space == heap_->new_space()) {
- space->AddAllocationObserver(&new_generation_observer_);
- } else {
- space->AddAllocationObserver(&old_generation_observer_);
- }
- }
-
+ heap_->AddAllocationObserversToAllSpaces(&old_generation_observer_,
+ &new_generation_observer_);
incremental_marking_job()->Start(heap_);
}
@@ -427,7 +420,7 @@ void IncrementalMarking::StartMarking() {
IncrementalMarkingRootMarkingVisitor visitor(this);
heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
- if (FLAG_concurrent_marking) {
+ if (FLAG_concurrent_marking && heap_->use_tasks()) {
heap_->concurrent_marking()->ScheduleTasks();
}
@@ -442,9 +435,9 @@ void IncrementalMarking::StartBlackAllocation() {
DCHECK(!black_allocation_);
DCHECK(IsMarking());
black_allocation_ = true;
- heap()->old_space()->MarkAllocationInfoBlack();
- heap()->map_space()->MarkAllocationInfoBlack();
- heap()->code_space()->MarkAllocationInfoBlack();
+ heap()->old_space()->MarkLinearAllocationAreaBlack();
+ heap()->map_space()->MarkLinearAllocationAreaBlack();
+ heap()->code_space()->MarkLinearAllocationAreaBlack();
if (FLAG_trace_incremental_marking) {
heap()->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Black allocation started\n");
@@ -454,9 +447,9 @@ void IncrementalMarking::StartBlackAllocation() {
void IncrementalMarking::PauseBlackAllocation() {
DCHECK(FLAG_black_allocation);
DCHECK(IsMarking());
- heap()->old_space()->UnmarkAllocationInfo();
- heap()->map_space()->UnmarkAllocationInfo();
- heap()->code_space()->UnmarkAllocationInfo();
+ heap()->old_space()->UnmarkLinearAllocationArea();
+ heap()->map_space()->UnmarkLinearAllocationArea();
+ heap()->code_space()->UnmarkLinearAllocationArea();
if (FLAG_trace_incremental_marking) {
heap()->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Black allocation paused\n");
@@ -687,7 +680,7 @@ void IncrementalMarking::RevisitObject(HeapObject* obj) {
DCHECK(IsMarking());
DCHECK(FLAG_concurrent_marking || marking_state()->IsBlack(obj));
Page* page = Page::FromAddress(obj->address());
- if ((page->owner() != nullptr) && (page->owner()->identity() == LO_SPACE)) {
+ if (page->owner()->identity() == LO_SPACE) {
page->ResetProgressBar();
}
Map* map = obj->map();
@@ -996,10 +989,14 @@ size_t IncrementalMarking::Step(size_t bytes_to_process,
marking_worklist()->shared()->MergeGlobalPool(
marking_worklist()->on_hold());
}
+
+// Only print the marking worklist in debug mode to save ~40KB of code size.
+#ifdef DEBUG
if (FLAG_trace_incremental_marking && FLAG_trace_concurrent_marking &&
FLAG_trace_gc_verbose) {
marking_worklist()->Print();
}
+#endif
if (worklist_to_process == WorklistToProcess::kBailout) {
bytes_processed =
diff --git a/deps/v8/src/heap/local-allocator.h b/deps/v8/src/heap/local-allocator.h
index 2f21b382b6..0a23e774b3 100644
--- a/deps/v8/src/heap/local-allocator.h
+++ b/deps/v8/src/heap/local-allocator.h
@@ -33,7 +33,7 @@ class LocalAllocator {
compaction_spaces_.Get(CODE_SPACE));
// Give back remaining LAB space if this LocalAllocator's new space LAB
// sits right next to new space allocation top.
- const AllocationInfo info = new_space_lab_.Close();
+ const LinearAllocationArea info = new_space_lab_.Close();
const Address top = new_space_->top();
if (info.limit() != nullptr && info.limit() == top) {
DCHECK_NOT_NULL(info.top());
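The renamed LinearAllocationArea in this hunk is the per-worker LAB whose unused tail can be returned: if the LAB's limit coincides with the space's current allocation top, moving top back to the LAB's own top reclaims the tail. A standalone sketch of that check (Lab and GiveBackLab are illustrative names):

    struct Lab {
      char* top;    // next free byte inside the buffer
      char* limit;  // one past the end of the buffer
    };

    // cf. LocalAllocator: returns the new space top after (possibly) handing
    // the unused [lab.top, lab.limit) tail back to the space.
    char* GiveBackLab(const Lab& lab, char* space_top) {
      if (lab.limit != nullptr && lab.limit == space_top) {
        return lab.top;  // the LAB sits right under top; shrink top onto it
      }
      return space_top;  // not adjacent; the tail is left for a filler object
    }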
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index 4ae9dce439..30a7e55d6b 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -625,9 +625,6 @@ void MarkCompactCollector::EnsureSweepingCompleted() {
verifier.Run();
}
#endif
-
- if (heap()->memory_allocator()->unmapper()->has_delayed_chunks())
- heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
}
void MarkCompactCollector::ComputeEvacuationHeuristics(
@@ -925,6 +922,7 @@ void MarkCompactCollector::Finish() {
#endif
sweeper()->StartSweeperTasks();
+ sweeper()->StartIterabilityTasks();
// The hashing of weak_object_to_code_table is no longer valid.
heap()->weak_object_to_code_table()->Rehash();
@@ -1452,9 +1450,11 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
buffer_(LocalAllocationBuffer::InvalidBuffer()),
promoted_size_(0),
semispace_copied_size_(0),
- local_pretenuring_feedback_(local_pretenuring_feedback) {}
+ local_pretenuring_feedback_(local_pretenuring_feedback),
+ is_incremental_marking_(heap->incremental_marking()->IsMarking()) {}
inline bool Visit(HeapObject* object, int size) override {
+ if (TryEvacuateWithoutCopy(object)) return true;
HeapObject* target_object = nullptr;
if (heap_->ShouldBePromoted(object->address()) &&
TryEvacuateObject(OLD_SPACE, object, size, &target_object)) {
@@ -1474,6 +1474,26 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
intptr_t semispace_copied_size() { return semispace_copied_size_; }
private:
+ inline bool TryEvacuateWithoutCopy(HeapObject* object) {
+ if (is_incremental_marking_) return false;
+
+ Map* map = object->map();
+
+ // Some objects can be evacuated without creating a copy.
+ if (map->visitor_id() == kVisitThinString) {
+ HeapObject* actual = ThinString::cast(object)->unchecked_actual();
+ if (MarkCompactCollector::IsOnEvacuationCandidate(actual)) return false;
+ base::Relaxed_Store(
+ reinterpret_cast<base::AtomicWord*>(object->address()),
+ reinterpret_cast<base::AtomicWord>(
+ MapWord::FromForwardingAddress(actual).ToMap()));
+ return true;
+ }
+ // TODO(mlippautz): Handle ConsString.
+
+ return false;
+ }
+
inline AllocationSpace AllocateTargetObject(HeapObject* old_object, int size,
HeapObject** target_object) {
AllocationAlignment alignment = old_object->RequiredAlignment();
@@ -1505,6 +1525,7 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
intptr_t promoted_size_;
intptr_t semispace_copied_size_;
Heap::PretenuringFeedbackMap* local_pretenuring_feedback_;
+ bool is_incremental_marking_;
};
template <PageEvacuationMode mode>
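TryEvacuateWithoutCopy exploits the fact that a ThinString is only a wrapper around its actual string: instead of copying the wrapper, the collector overwrites its map word with a forwarding address that points at the wrapped string. A minimal sketch of that single-word update, assuming a plain pointer store where V8 encodes a tagged MapWord:

#include <atomic>

// Hypothetical stand-in: the first word of every heap object holds either a
// map pointer or, once the object is evacuated, a forwarding pointer.
struct HeapObjectSketch {
  std::atomic<void*> map_or_forwarding;
};

// Mirrors the relaxed store in the hunk: after this, visitors resolving the
// ThinString find the wrapped string directly, with no copy allocated.
inline void ForwardInPlace(HeapObjectSketch* object, void* target) {
  object->map_or_forwarding.store(target, std::memory_order_relaxed);
}

The is_incremental_marking_ bail-out suggests the in-place update is only performed when no concurrent marker can observe the overwritten map word mid-mark.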
@@ -1633,28 +1654,16 @@ void MarkCompactCollector::ProcessMarkingWorklist() {
DCHECK(marking_worklist()->IsBailoutEmpty());
}
-// Mark all objects reachable (transitively) from objects on the marking
-// stack including references only considered in the atomic marking pause.
-void MarkCompactCollector::ProcessEphemeralMarking(
- bool only_process_harmony_weak_collections) {
+void MarkCompactCollector::ProcessEphemeralMarking() {
DCHECK(marking_worklist()->IsEmpty());
bool work_to_do = true;
while (work_to_do) {
- if (!only_process_harmony_weak_collections) {
- if (heap_->local_embedder_heap_tracer()->InUse()) {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_TRACING);
- heap_->local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
- heap_->local_embedder_heap_tracer()->Trace(
- 0,
- EmbedderHeapTracer::AdvanceTracingActions(
- EmbedderHeapTracer::ForceCompletionAction::FORCE_COMPLETION));
- }
- } else {
- // TODO(mlippautz): We currently do not trace through blink when
- // discovering new objects reachable from weak roots (that have been made
- // strong). This is a limitation of not having a separate handle type
- // that doesn't require zapping before this phase. See crbug.com/668060.
- heap_->local_embedder_heap_tracer()->ClearCachedWrappersToTrace();
+ if (heap_->local_embedder_heap_tracer()->InUse()) {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_TRACING);
+ heap_->local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
+ heap_->local_embedder_heap_tracer()->Trace(
+ 0, EmbedderHeapTracer::AdvanceTracingActions(
+ EmbedderHeapTracer::ForceCompletionAction::FORCE_COMPLETION));
}
ProcessWeakCollections();
work_to_do = !marking_worklist()->IsEmpty();
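The simplified ProcessEphemeralMarking is a fixed-point loop: tracing embedder wrappers and processing weak collections can each discover new reachable objects, so both steps repeat until a round leaves the worklist empty. A self-contained sketch of the loop shape, where the step callback is a hypothetical stand-in for wrapper tracing plus weak-collection processing:

#include <deque>
#include <functional>

// Repeats the discovery step until it stops producing new work. Each round
// drains the worklist (marking transitively), so termination follows from
// the heap being finite: every object is discovered at most once.
void EphemeralFixedPoint(std::deque<void*>& worklist,
                         const std::function<void(std::deque<void*>&)>& step) {
  bool work_to_do = true;
  while (work_to_do) {
    step(worklist);                  // may push newly reachable objects
    work_to_do = !worklist.empty();
    worklist.clear();                // "drain": process and empty the list
  }
}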
@@ -1680,54 +1689,12 @@ void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
}
}
-class ObjectStatsVisitor : public HeapObjectVisitor {
- public:
- ObjectStatsVisitor(Heap* heap, ObjectStats* live_stats,
- ObjectStats* dead_stats)
- : live_collector_(heap, live_stats),
- dead_collector_(heap, dead_stats),
- marking_state_(
- heap->mark_compact_collector()->non_atomic_marking_state()) {
- DCHECK_NOT_NULL(live_stats);
- DCHECK_NOT_NULL(dead_stats);
- // Global objects are roots and thus recorded as live.
- live_collector_.CollectGlobalStatistics();
- }
-
- bool Visit(HeapObject* obj, int size) override {
- if (marking_state_->IsBlack(obj)) {
- live_collector_.CollectStatistics(obj);
- } else {
- DCHECK(!marking_state_->IsGrey(obj));
- dead_collector_.CollectStatistics(obj);
- }
- return true;
- }
-
- private:
- ObjectStatsCollector live_collector_;
- ObjectStatsCollector dead_collector_;
- MarkCompactCollector::NonAtomicMarkingState* marking_state_;
-};
-
-void MarkCompactCollector::VisitAllObjects(HeapObjectVisitor* visitor) {
- SpaceIterator space_it(heap());
- HeapObject* obj = nullptr;
- while (space_it.has_next()) {
- std::unique_ptr<ObjectIterator> it(space_it.next()->GetObjectIterator());
- ObjectIterator* obj_it = it.get();
- while ((obj = obj_it->Next()) != nullptr) {
- visitor->Visit(obj, obj->Size());
- }
- }
-}
-
void MarkCompactCollector::RecordObjectStats() {
if (V8_UNLIKELY(FLAG_gc_stats)) {
heap()->CreateObjectStats();
- ObjectStatsVisitor visitor(heap(), heap()->live_object_stats_,
- heap()->dead_object_stats_);
- VisitAllObjects(&visitor);
+ ObjectStatsCollector collector(heap(), heap()->live_object_stats_,
+ heap()->dead_object_stats_);
+ collector.Collect();
if (V8_UNLIKELY(FLAG_gc_stats &
v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
std::stringstream live, dead;
@@ -1844,6 +1811,8 @@ class YoungGenerationMarkingTask : public ItemParallelJob::Task {
}
void RunInParallel() override {
+ TRACE_BACKGROUND_GC(collector_->heap()->tracer(),
+ GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_MARKING);
double marking_time = 0.0;
{
TimedScope scope(&marking_time);
@@ -2146,7 +2115,7 @@ void MinorMarkCompactCollector::ProcessMarkingWorklist() {
void MinorMarkCompactCollector::CollectGarbage() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_SWEEPING);
- heap()->mark_compact_collector()->sweeper()->EnsureNewSpaceCompleted();
+ heap()->mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
CleanupSweepToIteratePages();
}
@@ -2186,6 +2155,15 @@ void MinorMarkCompactCollector::CollectGarbage() {
}
}
+ RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
+ heap(), [](MemoryChunk* chunk) {
+ if (chunk->SweepingDone()) {
+ RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
+ } else {
+ RememberedSet<OLD_TO_NEW>::PreFreeEmptyBuckets(chunk);
+ }
+ });
+
heap()->account_external_memory_concurrently_freed();
}
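The new OLD_TO_NEW pass chooses between immediate and deferred cleanup depending on sweeping state. A self-contained sketch of that pattern with hypothetical stand-in types; buckets on chunks still being swept are only queued, to avoid racing with sweeper tasks:

#include <vector>

struct ChunkSketch {
  bool sweeping_done;
  std::vector<int> empty_buckets;      // slot-set buckets with no entries
  std::vector<int> pre_freed_buckets;  // deferred until sweeping finishes
};

void CleanupEmptyBuckets(ChunkSketch& chunk) {
  if (chunk.sweeping_done) {
    chunk.empty_buckets.clear();                        // free immediately
  } else {
    chunk.pre_freed_buckets.swap(chunk.empty_buckets);  // free later
  }
}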
@@ -2210,7 +2188,7 @@ void MinorMarkCompactCollector::MakeIterable(
p->AddressToMarkbitIndex(free_start),
p->AddressToMarkbitIndex(free_end));
if (free_space_mode == ZAP_FREE_SPACE) {
- memset(free_start, 0xcc, size);
+ memset(free_start, 0xCC, size);
}
p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
ClearRecordedSlots::kNo);
@@ -2227,7 +2205,7 @@ void MinorMarkCompactCollector::MakeIterable(
p->AddressToMarkbitIndex(free_start),
p->AddressToMarkbitIndex(p->area_end()));
if (free_space_mode == ZAP_FREE_SPACE) {
- memset(free_start, 0xcc, size);
+ memset(free_start, 0xCC, size);
}
p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
ClearRecordedSlots::kNo);
@@ -2266,7 +2244,7 @@ void MinorMarkCompactCollector::EvacuatePrologue() {
new_space_evacuation_pages_.push_back(p);
}
new_space->Flip();
- new_space->ResetAllocationInfo();
+ new_space->ResetLinearAllocationArea();
}
void MinorMarkCompactCollector::EvacuateEpilogue() {
@@ -2367,20 +2345,20 @@ void MarkCompactCollector::MarkLiveObjects() {
DCHECK(marking_worklist()->IsEmpty());
- // The objects reachable from the roots are marked, yet unreachable
- // objects are unmarked. Mark objects reachable due to host
- // application specific logic or through Harmony weak maps.
+ // The objects reachable from the roots are marked, yet unreachable objects
+ // are unmarked. Mark objects reachable due to embedder heap tracing or
+ // harmony weak maps.
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERAL);
- ProcessEphemeralMarking(false);
+ ProcessEphemeralMarking();
DCHECK(marking_worklist()->IsEmpty());
}
- // The objects reachable from the roots, weak maps or object groups
- // are marked. Objects pointed to only by weak global handles cannot be
- // immediately reclaimed. Instead, we have to mark them as pending and mark
- // objects reachable from them.
+ // The objects reachable from the roots, weak maps, and embedder heap
+ // tracing are marked. Objects pointed to only by weak global handles cannot
+ // be immediately reclaimed. Instead, we have to mark them as pending and
+ // mark objects reachable from them.
//
// First we identify nonlive weak handles and mark them as pending
// destruction.
@@ -2392,6 +2370,8 @@ void MarkCompactCollector::MarkLiveObjects() {
ProcessMarkingWorklist();
}
+ // Process finalizers, effectively keeping them alive until the next
+ // garbage collection.
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_ROOTS);
@@ -2400,14 +2380,10 @@ void MarkCompactCollector::MarkLiveObjects() {
ProcessMarkingWorklist();
}
- // Repeat Harmony weak maps marking to mark unmarked objects reachable from
- // the weak roots we just marked as pending destruction.
- //
- // We only process harmony collections, as all object groups have been fully
- // processed and no weakly reachable node can discover new objects groups.
+ // Repeat ephemeral processing from the newly marked objects.
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE_HARMONY);
- ProcessEphemeralMarking(true);
+ ProcessEphemeralMarking();
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_EPILOGUE);
heap()->local_embedder_heap_tracer()->TraceEpilogue();
@@ -2914,7 +2890,7 @@ void MarkCompactCollector::EvacuatePrologue() {
new_space_evacuation_pages_.push_back(p);
}
new_space->Flip();
- new_space->ResetAllocationInfo();
+ new_space->ResetLinearAllocationArea();
// Old space.
DCHECK(old_space_evacuation_pages_.empty());
@@ -2998,6 +2974,8 @@ class Evacuator : public Malloced {
// to be called from the main thread.
inline void Finalize();
+ virtual GCTracer::BackgroundScope::ScopeId GetBackgroundTracingScope() = 0;
+
protected:
static const int kInitialLocalPretenuringFeedbackCapacity = 256;
@@ -3077,6 +3055,10 @@ class FullEvacuator : public Evacuator {
RecordMigratedSlotVisitor* record_visitor)
: Evacuator(collector->heap(), record_visitor), collector_(collector) {}
+ GCTracer::BackgroundScope::ScopeId GetBackgroundTracingScope() override {
+ return GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_COPY;
+ }
+
protected:
void RawEvacuatePage(Page* page, intptr_t* live_bytes) override;
@@ -3133,6 +3115,10 @@ class YoungGenerationEvacuator : public Evacuator {
RecordMigratedSlotVisitor* record_visitor)
: Evacuator(collector->heap(), record_visitor), collector_(collector) {}
+ GCTracer::BackgroundScope::ScopeId GetBackgroundTracingScope() override {
+ return GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_EVACUATE_COPY;
+ }
+
protected:
void RawEvacuatePage(Page* page, intptr_t* live_bytes) override;
@@ -3210,9 +3196,12 @@ class PageEvacuationItem : public ItemParallelJob::Item {
class PageEvacuationTask : public ItemParallelJob::Task {
public:
PageEvacuationTask(Isolate* isolate, Evacuator* evacuator)
- : ItemParallelJob::Task(isolate), evacuator_(evacuator) {}
+ : ItemParallelJob::Task(isolate),
+ evacuator_(evacuator),
+ tracer_(isolate->heap()->tracer()) {}
void RunInParallel() override {
+ TRACE_BACKGROUND_GC(tracer_, evacuator_->GetBackgroundTracingScope());
PageEvacuationItem* item = nullptr;
while ((item = GetItem<PageEvacuationItem>()) != nullptr) {
evacuator_->EvacuatePage(item->page());
@@ -3222,6 +3211,7 @@ class PageEvacuationTask : public ItemParallelJob::Task {
private:
Evacuator* evacuator_;
+ GCTracer* tracer_;
};
template <class Evacuator, class Collector>
@@ -3477,12 +3467,11 @@ void MarkCompactCollector::Evacuate() {
for (Page* p : new_space_evacuation_pages_) {
if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
- sweeper()->AddPage(p->owner()->identity(), p, Sweeper::REGULAR);
+ sweeper()->AddPageForIterability(p);
} else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
- p->ForAllFreeListCategories(
- [](FreeListCategory* category) { DCHECK(!category->is_linked()); });
- sweeper()->AddPage(p->owner()->identity(), p, Sweeper::REGULAR);
+ DCHECK_EQ(OLD_SPACE, p->owner()->identity());
+ sweeper()->AddPage(OLD_SPACE, p, Sweeper::REGULAR);
}
}
new_space_evacuation_pages_.clear();
@@ -3521,16 +3510,24 @@ class UpdatingItem : public ItemParallelJob::Item {
class PointersUpdatingTask : public ItemParallelJob::Task {
public:
- explicit PointersUpdatingTask(Isolate* isolate)
- : ItemParallelJob::Task(isolate) {}
+ explicit PointersUpdatingTask(Isolate* isolate,
+ GCTracer::BackgroundScope::ScopeId scope)
+ : ItemParallelJob::Task(isolate),
+ tracer_(isolate->heap()->tracer()),
+ scope_(scope) {}
void RunInParallel() override {
+ TRACE_BACKGROUND_GC(tracer_, scope_);
UpdatingItem* item = nullptr;
while ((item = GetItem<UpdatingItem>()) != nullptr) {
item->Process();
item->MarkFinished();
}
};
+
+ private:
+ GCTracer* tracer_;
+ GCTracer::BackgroundScope::ScopeId scope_;
};
template <typename MarkingState>
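Threading a GCTracer::BackgroundScope::ScopeId through the task constructors lets TRACE_BACKGROUND_GC attribute background work to a named phase. The macro's exact expansion is not shown in this diff; a hypothetical RAII sketch of the general shape, recording on entry and reporting elapsed time on exit:

#include <chrono>
#include <cstdio>

class BackgroundScopeSketch {
 public:
  explicit BackgroundScopeSketch(const char* name)
      : name_(name), start_(std::chrono::steady_clock::now()) {}
  ~BackgroundScopeSketch() {
    auto us = std::chrono::duration_cast<std::chrono::microseconds>(
                  std::chrono::steady_clock::now() - start_)
                  .count();
    std::printf("[background-gc] %s: %lld us\n", name_,
                static_cast<long long>(us));
  }

 private:
  const char* const name_;
  const std::chrono::steady_clock::time_point start_;
};

Placing one scope at the top of RunInParallel, as the hunks do, times the whole task body.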
@@ -3921,7 +3918,9 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
const int to_space_tasks = CollectToSpaceUpdatingItems(&updating_job);
const int num_tasks = Max(to_space_tasks, remembered_set_tasks);
for (int i = 0; i < num_tasks; i++) {
- updating_job.AddTask(new PointersUpdatingTask(isolate()));
+ updating_job.AddTask(new PointersUpdatingTask(
+ isolate(),
+ GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
}
updating_job.Run();
}
@@ -3951,7 +3950,9 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
const int num_tasks = Max(array_buffer_pages, remembered_set_tasks);
if (num_tasks > 0) {
for (int i = 0; i < num_tasks; i++) {
- updating_job.AddTask(new PointersUpdatingTask(isolate()));
+ updating_job.AddTask(new PointersUpdatingTask(
+ isolate(),
+ GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
}
updating_job.Run();
heap()->array_buffer_collector()->FreeAllocationsOnBackgroundThread();
@@ -3996,11 +3997,15 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
remembered_set_pages += CollectRememberedSetUpdatingItems(
&updating_job, heap()->lo_space(),
RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
- const int remembered_set_tasks = NumberOfParallelPointerUpdateTasks(
- remembered_set_pages, old_to_new_slots_);
+ const int remembered_set_tasks =
+ remembered_set_pages == 0 ? 0
+ : NumberOfParallelPointerUpdateTasks(
+ remembered_set_pages, old_to_new_slots_);
const int num_tasks = Max(to_space_tasks, remembered_set_tasks);
for (int i = 0; i < num_tasks; i++) {
- updating_job.AddTask(new PointersUpdatingTask(isolate()));
+ updating_job.AddTask(new PointersUpdatingTask(
+ isolate(), GCTracer::BackgroundScope::
+ MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
}
{
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index a68be9b241..6fda00633c 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -723,8 +723,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
int* target_fragmentation_percent,
size_t* max_evacuated_bytes);
- void VisitAllObjects(HeapObjectVisitor* visitor);
-
void RecordObjectStats();
// Finishes GC, performs heap verification if enabled.
@@ -751,13 +749,8 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// the string table are weak.
void MarkStringTable(ObjectVisitor* visitor);
- // Mark objects reachable (transitively) from objects in the marking stack
- // or overflowed in the heap. This respects references only considered in
- // the final atomic marking pause including the following:
- // - Processing of objects reachable through Harmony WeakMaps.
- // - Objects reachable due to host application logic like object groups,
- // implicit references' groups, or embedder heap tracing.
- void ProcessEphemeralMarking(bool only_process_harmony_weak_collections);
+  // Marks objects reachable from harmony weak maps and wrapper tracing.
+ void ProcessEphemeralMarking();
// If the call-site of the top optimized code was not prepared for
// deoptimization, then treat embedded pointers in the code as strong as
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index d8fe9fe7d8..f58a472671 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -4,6 +4,8 @@
#include "src/heap/object-stats.h"
+#include <unordered_set>
+
#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/compilation-cache.h"
@@ -19,7 +21,6 @@ namespace internal {
static base::LazyMutex object_stats_mutex = LAZY_MUTEX_INITIALIZER;
-
void ObjectStats::ClearObjectStats(bool clear_last_time_stats) {
memset(object_counts_, 0, sizeof(object_counts_));
memset(object_sizes_, 0, sizeof(object_sizes_));
@@ -104,16 +105,18 @@ void ObjectStats::PrintJSON(const char* key) {
#define FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER(name) \
PrintInstanceTypeJSON(key, gc_count, "*FIXED_ARRAY_" #name, \
FIRST_FIXED_ARRAY_SUB_TYPE + name);
+#define VIRTUAL_INSTANCE_TYPE_WRAPPER(name) \
+ PrintInstanceTypeJSON(key, gc_count, #name, FIRST_VIRTUAL_TYPE + name);
INSTANCE_TYPE_LIST(INSTANCE_TYPE_WRAPPER)
CODE_KIND_LIST(CODE_KIND_WRAPPER)
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER)
+ VIRTUAL_INSTANCE_TYPE_LIST(VIRTUAL_INSTANCE_TYPE_WRAPPER)
#undef INSTANCE_TYPE_WRAPPER
#undef CODE_KIND_WRAPPER
#undef FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER
-#undef PRINT_INSTANCE_TYPE_DATA
-#undef PRINT_KEY_AND_ID
+#undef VIRTUAL_INSTANCE_TYPE_WRAPPER
}
void ObjectStats::DumpInstanceTypeData(std::stringstream& stream,
@@ -154,58 +157,23 @@ void ObjectStats::Dump(std::stringstream& stream) {
#define FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER(name) \
DumpInstanceTypeData(stream, "*FIXED_ARRAY_" #name, \
FIRST_FIXED_ARRAY_SUB_TYPE + name);
+#define VIRTUAL_INSTANCE_TYPE_WRAPPER(name) \
+ DumpInstanceTypeData(stream, #name, FIRST_VIRTUAL_TYPE + name);
INSTANCE_TYPE_LIST(INSTANCE_TYPE_WRAPPER);
CODE_KIND_LIST(CODE_KIND_WRAPPER);
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER);
+ VIRTUAL_INSTANCE_TYPE_LIST(VIRTUAL_INSTANCE_TYPE_WRAPPER)
stream << "\"END\":{}}}";
#undef INSTANCE_TYPE_WRAPPER
#undef CODE_KIND_WRAPPER
#undef FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER
-#undef PRINT_INSTANCE_TYPE_DATA
+#undef VIRTUAL_INSTANCE_TYPE_WRAPPER
}
void ObjectStats::CheckpointObjectStats() {
base::LockGuard<base::Mutex> lock_guard(object_stats_mutex.Pointer());
- Counters* counters = isolate()->counters();
-#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
- counters->count_of_##name()->Increment( \
- static_cast<int>(object_counts_[name])); \
- counters->count_of_##name()->Decrement( \
- static_cast<int>(object_counts_last_time_[name])); \
- counters->size_of_##name()->Increment( \
- static_cast<int>(object_sizes_[name])); \
- counters->size_of_##name()->Decrement( \
- static_cast<int>(object_sizes_last_time_[name]));
- INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
-#undef ADJUST_LAST_TIME_OBJECT_COUNT
- int index;
-#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
- index = FIRST_CODE_KIND_SUB_TYPE + Code::name; \
- counters->count_of_CODE_TYPE_##name()->Increment( \
- static_cast<int>(object_counts_[index])); \
- counters->count_of_CODE_TYPE_##name()->Decrement( \
- static_cast<int>(object_counts_last_time_[index])); \
- counters->size_of_CODE_TYPE_##name()->Increment( \
- static_cast<int>(object_sizes_[index])); \
- counters->size_of_CODE_TYPE_##name()->Decrement( \
- static_cast<int>(object_sizes_last_time_[index]));
- CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
-#undef ADJUST_LAST_TIME_OBJECT_COUNT
-#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
- index = FIRST_FIXED_ARRAY_SUB_TYPE + name; \
- counters->count_of_FIXED_ARRAY_##name()->Increment( \
- static_cast<int>(object_counts_[index])); \
- counters->count_of_FIXED_ARRAY_##name()->Decrement( \
- static_cast<int>(object_counts_last_time_[index])); \
- counters->size_of_FIXED_ARRAY_##name()->Increment( \
- static_cast<int>(object_sizes_[index])); \
- counters->size_of_FIXED_ARRAY_##name()->Decrement( \
- static_cast<int>(object_sizes_last_time_[index]));
- FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
-#undef ADJUST_LAST_TIME_OBJECT_COUNT
-
MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
ClearObjectStats();
@@ -233,6 +201,14 @@ void ObjectStats::RecordObjectStats(InstanceType type, size_t size) {
size_histogram_[type][HistogramIndexFromSize(size)]++;
}
+void ObjectStats::RecordVirtualObjectStats(VirtualInstanceType type,
+ size_t size) {
+ DCHECK_LE(type, LAST_VIRTUAL_TYPE);
+ object_counts_[FIRST_VIRTUAL_TYPE + type]++;
+ object_sizes_[FIRST_VIRTUAL_TYPE + type] += size;
+ size_histogram_[FIRST_VIRTUAL_TYPE + type][HistogramIndexFromSize(size)]++;
+}
+
void ObjectStats::RecordCodeSubTypeStats(int code_sub_type, size_t size) {
int code_sub_type_index = FIRST_CODE_KIND_SUB_TYPE + code_sub_type;
DCHECK_GE(code_sub_type_index, FIRST_CODE_KIND_SUB_TYPE);
@@ -267,18 +243,117 @@ bool ObjectStats::RecordFixedArraySubTypeStats(FixedArrayBase* array,
Isolate* ObjectStats::isolate() { return heap()->isolate(); }
-ObjectStatsCollector::ObjectStatsCollector(Heap* heap, ObjectStats* stats)
+class ObjectStatsCollectorImpl {
+ public:
+ ObjectStatsCollectorImpl(Heap* heap, ObjectStats* stats);
+
+ void CollectGlobalStatistics();
+
+ // Collects statistics of objects for virtual instance types.
+ void CollectVirtualStatistics(HeapObject* obj);
+
+ // Collects statistics of objects for regular instance types.
+ void CollectStatistics(HeapObject* obj);
+
+ private:
+ class CompilationCacheTableVisitor;
+
+ void RecordObjectStats(HeapObject* obj, InstanceType type, size_t size);
+ void RecordBytecodeArrayDetails(BytecodeArray* obj);
+ void RecordCodeDetails(Code* code);
+ void RecordFixedArrayDetails(FixedArray* array);
+ void RecordJSCollectionDetails(JSObject* obj);
+ void RecordJSObjectDetails(JSObject* object);
+ void RecordJSWeakCollectionDetails(JSWeakCollection* obj);
+ void RecordMapDetails(Map* map);
+ void RecordScriptDetails(Script* obj);
+ void RecordTemplateInfoDetails(TemplateInfo* obj);
+ void RecordSharedFunctionInfoDetails(SharedFunctionInfo* sfi);
+
+ bool RecordFixedArrayHelper(HeapObject* parent, FixedArray* array,
+ int subtype, size_t overhead);
+ void RecursivelyRecordFixedArrayHelper(HeapObject* parent, FixedArray* array,
+ int subtype);
+ template <class HashTable>
+ void RecordHashTableHelper(HeapObject* parent, HashTable* array, int subtype);
+ bool SameLiveness(HeapObject* obj1, HeapObject* obj2);
+
+ void RecordVirtualObjectStats(HeapObject* obj,
+ ObjectStats::VirtualInstanceType type,
+ size_t size);
+ void RecordVirtualAllocationSiteDetails(AllocationSite* site);
+
+ Heap* heap_;
+ ObjectStats* stats_;
+ MarkCompactCollector::NonAtomicMarkingState* marking_state_;
+ std::unordered_set<HeapObject*> virtual_objects_;
+
+ friend class ObjectStatsCollectorImpl::CompilationCacheTableVisitor;
+};
+
+ObjectStatsCollectorImpl::ObjectStatsCollectorImpl(Heap* heap,
+ ObjectStats* stats)
: heap_(heap),
stats_(stats),
marking_state_(
heap->mark_compact_collector()->non_atomic_marking_state()) {}
-void ObjectStatsCollector::CollectStatistics(HeapObject* obj) {
+// For entries which share the same instance type (historically FixedArrays)
+// we do a pre-pass and create virtual instance types.
+void ObjectStatsCollectorImpl::CollectVirtualStatistics(HeapObject* obj) {
+ if (obj->IsAllocationSite()) {
+ RecordVirtualAllocationSiteDetails(AllocationSite::cast(obj));
+ }
+}
+
+void ObjectStatsCollectorImpl::RecordVirtualObjectStats(
+ HeapObject* obj, ObjectStats::VirtualInstanceType type, size_t size) {
+ virtual_objects_.insert(obj);
+ stats_->RecordVirtualObjectStats(type, size);
+}
+
+void ObjectStatsCollectorImpl::RecordVirtualAllocationSiteDetails(
+ AllocationSite* site) {
+ if (!site->PointsToLiteral()) return;
+ JSObject* boilerplate = site->boilerplate();
+ if (boilerplate->IsJSArray()) {
+ RecordVirtualObjectStats(boilerplate,
+ ObjectStats::JS_ARRAY_BOILERPLATE_TYPE,
+ boilerplate->Size());
+ // Array boilerplates cannot have properties.
+ } else {
+ RecordVirtualObjectStats(boilerplate,
+ ObjectStats::JS_OBJECT_BOILERPLATE_TYPE,
+ boilerplate->Size());
+ if (boilerplate->HasFastProperties()) {
+      // We'll misclassify the empty_property_array here. Given that there is
+      // a single instance, this is negligible.
+ PropertyArray* properties = boilerplate->property_array();
+ RecordVirtualObjectStats(properties,
+ ObjectStats::BOILERPLATE_PROPERTY_ARRAY_TYPE,
+ properties->Size());
+ } else {
+ NameDictionary* properties = boilerplate->property_dictionary();
+ RecordVirtualObjectStats(properties,
+ ObjectStats::BOILERPLATE_NAME_DICTIONARY_TYPE,
+ properties->Size());
+ }
+ }
+ FixedArrayBase* elements = boilerplate->elements();
+ // We skip COW elements since they are shared, and we are sure that if the
+ // boilerplate exists there must have been at least one instantiation.
+ if (!elements->IsCowArray()) {
+ RecordVirtualObjectStats(elements, ObjectStats::BOILERPLATE_ELEMENTS_TYPE,
+ elements->Size());
+ }
+}
+
+void ObjectStatsCollectorImpl::CollectStatistics(HeapObject* obj) {
Map* map = obj->map();
// Record for the InstanceType.
int object_size = obj->Size();
- stats_->RecordObjectStats(map->instance_type(), object_size);
+ RecordObjectStats(obj, map->instance_type(), object_size);
// Record specific sub types where possible.
if (obj->IsMap()) RecordMapDetails(Map::cast(obj));
@@ -303,9 +378,10 @@ void ObjectStatsCollector::CollectStatistics(HeapObject* obj) {
if (obj->IsScript()) RecordScriptDetails(Script::cast(obj));
}
-class ObjectStatsCollector::CompilationCacheTableVisitor : public RootVisitor {
+class ObjectStatsCollectorImpl::CompilationCacheTableVisitor
+ : public RootVisitor {
public:
- explicit CompilationCacheTableVisitor(ObjectStatsCollector* parent)
+ explicit CompilationCacheTableVisitor(ObjectStatsCollectorImpl* parent)
: parent_(parent) {}
void VisitRootPointers(Root root, Object** start, Object** end) override {
@@ -319,15 +395,15 @@ class ObjectStatsCollector::CompilationCacheTableVisitor : public RootVisitor {
}
private:
- ObjectStatsCollector* parent_;
+ ObjectStatsCollectorImpl* parent_;
};
-void ObjectStatsCollector::CollectGlobalStatistics() {
+void ObjectStatsCollectorImpl::CollectGlobalStatistics() {
// Global FixedArrays.
RecordFixedArrayHelper(nullptr, heap_->weak_new_space_object_to_code_list(),
WEAK_NEW_SPACE_OBJECT_TO_CODE_SUB_TYPE, 0);
- RecordFixedArrayHelper(nullptr, heap_->serialized_templates(),
- SERIALIZED_TEMPLATES_SUB_TYPE, 0);
+ RecordFixedArrayHelper(nullptr, heap_->serialized_objects(),
+ SERIALIZED_OBJECTS_SUB_TYPE, 0);
RecordFixedArrayHelper(nullptr, heap_->number_string_cache(),
NUMBER_STRING_CACHE_SUB_TYPE, 0);
RecordFixedArrayHelper(nullptr, heap_->single_character_string_cache(),
@@ -359,6 +435,13 @@ void ObjectStatsCollector::CollectGlobalStatistics() {
compilation_cache->Iterate(&v);
}
+void ObjectStatsCollectorImpl::RecordObjectStats(HeapObject* obj,
+ InstanceType type,
+ size_t size) {
+ if (virtual_objects_.find(obj) == virtual_objects_.end())
+ stats_->RecordObjectStats(type, size);
+}
+
static bool CanRecordFixedArray(Heap* heap, FixedArrayBase* array) {
return array->map()->instance_type() == FIXED_ARRAY_TYPE &&
array != heap->empty_fixed_array() &&
@@ -371,15 +454,16 @@ static bool IsCowArray(Heap* heap, FixedArrayBase* array) {
return array->map() == heap->fixed_cow_array_map();
}
-bool ObjectStatsCollector::SameLiveness(HeapObject* obj1, HeapObject* obj2) {
+bool ObjectStatsCollectorImpl::SameLiveness(HeapObject* obj1,
+ HeapObject* obj2) {
return obj1 == nullptr || obj2 == nullptr ||
marking_state_->Color(obj1) == marking_state_->Color(obj2);
}
-bool ObjectStatsCollector::RecordFixedArrayHelper(HeapObject* parent,
- FixedArray* array,
- int subtype,
- size_t overhead) {
+bool ObjectStatsCollectorImpl::RecordFixedArrayHelper(HeapObject* parent,
+ FixedArray* array,
+ int subtype,
+ size_t overhead) {
if (SameLiveness(parent, array) && CanRecordFixedArray(heap_, array) &&
!IsCowArray(heap_, array)) {
return stats_->RecordFixedArraySubTypeStats(array, subtype, array->Size(),
@@ -388,9 +472,8 @@ bool ObjectStatsCollector::RecordFixedArrayHelper(HeapObject* parent,
return false;
}
-void ObjectStatsCollector::RecursivelyRecordFixedArrayHelper(HeapObject* parent,
- FixedArray* array,
- int subtype) {
+void ObjectStatsCollectorImpl::RecursivelyRecordFixedArrayHelper(
+ HeapObject* parent, FixedArray* array, int subtype) {
if (RecordFixedArrayHelper(parent, array, subtype, 0)) {
for (int i = 0; i < array->length(); i++) {
if (array->get(i)->IsFixedArray()) {
@@ -402,9 +485,9 @@ void ObjectStatsCollector::RecursivelyRecordFixedArrayHelper(HeapObject* parent,
}
template <class HashTable>
-void ObjectStatsCollector::RecordHashTableHelper(HeapObject* parent,
- HashTable* array,
- int subtype) {
+void ObjectStatsCollectorImpl::RecordHashTableHelper(HeapObject* parent,
+ HashTable* array,
+ int subtype) {
int used = array->NumberOfElements() * HashTable::kEntrySize * kPointerSize;
CHECK_GE(array->Size(), used);
size_t overhead = array->Size() - used -
@@ -413,7 +496,7 @@ void ObjectStatsCollector::RecordHashTableHelper(HeapObject* parent,
RecordFixedArrayHelper(parent, array, subtype, overhead);
}
-void ObjectStatsCollector::RecordJSObjectDetails(JSObject* object) {
+void ObjectStatsCollectorImpl::RecordJSObjectDetails(JSObject* object) {
size_t overhead = 0;
FixedArrayBase* elements = object->elements();
if (CanRecordFixedArray(heap_, elements) && !IsCowArray(heap_, elements)) {
@@ -448,7 +531,7 @@ void ObjectStatsCollector::RecordJSObjectDetails(JSObject* object) {
}
}
-void ObjectStatsCollector::RecordJSWeakCollectionDetails(
+void ObjectStatsCollectorImpl::RecordJSWeakCollectionDetails(
JSWeakCollection* obj) {
if (obj->table()->IsHashTable()) {
ObjectHashTable* table = ObjectHashTable::cast(obj->table());
@@ -458,7 +541,7 @@ void ObjectStatsCollector::RecordJSWeakCollectionDetails(
}
}
-void ObjectStatsCollector::RecordJSCollectionDetails(JSObject* obj) {
+void ObjectStatsCollectorImpl::RecordJSCollectionDetails(JSObject* obj) {
// The JS versions use a different HashTable implementation that cannot use
// the regular helper. Since overall impact is usually small just record
// without overhead.
@@ -472,12 +555,12 @@ void ObjectStatsCollector::RecordJSCollectionDetails(JSObject* obj) {
}
}
-void ObjectStatsCollector::RecordScriptDetails(Script* obj) {
+void ObjectStatsCollectorImpl::RecordScriptDetails(Script* obj) {
FixedArray* infos = FixedArray::cast(obj->shared_function_infos());
RecordFixedArrayHelper(obj, infos, SHARED_FUNCTION_INFOS_SUB_TYPE, 0);
}
-void ObjectStatsCollector::RecordMapDetails(Map* map_obj) {
+void ObjectStatsCollectorImpl::RecordMapDetails(Map* map_obj) {
DescriptorArray* array = map_obj->instance_descriptors();
if (map_obj->owns_descriptors() && array != heap_->empty_descriptor_array() &&
SameLiveness(map_obj, array)) {
@@ -508,7 +591,7 @@ void ObjectStatsCollector::RecordMapDetails(Map* map_obj) {
}
}
-void ObjectStatsCollector::RecordTemplateInfoDetails(TemplateInfo* obj) {
+void ObjectStatsCollectorImpl::RecordTemplateInfoDetails(TemplateInfo* obj) {
if (obj->property_accessors()->IsFixedArray()) {
RecordFixedArrayHelper(obj, FixedArray::cast(obj->property_accessors()),
TEMPLATE_INFO_SUB_TYPE, 0);
@@ -519,14 +602,14 @@ void ObjectStatsCollector::RecordTemplateInfoDetails(TemplateInfo* obj) {
}
}
-void ObjectStatsCollector::RecordBytecodeArrayDetails(BytecodeArray* obj) {
+void ObjectStatsCollectorImpl::RecordBytecodeArrayDetails(BytecodeArray* obj) {
RecordFixedArrayHelper(obj, obj->constant_pool(),
BYTECODE_ARRAY_CONSTANT_POOL_SUB_TYPE, 0);
RecordFixedArrayHelper(obj, obj->handler_table(),
BYTECODE_ARRAY_HANDLER_TABLE_SUB_TYPE, 0);
}
-void ObjectStatsCollector::RecordCodeDetails(Code* code) {
+void ObjectStatsCollectorImpl::RecordCodeDetails(Code* code) {
stats_->RecordCodeSubTypeStats(code->kind(), code->Size());
RecordFixedArrayHelper(code, code->deoptimization_data(),
DEOPTIMIZATION_DATA_SUB_TYPE, 0);
@@ -554,7 +637,7 @@ void ObjectStatsCollector::RecordCodeDetails(Code* code) {
}
}
-void ObjectStatsCollector::RecordSharedFunctionInfoDetails(
+void ObjectStatsCollectorImpl::RecordSharedFunctionInfoDetails(
SharedFunctionInfo* sfi) {
FixedArray* scope_info = sfi->scope_info();
RecordFixedArrayHelper(sfi, scope_info, SCOPE_INFO_SUB_TYPE, 0);
@@ -565,7 +648,7 @@ void ObjectStatsCollector::RecordSharedFunctionInfoDetails(
}
}
-void ObjectStatsCollector::RecordFixedArrayDetails(FixedArray* array) {
+void ObjectStatsCollectorImpl::RecordFixedArrayDetails(FixedArray* array) {
if (array->IsContext()) {
RecordFixedArrayHelper(nullptr, array, CONTEXT_SUB_TYPE, 0);
}
@@ -585,5 +668,85 @@ void ObjectStatsCollector::RecordFixedArrayDetails(FixedArray* array) {
}
}
+class ObjectStatsVisitor {
+ public:
+ enum CollectionMode {
+ kRegular,
+ kVirtual,
+ };
+
+ ObjectStatsVisitor(Heap* heap, ObjectStatsCollectorImpl* live_collector,
+ ObjectStatsCollectorImpl* dead_collector,
+ CollectionMode mode)
+ : live_collector_(live_collector),
+ dead_collector_(dead_collector),
+ marking_state_(
+ heap->mark_compact_collector()->non_atomic_marking_state()),
+ mode_(mode) {}
+
+ bool Visit(HeapObject* obj, int size) {
+ if (marking_state_->IsBlack(obj)) {
+ Collect(live_collector_, obj);
+ } else {
+ DCHECK(!marking_state_->IsGrey(obj));
+ Collect(dead_collector_, obj);
+ }
+ return true;
+ }
+
+ private:
+ void Collect(ObjectStatsCollectorImpl* collector, HeapObject* obj) {
+ switch (mode_) {
+ case kRegular:
+ collector->CollectStatistics(obj);
+ break;
+ case kVirtual:
+ collector->CollectVirtualStatistics(obj);
+ break;
+ }
+ }
+
+ ObjectStatsCollectorImpl* live_collector_;
+ ObjectStatsCollectorImpl* dead_collector_;
+ MarkCompactCollector::NonAtomicMarkingState* marking_state_;
+ CollectionMode mode_;
+};
+
+namespace {
+
+void IterateHeap(Heap* heap, ObjectStatsVisitor* visitor) {
+ SpaceIterator space_it(heap);
+ HeapObject* obj = nullptr;
+ while (space_it.has_next()) {
+ std::unique_ptr<ObjectIterator> it(space_it.next()->GetObjectIterator());
+ ObjectIterator* obj_it = it.get();
+ while ((obj = obj_it->Next()) != nullptr) {
+ visitor->Visit(obj, obj->Size());
+ }
+ }
+}
+
+} // namespace
+
+void ObjectStatsCollector::Collect() {
+ ObjectStatsCollectorImpl live_collector(heap_, live_);
+ ObjectStatsCollectorImpl dead_collector(heap_, dead_);
+  // 1. Collect virtual instance types that would otherwise be
+  //    indistinguishable from regular instance types.
+ {
+ ObjectStatsVisitor visitor(heap_, &live_collector, &dead_collector,
+ ObjectStatsVisitor::kVirtual);
+ IterateHeap(heap_, &visitor);
+ }
+
+ // 2. Collect globals; only applies to live objects.
+ live_collector.CollectGlobalStatistics();
+  // 3. Collect the rest.
+ {
+ ObjectStatsVisitor visitor(heap_, &live_collector, &dead_collector,
+ ObjectStatsVisitor::kRegular);
+ IterateHeap(heap_, &visitor);
+ }
+}
+
} // namespace internal
} // namespace v8
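ObjectStatsCollector::Collect runs the virtual pre-pass before the regular pass because RecordObjectStats (above) skips anything already claimed by a virtual type via the virtual_objects_ set. A self-contained sketch of this two-pass skip-set pattern, with stand-in types:

#include <unordered_set>

// First pass records objects classified under a virtual type; the second
// pass only counts an object under its regular type if the first pass did
// not already claim it, so no object is counted twice.
struct TwoPassCounter {
  std::unordered_set<const void*> virtual_objects;
  int virtual_count = 0;
  int regular_count = 0;

  void RecordVirtual(const void* obj) {
    virtual_objects.insert(obj);
    ++virtual_count;
  }
  void RecordRegular(const void* obj) {
    if (virtual_objects.find(obj) == virtual_objects.end()) ++regular_count;
  }
};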
diff --git a/deps/v8/src/heap/object-stats.h b/deps/v8/src/heap/object-stats.h
index 18bbaaaa43..500ce36bd9 100644
--- a/deps/v8/src/heap/object-stats.h
+++ b/deps/v8/src/heap/object-stats.h
@@ -13,6 +13,19 @@
#include "src/heap/objects-visiting.h"
#include "src/objects.h"
+// These instance types do not exist for actual use but are merely introduced
+// for object stats tracing. In contrast to Code and FixedArray sub types,
+// these types are not known to any counters outside of object stats
+// tracing.
+//
+// Update LAST_VIRTUAL_TYPE below when changing this macro.
+#define VIRTUAL_INSTANCE_TYPE_LIST(V) \
+ V(BOILERPLATE_ELEMENTS_TYPE) \
+ V(BOILERPLATE_NAME_DICTIONARY_TYPE) \
+ V(BOILERPLATE_PROPERTY_ARRAY_TYPE) \
+ V(JS_ARRAY_BOILERPLATE_TYPE) \
+ V(JS_OBJECT_BOILERPLATE_TYPE)
+
namespace v8 {
namespace internal {
@@ -20,6 +33,14 @@ class ObjectStats {
public:
explicit ObjectStats(Heap* heap) : heap_(heap) { ClearObjectStats(); }
+ // See description on VIRTUAL_INSTANCE_TYPE_LIST.
+ enum VirtualInstanceType {
+#define DEFINE_VIRTUAL_INSTANCE_TYPE(type) type,
+ VIRTUAL_INSTANCE_TYPE_LIST(DEFINE_VIRTUAL_INSTANCE_TYPE)
+#undef DEFINE_VIRTUAL_INSTANCE_TYPE
+ LAST_VIRTUAL_TYPE = JS_OBJECT_BOILERPLATE_TYPE,
+ };
+
// ObjectStats are kept in two arrays, counts and sizes. Related stats are
// stored in a contiguous linear buffer. Stats groups are stored one after
// another.
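VIRTUAL_INSTANCE_TYPE_LIST follows the X-macro pattern used throughout this header: the list macro takes a per-entry macro V and is expanded with different definitions of V for the enum, for PrintJSON, and for Dump. A minimal illustration of how the enum expansion works, with demo names only:

#define DEMO_VIRTUAL_TYPE_LIST(V) \
  V(ALPHA_TYPE)                   \
  V(BETA_TYPE)

enum DemoVirtualInstanceType {
#define DEFINE_DEMO_TYPE(type) type,
  DEMO_VIRTUAL_TYPE_LIST(DEFINE_DEMO_TYPE)
#undef DEFINE_DEMO_TYPE
  LAST_DEMO_TYPE = BETA_TYPE,
};
// Preprocesses to:
// enum DemoVirtualInstanceType { ALPHA_TYPE, BETA_TYPE,
//                                LAST_DEMO_TYPE = BETA_TYPE, };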
@@ -27,17 +48,19 @@ class ObjectStats {
FIRST_CODE_KIND_SUB_TYPE = LAST_TYPE + 1,
FIRST_FIXED_ARRAY_SUB_TYPE =
FIRST_CODE_KIND_SUB_TYPE + Code::NUMBER_OF_KINDS,
- OBJECT_STATS_COUNT =
+ FIRST_VIRTUAL_TYPE =
FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1,
+ OBJECT_STATS_COUNT = FIRST_VIRTUAL_TYPE + LAST_VIRTUAL_TYPE + 1,
};
void ClearObjectStats(bool clear_last_time_stats = false);
- void CheckpointObjectStats();
void PrintJSON(const char* key);
void Dump(std::stringstream& stream);
+ void CheckpointObjectStats();
void RecordObjectStats(InstanceType type, size_t size);
+ void RecordVirtualObjectStats(VirtualInstanceType type, size_t size);
void RecordCodeSubTypeStats(int code_sub_type, size_t size);
bool RecordFixedArraySubTypeStats(FixedArrayBase* array, int array_sub_type,
size_t size, size_t over_allocated);
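The enum above packs four stat families into one linear buffer: regular InstanceTypes first, then code kinds, then fixed-array sub types, then the new virtual types, which is why RecordVirtualObjectStats indexes with FIRST_VIRTUAL_TYPE + type. A worked sketch of the arithmetic with assumed boundary values (not V8's actual constants):

enum : int {
  // All four constants below are assumed values, for illustration only.
  kLastType = 340,
  kNumberOfCodeKinds = 20,
  kLastFixedArraySubType = 30,
  kLastVirtualType = 4,  // index of the last VIRTUAL_INSTANCE_TYPE_LIST entry

  kFirstCodeKindSubType = kLastType + 1,                                 // 341
  kFirstFixedArraySubType = kFirstCodeKindSubType + kNumberOfCodeKinds,  // 361
  kFirstVirtualType =
      kFirstFixedArraySubType + kLastFixedArraySubType + 1,              // 392
  kObjectStatsCount = kFirstVirtualType + kLastVirtualType + 1,          // 397
};
static_assert(kObjectStatsCount == 397, "layout arithmetic as described");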
@@ -88,37 +111,21 @@ class ObjectStats {
class ObjectStatsCollector {
public:
- ObjectStatsCollector(Heap* heap, ObjectStats* stats);
+ ObjectStatsCollector(Heap* heap, ObjectStats* live, ObjectStats* dead)
+ : heap_(heap), live_(live), dead_(dead) {
+ DCHECK_NOT_NULL(heap_);
+ DCHECK_NOT_NULL(live_);
+ DCHECK_NOT_NULL(dead_);
+ }
- void CollectGlobalStatistics();
- void CollectStatistics(HeapObject* obj);
+ // Collects type information of live and dead objects. Requires mark bits to
+ // be present.
+ void Collect();
private:
- class CompilationCacheTableVisitor;
-
- void RecordBytecodeArrayDetails(BytecodeArray* obj);
- void RecordCodeDetails(Code* code);
- void RecordFixedArrayDetails(FixedArray* array);
- void RecordJSCollectionDetails(JSObject* obj);
- void RecordJSObjectDetails(JSObject* object);
- void RecordJSWeakCollectionDetails(JSWeakCollection* obj);
- void RecordMapDetails(Map* map);
- void RecordScriptDetails(Script* obj);
- void RecordTemplateInfoDetails(TemplateInfo* obj);
- void RecordSharedFunctionInfoDetails(SharedFunctionInfo* sfi);
-
- bool RecordFixedArrayHelper(HeapObject* parent, FixedArray* array,
- int subtype, size_t overhead);
- void RecursivelyRecordFixedArrayHelper(HeapObject* parent, FixedArray* array,
- int subtype);
- template <class HashTable>
- void RecordHashTableHelper(HeapObject* parent, HashTable* array, int subtype);
- bool SameLiveness(HeapObject* obj1, HeapObject* obj2);
- Heap* heap_;
- ObjectStats* stats_;
- MarkCompactCollector::NonAtomicMarkingState* marking_state_;
-
- friend class ObjectStatsCollector::CompilationCacheTableVisitor;
+ Heap* const heap_;
+ ObjectStats* const live_;
+ ObjectStats* const dead_;
};
} // namespace internal
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index 39ebdd2cbd..c20434a283 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -20,6 +20,7 @@ class BigInt;
class BytecodeArray;
class JSArrayBuffer;
class JSRegExp;
+class JSWeakCollection;
#define TYPED_VISITOR_ID_LIST(V) \
V(AllocationSite) \
diff --git a/deps/v8/src/heap/scavenge-job.cc b/deps/v8/src/heap/scavenge-job.cc
index a7583cb754..b649c010ae 100644
--- a/deps/v8/src/heap/scavenge-job.cc
+++ b/deps/v8/src/heap/scavenge-job.cc
@@ -103,7 +103,7 @@ void ScavengeJob::ScheduleIdleTaskIfNeeded(Heap* heap, int bytes_allocated) {
void ScavengeJob::ScheduleIdleTask(Heap* heap) {
- if (!idle_task_pending_) {
+ if (!idle_task_pending_ && heap->use_tasks()) {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap->isolate());
if (V8::GetCurrentPlatform()->IdleTasksEnabled(isolate)) {
idle_task_pending_ = true;
diff --git a/deps/v8/src/heap/scavenger-inl.h b/deps/v8/src/heap/scavenger-inl.h
index 99e1a8004e..b61872074e 100644
--- a/deps/v8/src/heap/scavenger-inl.h
+++ b/deps/v8/src/heap/scavenger-inl.h
@@ -58,8 +58,6 @@ bool Scavenger::MigrateObject(Map* map, HeapObject* source, HeapObject* target,
}
if (V8_UNLIKELY(is_logging_)) {
- // Update NewSpace stats if necessary.
- RecordCopiedObject(target);
heap()->OnMoveEvent(target, source, size);
}
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index 231a8f5074..be5fb87a90 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -153,20 +153,6 @@ void Scavenger::Process(OneshotBarrier* barrier) {
} while (!done);
}
-void Scavenger::RecordCopiedObject(HeapObject* obj) {
- bool should_record = FLAG_log_gc;
-#ifdef DEBUG
- should_record = FLAG_heap_stats;
-#endif
- if (should_record) {
- if (heap()->new_space()->Contains(obj)) {
- heap()->new_space()->RecordAllocation(obj);
- } else {
- heap()->new_space()->RecordPromotion(obj);
- }
- }
-}
-
void Scavenger::Finalize() {
heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
heap()->IncrementSemiSpaceCopiedObjectSize(copied_size_);
diff --git a/deps/v8/src/heap/scavenger.h b/deps/v8/src/heap/scavenger.h
index 75b24fe282..27ae2e8ab7 100644
--- a/deps/v8/src/heap/scavenger.h
+++ b/deps/v8/src/heap/scavenger.h
@@ -92,8 +92,6 @@ class Scavenger {
void IterateAndScavengePromotedObject(HeapObject* target, int size);
- void RecordCopiedObject(HeapObject* obj);
-
static inline bool ContainsOnlyData(VisitorId visitor_id);
Heap* const heap_;
diff --git a/deps/v8/src/heap/setup-heap-internal.cc b/deps/v8/src/heap/setup-heap-internal.cc
index 8831417ce2..9e2d7e6354 100644
--- a/deps/v8/src/heap/setup-heap-internal.cc
+++ b/deps/v8/src/heap/setup-heap-internal.cc
@@ -17,6 +17,7 @@
#include "src/lookup-cache.h"
#include "src/objects-inl.h"
#include "src/objects/arguments.h"
+#include "src/objects/data-handler.h"
#include "src/objects/debug-objects.h"
#include "src/objects/descriptor-array.h"
#include "src/objects/dictionary.h"
@@ -69,6 +70,11 @@ const Heap::StructTable Heap::struct_table[] = {
{NAME##_TYPE, Name::kSize, k##Name##MapRootIndex},
STRUCT_LIST(STRUCT_TABLE_ELEMENT)
#undef STRUCT_TABLE_ELEMENT
+
+#define DATA_HANDLER_ELEMENT(NAME, Name, Size, name) \
+ {NAME##_TYPE, Name::kSizeWithData##Size, k##Name##Size##MapRootIndex},
+ DATA_HANDLER_LIST(DATA_HANDLER_ELEMENT)
+#undef DATA_HANDLER_ELEMENT
};
namespace {
@@ -188,9 +194,9 @@ bool Heap::CreateInitialMaps() {
FinalizePartialMap(this, fixed_cow_array_map());
FinalizePartialMap(this, descriptor_array_map());
FinalizePartialMap(this, undefined_map());
- undefined_map()->set_is_undetectable();
+ undefined_map()->set_is_undetectable(true);
FinalizePartialMap(this, null_map());
- null_map()->set_is_undetectable();
+ null_map()->set_is_undetectable(true);
FinalizePartialMap(this, the_hole_map());
for (unsigned i = 0; i < arraysize(struct_table); ++i) {
const StructTable& entry = struct_table[i];
@@ -300,6 +306,8 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, string_table)
ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, weak_hash_table)
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, array_list)
+
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context)
@@ -551,9 +559,7 @@ void Heap::CreateInitialObjects() {
set_weak_object_to_code_table(*WeakHashTable::New(isolate(), 16, TENURED));
- set_weak_new_space_object_to_code_list(
- ArrayList::cast(*(factory->NewFixedArray(16, TENURED))));
- weak_new_space_object_to_code_list()->SetLength(0);
+ set_weak_new_space_object_to_code_list(*ArrayList::New(isolate(), 16));
set_feedback_vectors_for_profiling_tools(undefined_value());
@@ -632,7 +638,7 @@ void Heap::CreateInitialObjects() {
cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
set_array_buffer_neutering_protector(*cell);
- set_serialized_templates(empty_fixed_array());
+ set_serialized_objects(empty_fixed_array());
set_serialized_global_proxy_sizes(empty_fixed_array());
set_weak_stack_trace_list(Smi::kZero);
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index fb78b99c2f..39a62327df 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -137,21 +137,20 @@ bool NewSpace::FromSpaceContainsSlow(Address a) {
bool NewSpace::ToSpaceContains(Object* o) { return to_space_.Contains(o); }
bool NewSpace::FromSpaceContains(Object* o) { return from_space_.Contains(o); }
-void Page::InitializeFreeListCategories() {
+void MemoryChunk::InitializeFreeListCategories() {
for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
categories_[i].Initialize(static_cast<FreeListCategoryType>(i));
}
}
bool PagedSpace::Contains(Address addr) {
+ if (heap_->lo_space()->FindPage(addr)) return false;
return MemoryChunk::FromAnyPointerAddress(heap(), addr)->owner() == this;
}
bool PagedSpace::Contains(Object* o) {
if (!o->IsHeapObject()) return false;
- Page* p = Page::FromAddress(HeapObject::cast(o)->address());
- if (!Page::IsValid(p)) return false;
- return p->owner() == this;
+ return Page::FromAddress(HeapObject::cast(o)->address())->owner() == this;
}
void PagedSpace::UnlinkFreeListCategories(Page* page) {
@@ -186,18 +185,13 @@ bool PagedSpace::TryFreeLast(HeapObject* object, int object_size) {
}
MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(addr);
- uintptr_t offset = addr - chunk->address();
- if (offset < MemoryChunk::kHeaderSize || !chunk->HasPageHeader()) {
- chunk = heap->lo_space()->FindPageThreadSafe(addr);
+ MemoryChunk* chunk = heap->lo_space()->FindPage(addr);
+ if (chunk == nullptr) {
+ chunk = MemoryChunk::FromAddress(addr);
}
return chunk;
}
-Page* Page::FromAnyPointerAddress(Heap* heap, Address addr) {
- return static_cast<Page*>(MemoryChunk::FromAnyPointerAddress(heap, addr));
-}
-
void Page::MarkNeverAllocateForTesting() {
DCHECK(this->owner()->identity() != NEW_SPACE);
DCHECK(!IsFlagSet(NEVER_ALLOCATE_ON_PAGE));
@@ -301,8 +295,7 @@ AllocationResult LocalAllocationBuffer::AllocateRawAligned(
bool PagedSpace::EnsureLinearAllocationArea(int size_in_bytes) {
if (allocation_info_.top() + size_in_bytes <= allocation_info_.limit())
return true;
- if (free_list_.Allocate(size_in_bytes)) return true;
- return SlowAllocateRaw(size_in_bytes);
+ return SlowRefillLinearAllocationArea(size_in_bytes);
}
HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
@@ -393,11 +386,11 @@ AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
#endif
HeapObject* heap_obj = nullptr;
if (!result.IsRetry() && result.To(&heap_obj) && !is_local()) {
- AllocationStep(static_cast<int>(size_in_bytes + bytes_since_last),
- heap_obj->address(), size_in_bytes);
DCHECK_IMPLIES(
heap()->incremental_marking()->black_allocation(),
heap()->incremental_marking()->marking_state()->IsBlack(heap_obj));
+ AllocationStep(static_cast<int>(size_in_bytes + bytes_since_last),
+ heap_obj->address(), size_in_bytes);
StartNextInlineAllocationStep();
}
return result;
@@ -462,6 +455,12 @@ AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes) {
AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
AllocationAlignment alignment) {
+ if (top() < top_on_previous_step_) {
+    // Generated code decreased the top() pointer to do folded allocations.
+ DCHECK_EQ(Page::FromAllocationAreaAddress(top()),
+ Page::FromAllocationAreaAddress(top_on_previous_step_));
+ top_on_previous_step_ = top();
+ }
#ifdef V8_HOST_ARCH_32_BIT
return alignment == kDoubleAligned
? AllocateRawAligned(size_in_bytes, kDoubleAligned)
@@ -484,7 +483,7 @@ size_t LargeObjectSpace::Available() {
LocalAllocationBuffer LocalAllocationBuffer::InvalidBuffer() {
- return LocalAllocationBuffer(nullptr, AllocationInfo(nullptr, nullptr));
+ return LocalAllocationBuffer(nullptr, LinearAllocationArea(nullptr, nullptr));
}
@@ -497,7 +496,7 @@ LocalAllocationBuffer LocalAllocationBuffer::FromResult(Heap* heap,
USE(ok);
DCHECK(ok);
Address top = HeapObject::cast(obj)->address();
- return LocalAllocationBuffer(heap, AllocationInfo(top, top + size));
+ return LocalAllocationBuffer(heap, LinearAllocationArea(top, top + size));
}
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index c3663573b0..2dd5e9b24d 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -12,6 +12,7 @@
#include "src/counters.h"
#include "src/heap/array-buffer-tracker.h"
#include "src/heap/concurrent-marking.h"
+#include "src/heap/gc-tracer.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
#include "src/heap/slot-set.h"
@@ -57,8 +58,7 @@ bool HeapObjectIterator::AdvanceToNextPage() {
Page* cur_page = *(current_page_++);
Heap* heap = space_->heap();
- heap->mark_compact_collector()->sweeper()->SweepOrWaitUntilSweepingCompleted(
- cur_page);
+ heap->mark_compact_collector()->sweeper()->EnsurePageIsIterable(cur_page);
if (cur_page->IsFlagSet(Page::SWEEP_TO_ITERATE))
heap->minor_mark_compact_collector()->MakeIterable(
cur_page, MarkingTreatmentMode::CLEAR,
@@ -71,16 +71,14 @@ bool HeapObjectIterator::AdvanceToNextPage() {
PauseAllocationObserversScope::PauseAllocationObserversScope(Heap* heap)
: heap_(heap) {
- AllSpaces spaces(heap_);
- for (Space* space = spaces.next(); space != nullptr; space = spaces.next()) {
- space->PauseAllocationObservers();
+ for (SpaceIterator it(heap_); it.has_next();) {
+ it.next()->PauseAllocationObservers();
}
}
PauseAllocationObserversScope::~PauseAllocationObserversScope() {
- AllSpaces spaces(heap_);
- for (Space* space = spaces.next(); space != nullptr; space = spaces.next()) {
- space->ResumeAllocationObservers();
+ for (SpaceIterator it(heap_); it.has_next();) {
+ it.next()->ResumeAllocationObservers();
}
}
@@ -120,8 +118,8 @@ bool CodeRange::SetUp(size_t requested) {
VirtualMemory reservation;
if (!AlignedAllocVirtualMemory(
- requested, Max(kCodeRangeAreaAlignment, base::OS::AllocatePageSize()),
- base::OS::GetRandomMmapAddr(), &reservation)) {
+ requested, Max(kCodeRangeAreaAlignment, AllocatePageSize()),
+ GetRandomMmapAddr(), &reservation)) {
return false;
}
@@ -133,7 +131,7 @@ bool CodeRange::SetUp(size_t requested) {
// the beginning of an executable space.
if (reserved_area > 0) {
if (!reservation.SetPermissions(base, reserved_area,
- base::OS::MemoryPermission::kReadWrite))
+ PageAllocator::kReadWrite))
return false;
base += reserved_area;
@@ -228,7 +226,7 @@ bool CodeRange::CommitRawMemory(Address start, size_t length) {
bool CodeRange::UncommitRawMemory(Address start, size_t length) {
return virtual_memory_.SetPermissions(start, length,
- base::OS::MemoryPermission::kNoAccess);
+ PageAllocator::kNoAccess);
}
@@ -236,8 +234,7 @@ void CodeRange::FreeRawMemory(Address address, size_t length) {
DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
base::LockGuard<base::Mutex> guard(&code_range_mutex_);
free_list_.emplace_back(address, length);
- virtual_memory_.SetPermissions(address, length,
- base::OS::MemoryPermission::kNoAccess);
+ virtual_memory_.SetPermissions(address, length, PageAllocator::kNoAccess);
}
bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
@@ -316,20 +313,24 @@ void MemoryAllocator::TearDown() {
class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {
public:
explicit UnmapFreeMemoryTask(Isolate* isolate, Unmapper* unmapper)
- : CancelableTask(isolate), unmapper_(unmapper) {}
+ : CancelableTask(isolate),
+ unmapper_(unmapper),
+ tracer_(isolate->heap()->tracer()) {}
private:
void RunInternal() override {
+ TRACE_BACKGROUND_GC(tracer_,
+ GCTracer::BackgroundScope::BACKGROUND_UNMAPPER);
unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
unmapper_->pending_unmapping_tasks_semaphore_.Signal();
}
Unmapper* const unmapper_;
+ GCTracer* const tracer_;
DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
};
void MemoryAllocator::Unmapper::FreeQueuedChunks() {
- ReconsiderDelayedChunks();
if (heap_->use_tasks() && FLAG_concurrent_sweeping) {
if (concurrent_unmapping_tasks_active_ >= kMaxUnmapperTasks) {
// kMaxUnmapperTasks are already running. Avoid creating any more.
@@ -380,23 +381,12 @@ void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
void MemoryAllocator::Unmapper::TearDown() {
CHECK_EQ(0, concurrent_unmapping_tasks_active_);
- ReconsiderDelayedChunks();
- CHECK(delayed_regular_chunks_.empty());
PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
for (int i = 0; i < kNumberOfChunkQueues; i++) {
DCHECK(chunks_[i].empty());
}
}
-void MemoryAllocator::Unmapper::ReconsiderDelayedChunks() {
- std::list<MemoryChunk*> delayed_chunks(std::move(delayed_regular_chunks_));
- // Move constructed, so the permanent list should be empty.
- DCHECK(delayed_regular_chunks_.empty());
- for (auto it = delayed_chunks.begin(); it != delayed_chunks.end(); ++it) {
- AddMemoryChunkSafe<kRegular>(*it);
- }
-}
-
int MemoryAllocator::Unmapper::NumberOfChunks() {
base::LockGuard<base::Mutex> guard(&mutex_);
size_t result = 0;
@@ -406,20 +396,9 @@ int MemoryAllocator::Unmapper::NumberOfChunks() {
return static_cast<int>(result);
}
-bool MemoryAllocator::CanFreeMemoryChunk(MemoryChunk* chunk) {
- MarkCompactCollector* mc = isolate_->heap()->mark_compact_collector();
- // We cannot free a memory chunk in new space while the sweeper is running
- // because the memory chunk can be in the queue of a sweeper task.
- // Chunks in old generation are unmapped if they are empty.
- DCHECK(chunk->InNewSpace() || chunk->SweepingDone());
- return !chunk->InNewSpace() || mc == nullptr ||
- !mc->sweeper()->sweeping_in_progress();
-}
-
bool MemoryAllocator::CommitMemory(Address base, size_t size,
Executability executable) {
- if (!base::OS::SetPermissions(base, size,
- base::OS::MemoryPermission::kReadWrite)) {
+ if (!SetPermissions(base, size, PageAllocator::kReadWrite)) {
return false;
}
UpdateAllocatedSpaceLimits(base, base + size);
@@ -448,7 +427,7 @@ void MemoryAllocator::FreeMemory(Address base, size_t size,
code_range()->FreeRawMemory(base, size);
} else {
DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid());
- CHECK(base::OS::Free(base, size));
+ CHECK(FreePages(base, size));
}
}
@@ -481,7 +460,7 @@ Address MemoryAllocator::AllocateAlignedMemory(
}
} else {
if (reservation.SetPermissions(base, commit_size,
- base::OS::MemoryPermission::kReadWrite)) {
+ PageAllocator::kReadWrite)) {
UpdateAllocatedSpaceLimits(base, base + commit_size);
} else {
base = nullptr;
@@ -545,8 +524,8 @@ void MemoryChunk::SetReadAndExecutable() {
size_t page_size = MemoryAllocator::GetCommitPageSize();
DCHECK(IsAddressAligned(protect_start, page_size));
size_t protect_size = RoundUp(area_size(), page_size);
- CHECK(base::OS::SetPermissions(protect_start, protect_size,
- base::OS::MemoryPermission::kReadExecute));
+ CHECK(SetPermissions(protect_start, protect_size,
+ PageAllocator::kReadExecute));
}
}
@@ -564,8 +543,8 @@ void MemoryChunk::SetReadAndWritable() {
size_t page_size = MemoryAllocator::GetCommitPageSize();
DCHECK(IsAddressAligned(unprotect_start, page_size));
size_t unprotect_size = RoundUp(area_size(), page_size);
- CHECK(base::OS::SetPermissions(unprotect_start, unprotect_size,
- base::OS::MemoryPermission::kReadWrite));
+ CHECK(SetPermissions(unprotect_start, unprotect_size,
+ PageAllocator::kReadWrite));
}
}
@@ -604,6 +583,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->set_next_chunk(nullptr);
chunk->set_prev_chunk(nullptr);
chunk->local_tracker_ = nullptr;
+ chunk->InitializeFreeListCategories();
heap->incremental_marking()->non_atomic_marking_state()->ClearLiveness(chunk);
@@ -618,9 +598,8 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
size_t page_size = MemoryAllocator::GetCommitPageSize();
DCHECK(IsAddressAligned(area_start, page_size));
size_t area_size = RoundUp(area_end - area_start, page_size);
- CHECK(base::OS::SetPermissions(
- area_start, area_size,
- base::OS::MemoryPermission::kReadWriteExecute));
+ CHECK(SetPermissions(area_start, area_size,
+ PageAllocator::kReadWriteExecute));
}
}
@@ -634,7 +613,6 @@ Page* PagedSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
Page* page = static_cast<Page*>(chunk);
DCHECK_GE(Page::kAllocatableMemory, page->area_size());
// Make sure that categories are initialized before freeing the area.
- page->InitializeFreeListCategories();
page->ResetAllocatedBytes();
heap()->incremental_marking()->SetOldSpacePageFlags(page);
page->InitializationMemoryFence();
@@ -866,6 +844,22 @@ size_t Page::AvailableInFreeList() {
return sum;
}
+#ifdef DEBUG
+namespace {
+// Skips fillers starting from the given filler up to the end address.
+// Returns the first address after the skipped fillers.
+Address SkipFillers(HeapObject* filler, Address end) {
+ Address addr = filler->address();
+ while (addr < end) {
+ filler = HeapObject::FromAddress(addr);
+ CHECK(filler->IsFiller());
+ addr = filler->address() + filler->Size();
+ }
+ return addr;
+}
+} // anonymous namespace
+#endif // DEBUG
+
size_t Page::ShrinkToHighWaterMark() {
// Shrinking only makes sense outside of the CodeRange, where we don't care
// about address space fragmentation.
@@ -877,29 +871,13 @@ size_t Page::ShrinkToHighWaterMark() {
HeapObject* filler = HeapObject::FromAddress(HighWaterMark());
if (filler->address() == area_end()) return 0;
CHECK(filler->IsFiller());
- if (!filler->IsFreeSpace()) return 0;
-
-#ifdef DEBUG
- // Check the the filler is indeed the last filler on the page.
- HeapObjectIterator it(this);
- HeapObject* filler2 = nullptr;
- for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
- filler2 = HeapObject::FromAddress(obj->address() + obj->Size());
- }
- if (filler2 == nullptr || filler2->address() == area_end()) return 0;
- DCHECK(filler2->IsFiller());
- // The deserializer might leave behind fillers. In this case we need to
- // iterate even further.
- while ((filler2->address() + filler2->Size()) != area_end()) {
- filler2 = HeapObject::FromAddress(filler2->address() + filler2->Size());
- DCHECK(filler2->IsFiller());
- }
- DCHECK_EQ(filler->address(), filler2->address());
-#endif // DEBUG
+ // Ensure that no objects were allocated in the [filler, area_end) region.
+ DCHECK_EQ(area_end(), SkipFillers(filler, area_end()));
+ // Ensure that no objects will be allocated on this page.
+ DCHECK_EQ(0u, AvailableInFreeList());
- size_t unused = RoundDown(
- static_cast<size_t>(area_end() - filler->address() - FreeSpace::kSize),
- MemoryAllocator::GetCommitPageSize());
+ size_t unused = RoundDown(static_cast<size_t>(area_end() - filler->address()),
+ MemoryAllocator::GetCommitPageSize());
if (unused > 0) {
DCHECK_EQ(0u, unused % MemoryAllocator::GetCommitPageSize());
if (FLAG_trace_gc_verbose) {
@@ -914,8 +892,10 @@ size_t Page::ShrinkToHighWaterMark() {
ClearRecordedSlots::kNo);
heap()->memory_allocator()->PartialFreeMemory(
this, address() + size() - unused, unused, area_end() - unused);
- CHECK(filler->IsFiller());
- CHECK_EQ(filler->address() + filler->Size(), area_end());
+ if (filler->address() != area_end()) {
+ CHECK(filler->IsFiller());
+ CHECK_EQ(filler->address() + filler->Size(), area_end());
+ }
}
return unused;
}
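// [Editor's note] A minimal standalone sketch (not part of the commit) of the
// shrink arithmetic above: only whole commit pages past the trailing filler
// can be released, hence the RoundDown. The addresses and the 4 KB page size
// are hypothetical.
#include <cassert>
#include <cstddef>

int main() {
  const size_t kCommitPageSize = 4096;
  size_t filler_address = 0x10480;  // start of the trailing filler
  size_t area_end = 0x14000;        // end of the page's usable area
  size_t unused = (area_end - filler_address) / kCommitPageSize *
                  kCommitPageSize;  // RoundDown to page granularity
  assert(unused == 0x3000);  // 0x3B80 bytes round down to three 4 KB pages
  return 0;
}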
@@ -959,7 +939,7 @@ void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
DCHECK_EQ(chunk->address() + chunk->size(),
chunk->area_end() + CodePageGuardSize());
reservation->SetPermissions(chunk->area_end_, page_size,
- base::OS::MemoryPermission::kNoAccess);
+ PageAllocator::kNoAccess);
}
// On e.g. Windows, a reservation may be larger than a page and releasing
// partially starting at |start_free| will also release the potentially
@@ -1111,9 +1091,7 @@ bool MemoryAllocator::CommitBlock(Address start, size_t size,
bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
- if (!base::OS::SetPermissions(start, size,
- base::OS::MemoryPermission::kNoAccess))
- return false;
+ if (!SetPermissions(start, size, PageAllocator::kNoAccess)) return false;
isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
return true;
}
@@ -1121,19 +1099,10 @@ bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
void MemoryAllocator::ZapBlock(Address start, size_t size) {
for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
- Memory::Address_at(start + s) = kZapValue;
+ Memory::Address_at(start + s) = reinterpret_cast<Address>(kZapValue);
}
}
-#ifdef DEBUG
-void MemoryAllocator::ReportStatistics() {
- size_t size = Size();
- float pct = static_cast<float>(capacity_ - size) / capacity_;
- PrintF(" capacity: %zu , used: %" PRIuS ", available: %%%d\n\n",
- capacity_, size, static_cast<int>(pct * 100));
-}
-#endif
-
size_t MemoryAllocator::CodePageGuardStartOffset() {
// We are guarding code pages: the first OS page after the header
// will be protected as non-writable.
@@ -1159,7 +1128,7 @@ intptr_t MemoryAllocator::GetCommitPageSize() {
DCHECK(base::bits::IsPowerOfTwo(FLAG_v8_os_page_size));
return FLAG_v8_os_page_size * KB;
} else {
- return base::OS::CommitPageSize();
+ return CommitPageSize();
}
}
@@ -1180,26 +1149,23 @@ bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start,
const Address code_area = start + code_area_offset;
const Address post_guard_page = start + reserved_size - guard_size;
// Commit the non-executable header, from start to pre-code guard page.
- if (vm->SetPermissions(start, pre_guard_offset,
- base::OS::MemoryPermission::kReadWrite)) {
+ if (vm->SetPermissions(start, pre_guard_offset, PageAllocator::kReadWrite)) {
// Create the pre-code guard page, following the header.
if (vm->SetPermissions(pre_guard_page, page_size,
- base::OS::MemoryPermission::kNoAccess)) {
+ PageAllocator::kNoAccess)) {
// Commit the executable code body.
if (vm->SetPermissions(code_area, commit_size - pre_guard_offset,
- base::OS::MemoryPermission::kReadWrite)) {
+ PageAllocator::kReadWrite)) {
// Create the post-code guard page.
if (vm->SetPermissions(post_guard_page, page_size,
- base::OS::MemoryPermission::kNoAccess)) {
+ PageAllocator::kNoAccess)) {
UpdateAllocatedSpaceLimits(start, code_area + commit_size);
return true;
}
- vm->SetPermissions(code_area, commit_size,
- base::OS::MemoryPermission::kNoAccess);
+ vm->SetPermissions(code_area, commit_size, PageAllocator::kNoAccess);
}
}
- vm->SetPermissions(start, pre_guard_offset,
- base::OS::MemoryPermission::kNoAccess);
+ vm->SetPermissions(start, pre_guard_offset, PageAllocator::kNoAccess);
}
return false;
}
@@ -1379,7 +1345,7 @@ void Space::ResumeAllocationObservers() {
void Space::AllocationStep(int bytes_since_last, Address soon_object,
int size) {
- if (!allocation_observers_paused_) {
+ if (AllocationObserversActive()) {
heap()->CreateFillerObjectAt(soon_object, size, ClearRecordedSlots::kNo);
for (AllocationObserver* observer : allocation_observers_) {
observer->AllocationStep(bytes_since_last, soon_object, size);
@@ -1399,14 +1365,11 @@ intptr_t Space::GetNextInlineAllocationStepSize() {
PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
Executability executable)
- : Space(heap, space, executable),
+ : SpaceWithLinearArea(heap, space, executable),
anchor_(this),
- free_list_(this),
- top_on_previous_step_(0) {
+ free_list_(this) {
area_size_ = MemoryAllocator::PageAreaSize(space);
accounting_stats_.Clear();
-
- allocation_info_.Reset(nullptr, nullptr);
}
@@ -1469,7 +1432,7 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
// area_size_
// anchor_
- other->EmptyAllocationInfo();
+ other->FreeLinearAllocationArea();
// The linear allocation area of {other} should be destroyed now.
DCHECK_NULL(other->top());
@@ -1574,12 +1537,18 @@ size_t PagedSpace::ShrinkPageToHighWaterMark(Page* page) {
return unused;
}
+void PagedSpace::ResetFreeList() {
+ for (Page* page : *this) {
+ free_list_.EvictFreeListItems(page);
+ }
+ DCHECK(free_list_.IsEmpty());
+}
+
void PagedSpace::ShrinkImmortalImmovablePages() {
DCHECK(!heap()->deserialization_complete());
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
- EmptyAllocationInfo();
+ FreeLinearAllocationArea();
ResetFreeList();
-
for (Page* page : *this) {
DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE));
ShrinkPageToHighWaterMark(page);
@@ -1623,7 +1592,7 @@ void PagedSpace::ResetFreeListStatistics() {
}
}
-void PagedSpace::SetAllocationInfo(Address top, Address limit) {
+void PagedSpace::SetLinearAllocationArea(Address top, Address limit) {
SetTopAndLimit(top, limit);
if (top != nullptr && top != limit &&
heap()->incremental_marking()->black_allocation()) {
@@ -1645,35 +1614,38 @@ void PagedSpace::DecreaseLimit(Address new_limit) {
}
}
-Address PagedSpace::ComputeLimit(Address start, Address end,
- size_t size_in_bytes) {
- DCHECK_GE(end - start, size_in_bytes);
+Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
+ size_t min_size) {
+ DCHECK_GE(end - start, min_size);
if (heap()->inline_allocation_disabled()) {
- // Keep the linear allocation area to fit exactly the requested size.
- return start + size_in_bytes;
- } else if (!allocation_observers_paused_ && !allocation_observers_.empty() &&
- identity() == OLD_SPACE && !is_local()) {
- // Generated code may allocate inline from the linear allocation area for
- // Old Space. To make sure we can observe these allocations, we use a lower
- // limit.
- size_t step = RoundSizeDownToObjectAlignment(
- static_cast<int>(GetNextInlineAllocationStepSize()));
- return Max(start + size_in_bytes, Min(start + step, end));
+ // Fit the requested area exactly.
+ return start + min_size;
+ } else if (SupportsInlineAllocation() && AllocationObserversActive()) {
+ // Generated code may allocate inline from the linear allocation area.
+ // To make sure we can observe these allocations, we use a lower limit.
+ size_t step = GetNextInlineAllocationStepSize();
+
+ // TODO(ofrobots): there is a subtle difference between old space and new
+ // space here. Any way to avoid it? `step - 1` makes more sense as we would
+ // like to sample the object that straddles the `start + step` boundary.
+ // Rounding down further would introduce a small statistical error in
+ // sampling. However, presently PagedSpace requires limit to be aligned.
+ size_t rounded_step;
+ if (identity() == NEW_SPACE) {
+ DCHECK_GE(step, 1);
+ rounded_step = step - 1;
+ } else {
+ rounded_step = RoundSizeDownToObjectAlignment(static_cast<int>(step));
+ }
+ return Min(start + min_size + rounded_step, end);
} else {
// The entire node can be used as the linear allocation area.
return end;
}
}
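// [Editor's note] A standalone sketch (not part of the commit) of the three
// ComputeLimit regimes above, modeled with plain integers; all values are
// hypothetical.
#include <algorithm>
#include <cassert>
#include <cstddef>

size_t ComputeLimitSketch(size_t start, size_t end, size_t min_size,
                          bool inline_allocation_disabled,
                          bool observers_active, size_t rounded_step) {
  if (inline_allocation_disabled) return start + min_size;  // exact fit
  if (observers_active)  // lowered limit so observers get to step
    return std::min(start + min_size + rounded_step, end);
  return end;  // whole node becomes the linear allocation area
}

int main() {
  assert(ComputeLimitSketch(0x1000, 0x9000, 0x100, true, false, 0) == 0x1100);
  assert(ComputeLimitSketch(0x1000, 0x9000, 0x100, false, true, 0x800) ==
         0x1900);
  assert(ComputeLimitSketch(0x1000, 0x9000, 0x100, false, false, 0) == 0x9000);
  return 0;
}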
-void PagedSpace::StartNextInlineAllocationStep() {
- if (!allocation_observers_paused_ && SupportsInlineAllocation()) {
- top_on_previous_step_ = allocation_observers_.empty() ? 0 : top();
- DecreaseLimit(ComputeLimit(top(), limit(), 0));
- }
-}
-
-void PagedSpace::MarkAllocationInfoBlack() {
+void PagedSpace::MarkLinearAllocationAreaBlack() {
DCHECK(heap()->incremental_marking()->black_allocation());
Address current_top = top();
Address current_limit = limit();
@@ -1683,7 +1655,7 @@ void PagedSpace::MarkAllocationInfoBlack() {
}
}
-void PagedSpace::UnmarkAllocationInfo() {
+void PagedSpace::UnmarkLinearAllocationArea() {
Address current_top = top();
Address current_limit = limit();
if (current_top != nullptr && current_top != current_limit) {
@@ -1692,8 +1664,7 @@ void PagedSpace::UnmarkAllocationInfo() {
}
}
-// Empty space allocation info, returning unused area to free list.
-void PagedSpace::EmptyAllocationInfo() {
+void PagedSpace::FreeLinearAllocationArea() {
// Mark the old linear allocation area with a free space map so it can be
// skipped when scanning the heap.
Address current_top = top();
@@ -1718,12 +1689,7 @@ void PagedSpace::EmptyAllocationInfo() {
}
}
- if (top_on_previous_step_) {
- DCHECK(current_top >= top_on_previous_step_);
- AllocationStep(static_cast<int>(current_top - top_on_previous_step_),
- nullptr, 0);
- top_on_previous_step_ = 0;
- }
+ InlineAllocationStep(current_top, nullptr, nullptr, 0);
SetTopAndLimit(nullptr, nullptr);
DCHECK_GE(current_limit, current_top);
Free(current_top, current_limit - current_top);
@@ -1771,6 +1737,62 @@ std::unique_ptr<ObjectIterator> PagedSpace::GetObjectIterator() {
return std::unique_ptr<ObjectIterator>(new HeapObjectIterator(this));
}
+bool PagedSpace::RefillLinearAllocationAreaFromFreeList(size_t size_in_bytes) {
+ DCHECK(IsAligned(size_in_bytes, kPointerSize));
+ DCHECK_LE(top(), limit());
+#ifdef DEBUG
+ if (top() != limit()) {
+ DCHECK_EQ(Page::FromAddress(top()), Page::FromAddress(limit() - 1));
+ }
+#endif
+ // Don't free list allocate if there is linear space available.
+ DCHECK_LT(static_cast<size_t>(limit() - top()), size_in_bytes);
+
+ // Mark the old linear allocation area with a free space map so it can be
+ // skipped when scanning the heap. This also puts it back in the free list
+ // if it is big enough.
+ FreeLinearAllocationArea();
+
+ if (!is_local()) {
+ heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
+ Heap::kNoGCFlags, kGCCallbackScheduleIdleGarbageCollection);
+ }
+
+ size_t new_node_size = 0;
+ FreeSpace* new_node = free_list_.Allocate(size_in_bytes, &new_node_size);
+ if (new_node == nullptr) return false;
+
+ DCHECK_GE(new_node_size, size_in_bytes);
+
+#ifdef DEBUG
+ for (size_t i = 0; i < size_in_bytes / kPointerSize; i++) {
+ reinterpret_cast<Object**>(new_node->address())[i] =
+ Smi::FromInt(kCodeZapValue);
+ }
+#endif
+
+ // The old-space-step might have finished sweeping and restarted marking.
+ // Verify that it did not turn the page of the new node into an evacuation
+ // candidate.
+ DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
+
+ // Memory in the linear allocation area is counted as allocated. We may free
+ // a little of this again immediately - see below.
+ IncreaseAllocatedBytes(new_node_size, Page::FromAddress(new_node->address()));
+
+ Address start = new_node->address();
+ Address end = new_node->address() + new_node_size;
+ Address limit = ComputeLimit(start, end, size_in_bytes);
+ DCHECK_LE(limit, end);
+ DCHECK_LE(size_in_bytes, limit - start);
+ if (limit != end) {
+ Free(limit, end - limit);
+ }
+ SetLinearAllocationArea(start, limit);
+
+ return true;
+}
+
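// [Editor's note] The refactoring above splits the old FreeList::Allocate in
// two: FreeList::Allocate(size, &node_size) now only finds a node, and the
// caller owns the linear-area bookkeeping. The call protocol, condensed from
// the body above:
//
//   size_t node_size = 0;
//   FreeSpace* node = free_list_.Allocate(size_in_bytes, &node_size);
//   if (node == nullptr) return false;  // caller retries after GC
//   Address start = node->address();
//   Address limit = ComputeLimit(start, start + node_size, size_in_bytes);
//   if (limit != start + node_size) Free(limit, start + node_size - limit);
//   SetLinearAllocationArea(start, limit);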
#ifdef DEBUG
void PagedSpace::Print() {}
#endif
@@ -1904,31 +1926,13 @@ bool NewSpace::SetUp(size_t initial_semispace_capacity,
return false;
}
DCHECK(!from_space_.is_committed()); // No need to use memory yet.
- ResetAllocationInfo();
-
- // Allocate and set up the histogram arrays if necessary.
- allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
- promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
-#define SET_NAME(name) \
- allocated_histogram_[name].set_name(#name); \
- promoted_histogram_[name].set_name(#name);
- INSTANCE_TYPE_LIST(SET_NAME)
-#undef SET_NAME
+ ResetLinearAllocationArea();
return true;
}
void NewSpace::TearDown() {
- if (allocated_histogram_) {
- DeleteArray(allocated_histogram_);
- allocated_histogram_ = nullptr;
- }
- if (promoted_histogram_) {
- DeleteArray(promoted_histogram_);
- promoted_histogram_ = nullptr;
- }
-
allocation_info_.Reset(nullptr, nullptr);
to_space_.TearDown();
@@ -1952,7 +1956,7 @@ void NewSpace::Grow() {
if (!to_space_.ShrinkTo(from_space_.current_capacity())) {
// We are in an inconsistent state because we could not
// commit/uncommit memory from new space.
- CHECK(false);
+ FATAL("inconsistent state");
}
}
}
@@ -1973,7 +1977,7 @@ void NewSpace::Shrink() {
if (!to_space_.GrowTo(from_space_.current_capacity())) {
// We are in an inconsistent state because we could not
// commit/uncommit memory from new space.
- CHECK(false);
+ FATAL("inconsistent state");
}
}
}
@@ -2028,22 +2032,21 @@ bool SemiSpace::EnsureCurrentCapacity() {
return true;
}
-AllocationInfo LocalAllocationBuffer::Close() {
+LinearAllocationArea LocalAllocationBuffer::Close() {
if (IsValid()) {
heap_->CreateFillerObjectAt(
allocation_info_.top(),
static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
ClearRecordedSlots::kNo);
- const AllocationInfo old_info = allocation_info_;
- allocation_info_ = AllocationInfo(nullptr, nullptr);
+ const LinearAllocationArea old_info = allocation_info_;
+ allocation_info_ = LinearAllocationArea(nullptr, nullptr);
return old_info;
}
- return AllocationInfo(nullptr, nullptr);
+ return LinearAllocationArea(nullptr, nullptr);
}
-
-LocalAllocationBuffer::LocalAllocationBuffer(Heap* heap,
- AllocationInfo allocation_info)
+LocalAllocationBuffer::LocalAllocationBuffer(
+ Heap* heap, LinearAllocationArea allocation_info)
: heap_(heap), allocation_info_(allocation_info) {
if (IsValid()) {
heap_->CreateFillerObjectAt(
@@ -2074,21 +2077,25 @@ LocalAllocationBuffer& LocalAllocationBuffer::operator=(
return *this;
}
+void NewSpace::UpdateLinearAllocationArea() {
+ Address old_top = top();
+ Address new_top = to_space_.page_low();
-void NewSpace::UpdateAllocationInfo() {
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
- allocation_info_.Reset(to_space_.page_low(), to_space_.page_high());
+ allocation_info_.Reset(new_top, to_space_.page_high());
original_top_.SetValue(top());
original_limit_.SetValue(limit());
UpdateInlineAllocationLimit(0);
+ // TODO(ofrobots): It would be more correct to do a step before setting the
+ // limit on the new allocation area. However, fixing this causes a regression
+ // due to the idle scavenger getting pinged too frequently. crbug.com/795323.
+ InlineAllocationStep(old_top, new_top, nullptr, 0);
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
-
-void NewSpace::ResetAllocationInfo() {
- Address old_top = allocation_info_.top();
+void NewSpace::ResetLinearAllocationArea() {
to_space_.Reset();
- UpdateAllocationInfo();
+ UpdateLinearAllocationArea();
// Clear all mark-bits in the to-space.
IncrementalMarking::NonAtomicMarkingState* marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
@@ -2097,29 +2104,19 @@ void NewSpace::ResetAllocationInfo() {
// Concurrent marking may have local live bytes for this page.
heap()->concurrent_marking()->ClearLiveness(p);
}
- InlineAllocationStep(old_top, allocation_info_.top(), nullptr, 0);
}
-
-void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
- if (heap()->inline_allocation_disabled()) {
- // Lowest limit when linear allocation was disabled.
- Address high = to_space_.page_high();
- Address new_top = allocation_info_.top() + size_in_bytes;
- allocation_info_.set_limit(Min(new_top, high));
- } else if (allocation_observers_paused_ || top_on_previous_step_ == 0) {
- // Normal limit is the end of the current page.
- allocation_info_.set_limit(to_space_.page_high());
- } else {
- // Lower limit during incremental marking.
- Address high = to_space_.page_high();
- Address new_top = allocation_info_.top() + size_in_bytes;
- Address new_limit = new_top + GetNextInlineAllocationStepSize() - 1;
- allocation_info_.set_limit(Min(new_limit, high));
- }
+void NewSpace::UpdateInlineAllocationLimit(size_t min_size) {
+ Address new_limit = ComputeLimit(top(), to_space_.page_high(), min_size);
+ allocation_info_.set_limit(new_limit);
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
+void PagedSpace::UpdateInlineAllocationLimit(size_t min_size) {
+ Address new_limit = ComputeLimit(top(), limit(), min_size);
+ DCHECK_LE(new_limit, limit());
+ DecreaseLimit(new_limit);
+}
bool NewSpace::AddFreshPage() {
Address top = allocation_info_.top();
@@ -2133,7 +2130,7 @@ bool NewSpace::AddFreshPage() {
Address limit = Page::FromAllocationAreaAddress(top)->area_end();
int remaining_in_page = static_cast<int>(limit - top);
heap()->CreateFillerObjectAt(top, remaining_in_page, ClearRecordedSlots::kNo);
- UpdateAllocationInfo();
+ UpdateLinearAllocationArea();
return true;
}
@@ -2158,8 +2155,6 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
return false;
}
- InlineAllocationStep(old_top, allocation_info_.top(), nullptr, 0);
-
old_top = allocation_info_.top();
high = to_space_.page_high();
filler_size = Heap::GetFillToAlign(old_top, alignment);
@@ -2180,54 +2175,59 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
return true;
}
-
-void NewSpace::StartNextInlineAllocationStep() {
- if (!allocation_observers_paused_) {
- top_on_previous_step_ =
- !allocation_observers_.empty() ? allocation_info_.top() : 0;
+void SpaceWithLinearArea::StartNextInlineAllocationStep() {
+ if (AllocationObserversActive()) {
+ top_on_previous_step_ = top();
UpdateInlineAllocationLimit(0);
+ } else {
+ DCHECK_NULL(top_on_previous_step_);
}
}
-void NewSpace::PauseAllocationObservers() {
- // Do a step to account for memory allocated so far.
+void SpaceWithLinearArea::AddAllocationObserver(AllocationObserver* observer) {
InlineAllocationStep(top(), top(), nullptr, 0);
- Space::PauseAllocationObservers();
- top_on_previous_step_ = 0;
- UpdateInlineAllocationLimit(0);
+ Space::AddAllocationObserver(observer);
+ DCHECK_IMPLIES(top_on_previous_step_, AllocationObserversActive());
}
-void PagedSpace::PauseAllocationObservers() {
- // Do a step to account for memory allocated so far.
- if (top_on_previous_step_) {
- int bytes_allocated = static_cast<int>(top() - top_on_previous_step_);
- AllocationStep(bytes_allocated, nullptr, 0);
- }
- Space::PauseAllocationObservers();
- top_on_previous_step_ = 0;
+void SpaceWithLinearArea::RemoveAllocationObserver(
+ AllocationObserver* observer) {
+ Address top_for_next_step =
+ allocation_observers_.size() == 1 ? nullptr : top();
+ InlineAllocationStep(top(), top_for_next_step, nullptr, 0);
+ Space::RemoveAllocationObserver(observer);
+ DCHECK_IMPLIES(top_on_previous_step_, AllocationObserversActive());
}
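// [Editor's note] Shape of a client of the unified observer paths above,
// condensed to comments because AllocationObserver is declared elsewhere; the
// override name Step is an assumption based on the AllocationStep call sites
// in this file.
//
//   class SamplingObserver : public AllocationObserver {
//    public:
//     explicit SamplingObserver(intptr_t step_size)
//         : AllocationObserver(step_size) {}
//     void Step(int bytes_allocated, Address soon_object,
//               size_t size) override {
//       // invoked roughly every step_size bytes of linear allocation
//     }
//   };
//
// AddAllocationObserver performs a step for bytes allocated so far and then
// registers; RemoveAllocationObserver steps and unregisters, passing nullptr
// as top_for_next_step when the last observer goes away.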
-void NewSpace::ResumeAllocationObservers() {
+void SpaceWithLinearArea::PauseAllocationObservers() {
+ // Do a step to account for memory allocated so far.
+ InlineAllocationStep(top(), nullptr, nullptr, 0);
+ Space::PauseAllocationObservers();
DCHECK_NULL(top_on_previous_step_);
- Space::ResumeAllocationObservers();
- StartNextInlineAllocationStep();
+ UpdateInlineAllocationLimit(0);
}
-// TODO(ofrobots): refactor into SpaceWithLinearArea
-void PagedSpace::ResumeAllocationObservers() {
+void SpaceWithLinearArea::ResumeAllocationObservers() {
DCHECK_NULL(top_on_previous_step_);
Space::ResumeAllocationObservers();
StartNextInlineAllocationStep();
}
-void NewSpace::InlineAllocationStep(Address top, Address new_top,
- Address soon_object, size_t size) {
+void SpaceWithLinearArea::InlineAllocationStep(Address top,
+ Address top_for_next_step,
+ Address soon_object,
+ size_t size) {
if (top_on_previous_step_) {
- int bytes_allocated = static_cast<int>(top - top_on_previous_step_);
- for (AllocationObserver* observer : allocation_observers_) {
- observer->AllocationStep(bytes_allocated, soon_object, size);
+ if (top < top_on_previous_step_) {
+ // Generated code decreased the top pointer to do folded allocations.
+ DCHECK_NOT_NULL(top);
+ DCHECK_EQ(Page::FromAllocationAreaAddress(top),
+ Page::FromAllocationAreaAddress(top_on_previous_step_));
+ top_on_previous_step_ = top;
}
- top_on_previous_step_ = new_top;
+ int bytes_allocated = static_cast<int>(top - top_on_previous_step_);
+ AllocationStep(bytes_allocated, soon_object, static_cast<int>(size));
+ top_on_previous_step_ = top_for_next_step;
}
}
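// [Editor's note] A standalone sketch (not part of the commit) of why the
// clamp above matters: generated code can move the bump pointer backwards to
// fold allocations, and without the clamp bytes_allocated would underflow.
// Addresses are hypothetical.
#include <cassert>
#include <cstddef>

int main() {
  size_t top_on_previous_step = 0x2000;
  size_t top = 0x1F00;  // generated code folded an allocation, top decreased
  if (top < top_on_previous_step) top_on_previous_step = top;  // the clamp
  int bytes_allocated = static_cast<int>(top - top_on_previous_step);
  assert(bytes_allocated == 0);  // step reports zero instead of underflowing
  return 0;
}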
@@ -2372,7 +2372,7 @@ bool SemiSpace::GrowTo(size_t new_capacity) {
DCHECK_LE(new_capacity, maximum_capacity_);
DCHECK_GT(new_capacity, current_capacity_);
const size_t delta = new_capacity - current_capacity_;
- DCHECK(IsAligned(delta, base::OS::AllocatePageSize()));
+ DCHECK(IsAligned(delta, AllocatePageSize()));
const int delta_pages = static_cast<int>(delta / Page::kPageSize);
Page* last_page = anchor()->prev_page();
DCHECK_NE(last_page, anchor());
@@ -2416,7 +2416,7 @@ bool SemiSpace::ShrinkTo(size_t new_capacity) {
DCHECK_LT(new_capacity, current_capacity_);
if (is_committed()) {
const size_t delta = current_capacity_ - new_capacity;
- DCHECK(IsAligned(delta, base::OS::AllocatePageSize()));
+ DCHECK(IsAligned(delta, AllocatePageSize()));
int delta_pages = static_cast<int>(delta / Page::kPageSize);
Page* new_last_page;
Page* last_page;
@@ -2584,159 +2584,6 @@ void SemiSpaceIterator::Initialize(Address start, Address end) {
limit_ = end;
}
-#ifdef DEBUG
-// heap_histograms is shared, always clear it before using it.
-static void ClearHistograms(Isolate* isolate) {
-// We reset the name each time, though it hasn't changed.
-#define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
- INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
-#undef DEF_TYPE_NAME
-
-#define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
- INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
-#undef CLEAR_HISTOGRAM
-
- isolate->js_spill_information()->Clear();
-}
-
-static int CollectHistogramInfo(HeapObject* obj) {
- Isolate* isolate = obj->GetIsolate();
- InstanceType type = obj->map()->instance_type();
- DCHECK(0 <= type && type <= LAST_TYPE);
- DCHECK_NOT_NULL(isolate->heap_histograms()[type].name());
- isolate->heap_histograms()[type].increment_number(1);
- isolate->heap_histograms()[type].increment_bytes(obj->Size());
-
- if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
- JSObject::cast(obj)
- ->IncrementSpillStatistics(isolate->js_spill_information());
- }
-
- return obj->Size();
-}
-
-
-static void ReportHistogram(Isolate* isolate, bool print_spill) {
- PrintF("\n Object Histogram:\n");
- for (int i = 0; i <= LAST_TYPE; i++) {
- if (isolate->heap_histograms()[i].number() > 0) {
- PrintF(" %-34s%10d (%10d bytes)\n",
- isolate->heap_histograms()[i].name(),
- isolate->heap_histograms()[i].number(),
- isolate->heap_histograms()[i].bytes());
- }
- }
- PrintF("\n");
-
- // Summarize string types.
- int string_number = 0;
- int string_bytes = 0;
-#define INCREMENT(type, size, name, camel_name) \
- string_number += isolate->heap_histograms()[type].number(); \
- string_bytes += isolate->heap_histograms()[type].bytes();
- STRING_TYPE_LIST(INCREMENT)
-#undef INCREMENT
- if (string_number > 0) {
- PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
- string_bytes);
- }
-
- if (FLAG_collect_heap_spill_statistics && print_spill) {
- isolate->js_spill_information()->Print();
- }
-}
-#endif // DEBUG
-
-
-// Support for statistics gathering for --heap-stats and --log-gc.
-void NewSpace::ClearHistograms() {
- for (int i = 0; i <= LAST_TYPE; i++) {
- allocated_histogram_[i].clear();
- promoted_histogram_[i].clear();
- }
-}
-
-
-// Because the copying collector does not touch garbage objects, we iterate
-// the new space before a collection to get a histogram of allocated objects.
-// This only happens when --log-gc flag is set.
-void NewSpace::CollectStatistics() {
- ClearHistograms();
- SemiSpaceIterator it(this);
- for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next())
- RecordAllocation(obj);
-}
-
-
-static void DoReportStatistics(Isolate* isolate, HistogramInfo* info,
- const char* description) {
- LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
- // Lump all the string types together.
- int string_number = 0;
- int string_bytes = 0;
-#define INCREMENT(type, size, name, camel_name) \
- string_number += info[type].number(); \
- string_bytes += info[type].bytes();
- STRING_TYPE_LIST(INCREMENT)
-#undef INCREMENT
- if (string_number > 0) {
- LOG(isolate,
- HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
- }
-
- // Then do the other types.
- for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
- if (info[i].number() > 0) {
- LOG(isolate, HeapSampleItemEvent(info[i].name(), info[i].number(),
- info[i].bytes()));
- }
- }
- LOG(isolate, HeapSampleEndEvent("NewSpace", description));
-}
-
-
-void NewSpace::ReportStatistics() {
-#ifdef DEBUG
- if (FLAG_heap_stats) {
- float pct = static_cast<float>(Available()) / TotalCapacity();
- PrintF(" capacity: %" PRIuS ", available: %" PRIuS ", %%%d\n",
- TotalCapacity(), Available(), static_cast<int>(pct * 100));
- PrintF("\n Object Histogram:\n");
- for (int i = 0; i <= LAST_TYPE; i++) {
- if (allocated_histogram_[i].number() > 0) {
- PrintF(" %-34s%10d (%10d bytes)\n", allocated_histogram_[i].name(),
- allocated_histogram_[i].number(),
- allocated_histogram_[i].bytes());
- }
- }
- PrintF("\n");
- }
-#endif // DEBUG
-
- if (FLAG_log_gc) {
- Isolate* isolate = heap()->isolate();
- DoReportStatistics(isolate, allocated_histogram_, "allocated");
- DoReportStatistics(isolate, promoted_histogram_, "promoted");
- }
-}
-
-
-void NewSpace::RecordAllocation(HeapObject* obj) {
- InstanceType type = obj->map()->instance_type();
- DCHECK(0 <= type && type <= LAST_TYPE);
- allocated_histogram_[type].increment_number(1);
- allocated_histogram_[type].increment_bytes(obj->Size());
-}
-
-
-void NewSpace::RecordPromotion(HeapObject* obj) {
- InstanceType type = obj->map()->instance_type();
- DCHECK(0 <= type && type <= LAST_TYPE);
- promoted_histogram_[type].increment_number(1);
- promoted_histogram_[type].increment_bytes(obj->Size());
-}
-
-
size_t NewSpace::CommittedPhysicalMemory() {
if (!base::OS::HasLazyCommits()) return CommittedMemory();
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
@@ -2839,11 +2686,6 @@ void FreeListCategory::Relink() {
owner()->AddCategory(this);
}
-void FreeListCategory::Invalidate() {
- Reset();
- type_ = kInvalidCategory;
-}
-
FreeList::FreeList(PagedSpace* owner) : owner_(owner), wasted_bytes_(0) {
for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
categories_[i] = nullptr;
@@ -2932,9 +2774,9 @@ FreeSpace* FreeList::SearchForNodeInList(FreeListCategoryType type,
return node;
}
-FreeSpace* FreeList::FindNodeFor(size_t size_in_bytes, size_t* node_size) {
+FreeSpace* FreeList::Allocate(size_t size_in_bytes, size_t* node_size) {
+ DCHECK_GE(kMaxBlockSize, size_in_bytes);
FreeSpace* node = nullptr;
-
// First try the allocation fast path: try to allocate the minimum element
// size of a free list category. This operation is constant time.
FreeListCategoryType type =
@@ -2964,75 +2806,14 @@ FreeSpace* FreeList::FindNodeFor(size_t size_in_bytes, size_t* node_size) {
return node;
}
-bool FreeList::Allocate(size_t size_in_bytes) {
- DCHECK_GE(kMaxBlockSize, size_in_bytes);
- DCHECK(IsAligned(size_in_bytes, kPointerSize));
- DCHECK_LE(owner_->top(), owner_->limit());
-#ifdef DEBUG
- if (owner_->top() != owner_->limit()) {
- DCHECK_EQ(Page::FromAddress(owner_->top()),
- Page::FromAddress(owner_->limit() - 1));
- }
-#endif
- // Don't free list allocate if there is linear space available.
- DCHECK_LT(static_cast<size_t>(owner_->limit() - owner_->top()),
- size_in_bytes);
-
- // Mark the old linear allocation area with a free space map so it can be
- // skipped when scanning the heap. This also puts it back in the free list
- // if it is big enough.
- owner_->EmptyAllocationInfo();
-
- if (!owner_->is_local()) {
- owner_->heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
- Heap::kNoGCFlags, kGCCallbackScheduleIdleGarbageCollection);
- }
-
- size_t new_node_size = 0;
- FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
- if (new_node == nullptr) return false;
-
- DCHECK_GE(new_node_size, size_in_bytes);
-
-#ifdef DEBUG
- for (size_t i = 0; i < size_in_bytes / kPointerSize; i++) {
- reinterpret_cast<Object**>(new_node->address())[i] =
- Smi::FromInt(kCodeZapValue);
- }
-#endif
-
- // The old-space-step might have finished sweeping and restarted marking.
- // Verify that it did not turn the page of the new node into an evacuation
- // candidate.
- DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
-
- // Memory in the linear allocation area is counted as allocated. We may free
- // a little of this again immediately - see below.
- owner_->IncreaseAllocatedBytes(new_node_size,
- Page::FromAddress(new_node->address()));
-
- Address start = new_node->address();
- Address end = new_node->address() + new_node_size;
- Address limit = owner_->ComputeLimit(start, end, size_in_bytes);
- DCHECK_LE(limit, end);
- DCHECK_LE(size_in_bytes, limit - start);
- if (limit != end) {
- owner_->Free(limit, end - limit);
- }
- owner_->SetAllocationInfo(start, limit);
-
- return true;
-}
-
size_t FreeList::EvictFreeListItems(Page* page) {
size_t sum = 0;
- page->ForAllFreeListCategories(
- [this, &sum](FreeListCategory* category) {
- DCHECK_EQ(this, category->owner());
- sum += category->available();
- RemoveCategory(category);
- category->Invalidate();
- });
+ page->ForAllFreeListCategories([this, &sum](FreeListCategory* category) {
+ DCHECK_EQ(this, category->owner());
+ sum += category->available();
+ RemoveCategory(category);
+ category->Reset();
+ });
return sum;
}
@@ -3054,6 +2835,7 @@ void FreeList::RepairLists(Heap* heap) {
bool FreeList::AddCategory(FreeListCategory* category) {
FreeListCategoryType type = category->type_;
+ DCHECK_LT(type, kNumberOfCategories);
FreeListCategory* top = categories_[type];
if (category->is_empty()) return false;
@@ -3070,6 +2852,7 @@ bool FreeList::AddCategory(FreeListCategory* category) {
void FreeList::RemoveCategory(FreeListCategory* category) {
FreeListCategoryType type = category->type_;
+ DCHECK_LT(type, kNumberOfCategories);
FreeListCategory* top = categories_[type];
// Common double-linked list removal.
@@ -3152,7 +2935,7 @@ size_t FreeList::SumFreeLists() {
void PagedSpace::PrepareForMarkCompact() {
// We don't have a linear allocation area while sweeping. It will be restored
// on the first allocation after the sweep.
- EmptyAllocationInfo();
+ FreeLinearAllocationArea();
// Clear the free list before a full GC---it will be rebuilt afterward.
free_list_.Reset();
@@ -3181,6 +2964,12 @@ void PagedSpace::RepairFreeListsAfterDeserialization() {
}
Address start = page->HighWaterMark();
Address end = page->area_end();
+ if (start < end - size) {
+ // A region at the high watermark is already in the free list.
+ HeapObject* filler = HeapObject::FromAddress(start);
+ CHECK(filler->IsFiller());
+ start += filler->Size();
+ }
CHECK_EQ(size, static_cast<int>(end - start));
heap()->CreateFillerObjectAt(start, size, ClearRecordedSlots::kNo);
}
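// [Editor's note] Worked example (not part of the commit) for the guard
// above, with hypothetical numbers: start = 0x7000 (high watermark),
// end = 0x8000, size = 0xE00. Since start < end - size (0x7000 < 0x7200), a
// 0x200-byte filler already sits at the watermark; start advances past it so
// that exactly the remaining 0xE00 bytes are re-created as a filler.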
@@ -3194,7 +2983,7 @@ bool PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
// After waiting for the sweeper threads, there may be new free-list
// entries.
- return free_list_.Allocate(size_in_bytes);
+ return RefillLinearAllocationAreaFromFreeList(size_in_bytes);
}
return false;
}
@@ -3204,27 +2993,29 @@ bool CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
if (FLAG_concurrent_sweeping && collector->sweeping_in_progress()) {
collector->sweeper()->ParallelSweepSpace(identity(), 0);
RefillFreeList();
- return free_list_.Allocate(size_in_bytes);
+ return RefillLinearAllocationAreaFromFreeList(size_in_bytes);
}
return false;
}
-bool PagedSpace::SlowAllocateRaw(int size_in_bytes) {
+bool PagedSpace::SlowRefillLinearAllocationArea(int size_in_bytes) {
VMState<GC> state(heap()->isolate());
RuntimeCallTimerScope runtime_timer(
- heap()->isolate(), &RuntimeCallStats::GC_Custom_SlowAllocateRaw);
- return RawSlowAllocateRaw(size_in_bytes);
+ heap()->isolate(), RuntimeCallCounterId::kGC_Custom_SlowAllocateRaw);
+ return RawSlowRefillLinearAllocationArea(size_in_bytes);
}
-bool CompactionSpace::SlowAllocateRaw(int size_in_bytes) {
- return RawSlowAllocateRaw(size_in_bytes);
+bool CompactionSpace::SlowRefillLinearAllocationArea(int size_in_bytes) {
+ return RawSlowRefillLinearAllocationArea(size_in_bytes);
}
-bool PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
+bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes) {
// Allocation in this space has failed.
DCHECK_GE(size_in_bytes, 0);
const int kMaxPagesToSweep = 1;
+ if (RefillLinearAllocationAreaFromFreeList(size_in_bytes)) return true;
+
MarkCompactCollector* collector = heap()->mark_compact_collector();
// Sweeping is still in progress.
if (collector->sweeping_in_progress()) {
@@ -3238,14 +3029,18 @@ bool PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
RefillFreeList();
// Retry the free list allocation.
- if (free_list_.Allocate(static_cast<size_t>(size_in_bytes))) return true;
+ if (RefillLinearAllocationAreaFromFreeList(
+ static_cast<size_t>(size_in_bytes)))
+ return true;
// If sweeping is still in progress try to sweep pages.
int max_freed = collector->sweeper()->ParallelSweepSpace(
identity(), size_in_bytes, kMaxPagesToSweep);
RefillFreeList();
if (max_freed >= size_in_bytes) {
- if (free_list_.Allocate(static_cast<size_t>(size_in_bytes))) return true;
+ if (RefillLinearAllocationAreaFromFreeList(
+ static_cast<size_t>(size_in_bytes)))
+ return true;
}
} else if (is_local()) {
// Sweeping not in progress and we are on a {CompactionSpace}. This can
@@ -3254,14 +3049,17 @@ bool PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
Page* page = main_space->RemovePageSafe(size_in_bytes);
if (page != nullptr) {
AddPage(page);
- if (free_list_.Allocate(static_cast<size_t>(size_in_bytes))) return true;
+ if (RefillLinearAllocationAreaFromFreeList(
+ static_cast<size_t>(size_in_bytes)))
+ return true;
}
}
if (heap()->ShouldExpandOldGenerationOnSlowAllocation() && Expand()) {
DCHECK((CountTotalPages() > 1) ||
(static_cast<size_t>(size_in_bytes) <= free_list_.Available()));
- return free_list_.Allocate(static_cast<size_t>(size_in_bytes));
+ return RefillLinearAllocationAreaFromFreeList(
+ static_cast<size_t>(size_in_bytes));
}
// If sweeper threads are active, wait for them at that point and steal
@@ -3270,23 +3068,6 @@ bool PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
return SweepAndRetryAllocation(size_in_bytes);
}
-#ifdef DEBUG
-void PagedSpace::ReportStatistics() {
- int pct = static_cast<int>(Available() * 100 / Capacity());
- PrintF(" capacity: %" PRIuS ", waste: %" PRIuS
- ", available: %" PRIuS ", %%%d\n",
- Capacity(), Waste(), Available(), pct);
-
- heap()->mark_compact_collector()->EnsureSweepingCompleted();
- ClearHistograms(heap()->isolate());
- HeapObjectIterator obj_it(this);
- for (HeapObject* obj = obj_it.Next(); obj != nullptr; obj = obj_it.Next())
- CollectHistogramInfo(obj);
- ReportHistogram(heap()->isolate(), true);
-}
-#endif
-
-
// -----------------------------------------------------------------------------
// MapSpace implementation
@@ -3436,7 +3217,6 @@ LargePage* LargeObjectSpace::FindPage(Address a) {
auto it = chunk_map_.find(reinterpret_cast<Address>(key));
if (it != chunk_map_.end()) {
LargePage* page = it->second;
- DCHECK(LargePage::IsValid(page));
if (page->Contains(a)) {
return page;
}
@@ -3572,13 +3352,14 @@ void LargeObjectSpace::Verify() {
// We have only code, sequential strings, external strings (sequential
// strings that have been morphed into external strings), thin strings
// (sequential strings that have been morphed into thin strings), fixed
- // arrays, fixed double arrays, byte arrays, feedback vectors and free space
- // (right after allocation) in the large object space.
+ // arrays, fixed double arrays, byte arrays, feedback vectors, bigints and
+ // free space (right after allocation) in the large object space.
CHECK(object->IsAbstractCode() || object->IsSeqString() ||
object->IsExternalString() || object->IsThinString() ||
object->IsFixedArray() || object->IsFixedDoubleArray() ||
object->IsPropertyArray() || object->IsByteArray() ||
- object->IsFeedbackVector() || object->IsFreeSpace());
+ object->IsFeedbackVector() || object->IsBigInt() ||
+ object->IsFreeSpace());
// The object itself should look OK.
object->ObjectVerify();
@@ -3625,25 +3406,6 @@ void LargeObjectSpace::Print() {
}
}
-
-void LargeObjectSpace::ReportStatistics() {
- PrintF(" size: %" PRIuS "\n", size_);
- int num_objects = 0;
- ClearHistograms(heap()->isolate());
- LargeObjectIterator it(this);
- for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
- num_objects++;
- CollectHistogramInfo(obj);
- }
-
- PrintF(
- " number of objects %d, "
- "size of objects %" PRIuS "\n",
- num_objects, objects_size_);
- if (num_objects > 0) ReportHistogram(heap()->isolate(), false);
-}
-
-
void Page::Print() {
// Make a best-effort to print the objects in the page.
PrintF("Page@%p in %s\n", static_cast<void*>(this->address()),
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index 3fb3c39496..08fef7d6e3 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -34,12 +34,12 @@ class HeapTester;
class TestCodeRangeScope;
} // namespace heap
-class AllocationInfo;
class AllocationObserver;
class CompactionSpace;
class CompactionSpaceCollection;
class FreeList;
class Isolate;
+class LinearAllocationArea;
class LocalArrayBufferTracker;
class MemoryAllocator;
class MemoryChunk;
@@ -170,8 +170,6 @@ class FreeListCategory {
next_ = nullptr;
}
- void Invalidate();
-
void Reset();
void ResetStats() { Reset(); }
@@ -425,8 +423,6 @@ class MemoryChunk {
!chunk->high_water_mark_.TrySetValue(old_mark, new_mark));
}
- static bool IsValid(MemoryChunk* chunk) { return chunk != nullptr; }
-
Address address() const {
return reinterpret_cast<Address>(const_cast<MemoryChunk*>(this));
}
@@ -610,25 +606,9 @@ class MemoryChunk {
void set_prev_chunk(MemoryChunk* prev) { prev_chunk_.SetValue(prev); }
- Space* owner() const {
- uintptr_t owner_value = base::AsAtomicWord::Acquire_Load(
- reinterpret_cast<const uintptr_t*>(&owner_));
- return ((owner_value & kPageHeaderTagMask) == kPageHeaderTag)
- ? reinterpret_cast<Space*>(owner_value - kPageHeaderTag)
- : nullptr;
- }
-
- void set_owner(Space* space) {
- DCHECK_EQ(0, reinterpret_cast<uintptr_t>(space) & kPageHeaderTagMask);
- base::AsAtomicWord::Release_Store(
- reinterpret_cast<uintptr_t*>(&owner_),
- reinterpret_cast<uintptr_t>(space) + kPageHeaderTag);
- DCHECK_EQ(kPageHeaderTag, base::AsAtomicWord::Relaxed_Load(
- reinterpret_cast<const uintptr_t*>(&owner_)) &
- kPageHeaderTagMask);
- }
+ Space* owner() const { return owner_.Value(); }
- bool HasPageHeader() { return owner() != nullptr; }
+ void set_owner(Space* space) { owner_.SetValue(space); }
void InsertAfter(MemoryChunk* other);
void Unlink();
@@ -640,6 +620,8 @@ class MemoryChunk {
void SetReadAndExecutable();
void SetReadAndWritable();
+ inline void InitializeFreeListCategories();
+
protected:
static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
Address area_start, Address area_end,
@@ -661,10 +643,8 @@ class MemoryChunk {
// If the chunk needs to remember its memory reservation, it is stored here.
VirtualMemory reservation_;
- // The identity of the owning space. This is tagged as a failure pointer, but
- // no failure can be in an object, so this can be distinguished from any entry
- // in a fixed array.
- Address owner_;
+ // The space owning this memory chunk.
+ base::AtomicValue<Space*> owner_;
Heap* heap_;
@@ -792,8 +772,6 @@ class Page : public MemoryChunk {
static Page* ConvertNewToOld(Page* old_page);
- inline static Page* FromAnyPointerAddress(Heap* heap, Address addr);
-
// Create a Page object that is only used as anchor for the doubly-linked
// list of real pages.
explicit Page(Space* owner) { InitializeAsAnchor(owner); }
@@ -845,8 +823,6 @@ class Page : public MemoryChunk {
return &categories_[type];
}
- inline void InitializeFreeListCategories();
-
bool is_anchor() { return IsFlagSet(Page::ANCHOR); }
size_t wasted_memory() { return wasted_memory_; }
@@ -933,9 +909,11 @@ class Space : public Malloced {
// Identity used in error reporting.
AllocationSpace identity() { return id_; }
- void AddAllocationObserver(AllocationObserver* observer);
+ V8_EXPORT_PRIVATE virtual void AddAllocationObserver(
+ AllocationObserver* observer);
- void RemoveAllocationObserver(AllocationObserver* observer);
+ V8_EXPORT_PRIVATE virtual void RemoveAllocationObserver(
+ AllocationObserver* observer);
V8_EXPORT_PRIVATE virtual void PauseAllocationObservers();
@@ -995,11 +973,14 @@ class Space : public Malloced {
protected:
intptr_t GetNextInlineAllocationStepSize();
+ bool AllocationObserversActive() {
+ return !allocation_observers_paused_ && !allocation_observers_.empty();
+ }
std::vector<AllocationObserver*> allocation_observers_;
bool allocation_observers_paused_;
- private:
+ protected:
Heap* heap_;
AllocationSpace id_;
Executability executable_;
@@ -1223,19 +1204,11 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
void FreeQueuedChunks();
void WaitUntilCompleted();
void TearDown();
-
- bool has_delayed_chunks() { return delayed_regular_chunks_.size() > 0; }
-
- int NumberOfDelayedChunks() {
- base::LockGuard<base::Mutex> guard(&mutex_);
- return static_cast<int>(delayed_regular_chunks_.size());
- }
-
int NumberOfChunks();
private:
static const int kReservedQueueingSlots = 64;
- static const int kMaxUnmapperTasks = 24;
+ static const int kMaxUnmapperTasks = 4;
enum ChunkQueueType {
kRegular, // Pages of kPageSize that do not live in a CodeRange and
@@ -1253,12 +1226,7 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
template <ChunkQueueType type>
void AddMemoryChunkSafe(MemoryChunk* chunk) {
base::LockGuard<base::Mutex> guard(&mutex_);
- if (type != kRegular || allocator_->CanFreeMemoryChunk(chunk)) {
- chunks_[type].push_back(chunk);
- } else {
- DCHECK_EQ(type, kRegular);
- delayed_regular_chunks_.push_back(chunk);
- }
+ chunks_[type].push_back(chunk);
}
template <ChunkQueueType type>
@@ -1270,7 +1238,6 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
return chunk;
}
- void ReconsiderDelayedChunks();
template <FreeMode mode>
void PerformFreeMemoryOnQueuedChunks();
@@ -1278,10 +1245,6 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
MemoryAllocator* const allocator_;
base::Mutex mutex_;
std::vector<MemoryChunk*> chunks_[kNumberOfChunkQueues];
- // Delayed chunks cannot be processed in the current unmapping cycle because
- // of dependencies such as an active sweeper.
- // See MemoryAllocator::CanFreeMemoryChunk.
- std::list<MemoryChunk*> delayed_regular_chunks_;
CancelableTaskManager::Id task_ids_[kMaxUnmapperTasks];
base::Semaphore pending_unmapping_tasks_semaphore_;
intptr_t concurrent_unmapping_tasks_active_;
@@ -1342,8 +1305,6 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
template <MemoryAllocator::FreeMode mode = kFull>
void Free(MemoryChunk* chunk);
- bool CanFreeMemoryChunk(MemoryChunk* chunk);
-
// Returns allocated spaces in bytes.
size_t Size() { return size_.Value(); }
@@ -1415,11 +1376,6 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
CodeRange* code_range() { return code_range_; }
Unmapper* unmapper() { return &unmapper_; }
-#ifdef DEBUG
- // Reports statistic info of the space.
- void ReportStatistics();
-#endif
-
private:
// PreFree logically frees the object, i.e., it takes care of the size
// bookkeeping and calls the allocation callback.
@@ -1584,10 +1540,10 @@ class V8_EXPORT_PRIVATE HeapObjectIterator : public ObjectIterator {
// An abstraction of allocation and relocation pointers in a page-structured
// space.
-class AllocationInfo {
+class LinearAllocationArea {
public:
- AllocationInfo() : top_(nullptr), limit_(nullptr) {}
- AllocationInfo(Address top, Address limit) : top_(top), limit_(limit) {}
+ LinearAllocationArea() : top_(nullptr), limit_(nullptr) {}
+ LinearAllocationArea(Address top, Address limit) : top_(top), limit_(limit) {}
void Reset(Address top, Address limit) {
set_top(top);
@@ -1785,10 +1741,11 @@ class V8_EXPORT_PRIVATE FreeList {
// and the size should be a non-zero multiple of the word size.
size_t Free(Address start, size_t size_in_bytes, FreeMode mode);
- // Finds a node of size at least size_in_bytes and sets up a linear allocation
- // area using this node. Returns false if there is no such node and the caller
- // has to retry allocation after collecting garbage.
- MUST_USE_RESULT bool Allocate(size_t size_in_bytes);
+ // Allocates a free space node from the free list, of at least size_in_bytes
+ // bytes. Returns the actual node size in node_size, which can be bigger
+ // than size_in_bytes. This method returns nullptr if the allocation request
+ // cannot be handled by the free list.
+ MUST_USE_RESULT FreeSpace* Allocate(size_t size_in_bytes, size_t* node_size);
// Clear the free list.
void Reset();
@@ -1887,8 +1844,6 @@ class V8_EXPORT_PRIVATE FreeList {
static const size_t kMediumAllocationMax = kSmallListMax;
static const size_t kLargeAllocationMax = kMediumListMax;
- FreeSpace* FindNodeFor(size_t size_in_bytes, size_t* node_size);
-
// Walks all available categories for a given |type| and tries to retrieve
// a node. Returns nullptr if the category is empty.
FreeSpace* FindNodeIn(FreeListCategoryType type, size_t* node_size);
@@ -1975,16 +1930,73 @@ class LocalAllocationBuffer {
inline bool TryFreeLast(HeapObject* object, int object_size);
// Close a LAB, effectively invalidating it. Returns the unused area.
- AllocationInfo Close();
+ LinearAllocationArea Close();
private:
- LocalAllocationBuffer(Heap* heap, AllocationInfo allocation_info);
+ LocalAllocationBuffer(Heap* heap, LinearAllocationArea allocation_info);
Heap* heap_;
- AllocationInfo allocation_info_;
+ LinearAllocationArea allocation_info_;
+};
+
+class SpaceWithLinearArea : public Space {
+ public:
+ SpaceWithLinearArea(Heap* heap, AllocationSpace id, Executability executable)
+ : Space(heap, id, executable), top_on_previous_step_(0) {
+ allocation_info_.Reset(nullptr, nullptr);
+ }
+
+ virtual bool SupportsInlineAllocation() = 0;
+
+ // Returns the allocation pointer in this space.
+ Address top() { return allocation_info_.top(); }
+ Address limit() { return allocation_info_.limit(); }
+
+ // The allocation top address.
+ Address* allocation_top_address() { return allocation_info_.top_address(); }
+
+ // The allocation limit address.
+ Address* allocation_limit_address() {
+ return allocation_info_.limit_address();
+ }
+
+ V8_EXPORT_PRIVATE void AddAllocationObserver(
+ AllocationObserver* observer) override;
+ V8_EXPORT_PRIVATE void RemoveAllocationObserver(
+ AllocationObserver* observer) override;
+ V8_EXPORT_PRIVATE void ResumeAllocationObservers() override;
+ V8_EXPORT_PRIVATE void PauseAllocationObservers() override;
+
+ // When allocation observers are active we may use a lower limit to allow the
+ // observers to 'interrupt' earlier than the natural limit. Given a linear
+ // area bounded by [start, end), this function computes the limit to use to
+ // allow proper observation based on existing observers. min_size specifies
+ // the minimum size that the limited area should have.
+ Address ComputeLimit(Address start, Address end, size_t min_size);
+ V8_EXPORT_PRIVATE virtual void UpdateInlineAllocationLimit(
+ size_t min_size) = 0;
+
+ protected:
+ // If we are doing inline allocation in steps, this method performs the 'step'
+ // operation. top is the memory address of the bump pointer at the last
+ // inline allocation (i.e. it determines the number of bytes actually
+ // allocated since the last step.) top_for_next_step is the address of the
+ // bump pointer where the next byte is going to be allocated from. top and
+ // top_for_next_step may be different when we cross a page boundary or reset
+ // the space.
+ // TODO(ofrobots): clarify the precise difference between this and
+ // Space::AllocationStep.
+ void InlineAllocationStep(Address top, Address top_for_next_step,
+ Address soon_object, size_t size);
+ V8_EXPORT_PRIVATE void StartNextInlineAllocationStep() override;
+
+ // TODO(ofrobots): make these private after refactoring is complete.
+ LinearAllocationArea allocation_info_;
+ Address top_on_previous_step_;
};
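// [Editor's note] The space hierarchy after this refactoring, condensed from
// the declarations in this header:
//
//   Space
//    └── SpaceWithLinearArea      // allocation_info_, top_on_previous_step_
//         ├── PagedSpace          // inline allocation: OLD_SPACE, non-local
//         │    └── CompactionSpace
//         └── NewSpace            // inline allocation: always supported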
-class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
+class V8_EXPORT_PRIVATE PagedSpace
+ : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
public:
typedef PageIterator iterator;
@@ -2056,18 +2068,6 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
// due to being too small to use for allocation.
virtual size_t Waste() { return free_list_.wasted_bytes(); }
- // Returns the allocation pointer in this space.
- Address top() { return allocation_info_.top(); }
- Address limit() { return allocation_info_.limit(); }
-
- // The allocation top address.
- Address* allocation_top_address() { return allocation_info_.top_address(); }
-
- // The allocation limit address.
- Address* allocation_limit_address() {
- return allocation_info_.limit_address();
- }
-
enum UpdateSkipList { UPDATE_SKIP_LIST, IGNORE_SKIP_LIST };
// Allocate the requested number of bytes in the space if possible, return a
@@ -2106,16 +2106,13 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
inline bool TryFreeLast(HeapObject* object, int object_size);
- void ResetFreeList() { free_list_.Reset(); }
+ void ResetFreeList();
- void PauseAllocationObservers() override;
- void ResumeAllocationObservers() override;
+ // Frees the linear allocation area and returns the unused area to the
+ // free list.
+ void FreeLinearAllocationArea();
- // Empty space allocation info, returning unused area to free list.
- void EmptyAllocationInfo();
-
- void MarkAllocationInfoBlack();
- void UnmarkAllocationInfo();
+ void MarkLinearAllocationAreaBlack();
+ void UnmarkLinearAllocationArea();
void DecreaseAllocatedBytes(size_t bytes, Page* page) {
accounting_stats_.DecreaseAllocatedBytes(bytes, page);
@@ -2165,9 +2162,6 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
// Print meta info and objects in this space.
void Print() override;
- // Reports statistics for the space
- void ReportStatistics();
-
// Report code object related statistics
static void ReportCodeStatistics(Isolate* isolate);
static void ResetCodeStatistics(Isolate* isolate);
@@ -2212,11 +2206,10 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
std::unique_ptr<ObjectIterator> GetObjectIterator() override;
- Address ComputeLimit(Address start, Address end, size_t size_in_bytes);
- void SetAllocationInfo(Address top, Address limit);
+ void SetLinearAllocationArea(Address top, Address limit);
private:
- // Set space allocation info.
+ // Set space linear allocation area.
void SetTopAndLimit(Address top, Address limit) {
DCHECK(top == limit ||
Page::FromAddress(top) == Page::FromAddress(limit - 1));
@@ -2224,8 +2217,10 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
allocation_info_.Reset(top, limit);
}
void DecreaseLimit(Address new_limit);
- void StartNextInlineAllocationStep() override;
- bool SupportsInlineAllocation() { return identity() == OLD_SPACE; }
+ void UpdateInlineAllocationLimit(size_t min_size) override;
+ bool SupportsInlineAllocation() override {
+ return identity() == OLD_SPACE && !is_local();
+ }
protected:
// PagedSpaces that should be included in snapshots have different, i.e.,
@@ -2256,6 +2251,10 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
// (object size + alignment filler size) to the size_in_bytes.
inline HeapObject* TryAllocateLinearlyAligned(int* size_in_bytes,
AllocationAlignment alignment);
+
+ MUST_USE_RESULT bool RefillLinearAllocationAreaFromFreeList(
+ size_t size_in_bytes);
+
// If sweeping is still in progress try to sweep unswept pages. If that is
// not successful, wait for the sweeper threads and retry free-list
// allocation. Returns false if there is not enough space and the caller
@@ -2265,11 +2264,12 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
// Slow path of AllocateRaw. This function is space-dependent. Returns false
// if there is not enough space and the caller has to retry after
// collecting garbage.
- MUST_USE_RESULT virtual bool SlowAllocateRaw(int size_in_bytes);
+ MUST_USE_RESULT virtual bool SlowRefillLinearAllocationArea(
+ int size_in_bytes);
// Implementation of SlowAllocateRaw. Returns false if there is not enough
// space and the caller has to retry after collecting garbage.
- MUST_USE_RESULT bool RawSlowAllocateRaw(int size_in_bytes);
+ MUST_USE_RESULT bool RawSlowRefillLinearAllocationArea(int size_in_bytes);
size_t area_size_;
@@ -2282,14 +2282,9 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
// The space's free list.
FreeList free_list_;
- // Normal allocation information.
- AllocationInfo allocation_info_;
-
// Mutex guarding any concurrent access to the space.
base::Mutex space_mutex_;
- Address top_on_previous_step_;
-
friend class IncrementalMarking;
friend class MarkCompactCollector;
@@ -2500,18 +2495,15 @@ class SemiSpaceIterator : public ObjectIterator {
// The new space consists of a contiguous pair of semispaces. It simply
// forwards most functions to the appropriate semispace.
-class NewSpace : public Space {
+class NewSpace : public SpaceWithLinearArea {
public:
typedef PageIterator iterator;
explicit NewSpace(Heap* heap)
- : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
- top_on_previous_step_(0),
+ : SpaceWithLinearArea(heap, NEW_SPACE, NOT_EXECUTABLE),
to_space_(heap, kToSpace),
from_space_(heap, kFromSpace),
- reservation_(),
- allocated_histogram_(nullptr),
- promoted_histogram_(nullptr) {}
+ reservation_() {}
inline bool Contains(HeapObject* o);
inline bool ContainsSlow(Address a);
@@ -2631,18 +2623,6 @@ class NewSpace : public Space {
return to_space_.minimum_capacity();
}
- // Return the address of the allocation pointer in the active semispace.
- Address top() {
- DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.top()));
- return allocation_info_.top();
- }
-
- // Return the address of the allocation pointer limit in the active semispace.
- Address limit() {
- DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.limit()));
- return allocation_info_.limit();
- }
-
void ResetOriginalTop() {
DCHECK_GE(top(), original_top());
DCHECK_LE(top(), original_limit());
@@ -2660,14 +2640,6 @@ class NewSpace : public Space {
// Set the age mark in the active semispace.
void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
- // The allocation top and limit address.
- Address* allocation_top_address() { return allocation_info_.top_address(); }
-
- // The allocation limit address.
- Address* allocation_limit_address() {
- return allocation_info_.limit_address();
- }
-
MUST_USE_RESULT INLINE(AllocationResult AllocateRawAligned(
int size_in_bytes, AllocationAlignment alignment));
@@ -2681,19 +2653,14 @@ class NewSpace : public Space {
int size_in_bytes, AllocationAlignment alignment);
// Reset the allocation pointer to the beginning of the active semispace.
- void ResetAllocationInfo();
+ void ResetLinearAllocationArea();
// When inline allocation stepping is active, either because of incremental
// marking, idle scavenge, or allocation statistics gathering, we 'interrupt'
// inline allocation every once in a while. This is done by setting
  // allocation_info_.limit to be lower than the actual limit and increasing
// it in steps to guarantee that the observers are notified periodically.
- void UpdateInlineAllocationLimit(int size_in_bytes);
-
- void DisableInlineAllocationSteps() {
- top_on_previous_step_ = 0;
- UpdateInlineAllocationLimit(0);
- }
+ void UpdateInlineAllocationLimit(size_t size_in_bytes) override;
// Get the extent of the inactive semispace (for use as a marking stack,
// or to zap it). Notice: space-addresses are not necessarily on the
@@ -2729,19 +2696,6 @@ class NewSpace : public Space {
void Print() override { to_space_.Print(); }
#endif
- // Iterates the active semispace to collect statistics.
- void CollectStatistics();
- // Reports previously collected statistics of the active semispace.
- void ReportStatistics();
- // Clears previously collected statistics.
- void ClearHistograms();
-
- // Record the allocation or promotion of a heap object. Note that we don't
- // record every single allocation, but only those that happen in the
- // to space during a scavenge GC.
- void RecordAllocation(HeapObject* obj);
- void RecordPromotion(HeapObject* obj);
-
// Return whether the operation succeeded.
bool CommitFromSpaceIfNeeded() {
if (from_space_.is_committed()) return true;
@@ -2757,9 +2711,6 @@ class NewSpace : public Space {
SemiSpace* active_space() { return &to_space_; }
- void PauseAllocationObservers() override;
- void ResumeAllocationObservers() override;
-
iterator begin() { return to_space_.begin(); }
iterator end() { return to_space_.end(); }
@@ -2769,16 +2720,12 @@ class NewSpace : public Space {
SemiSpace& to_space() { return to_space_; }
private:
- // Update allocation info to match the current to-space page.
- void UpdateAllocationInfo();
+ // Update linear allocation area to match the current to-space page.
+ void UpdateLinearAllocationArea();
base::Mutex mutex_;
- // Allocation pointer and limit for normal allocation and allocation during
- // mark-compact collection.
- AllocationInfo allocation_info_;
- Address top_on_previous_step_;
- // The top and the limit at the time of setting the allocation info.
+ // The top and the limit at the time of setting the linear allocation area.
// These values can be accessed by background tasks.
base::AtomicValue<Address> original_top_;
base::AtomicValue<Address> original_limit_;
@@ -2788,20 +2735,8 @@ class NewSpace : public Space {
SemiSpace from_space_;
VirtualMemory reservation_;
- HistogramInfo* allocated_histogram_;
- HistogramInfo* promoted_histogram_;
-
bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
-
- // If we are doing inline allocation in steps, this method performs the 'step'
- // operation. top is the memory address of the bump pointer at the last
-  // inline allocation (i.e. it determines the number of bytes actually
- // allocated since the last step.) new_top is the address of the bump pointer
- // where the next byte is going to be allocated from. top and new_top may be
- // different when we cross a page boundary or reset the space.
- void InlineAllocationStep(Address top, Address new_top, Address soon_object,
- size_t size);
- void StartNextInlineAllocationStep() override;
+ bool SupportsInlineAllocation() override { return true; }
friend class SemiSpaceIterator;
};
@@ -2832,7 +2767,8 @@ class V8_EXPORT_PRIVATE CompactionSpace : public PagedSpace {
MUST_USE_RESULT bool SweepAndRetryAllocation(int size_in_bytes) override;
- MUST_USE_RESULT bool SlowAllocateRaw(int size_in_bytes) override;
+ MUST_USE_RESULT bool SlowRefillLinearAllocationArea(
+ int size_in_bytes) override;
};
@@ -2986,13 +2922,14 @@ class LargeObjectSpace : public Space {
std::unique_ptr<ObjectIterator> GetObjectIterator() override;
+ base::Mutex* chunk_map_mutex() { return &chunk_map_mutex_; }
+
#ifdef VERIFY_HEAP
virtual void Verify();
#endif
#ifdef DEBUG
void Print() override;
- void ReportStatistics();
#endif
private:
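
The spaces.h hunks above are part of a broader refactoring: the per-space
allocation_info_ (top/limit pair) and top_on_previous_step_ move out of
PagedSpace and NewSpace into a shared SpaceWithLinearArea base, and the slow
paths are renamed from *AllocateRaw to *RefillLinearAllocationArea. A minimal
standalone sketch of the bump-pointer fast path over such a linear area
(illustrative names, not V8's actual API):

#include <cstddef>
#include <cstdint>

struct LinearAllocationArea {
  uintptr_t top = 0;    // next free byte
  uintptr_t limit = 0;  // end of the current area

  // Returns the allocated address, or 0 when the area is exhausted and the
  // caller must refill it (V8 then falls back to the free list or a GC).
  uintptr_t Allocate(size_t size_in_bytes) {
    if (limit - top < size_in_bytes) return 0;  // slow path: refill
    uintptr_t result = top;
    top += size_in_bytes;
    return result;
  }
};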
diff --git a/deps/v8/src/heap/store-buffer.cc b/deps/v8/src/heap/store-buffer.cc
index 4613b705fa..724edf5721 100644
--- a/deps/v8/src/heap/store-buffer.cc
+++ b/deps/v8/src/heap/store-buffer.cc
@@ -58,7 +58,7 @@ void StoreBuffer::SetUp() {
if (!reservation.SetPermissions(reinterpret_cast<Address>(start_[0]),
kStoreBufferSize * kStoreBuffers,
- base::OS::MemoryPermission::kReadWrite)) {
+ PageAllocator::kReadWrite)) {
V8::FatalProcessOutOfMemory("StoreBuffer::SetUp");
}
current_ = 0;
@@ -105,10 +105,14 @@ void StoreBuffer::MoveEntriesToRememberedSet(int index) {
DCHECK_GE(index, 0);
DCHECK_LT(index, kStoreBuffers);
Address last_inserted_addr = nullptr;
+
+ // We are taking the chunk map mutex here because the page lookup of addr
+ // below may require us to check if addr is part of a large page.
+ base::LockGuard<base::Mutex> guard(heap_->lo_space()->chunk_map_mutex());
for (Address* current = start_[index]; current < lazy_top_[index];
current++) {
Address addr = *current;
- Page* page = Page::FromAnyPointerAddress(heap_, addr);
+ MemoryChunk* chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
if (IsDeletionAddress(addr)) {
last_inserted_addr = nullptr;
current++;
@@ -116,15 +120,15 @@ void StoreBuffer::MoveEntriesToRememberedSet(int index) {
DCHECK(!IsDeletionAddress(end));
addr = UnmarkDeletionAddress(addr);
if (end) {
- RememberedSet<OLD_TO_NEW>::RemoveRange(page, addr, end,
+ RememberedSet<OLD_TO_NEW>::RemoveRange(chunk, addr, end,
SlotSet::PREFREE_EMPTY_BUCKETS);
} else {
- RememberedSet<OLD_TO_NEW>::Remove(page, addr);
+ RememberedSet<OLD_TO_NEW>::Remove(chunk, addr);
}
} else {
DCHECK(!IsDeletionAddress(addr));
if (addr != last_inserted_addr) {
- RememberedSet<OLD_TO_NEW>::Insert(page, addr);
+ RememberedSet<OLD_TO_NEW>::Insert(chunk, addr);
last_inserted_addr = addr;
}
}
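
The comment added in the hunk above explains the new lock: resolving an
arbitrary address to its owning chunk may consult the large-object chunk map,
so the mutex is held across the whole scan. The same RAII pattern, sketched
with standard-library types rather than V8's base::LockGuard:

#include <cstdint>
#include <mutex>
#include <vector>

void ScanAddresses(std::mutex& chunk_map_mutex,
                   const std::vector<uintptr_t>& addresses) {
  std::lock_guard<std::mutex> guard(chunk_map_mutex);  // held for the loop
  for (uintptr_t addr : addresses) {
    // ... resolve addr to its MemoryChunk and update the remembered set ...
    (void)addr;
  }
}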
diff --git a/deps/v8/src/heap/store-buffer.h b/deps/v8/src/heap/store-buffer.h
index 75da76490e..a69abcc886 100644
--- a/deps/v8/src/heap/store-buffer.h
+++ b/deps/v8/src/heap/store-buffer.h
@@ -10,6 +10,7 @@
#include "src/base/platform/platform.h"
#include "src/cancelable-task.h"
#include "src/globals.h"
+#include "src/heap/gc-tracer.h"
#include "src/heap/remembered-set.h"
#include "src/heap/slot-set.h"
@@ -167,14 +168,19 @@ class StoreBuffer {
class Task : public CancelableTask {
public:
Task(Isolate* isolate, StoreBuffer* store_buffer)
- : CancelableTask(isolate), store_buffer_(store_buffer) {}
+ : CancelableTask(isolate),
+ store_buffer_(store_buffer),
+ tracer_(isolate->heap()->tracer()) {}
virtual ~Task() {}
private:
void RunInternal() override {
+ TRACE_BACKGROUND_GC(tracer_,
+ GCTracer::BackgroundScope::BACKGROUND_STORE_BUFFER);
store_buffer_->ConcurrentlyProcessStoreBuffer();
}
StoreBuffer* store_buffer_;
+ GCTracer* tracer_;
DISALLOW_COPY_AND_ASSIGN(Task);
};
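
The Task change above threads a GCTracer through to the background job so
that TRACE_BACKGROUND_GC can scope the work. A generic RAII trace scope of
the kind such macros expand to, sketched here with a hypothetical printf
backend rather than V8's tracer:

#include <chrono>
#include <cstdio>

class ScopedTrace {
 public:
  explicit ScopedTrace(const char* name)
      : name_(name), start_(std::chrono::steady_clock::now()) {}
  ~ScopedTrace() {
    auto us = std::chrono::duration_cast<std::chrono::microseconds>(
                  std::chrono::steady_clock::now() - start_)
                  .count();
    std::printf("%s: %lld us\n", name_, static_cast<long long>(us));
  }

 private:
  const char* name_;
  std::chrono::steady_clock::time_point start_;
};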
diff --git a/deps/v8/src/heap/stress-marking-observer.cc b/deps/v8/src/heap/stress-marking-observer.cc
new file mode 100644
index 0000000000..710282d573
--- /dev/null
+++ b/deps/v8/src/heap/stress-marking-observer.cc
@@ -0,0 +1,21 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/stress-marking-observer.h"
+
+namespace v8 {
+namespace internal {
+
+// TODO(majeski): meaningful step_size
+StressMarkingObserver::StressMarkingObserver(Heap& heap)
+ : AllocationObserver(64), heap_(heap) {}
+
+void StressMarkingObserver::Step(int bytes_allocated, Address soon_object,
+ size_t size) {
+ heap_.StartIncrementalMarkingIfAllocationLimitIsReached(Heap::kNoGCFlags,
+ kNoGCCallbackFlags);
+}
+
+} // namespace internal
+} // namespace v8
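
StressMarkingObserver plugs into V8's allocation-observer mechanism: Step()
is invoked roughly every step_size bytes of inline allocation (64 here, with
a TODO to pick a meaningful value). The observer shape, sketched standalone
with a stand-in base class rather than V8's real AllocationObserver:

#include <cstddef>
#include <cstdio>

class Observer {
 public:
  explicit Observer(size_t step_size) : step_size_(step_size) {}
  virtual ~Observer() = default;
  virtual void Step(int bytes_allocated) = 0;
  size_t step_size() const { return step_size_; }

 private:
  size_t step_size_;
};

class CountingObserver final : public Observer {
 public:
  CountingObserver() : Observer(64) {}
  void Step(int bytes_allocated) override {
    total_ += static_cast<size_t>(bytes_allocated);
    std::printf("observed %zu bytes so far\n", total_);
  }

 private:
  size_t total_ = 0;
};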
diff --git a/deps/v8/src/heap/stress-marking-observer.h b/deps/v8/src/heap/stress-marking-observer.h
new file mode 100644
index 0000000000..b97c2b179c
--- /dev/null
+++ b/deps/v8/src/heap/stress-marking-observer.h
@@ -0,0 +1,26 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_STRESS_MARKING_OBSERVER_H_
+#define V8_HEAP_STRESS_MARKING_OBSERVER_H_
+
+#include "src/heap/heap.h"
+
+namespace v8 {
+namespace internal {
+
+class StressMarkingObserver : public AllocationObserver {
+ public:
+ explicit StressMarkingObserver(Heap& heap);
+
+ void Step(int bytes_allocated, Address soon_object, size_t size) override;
+
+ private:
+ Heap& heap_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif
diff --git a/deps/v8/src/heap/stress-scavenge-observer.cc b/deps/v8/src/heap/stress-scavenge-observer.cc
new file mode 100644
index 0000000000..c9f169ae45
--- /dev/null
+++ b/deps/v8/src/heap/stress-scavenge-observer.cc
@@ -0,0 +1,94 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/stress-scavenge-observer.h"
+
+#include "src/base/utils/random-number-generator.h"
+#include "src/heap/heap-inl.h"
+#include "src/heap/spaces.h"
+#include "src/isolate.h"
+
+namespace v8 {
+namespace internal {
+
+// TODO(majeski): meaningful step_size
+StressScavengeObserver::StressScavengeObserver(Heap& heap)
+ : AllocationObserver(64),
+ heap_(heap),
+ has_requested_gc_(false),
+ max_new_space_size_reached_(0.0) {
+ limit_percentage_ = NextLimit();
+
+ if (FLAG_trace_stress_scavenge && !FLAG_fuzzer_gc_analysis) {
+ heap_.isolate()->PrintWithTimestamp(
+ "[StressScavenge] %d%% is the new limit\n", limit_percentage_);
+ }
+}
+
+void StressScavengeObserver::Step(int bytes_allocated, Address soon_object,
+ size_t size) {
+ if (has_requested_gc_ || heap_.new_space()->Capacity() == 0) {
+ return;
+ }
+
+ double current_percent =
+ heap_.new_space()->Size() * 100.0 / heap_.new_space()->Capacity();
+
+ if (FLAG_trace_stress_scavenge) {
+ heap_.isolate()->PrintWithTimestamp(
+ "[Scavenge] %.2lf%% of the new space capacity reached\n",
+ current_percent);
+ }
+
+ if (FLAG_fuzzer_gc_analysis) {
+ max_new_space_size_reached_ =
+ std::max(max_new_space_size_reached_, current_percent);
+ return;
+ }
+
+ if (static_cast<int>(current_percent) >= limit_percentage_) {
+ if (FLAG_trace_stress_scavenge) {
+ heap_.isolate()->PrintWithTimestamp("[Scavenge] GC requested\n");
+ }
+
+ has_requested_gc_ = true;
+ heap_.isolate()->stack_guard()->RequestGC();
+ }
+}
+
+bool StressScavengeObserver::HasRequestedGC() const {
+ return has_requested_gc_;
+}
+
+void StressScavengeObserver::RequestedGCDone() {
+ double current_percent =
+ heap_.new_space()->Size() * 100.0 / heap_.new_space()->Capacity();
+ limit_percentage_ = NextLimit(static_cast<int>(current_percent));
+
+ if (FLAG_trace_stress_scavenge) {
+ heap_.isolate()->PrintWithTimestamp(
+ "[Scavenge] %.2lf%% of the new space capacity reached\n",
+ current_percent);
+ heap_.isolate()->PrintWithTimestamp("[Scavenge] %d%% is the new limit\n",
+ limit_percentage_);
+ }
+
+ has_requested_gc_ = false;
+}
+
+double StressScavengeObserver::MaxNewSpaceSizeReached() const {
+ return max_new_space_size_reached_;
+}
+
+int StressScavengeObserver::NextLimit(int min) {
+ int max = FLAG_stress_scavenge;
+ if (min >= max) {
+ return max;
+ }
+
+ return min + heap_.isolate()->fuzzer_rng()->NextInt(max - min + 1);
+}
+
+} // namespace internal
+} // namespace v8
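
NextLimit() above draws a fresh trigger point uniformly from [min, max],
where max is the --stress-scavenge percentage: with --stress-scavenge=80 and
a current fill level of 30%, the next GC is requested somewhere between 30%
and 80% of new-space capacity. The same computation with <random> standing
in for V8's fuzzer RNG:

#include <random>

int NextLimit(std::mt19937& rng, int min, int max) {
  if (min >= max) return max;
  std::uniform_int_distribution<int> dist(min, max);  // inclusive bounds
  return dist(rng);
}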
diff --git a/deps/v8/src/heap/stress-scavenge-observer.h b/deps/v8/src/heap/stress-scavenge-observer.h
new file mode 100644
index 0000000000..6f69afe4c5
--- /dev/null
+++ b/deps/v8/src/heap/stress-scavenge-observer.h
@@ -0,0 +1,39 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_STRESS_SCAVENGE_OBSERVER_H_
+#define V8_HEAP_STRESS_SCAVENGE_OBSERVER_H_
+
+#include "src/heap/heap.h"
+
+namespace v8 {
+namespace internal {
+
+class StressScavengeObserver : public AllocationObserver {
+ public:
+ explicit StressScavengeObserver(Heap& heap);
+
+ void Step(int bytes_allocated, Address soon_object, size_t size) override;
+
+ bool HasRequestedGC() const;
+ void RequestedGCDone();
+
+  // The maximum percent of the new space capacity reached. This is tracked
+  // when specifying --fuzzer-gc-analysis.
+ double MaxNewSpaceSizeReached() const;
+
+ private:
+ Heap& heap_;
+ int limit_percentage_;
+ bool has_requested_gc_;
+
+ double max_new_space_size_reached_;
+
+ int NextLimit(int min = 0);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif
diff --git a/deps/v8/src/heap/sweeper.cc b/deps/v8/src/heap/sweeper.cc
index 17375aad97..25ba0df8fd 100644
--- a/deps/v8/src/heap/sweeper.cc
+++ b/deps/v8/src/heap/sweeper.cc
@@ -5,6 +5,7 @@
#include "src/heap/sweeper.h"
#include "src/heap/array-buffer-tracker-inl.h"
+#include "src/heap/gc-tracer.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/remembered-set.h"
#include "src/objects-inl.h"
@@ -68,21 +69,24 @@ class Sweeper::SweeperTask final : public CancelableTask {
sweeper_(sweeper),
pending_sweeper_tasks_(pending_sweeper_tasks),
num_sweeping_tasks_(num_sweeping_tasks),
- space_to_start_(space_to_start) {}
+ space_to_start_(space_to_start),
+ tracer_(isolate->heap()->tracer()) {}
virtual ~SweeperTask() {}
private:
void RunInternal() final {
- DCHECK_GE(space_to_start_, FIRST_SPACE);
+ TRACE_BACKGROUND_GC(tracer_,
+ GCTracer::BackgroundScope::MC_BACKGROUND_SWEEPING);
+ DCHECK_GE(space_to_start_, FIRST_PAGED_SPACE);
DCHECK_LE(space_to_start_, LAST_PAGED_SPACE);
- const int offset = space_to_start_ - FIRST_SPACE;
- const int num_spaces = LAST_PAGED_SPACE - FIRST_SPACE + 1;
+ const int offset = space_to_start_ - FIRST_PAGED_SPACE;
+ const int num_spaces = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
for (int i = 0; i < num_spaces; i++) {
- const int space_id = FIRST_SPACE + ((i + offset) % num_spaces);
+ const int space_id = FIRST_PAGED_SPACE + ((i + offset) % num_spaces);
// Do not sweep code space concurrently.
if (static_cast<AllocationSpace>(space_id) == CODE_SPACE) continue;
- DCHECK_GE(space_id, FIRST_SPACE);
+ DCHECK_GE(space_id, FIRST_PAGED_SPACE);
DCHECK_LE(space_id, LAST_PAGED_SPACE);
sweeper_->SweepSpaceFromTask(static_cast<AllocationSpace>(space_id));
}
@@ -94,6 +98,7 @@ class Sweeper::SweeperTask final : public CancelableTask {
base::Semaphore* const pending_sweeper_tasks_;
base::AtomicNumber<intptr_t>* const num_sweeping_tasks_;
AllocationSpace space_to_start_;
+ GCTracer* const tracer_;
DISALLOW_COPY_AND_ASSIGN(SweeperTask);
};
@@ -127,6 +132,7 @@ class Sweeper::IncrementalSweeperTask final : public CancelableTask {
void Sweeper::StartSweeping() {
CHECK(!stop_sweeper_tasks_.Value());
sweeping_in_progress_ = true;
+ iterability_in_progress_ = true;
MajorNonAtomicMarkingState* marking_state =
heap_->mark_compact_collector()->non_atomic_marking_state();
ForAllSweepingSpaces([this, marking_state](AllocationSpace space) {
@@ -144,7 +150,7 @@ void Sweeper::StartSweeperTasks() {
if (FLAG_concurrent_sweeping && sweeping_in_progress_ &&
!heap_->delay_sweeper_tasks_for_testing_) {
ForAllSweepingSpaces([this](AllocationSpace space) {
- if (space == NEW_SPACE) return;
+ DCHECK(IsValidSweepingSpace(space));
num_sweeping_tasks_.Increment(1);
SweeperTask* task = new SweeperTask(heap_->isolate(), this,
&pending_sweeper_tasks_semaphore_,
@@ -200,6 +206,8 @@ void Sweeper::AbortAndWaitForTasks() {
void Sweeper::EnsureCompleted() {
if (!sweeping_in_progress_) return;
+ EnsureIterabilityCompleted();
+
// If sweeping is not completed or not running at all, we try to complete it
// here.
ForAllSweepingSpaces(
@@ -207,24 +215,11 @@ void Sweeper::EnsureCompleted() {
AbortAndWaitForTasks();
- ForAllSweepingSpaces([this](AllocationSpace space) {
- if (space == NEW_SPACE) {
- swept_list_[NEW_SPACE].clear();
- }
- DCHECK(sweeping_list_[space].empty());
- });
+ ForAllSweepingSpaces(
+ [this](AllocationSpace space) { CHECK(sweeping_list_[space].empty()); });
sweeping_in_progress_ = false;
}
-void Sweeper::EnsureNewSpaceCompleted() {
- if (!sweeping_in_progress_) return;
- if (!FLAG_concurrent_sweeping || sweeping_in_progress()) {
- for (Page* p : *heap_->new_space()) {
- SweepOrWaitUntilSweepingCompleted(p);
- }
- }
-}
-
bool Sweeper::AreSweeperTasksRunning() {
return num_sweeping_tasks_.Value() != 0;
}
@@ -281,7 +276,7 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
CHECK_GT(free_end, free_start);
size_t size = static_cast<size_t>(free_end - free_start);
if (free_space_mode == ZAP_FREE_SPACE) {
- memset(free_start, 0xcc, size);
+ memset(free_start, 0xCC, size);
}
if (free_list_mode == REBUILD_FREE_LIST) {
freed_bytes = reinterpret_cast<PagedSpace*>(space)->UnaccountedFree(
@@ -320,7 +315,7 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
CHECK_GT(p->area_end(), free_start);
size_t size = static_cast<size_t>(p->area_end() - free_start);
if (free_space_mode == ZAP_FREE_SPACE) {
- memset(free_start, 0xcc, size);
+ memset(free_start, 0xCC, size);
}
if (free_list_mode == REBUILD_FREE_LIST) {
freed_bytes = reinterpret_cast<PagedSpace*>(space)->UnaccountedFree(
@@ -408,6 +403,7 @@ int Sweeper::ParallelSweepPage(Page* page, AllocationSpace identity) {
  // path. Checking here before taking the lock avoids deadlocks.
if (page->SweepingDone()) return 0;
+ DCHECK(IsValidSweepingSpace(identity));
int max_freed = 0;
{
base::LockGuard<base::Mutex> guard(page->mutex());
@@ -423,11 +419,7 @@ int Sweeper::ParallelSweepPage(Page* page, AllocationSpace identity) {
page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
const FreeSpaceTreatmentMode free_space_mode =
Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
- if (identity == NEW_SPACE) {
- RawSweep(page, IGNORE_FREE_LIST, free_space_mode);
- } else {
- max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
- }
+ max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
DCHECK(page->SweepingDone());
// After finishing sweeping of a page we clean up its remembered set.
@@ -461,9 +453,9 @@ void Sweeper::ScheduleIncrementalSweepingTask() {
void Sweeper::AddPage(AllocationSpace space, Page* page,
Sweeper::AddPageMode mode) {
base::LockGuard<base::Mutex> guard(&mutex_);
+ DCHECK(IsValidSweepingSpace(space));
DCHECK(!FLAG_concurrent_sweeping || !AreSweeperTasksRunning());
if (mode == Sweeper::REGULAR) {
- DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state().Value());
PrepareToBeSweptPage(space, page);
} else {
// Page has been temporarily removed from the sweeper. Accounting already
@@ -475,17 +467,19 @@ void Sweeper::AddPage(AllocationSpace space, Page* page,
}
void Sweeper::PrepareToBeSweptPage(AllocationSpace space, Page* page) {
- page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
DCHECK_GE(page->area_size(),
static_cast<size_t>(marking_state_->live_bytes(page)));
- if (space != NEW_SPACE) {
- heap_->paged_space(space)->IncreaseAllocatedBytes(
- marking_state_->live_bytes(page), page);
- }
+ DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state().Value());
+ page->ForAllFreeListCategories(
+ [](FreeListCategory* category) { DCHECK(!category->is_linked()); });
+ page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
+ heap_->paged_space(space)->IncreaseAllocatedBytes(
+ marking_state_->live_bytes(page), page);
}
Page* Sweeper::GetSweepingPageSafe(AllocationSpace space) {
base::LockGuard<base::Mutex> guard(&mutex_);
+ DCHECK(IsValidSweepingSpace(space));
Page* page = nullptr;
if (!sweeping_list_[space].empty()) {
page = sweeping_list_[space].front();
@@ -494,5 +488,94 @@ Page* Sweeper::GetSweepingPageSafe(AllocationSpace space) {
return page;
}
+void Sweeper::EnsurePageIsIterable(Page* page) {
+ AllocationSpace space = page->owner()->identity();
+ if (IsValidSweepingSpace(space)) {
+ SweepOrWaitUntilSweepingCompleted(page);
+ } else {
+ DCHECK(IsValidIterabilitySpace(space));
+ EnsureIterabilityCompleted();
+ }
+}
+
+void Sweeper::EnsureIterabilityCompleted() {
+ if (!iterability_in_progress_) return;
+
+ if (FLAG_concurrent_sweeping && iterability_task_started_) {
+ if (heap_->isolate()->cancelable_task_manager()->TryAbort(
+ iterability_task_id_) != CancelableTaskManager::kTaskAborted) {
+ iterability_task_semaphore_.Wait();
+ }
+ iterability_task_started_ = false;
+ }
+
+ for (Page* page : iterability_list_) {
+ MakeIterable(page);
+ }
+ iterability_list_.clear();
+ iterability_in_progress_ = false;
+}
+
+class Sweeper::IterabilityTask final : public CancelableTask {
+ public:
+ IterabilityTask(Isolate* isolate, Sweeper* sweeper,
+ base::Semaphore* pending_iterability_task)
+ : CancelableTask(isolate),
+ sweeper_(sweeper),
+ pending_iterability_task_(pending_iterability_task),
+ tracer_(isolate->heap()->tracer()) {}
+
+ virtual ~IterabilityTask() {}
+
+ private:
+ void RunInternal() final {
+ TRACE_BACKGROUND_GC(tracer_,
+ GCTracer::BackgroundScope::MC_BACKGROUND_SWEEPING);
+ for (Page* page : sweeper_->iterability_list_) {
+ sweeper_->MakeIterable(page);
+ }
+ sweeper_->iterability_list_.clear();
+ pending_iterability_task_->Signal();
+ }
+
+ Sweeper* const sweeper_;
+ base::Semaphore* const pending_iterability_task_;
+ GCTracer* const tracer_;
+
+ DISALLOW_COPY_AND_ASSIGN(IterabilityTask);
+};
+
+void Sweeper::StartIterabilityTasks() {
+ if (!iterability_in_progress_) return;
+
+ DCHECK(!iterability_task_started_);
+ if (FLAG_concurrent_sweeping && !iterability_list_.empty()) {
+ IterabilityTask* task = new IterabilityTask(heap_->isolate(), this,
+ &iterability_task_semaphore_);
+ iterability_task_id_ = task->id();
+ iterability_task_started_ = true;
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ task, v8::Platform::kShortRunningTask);
+ }
+}
+
+void Sweeper::AddPageForIterability(Page* page) {
+ DCHECK(sweeping_in_progress_);
+ DCHECK(iterability_in_progress_);
+ DCHECK(!iterability_task_started_);
+ DCHECK(IsValidIterabilitySpace(page->owner()->identity()));
+ DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state().Value());
+
+ iterability_list_.push_back(page);
+ page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
+}
+
+void Sweeper::MakeIterable(Page* page) {
+ DCHECK(IsValidIterabilitySpace(page->owner()->identity()));
+ const FreeSpaceTreatmentMode free_space_mode =
+ Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
+ RawSweep(page, IGNORE_FREE_LIST, free_space_mode);
+}
+
} // namespace internal
} // namespace v8
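
EnsureIterabilityCompleted() above uses a cancel-or-wait handshake: the main
thread first tries to abort the background iterability task, and only if the
task already started does it block on the semaphore. A simplified version of
that handshake with standard-library primitives (the real code goes through
CancelableTaskManager::TryAbort and base::Semaphore):

#include <atomic>
#include <condition_variable>
#include <mutex>
#include <thread>

struct Signal {
  std::mutex m;
  std::condition_variable cv;
  bool set = false;
  void Notify() {
    { std::lock_guard<std::mutex> l(m); set = true; }
    cv.notify_one();
  }
  void Wait() {
    std::unique_lock<std::mutex> l(m);
    cv.wait(l, [this] { return set; });
  }
};

int main() {
  std::atomic<bool> claimed{false};
  Signal done;
  std::thread task([&] {
    if (claimed.exchange(true)) return;  // aborted before it could run
    // ... make pages iterable in the background ...
    done.Notify();
  });
  if (claimed.exchange(true)) done.Wait();  // task won the race: wait for it
  task.join();
}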
diff --git a/deps/v8/src/heap/sweeper.h b/deps/v8/src/heap/sweeper.h
index 9a8eef3115..6eee902bcc 100644
--- a/deps/v8/src/heap/sweeper.h
+++ b/deps/v8/src/heap/sweeper.h
@@ -23,6 +23,7 @@ enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
class Sweeper {
public:
+ typedef std::vector<Page*> IterabilityList;
typedef std::deque<Page*> SweepingList;
typedef std::vector<Page*> SweptList;
@@ -83,7 +84,10 @@ class Sweeper {
incremental_sweeper_pending_(false),
sweeping_in_progress_(false),
num_sweeping_tasks_(0),
- stop_sweeper_tasks_(false) {}
+ stop_sweeper_tasks_(false),
+ iterability_task_semaphore_(0),
+ iterability_in_progress_(false),
+ iterability_task_started_(false) {}
bool sweeping_in_progress() const { return sweeping_in_progress_; }
@@ -104,32 +108,38 @@ class Sweeper {
void StartSweeping();
void StartSweeperTasks();
void EnsureCompleted();
- void EnsureNewSpaceCompleted();
bool AreSweeperTasksRunning();
- void SweepOrWaitUntilSweepingCompleted(Page* page);
Page* GetSweptPageSafe(PagedSpace* space);
+ void EnsurePageIsIterable(Page* page);
+
+ void AddPageForIterability(Page* page);
+ void StartIterabilityTasks();
+ void EnsureIterabilityCompleted();
+
private:
class IncrementalSweeperTask;
+ class IterabilityTask;
class SweeperTask;
- static const int kAllocationSpaces = LAST_PAGED_SPACE + 1;
- static const int kMaxSweeperTasks = kAllocationSpaces;
+ static const int kNumberOfSweepingSpaces = LAST_PAGED_SPACE + 1;
+ static const int kMaxSweeperTasks = 3;
template <typename Callback>
- void ForAllSweepingSpaces(Callback callback) {
- for (int i = 0; i < kAllocationSpaces; i++) {
- callback(static_cast<AllocationSpace>(i));
- }
+ void ForAllSweepingSpaces(Callback callback) const {
+ callback(OLD_SPACE);
+ callback(CODE_SPACE);
+ callback(MAP_SPACE);
}
// Can only be called on the main thread when no tasks are running.
bool IsDoneSweeping() const {
- for (int i = 0; i < kAllocationSpaces; i++) {
- if (!sweeping_list_[i].empty()) return false;
- }
- return true;
+ bool is_done = true;
+ ForAllSweepingSpaces([this, &is_done](AllocationSpace space) {
+ if (!sweeping_list_[space].empty()) is_done = false;
+ });
+ return is_done;
}
void SweepSpaceFromTask(AllocationSpace identity);
@@ -144,14 +154,26 @@ class Sweeper {
void PrepareToBeSweptPage(AllocationSpace space, Page* page);
+ void SweepOrWaitUntilSweepingCompleted(Page* page);
+
+ void MakeIterable(Page* page);
+
+ bool IsValidIterabilitySpace(AllocationSpace space) {
+ return space == NEW_SPACE;
+ }
+
+ bool IsValidSweepingSpace(AllocationSpace space) {
+ return space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE;
+ }
+
Heap* const heap_;
MajorNonAtomicMarkingState* marking_state_;
int num_tasks_;
- CancelableTaskManager::Id task_ids_[kMaxSweeperTasks];
+ CancelableTaskManager::Id task_ids_[kNumberOfSweepingSpaces];
base::Semaphore pending_sweeper_tasks_semaphore_;
base::Mutex mutex_;
- SweptList swept_list_[kAllocationSpaces];
- SweepingList sweeping_list_[kAllocationSpaces];
+ SweptList swept_list_[kNumberOfSweepingSpaces];
+ SweepingList sweeping_list_[kNumberOfSweepingSpaces];
bool incremental_sweeper_pending_;
bool sweeping_in_progress_;
// Counter is actively maintained by the concurrent tasks to avoid querying
@@ -159,6 +181,13 @@ class Sweeper {
base::AtomicNumber<intptr_t> num_sweeping_tasks_;
// Used by PauseOrCompleteScope to signal early bailout to tasks.
base::AtomicValue<bool> stop_sweeper_tasks_;
+
+ // Pages that are only made iterable but have their free lists ignored.
+ IterabilityList iterability_list_;
+ CancelableTaskManager::Id iterability_task_id_;
+ base::Semaphore iterability_task_semaphore_;
+ bool iterability_in_progress_;
+ bool iterability_task_started_;
};
} // namespace internal
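
ForAllSweepingSpaces above changes from an index loop over all paged spaces
to an explicit callback over exactly OLD_SPACE, CODE_SPACE and MAP_SPACE,
which is also why kMaxSweeperTasks drops to 3. The pattern of funnelling a
fixed set through a template callback, sketched with placeholder enum values:

enum class Space { kOld, kCode, kMap };

template <typename Callback>
void ForAllSweepingSpaces(Callback callback) {
  callback(Space::kOld);
  callback(Space::kCode);
  callback(Space::kMap);
}

// Usage mirroring IsDoneSweeping(): accumulate over all spaces, no early out.
bool AllEmpty(const bool (&empty)[3]) {
  bool is_done = true;
  ForAllSweepingSpaces([&](Space s) {
    if (!empty[static_cast<int>(s)]) is_done = false;
  });
  return is_done;
}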
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h
index ebc9f49dd9..368addd718 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/ia32/assembler-ia32-inl.h
@@ -70,7 +70,7 @@ void RelocInfo::apply(intptr_t delta) {
Address RelocInfo::target_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
- return Assembler::target_address_at(pc_, host_);
+ return Assembler::target_address_at(pc_, constant_pool_);
}
Address RelocInfo::target_address_address() {
@@ -153,7 +153,7 @@ void RelocInfo::WipeOut(Isolate* isolate) {
Memory::Address_at(pc_) = nullptr;
} else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
// Effectively write zero into the relocation.
- Assembler::set_target_address_at(isolate, pc_, host_,
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_,
pc_ + sizeof(int32_t));
} else {
UNREACHABLE();
@@ -261,25 +261,14 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
}
}
-Address Assembler::target_address_at(Address pc, Code* code) {
- Address constant_pool = code ? code->constant_pool() : nullptr;
- return target_address_at(pc, constant_pool);
-}
-
-void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
- Address target,
- ICacheFlushMode icache_flush_mode) {
- Address constant_pool = code ? code->constant_pool() : nullptr;
- set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
-}
-
Address Assembler::target_address_from_return_address(Address pc) {
return pc - kCallTargetAddressOffset;
}
void Assembler::deserialization_set_special_target_at(
Isolate* isolate, Address instruction_payload, Code* code, Address target) {
- set_target_address_at(isolate, instruction_payload, code, target);
+ set_target_address_at(isolate, instruction_payload,
+ code ? code->constant_pool() : nullptr, target);
}
Displacement Assembler::disp_at(Label* L) {
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index 99f52031ed..38508c7632 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -88,7 +88,7 @@ V8_INLINE uint64_t _xgetbv(unsigned int xcr) {
// directly because older assemblers do not include support for xgetbv and
// there is no easy way to conditionally compile based on the assembler
// used.
- __asm__ volatile(".byte 0x0f, 0x01, 0xd0" : "=a"(eax), "=d"(edx) : "c"(xcr));
+ __asm__ volatile(".byte 0x0F, 0x01, 0xD0" : "=a"(eax), "=d"(edx) : "c"(xcr));
return static_cast<uint64_t>(eax) | (static_cast<uint64_t>(edx) << 32);
}
@@ -398,7 +398,7 @@ bool Assembler::IsNop(Address addr) {
Address a = addr;
while (*a == 0x66) a++;
if (*a == 0x90) return true;
- if (a[0] == 0xf && a[1] == 0x1f) return true;
+ if (a[0] == 0xF && a[1] == 0x1F) return true;
return false;
}
@@ -415,28 +415,28 @@ void Assembler::Nop(int bytes) {
EMIT(0x90);
return;
case 3:
- EMIT(0xf);
- EMIT(0x1f);
+ EMIT(0xF);
+ EMIT(0x1F);
EMIT(0);
return;
case 4:
- EMIT(0xf);
- EMIT(0x1f);
+ EMIT(0xF);
+ EMIT(0x1F);
EMIT(0x40);
EMIT(0);
return;
case 6:
EMIT(0x66);
case 5:
- EMIT(0xf);
- EMIT(0x1f);
+ EMIT(0xF);
+ EMIT(0x1F);
EMIT(0x44);
EMIT(0);
EMIT(0);
return;
case 7:
- EMIT(0xf);
- EMIT(0x1f);
+ EMIT(0xF);
+ EMIT(0x1F);
EMIT(0x80);
EMIT(0);
EMIT(0);
@@ -454,8 +454,8 @@ void Assembler::Nop(int bytes) {
EMIT(0x66);
bytes--;
case 8:
- EMIT(0xf);
- EMIT(0x1f);
+ EMIT(0xF);
+ EMIT(0x1F);
EMIT(0x84);
EMIT(0);
EMIT(0);
@@ -507,7 +507,7 @@ void Assembler::popfd() {
void Assembler::push(const Immediate& x) {
EnsureSpace ensure_space(this);
if (x.is_int8()) {
- EMIT(0x6a);
+ EMIT(0x6A);
EMIT(x.immediate());
} else {
EMIT(0x68);
@@ -609,7 +609,7 @@ void Assembler::mov_w(const Operand& dst, const Immediate& src) {
EMIT(0x66);
EMIT(0xC7);
emit_operand(eax, dst);
- EMIT(static_cast<int8_t>(src.immediate() & 0xff));
+ EMIT(static_cast<int8_t>(src.immediate() & 0xFF));
EMIT(static_cast<int8_t>(src.immediate() >> 8));
}
@@ -796,6 +796,13 @@ void Assembler::cmpxchg_w(const Operand& dst, Register src) {
emit_operand(src, dst);
}
+void Assembler::lfence() {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xAE);
+ EMIT(0xE8);
+}
+
void Assembler::adc(Register dst, int32_t imm32) {
EnsureSpace ensure_space(this);
emit_arith(2, Operand(dst), Immediate(imm32));
@@ -1378,7 +1385,7 @@ void Assembler::test_w(Register reg, Immediate imm16) {
} else {
EMIT(0x66);
EMIT(0xF7);
- EMIT(0xc0 | reg.code());
+ EMIT(0xC0 | reg.code());
emit_w(imm16);
}
}
@@ -2426,6 +2433,13 @@ void Assembler::sqrtsd(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
+void Assembler::haddps(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xF2);
+ EMIT(0x0F);
+ EMIT(0x7C);
+ emit_sse_operand(dst, src);
+}
void Assembler::andpd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
@@ -2828,6 +2842,17 @@ void Assembler::pextrd(const Operand& dst, XMMRegister src, int8_t offset) {
EMIT(offset);
}
+void Assembler::insertps(XMMRegister dst, const Operand& src, int8_t offset) {
+ DCHECK(IsEnabled(SSE4_1));
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x3A);
+ EMIT(0x21);
+ emit_sse_operand(dst, src);
+ EMIT(offset);
+}
+
void Assembler::pinsrb(XMMRegister dst, const Operand& src, int8_t offset) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
@@ -2908,8 +2933,8 @@ void Assembler::sqrtss(XMMRegister dst, const Operand& src) {
void Assembler::ucomiss(XMMRegister dst, const Operand& src) {
EnsureSpace ensure_space(this);
- EMIT(0x0f);
- EMIT(0x2e);
+ EMIT(0x0F);
+ EMIT(0x2E);
emit_sse_operand(dst, src);
}
@@ -2982,6 +3007,13 @@ void Assembler::vcmpps(XMMRegister dst, XMMRegister src1, const Operand& src2,
EMIT(cmp);
}
+void Assembler::vshufps(XMMRegister dst, XMMRegister src1, const Operand& src2,
+ byte imm8) {
+ DCHECK(is_uint8(imm8));
+ vps(0xC6, dst, src1, src2);
+ EMIT(imm8);
+}
+
void Assembler::vpsllw(XMMRegister dst, XMMRegister src, int8_t imm8) {
XMMRegister iop = XMMRegister::from_code(6);
vinstr(0x71, iop, dst, Operand(src), k66, k0F, kWIG);
@@ -3043,6 +3075,12 @@ void Assembler::vpextrd(const Operand& dst, XMMRegister src, int8_t offset) {
EMIT(offset);
}
+void Assembler::vinsertps(XMMRegister dst, XMMRegister src1,
+ const Operand& src2, int8_t offset) {
+ vinstr(0x21, dst, src1, src2, k66, k0F3A, kWIG);
+ EMIT(offset);
+}
+
void Assembler::vpinsrb(XMMRegister dst, XMMRegister src1, const Operand& src2,
int8_t offset) {
vinstr(0x20, dst, src1, src2, k66, k0F3A, kWIG);
@@ -3186,12 +3224,12 @@ void Assembler::emit_sse_operand(XMMRegister dst, Register src) {
void Assembler::emit_vex_prefix(XMMRegister vreg, VectorLength l, SIMDPrefix pp,
LeadingOpcode mm, VexW w) {
if (mm != k0F || w != kW0) {
- EMIT(0xc4);
+ EMIT(0xC4);
// Change RXB from "110" to "111" to align with gdb disassembler.
- EMIT(0xe0 | mm);
- EMIT(w | ((~vreg.code() & 0xf) << 3) | l | pp);
+ EMIT(0xE0 | mm);
+ EMIT(w | ((~vreg.code() & 0xF) << 3) | l | pp);
} else {
- EMIT(0xc5);
+ EMIT(0xC5);
EMIT(((~vreg.code()) << 3) | l | pp);
}
}
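
The new lfence() emitter above adds a load-fence instruction, encoded as the
three bytes 0F AE E8 (matching the disassembler hunk for f0byte == 0xAE). A
buffer-level sketch of the emission, with a std::vector standing in for the
assembler's code buffer and its EMIT() macro:

#include <cstdint>
#include <vector>

void EmitLfence(std::vector<uint8_t>& code) {
  code.push_back(0x0F);
  code.push_back(0xAE);
  code.push_back(0xE8);  // ModRM byte selecting the LFENCE form of 0F AE
}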
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index 83e30df4f5..d57e3bee71 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -113,6 +113,7 @@ GENERAL_REGISTERS(DEFINE_REGISTER)
#undef DEFINE_REGISTER
constexpr Register no_reg = Register::no_reg();
+constexpr bool kPadArguments = false;
constexpr bool kSimpleFPAliasing = true;
constexpr bool kSimdMaskRegisters = false;
@@ -530,10 +531,6 @@ class Assembler : public AssemblerBase {
inline static void set_target_address_at(
Isolate* isolate, Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
- static inline Address target_address_at(Address pc, Code* code);
- static inline void set_target_address_at(
- Isolate* isolate, Address pc, Code* code, Address target,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
@@ -683,6 +680,9 @@ class Assembler : public AssemblerBase {
void cmpxchg_b(const Operand& dst, Register src);
void cmpxchg_w(const Operand& dst, Register src);
+ // Memory Fence
+ void lfence();
+
// Arithmetics
void adc(Register dst, int32_t imm32);
void adc(Register dst, const Operand& src);
@@ -1004,6 +1004,8 @@ class Assembler : public AssemblerBase {
void rcpps(XMMRegister dst, XMMRegister src) { rcpps(dst, Operand(src)); }
void rsqrtps(XMMRegister dst, const Operand& src);
void rsqrtps(XMMRegister dst, XMMRegister src) { rsqrtps(dst, Operand(src)); }
+ void haddps(XMMRegister dst, const Operand& src);
+ void haddps(XMMRegister dst, XMMRegister src) { haddps(dst, Operand(src)); }
void minps(XMMRegister dst, const Operand& src);
void minps(XMMRegister dst, XMMRegister src) { minps(dst, Operand(src)); }
@@ -1149,6 +1151,10 @@ class Assembler : public AssemblerBase {
}
void pextrd(const Operand& dst, XMMRegister src, int8_t offset);
+ void insertps(XMMRegister dst, XMMRegister src, int8_t offset) {
+ insertps(dst, Operand(src), offset);
+ }
+ void insertps(XMMRegister dst, const Operand& src, int8_t offset);
void pinsrb(XMMRegister dst, Register src, int8_t offset) {
pinsrb(dst, Operand(src), offset);
}
@@ -1397,6 +1403,14 @@ class Assembler : public AssemblerBase {
void vrsqrtps(XMMRegister dst, const Operand& src) {
vinstr(0x52, dst, xmm0, src, kNone, k0F, kWIG);
}
+ void vmovaps(XMMRegister dst, XMMRegister src) {
+ vps(0x28, dst, xmm0, Operand(src));
+ }
+ void vshufps(XMMRegister dst, XMMRegister src1, XMMRegister src2, byte imm8) {
+ vshufps(dst, src1, Operand(src2), imm8);
+ }
+ void vshufps(XMMRegister dst, XMMRegister src1, const Operand& src2,
+ byte imm8);
void vpsllw(XMMRegister dst, XMMRegister src, int8_t imm8);
void vpslld(XMMRegister dst, XMMRegister src, int8_t imm8);
@@ -1427,6 +1441,12 @@ class Assembler : public AssemblerBase {
}
void vpextrd(const Operand& dst, XMMRegister src, int8_t offset);
+ void vinsertps(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ int8_t offset) {
+ vinsertps(dst, src1, Operand(src2), offset);
+ }
+ void vinsertps(XMMRegister dst, XMMRegister src1, const Operand& src2,
+ int8_t offset);
void vpinsrb(XMMRegister dst, XMMRegister src1, Register src2,
int8_t offset) {
vpinsrb(dst, src1, Operand(src2), offset);
@@ -1459,6 +1479,12 @@ class Assembler : public AssemblerBase {
vinstr(0x5B, dst, xmm0, src, kF3, k0F, kWIG);
}
+ void vmovdqu(XMMRegister dst, const Operand& src) {
+ vinstr(0x6F, dst, xmm0, src, kF3, k0F, kWIG);
+ }
+ void vmovdqu(const Operand& dst, XMMRegister src) {
+ vinstr(0x7F, src, xmm0, dst, kF3, k0F, kWIG);
+ }
void vmovd(XMMRegister dst, Register src) { vmovd(dst, Operand(src)); }
void vmovd(XMMRegister dst, const Operand& src) {
vinstr(0x6E, dst, xmm0, src, k66, k0F, kWIG);
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index 8ca0b5989f..697539713a 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -486,12 +486,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// pop the faked function when we return. Notice that we cannot store a
// reference to the trampoline code directly in this stub, because the
// builtin stubs may not have been generated yet.
- if (type() == StackFrame::CONSTRUCT_ENTRY) {
- __ Call(BUILTIN_CODE(isolate(), JSConstructEntryTrampoline),
- RelocInfo::CODE_TARGET);
- } else {
- __ Call(BUILTIN_CODE(isolate(), JSEntryTrampoline), RelocInfo::CODE_TARGET);
- }
+ __ Call(EntryTrampoline(), RelocInfo::CODE_TARGET);
// Unlink this frame from the handler chain.
__ PopStackHandler();
@@ -588,7 +583,7 @@ static void CreateArrayDispatch(MacroAssembler* masm,
}
// If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
} else {
UNREACHABLE();
}
@@ -632,7 +627,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
Handle<Map> allocation_site_map =
masm->isolate()->factory()->allocation_site_map();
__ cmp(FieldOperand(ebx, 0), Immediate(allocation_site_map));
- __ Assert(equal, kExpectedAllocationSite);
+ __ Assert(equal, AbortReason::kExpectedAllocationSite);
}
// Save the resulting elements kind in type info. We can't just store r3
@@ -657,7 +652,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
}
// If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
} else {
UNREACHABLE();
}
@@ -731,9 +726,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ test(ecx, Immediate(kSmiTagMask));
- __ Assert(not_zero, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(not_zero, AbortReason::kUnexpectedInitialMapForArrayFunction);
__ CmpObjectType(ecx, MAP_TYPE, ecx);
- __ Assert(equal, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(equal, AbortReason::kUnexpectedInitialMapForArrayFunction);
// We should either have undefined in ebx or a valid AllocationSite
__ AssertUndefinedOrAllocationSite(ebx);
@@ -828,9 +823,9 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
__ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ test(ecx, Immediate(kSmiTagMask));
- __ Assert(not_zero, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(not_zero, AbortReason::kUnexpectedInitialMapForArrayFunction);
__ CmpObjectType(ecx, MAP_TYPE, ecx);
- __ Assert(equal, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(equal, AbortReason::kUnexpectedInitialMapForArrayFunction);
}
// Figure out the right elements kind
@@ -847,8 +842,9 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
__ cmp(ecx, Immediate(PACKED_ELEMENTS));
__ j(equal, &done);
__ cmp(ecx, Immediate(HOLEY_ELEMENTS));
- __ Assert(equal,
- kInvalidElementsKindForInternalArrayOrInternalPackedArray);
+ __ Assert(
+ equal,
+ AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray);
__ bind(&done);
}
@@ -959,7 +955,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// previous handle scope.
__ mov(Operand::StaticVariable(next_address), ebx);
__ sub(Operand::StaticVariable(level_address), Immediate(1));
- __ Assert(above_equal, kInvalidHandleScopeLevel);
+ __ Assert(above_equal, AbortReason::kInvalidHandleScopeLevel);
__ cmp(edi, Operand::StaticVariable(limit_address));
__ j(not_equal, &delete_allocated_handles);
@@ -1007,7 +1003,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ cmp(return_value, isolate->factory()->null_value());
__ j(equal, &ok, Label::kNear);
- __ Abort(kAPICallReturnedInvalidObject);
+ __ Abort(AbortReason::kAPICallReturnedInvalidObject);
__ bind(&ok);
#endif
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index a66334e3a0..8bd6b5f30c 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -16,8 +16,7 @@ namespace internal {
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
size_t allocated = 0;
- byte* buffer =
- AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
+ byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@@ -39,8 +38,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, allocated);
- CHECK(base::OS::SetPermissions(buffer, allocated,
- base::OS::MemoryPermission::kReadExecute));
+ CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
}
@@ -133,8 +131,7 @@ class LabelConverter {
MemMoveFunction CreateMemMoveFunction(Isolate* isolate) {
size_t allocated = 0;
- byte* buffer =
- AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
+ byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@@ -452,8 +449,7 @@ MemMoveFunction CreateMemMoveFunction(Isolate* isolate) {
masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, allocated);
- CHECK(base::OS::SetPermissions(buffer, allocated,
- base::OS::MemoryPermission::kReadExecute));
+ CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
// TODO(jkummerow): It would be nice to register this code creation event
// with the PROFILE / GDBJIT system.
return FUNCTION_CAST<MemMoveFunction>(buffer);
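
Both hunks above follow the same W^X discipline: the page comes back from
AllocatePage writable, code is emitted into it, and SetPermissions then flips
it to read-execute before the buffer is cast to a function pointer. The same
sequence expressed directly with POSIX mmap/mprotect rather than V8's page
allocator:

#include <cstring>
#include <sys/mman.h>

void* EmitIntoFreshPage(const unsigned char* code, size_t len) {
  if (len > 4096) return nullptr;
  void* buf = mmap(nullptr, 4096, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (buf == MAP_FAILED) return nullptr;
  std::memcpy(buf, code, len);  // write while the page is still RW
  if (mprotect(buf, 4096, PROT_READ | PROT_EXEC) != 0) return nullptr;
  return buf;  // now executable, no longer writable
}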
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index 39c6ff0d5c..6ce62e93bb 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -87,14 +87,11 @@ static const ByteMnemonic short_immediate_instr[] = {
// register stalls. They are included for completeness and because the cmp
// variant is used by the RecordWrite stub. Because it does not update the
// register it is not subject to partial register stalls.
-static ByteMnemonic byte_immediate_instr[] = {
- {0x0c, "or", UNSET_OP_ORDER},
- {0x24, "and", UNSET_OP_ORDER},
- {0x34, "xor", UNSET_OP_ORDER},
- {0x3c, "cmp", UNSET_OP_ORDER},
- {-1, "", UNSET_OP_ORDER}
-};
-
+static ByteMnemonic byte_immediate_instr[] = {{0x0C, "or", UNSET_OP_ORDER},
+ {0x24, "and", UNSET_OP_ORDER},
+ {0x34, "xor", UNSET_OP_ORDER},
+ {0x3C, "cmp", UNSET_OP_ORDER},
+ {-1, "", UNSET_OP_ORDER}};
static const char* const jump_conditional_mnem[] = {
/*0*/ "jo", "jno", "jc", "jnc",
@@ -251,7 +248,7 @@ class DisassemblerIA32 {
private:
const NameConverter& converter_;
- byte vex_byte0_; // 0xc4 or 0xc5
+ byte vex_byte0_; // 0xC4 or 0xC5
byte vex_byte1_;
byte vex_byte2_; // only for 3 bytes vex prefix
InstructionTable* instruction_table_;
@@ -282,59 +279,59 @@ class DisassemblerIA32 {
};
bool vex_128() {
- DCHECK(vex_byte0_ == 0xc4 || vex_byte0_ == 0xc5);
- byte checked = vex_byte0_ == 0xc4 ? vex_byte2_ : vex_byte1_;
+ DCHECK(vex_byte0_ == 0xC4 || vex_byte0_ == 0xC5);
+ byte checked = vex_byte0_ == 0xC4 ? vex_byte2_ : vex_byte1_;
return (checked & 4) == 0;
}
bool vex_none() {
- DCHECK(vex_byte0_ == 0xc4 || vex_byte0_ == 0xc5);
- byte checked = vex_byte0_ == 0xc4 ? vex_byte2_ : vex_byte1_;
+ DCHECK(vex_byte0_ == 0xC4 || vex_byte0_ == 0xC5);
+ byte checked = vex_byte0_ == 0xC4 ? vex_byte2_ : vex_byte1_;
return (checked & 3) == 0;
}
bool vex_66() {
- DCHECK(vex_byte0_ == 0xc4 || vex_byte0_ == 0xc5);
- byte checked = vex_byte0_ == 0xc4 ? vex_byte2_ : vex_byte1_;
+ DCHECK(vex_byte0_ == 0xC4 || vex_byte0_ == 0xC5);
+ byte checked = vex_byte0_ == 0xC4 ? vex_byte2_ : vex_byte1_;
return (checked & 3) == 1;
}
bool vex_f3() {
- DCHECK(vex_byte0_ == 0xc4 || vex_byte0_ == 0xc5);
- byte checked = vex_byte0_ == 0xc4 ? vex_byte2_ : vex_byte1_;
+ DCHECK(vex_byte0_ == 0xC4 || vex_byte0_ == 0xC5);
+ byte checked = vex_byte0_ == 0xC4 ? vex_byte2_ : vex_byte1_;
return (checked & 3) == 2;
}
bool vex_f2() {
- DCHECK(vex_byte0_ == 0xc4 || vex_byte0_ == 0xc5);
- byte checked = vex_byte0_ == 0xc4 ? vex_byte2_ : vex_byte1_;
+ DCHECK(vex_byte0_ == 0xC4 || vex_byte0_ == 0xC5);
+ byte checked = vex_byte0_ == 0xC4 ? vex_byte2_ : vex_byte1_;
return (checked & 3) == 3;
}
bool vex_w() {
- if (vex_byte0_ == 0xc5) return false;
+ if (vex_byte0_ == 0xC5) return false;
return (vex_byte2_ & 0x80) != 0;
}
bool vex_0f() {
- if (vex_byte0_ == 0xc5) return true;
+ if (vex_byte0_ == 0xC5) return true;
return (vex_byte1_ & 3) == 1;
}
bool vex_0f38() {
- if (vex_byte0_ == 0xc5) return false;
+ if (vex_byte0_ == 0xC5) return false;
return (vex_byte1_ & 3) == 2;
}
bool vex_0f3a() {
- if (vex_byte0_ == 0xc5) return false;
+ if (vex_byte0_ == 0xC5) return false;
return (vex_byte1_ & 3) == 3;
}
int vex_vreg() {
- DCHECK(vex_byte0_ == 0xc4 || vex_byte0_ == 0xc5);
- byte checked = vex_byte0_ == 0xc4 ? vex_byte2_ : vex_byte1_;
- return ~(checked >> 3) & 0xf;
+ DCHECK(vex_byte0_ == 0xC4 || vex_byte0_ == 0xC5);
+ byte checked = vex_byte0_ == 0xC4 ? vex_byte2_ : vex_byte1_;
+ return ~(checked >> 3) & 0xF;
}
char float_size_code() { return "sd"[vex_w()]; }
@@ -743,62 +740,62 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0xa9:
+ case 0xA9:
AppendToBuffer("vfmadd213s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0xb9:
+ case 0xB9:
AppendToBuffer("vfmadd231s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x9b:
+ case 0x9B:
AppendToBuffer("vfmsub132s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0xab:
+ case 0xAB:
AppendToBuffer("vfmsub213s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0xbb:
+ case 0xBB:
AppendToBuffer("vfmsub231s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x9d:
+ case 0x9D:
AppendToBuffer("vfnmadd132s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0xad:
+ case 0xAD:
AppendToBuffer("vfnmadd213s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0xbd:
+ case 0xBD:
AppendToBuffer("vfnmadd231s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x9f:
+ case 0x9F:
AppendToBuffer("vfnmsub132s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0xaf:
+ case 0xAF:
AppendToBuffer("vfnmsub213s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0xbf:
+ case 0xBF:
AppendToBuffer("vfnmsub231s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0xf7:
+ case 0xF7:
AppendToBuffer("shlx %s,", NameOfCPURegister(regop));
current += PrintRightOperand(current);
AppendToBuffer(",%s", NameOfCPURegister(vvvv));
@@ -850,6 +847,13 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(current));
current++;
break;
+ case 0x21:
+ AppendToBuffer("vinsertps %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(current));
+ current++;
+ break;
case 0x22:
AppendToBuffer("vpinsrd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
@@ -874,22 +878,22 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x5c:
+ case 0x5C:
AppendToBuffer("vsubsd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x5d:
+ case 0x5D:
AppendToBuffer("vminsd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x5e:
+ case 0x5E:
AppendToBuffer("vdivsd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x5f:
+ case 0x5F:
AppendToBuffer("vmaxsd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
@@ -917,30 +921,39 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x5b:
+ case 0x5B:
AppendToBuffer("vcvttps2dq %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
break;
- case 0x5c:
+ case 0x5C:
AppendToBuffer("vsubss %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x5d:
+ case 0x5D:
AppendToBuffer("vminss %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x5e:
+ case 0x5E:
AppendToBuffer("vdivss %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x5f:
+ case 0x5F:
AppendToBuffer("vmaxss %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
+ case 0x6f:
+ AppendToBuffer("vmovdqu %s,", NameOfXMMRegister(regop));
+ current += PrintRightOperand(current);
+ break;
+ case 0x7f:
+ AppendToBuffer("vmovdqu ");
+ current += PrintRightOperand(current);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ break;
default:
UnimplementedInstruction();
}
@@ -949,22 +962,22 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
get_modrm(*current, &mod, &regop, &rm);
const char* mnem = "?";
switch (opcode) {
- case 0xf2:
+ case 0xF2:
AppendToBuffer("andn %s,%s,", NameOfCPURegister(regop),
NameOfCPURegister(vvvv));
current += PrintRightOperand(current);
break;
- case 0xf5:
+ case 0xF5:
AppendToBuffer("bzhi %s,", NameOfCPURegister(regop));
current += PrintRightOperand(current);
AppendToBuffer(",%s", NameOfCPURegister(vvvv));
break;
- case 0xf7:
+ case 0xF7:
AppendToBuffer("bextr %s,", NameOfCPURegister(regop));
current += PrintRightOperand(current);
AppendToBuffer(",%s", NameOfCPURegister(vvvv));
break;
- case 0xf3:
+ case 0xF3:
switch (regop) {
case 1:
mnem = "blsr";
@@ -989,17 +1002,17 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
int mod, regop, rm, vvvv = vex_vreg();
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
- case 0xf5:
+ case 0xF5:
AppendToBuffer("pdep %s,%s,", NameOfCPURegister(regop),
NameOfCPURegister(vvvv));
current += PrintRightOperand(current);
break;
- case 0xf6:
+ case 0xF6:
AppendToBuffer("mulx %s,%s,", NameOfCPURegister(regop),
NameOfCPURegister(vvvv));
current += PrintRightOperand(current);
break;
- case 0xf7:
+ case 0xF7:
AppendToBuffer("shrx %s,", NameOfCPURegister(regop));
current += PrintRightOperand(current);
AppendToBuffer(",%s", NameOfCPURegister(vvvv));
@@ -1011,12 +1024,12 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
int mod, regop, rm, vvvv = vex_vreg();
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
- case 0xf5:
+ case 0xF5:
AppendToBuffer("pext %s,%s,", NameOfCPURegister(regop),
NameOfCPURegister(vvvv));
current += PrintRightOperand(current);
break;
- case 0xf7:
+ case 0xF7:
AppendToBuffer("sarx %s,", NameOfCPURegister(regop));
current += PrintRightOperand(current);
AppendToBuffer(",%s", NameOfCPURegister(vvvv));
@@ -1028,10 +1041,10 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
- case 0xf0:
+ case 0xF0:
AppendToBuffer("rorx %s,", NameOfCPURegister(regop));
current += PrintRightOperand(current);
- AppendToBuffer(",%d", *current & 0x1f);
+ AppendToBuffer(",%d", *current & 0x1F);
current += 1;
break;
default:
@@ -1041,6 +1054,10 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
int mod, regop, rm, vvvv = vex_vreg();
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
+ case 0x28:
+ AppendToBuffer("vmovaps %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ break;
case 0x52:
AppendToBuffer("vrsqrtps %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
@@ -1103,6 +1120,13 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
current++;
break;
}
+ case 0xC6:
+ AppendToBuffer("vshufps %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(", %d", (*current) & 3);
+ current += 1;
+ break;
default:
UnimplementedInstruction();
}
@@ -1451,12 +1475,12 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
} else if (*data == 0x2E /*cs*/) {
branch_hint = "predicted not taken";
data++;
- } else if (*data == 0xC4 && *(data + 1) >= 0xc0) {
+ } else if (*data == 0xC4 && *(data + 1) >= 0xC0) {
vex_byte0_ = *data;
vex_byte1_ = *(data + 1);
vex_byte2_ = *(data + 2);
data += 3;
- } else if (*data == 0xC5 && *(data + 1) >= 0xc0) {
+ } else if (*data == 0xC5 && *(data + 1) >= 0xC0) {
vex_byte0_ = *data;
vex_byte1_ = *(data + 1);
data += 2;
@@ -1628,7 +1652,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
AppendToBuffer("%s,", NameOfXMMRegister(regop));
data += PrintRightXMMOperand(data);
}
- } else if (f0byte == 0x2e) {
+ } else if (f0byte == 0x2E) {
data += 2;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
@@ -1732,6 +1756,9 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("%s %s,", f0mnem, NameOfCPURegister(regop));
data += PrintRightOperand(data);
+ } else if (f0byte == 0xAE && (data[2] & 0xF8) == 0xE8) {
+ AppendToBuffer("lfence");
+ data += 3;
} else {
UnimplementedInstruction();
}
@@ -1825,7 +1852,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
case 0x66: // prefix
while (*data == 0x66) data++;
- if (*data == 0xf && data[1] == 0x1f) {
+ if (*data == 0xF && data[1] == 0x1F) {
AppendToBuffer("nop"); // 0x66 prefix
} else if (*data == 0x39) {
data++;
@@ -1964,6 +1991,14 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
data += PrintRightOperand(data);
AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(data));
data++;
+ } else if (*data == 0x21) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("insertps %s,", NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(data));
+ data++;
} else if (*data == 0x22) {
data++;
int mod, regop, rm;
@@ -2261,6 +2296,9 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
case 0x5F:
mnem = "maxsd";
break;
+ case 0x7C:
+ mnem = "haddps";
+ break;
}
data += 3;
int mod, regop, rm;
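
A note on the disassembler hunks above: the new lfence case keys off the
shared 0F AE opcode group, where a ModRM byte in the range E8..EF selects
LFENCE; that is why the added branch masks data[2] with 0xF8 and compares
against 0xE8. A minimal standalone sketch of just that check (hypothetical
function and buffer, not the DisassemblerIA32 API):

    // Hedged sketch: recognize LFENCE (0F AE with ModRM E8..EF) the same way
    // the new disassembler branch does; real decoding handles far more cases.
    #include <cstdint>
    #include <cstdio>

    static bool IsLfence(const uint8_t* data) {
      // (data[2] & 0xF8) == 0xE8 accepts exactly the eight bytes E8..EF.
      return data[0] == 0x0F && data[1] == 0xAE && (data[2] & 0xF8) == 0xE8;
    }

    int main() {
      const uint8_t code[] = {0x0F, 0xAE, 0xE8};
      std::printf("%s\n", IsLfence(code) ? "lfence" : "unimplemented");
      return 0;
    }
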
diff --git a/deps/v8/src/ia32/interface-descriptors-ia32.cc b/deps/v8/src/ia32/interface-descriptors-ia32.cc
index f0f9ec0a30..9edad9a44c 100644
--- a/deps/v8/src/ia32/interface-descriptors-ia32.cc
+++ b/deps/v8/src/ia32/interface-descriptors-ia32.cc
@@ -44,8 +44,6 @@ const Register LoadDescriptor::SlotRegister() { return eax; }
const Register LoadWithVectorDescriptor::VectorRegister() { return ebx; }
-const Register LoadICProtoArrayDescriptor::HandlerRegister() { return edi; }
-
const Register StoreDescriptor::ReceiverRegister() { return edx; }
const Register StoreDescriptor::NameRegister() { return ecx; }
const Register StoreDescriptor::ValueRegister() { return eax; }
@@ -204,6 +202,11 @@ void TransitionElementsKindDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
+void AbortJSDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {edx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 850424293a..ebc8b39ab9 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -489,29 +489,29 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
void MacroAssembler::AssertSmi(Register object) {
if (emit_debug_code()) {
test(object, Immediate(kSmiTagMask));
- Check(equal, kOperandIsNotASmi);
+ Check(equal, AbortReason::kOperandIsNotASmi);
}
}
void MacroAssembler::AssertFixedArray(Register object) {
if (emit_debug_code()) {
test(object, Immediate(kSmiTagMask));
- Check(not_equal, kOperandIsASmiAndNotAFixedArray);
+ Check(not_equal, AbortReason::kOperandIsASmiAndNotAFixedArray);
Push(object);
CmpObjectType(object, FIXED_ARRAY_TYPE, object);
Pop(object);
- Check(equal, kOperandIsNotAFixedArray);
+ Check(equal, AbortReason::kOperandIsNotAFixedArray);
}
}
void MacroAssembler::AssertFunction(Register object) {
if (emit_debug_code()) {
test(object, Immediate(kSmiTagMask));
- Check(not_equal, kOperandIsASmiAndNotAFunction);
+ Check(not_equal, AbortReason::kOperandIsASmiAndNotAFunction);
Push(object);
CmpObjectType(object, JS_FUNCTION_TYPE, object);
Pop(object);
- Check(equal, kOperandIsNotAFunction);
+ Check(equal, AbortReason::kOperandIsNotAFunction);
}
}
@@ -519,11 +519,11 @@ void MacroAssembler::AssertFunction(Register object) {
void MacroAssembler::AssertBoundFunction(Register object) {
if (emit_debug_code()) {
test(object, Immediate(kSmiTagMask));
- Check(not_equal, kOperandIsASmiAndNotABoundFunction);
+ Check(not_equal, AbortReason::kOperandIsASmiAndNotABoundFunction);
Push(object);
CmpObjectType(object, JS_BOUND_FUNCTION_TYPE, object);
Pop(object);
- Check(equal, kOperandIsNotABoundFunction);
+ Check(equal, AbortReason::kOperandIsNotABoundFunction);
}
}
@@ -531,7 +531,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
if (!emit_debug_code()) return;
test(object, Immediate(kSmiTagMask));
- Check(not_equal, kOperandIsASmiAndNotAGeneratorObject);
+ Check(not_equal, AbortReason::kOperandIsASmiAndNotAGeneratorObject);
{
Push(object);
@@ -552,7 +552,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
Pop(object);
}
- Check(equal, kOperandIsNotAGeneratorObject);
+ Check(equal, AbortReason::kOperandIsNotAGeneratorObject);
}
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
@@ -563,7 +563,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
j(equal, &done_checking);
cmp(FieldOperand(object, 0),
Immediate(isolate()->factory()->allocation_site_map()));
- Assert(equal, kExpectedUndefinedOrCell);
+ Assert(equal, AbortReason::kExpectedUndefinedOrCell);
bind(&done_checking);
}
}
@@ -572,7 +572,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
void MacroAssembler::AssertNotSmi(Register object) {
if (emit_debug_code()) {
test(object, Immediate(kSmiTagMask));
- Check(not_equal, kOperandIsASmi);
+ Check(not_equal, AbortReason::kOperandIsASmi);
}
}
@@ -598,7 +598,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
}
if (emit_debug_code()) {
cmp(Operand(esp, 0), Immediate(isolate()->factory()->undefined_value()));
- Check(not_equal, kCodeObjectNotProperlyPatched);
+ Check(not_equal, AbortReason::kCodeObjectNotProperlyPatched);
}
}
@@ -606,7 +606,7 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) {
if (emit_debug_code()) {
cmp(Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset),
Immediate(StackFrame::TypeToMarker(type)));
- Check(equal, kStackFrameTypesMustMatch);
+ Check(equal, AbortReason::kStackFrameTypesMustMatch);
}
leave();
}
@@ -738,7 +738,8 @@ void MacroAssembler::LeaveExitFrameEpilogue() {
isolate());
mov(esi, Operand::StaticVariable(context_address));
#ifdef DEBUG
- mov(Operand::StaticVariable(context_address), Immediate(0));
+ mov(Operand::StaticVariable(context_address),
+ Immediate(Context::kInvalidContext));
#endif
// Clear the top frame.
@@ -757,9 +758,11 @@ void MacroAssembler::LeaveApiExitFrame() {
void MacroAssembler::PushStackHandler() {
// Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ push(Immediate(0)); // Padding.
+
// Link the current handler as the next handler.
ExternalReference handler_address(IsolateAddressId::kHandlerAddress,
isolate());
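
Note on the PushStackHandler change above: StackHandlerConstants::kSize grows
from one to two pointers because a zero padding word is now pushed before the
next-handler link, keeping kNextOffset == 0. A rough layout sketch
(illustrative struct, not V8's generated constants):

    // Rough sketch of the ia32 stack handler after the padding change: the
    // link to the previous handler stays at offset 0; the padding word,
    // pushed first, sits above it.
    #include <cstdint>

    struct StackHandlerLayout {
      uint32_t next;     // offset 0 (kNextOffset): previous handler, pushed last
      uint32_t padding;  // offset 4: the Immediate(0) pushed first
    };
    static_assert(sizeof(StackHandlerLayout) == 2 * sizeof(uint32_t),
                  "mirrors StackHandlerConstants::kSize == 2 * kPointerSize");
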
@@ -891,7 +894,7 @@ void TurboAssembler::PrepareForTailCall(
if (FLAG_debug_code) {
cmp(esp, new_sp_reg);
- Check(below, kStackAccessBelowStackPointer);
+ Check(below, AbortReason::kStackAccessBelowStackPointer);
}
// Copy return address from caller's frame to current frame's return address
@@ -1447,16 +1450,15 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
}
}
-
-void TurboAssembler::Assert(Condition cc, BailoutReason reason) {
+void TurboAssembler::Assert(Condition cc, AbortReason reason) {
if (emit_debug_code()) Check(cc, reason);
}
-void TurboAssembler::AssertUnreachable(BailoutReason reason) {
+void TurboAssembler::AssertUnreachable(AbortReason reason) {
if (emit_debug_code()) Abort(reason);
}
-void TurboAssembler::Check(Condition cc, BailoutReason reason) {
+void TurboAssembler::Check(Condition cc, AbortReason reason) {
Label L;
j(cc, &L);
Abort(reason);
@@ -1478,9 +1480,9 @@ void TurboAssembler::CheckStackAlignment() {
}
}
-void TurboAssembler::Abort(BailoutReason reason) {
+void TurboAssembler::Abort(AbortReason reason) {
#ifdef DEBUG
- const char* msg = GetBailoutReason(reason);
+ const char* msg = GetAbortReason(reason);
if (msg != nullptr) {
RecordComment("Abort message: ");
RecordComment(msg);
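
The BailoutReason to AbortReason rename threads through every Check, Assert,
and Abort call in this file. The consuming pattern is a scoped enum plus a
reason-to-message lookup used only for the debug comment; roughly (a sketch
with a few sample values, not the verbatim V8 definitions):

    // Sketch of the pattern behind the rename: AbortReason is an enum class,
    // so call sites must spell AbortReason::k..., and GetAbortReason() maps a
    // value to the message recorded before aborting.
    enum class AbortReason {
      kOperandIsNotASmi,
      kOperandIsASmiAndNotAFunction,
      kStackFrameTypesMustMatch,
    };

    const char* GetAbortReason(AbortReason reason) {
      switch (reason) {
        case AbortReason::kOperandIsNotASmi:
          return "Operand is not a smi";
        case AbortReason::kOperandIsASmiAndNotAFunction:
          return "Operand is a smi and not a function";
        case AbortReason::kStackFrameTypesMustMatch:
          return "Stack frame types must match";
      }
      return nullptr;
    }
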
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index 342281d0b3..6242333847 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -73,18 +73,18 @@ class TurboAssembler : public Assembler {
void LeaveFrame(StackFrame::Type type);
// Print a message to stdout and abort execution.
- void Abort(BailoutReason reason);
+ void Abort(AbortReason reason);
// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
- void Assert(Condition cc, BailoutReason reason);
+ void Assert(Condition cc, AbortReason reason);
// Like Assert(), but without condition.
// Use --debug_code to enable.
- void AssertUnreachable(BailoutReason reason);
+ void AssertUnreachable(AbortReason reason);
// Like Assert(), but always enabled.
- void Check(Condition cc, BailoutReason reason);
+ void Check(Condition cc, AbortReason reason);
// Check that the stack is aligned.
void CheckStackAlignment();
@@ -214,6 +214,8 @@ class TurboAssembler : public Assembler {
} \
}
+ AVX_OP2_WITH_TYPE(Movdqu, movdqu, XMMRegister, const Operand&)
+ AVX_OP2_WITH_TYPE(Movdqu, movdqu, const Operand&, XMMRegister)
AVX_OP2_WITH_TYPE(Movd, movd, XMMRegister, Register)
AVX_OP2_WITH_TYPE(Movd, movd, XMMRegister, const Operand&)
AVX_OP2_WITH_TYPE(Movd, movd, Register, XMMRegister)
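
The two added AVX_OP2_WITH_TYPE lines generate Movdqu overloads for both
operand orders. Judging from the macro tail visible in the hunk, each
instantiation expands to roughly the following member function (a sketch of
the expansion, assuming the usual CpuFeatures/CpuFeatureScope API):

    // Approximate expansion of
    //   AVX_OP2_WITH_TYPE(Movdqu, movdqu, XMMRegister, const Operand&)
    // inside TurboAssembler: emit the VEX form when AVX is available,
    // otherwise the SSE2 form, so callers never branch on the CPU feature.
    void Movdqu(XMMRegister dst, const Operand& src) {
      if (CpuFeatures::IsSupported(AVX)) {
        CpuFeatureScope scope(this, AVX);
        vmovdqu(dst, src);
      } else {
        movdqu(dst, src);
      }
    }
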
diff --git a/deps/v8/src/ia32/simulator-ia32.h b/deps/v8/src/ia32/simulator-ia32.h
index 076bde83e6..a55c1fefb8 100644
--- a/deps/v8/src/ia32/simulator-ia32.h
+++ b/deps/v8/src/ia32/simulator-ia32.h
@@ -5,50 +5,6 @@
#ifndef V8_IA32_SIMULATOR_IA32_H_
#define V8_IA32_SIMULATOR_IA32_H_
-#include "src/allocation.h"
-
-namespace v8 {
-namespace internal {
-
-// Since there is no simulator for the ia32 architecture the only thing we can
-// do is to call the entry directly.
-#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
- (entry(p0, p1, p2, p3, p4))
-
-
-typedef int (*regexp_matcher)(String*, int, const byte*,
- const byte*, int*, int, Address, int, Isolate*);
-
-// Call the generated regexp code directly. The code at the entry address should
-// expect eight int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
- p7, p8) \
- (FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, p8))
-
-
-// The stack limit beyond which we will throw stack overflow errors in
-// generated code. Because generated code on ia32 uses the C stack, we
-// just use the C stack limit.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
- uintptr_t c_limit) {
- USE(isolate);
- return c_limit;
- }
-
- static inline uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
- uintptr_t try_catch_address) {
- USE(isolate);
- return try_catch_address;
- }
-
- static inline void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
- USE(isolate);
- }
-};
-
-} // namespace internal
-} // namespace v8
+// Since there is no simulator for the ia32 architecture, this file is empty.
#endif // V8_IA32_SIMULATOR_IA32_H_
diff --git a/deps/v8/src/ic/accessor-assembler.cc b/deps/v8/src/ic/accessor-assembler.cc
index c4852d860d..dfd88862bd 100644
--- a/deps/v8/src/ic/accessor-assembler.cc
+++ b/deps/v8/src/ic/accessor-assembler.cc
@@ -20,6 +20,41 @@ using compiler::Node;
//////////////////// Private helpers.
+// Loads the dataX field from the DataHandler object.
+Node* AccessorAssembler::LoadHandlerDataField(Node* handler, int data_index) {
+#ifdef DEBUG
+ Node* handler_map = LoadMap(handler);
+ Node* instance_type = LoadMapInstanceType(handler_map);
+#endif
+ CSA_ASSERT(this,
+ Word32Or(InstanceTypeEqual(instance_type, LOAD_HANDLER_TYPE),
+ InstanceTypeEqual(instance_type, STORE_HANDLER_TYPE)));
+ int offset = 0;
+ int minimum_size = 0;
+ switch (data_index) {
+ case 1:
+ offset = DataHandler::kData1Offset;
+ minimum_size = DataHandler::kSizeWithData1;
+ break;
+ case 2:
+ offset = DataHandler::kData2Offset;
+ minimum_size = DataHandler::kSizeWithData2;
+ break;
+ case 3:
+ offset = DataHandler::kData3Offset;
+ minimum_size = DataHandler::kSizeWithData3;
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ USE(minimum_size);
+ CSA_ASSERT(this, UintPtrGreaterThanOrEqual(
+ LoadMapInstanceSizeInWords(handler_map),
+ IntPtrConstant(minimum_size / kPointerSize)));
+ return LoadObjectField(handler, offset);
+}
+
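
The new LoadHandlerDataField helper assumes a DataHandler layout where
data1..data3 are optional trailing slots, so the asserted minimum instance
size grows with the requested index. Schematically (offsets are an
illustration for a 32-bit heap, not the generated layout constants):

    // Illustrative DataHandler shape assumed by LoadHandlerDataField(): the
    // smi handler and validity cell are always present, data1..data3 are
    // optional, so kSizeWithDataN is the minimum object size when dataN
    // exists.
    constexpr int kPointerSize = 4;  // ia32
    constexpr int kSmiHandlerOffset   = 1 * kPointerSize;  // after the map word
    constexpr int kValidityCellOffset = 2 * kPointerSize;
    constexpr int kData1Offset        = 3 * kPointerSize;
    constexpr int kData2Offset        = 4 * kPointerSize;
    constexpr int kData3Offset        = 5 * kPointerSize;
    constexpr int kSizeWithData1      = kData1Offset + kPointerSize;
    constexpr int kSizeWithData2      = kData2Offset + kPointerSize;
    constexpr int kSizeWithData3      = kData3Offset + kPointerSize;
    static_assert(kSizeWithData3 / kPointerSize == 6, "map word + 5 fields");
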
Node* AccessorAssembler::TryMonomorphicCase(Node* slot, Node* vector,
Node* receiver_map,
Label* if_handler,
@@ -128,10 +163,11 @@ void AccessorAssembler::HandlePolymorphicCase(Node* receiver_map,
void AccessorAssembler::HandleLoadICHandlerCase(
const LoadICParameters* p, Node* handler, Label* miss,
- ExitPoint* exit_point, ElementSupport support_elements) {
+ ExitPoint* exit_point, ICMode ic_mode, OnNonExistent on_nonexistent,
+ ElementSupport support_elements) {
Comment("have_handler");
- VARIABLE(var_holder, MachineRepresentation::kTagged, p->receiver);
+ VARIABLE(var_holder, MachineRepresentation::kTagged, p->holder);
VARIABLE(var_smi_handler, MachineRepresentation::kTagged, handler);
Variable* vars[] = {&var_holder, &var_smi_handler};
@@ -146,14 +182,15 @@ void AccessorAssembler::HandleLoadICHandlerCase(
BIND(&if_smi_handler);
{
HandleLoadICSmiHandlerCase(p, var_holder.value(), var_smi_handler.value(),
- miss, exit_point, false, support_elements);
+ handler, miss, exit_point, on_nonexistent,
+ support_elements);
}
BIND(&try_proto_handler);
{
GotoIf(IsCodeMap(LoadMap(handler)), &call_handler);
- HandleLoadICProtoHandlerCase(p, handler, &var_holder, &var_smi_handler,
- &if_smi_handler, miss, exit_point, false);
+ HandleLoadICProtoHandler(p, handler, &var_holder, &var_smi_handler,
+ &if_smi_handler, miss, exit_point, ic_mode);
}
BIND(&call_handler);
@@ -220,8 +257,8 @@ Node* AccessorAssembler::LoadDescriptorValue(Node* map, Node* descriptor) {
}
void AccessorAssembler::HandleLoadICSmiHandlerCase(
- const LoadICParameters* p, Node* holder, Node* smi_handler, Label* miss,
- ExitPoint* exit_point, bool throw_reference_error_if_nonexistent,
+ const LoadICParameters* p, Node* holder, Node* smi_handler, Node* handler,
+ Label* miss, ExitPoint* exit_point, OnNonExistent on_nonexistent,
ElementSupport support_elements) {
VARIABLE(var_double_value, MachineRepresentation::kFloat64);
Label rebox_double(this, &var_double_value);
@@ -301,8 +338,8 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
Node* intptr_index = TryToIntptr(p->name, miss);
Node* length = LoadStringLengthAsWord(holder);
GotoIf(UintPtrGreaterThanOrEqual(intptr_index, length), &if_oob);
- Node* code = StringCharCodeAt(holder, intptr_index);
- Node* result = StringFromCharCode(code);
+ TNode<Int32T> code = StringCharCodeAt(holder, intptr_index);
+ TNode<String> result = StringFromCharCode(code);
Return(result);
BIND(&if_oob);
@@ -361,10 +398,11 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
BIND(&nonexistent);
// This is a handler for a load of a non-existent value.
- if (throw_reference_error_if_nonexistent) {
+ if (on_nonexistent == OnNonExistent::kThrowReferenceError) {
exit_point->ReturnCallRuntime(Runtime::kThrowReferenceError, p->context,
p->name);
} else {
+ DCHECK_EQ(OnNonExistent::kReturnUndefined, on_nonexistent);
exit_point->Return(UndefinedConstant());
}
@@ -424,11 +462,18 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
BIND(&api_getter);
{
Comment("api_getter");
- Node* context = LoadWeakCellValueUnchecked(
- LoadObjectField(holder, Tuple2::kValue1Offset));
- Node* call_handler_info = LoadWeakCellValueUnchecked(
- LoadObjectField(holder, Tuple2::kValue2Offset));
-
+ CSA_ASSERT(this, TaggedIsNotSmi(handler));
+ Node* call_handler_info = holder;
+
+      // The context is stored in either the data2 or data3 field, depending
+      // on whether the access check is enabled for this handler.
+ Node* context_cell = Select(
+ IsSetWord<LoadHandler::DoAccessCheckOnReceiverBits>(handler_word),
+ [=] { return LoadHandlerDataField(handler, 3); },
+ [=] { return LoadHandlerDataField(handler, 2); },
+ MachineRepresentation::kTagged);
+
+ Node* context = LoadWeakCellValueUnchecked(context_cell);
Node* foreign =
LoadObjectField(call_handler_info, CallHandlerInfo::kJsCallbackOffset);
Node* callback = LoadObjectField(foreign, Foreign::kForeignAddressOffset,
@@ -538,104 +583,165 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
exit_point->Return(AllocateHeapNumberWithValue(var_double_value.value()));
}
-void AccessorAssembler::HandleLoadICProtoHandlerCase(
- const LoadICParameters* p, Node* handler, Variable* var_holder,
- Variable* var_smi_handler, Label* if_smi_handler, Label* miss,
- ExitPoint* exit_point, bool throw_reference_error_if_nonexistent) {
- DCHECK_EQ(MachineRepresentation::kTagged, var_holder->rep());
- DCHECK_EQ(MachineRepresentation::kTagged, var_smi_handler->rep());
-
- // IC dispatchers rely on these assumptions to be held.
- STATIC_ASSERT(FixedArray::kLengthOffset == LoadHandler::kDataOffset);
- DCHECK_EQ(FixedArray::OffsetOfElementAt(LoadHandler::kSmiHandlerIndex),
- LoadHandler::kSmiHandlerOffset);
- DCHECK_EQ(FixedArray::OffsetOfElementAt(LoadHandler::kValidityCellIndex),
- LoadHandler::kValidityCellOffset);
-
- // Both FixedArray and Tuple3 handlers have validity cell at the same offset.
- Label validity_cell_check_done(this);
- Node* validity_cell =
- LoadObjectField(handler, LoadHandler::kValidityCellOffset);
- GotoIf(WordEqual(validity_cell, IntPtrConstant(0)),
- &validity_cell_check_done);
- Node* cell_value = LoadObjectField(validity_cell, Cell::kValueOffset);
- GotoIf(WordNotEqual(cell_value, SmiConstant(Map::kPrototypeChainValid)),
- miss);
- Goto(&validity_cell_check_done);
-
- BIND(&validity_cell_check_done);
- Node* smi_handler = LoadObjectField(handler, LoadHandler::kSmiHandlerOffset);
- CSA_ASSERT(this, TaggedIsSmi(smi_handler));
- Node* handler_flags = SmiUntag(smi_handler);
-
- Label check_prototypes(this);
- GotoIfNot(IsSetWord<LoadHandler::LookupOnReceiverBits>(handler_flags),
- &check_prototypes);
+// Performs actions common to both load and store handlers:
+// 1. Checks the prototype validity cell.
+// 2. If |on_code_handler| is provided, checks whether the sub handler is a
+//    smi or code and, if it is code, calls |on_code_handler| to generate
+//    code that handles Code handlers.
+//    If |on_code_handler| is not provided, then only smi sub handlers are
+//    expected.
+// 3. Does an access check on the receiver if the
+//    ICHandler::DoAccessCheckOnReceiverBits bit is set in the smi handler.
+// 4. Does a dictionary lookup on the receiver if the
+//    ICHandler::LookupOnReceiverBits bit is set in the smi handler. If
+//    |on_found_on_receiver| is provided, it is called to generate code that
+//    handles the "found on receiver" case; otherwise that case just misses.
+// 5. Falls through in the case of a smi handler, which is returned from this
+//    function (tagged!).
+// TODO(ishell): Remove templatization once we move common bits from
+// Load/StoreHandler to the base class.
+template <typename ICHandler, typename ICParameters>
+Node* AccessorAssembler::HandleProtoHandler(
+ const ICParameters* p, Node* handler, const OnCodeHandler& on_code_handler,
+ const OnFoundOnReceiver& on_found_on_receiver, Label* miss,
+ ICMode ic_mode) {
+ //
+ // Check prototype validity cell.
+ //
{
- CSA_ASSERT(this, Word32BinaryNot(
- HasInstanceType(p->receiver, JS_GLOBAL_OBJECT_TYPE)));
- Node* properties = LoadSlowProperties(p->receiver);
- VARIABLE(var_name_index, MachineType::PointerRepresentation());
- Label found(this, &var_name_index);
- NameDictionaryLookup<NameDictionary>(properties, p->name, &found,
- &var_name_index, &check_prototypes);
- BIND(&found);
- {
- VARIABLE(var_details, MachineRepresentation::kWord32);
- VARIABLE(var_value, MachineRepresentation::kTagged);
- LoadPropertyFromNameDictionary(properties, var_name_index.value(),
- &var_details, &var_value);
- Node* value = CallGetterIfAccessor(var_value.value(), var_details.value(),
- p->context, p->receiver, miss);
- exit_point->Return(value);
- }
+ Label done(this);
+ Node* validity_cell =
+ LoadObjectField(handler, ICHandler::kValidityCellOffset);
+ GotoIf(WordEqual(validity_cell, SmiConstant(0)), &done);
+ Node* cell_value = LoadObjectField(validity_cell, Cell::kValueOffset);
+ GotoIf(WordNotEqual(cell_value, SmiConstant(Map::kPrototypeChainValid)),
+ miss);
+ Goto(&done);
+ BIND(&done);
}
- BIND(&check_prototypes);
- Node* maybe_holder_cell = LoadObjectField(handler, LoadHandler::kDataOffset);
- Label array_handler(this), tuple_handler(this);
- Branch(TaggedIsSmi(maybe_holder_cell), &array_handler, &tuple_handler);
-
- BIND(&tuple_handler);
+ //
+ // Check smi handler bits.
+ //
{
- Label load_from_cached_holder(this), done(this);
+ Node* smi_or_code_handler =
+ LoadObjectField(handler, ICHandler::kSmiHandlerOffset);
+ if (on_code_handler) {
+ Label if_smi_handler(this);
+ GotoIf(TaggedIsSmi(smi_or_code_handler), &if_smi_handler);
- Branch(IsNull(maybe_holder_cell), &done, &load_from_cached_holder);
+ CSA_ASSERT(this, IsCodeMap(LoadMap(smi_or_code_handler)));
+ on_code_handler(smi_or_code_handler);
- BIND(&load_from_cached_holder);
- {
- Label unwrap_cell(this), bind_holder(this);
- Branch(IsWeakCell(maybe_holder_cell), &unwrap_cell, &bind_holder);
+ BIND(&if_smi_handler);
+ } else {
+ CSA_ASSERT(this, TaggedIsSmi(smi_or_code_handler));
+ }
+ Node* handler_flags = SmiUntag(smi_or_code_handler);
+
+    // Lookup on the receiver and access checks are not necessary for global
+    // ICs: in the former case the validity cell check guards modifications
+    // of the global object, and in the latter case access checks are not
+    // applicable to the global object.
+ int mask = ICHandler::LookupOnReceiverBits::kMask |
+ ICHandler::DoAccessCheckOnReceiverBits::kMask;
+ if (ic_mode == ICMode::kGlobalIC) {
+ CSA_ASSERT(this, IsClearWord(handler_flags, mask));
+ } else {
+ DCHECK_EQ(ICMode::kNonGlobalIC, ic_mode);
- BIND(&unwrap_cell);
- {
- // For regular holders, having passed the receiver map check and the
- // validity cell check implies that |holder| is alive. However, for
- // global object receivers, the |maybe_holder_cell| may be cleared.
- Node* holder = LoadWeakCellValue(maybe_holder_cell, miss);
+ Label done(this), if_do_access_check(this), if_lookup_on_receiver(this);
+ GotoIf(IsClearWord(handler_flags, mask), &done);
+ // Only one of the bits can be set at a time.
+ CSA_ASSERT(this,
+ WordNotEqual(WordAnd(handler_flags, IntPtrConstant(mask)),
+ IntPtrConstant(mask)));
+ Branch(IsSetWord<LoadHandler::DoAccessCheckOnReceiverBits>(handler_flags),
+ &if_do_access_check, &if_lookup_on_receiver);
- var_holder->Bind(holder);
- Goto(&done);
+ BIND(&if_do_access_check);
+ {
+ Node* data2 = LoadHandlerDataField(handler, 2);
+ Node* expected_native_context = LoadWeakCellValue(data2, miss);
+ EmitAccessCheck(expected_native_context, p->context, p->receiver, &done,
+ miss);
}
- BIND(&bind_holder);
+    // A dictionary lookup on the receiver is not necessary for
+    // Load/StoreGlobalIC because the prototype validity cell check already
+    // guards modifications of the global object.
+ BIND(&if_lookup_on_receiver);
{
- var_holder->Bind(maybe_holder_cell);
- Goto(&done);
+ DCHECK_EQ(ICMode::kNonGlobalIC, ic_mode);
+ CSA_ASSERT(this, Word32BinaryNot(HasInstanceType(
+ p->receiver, JS_GLOBAL_OBJECT_TYPE)));
+
+ Node* properties = LoadSlowProperties(p->receiver);
+ VARIABLE(var_name_index, MachineType::PointerRepresentation());
+ Label found(this, &var_name_index);
+ NameDictionaryLookup<NameDictionary>(properties, p->name, &found,
+ &var_name_index, &done);
+ BIND(&found);
+ {
+ if (on_found_on_receiver) {
+ on_found_on_receiver(properties, var_name_index.value());
+ } else {
+ Goto(miss);
+ }
+ }
}
+
+ BIND(&done);
}
+ return smi_or_code_handler;
+ }
+}
- BIND(&done);
- var_smi_handler->Bind(smi_handler);
- Goto(if_smi_handler);
+void AccessorAssembler::HandleLoadICProtoHandler(
+ const LoadICParameters* p, Node* handler, Variable* var_holder,
+ Variable* var_smi_handler, Label* if_smi_handler, Label* miss,
+ ExitPoint* exit_point, ICMode ic_mode) {
+ DCHECK_EQ(MachineRepresentation::kTagged, var_holder->rep());
+ DCHECK_EQ(MachineRepresentation::kTagged, var_smi_handler->rep());
+
+ Node* smi_handler = HandleProtoHandler<LoadHandler>(
+ p, handler,
+ // Code sub-handlers are not expected in LoadICs, so no |on_code_handler|.
+ nullptr,
+ // on_found_on_receiver
+ [=](Node* properties, Node* name_index) {
+ VARIABLE(var_details, MachineRepresentation::kWord32);
+ VARIABLE(var_value, MachineRepresentation::kTagged);
+ LoadPropertyFromNameDictionary(properties, name_index, &var_details,
+ &var_value);
+ Node* value =
+ CallGetterIfAccessor(var_value.value(), var_details.value(),
+ p->context, p->receiver, miss);
+ exit_point->Return(value);
+ },
+ miss, ic_mode);
+
+ Node* maybe_holder_cell = LoadHandlerDataField(handler, 1);
+
+ Label load_from_cached_holder(this), done(this);
+
+ Branch(IsNull(maybe_holder_cell), &done, &load_from_cached_holder);
+
+ BIND(&load_from_cached_holder);
+ {
+ // For regular holders, having passed the receiver map check and the
+ // validity cell check implies that |holder| is alive. However, for
+ // global object receivers, the |maybe_holder_cell| may be cleared.
+ Node* holder = LoadWeakCellValue(maybe_holder_cell, miss);
+
+ var_holder->Bind(holder);
+ Goto(&done);
}
- BIND(&array_handler);
+ BIND(&done);
{
- exit_point->ReturnCallStub(
- CodeFactory::LoadICProtoArray(isolate(),
- throw_reference_error_if_nonexistent),
- p->context, p->receiver, p->name, p->slot, p->vector, handler);
+ var_smi_handler->Bind(smi_handler);
+ Goto(if_smi_handler);
}
}
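
Both proto-handler paths now funnel through the validity-cell check at the
top of HandleProtoHandler: Smi zero means no check is required; otherwise the
cell's value must still equal Map::kPrototypeChainValid or the IC misses. As
plain control flow (stand-in types and values; the real code is CSA nodes):

    // Stand-in illustration of the prototype validity protocol: a handler
    // either stores Smi zero (nothing to check) or a Cell whose value is
    // moved away from kPrototypeChainValid when a prototype in the chain is
    // mutated (see InvalidateValidityCellIfPrototype later in this file).
    #include <cassert>

    constexpr int kPrototypeChainValid = 0;  // illustrative value

    struct Cell { int value; };

    // nullptr stands in for the "validity cell is Smi zero" case.
    bool PrototypeChainStillValid(const Cell* validity_cell) {
      if (validity_cell == nullptr) return true;  // no cell: nothing to check
      return validity_cell->value == kPrototypeChainValid;
    }

    int main() {
      Cell cell{kPrototypeChainValid};
      assert(PrototypeChainStillValid(nullptr));
      assert(PrototypeChainStillValid(&cell));
      cell.value = 1;  // invalidation flips the cell; dependent ICs now miss
      assert(!PrototypeChainStillValid(&cell));
      return 0;
    }
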
@@ -657,94 +763,6 @@ void AccessorAssembler::EmitAccessCheck(Node* expected_native_context,
Branch(WordEqual(expected_token, current_token), can_access, miss);
}
-Node* AccessorAssembler::EmitLoadICProtoArrayCheck(const LoadICParameters* p,
- Node* handler,
- Node* handler_length,
- Node* handler_flags,
- Label* miss) {
- VARIABLE(var_start_index, MachineType::PointerRepresentation(),
- IntPtrConstant(LoadHandler::kFirstPrototypeIndex));
-
- Label can_access(this);
- GotoIfNot(IsSetWord<LoadHandler::DoAccessCheckOnReceiverBits>(handler_flags),
- &can_access);
- {
- // Skip this entry of a handler.
- var_start_index.Bind(IntPtrConstant(LoadHandler::kFirstPrototypeIndex + 1));
-
- int offset =
- FixedArray::OffsetOfElementAt(LoadHandler::kFirstPrototypeIndex);
- Node* expected_native_context =
- LoadWeakCellValue(LoadObjectField(handler, offset), miss);
-
- EmitAccessCheck(expected_native_context, p->context, p->receiver,
- &can_access, miss);
- }
- BIND(&can_access);
-
- BuildFastLoop(var_start_index.value(), handler_length,
- [=](Node* current) {
- Node* prototype_cell =
- LoadFixedArrayElement(handler, current);
- CheckPrototype(prototype_cell, p->name, miss);
- },
- 1, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
-
- Node* maybe_holder_cell =
- LoadFixedArrayElement(handler, LoadHandler::kDataIndex);
-
- VARIABLE(var_holder, MachineRepresentation::kTagged, p->receiver);
- Label done(this);
- GotoIf(IsNull(maybe_holder_cell), &done);
-
- {
- Label unwrap_cell(this), bind_holder(this);
- Branch(IsWeakCell(maybe_holder_cell), &unwrap_cell, &bind_holder);
-
- BIND(&unwrap_cell);
- {
- // For regular holders, having passed the receiver map check and the
- // validity cell check implies that |holder| is alive. However, for
- // global object receivers, the |maybe_holder_cell| may be cleared.
- Node* holder = LoadWeakCellValue(maybe_holder_cell, miss);
-
- var_holder.Bind(holder);
- Goto(&done);
- }
-
- BIND(&bind_holder);
- {
- var_holder.Bind(maybe_holder_cell);
- Goto(&done);
- }
- }
-
- BIND(&done);
- return var_holder.value();
-}
-
-void AccessorAssembler::HandleLoadGlobalICHandlerCase(
- const LoadICParameters* pp, Node* handler, Label* miss,
- ExitPoint* exit_point, bool throw_reference_error_if_nonexistent) {
- LoadICParameters p = *pp;
- DCHECK_NULL(p.receiver);
- Node* native_context = LoadNativeContext(p.context);
- p.receiver = LoadContextElement(native_context, Context::GLOBAL_PROXY_INDEX);
-
- VARIABLE(var_holder, MachineRepresentation::kTagged,
- LoadContextElement(native_context, Context::EXTENSION_INDEX));
- VARIABLE(var_smi_handler, MachineRepresentation::kTagged);
- Label if_smi_handler(this);
-
- HandleLoadICProtoHandlerCase(&p, handler, &var_holder, &var_smi_handler,
- &if_smi_handler, miss, exit_point,
- throw_reference_error_if_nonexistent);
- BIND(&if_smi_handler);
- HandleLoadICSmiHandlerCase(
- &p, var_holder.value(), var_smi_handler.value(), miss, exit_point,
- throw_reference_error_if_nonexistent, kOnlyProperties);
-}
-
void AccessorAssembler::JumpIfDataProperty(Node* details, Label* writable,
Label* readonly) {
// Accessor properties never have the READ_ONLY attribute set.
@@ -768,7 +786,7 @@ void AccessorAssembler::HandleStoreICNativeDataProperty(
}
void AccessorAssembler::HandleStoreICHandlerCase(
- const StoreICParameters* p, Node* handler, Label* miss,
+ const StoreICParameters* p, Node* handler, Label* miss, ICMode ic_mode,
ElementSupport support_elements) {
Label if_smi_handler(this), if_nonsmi_handler(this);
Label if_proto_handler(this), if_element_handler(this), call_handler(this),
@@ -848,20 +866,12 @@ void AccessorAssembler::HandleStoreICHandlerCase(
BIND(&if_nonsmi_handler);
{
Node* handler_map = LoadMap(handler);
- if (support_elements == kSupportElements) {
- GotoIf(IsTuple2Map(handler_map), &if_element_handler);
- }
GotoIf(IsWeakCellMap(handler_map), &store_global);
Branch(IsCodeMap(handler_map), &call_handler, &if_proto_handler);
}
- if (support_elements == kSupportElements) {
- BIND(&if_element_handler);
- HandleStoreICElementHandlerCase(p, handler, miss);
- }
-
BIND(&if_proto_handler);
- HandleStoreICProtoHandler(p, handler, miss, support_elements);
+ HandleStoreICProtoHandler(p, handler, miss, ic_mode, support_elements);
// |handler| is a heap object. Must be code, call it.
BIND(&call_handler);
@@ -881,22 +891,6 @@ void AccessorAssembler::HandleStoreICHandlerCase(
}
}
-void AccessorAssembler::HandleStoreICElementHandlerCase(
- const StoreICParameters* p, Node* handler, Label* miss) {
- Comment("HandleStoreICElementHandlerCase");
- Node* validity_cell = LoadObjectField(handler, Tuple2::kValue1Offset);
- Node* cell_value = LoadObjectField(validity_cell, Cell::kValueOffset);
- GotoIf(WordNotEqual(cell_value, SmiConstant(Map::kPrototypeChainValid)),
- miss);
-
- Node* code_handler = LoadObjectField(handler, Tuple2::kValue2Offset);
- CSA_ASSERT(this, IsCodeMap(LoadMap(code_handler)));
-
- StoreWithVectorDescriptor descriptor(isolate());
- TailCallStub(descriptor, code_handler, p->context, p->receiver, p->name,
- p->value, p->slot, p->vector);
-}
-
void AccessorAssembler::HandleStoreAccessor(const StoreICParameters* p,
Node* holder, Node* handler_word) {
Comment("accessor_store");
@@ -911,125 +905,75 @@ void AccessorAssembler::HandleStoreAccessor(const StoreICParameters* p,
}
void AccessorAssembler::HandleStoreICProtoHandler(
- const StoreICParameters* p, Node* handler, Label* miss,
+ const StoreICParameters* p, Node* handler, Label* miss, ICMode ic_mode,
ElementSupport support_elements) {
Comment("HandleStoreICProtoHandler");
- // IC dispatchers rely on these assumptions to be held.
- STATIC_ASSERT(FixedArray::kLengthOffset == StoreHandler::kDataOffset);
- DCHECK_EQ(FixedArray::OffsetOfElementAt(StoreHandler::kSmiHandlerIndex),
- StoreHandler::kSmiHandlerOffset);
- DCHECK_EQ(FixedArray::OffsetOfElementAt(StoreHandler::kValidityCellIndex),
- StoreHandler::kValidityCellOffset);
-
- // Both FixedArray and Tuple3 handlers have validity cell at the same offset.
- Label validity_cell_check_done(this);
- Node* validity_cell =
- LoadObjectField(handler, StoreHandler::kValidityCellOffset);
- GotoIf(WordEqual(validity_cell, IntPtrConstant(0)),
- &validity_cell_check_done);
- Node* cell_value = LoadObjectField(validity_cell, Cell::kValueOffset);
- GotoIf(WordNotEqual(cell_value, SmiConstant(Map::kPrototypeChainValid)),
- miss);
- Goto(&validity_cell_check_done);
-
- BIND(&validity_cell_check_done);
- Node* smi_or_code = LoadObjectField(handler, StoreHandler::kSmiHandlerOffset);
-
- Node* maybe_transition_cell =
- LoadObjectField(handler, StoreHandler::kDataOffset);
- Label array_handler(this), do_store(this);
-
- VARIABLE(var_transition_map_or_holder, MachineRepresentation::kTagged,
- maybe_transition_cell);
-
- Branch(TaggedIsSmi(maybe_transition_cell), &array_handler, &do_store);
-
- BIND(&array_handler);
- {
- VARIABLE(var_start_index, MachineType::PointerRepresentation(),
- IntPtrConstant(StoreHandler::kFirstPrototypeIndex));
-
- Comment("array_handler");
- Label can_access(this);
- // Only Tuple3 handlers are allowed to have code handlers.
- CSA_ASSERT(this, TaggedIsSmi(smi_or_code));
- GotoIfNot(
- IsSetSmi(smi_or_code, StoreHandler::DoAccessCheckOnReceiverBits::kMask),
- &can_access);
+ OnCodeHandler on_code_handler;
+ if (support_elements == kSupportElements) {
+ // Code sub-handlers are expected only in KeyedStoreICs.
+ on_code_handler = [=](Node* code_handler) {
+      // This is either an element store or a transitioning element store.
+ Label if_element_store(this), if_transitioning_element_store(this);
+ Branch(IsStoreHandler0Map(LoadMap(handler)), &if_element_store,
+ &if_transitioning_element_store);
+ BIND(&if_element_store);
+ {
+ StoreWithVectorDescriptor descriptor(isolate());
+ TailCallStub(descriptor, code_handler, p->context, p->receiver, p->name,
+ p->value, p->slot, p->vector);
+ }
- {
- // Skip this entry of a handler.
- var_start_index.Bind(
- IntPtrConstant(StoreHandler::kFirstPrototypeIndex + 1));
+ BIND(&if_transitioning_element_store);
+ {
+ Node* transition_map_cell = LoadHandlerDataField(handler, 1);
+ Node* transition_map = LoadWeakCellValue(transition_map_cell, miss);
+ CSA_ASSERT(this, IsMap(transition_map));
- int offset =
- FixedArray::OffsetOfElementAt(StoreHandler::kFirstPrototypeIndex);
- Node* expected_native_context =
- LoadWeakCellValue(LoadObjectField(handler, offset), miss);
+ GotoIf(IsDeprecatedMap(transition_map), miss);
- EmitAccessCheck(expected_native_context, p->context, p->receiver,
- &can_access, miss);
- }
- BIND(&can_access);
+ StoreTransitionDescriptor descriptor(isolate());
+ TailCallStub(descriptor, code_handler, p->context, p->receiver, p->name,
+ transition_map, p->value, p->slot, p->vector);
+ }
+ };
+ }
- Node* length = SmiUntag(maybe_transition_cell);
- BuildFastLoop(var_start_index.value(), length,
- [=](Node* current) {
- Node* prototype_cell =
- LoadFixedArrayElement(handler, current);
- CheckPrototype(prototype_cell, p->name, miss);
- },
- 1, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+ Node* smi_handler = HandleProtoHandler<StoreHandler>(
+ p, handler, on_code_handler,
+ // on_found_on_receiver
+ [=](Node* properties, Node* name_index) {
+ // TODO(ishell): combine with |found| case inside |if_store_normal|.
+ Node* details =
+ LoadDetailsByKeyIndex<NameDictionary>(properties, name_index);
+ // Check that the property is a writable data property (no accessor).
+ const int kTypeAndReadOnlyMask =
+ PropertyDetails::KindField::kMask |
+ PropertyDetails::kAttributesReadOnlyMask;
+ STATIC_ASSERT(kData == 0);
+ GotoIf(IsSetWord32(details, kTypeAndReadOnlyMask), miss);
- Node* maybe_transition_cell =
- LoadFixedArrayElement(handler, StoreHandler::kDataIndex);
- var_transition_map_or_holder.Bind(maybe_transition_cell);
- Goto(&do_store);
- }
+ StoreValueByKeyIndex<NameDictionary>(properties, name_index, p->value);
+ Return(p->value);
+ },
+ miss, ic_mode);
Label if_transition_map(this), if_holder_object(this);
- BIND(&do_store);
- {
- Node* maybe_transition_cell = var_transition_map_or_holder.value();
-
- Label unwrap_cell(this);
- Branch(IsWeakCell(maybe_transition_cell), &unwrap_cell, &if_holder_object);
-
- BIND(&unwrap_cell);
- {
- Node* maybe_transition = LoadWeakCellValue(maybe_transition_cell, miss);
- var_transition_map_or_holder.Bind(maybe_transition);
- Branch(IsMap(maybe_transition), &if_transition_map, &if_holder_object);
- }
- }
+ Node* maybe_transition_or_holder_cell = LoadHandlerDataField(handler, 1);
+ Node* maybe_transition_or_holder =
+ LoadWeakCellValue(maybe_transition_or_holder_cell, miss);
+ Branch(IsMap(maybe_transition_or_holder), &if_transition_map,
+ &if_holder_object);
BIND(&if_transition_map);
{
Label if_transition_to_constant(this), if_store_normal(this);
Node* holder = p->receiver;
- Node* transition_map = var_transition_map_or_holder.value();
+ Node* transition_map = maybe_transition_or_holder;
GotoIf(IsDeprecatedMap(transition_map), miss);
-
- if (support_elements == kSupportElements) {
- Label if_smi_handler(this);
-
- GotoIf(TaggedIsSmi(smi_or_code), &if_smi_handler);
- Node* code_handler = smi_or_code;
- CSA_ASSERT(this, IsCodeMap(LoadMap(code_handler)));
-
- StoreTransitionDescriptor descriptor(isolate());
- TailCallStub(descriptor, code_handler, p->context, p->receiver, p->name,
- transition_map, p->value, p->slot, p->vector);
-
- BIND(&if_smi_handler);
- }
-
- Node* smi_handler = smi_or_code;
- CSA_ASSERT(this, TaggedIsSmi(smi_handler));
Node* handler_word = SmiUntag(smi_handler);
Node* handler_kind = DecodeWord<StoreHandler::KindBits>(handler_word);
@@ -1085,6 +1029,9 @@ void AccessorAssembler::HandleStoreICProtoHandler(
BIND(&not_found);
{
Label slow(this);
+ Node* receiver_map = LoadMap(p->receiver);
+ InvalidateValidityCellIfPrototype(receiver_map);
+
Add<NameDictionary>(properties, p->name, p->value, &slow);
Return(p->value);
@@ -1098,9 +1045,8 @@ void AccessorAssembler::HandleStoreICProtoHandler(
{
Label if_store_global_proxy(this), if_api_setter(this), if_accessor(this),
if_native_data_property(this);
- Node* holder = var_transition_map_or_holder.value();
+ Node* holder = maybe_transition_or_holder;
- Node* smi_handler = smi_or_code;
CSA_ASSERT(this, TaggedIsSmi(smi_handler));
Node* handler_word = SmiUntag(smi_handler);
@@ -1135,10 +1081,18 @@ void AccessorAssembler::HandleStoreICProtoHandler(
BIND(&if_api_setter);
{
Comment("api_setter");
- Node* context = LoadWeakCellValueUnchecked(
- LoadObjectField(holder, Tuple2::kValue1Offset));
- Node* call_handler_info = LoadWeakCellValueUnchecked(
- LoadObjectField(holder, Tuple2::kValue2Offset));
+ CSA_ASSERT(this, TaggedIsNotSmi(handler));
+ Node* call_handler_info = holder;
+
+      // The context is stored in either the data2 or data3 field, depending
+      // on whether the access check is enabled for this handler.
+ Node* context_cell = Select(
+ IsSetWord<LoadHandler::DoAccessCheckOnReceiverBits>(handler_word),
+ [=] { return LoadHandlerDataField(handler, 3); },
+ [=] { return LoadHandlerDataField(handler, 2); },
+ MachineRepresentation::kTagged);
+
+ Node* context = LoadWeakCellValueUnchecked(context_cell);
Node* foreign = LoadObjectField(call_handler_info,
CallHandlerInfo::kJsCallbackOffset);
@@ -1746,35 +1700,6 @@ void AccessorAssembler::EmitElementLoad(
}
}
-void AccessorAssembler::CheckPrototype(Node* prototype_cell, Node* name,
- Label* miss) {
- Node* maybe_prototype = LoadWeakCellValue(prototype_cell, miss);
-
- Label done(this);
- Label if_property_cell(this), if_dictionary_object(this);
-
- // |maybe_prototype| is either a PropertyCell or a slow-mode prototype.
- Branch(IsPropertyCell(maybe_prototype), &if_property_cell,
- &if_dictionary_object);
-
- BIND(&if_dictionary_object);
- {
- CSA_ASSERT(this, IsDictionaryMap(LoadMap(maybe_prototype)));
- NameDictionaryNegativeLookup(maybe_prototype, name, miss);
- Goto(&done);
- }
-
- BIND(&if_property_cell);
- {
- // Ensure the property cell still contains the hole.
- Node* value = LoadObjectField(maybe_prototype, PropertyCell::kValueOffset);
- GotoIfNot(IsTheHole(value), miss);
- Goto(&done);
- }
-
- BIND(&done);
-}
-
void AccessorAssembler::NameDictionaryNegativeLookup(Node* object, Node* name,
Label* miss) {
CSA_ASSERT(this, IsDictionaryMap(LoadMap(object)));
@@ -1819,6 +1744,32 @@ void AccessorAssembler::BranchIfStrictMode(Node* vector, Node* slot,
if_strict);
}
+void AccessorAssembler::InvalidateValidityCellIfPrototype(Node* map,
+ Node* bitfield2) {
+ Label is_prototype(this), cont(this);
+ if (bitfield2 == nullptr) {
+ bitfield2 = LoadMapBitField2(map);
+ }
+
+ Branch(IsSetWord32(bitfield2, Map::IsPrototypeMapBit::kMask), &is_prototype,
+ &cont);
+
+ BIND(&is_prototype);
+ {
+ Node* maybe_prototype_info =
+ LoadObjectField(map, Map::kTransitionsOrPrototypeInfoOffset);
+ // If there's no prototype info then there's nothing to invalidate.
+ GotoIf(TaggedIsSmi(maybe_prototype_info), &cont);
+
+ Node* function = ExternalConstant(
+ ExternalReference::invalidate_prototype_chains_function(isolate()));
+ CallCFunction1(MachineType::AnyTagged(), MachineType::AnyTagged(), function,
+ map);
+ Goto(&cont);
+ }
+ BIND(&cont);
+}
+
void AccessorAssembler::GenericElementLoad(Node* receiver, Node* receiver_map,
Node* instance_type, Node* index,
Label* slow) {
@@ -1902,7 +1853,8 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
// Check if the receiver has fast or slow properties.
Node* bitfield3 = LoadMapBitField3(receiver_map);
- GotoIf(IsSetWord32<Map::DictionaryMap>(bitfield3), &if_property_dictionary);
+ GotoIf(IsSetWord32<Map::IsDictionaryMapBit>(bitfield3),
+ &if_property_dictionary);
// Try looking up the property on the receiver; if unsuccessful, look
// for a handler in the stub cache.
@@ -2053,9 +2005,8 @@ Node* AccessorAssembler::StubCachePrimaryOffset(Node* name, Node* map) {
// risk of collision even if the heap is spread over an area larger than
// 4Gb (and not at all if it isn't).
Node* map32 = TruncateWordToWord32(BitcastTaggedToWord(map));
- Node* hash = Int32Add(hash_field, map32);
// Base the offset on a simple combination of name and map.
- hash = Word32Xor(hash, Int32Constant(StubCache::kPrimaryMagic));
+ Node* hash = Int32Add(hash_field, map32);
uint32_t mask = (StubCache::kPrimaryTableSize - 1)
<< StubCache::kCacheIndexShift;
return ChangeUint32ToWord(Word32And(hash, Int32Constant(mask)));
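
The StubCachePrimaryOffset hunk drops the extra XOR with
StubCache::kPrimaryMagic, leaving a plain add-and-mask. As scalar arithmetic
(table size and shift are assumptions; V8 derives them elsewhere):

    // Scalar version of the simplified primary-table offset: add the name's
    // hash field to the low 32 bits of the map address, then mask down to a
    // table index.
    #include <cstdint>

    uint32_t StubCachePrimaryOffset(uint32_t name_hash_field,
                                    uint32_t map_low32) {
      constexpr uint32_t kPrimaryTableSize = 2048;  // assumed for illustration
      constexpr uint32_t kCacheIndexShift = 2;      // assumed for illustration
      uint32_t hash = name_hash_field + map_low32;  // kPrimaryMagic XOR removed
      constexpr uint32_t mask = (kPrimaryTableSize - 1) << kCacheIndexShift;
      return hash & mask;
    }
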
@@ -2310,13 +2261,12 @@ void AccessorAssembler::LoadIC_Uninitialized(const LoadICParameters* p) {
// if (!(has_prototype_slot() && !has_non_instance_prototype())) use generic
// property loading mechanism.
- int has_prototype_slot_mask = 1 << Map::kHasPrototypeSlot;
- int has_non_instance_prototype_mask = 1 << Map::kHasNonInstancePrototype;
GotoIfNot(
- Word32Equal(Word32And(LoadMapBitField(receiver_map),
- Int32Constant(has_prototype_slot_mask |
- has_non_instance_prototype_mask)),
- Int32Constant(has_prototype_slot_mask)),
+ Word32Equal(
+ Word32And(LoadMapBitField(receiver_map),
+ Int32Constant(Map::HasPrototypeSlotBit::kMask |
+ Map::HasNonInstancePrototypeBit::kMask)),
+ Int32Constant(Map::HasPrototypeSlotBit::kMask)),
&not_function_prototype);
Return(LoadJSFunctionPrototype(receiver, &miss));
BIND(&not_function_prototype);
@@ -2337,120 +2287,97 @@ void AccessorAssembler::LoadIC_Uninitialized(const LoadICParameters* p) {
}
}
-void AccessorAssembler::LoadICProtoArray(
- const LoadICParameters* p, Node* handler,
- bool throw_reference_error_if_nonexistent) {
- Label miss(this);
- CSA_ASSERT(this, Word32BinaryNot(TaggedIsSmi(handler)));
- CSA_ASSERT(this, IsFixedArrayMap(LoadMap(handler)));
+void AccessorAssembler::LoadGlobalIC(TNode<FeedbackVector> vector, Node* slot,
+ const LazyNode<Context>& lazy_context,
+ const LazyNode<Name>& lazy_name,
+ TypeofMode typeof_mode,
+ ExitPoint* exit_point,
+ ParameterMode slot_mode) {
+ Label try_handler(this, Label::kDeferred), miss(this, Label::kDeferred);
+ LoadGlobalIC_TryPropertyCellCase(vector, slot, lazy_context, exit_point,
+ &try_handler, &miss, slot_mode);
- ExitPoint direct_exit(this);
-
- Node* smi_handler = LoadObjectField(handler, LoadHandler::kSmiHandlerOffset);
- Node* handler_flags = SmiUntag(smi_handler);
-
- Node* handler_length = LoadAndUntagFixedArrayBaseLength(handler);
-
- Node* holder = EmitLoadICProtoArrayCheck(p, handler, handler_length,
- handler_flags, &miss);
-
- HandleLoadICSmiHandlerCase(p, holder, smi_handler, &miss, &direct_exit,
- throw_reference_error_if_nonexistent,
- kOnlyProperties);
+ BIND(&try_handler);
+ LoadGlobalIC_TryHandlerCase(vector, slot, lazy_context, lazy_name,
+ typeof_mode, exit_point, &miss, slot_mode);
BIND(&miss);
{
- TailCallRuntime(Runtime::kLoadIC_Miss, p->context, p->receiver, p->name,
- p->slot, p->vector);
+ Comment("LoadGlobalIC_MissCase");
+ TNode<Context> context = lazy_context();
+ TNode<Name> name = lazy_name();
+ exit_point->ReturnCallRuntime(Runtime::kLoadGlobalIC_Miss, context, name,
+ ParameterToTagged(slot, slot_mode), vector);
}
}
void AccessorAssembler::LoadGlobalIC_TryPropertyCellCase(
- Node* vector, Node* slot, ExitPoint* exit_point, Label* try_handler,
- Label* miss, ParameterMode slot_mode) {
+ TNode<FeedbackVector> vector, Node* slot,
+ const LazyNode<Context>& lazy_context, ExitPoint* exit_point,
+ Label* try_handler, Label* miss, ParameterMode slot_mode) {
Comment("LoadGlobalIC_TryPropertyCellCase");
- Node* weak_cell = LoadFeedbackVectorSlot(vector, slot, 0, slot_mode);
- CSA_ASSERT(this, HasInstanceType(weak_cell, WEAK_CELL_TYPE));
-
- // Load value or try handler case if the {weak_cell} is cleared.
- Node* property_cell = LoadWeakCellValue(weak_cell, try_handler);
- CSA_ASSERT(this, IsPropertyCell(property_cell));
-
- Node* value = LoadObjectField(property_cell, PropertyCell::kValueOffset);
- GotoIf(WordEqual(value, TheHoleConstant()), miss);
- exit_point->Return(value);
-}
-
-void AccessorAssembler::LoadGlobalIC_TryHandlerCase(const LoadICParameters* pp,
- TypeofMode typeof_mode,
- ExitPoint* exit_point,
- Label* miss) {
- Comment("LoadGlobalIC_TryHandlerCase");
-
- Label call_handler(this), non_smi(this);
-
- Node* handler = LoadFeedbackVectorSlot(pp->vector, pp->slot, kPointerSize,
- SMI_PARAMETERS);
- GotoIf(WordEqual(handler, LoadRoot(Heap::kuninitialized_symbolRootIndex)),
- miss);
-
- GotoIfNot(TaggedIsSmi(handler), &non_smi);
-
- bool throw_reference_error_if_nonexistent = typeof_mode == NOT_INSIDE_TYPEOF;
+ Label if_lexical_var(this), if_property_cell(this);
+ TNode<Object> maybe_weak_cell =
+ LoadFeedbackVectorSlot(vector, slot, 0, slot_mode);
+ Branch(TaggedIsSmi(maybe_weak_cell), &if_lexical_var, &if_property_cell);
+ BIND(&if_property_cell);
{
- LoadICParameters p = *pp;
- DCHECK_NULL(p.receiver);
- Node* native_context = LoadNativeContext(p.context);
- p.receiver =
- LoadContextElement(native_context, Context::GLOBAL_PROXY_INDEX);
- Node* holder = LoadContextElement(native_context, Context::EXTENSION_INDEX);
- HandleLoadICSmiHandlerCase(&p, holder, handler, miss, exit_point,
- throw_reference_error_if_nonexistent,
- kOnlyProperties);
- }
+ TNode<WeakCell> weak_cell = CAST(maybe_weak_cell);
- BIND(&non_smi);
- GotoIf(IsCodeMap(LoadMap(handler)), &call_handler);
-
- HandleLoadGlobalICHandlerCase(pp, handler, miss, exit_point,
- throw_reference_error_if_nonexistent);
+ // Load value or try handler case if the {weak_cell} is cleared.
+ TNode<PropertyCell> property_cell =
+ CAST(LoadWeakCellValue(weak_cell, try_handler));
+ TNode<Object> value =
+ LoadObjectField(property_cell, PropertyCell::kValueOffset);
+ GotoIf(WordEqual(value, TheHoleConstant()), miss);
+ exit_point->Return(value);
+ }
- BIND(&call_handler);
+ BIND(&if_lexical_var);
{
- LoadWithVectorDescriptor descriptor(isolate());
- Node* native_context = LoadNativeContext(pp->context);
- Node* receiver =
- LoadContextElement(native_context, Context::GLOBAL_PROXY_INDEX);
- exit_point->ReturnCallStub(descriptor, handler, pp->context, receiver,
- pp->name, pp->slot, pp->vector);
+ Comment("Load lexical variable");
+ TNode<IntPtrT> lexical_handler = SmiUntag(CAST(maybe_weak_cell));
+ TNode<IntPtrT> context_index =
+ Signed(DecodeWord<GlobalICNexus::ContextIndexBits>(lexical_handler));
+ TNode<IntPtrT> slot_index =
+ Signed(DecodeWord<GlobalICNexus::SlotIndexBits>(lexical_handler));
+ TNode<Context> context = lazy_context();
+ TNode<Context> script_context = LoadScriptContext(context, context_index);
+ TNode<Object> result = LoadContextElement(script_context, slot_index);
+ exit_point->Return(result);
}
}
-void AccessorAssembler::LoadGlobalIC_MissCase(const LoadICParameters* p,
- ExitPoint* exit_point) {
- Comment("LoadGlobalIC_MissCase");
+void AccessorAssembler::LoadGlobalIC_TryHandlerCase(
+ TNode<FeedbackVector> vector, Node* slot,
+ const LazyNode<Context>& lazy_context, const LazyNode<Name>& lazy_name,
+ TypeofMode typeof_mode, ExitPoint* exit_point, Label* miss,
+ ParameterMode slot_mode) {
+ Comment("LoadGlobalIC_TryHandlerCase");
- exit_point->ReturnCallRuntime(Runtime::kLoadGlobalIC_Miss, p->context,
- p->name, p->slot, p->vector);
-}
+ Label call_handler(this), non_smi(this);
-void AccessorAssembler::LoadGlobalIC(const LoadICParameters* p,
- TypeofMode typeof_mode) {
- // Must be kept in sync with Interpreter::BuildLoadGlobal.
+ Node* handler = LoadFeedbackVectorSlot(vector, slot, kPointerSize, slot_mode);
+ GotoIf(WordEqual(handler, LoadRoot(Heap::kuninitialized_symbolRootIndex)),
+ miss);
- ExitPoint direct_exit(this);
+ OnNonExistent on_nonexistent = typeof_mode == NOT_INSIDE_TYPEOF
+ ? OnNonExistent::kThrowReferenceError
+ : OnNonExistent::kReturnUndefined;
- Label try_handler(this), miss(this);
- LoadGlobalIC_TryPropertyCellCase(p->vector, p->slot, &direct_exit,
- &try_handler, &miss);
+ TNode<Context> context = lazy_context();
+ TNode<Context> native_context = LoadNativeContext(context);
+ TNode<JSGlobalProxy> receiver =
+ CAST(LoadContextElement(native_context, Context::GLOBAL_PROXY_INDEX));
+ Node* holder = LoadContextElement(native_context, Context::EXTENSION_INDEX);
- BIND(&try_handler);
- LoadGlobalIC_TryHandlerCase(p, typeof_mode, &direct_exit, &miss);
+ LoadICParameters p(context, receiver, lazy_name(),
+ ParameterToTagged(slot, slot_mode), vector, holder);
- BIND(&miss);
- LoadGlobalIC_MissCase(p, &direct_exit);
+ HandleLoadICHandlerCase(&p, handler, miss, exit_point, ICMode::kGlobalIC,
+ on_nonexistent);
}
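
Lexical-variable loads and stores (LoadGlobalIC_TryPropertyCellCase above,
StoreGlobalIC below) share a Smi feedback encoding: a script-context index
and a slot index packed into bit fields. A self-contained sketch (the field
widths are assumptions, not GlobalICNexus's actual ones):

    // Sketch of the lexical-variable Smi handler that ContextIndexBits /
    // SlotIndexBits decode: two indices packed into one small integer.
    #include <cstdint>

    constexpr uint32_t kContextIndexBits = 12;  // assumed width
    constexpr uint32_t kSlotIndexBits = 18;     // assumed width

    constexpr uint32_t EncodeLexical(uint32_t context_index,
                                     uint32_t slot_index) {
      return context_index | (slot_index << kContextIndexBits);
    }
    constexpr uint32_t DecodeContextIndex(uint32_t handler) {
      return handler & ((1u << kContextIndexBits) - 1);
    }
    constexpr uint32_t DecodeSlotIndex(uint32_t handler) {
      return (handler >> kContextIndexBits) & ((1u << kSlotIndexBits) - 1);
    }

    static_assert(DecodeContextIndex(EncodeLexical(3, 7)) == 3, "round-trip");
    static_assert(DecodeSlotIndex(EncodeLexical(3, 7)) == 7, "round-trip");
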
void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p) {
@@ -2472,7 +2399,8 @@ void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p) {
BIND(&if_handler);
{
HandleLoadICHandlerCase(p, var_handler.value(), &miss, &direct_exit,
- kSupportElements);
+ ICMode::kNonGlobalIC,
+ OnNonExistent::kReturnUndefined, kSupportElements);
}
BIND(&try_polymorphic);
@@ -2642,7 +2570,8 @@ void AccessorAssembler::KeyedLoadICPolymorphicName(const LoadICParameters* p) {
{
ExitPoint direct_exit(this);
HandleLoadICHandlerCase(p, var_handler.value(), &miss, &direct_exit,
- kOnlyProperties);
+ ICMode::kNonGlobalIC,
+ OnNonExistent::kReturnUndefined, kOnlyProperties);
}
BIND(&miss);
@@ -2669,7 +2598,8 @@ void AccessorAssembler::StoreIC(const StoreICParameters* p) {
BIND(&if_handler);
{
Comment("StoreIC_if_handler");
- HandleStoreICHandlerCase(p, var_handler.value(), &miss);
+ HandleStoreICHandlerCase(p, var_handler.value(), &miss,
+ ICMode::kNonGlobalIC);
}
BIND(&try_polymorphic);
@@ -2710,6 +2640,61 @@ void AccessorAssembler::StoreIC(const StoreICParameters* p) {
}
}
+void AccessorAssembler::StoreGlobalIC(const StoreICParameters* pp) {
+ Label if_lexical_var(this), if_property_cell(this);
+ Node* maybe_weak_cell =
+ LoadFeedbackVectorSlot(pp->vector, pp->slot, 0, SMI_PARAMETERS);
+ Branch(TaggedIsSmi(maybe_weak_cell), &if_lexical_var, &if_property_cell);
+
+ BIND(&if_property_cell);
+ {
+ Label try_handler(this), miss(this, Label::kDeferred);
+ Node* property_cell = LoadWeakCellValue(maybe_weak_cell, &try_handler);
+
+ ExitPoint direct_exit(this);
+ StoreGlobalIC_PropertyCellCase(property_cell, pp->value, &direct_exit,
+ &miss);
+
+ BIND(&try_handler);
+ {
+ Comment("StoreGlobalIC_try_handler");
+ Node* handler = LoadFeedbackVectorSlot(pp->vector, pp->slot, kPointerSize,
+ SMI_PARAMETERS);
+
+ GotoIf(WordEqual(handler, LoadRoot(Heap::kuninitialized_symbolRootIndex)),
+ &miss);
+
+ StoreICParameters p = *pp;
+ DCHECK_NULL(p.receiver);
+ Node* native_context = LoadNativeContext(p.context);
+ p.receiver =
+ LoadContextElement(native_context, Context::GLOBAL_PROXY_INDEX);
+
+ HandleStoreICHandlerCase(&p, handler, &miss, ICMode::kGlobalIC);
+ }
+
+ BIND(&miss);
+ {
+ TailCallRuntime(Runtime::kStoreGlobalIC_Miss, pp->context, pp->value,
+ pp->slot, pp->vector, pp->name);
+ }
+ }
+
+ BIND(&if_lexical_var);
+ {
+ Comment("Store lexical variable");
+ TNode<IntPtrT> lexical_handler = SmiUntag(maybe_weak_cell);
+ TNode<IntPtrT> context_index =
+ Signed(DecodeWord<GlobalICNexus::ContextIndexBits>(lexical_handler));
+ TNode<IntPtrT> slot_index =
+ Signed(DecodeWord<GlobalICNexus::SlotIndexBits>(lexical_handler));
+ TNode<Context> script_context =
+ LoadScriptContext(CAST(pp->context), context_index);
+ StoreContextElement(script_context, slot_index, CAST(pp->value));
+ Return(pp->value);
+ }
+}
+
void AccessorAssembler::StoreGlobalIC_PropertyCellCase(Node* property_cell,
Node* value,
ExitPoint* exit_point,
@@ -2724,6 +2709,11 @@ void AccessorAssembler::StoreGlobalIC_PropertyCellCase(Node* property_cell,
LoadObjectField(property_cell, PropertyCell::kValueOffset);
Node* details = LoadAndUntagToWord32ObjectField(property_cell,
PropertyCell::kDetailsOffset);
+ GotoIf(IsSetWord32(details, PropertyDetails::kAttributesReadOnlyMask), miss);
+ CSA_ASSERT(this,
+ Word32Equal(DecodeWord32<PropertyDetails::KindField>(details),
+ Int32Constant(kData)));
+
Node* type = DecodeWord32<PropertyDetails::PropertyCellTypeField>(details);
Label constant(this), store(this), not_smi(this);
@@ -2789,7 +2779,8 @@ void AccessorAssembler::KeyedStoreIC(const StoreICParameters* p) {
BIND(&if_handler);
{
Comment("KeyedStoreIC_if_handler");
- HandleStoreICHandlerCase(p, var_handler.value(), &miss, kSupportElements);
+ HandleStoreICHandlerCase(p, var_handler.value(), &miss,
+ ICMode::kNonGlobalIC, kSupportElements);
}
BIND(&try_polymorphic);
@@ -2904,21 +2895,6 @@ void AccessorAssembler::GenerateLoadICTrampoline() {
TailCallBuiltin(Builtins::kLoadIC, context, receiver, name, slot, vector);
}
-void AccessorAssembler::GenerateLoadICProtoArray(
- bool throw_reference_error_if_nonexistent) {
- typedef LoadICProtoArrayDescriptor Descriptor;
-
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* name = Parameter(Descriptor::kName);
- Node* slot = Parameter(Descriptor::kSlot);
- Node* vector = Parameter(Descriptor::kVector);
- Node* handler = Parameter(Descriptor::kHandler);
- Node* context = Parameter(Descriptor::kContext);
-
- LoadICParameters p(context, receiver, name, slot, vector);
- LoadICProtoArray(&p, handler, throw_reference_error_if_nonexistent);
-}
-
void AccessorAssembler::GenerateLoadField() {
typedef LoadFieldDescriptor Descriptor;
@@ -2951,8 +2927,12 @@ void AccessorAssembler::GenerateLoadGlobalIC(TypeofMode typeof_mode) {
Node* vector = Parameter(Descriptor::kVector);
Node* context = Parameter(Descriptor::kContext);
- LoadICParameters p(context, nullptr, name, slot, vector);
- LoadGlobalIC(&p, typeof_mode);
+ ExitPoint direct_exit(this);
+ LoadGlobalIC(CAST(vector), slot,
+ // lazy_context
+ [=] { return CAST(context); },
+ // lazy_name
+ [=] { return CAST(name); }, typeof_mode, &direct_exit);
}
void AccessorAssembler::GenerateLoadGlobalICTrampoline(TypeofMode typeof_mode) {
@@ -3020,6 +3000,33 @@ void AccessorAssembler::GenerateKeyedLoadIC_PolymorphicName() {
KeyedLoadICPolymorphicName(&p);
}
+void AccessorAssembler::GenerateStoreGlobalIC() {
+ typedef StoreGlobalWithVectorDescriptor Descriptor;
+
+ Node* name = Parameter(Descriptor::kName);
+ Node* value = Parameter(Descriptor::kValue);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* vector = Parameter(Descriptor::kVector);
+ Node* context = Parameter(Descriptor::kContext);
+
+ StoreICParameters p(context, nullptr, name, value, slot, vector);
+ StoreGlobalIC(&p);
+}
+
+void AccessorAssembler::GenerateStoreGlobalICTrampoline() {
+ typedef StoreGlobalDescriptor Descriptor;
+
+ Node* name = Parameter(Descriptor::kName);
+ Node* value = Parameter(Descriptor::kValue);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* context = Parameter(Descriptor::kContext);
+ Node* vector = LoadFeedbackVectorForStub();
+
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kStoreGlobalIC);
+ TailCallStub(callable, context, name, value, slot, vector);
+}
+
void AccessorAssembler::GenerateStoreIC() {
typedef StoreWithVectorDescriptor Descriptor;
diff --git a/deps/v8/src/ic/accessor-assembler.h b/deps/v8/src/ic/accessor-assembler.h
index b11ff738c1..46376dd6a8 100644
--- a/deps/v8/src/ic/accessor-assembler.h
+++ b/deps/v8/src/ic/accessor-assembler.h
@@ -34,8 +34,8 @@ class AccessorAssembler : public CodeStubAssembler {
void GenerateKeyedLoadIC_PolymorphicName();
void GenerateStoreIC();
void GenerateStoreICTrampoline();
-
- void GenerateLoadICProtoArray(bool throw_reference_error_if_nonexistent);
+ void GenerateStoreGlobalIC();
+ void GenerateStoreGlobalICTrampoline();
void GenerateLoadGlobalIC(TypeofMode typeof_mode);
void GenerateLoadGlobalICTrampoline(TypeofMode typeof_mode);
@@ -56,32 +56,35 @@ class AccessorAssembler : public CodeStubAssembler {
struct LoadICParameters {
LoadICParameters(Node* context, Node* receiver, Node* name, Node* slot,
- Node* vector)
+ Node* vector, Node* holder = nullptr)
: context(context),
receiver(receiver),
name(name),
slot(slot),
- vector(vector) {}
+ vector(vector),
+ holder(holder ? holder : receiver) {}
Node* context;
Node* receiver;
Node* name;
Node* slot;
Node* vector;
+ Node* holder;
};
- void LoadGlobalIC_TryPropertyCellCase(
- Node* vector, Node* slot, ExitPoint* exit_point, Label* try_handler,
- Label* miss, ParameterMode slot_mode = SMI_PARAMETERS);
- void LoadGlobalIC_TryHandlerCase(const LoadICParameters* p,
- TypeofMode typeof_mode,
- ExitPoint* exit_point, Label* miss);
- void LoadGlobalIC_MissCase(const LoadICParameters* p, ExitPoint* exit_point);
+ void LoadGlobalIC(TNode<FeedbackVector> vector, Node* slot,
+ const LazyNode<Context>& lazy_context,
+ const LazyNode<Name>& lazy_name, TypeofMode typeof_mode,
+ ExitPoint* exit_point,
+ ParameterMode slot_mode = SMI_PARAMETERS);
// Specialized LoadIC for inlined bytecode handler, hand-tuned to omit frame
// construction on common paths.
void LoadIC_BytecodeHandler(const LoadICParameters* p, ExitPoint* exit_point);
+ // Loads the dataX field from the DataHandler object.
+ Node* LoadHandlerDataField(Node* handler, int data_index);
+
protected:
struct StoreICParameters : public LoadICParameters {
StoreICParameters(Node* context, Node* receiver, Node* name, Node* value,
@@ -91,14 +94,17 @@ class AccessorAssembler : public CodeStubAssembler {
Node* value;
};
+ enum class ICMode { kNonGlobalIC, kGlobalIC };
enum ElementSupport { kOnlyProperties, kSupportElements };
void HandleStoreICHandlerCase(
- const StoreICParameters* p, Node* handler, Label* miss,
+ const StoreICParameters* p, Node* handler, Label* miss, ICMode ic_mode,
ElementSupport support_elements = kOnlyProperties);
void JumpIfDataProperty(Node* details, Label* writable, Label* readonly);
void BranchIfStrictMode(Node* vector, Node* slot, Label* if_strict);
+ void InvalidateValidityCellIfPrototype(Node* map, Node* bitfield2 = nullptr);
+
private:
// Stub generation entry points.
@@ -112,13 +118,12 @@ class AccessorAssembler : public CodeStubAssembler {
Node* LoadDescriptorValue(Node* map, Node* descriptor);
void LoadIC_Uninitialized(const LoadICParameters* p);
- void LoadICProtoArray(const LoadICParameters* p, Node* handler,
- bool throw_reference_error_if_nonexistent);
- void LoadGlobalIC(const LoadICParameters* p, TypeofMode typeof_mode);
+
void KeyedLoadIC(const LoadICParameters* p);
void KeyedLoadICGeneric(const LoadICParameters* p);
void KeyedLoadICPolymorphicName(const LoadICParameters* p);
void StoreIC(const StoreICParameters* p);
+ void StoreGlobalIC(const StoreICParameters* p);
void StoreGlobalIC_PropertyCellCase(Node* property_cell, Node* value,
ExitPoint* exit_point, Label* miss);
void KeyedStoreIC(const StoreICParameters* p);
@@ -134,23 +139,23 @@ class AccessorAssembler : public CodeStubAssembler {
Label* if_miss, int min_feedback_capacity);
// LoadIC implementation.
-
+ enum class OnNonExistent { kThrowReferenceError, kReturnUndefined };
void HandleLoadICHandlerCase(
const LoadICParameters* p, Node* handler, Label* miss,
- ExitPoint* exit_point, ElementSupport support_elements = kOnlyProperties);
+ ExitPoint* exit_point, ICMode ic_mode = ICMode::kNonGlobalIC,
+ OnNonExistent on_nonexistent = OnNonExistent::kReturnUndefined,
+ ElementSupport support_elements = kOnlyProperties);
void HandleLoadICSmiHandlerCase(const LoadICParameters* p, Node* holder,
- Node* smi_handler, Label* miss,
+ Node* smi_handler, Node* handler, Label* miss,
ExitPoint* exit_point,
- bool throw_reference_error_if_nonexistent,
+ OnNonExistent on_nonexistent,
ElementSupport support_elements);
- void HandleLoadICProtoHandlerCase(const LoadICParameters* p, Node* handler,
- Variable* var_holder,
- Variable* var_smi_handler,
- Label* if_smi_handler, Label* miss,
- ExitPoint* exit_point,
- bool throw_reference_error_if_nonexistent);
+ void HandleLoadICProtoHandler(const LoadICParameters* p, Node* handler,
+ Variable* var_holder, Variable* var_smi_handler,
+ Label* if_smi_handler, Label* miss,
+ ExitPoint* exit_point, ICMode ic_mode);
void HandleLoadField(Node* holder, Node* handler_word,
Variable* var_double_value, Label* rebox_double,
@@ -159,23 +164,26 @@ class AccessorAssembler : public CodeStubAssembler {
void EmitAccessCheck(Node* expected_native_context, Node* context,
Node* receiver, Label* can_access, Label* miss);
- Node* EmitLoadICProtoArrayCheck(const LoadICParameters* p, Node* handler,
- Node* handler_length, Node* handler_flags,
- Label* miss);
-
// LoadGlobalIC implementation.
- void HandleLoadGlobalICHandlerCase(const LoadICParameters* p, Node* handler,
- Label* miss, ExitPoint* exit_point,
- bool throw_reference_error_if_nonexistent);
+ void LoadGlobalIC_TryPropertyCellCase(
+ TNode<FeedbackVector> vector, Node* slot,
+ const LazyNode<Context>& lazy_context, ExitPoint* exit_point,
+ Label* try_handler, Label* miss,
+ ParameterMode slot_mode = SMI_PARAMETERS);
+
+ void LoadGlobalIC_TryHandlerCase(TNode<FeedbackVector> vector, Node* slot,
+ const LazyNode<Context>& lazy_context,
+ const LazyNode<Name>& lazy_name,
+ TypeofMode typeof_mode,
+ ExitPoint* exit_point, Label* miss,
+ ParameterMode slot_mode);
// StoreIC implementation.
- void HandleStoreICElementHandlerCase(const StoreICParameters* p,
- Node* handler, Label* miss);
-
void HandleStoreICProtoHandler(const StoreICParameters* p, Node* handler,
- Label* miss, ElementSupport support_elements);
+ Label* miss, ICMode ic_mode,
+ ElementSupport support_elements);
// If |transition| is nullptr then a normal field store is generated;
// otherwise a transitioning store is generated.
void HandleStoreICSmiHandlerCase(Node* handler_word, Node* holder,
@@ -208,6 +216,16 @@ class AccessorAssembler : public CodeStubAssembler {
// Low-level helpers.
+ typedef std::function<void(Node* code_handler)> OnCodeHandler;
+ typedef std::function<void(Node* properties, Node* name_index)>
+ OnFoundOnReceiver;
+
+ template <typename ICHandler, typename ICParameters>
+ Node* HandleProtoHandler(const ICParameters* p, Node* handler,
+ const OnCodeHandler& on_code_handler,
+ const OnFoundOnReceiver& on_found_on_receiver,
+ Label* miss, ICMode ic_mode);
+
Node* GetLanguageMode(Node* vector, Node* slot);
Node* PrepareValueForStore(Node* handler_word, Node* holder,
@@ -229,7 +247,6 @@ class AccessorAssembler : public CodeStubAssembler {
Label* rebox_double, Variable* var_double_value,
Label* unimplemented_elements_kind, Label* out_of_bounds,
Label* miss, ExitPoint* exit_point);
- void CheckPrototype(Node* prototype_cell, Node* name, Label* miss);
void NameDictionaryNegativeLookup(Node* object, Node* name, Label* miss);
// Stub cache access helpers.
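
The HandleProtoHandler template declared above shares one proto-handler skeleton between the load and store ICs by threading optional behavior through std::function callbacks. A hedged standalone sketch of the idea (names hypothetical; an empty std::function means the IC flavor opts out of that hook):

    // Illustrative only; not the real AccessorAssembler interface.
    #include <functional>
    #include <iostream>

    typedef std::function<void(int)> OnCodeHandler;

    struct Params { int handler; };

    int HandleProtoHandler(const Params& p, const OnCodeHandler& on_code_handler) {
      // Invoke the hook only if the caller bound one.
      if (on_code_handler) on_code_handler(p.handler);
      return p.handler;
    }

    int main() {
      Params p{5};
      HandleProtoHandler(p, OnCodeHandler());                        // no hook
      HandleProtoHandler(p, [](int h) { std::cout << h << "\n"; });  // hook runs
    }
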
diff --git a/deps/v8/src/ic/handler-configuration-inl.h b/deps/v8/src/ic/handler-configuration-inl.h
index 5c8e0511cf..cf2577a01f 100644
--- a/deps/v8/src/ic/handler-configuration-inl.h
+++ b/deps/v8/src/ic/handler-configuration-inl.h
@@ -9,10 +9,17 @@
#include "src/field-index-inl.h"
#include "src/objects-inl.h"
+#include "src/objects/data-handler-inl.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
namespace v8 {
namespace internal {
+TYPE_CHECKER(LoadHandler, LOAD_HANDLER_TYPE)
+CAST_ACCESSOR(LoadHandler)
+
// Decodes kind from Smi-handler.
LoadHandler::Kind LoadHandler::GetHandlerKind(Smi* smi_handler) {
return KindBits::decode(smi_handler->value());
@@ -76,28 +83,6 @@ Handle<Smi> LoadHandler::LoadModuleExport(Isolate* isolate, int index) {
return handle(Smi::FromInt(config), isolate);
}
-Handle<Smi> LoadHandler::EnableAccessCheckOnReceiver(Isolate* isolate,
- Handle<Smi> smi_handler) {
- int config = smi_handler->value();
-#ifdef DEBUG
- Kind kind = KindBits::decode(config);
- DCHECK_NE(kElement, kind);
-#endif
- config = DoAccessCheckOnReceiverBits::update(config, true);
- return handle(Smi::FromInt(config), isolate);
-}
-
-Handle<Smi> LoadHandler::EnableLookupOnReceiver(Isolate* isolate,
- Handle<Smi> smi_handler) {
- int config = smi_handler->value();
-#ifdef DEBUG
- Kind kind = KindBits::decode(config);
- DCHECK_NE(kElement, kind);
-#endif
- config = LookupOnReceiverBits::update(config, true);
- return handle(Smi::FromInt(config), isolate);
-}
-
Handle<Smi> LoadHandler::LoadNonExistent(Isolate* isolate) {
int config = KindBits::encode(kNonExistent);
return handle(Smi::FromInt(config), isolate);
@@ -125,6 +110,9 @@ Handle<Smi> LoadHandler::LoadIndexedString(Isolate* isolate,
return handle(Smi::FromInt(config), isolate);
}
+TYPE_CHECKER(StoreHandler, STORE_HANDLER_TYPE)
+CAST_ACCESSOR(StoreHandler)
+
Handle<Smi> StoreHandler::StoreGlobalProxy(Isolate* isolate) {
int config = KindBits::encode(kGlobalProxy);
return handle(Smi::FromInt(config), isolate);
@@ -140,13 +128,6 @@ Handle<Smi> StoreHandler::StoreProxy(Isolate* isolate) {
return handle(Smi::FromInt(config), isolate);
}
-Handle<Smi> StoreHandler::EnableAccessCheckOnReceiver(Isolate* isolate,
- Handle<Smi> smi_handler) {
- int config = smi_handler->value();
- config = DoAccessCheckOnReceiverBits::update(config, true);
- return handle(Smi::FromInt(config), isolate);
-}
-
Handle<Smi> StoreHandler::StoreField(Isolate* isolate, Kind kind,
int descriptor, FieldIndex field_index,
Representation representation,
@@ -230,25 +211,15 @@ Handle<Smi> StoreHandler::StoreApiSetter(Isolate* isolate,
// static
WeakCell* StoreHandler::GetTransitionCell(Object* handler) {
- if (handler->IsTuple3()) {
- STATIC_ASSERT(kDataOffset == Tuple3::kValue1Offset);
- WeakCell* cell = WeakCell::cast(Tuple3::cast(handler)->value1());
- DCHECK(!cell->cleared());
- return cell;
- }
-
- DCHECK(handler->IsFixedArrayExact());
- WeakCell* cell = WeakCell::cast(FixedArray::cast(handler)->get(kDataIndex));
+ DCHECK(handler->IsStoreHandler());
+ WeakCell* cell = WeakCell::cast(StoreHandler::cast(handler)->data1());
DCHECK(!cell->cleared());
return cell;
}
-// static
-bool StoreHandler::IsHandler(Object* maybe_handler) {
- return maybe_handler->IsFixedArrayExact() || maybe_handler->IsTuple3();
-}
-
} // namespace internal
} // namespace v8
+#include "src/objects/object-macros-undef.h"
+
#endif // V8_IC_HANDLER_CONFIGURATION_INL_H_
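
The Smi-encoded handler configs manipulated above are built on V8's BitField template, which packs typed fields into an integer and chains field offsets via kNext. A simplified standalone model of that utility:

    // Minimal stand-in for V8's BitField (src/utils.h); simplified.
    #include <cstdint>
    #include <iostream>

    template <typename T, int S, int W>
    struct BitField {
      static const uint32_t kMask = ((1u << W) - 1u) << S;
      static const int kNext = S + W;  // next field starts here
      static uint32_t encode(T value) { return static_cast<uint32_t>(value) << S; }
      static T decode(uint32_t config) { return static_cast<T>((config & kMask) >> S); }
      static uint32_t update(uint32_t config, T value) {
        return (config & ~kMask) | encode(value);
      }
    };

    enum Kind { kElement, kField, kGlobalProxy };
    typedef BitField<Kind, 0, 4> KindBits;
    typedef BitField<bool, KindBits::kNext, 1> DoAccessCheckOnReceiverBits;

    int main() {
      uint32_t config = KindBits::encode(kGlobalProxy);
      config = DoAccessCheckOnReceiverBits::update(config, true);
      std::cout << KindBits::decode(config) << " "
                << DoAccessCheckOnReceiverBits::decode(config) << "\n";  // 2 1
    }
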
diff --git a/deps/v8/src/ic/handler-configuration.cc b/deps/v8/src/ic/handler-configuration.cc
index 077bdb49e1..19614a4322 100644
--- a/deps/v8/src/ic/handler-configuration.cc
+++ b/deps/v8/src/ic/handler-configuration.cc
@@ -13,76 +13,67 @@ namespace internal {
namespace {
-template <bool fill_array = true>
-int InitPrototypeChecks(Isolate* isolate, Handle<Map> receiver_map,
- Handle<JSReceiver> holder, Handle<Name> name,
- Handle<FixedArray> array, int first_index) {
- if (!holder.is_null() && holder->map() == *receiver_map) return 0;
+template <typename BitField>
+Handle<Smi> SetBitFieldValue(Isolate* isolate, Handle<Smi> smi_handler,
+ typename BitField::FieldType value) {
+ int config = smi_handler->value();
+ config = BitField::update(config, true);
+ return handle(Smi::FromInt(config), isolate);
+}
- HandleScope scope(isolate);
+// TODO(ishell): Remove templatization once we move common bits from
+// Load/StoreHandler to the base class.
+template <typename ICHandler, bool fill_handler = true>
+int InitPrototypeChecksImpl(Isolate* isolate, Handle<ICHandler> handler,
+ Handle<Smi>* smi_handler, Handle<Map> receiver_map,
+ Handle<JSReceiver> holder, Handle<Object> data1,
+ MaybeHandle<Object> maybe_data2) {
int checks_count = 0;
+ // The holder-is-receiver case itself does not add entries unless an
+ // optional data2 value is provided.
- if (receiver_map->IsPrimitiveMap() || receiver_map->IsJSGlobalProxyMap()) {
+ if (receiver_map->IsPrimitiveMap() ||
+ receiver_map->is_access_check_needed()) {
+ DCHECK(!receiver_map->IsJSGlobalObjectMap());
// The validity cell check for primitive and global proxy receivers does
// not guarantee that a given native context ever had access to another
// native context. However, a handler created for one native context could
// be used in another native context through the megamorphic stub cache.
// So we record the original native context to which this handler
// corresponds.
- if (fill_array) {
+ if (fill_handler) {
Handle<Context> native_context = isolate->native_context();
- array->set(first_index + checks_count, native_context->self_weak_cell());
+ handler->set_data2(native_context->self_weak_cell());
+ } else {
+ // Enable access checks on receiver.
+ typedef typename ICHandler::DoAccessCheckOnReceiverBits Bit;
+ *smi_handler = SetBitFieldValue<Bit>(isolate, *smi_handler, true);
}
checks_count++;
-
- } else if (receiver_map->IsJSGlobalObjectMap()) {
- // If we are creating a handler for [Load/Store]GlobalIC then we need to
- // check that the property did not appear in the global object.
- if (fill_array) {
- Handle<JSGlobalObject> global = isolate->global_object();
- Handle<PropertyCell> cell = JSGlobalObject::EnsureEmptyPropertyCell(
- global, name, PropertyCellType::kInvalidated);
- DCHECK(cell->value()->IsTheHole(isolate));
- Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
- array->set(first_index + checks_count, *weak_cell);
+ } else if (receiver_map->is_dictionary_map() &&
+ !receiver_map->IsJSGlobalObjectMap()) {
+ if (!fill_handler) {
+ // Enable lookup on receiver.
+ typedef typename ICHandler::LookupOnReceiverBits Bit;
+ *smi_handler = SetBitFieldValue<Bit>(isolate, *smi_handler, true);
}
- checks_count++;
}
-
- // Create/count entries for each global or dictionary prototype that
- // appears in the prototype chain from receiver to holder.
- PrototypeIterator::WhereToEnd end = name->IsPrivate()
- ? PrototypeIterator::END_AT_NON_HIDDEN
- : PrototypeIterator::END_AT_NULL;
- for (PrototypeIterator iter(receiver_map, end); !iter.IsAtEnd();
- iter.Advance()) {
- Handle<JSReceiver> current =
- PrototypeIterator::GetCurrent<JSReceiver>(iter);
- if (holder.is_identical_to(current)) break;
- Handle<Map> current_map(current->map(), isolate);
-
- if (current_map->IsJSGlobalObjectMap()) {
- if (fill_array) {
- Handle<JSGlobalObject> global = Handle<JSGlobalObject>::cast(current);
- Handle<PropertyCell> cell = JSGlobalObject::EnsureEmptyPropertyCell(
- global, name, PropertyCellType::kInvalidated);
- DCHECK(cell->value()->IsTheHole(isolate));
- Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
- array->set(first_index + checks_count, *weak_cell);
- }
- checks_count++;
-
- } else if (current_map->is_dictionary_map()) {
- DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
- if (fill_array) {
- DCHECK_EQ(NameDictionary::kNotFound,
- current->property_dictionary()->FindEntry(name));
- Handle<WeakCell> weak_cell =
- Map::GetOrCreatePrototypeWeakCell(current, isolate);
- array->set(first_index + checks_count, *weak_cell);
+ if (fill_handler) {
+ handler->set_data1(*data1);
+ }
+ Handle<Object> data2;
+ if (maybe_data2.ToHandle(&data2)) {
+ if (fill_handler) {
+ // This value will go either to data2 or data3 slot depending on whether
+ // data2 slot is already occupied by native context.
+ if (checks_count == 0) {
+ handler->set_data2(*data2);
+ } else {
+ DCHECK_EQ(1, checks_count);
+ handler->set_data3(*data2);
}
- checks_count++;
}
+ checks_count++;
}
return checks_count;
}
@@ -93,10 +84,24 @@ int InitPrototypeChecks(Isolate* isolate, Handle<Map> receiver_map,
// checked.
// Returns -1 if the handler has to be compiled, or the number of prototype
// checks otherwise.
-int GetPrototypeCheckCount(Isolate* isolate, Handle<Map> receiver_map,
- Handle<JSReceiver> holder, Handle<Name> name) {
- return InitPrototypeChecks<false>(isolate, receiver_map, holder, name,
- Handle<FixedArray>(), 0);
+template <typename ICHandler>
+int GetPrototypeCheckCount(
+ Isolate* isolate, Handle<Smi>* smi_handler, Handle<Map> receiver_map,
+ Handle<JSReceiver> holder, Handle<Object> data1,
+ MaybeHandle<Object> maybe_data2 = MaybeHandle<Object>()) {
+ DCHECK_NOT_NULL(smi_handler);
+ return InitPrototypeChecksImpl<ICHandler, false>(isolate, Handle<ICHandler>(),
+ smi_handler, receiver_map,
+ holder, data1, maybe_data2);
+}
+
+template <typename ICHandler>
+void InitPrototypeChecks(
+ Isolate* isolate, Handle<ICHandler> handler, Handle<Map> receiver_map,
+ Handle<JSReceiver> holder, Handle<Object> data1,
+ MaybeHandle<Object> maybe_data2 = MaybeHandle<Object>()) {
+ InitPrototypeChecksImpl<ICHandler, true>(
+ isolate, handler, nullptr, receiver_map, holder, data1, maybe_data2);
}
} // namespace
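
InitPrototypeChecksImpl above is deliberately instantiated twice: once with fill_handler = false to count the checks (and adjust the Smi handler), and once with fill_handler = true to populate the data fields of an appropriately sized handler. A standalone model of this count-then-fill pattern, assuming nothing about V8's types:

    // Illustrative two-pass count-then-fill template; stand-in types only.
    #include <cassert>
    #include <vector>

    template <bool fill>
    int InitChecks(std::vector<int>* out, const std::vector<int>& prototypes) {
      int checks_count = 0;
      for (int proto : prototypes) {
        if (fill) out->push_back(proto);  // never taken when fill is false
        ++checks_count;
      }
      return checks_count;
    }

    int main() {
      std::vector<int> prototypes{10, 20};
      int count = InitChecks<false>(nullptr, prototypes);  // pass 1: count only
      std::vector<int> data;
      InitChecks<true>(&data, prototypes);                 // pass 2: fill
      assert(static_cast<int>(data.size()) == count);
    }
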
@@ -105,65 +110,46 @@ int GetPrototypeCheckCount(Isolate* isolate, Handle<Map> receiver_map,
Handle<Object> LoadHandler::LoadFromPrototype(Isolate* isolate,
Handle<Map> receiver_map,
Handle<JSReceiver> holder,
- Handle<Name> name,
Handle<Smi> smi_handler,
- MaybeHandle<Object> maybe_data) {
- int checks_count =
- GetPrototypeCheckCount(isolate, receiver_map, holder, name);
- DCHECK_LE(0, checks_count);
-
- if (receiver_map->IsPrimitiveMap() ||
- receiver_map->is_access_check_needed()) {
- DCHECK(!receiver_map->is_dictionary_map());
- DCHECK_LE(1, checks_count); // For native context.
- smi_handler = EnableAccessCheckOnReceiver(isolate, smi_handler);
- } else if (receiver_map->is_dictionary_map() &&
- !receiver_map->IsJSGlobalObjectMap()) {
- smi_handler = EnableLookupOnReceiver(isolate, smi_handler);
+ MaybeHandle<Object> maybe_data1,
+ MaybeHandle<Object> maybe_data2) {
+ Handle<Object> data1;
+ if (!maybe_data1.ToHandle(&data1)) {
+ data1 = Map::GetOrCreatePrototypeWeakCell(holder, isolate);
}
- Handle<Cell> validity_cell =
- Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate);
- DCHECK(!validity_cell.is_null());
+ int checks_count = GetPrototypeCheckCount<LoadHandler>(
+ isolate, &smi_handler, receiver_map, holder, data1, maybe_data2);
- Handle<Object> data;
- if (!maybe_data.ToHandle(&data)) {
- data = Map::GetOrCreatePrototypeWeakCell(holder, isolate);
+ Handle<Object> validity_cell =
+ Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate);
+ if (validity_cell.is_null()) {
+ // Although in the kApiGetter case we load from the receiver, we still
+ // have to use the "prototype" shape of a handler in order to provide
+ // additional data to the dispatcher.
+ DCHECK_EQ(kApiGetter, GetHandlerKind(*smi_handler));
+ validity_cell = handle(Smi::kZero, isolate);
}
- if (checks_count == 0) {
- return isolate->factory()->NewTuple3(data, smi_handler, validity_cell,
- TENURED);
- }
- Handle<FixedArray> handler_array(isolate->factory()->NewFixedArray(
- kFirstPrototypeIndex + checks_count, TENURED));
- handler_array->set(kSmiHandlerIndex, *smi_handler);
- handler_array->set(kValidityCellIndex, *validity_cell);
- handler_array->set(kDataIndex, *data);
- InitPrototypeChecks(isolate, receiver_map, holder, name, handler_array,
- kFirstPrototypeIndex);
- return handler_array;
+ int data_count = 1 + checks_count;
+ Handle<LoadHandler> handler = isolate->factory()->NewLoadHandler(data_count);
+
+ handler->set_smi_handler(*smi_handler);
+ handler->set_validity_cell(*validity_cell);
+ InitPrototypeChecks(isolate, handler, receiver_map, holder, data1,
+ maybe_data2);
+ return handler;
}
// static
Handle<Object> LoadHandler::LoadFullChain(Isolate* isolate,
Handle<Map> receiver_map,
Handle<Object> holder,
- Handle<Name> name,
Handle<Smi> smi_handler) {
- Handle<JSReceiver> end; // null handle
- int checks_count = GetPrototypeCheckCount(isolate, receiver_map, end, name);
- DCHECK_LE(0, checks_count);
-
- if (receiver_map->IsPrimitiveMap() ||
- receiver_map->is_access_check_needed()) {
- DCHECK(!receiver_map->is_dictionary_map());
- DCHECK_LE(1, checks_count); // For native context.
- smi_handler = EnableAccessCheckOnReceiver(isolate, smi_handler);
- } else if (receiver_map->is_dictionary_map() &&
- !receiver_map->IsJSGlobalObjectMap()) {
- smi_handler = EnableLookupOnReceiver(isolate, smi_handler);
- }
+ Handle<JSReceiver> end; // null handle means a full prototype chain lookup.
+ Handle<Object> data1 = holder;
+ int checks_count = GetPrototypeCheckCount<LoadHandler>(
+ isolate, &smi_handler, receiver_map, end, data1);
Handle<Object> validity_cell =
Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate);
@@ -174,18 +160,13 @@ Handle<Object> LoadHandler::LoadFullChain(Isolate* isolate,
validity_cell = handle(Smi::kZero, isolate);
}
- Factory* factory = isolate->factory();
- if (checks_count == 0) {
- return factory->NewTuple3(holder, smi_handler, validity_cell, TENURED);
- }
- Handle<FixedArray> handler_array(factory->NewFixedArray(
- LoadHandler::kFirstPrototypeIndex + checks_count, TENURED));
- handler_array->set(kSmiHandlerIndex, *smi_handler);
- handler_array->set(kValidityCellIndex, *validity_cell);
- handler_array->set(kDataIndex, *holder);
- InitPrototypeChecks(isolate, receiver_map, end, name, handler_array,
- kFirstPrototypeIndex);
- return handler_array;
+ int data_count = 1 + checks_count;
+ Handle<LoadHandler> handler = isolate->factory()->NewLoadHandler(data_count);
+
+ handler->set_smi_handler(*smi_handler);
+ handler->set_validity_cell(*validity_cell);
+ InitPrototypeChecks(isolate, handler, receiver_map, end, data1);
+ return handler;
}
// static
@@ -218,7 +199,11 @@ Handle<Object> StoreHandler::StoreElementTransition(
validity_cell = handle(Smi::kZero, isolate);
}
Handle<WeakCell> cell = Map::WeakCellForMap(transition);
- return isolate->factory()->NewTuple3(cell, stub, validity_cell, TENURED);
+ Handle<StoreHandler> handler = isolate->factory()->NewStoreHandler(1);
+ handler->set_smi_handler(*stub);
+ handler->set_validity_cell(*validity_cell);
+ handler->set_data1(*cell);
+ return handler;
}
Handle<Smi> StoreHandler::StoreTransition(Isolate* isolate,
@@ -248,19 +233,16 @@ Handle<Smi> StoreHandler::StoreTransition(Isolate* isolate,
// static
Handle<Object> StoreHandler::StoreThroughPrototype(
Isolate* isolate, Handle<Map> receiver_map, Handle<JSReceiver> holder,
- Handle<Name> name, Handle<Smi> smi_handler,
- MaybeHandle<Object> maybe_data) {
- int checks_count =
- GetPrototypeCheckCount(isolate, receiver_map, holder, name);
-
- DCHECK_LE(0, checks_count);
-
- if (receiver_map->is_access_check_needed()) {
- DCHECK(!receiver_map->is_dictionary_map());
- DCHECK_LE(1, checks_count); // For native context.
- smi_handler = EnableAccessCheckOnReceiver(isolate, smi_handler);
+ Handle<Smi> smi_handler, MaybeHandle<Object> maybe_data1,
+ MaybeHandle<Object> maybe_data2) {
+ Handle<Object> data1;
+ if (!maybe_data1.ToHandle(&data1)) {
+ data1 = Map::GetOrCreatePrototypeWeakCell(holder, isolate);
}
+ int checks_count = GetPrototypeCheckCount<StoreHandler>(
+ isolate, &smi_handler, receiver_map, holder, data1, maybe_data2);
+
Handle<Object> validity_cell =
Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate);
if (validity_cell.is_null()) {
@@ -268,23 +250,15 @@ Handle<Object> StoreHandler::StoreThroughPrototype(
validity_cell = handle(Smi::kZero, isolate);
}
- Handle<Object> data;
- if (!maybe_data.ToHandle(&data)) {
- data = Map::GetOrCreatePrototypeWeakCell(holder, isolate);
- }
+ int data_count = 1 + checks_count;
+ Handle<StoreHandler> handler =
+ isolate->factory()->NewStoreHandler(data_count);
- Factory* factory = isolate->factory();
- if (checks_count == 0) {
- return factory->NewTuple3(data, smi_handler, validity_cell, TENURED);
- }
- Handle<FixedArray> handler_array(
- factory->NewFixedArray(kFirstPrototypeIndex + checks_count, TENURED));
- handler_array->set(kSmiHandlerIndex, *smi_handler);
- handler_array->set(kValidityCellIndex, *validity_cell);
- handler_array->set(kDataIndex, *data);
- InitPrototypeChecks(isolate, receiver_map, holder, name, handler_array,
- kFirstPrototypeIndex);
- return handler_array;
+ handler->set_smi_handler(*smi_handler);
+ handler->set_validity_cell(*validity_cell);
+ InitPrototypeChecks(isolate, handler, receiver_map, holder, data1,
+ maybe_data2);
+ return handler;
}
// static
@@ -297,68 +271,35 @@ Handle<Object> StoreHandler::StoreGlobal(Isolate* isolate,
Handle<Object> StoreHandler::StoreProxy(Isolate* isolate,
Handle<Map> receiver_map,
Handle<JSProxy> proxy,
- Handle<JSReceiver> receiver,
- Handle<Name> name) {
+ Handle<JSReceiver> receiver) {
Handle<Smi> smi_handler = StoreProxy(isolate);
if (receiver.is_identical_to(proxy)) return smi_handler;
Handle<WeakCell> holder_cell = isolate->factory()->NewWeakCell(proxy);
- return StoreThroughPrototype(isolate, receiver_map, proxy, name, smi_handler,
+ return StoreThroughPrototype(isolate, receiver_map, proxy, smi_handler,
holder_cell);
}
Object* StoreHandler::ValidHandlerOrNull(Object* raw_handler, Name* name,
Handle<Map>* out_transition) {
- STATIC_ASSERT(kValidityCellOffset == Tuple3::kValue3Offset);
-
Smi* valid = Smi::FromInt(Map::kPrototypeChainValid);
- if (raw_handler->IsTuple3()) {
- // Check validity cell.
- Tuple3* handler = Tuple3::cast(raw_handler);
+ DCHECK(raw_handler->IsStoreHandler());
- Object* raw_validity_cell = handler->value3();
- // |raw_validity_cell| can be Smi::kZero if no validity cell is required
- // (which counts as valid).
- if (raw_validity_cell->IsCell() &&
- Cell::cast(raw_validity_cell)->value() != valid) {
- return nullptr;
- }
+ // Check validity cell.
+ StoreHandler* handler = StoreHandler::cast(raw_handler);
- } else {
- DCHECK(raw_handler->IsFixedArrayExact());
- FixedArray* handler = FixedArray::cast(raw_handler);
-
- // Check validity cell.
- Object* value = Cell::cast(handler->get(kValidityCellIndex))->value();
- if (value != valid) return nullptr;
-
- // Check prototypes.
- Heap* heap = handler->GetHeap();
- Isolate* isolate = heap->isolate();
- Handle<Name> name_handle(name, isolate);
- for (int i = kFirstPrototypeIndex; i < handler->length(); i++) {
- // This mirrors AccessorAssembler::CheckPrototype.
- WeakCell* prototype_cell = WeakCell::cast(handler->get(i));
- if (prototype_cell->cleared()) return nullptr;
- HeapObject* maybe_prototype = HeapObject::cast(prototype_cell->value());
- if (maybe_prototype->IsPropertyCell()) {
- Object* value = PropertyCell::cast(maybe_prototype)->value();
- if (value != heap->the_hole_value()) return nullptr;
- } else {
- DCHECK(maybe_prototype->map()->is_dictionary_map());
- // Do a negative dictionary lookup.
- NameDictionary* dict =
- JSObject::cast(maybe_prototype)->property_dictionary();
- int number = dict->FindEntry(isolate, name_handle);
- if (number != NameDictionary::kNotFound) {
- PropertyDetails details = dict->DetailsAt(number);
- if (details.IsReadOnly()) return nullptr;
- if (details.kind() == PropertyKind::kAccessor) return nullptr;
- break;
- }
- }
- }
+ Object* raw_validity_cell = handler->validity_cell();
+ // |raw_validity_cell| can be Smi::kZero if no validity cell is required
+ // (which counts as valid).
+ if (raw_validity_cell->IsCell() &&
+ Cell::cast(raw_validity_cell)->value() != valid) {
+ return nullptr;
}
+ // We use this ValidHandlerOrNull() function only for transitioning store
+ // handlers, which are not applicable to receivers that require access checks.
+ DCHECK(handler->smi_handler()->IsSmi());
+ DCHECK(
+ !DoAccessCheckOnReceiverBits::decode(Smi::ToInt(handler->smi_handler())));
// Check if the transition target is deprecated.
WeakCell* target_cell = GetTransitionCell(raw_handler);
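
The validity-cell logic above treats Smi::kZero as "no cell required" (always valid) and otherwise requires the cell to still hold Map::kPrototypeChainValid. A standalone model of that protocol, with stand-in types:

    // Illustrative model of the validity-cell check; not V8's actual types.
    #include <iostream>

    const int kPrototypeChainValid = 0;

    struct Cell { int value; };

    struct Handler {
      bool has_cell;  // false models the Smi::kZero "no cell" case
      Cell cell;      // only meaningful when has_cell is true
    };

    bool IsStillValid(const Handler& h) {
      if (!h.has_cell) return true;                 // no cell counts as valid
      return h.cell.value == kPrototypeChainValid;  // cell must still be valid
    }

    int main() {
      std::cout << IsStillValid({false, {0}}) << "\n";  // 1
      std::cout << IsStillValid({true, {7}}) << "\n";   // 0: chain was modified
    }
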
diff --git a/deps/v8/src/ic/handler-configuration.h b/deps/v8/src/ic/handler-configuration.h
index 3d0990e826..514a5ed5fa 100644
--- a/deps/v8/src/ic/handler-configuration.h
+++ b/deps/v8/src/ic/handler-configuration.h
@@ -9,14 +9,25 @@
#include "src/field-index.h"
#include "src/globals.h"
#include "src/objects.h"
+#include "src/objects/data-handler.h"
#include "src/utils.h"
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
namespace v8 {
namespace internal {
-// A set of bit fields representing Smi handlers for loads.
-class LoadHandler {
+// A set of bit fields representing Smi handlers for loads and a HeapObject
+// that represents load handlers that can't be encoded in a Smi.
+// TODO(ishell): move to load-handler.h
+class LoadHandler final : public DataHandler {
public:
+ DECL_CAST(LoadHandler)
+
+ DECL_PRINTER(LoadHandler)
+ DECL_VERIFIER(LoadHandler)
+
enum Kind {
kElement,
kIndexedString,
@@ -72,7 +83,8 @@ class LoadHandler {
//
// Encoding when KindBits contains kElement or kIndexedString.
//
- class AllowOutOfBoundsBits : public BitField<bool, KindBits::kNext, 1> {};
+ class AllowOutOfBoundsBits
+ : public BitField<bool, LookupOnReceiverBits::kNext, 1> {};
//
// Encoding when KindBits contains kElement.
@@ -88,23 +100,9 @@ class LoadHandler {
//
// Encoding when KindBits contains kModuleExport.
//
- class ExportsIndexBits : public BitField<unsigned, KindBits::kNext,
- kSmiValueSize - KindBits::kNext> {};
-
- // The layout of an Tuple3 handler representing a load of a field from
- // prototype when prototype chain checks do not include non-existing lookups
- // or access checks.
- static const int kDataOffset = Tuple3::kValue1Offset;
- static const int kSmiHandlerOffset = Tuple3::kValue2Offset;
- static const int kValidityCellOffset = Tuple3::kValue3Offset;
-
- // The layout of an array handler representing a load of a field from
- // prototype when prototype chain checks include non-existing lookups and
- // access checks.
- static const int kSmiHandlerIndex = 0;
- static const int kValidityCellIndex = 1;
- static const int kDataIndex = 2;
- static const int kFirstPrototypeIndex = 3;
+ class ExportsIndexBits
+ : public BitField<unsigned, LookupOnReceiverBits::kNext,
+ kSmiValueSize - LookupOnReceiverBits::kNext> {};
// Decodes kind from Smi-handler.
static inline Kind GetHandlerKind(Smi* smi_handler);
@@ -149,7 +147,7 @@ class LoadHandler {
// needed (e.g., for "nonexistent"), null_value() may be passed in.
static Handle<Object> LoadFullChain(Isolate* isolate,
Handle<Map> receiver_map,
- Handle<Object> holder, Handle<Name> name,
+ Handle<Object> holder,
Handle<Smi> smi_handler);
// Creates a data handler that represents a prototype chain check followed
@@ -157,8 +155,9 @@ class LoadHandler {
// Can be used only if GetPrototypeCheckCount() returns a non-negative value.
static Handle<Object> LoadFromPrototype(
Isolate* isolate, Handle<Map> receiver_map, Handle<JSReceiver> holder,
- Handle<Name> name, Handle<Smi> smi_handler,
- MaybeHandle<Object> maybe_data = MaybeHandle<Object>());
+ Handle<Smi> smi_handler,
+ MaybeHandle<Object> maybe_data1 = MaybeHandle<Object>(),
+ MaybeHandle<Object> maybe_data2 = MaybeHandle<Object>());
// Creates a Smi-handler for loading a non-existent property. Works only as
// a part of a prototype chain check.
@@ -177,22 +176,18 @@ class LoadHandler {
// Decodes the KeyedAccessLoadMode from a {handler}.
static KeyedAccessLoadMode GetKeyedAccessLoadMode(Object* handler);
-
- private:
- // Sets DoAccessCheckOnReceiverBits in given Smi-handler. The receiver
- // check is a part of a prototype chain check.
- static inline Handle<Smi> EnableAccessCheckOnReceiver(
- Isolate* isolate, Handle<Smi> smi_handler);
-
- // Sets LookupOnReceiverBits in given Smi-handler. The receiver
- // check is a part of a prototype chain check.
- static inline Handle<Smi> EnableLookupOnReceiver(Isolate* isolate,
- Handle<Smi> smi_handler);
};
-// A set of bit fields representing Smi handlers for stores.
-class StoreHandler {
+// A set of bit fields representing Smi handlers for stores and a HeapObject
+// that represents store handlers that can't be encoded in a Smi.
+// TODO(ishell): move to store-handler.h
+class StoreHandler final : public DataHandler {
public:
+ DECL_CAST(StoreHandler)
+
+ DECL_PRINTER(StoreHandler)
+ DECL_VERIFIER(StoreHandler)
+
enum Kind {
kElement,
kField,
@@ -213,21 +208,24 @@ class StoreHandler {
enum FieldRepresentation { kSmi, kDouble, kHeapObject, kTagged };
- static inline bool IsHandler(Object* maybe_handler);
-
// Applicable to the kGlobalProxy and kProxy kinds.
// Defines whether access rights check should be done on receiver object.
class DoAccessCheckOnReceiverBits
: public BitField<bool, KindBits::kNext, 1> {};
+ // Defines whether a lookup should be done on the receiver object before
+ // proceeding to the prototype chain. Applicable only to named property kinds
+ // when storing through the prototype chain. Ignored when storing to the
+ // holder.
+ class LookupOnReceiverBits
+ : public BitField<bool, DoAccessCheckOnReceiverBits::kNext, 1> {};
+
// Applicable to kField, kTransitionToField and kTransitionToConstant
// kinds.
// Index of a value entry in the descriptor array.
- class DescriptorBits
- : public BitField<unsigned, DoAccessCheckOnReceiverBits::kNext,
- kDescriptorIndexBitCount> {};
+ class DescriptorBits : public BitField<unsigned, LookupOnReceiverBits::kNext,
+ kDescriptorIndexBitCount> {};
//
// Encoding when KindBits contains kTransitionToConstant.
//
@@ -249,24 +247,10 @@ class StoreHandler {
// Make sure we don't overflow the smi.
STATIC_ASSERT(FieldIndexBits::kNext <= kSmiValueSize);
- // The layout of an Tuple3 handler representing a transitioning store
- // when prototype chain checks do not include non-existing lookups or access
- // checks.
- static const int kDataOffset = Tuple3::kValue1Offset;
- static const int kSmiHandlerOffset = Tuple3::kValue2Offset;
- static const int kValidityCellOffset = Tuple3::kValue3Offset;
-
static inline WeakCell* GetTransitionCell(Object* handler);
static Object* ValidHandlerOrNull(Object* handler, Name* name,
Handle<Map>* out_transition);
- // The layout of an array handler representing a transitioning store
- // when prototype chain checks include non-existing lookups and access checks.
- static const int kSmiHandlerIndex = 0;
- static const int kValidityCellIndex = 1;
- static const int kDataIndex = 2;
- static const int kFirstPrototypeIndex = 3;
-
// Creates a Smi-handler for storing a field to fast object.
static inline Handle<Smi> StoreField(Isolate* isolate, int descriptor,
FieldIndex field_index,
@@ -289,8 +273,9 @@ class StoreHandler {
static Handle<Object> StoreThroughPrototype(
Isolate* isolate, Handle<Map> receiver_map, Handle<JSReceiver> holder,
- Handle<Name> name, Handle<Smi> smi_handler,
- MaybeHandle<Object> data = MaybeHandle<Object>());
+ Handle<Smi> smi_handler,
+ MaybeHandle<Object> maybe_data1 = MaybeHandle<Object>(),
+ MaybeHandle<Object> maybe_data2 = MaybeHandle<Object>());
static Handle<Object> StoreElementTransition(Isolate* isolate,
Handle<Map> receiver_map,
@@ -299,8 +284,7 @@ class StoreHandler {
static Handle<Object> StoreProxy(Isolate* isolate, Handle<Map> receiver_map,
Handle<JSProxy> proxy,
- Handle<JSReceiver> receiver,
- Handle<Name> name);
+ Handle<JSReceiver> receiver);
// Creates a handler for storing a property to the property cell of a global
// object.
@@ -317,11 +301,6 @@ class StoreHandler {
static inline Handle<Smi> StoreProxy(Isolate* isolate);
private:
- // Sets DoAccessCheckOnReceiverBits in given Smi-handler. The receiver
- // check is a part of a prototype chain check.
- static inline Handle<Smi> EnableAccessCheckOnReceiver(
- Isolate* isolate, Handle<Smi> smi_handler);
-
static inline Handle<Smi> StoreField(Isolate* isolate, Kind kind,
int descriptor, FieldIndex field_index,
Representation representation,
@@ -342,4 +321,6 @@ class StoreHandler {
} // namespace internal
} // namespace v8
+#include "src/objects/object-macros-undef.h"
+
#endif // V8_IC_HANDLER_CONFIGURATION_H_
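
After this change LoadHandler and StoreHandler are no longer ad-hoc Tuple3/FixedArray shapes but final subclasses of a common DataHandler layout: a Smi config, a validity cell, and up to three data slots. A rough standalone model (plain structs; the real objects are heap-allocated and GC-visible):

    // Illustrative model of the shared DataHandler shape; not V8's objects.
    #include <iostream>

    struct DataHandler {
      int smi_handler = 0;
      int validity_cell = 0;
      int data1 = 0, data2 = 0, data3 = 0;
    };

    struct LoadHandler final : DataHandler {};
    struct StoreHandler final : DataHandler {};

    template <typename Handler>
    Handler MakeHandler(int smi_config, int validity_cell, int data1) {
      Handler handler;
      handler.smi_handler = smi_config;
      handler.validity_cell = validity_cell;
      handler.data1 = data1;  // e.g. a transition cell for store handlers
      return handler;
    }

    int main() {
      StoreHandler store = MakeHandler<StoreHandler>(3, 0, 99);
      std::cout << store.data1 << "\n";
    }
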
diff --git a/deps/v8/src/ic/ic-inl.h b/deps/v8/src/ic/ic-inl.h
index e705d38679..d6fa23611e 100644
--- a/deps/v8/src/ic/ic-inl.h
+++ b/deps/v8/src/ic/ic-inl.h
@@ -41,8 +41,7 @@ Address IC::raw_constant_pool() const {
bool IC::IsHandler(Object* object) {
- return (object->IsSmi() && (object != nullptr)) || object->IsTuple2() ||
- object->IsTuple3() || object->IsFixedArrayExact() ||
+ return (object->IsSmi() && (object != nullptr)) || object->IsDataHandler() ||
object->IsWeakCell() || object->IsCode();
}
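
IC::IsHandler now only needs to distinguish Smis, DataHandlers, WeakCells, and Code objects. The Smi case is cheap because V8 tags small integers in the word itself; a hedged sketch of the common one-tag-bit scheme (details vary by architecture):

    // Illustrative Smi tagging; assumes the usual low-bit scheme.
    #include <cstdint>
    #include <iostream>

    bool IsSmi(uintptr_t tagged) { return (tagged & 1) == 0; }
    uintptr_t ToSmi(intptr_t value) { return static_cast<uintptr_t>(value) << 1; }
    intptr_t SmiValue(uintptr_t tagged) {
      return static_cast<intptr_t>(tagged) >> 1;  // shift restores the value
    }

    int main() {
      uintptr_t handler = ToSmi(21);
      std::cout << IsSmi(handler) << " " << SmiValue(handler) << "\n";  // 1 21
    }
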
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index 7e3e6556a1..62a2e7cf59 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -255,7 +255,7 @@ bool IC::ShouldRecomputeHandler(Handle<String> name) {
// This is a contextual access, always just update the handler and stay
// monomorphic.
- if (IsLoadGlobalIC()) return true;
+ if (IsGlobalIC()) return true;
// The current map wasn't handled yet. There's no reason to stay monomorphic,
// *unless* we're moving from a deprecated map to its replacement, or
@@ -395,6 +395,11 @@ void IC::ConfigureVectorState(Handle<Name> name, Handle<Map> map,
if (IsLoadGlobalIC()) {
LoadGlobalICNexus* nexus = casted_nexus<LoadGlobalICNexus>();
nexus->ConfigureHandlerMode(handler);
+
+ } else if (IsStoreGlobalIC()) {
+ StoreGlobalICNexus* nexus = casted_nexus<StoreGlobalICNexus>();
+ nexus->ConfigureHandlerMode(handler);
+
} else {
// Non-keyed ICs don't track the name explicitly.
if (!is_keyed()) name = Handle<Name>::null();
@@ -408,7 +413,7 @@ void IC::ConfigureVectorState(Handle<Name> name, Handle<Map> map,
void IC::ConfigureVectorState(Handle<Name> name, MapHandles const& maps,
ObjectHandles* handlers) {
- DCHECK(!IsLoadGlobalIC());
+ DCHECK(!IsGlobalIC());
// Non-keyed ICs don't track the name explicitly.
if (!is_keyed()) name = Handle<Name>::null();
nexus()->ConfigurePolymorphic(name, maps, handlers);
@@ -486,10 +491,16 @@ MaybeHandle<Object> LoadGlobalIC::Load(Handle<Name> name) {
return ReferenceError(name);
}
- if (FLAG_use_ic && LoadScriptContextFieldStub::Accepted(&lookup_result)) {
- TRACE_HANDLER_STATS(isolate(), LoadIC_LoadScriptContextFieldStub);
- LoadScriptContextFieldStub stub(isolate(), &lookup_result);
- PatchCache(name, stub.GetCode());
+ if (FLAG_use_ic) {
+ LoadGlobalICNexus* nexus = casted_nexus<LoadGlobalICNexus>();
+ if (nexus->ConfigureLexicalVarMode(lookup_result.context_index,
+ lookup_result.slot_index)) {
+ TRACE_HANDLER_STATS(isolate(), LoadGlobalIC_LoadScriptContextField);
+ } else {
+ // The given combination of indices can't be encoded, so use the slow stub.
+ TRACE_HANDLER_STATS(isolate(), LoadGlobalIC_SlowStub);
+ PatchCache(name, slow_stub());
+ }
TRACE_IC("LoadGlobalIC", name);
}
return result;
@@ -623,7 +634,7 @@ void IC::PatchCache(Handle<Name> name, Handle<Object> handler) {
break;
case RECOMPUTE_HANDLER:
case MONOMORPHIC:
- if (IsLoadGlobalIC()) {
+ if (IsGlobalIC()) {
UpdateMonomorphicIC(handler, name);
break;
}
@@ -664,7 +675,7 @@ void LoadIC::UpdateCaches(LookupIterator* lookup) {
Handle<Smi> smi_handler = LoadHandler::LoadNonExistent(isolate());
code = LoadHandler::LoadFullChain(isolate(), receiver_map(),
isolate()->factory()->null_value(),
- lookup->name(), smi_handler);
+ smi_handler);
} else {
if (IsLoadGlobalIC()) {
if (lookup->TryLookupCachedProperty()) {
@@ -754,7 +765,7 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
}
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadNonMaskingInterceptorDH);
return LoadHandler::LoadFullChain(isolate(), map, holder_ref,
- lookup->name(), smi_handler);
+ smi_handler);
}
if (receiver_is_holder) {
@@ -765,7 +776,7 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadInterceptorFromPrototypeDH);
return LoadHandler::LoadFromPrototype(isolate(), map, holder,
- lookup->name(), smi_handler);
+ smi_handler);
}
case LookupIterator::ACCESSOR: {
@@ -831,12 +842,10 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
isolate()->factory()->NewWeakCell(context);
Handle<WeakCell> data_cell = isolate()->factory()->NewWeakCell(
call_optimization.api_call_info());
- Handle<Tuple2> data =
- isolate()->factory()->NewTuple2(context_cell, data_cell, TENURED);
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadApiGetterFromPrototypeDH);
return LoadHandler::LoadFromPrototype(
- isolate(), map, holder, lookup->name(), smi_handler, data);
+ isolate(), map, holder, smi_handler, data_cell, context_cell);
}
if (holder->HasFastProperties()) {
@@ -851,8 +860,8 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
smi_handler = LoadHandler::LoadGlobal(isolate());
Handle<WeakCell> cell =
isolate()->factory()->NewWeakCell(lookup->GetPropertyCell());
- return LoadHandler::LoadFromPrototype(
- isolate(), map, holder, lookup->name(), smi_handler, cell);
+ return LoadHandler::LoadFromPrototype(isolate(), map, holder,
+ smi_handler, cell);
} else {
smi_handler = LoadHandler::LoadNormal(isolate());
@@ -862,7 +871,7 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
}
return LoadHandler::LoadFromPrototype(isolate(), map, holder,
- lookup->name(), smi_handler);
+ smi_handler);
}
Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(accessors);
@@ -882,7 +891,7 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
TRACE_HANDLER_STATS(isolate(),
LoadIC_LoadNativeDataPropertyFromPrototypeDH);
return LoadHandler::LoadFromPrototype(isolate(), map, holder,
- lookup->name(), smi_handler);
+ smi_handler);
}
case LookupIterator::DATA: {
@@ -896,8 +905,8 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
smi_handler = LoadHandler::LoadGlobal(isolate());
Handle<WeakCell> cell =
isolate()->factory()->NewWeakCell(lookup->GetPropertyCell());
- return LoadHandler::LoadFromPrototype(
- isolate(), map, holder, lookup->name(), smi_handler, cell);
+ return LoadHandler::LoadFromPrototype(isolate(), map, holder,
+ smi_handler, cell);
}
smi_handler = LoadHandler::LoadNormal(isolate());
@@ -920,7 +929,7 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadConstantFromPrototypeDH);
}
return LoadHandler::LoadFromPrototype(isolate(), map, holder,
- lookup->name(), smi_handler);
+ smi_handler);
}
case LookupIterator::INTEGER_INDEXED_EXOTIC:
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadIntegerIndexedExoticDH);
@@ -933,7 +942,7 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
return smi_handler;
}
return LoadHandler::LoadFromPrototype(isolate(), map, holder_proxy,
- lookup->name(), smi_handler);
+ smi_handler);
}
case LookupIterator::ACCESS_CHECK:
case LookupIterator::NOT_FOUND:
@@ -957,8 +966,6 @@ static Handle<Object> TryConvertKey(Handle<Object> key, Isolate* isolate) {
key = handle(Smi::FromInt(int_value), isolate);
}
}
- } else if (key->IsUndefined(isolate)) {
- key = isolate->factory()->undefined_string();
} else if (key->IsString()) {
key = isolate->factory()->InternalizeString(Handle<String>::cast(key));
}
@@ -1287,15 +1294,13 @@ bool StoreIC::LookupForWrite(LookupIterator* it, Handle<Object> value,
return it->IsCacheableTransition();
}
-MaybeHandle<Object> StoreGlobalIC::Store(Handle<Object> object,
- Handle<Name> name,
+MaybeHandle<Object> StoreGlobalIC::Store(Handle<Name> name,
Handle<Object> value) {
- DCHECK(object->IsJSGlobalObject());
DCHECK(name->IsString());
// Look up in script context table.
Handle<String> str_name = Handle<String>::cast(name);
- Handle<JSGlobalObject> global = Handle<JSGlobalObject>::cast(object);
+ Handle<JSGlobalObject> global = isolate()->global_object();
Handle<ScriptContextTable> script_contexts(
global->native_context()->script_context_table());
@@ -1304,7 +1309,7 @@ MaybeHandle<Object> StoreGlobalIC::Store(Handle<Object> object,
Handle<Context> script_context = ScriptContextTable::GetContext(
script_contexts, lookup_result.context_index);
if (lookup_result.mode == CONST) {
- return TypeError(MessageTemplate::kConstAssign, object, name);
+ return TypeError(MessageTemplate::kConstAssign, global, name);
}
Handle<Object> previous_value =
@@ -1316,17 +1321,24 @@ MaybeHandle<Object> StoreGlobalIC::Store(Handle<Object> object,
return ReferenceError(name);
}
- if (FLAG_use_ic && StoreScriptContextFieldStub::Accepted(&lookup_result)) {
- TRACE_HANDLER_STATS(isolate(), StoreIC_StoreScriptContextFieldStub);
- StoreScriptContextFieldStub stub(isolate(), &lookup_result);
- PatchCache(name, stub.GetCode());
+ if (FLAG_use_ic) {
+ StoreGlobalICNexus* nexus = casted_nexus<StoreGlobalICNexus>();
+ if (nexus->ConfigureLexicalVarMode(lookup_result.context_index,
+ lookup_result.slot_index)) {
+ TRACE_HANDLER_STATS(isolate(), StoreGlobalIC_StoreScriptContextField);
+ } else {
+ // The given combination of indices can't be encoded, so use the slow stub.
+ TRACE_HANDLER_STATS(isolate(), StoreGlobalIC_SlowStub);
+ PatchCache(name, slow_stub());
+ }
+ TRACE_IC("StoreGlobalIC", name);
}
script_context->set(lookup_result.slot_index, *value);
return value;
}
- return StoreIC::Store(object, name, value);
+ return StoreIC::Store(global, name, value);
}
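
StoreGlobalIC::Store above checks the script-context (lexical) bindings first, throwing a TypeError for const assignments and a ReferenceError for uninitialized (hole) slots, and only then falls through to an ordinary property store on the global object. A standalone model of that flow (stand-in containers, not V8 types):

    // Illustrative model of the store-global flow; stand-in containers only.
    #include <iostream>
    #include <map>
    #include <string>

    struct LexicalSlot { bool is_const; bool initialized; int value; };

    std::map<std::string, LexicalSlot> script_context;  // lexical bindings
    std::map<std::string, int> global_object;           // ordinary properties

    const char* StoreGlobal(const std::string& name, int value) {
      auto it = script_context.find(name);
      if (it != script_context.end()) {
        if (it->second.is_const) return "TypeError: assignment to constant";
        if (!it->second.initialized) return "ReferenceError: not initialized";
        it->second.value = value;
        return "stored in script context";
      }
      global_object[name] = value;  // fall through to the ordinary store
      return "stored on the global object";
    }

    int main() {
      script_context["x"] = LexicalSlot{true, true, 1};
      std::cout << StoreGlobal("x", 2) << "\n";  // TypeError
      std::cout << StoreGlobal("y", 3) << "\n";  // global object
    }
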
MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
@@ -1381,7 +1393,7 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
void StoreIC::UpdateCaches(LookupIterator* lookup, Handle<Object> value,
JSReceiver::StoreFromKeyed store_mode,
MaybeHandle<Object> cached_handler) {
- if (state() == UNINITIALIZED) {
+ if (state() == UNINITIALIZED && !IsStoreGlobalIC()) {
// This is the first time we execute this inline cache. Set the target to
// the pre monomorphic stub to delay setting the monomorphic state.
TRACE_HANDLER_STATS(isolate(), StoreIC_Premonomorphic);
@@ -1394,6 +1406,17 @@ void StoreIC::UpdateCaches(LookupIterator* lookup, Handle<Object> value,
if (!cached_handler.is_null()) {
handler = cached_handler.ToHandleChecked();
} else if (LookupForWrite(lookup, value, store_mode)) {
+ if (IsStoreGlobalIC()) {
+ if (lookup->state() == LookupIterator::DATA &&
+ lookup->GetReceiver().is_identical_to(lookup->GetHolder<Object>())) {
+ DCHECK(lookup->GetReceiver()->IsJSGlobalObject());
+ // Now update the cell in the feedback vector.
+ StoreGlobalICNexus* nexus = casted_nexus<StoreGlobalICNexus>();
+ nexus->ConfigurePropertyCellMode(lookup->GetPropertyCell());
+ TRACE_IC("StoreGlobalIC", lookup->name());
+ return;
+ }
+ }
if (created_new_transition_) {
// The first time a transition is performed, there's a good chance that
// it won't be taken again, so don't bother creating a handler.
@@ -1432,8 +1455,7 @@ Handle<Object> StoreIC::ComputeHandler(LookupIterator* lookup) {
Handle<WeakCell> cell =
isolate()->factory()->NewWeakCell(lookup->transition_cell());
Handle<Object> handler = StoreHandler::StoreThroughPrototype(
- isolate(), receiver_map(), store_target, lookup->name(),
- smi_handler, cell);
+ isolate(), receiver_map(), store_target, smi_handler, cell);
return handler;
}
// Currently not handled by CompileStoreTransition.
@@ -1457,7 +1479,7 @@ Handle<Object> StoreIC::ComputeHandler(LookupIterator* lookup) {
Handle<WeakCell> cell = Map::WeakCellForMap(transition);
Handle<Object> handler = StoreHandler::StoreThroughPrototype(
- isolate(), receiver_map(), holder, lookup->name(), smi_handler, cell);
+ isolate(), receiver_map(), holder, smi_handler, cell);
TransitionsAccessor(receiver_map())
.UpdateHandler(*lookup->name(), *handler);
return handler;
@@ -1511,8 +1533,8 @@ Handle<Object> StoreIC::ComputeHandler(LookupIterator* lookup) {
if (receiver.is_identical_to(holder)) return smi_handler;
TRACE_HANDLER_STATS(isolate(),
StoreIC_StoreNativeDataPropertyOnPrototypeDH);
- return StoreHandler::StoreThroughPrototype(
- isolate(), receiver_map(), holder, lookup->name(), smi_handler);
+ return StoreHandler::StoreThroughPrototype(isolate(), receiver_map(),
+ holder, smi_handler);
} else if (accessors->IsAccessorPair()) {
Handle<Object> setter(Handle<AccessorPair>::cast(accessors)->setter(),
@@ -1539,12 +1561,10 @@ Handle<Object> StoreIC::ComputeHandler(LookupIterator* lookup) {
isolate()->factory()->NewWeakCell(context);
Handle<WeakCell> data_cell = isolate()->factory()->NewWeakCell(
call_optimization.api_call_info());
- Handle<Tuple2> data = isolate()->factory()->NewTuple2(
- context_cell, data_cell, TENURED);
TRACE_HANDLER_STATS(isolate(), StoreIC_StoreApiSetterOnPrototypeDH);
return StoreHandler::StoreThroughPrototype(
- isolate(), receiver_map(), holder, lookup->name(), smi_handler,
- data);
+ isolate(), receiver_map(), holder, smi_handler, data_cell,
+ context_cell);
}
TRACE_GENERIC_IC("incompatible receiver");
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
@@ -1562,8 +1582,8 @@ Handle<Object> StoreIC::ComputeHandler(LookupIterator* lookup) {
if (receiver.is_identical_to(holder)) return smi_handler;
TRACE_HANDLER_STATS(isolate(), StoreIC_StoreAccessorOnPrototypeDH);
- return StoreHandler::StoreThroughPrototype(
- isolate(), receiver_map(), holder, lookup->name(), smi_handler);
+ return StoreHandler::StoreThroughPrototype(isolate(), receiver_map(),
+ holder, smi_handler);
}
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
return slow_stub();
@@ -1614,7 +1634,7 @@ Handle<Object> StoreIC::ComputeHandler(LookupIterator* lookup) {
Handle<JSReceiver>::cast(lookup->GetReceiver());
Handle<JSProxy> holder = lookup->GetHolder<JSProxy>();
return StoreHandler::StoreProxy(isolate(), receiver_map(), holder,
- receiver, lookup->name());
+ receiver);
}
case LookupIterator::INTEGER_INDEXED_EXOTIC:
@@ -1788,6 +1808,7 @@ Handle<Object> KeyedStoreIC::StoreElementHandler(
return StoreHandler::StoreProxy(isolate());
}
+ // TODO(ishell): move to StoreHandler::StoreElement().
ElementsKind elements_kind = receiver_map->elements_kind();
bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE;
Handle<Code> stub;
@@ -1809,7 +1830,10 @@ Handle<Object> KeyedStoreIC::StoreElementHandler(
Handle<Object> validity_cell =
Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
if (validity_cell.is_null()) return stub;
- return isolate()->factory()->NewTuple2(validity_cell, stub, TENURED);
+ Handle<StoreHandler> handler = isolate()->factory()->NewStoreHandler(0);
+ handler->set_validity_cell(*validity_cell);
+ handler->set_smi_handler(*stub);
+ return handler;
}
void KeyedStoreIC::StoreElementPolymorphicHandlers(
@@ -2172,10 +2196,12 @@ RUNTIME_FUNCTION(Runtime_StoreIC_Miss) {
ic.UpdateState(receiver, key);
RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
} else if (IsStoreGlobalICKind(kind)) {
- StoreICNexus nexus(vector, vector_slot);
+ DCHECK_EQ(isolate->native_context()->global_proxy(), *receiver);
+ receiver = isolate->global_object();
+ StoreGlobalICNexus nexus(vector, vector_slot);
StoreGlobalIC ic(isolate, &nexus);
ic.UpdateState(receiver, key);
- RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
+ RETURN_RESULT_OR_FAILURE(isolate, ic.Store(key, value));
} else {
DCHECK(IsKeyedStoreICKind(kind));
KeyedStoreICNexus nexus(vector, vector_slot);
@@ -2185,6 +2211,22 @@ RUNTIME_FUNCTION(Runtime_StoreIC_Miss) {
}
}
+RUNTIME_FUNCTION(Runtime_StoreGlobalIC_Miss) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(4, args.length());
+ // Runtime functions don't follow the IC's calling convention.
+ Handle<Object> value = args.at(0);
+ Handle<Smi> slot = args.at<Smi>(1);
+ Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
+ Handle<Name> key = args.at<Name>(3);
+ FeedbackSlot vector_slot = vector->ToSlot(slot->value());
+ StoreGlobalICNexus nexus(vector, vector_slot);
+ StoreGlobalIC ic(isolate, &nexus);
+ Handle<JSGlobalObject> global = isolate->global_object();
+ ic.UpdateState(global, key);
+ RETURN_RESULT_OR_FAILURE(isolate, ic.Store(key, value));
+}
+
RUNTIME_FUNCTION(Runtime_StoreGlobalIC_Slow) {
HandleScope scope(isolate);
DCHECK_EQ(5, args.length());
@@ -2192,9 +2234,19 @@ RUNTIME_FUNCTION(Runtime_StoreGlobalIC_Slow) {
Handle<Object> value = args.at(0);
Handle<Smi> slot = args.at<Smi>(1);
Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
- Handle<Object> object = args.at(3);
CONVERT_ARG_HANDLE_CHECKED(String, name, 4);
+#ifdef DEBUG
+ {
+ FeedbackSlot vector_slot = vector->ToSlot(slot->value());
+ FeedbackSlotKind slot_kind = vector->GetKind(vector_slot);
+ DCHECK(IsStoreGlobalICKind(slot_kind));
+ Handle<Object> receiver = args.at(3);
+ DCHECK(receiver->IsJSGlobalProxy());
+ }
+#endif
+
+ Handle<JSGlobalObject> global = isolate->global_object();
Handle<Context> native_context = isolate->native_context();
Handle<ScriptContextTable> script_contexts(
native_context->script_context_table());
@@ -2205,7 +2257,7 @@ RUNTIME_FUNCTION(Runtime_StoreGlobalIC_Slow) {
script_contexts, lookup_result.context_index);
if (lookup_result.mode == CONST) {
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kConstAssign, object, name));
+ isolate, NewTypeError(MessageTemplate::kConstAssign, global, name));
}
Handle<Object> previous_value =
@@ -2224,7 +2276,7 @@ RUNTIME_FUNCTION(Runtime_StoreGlobalIC_Slow) {
LanguageMode language_mode = vector->GetLanguageMode(vector_slot);
RETURN_RESULT_OR_FAILURE(
isolate,
- Runtime::SetObjectProperty(isolate, object, name, value, language_mode));
+ Runtime::SetObjectProperty(isolate, global, name, value, language_mode));
}
// Used from ic-<arch>.cc.
@@ -2286,8 +2338,6 @@ RUNTIME_FUNCTION(Runtime_ElementsTransitionAndStoreIC_Miss) {
RUNTIME_FUNCTION(Runtime_Unreachable) {
UNREACHABLE();
- CHECK(false);
- return isolate->heap()->undefined_value();
}
@@ -2306,23 +2356,18 @@ RUNTIME_FUNCTION(Runtime_StoreCallbackProperty) {
language_mode));
}
- Handle<AccessorInfo> callback(
+ Handle<AccessorInfo> info(
callback_or_cell->IsWeakCell()
? AccessorInfo::cast(WeakCell::cast(*callback_or_cell)->value())
: AccessorInfo::cast(*callback_or_cell));
- DCHECK(callback->IsCompatibleReceiver(*receiver));
-
- Address setter_address = v8::ToCData<Address>(callback->setter());
- v8::AccessorNameSetterCallback fun =
- FUNCTION_CAST<v8::AccessorNameSetterCallback>(setter_address);
- DCHECK_NOT_NULL(fun);
+ DCHECK(info->IsCompatibleReceiver(*receiver));
ShouldThrow should_throw =
is_sloppy(language_mode) ? kDontThrow : kThrowOnError;
- PropertyCallbackArguments custom_args(isolate, callback->data(), *receiver,
- *holder, should_throw);
- custom_args.Call(fun, name, value);
+ PropertyCallbackArguments arguments(isolate, info->data(), *receiver, *holder,
+ should_throw);
+ arguments.CallAccessorSetter(info, name, value);
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
return *value;
}
@@ -2344,14 +2389,10 @@ RUNTIME_FUNCTION(Runtime_LoadPropertyWithInterceptor) {
isolate, receiver, Object::ConvertReceiver(isolate, receiver));
}
- InterceptorInfo* interceptor = holder->GetNamedInterceptor();
+ Handle<InterceptorInfo> interceptor(holder->GetNamedInterceptor(), isolate);
PropertyCallbackArguments arguments(isolate, interceptor->data(), *receiver,
*holder, kDontThrow);
-
- v8::GenericNamedPropertyGetterCallback getter =
- v8::ToCData<v8::GenericNamedPropertyGetterCallback>(
- interceptor->getter());
- Handle<Object> result = arguments.Call(getter, name);
+ Handle<Object> result = arguments.CallNamedGetter(interceptor, name);
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
@@ -2398,16 +2439,24 @@ RUNTIME_FUNCTION(Runtime_StorePropertyWithInterceptor) {
FeedbackSlot vector_slot = vector->ToSlot(slot->value());
LanguageMode language_mode = vector->GetLanguageMode(vector_slot);
- DCHECK(receiver->HasNamedInterceptor());
- InterceptorInfo* interceptor = receiver->GetNamedInterceptor();
+ // TODO(ishell): Cache interceptor_holder in the store handler like we do
+ // for the LoadHandler::kInterceptor case.
+ Handle<JSObject> interceptor_holder = receiver;
+ if (receiver->IsJSGlobalProxy()) {
+ FeedbackSlotKind kind = vector->GetKind(vector_slot);
+ if (IsStoreGlobalICKind(kind)) {
+ interceptor_holder = Handle<JSObject>::cast(isolate->global_object());
+ }
+ }
+ DCHECK(interceptor_holder->HasNamedInterceptor());
+ Handle<InterceptorInfo> interceptor(interceptor_holder->GetNamedInterceptor(),
+ isolate);
+
DCHECK(!interceptor->non_masking());
PropertyCallbackArguments arguments(isolate, interceptor->data(), *receiver,
*receiver, kDontThrow);
- v8::GenericNamedPropertySetterCallback setter =
- v8::ToCData<v8::GenericNamedPropertySetterCallback>(
- interceptor->setter());
- Handle<Object> result = arguments.Call(setter, name, value);
+ Handle<Object> result = arguments.CallNamedSetter(interceptor, name, value);
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
if (!result.is_null()) return *value;
@@ -2435,13 +2484,11 @@ RUNTIME_FUNCTION(Runtime_LoadElementWithInterceptor) {
DCHECK_GE(args.smi_at(1), 0);
uint32_t index = args.smi_at(1);
- InterceptorInfo* interceptor = receiver->GetIndexedInterceptor();
+ Handle<InterceptorInfo> interceptor(receiver->GetIndexedInterceptor(),
+ isolate);
PropertyCallbackArguments arguments(isolate, interceptor->data(), *receiver,
*receiver, kDontThrow);
-
- v8::IndexedPropertyGetterCallback getter =
- v8::ToCData<v8::IndexedPropertyGetterCallback>(interceptor->getter());
- Handle<Object> result = arguments.Call(getter, index);
+ Handle<Object> result = arguments.CallIndexedGetter(interceptor, index);
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
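
ConfigureLexicalVarMode, used in both the load and store paths above, succeeds only when the (context_index, slot_index) pair fits the feedback encoding; otherwise the IC installs the slow stub. A hedged sketch with illustrative bit widths (not V8's actual layout):

    // Illustrative packing of two indices into one feedback word.
    #include <cstdint>
    #include <iostream>

    const int kContextIndexBits = 12;  // widths chosen for illustration only
    const int kSlotIndexBits = 18;

    bool ConfigureLexicalVarMode(int context_index, int slot_index, uint32_t* out) {
      if (context_index >= (1 << kContextIndexBits)) return false;
      if (slot_index >= (1 << kSlotIndexBits)) return false;
      *out = (static_cast<uint32_t>(context_index) << kSlotIndexBits) |
             static_cast<uint32_t>(slot_index);
      return true;
    }

    int main() {
      uint32_t feedback;
      if (ConfigureLexicalVarMode(3, 17, &feedback)) {
        std::cout << "fast path, feedback word = " << feedback << "\n";
      } else {
        std::cout << "indices too large, install the slow stub\n";
      }
    }
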
diff --git a/deps/v8/src/ic/ic.h b/deps/v8/src/ic/ic.h
index acbfccd4c6..a63202395b 100644
--- a/deps/v8/src/ic/ic.h
+++ b/deps/v8/src/ic/ic.h
@@ -119,6 +119,7 @@ class IC {
bool IsTransitionOfMonomorphicTarget(Map* source_map, Map* target_map);
void PatchCache(Handle<Name> name, Handle<Object> code);
FeedbackSlotKind kind() const { return kind_; }
+ bool IsGlobalIC() const { return IsLoadGlobalIC() || IsStoreGlobalIC(); }
bool IsLoadIC() const { return IsLoadICKind(kind_); }
bool IsLoadGlobalIC() const { return IsLoadGlobalICKind(kind_); }
bool IsKeyedLoadIC() const { return IsKeyedLoadICKind(kind_); }
@@ -339,8 +340,7 @@ class StoreGlobalIC : public StoreIC {
StoreGlobalIC(Isolate* isolate, FeedbackNexus* nexus)
: StoreIC(isolate, nexus) {}
- MUST_USE_RESULT MaybeHandle<Object> Store(Handle<Object> object,
- Handle<Name> name,
+ MUST_USE_RESULT MaybeHandle<Object> Store(Handle<Name> name,
Handle<Object> value);
protected:
diff --git a/deps/v8/src/ic/keyed-store-generic.cc b/deps/v8/src/ic/keyed-store-generic.cc
index 4263dd8552..b9a11c2ec7 100644
--- a/deps/v8/src/ic/keyed-store-generic.cc
+++ b/deps/v8/src/ic/keyed-store-generic.cc
@@ -751,8 +751,8 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
Label stub_cache(this), fast_properties(this), dictionary_properties(this),
accessor(this), readonly(this);
Node* bitfield3 = LoadMapBitField3(receiver_map);
- Branch(IsSetWord32<Map::DictionaryMap>(bitfield3), &dictionary_properties,
- &fast_properties);
+ Branch(IsSetWord32<Map::IsDictionaryMapBit>(bitfield3),
+ &dictionary_properties, &fast_properties);
BIND(&fast_properties);
{
@@ -795,38 +795,22 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
{
Comment("lookup transition");
VARIABLE(var_handler, MachineRepresentation::kTagged);
- Label tuple3(this), fixedarray(this), found_handler(this, &var_handler);
+ Label check_key(this), found_handler(this, &var_handler);
Node* maybe_handler =
LoadObjectField(receiver_map, Map::kTransitionsOrPrototypeInfoOffset);
GotoIf(TaggedIsSmi(maybe_handler), notfound);
- Node* handler_map = LoadMap(maybe_handler);
- GotoIf(WordEqual(handler_map, Tuple3MapConstant()), &tuple3);
- GotoIf(WordEqual(handler_map, FixedArrayMapConstant()), &fixedarray);
+ GotoIf(HasInstanceType(maybe_handler, STORE_HANDLER_TYPE), &check_key);
// TODO(jkummerow): Consider implementing TransitionArray search.
Goto(notfound);
- VARIABLE(var_transition_cell, MachineRepresentation::kTagged);
- Label check_key(this, &var_transition_cell);
- BIND(&tuple3);
- {
- var_transition_cell.Bind(
- LoadObjectField(maybe_handler, StoreHandler::kDataOffset));
- Goto(&check_key);
- }
-
- BIND(&fixedarray);
- {
- var_transition_cell.Bind(
- LoadFixedArrayElement(maybe_handler, StoreHandler::kDataIndex));
- Goto(&check_key);
- }
-
BIND(&check_key);
{
- Node* transition = LoadWeakCellValue(var_transition_cell.value(), slow);
+ Node* transition_cell =
+ LoadObjectField(maybe_handler, StoreHandler::kData1Offset);
+ Node* transition = LoadWeakCellValue(transition_cell, slow);
Node* transition_bitfield3 = LoadMapBitField3(transition);
- GotoIf(IsSetWord32<Map::Deprecated>(transition_bitfield3), slow);
+ GotoIf(IsSetWord32<Map::IsDeprecatedBit>(transition_bitfield3), slow);
Node* nof =
DecodeWord32<Map::NumberOfOwnDescriptorsBits>(transition_bitfield3);
Node* last_added = Int32Sub(nof, Int32Constant(1));
@@ -840,7 +824,8 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
BIND(&found_handler);
{
Comment("KeyedStoreGeneric found transition handler");
- HandleStoreICHandlerCase(p, var_handler.value(), notfound);
+ HandleStoreICHandlerCase(p, var_handler.value(), notfound,
+ ICMode::kNonGlobalIC);
}
}
}
@@ -882,16 +867,16 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
{
CheckForAssociatedProtector(p->name, slow);
Label extensible(this);
- GotoIf(IsPrivateSymbol(p->name), &extensible);
Node* bitfield2 = LoadMapBitField2(receiver_map);
- Branch(IsSetWord32(bitfield2, 1 << Map::kIsExtensible), &extensible,
- slow);
+ GotoIf(IsPrivateSymbol(p->name), &extensible);
+ Branch(IsSetWord32<Map::IsExtensibleBit>(bitfield2), &extensible, slow);
BIND(&extensible);
LookupPropertyOnPrototypeChain(receiver_map, p->name, &accessor,
&var_accessor_pair, &var_accessor_holder,
&readonly, slow);
Label add_dictionary_property_slow(this);
+ InvalidateValidityCellIfPrototype(receiver_map, bitfield2);
Add<NameDictionary>(properties, p->name, p->value,
&add_dictionary_property_slow);
Return(p->value);
@@ -958,7 +943,8 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
BIND(&found_handler);
{
Comment("KeyedStoreGeneric found handler");
- HandleStoreICHandlerCase(p, var_handler.value(), &stub_cache_miss);
+ HandleStoreICHandlerCase(p, var_handler.value(), &stub_cache_miss,
+ ICMode::kNonGlobalIC);
}
BIND(&stub_cache_miss);
{
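The transition-lookup rewrite above replaces map-based dispatch over Tuple3 and FixedArray handler shapes with a single instance-type check (STORE_HANDLER_TYPE) and a fixed-offset field load (StoreHandler::kData1Offset). The same hunks also move from hand-written "1 << Map::kIsExtensible"-style masks to named bitfield types (Map::IsDictionaryMapBit, Map::IsDeprecatedBit, Map::IsExtensibleBit). A self-contained sketch of that bitfield idiom, with an invented field layout:

// BitField-style accessors as used in the hunks above: each flag gets a
// named type carrying its shift and width, instead of ad-hoc shifted
// masks scattered through the code. The bit positions are made up.
#include <cassert>
#include <cstdint>

template <int kShift, int kSize>
struct BitField {
  static constexpr uint32_t kMask = ((uint32_t{1} << kSize) - 1) << kShift;
  static constexpr uint32_t decode(uint32_t word) {
    return (word & kMask) >> kShift;
  }
};

// Hypothetical bitfield3 layout for a toy Map.
using IsDictionaryMapBit = BitField<20, 1>;
using IsDeprecatedBit = BitField<23, 1>;

template <typename Bit>
bool IsSetWord32(uint32_t word) { return Bit::decode(word) != 0; }

int main() {
  uint32_t bitfield3 = IsDeprecatedBit::kMask;  // only the deprecated bit set
  assert(IsSetWord32<IsDeprecatedBit>(bitfield3));
  assert(!IsSetWord32<IsDictionaryMapBit>(bitfield3));
}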
diff --git a/deps/v8/src/ic/stub-cache.cc b/deps/v8/src/ic/stub-cache.cc
index ecdf8c83e1..927c7c6f27 100644
--- a/deps/v8/src/ic/stub-cache.cc
+++ b/deps/v8/src/ic/stub-cache.cc
@@ -39,7 +39,7 @@ int StubCache::PrimaryOffset(Name* name, Map* map) {
uint32_t map_low32bits =
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map));
// Base the offset on a simple combination of name and map.
- uint32_t key = (map_low32bits + field) ^ kPrimaryMagic;
+ uint32_t key = map_low32bits + field;
return key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
}
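After this change the primary stub-cache offset is a plain masked sum of the name hash and the low map bits; only the secondary hash keeps a magic constant (see the header hunk below). Recomputing both variants side by side, with illustrative table parameters (the old magic value is the one removed from stub-cache.h):

// Standalone recomputation of the primary-offset change above. Table size
// and index shift are assumptions for the sketch; the hash input stands
// in for the field value derived from the Name.
#include <cstdint>
#include <cstdio>

constexpr int kCacheIndexShift = 3;    // pointer-sized entries (assumed)
constexpr int kPrimaryTableSize = 1 << 11;  // illustrative
constexpr uint32_t kPrimaryMagicOld = 0x3d532433;

uint32_t PrimaryOffsetOld(uint32_t field, uint32_t map_low32bits) {
  uint32_t key = (map_low32bits + field) ^ kPrimaryMagicOld;
  return key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
}

uint32_t PrimaryOffsetNew(uint32_t field, uint32_t map_low32bits) {
  uint32_t key = map_low32bits + field;  // no magic constant anymore
  return key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
}

int main() {
  uint32_t field = 0x1234u, map = 0xdeadbeefu;
  std::printf("old=%u new=%u\n", PrimaryOffsetOld(field, map),
              PrimaryOffsetNew(field, map));
}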
diff --git a/deps/v8/src/ic/stub-cache.h b/deps/v8/src/ic/stub-cache.h
index 4b3144b9ad..cd081edfb2 100644
--- a/deps/v8/src/ic/stub-cache.h
+++ b/deps/v8/src/ic/stub-cache.h
@@ -84,8 +84,7 @@ class StubCache {
static const int kSecondaryTableBits = 9;
static const int kSecondaryTableSize = (1 << kSecondaryTableBits);
- // Some magic number used in primary and secondary hash computations.
- static const int kPrimaryMagic = 0x3d532433;
+ // Some magic number used in the secondary hash computation.
static const int kSecondaryMagic = 0xb16ca6e5;
static int PrimaryOffsetForTesting(Name* name, Map* map) {
diff --git a/deps/v8/src/icu_util.cc b/deps/v8/src/icu_util.cc
index 291cce6fe6..d14c673b62 100644
--- a/deps/v8/src/icu_util.cc
+++ b/deps/v8/src/icu_util.cc
@@ -81,6 +81,8 @@ bool InitializeICU(const char* icu_data_file) {
UErrorCode err = U_ZERO_ERROR;
udata_setCommonData(reinterpret_cast<void*>(addr), &err);
+ // Never try to load ICU data from files.
+ udata_setFileAccess(UDATA_ONLY_PACKAGES, &err);
return err == U_ZERO_ERROR;
#elif ICU_UTIL_DATA_IMPL == ICU_UTIL_DATA_STATIC
// Mac/Linux bundle the ICU data in.
@@ -110,6 +112,8 @@ bool InitializeICU(const char* icu_data_file) {
UErrorCode err = U_ZERO_ERROR;
udata_setCommonData(reinterpret_cast<void*>(g_icu_data_ptr), &err);
+ // Never try to load ICU data from files.
+ udata_setFileAccess(UDATA_ONLY_PACKAGES, &err);
return err == U_ZERO_ERROR;
#endif
#endif
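Both preprocessor branches gain the same call, so ICU can only serve data from the blob registered via udata_setCommonData and never falls back to opening .dat files at runtime. A minimal embedder-style sketch of that initialization order, using the public ICU4C C API; where the data pointer comes from (file mapping, linked-in blob) is left to the embedder:

// Register an in-memory ICU data bundle, then forbid any fallback to
// loading data files from disk, mirroring the order in the hunks above.
#include <unicode/udata.h>
#include <unicode/utypes.h>

bool InitializeIcuFromMemory(const void* icu_data_blob) {
  UErrorCode err = U_ZERO_ERROR;
  // Hand ICU the common data (e.g. the contents of icudtXXl.dat).
  udata_setCommonData(icu_data_blob, &err);
  // Never try to load ICU data from individual files; only the packaged
  // data registered above may be used.
  udata_setFileAccess(UDATA_ONLY_PACKAGES, &err);
  return U_SUCCESS(err);
}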
diff --git a/deps/v8/src/inspector/BUILD.gn b/deps/v8/src/inspector/BUILD.gn
index 2ebf561135..699b1bcbd4 100644
--- a/deps/v8/src/inspector/BUILD.gn
+++ b/deps/v8/src/inspector/BUILD.gn
@@ -79,17 +79,6 @@ action("inspector_injected_script") {
config("inspector_config") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
- cflags = []
- if (is_win) {
- cflags += [
- "/wd4267", # Truncation from size_t to int.
- "/wd4305", # Truncation from 'type1' to 'type2'.
- "/wd4324", # Struct padded due to declspec(align).
- "/wd4714", # Function marked forceinline not inlined.
- "/wd4800", # Value forced to bool.
- "/wd4996", # Deprecated function call.
- ]
- }
if (is_component_build) {
defines = [ "BUILDING_V8_SHARED" ]
}
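Dropping the MSVC /wd42xx suppressions means the inspector sources are now expected to compile warning-clean at the default warning level. As an example of the class of diagnostic /wd4267 used to hide, and the explicit-narrowing style that avoids it (purely illustrative, not code from the inspector):

// C4267 fires on implicit size_t -> int truncation under MSVC; an
// explicit cast documents the narrowing and silences the warning.
#include <string>

int LengthAsInt(const std::string& s) {
  // return s.size();                  // would warn: C4267
  return static_cast<int>(s.size());   // explicit, warning-clean
}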
diff --git a/deps/v8/src/inspector/OWNERS b/deps/v8/src/inspector/OWNERS
index db3c906262..3cfeff35c4 100644
--- a/deps/v8/src/inspector/OWNERS
+++ b/deps/v8/src/inspector/OWNERS
@@ -12,5 +12,8 @@ yangguo@chromium.org
per-file js_protocol.json=set noparent
per-file js_protocol.json=dgozman@chromium.org
per-file js_protocol.json=pfeldman@chromium.org
+per-file js_protocol.pdl=set noparent
+per-file js_protocol.pdl=dgozman@chromium.org
+per-file js_protocol.pdl=pfeldman@chromium.org
# COMPONENT: Platform>DevTools>JavaScript
diff --git a/deps/v8/src/inspector/injected-script-source.js b/deps/v8/src/inspector/injected-script-source.js
index dd9067ca96..0849d44202 100644
--- a/deps/v8/src/inspector/injected-script-source.js
+++ b/deps/v8/src/inspector/injected-script-source.js
@@ -460,6 +460,10 @@ InjectedScript.prototype = {
if (InjectedScriptHost.subtype(o) === "proxy")
continue;
+ var typedArrays = subtype === "arraybuffer" ? InjectedScriptHost.typedArrayProperties(o) || [] : [];
+ for (var i = 0; i < typedArrays.length; i += 2)
+ addPropertyIfNeeded(descriptors, { name: typedArrays[i], value: typedArrays[i + 1], isOwn: true, enumerable: false, configurable: false, __proto__: null });
+
try {
if (skipGetOwnPropertyNames && o === object) {
if (!process(o, undefined, o.length))
@@ -586,15 +590,21 @@ InjectedScript.prototype = {
if (subtype === "node") {
var description = "";
- if (obj.nodeName)
- description = obj.nodeName.toLowerCase();
- else if (obj.constructor)
- description = obj.constructor.name.toLowerCase();
+ var nodeName = InjectedScriptHost.getProperty(obj, "nodeName");
+ if (nodeName) {
+ description = nodeName.toLowerCase();
+ } else {
+ var constructor = InjectedScriptHost.getProperty(obj, "constructor");
+ if (constructor)
+ description = (InjectedScriptHost.getProperty(constructor, "name") || "").toLowerCase();
+ }
- switch (obj.nodeType) {
+ var nodeType = InjectedScriptHost.getProperty(obj, "nodeType");
+ switch (nodeType) {
case 1 /* Node.ELEMENT_NODE */:
- description += obj.id ? "#" + obj.id : "";
- var className = obj.className;
+ var id = InjectedScriptHost.getProperty(obj, "id");
+ description += id ? "#" + id : "";
+ var className = InjectedScriptHost.getProperty(obj, "className");
description += (className && typeof className === "string") ? "." + className.trim().replace(/\s+/g, ".") : "";
break;
case 10 /*Node.DOCUMENT_TYPE_NODE */:
@@ -929,6 +939,10 @@ InjectedScript.RemoteObject.prototype = {
if ((subtype === "map" || subtype === "set") && descriptor.name === "size")
return true;
+ // Ignore ArrayBuffer previews
+ if (subtype === 'arraybuffer' && (descriptor.name === "[[Int8Array]]" || descriptor.name === "[[Uint8Array]]" || descriptor.name === "[[Int16Array]]" || descriptor.name === "[[Int32Array]]"))
+ return true;
+
// Never preview prototype properties.
if (!descriptor.isOwn)
return true;
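The pattern in the hunks above is consistent: every read that used to go through a property the page could redefine (obj.nodeName, obj.constructor, obj.id, obj.className, obj.nodeType) now goes through InjectedScriptHost.getProperty, so generating a preview cannot run page-defined getters or proxy traps; the new typedArrayProperties hook likewise synthesizes the [[Int8Array]]-style ArrayBuffer views host-side. A loose C++ analogue of that host-level raw access, with all types invented for illustration:

// Analogue of the injected-script change: the debugger must not dispatch
// through an overridable accessor (which may have observable side
// effects), so it reads the underlying state directly.
#include <cstdio>
#include <string>

class Node {
 public:
  explicit Node(std::string name) : node_name_(std::move(name)) {}
  virtual ~Node() = default;
  // The "scriptable" accessor: overrides can do arbitrary work, so a
  // debugger generating previews must not call it.
  virtual std::string nodeName() {
    std::puts("side effect: observable getter ran");
    return node_name_;
  }
 private:
  friend std::string HostGetNodeName(const Node& n);
  std::string node_name_;
};

// Host-level read, analogous to InjectedScriptHost.getProperty(): returns
// the stored value without dispatching through the overridable getter.
std::string HostGetNodeName(const Node& n) { return n.node_name_; }

int main() {
  Node div("DIV");
  std::printf("%s\n", HostGetNodeName(div).c_str());  // no side effect
}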
diff --git a/deps/v8/src/inspector/injected_script_externs.js b/deps/v8/src/inspector/injected_script_externs.js
index 9c5555b624..d293b8547d 100644
--- a/deps/v8/src/inspector/injected_script_externs.js
+++ b/deps/v8/src/inspector/injected_script_externs.js
@@ -108,6 +108,12 @@ InjectedScriptHostClass.prototype.getOwnPropertySymbols = function(obj) {}
*/
InjectedScriptHostClass.prototype.nativeAccessorDescriptor = function(obj, name) {}
+/**
+ * @param {!Object} arrayBuffer
+ * @return {Array<Object>|undefined}
+ */
+InjectedScriptHostClass.prototype.typedArrayProperties = function(arrayBuffer) {}
+
/** @type {!InjectedScriptHostClass} */
var InjectedScriptHost;
/** @type {!Window} */
diff --git a/deps/v8/src/inspector/js_protocol.json b/deps/v8/src/inspector/js_protocol.json
index ea573d11a6..a0f7fcd7ed 100644
--- a/deps/v8/src/inspector/js_protocol.json
+++ b/deps/v8/src/inspector/js_protocol.json
@@ -1,1205 +1,2966 @@
{
- "version": { "major": "1", "minor": "3" },
- "domains": [
- {
- "domain": "Schema",
- "description": "This domain is deprecated.",
- "deprecated": true,
- "types": [
- {
- "id": "Domain",
- "type": "object",
- "description": "Description of the protocol domain.",
- "properties": [
- { "name": "name", "type": "string", "description": "Domain name." },
- { "name": "version", "type": "string", "description": "Domain version." }
- ]
- }
- ],
- "commands": [
- {
- "name": "getDomains",
- "description": "Returns supported domains.",
- "handlers": ["browser", "renderer"],
- "returns": [
- { "name": "domains", "type": "array", "items": { "$ref": "Domain" }, "description": "List of supported domains." }
- ]
- }
- ]
- },
- {
- "domain": "Runtime",
- "description": "Runtime domain exposes JavaScript runtime by means of remote evaluation and mirror objects. Evaluation results are returned as mirror object that expose object type, string representation and unique identifier that can be used for further object reference. Original objects are maintained in memory unless they are either explicitly released or are released along with the other objects in their object group.",
- "types": [
- {
- "id": "ScriptId",
- "type": "string",
- "description": "Unique script identifier."
- },
- {
- "id": "RemoteObjectId",
- "type": "string",
- "description": "Unique object identifier."
- },
- {
- "id": "UnserializableValue",
- "type": "string",
- "enum": ["Infinity", "NaN", "-Infinity", "-0"],
- "description": "Primitive value which cannot be JSON-stringified."
- },
- {
- "id": "RemoteObject",
- "type": "object",
- "description": "Mirror object referencing original JavaScript object.",
- "properties": [
- { "name": "type", "type": "string", "enum": ["object", "function", "undefined", "string", "number", "boolean", "symbol"], "description": "Object type." },
- { "name": "subtype", "type": "string", "optional": true, "enum": ["array", "null", "node", "regexp", "date", "map", "set", "weakmap", "weakset", "iterator", "generator", "error", "proxy", "promise", "typedarray"], "description": "Object subtype hint. Specified for <code>object</code> type values only." },
- { "name": "className", "type": "string", "optional": true, "description": "Object class (constructor) name. Specified for <code>object</code> type values only." },
- { "name": "value", "type": "any", "optional": true, "description": "Remote object value in case of primitive values or JSON values (if it was requested)." },
- { "name": "unserializableValue", "$ref": "UnserializableValue", "optional": true, "description": "Primitive value which can not be JSON-stringified does not have <code>value</code>, but gets this property." },
- { "name": "description", "type": "string", "optional": true, "description": "String representation of the object." },
- { "name": "objectId", "$ref": "RemoteObjectId", "optional": true, "description": "Unique object identifier (for non-primitive values)." },
- { "name": "preview", "$ref": "ObjectPreview", "optional": true, "description": "Preview containing abbreviated property values. Specified for <code>object</code> type values only.", "experimental": true },
- { "name": "customPreview", "$ref": "CustomPreview", "optional": true, "experimental": true}
- ]
- },
- {
- "id": "CustomPreview",
- "type": "object",
- "experimental": true,
- "properties": [
- { "name": "header", "type": "string"},
- { "name": "hasBody", "type": "boolean"},
- { "name": "formatterObjectId", "$ref": "RemoteObjectId"},
- { "name": "bindRemoteObjectFunctionId", "$ref": "RemoteObjectId" },
- { "name": "configObjectId", "$ref": "RemoteObjectId", "optional": true }
- ]
- },
- {
- "id": "ObjectPreview",
- "type": "object",
- "experimental": true,
- "description": "Object containing abbreviated remote object value.",
- "properties": [
- { "name": "type", "type": "string", "enum": ["object", "function", "undefined", "string", "number", "boolean", "symbol"], "description": "Object type." },
- { "name": "subtype", "type": "string", "optional": true, "enum": ["array", "null", "node", "regexp", "date", "map", "set", "weakmap", "weakset", "iterator", "generator", "error"], "description": "Object subtype hint. Specified for <code>object</code> type values only." },
- { "name": "description", "type": "string", "optional": true, "description": "String representation of the object." },
- { "name": "overflow", "type": "boolean", "description": "True iff some of the properties or entries of the original object did not fit." },
- { "name": "properties", "type": "array", "items": { "$ref": "PropertyPreview" }, "description": "List of the properties." },
- { "name": "entries", "type": "array", "items": { "$ref": "EntryPreview" }, "optional": true, "description": "List of the entries. Specified for <code>map</code> and <code>set</code> subtype values only." }
- ]
- },
- {
- "id": "PropertyPreview",
- "type": "object",
- "experimental": true,
- "properties": [
- { "name": "name", "type": "string", "description": "Property name." },
- { "name": "type", "type": "string", "enum": ["object", "function", "undefined", "string", "number", "boolean", "symbol", "accessor"], "description": "Object type. Accessor means that the property itself is an accessor property." },
- { "name": "value", "type": "string", "optional": true, "description": "User-friendly property value string." },
- { "name": "valuePreview", "$ref": "ObjectPreview", "optional": true, "description": "Nested value preview." },
- { "name": "subtype", "type": "string", "optional": true, "enum": ["array", "null", "node", "regexp", "date", "map", "set", "weakmap", "weakset", "iterator", "generator", "error"], "description": "Object subtype hint. Specified for <code>object</code> type values only." }
- ]
- },
- {
- "id": "EntryPreview",
- "type": "object",
- "experimental": true,
- "properties": [
- { "name": "key", "$ref": "ObjectPreview", "optional": true, "description": "Preview of the key. Specified for map-like collection entries." },
- { "name": "value", "$ref": "ObjectPreview", "description": "Preview of the value." }
- ]
- },
- {
- "id": "PropertyDescriptor",
- "type": "object",
- "description": "Object property descriptor.",
- "properties": [
- { "name": "name", "type": "string", "description": "Property name or symbol description." },
- { "name": "value", "$ref": "RemoteObject", "optional": true, "description": "The value associated with the property." },
- { "name": "writable", "type": "boolean", "optional": true, "description": "True if the value associated with the property may be changed (data descriptors only)." },
- { "name": "get", "$ref": "RemoteObject", "optional": true, "description": "A function which serves as a getter for the property, or <code>undefined</code> if there is no getter (accessor descriptors only)." },
- { "name": "set", "$ref": "RemoteObject", "optional": true, "description": "A function which serves as a setter for the property, or <code>undefined</code> if there is no setter (accessor descriptors only)." },
- { "name": "configurable", "type": "boolean", "description": "True if the type of this property descriptor may be changed and if the property may be deleted from the corresponding object." },
- { "name": "enumerable", "type": "boolean", "description": "True if this property shows up during enumeration of the properties on the corresponding object." },
- { "name": "wasThrown", "type": "boolean", "optional": true, "description": "True if the result was thrown during the evaluation." },
- { "name": "isOwn", "optional": true, "type": "boolean", "description": "True if the property is owned for the object." },
- { "name": "symbol", "$ref": "RemoteObject", "optional": true, "description": "Property symbol object, if the property is of the <code>symbol</code> type." }
- ]
- },
- {
- "id": "InternalPropertyDescriptor",
- "type": "object",
- "description": "Object internal property descriptor. This property isn't normally visible in JavaScript code.",
- "properties": [
- { "name": "name", "type": "string", "description": "Conventional property name." },
- { "name": "value", "$ref": "RemoteObject", "optional": true, "description": "The value associated with the property." }
- ]
- },
- {
- "id": "CallArgument",
- "type": "object",
- "description": "Represents function call argument. Either remote object id <code>objectId</code>, primitive <code>value</code>, unserializable primitive value or neither of (for undefined) them should be specified.",
- "properties": [
- { "name": "value", "type": "any", "optional": true, "description": "Primitive value or serializable javascript object." },
- { "name": "unserializableValue", "$ref": "UnserializableValue", "optional": true, "description": "Primitive value which can not be JSON-stringified." },
- { "name": "objectId", "$ref": "RemoteObjectId", "optional": true, "description": "Remote object handle." }
- ]
- },
- {
- "id": "ExecutionContextId",
- "type": "integer",
- "description": "Id of an execution context."
- },
- {
- "id": "ExecutionContextDescription",
- "type": "object",
- "description": "Description of an isolated world.",
- "properties": [
- { "name": "id", "$ref": "ExecutionContextId", "description": "Unique id of the execution context. It can be used to specify in which execution context script evaluation should be performed." },
- { "name": "origin", "type": "string", "description": "Execution context origin." },
- { "name": "name", "type": "string", "description": "Human readable name describing given context." },
- { "name": "auxData", "type": "object", "optional": true, "description": "Embedder-specific auxiliary data." }
- ]
- },
- {
- "id": "ExceptionDetails",
- "type": "object",
- "description": "Detailed information about exception (or error) that was thrown during script compilation or execution.",
- "properties": [
- { "name": "exceptionId", "type": "integer", "description": "Exception id." },
- { "name": "text", "type": "string", "description": "Exception text, which should be used together with exception object when available." },
- { "name": "lineNumber", "type": "integer", "description": "Line number of the exception location (0-based)." },
- { "name": "columnNumber", "type": "integer", "description": "Column number of the exception location (0-based)." },
- { "name": "scriptId", "$ref": "ScriptId", "optional": true, "description": "Script ID of the exception location." },
- { "name": "url", "type": "string", "optional": true, "description": "URL of the exception location, to be used when the script was not reported." },
- { "name": "stackTrace", "$ref": "StackTrace", "optional": true, "description": "JavaScript stack trace if available." },
- { "name": "exception", "$ref": "RemoteObject", "optional": true, "description": "Exception object if available." },
- { "name": "executionContextId", "$ref": "ExecutionContextId", "optional": true, "description": "Identifier of the context where exception happened." }
- ]
- },
- {
- "id": "Timestamp",
- "type": "number",
- "description": "Number of milliseconds since epoch."
- },
- {
- "id": "CallFrame",
- "type": "object",
- "description": "Stack entry for runtime errors and assertions.",
- "properties": [
- { "name": "functionName", "type": "string", "description": "JavaScript function name." },
- { "name": "scriptId", "$ref": "ScriptId", "description": "JavaScript script id." },
- { "name": "url", "type": "string", "description": "JavaScript script name or url." },
- { "name": "lineNumber", "type": "integer", "description": "JavaScript script line number (0-based)." },
- { "name": "columnNumber", "type": "integer", "description": "JavaScript script column number (0-based)." }
- ]
- },
- {
- "id": "StackTrace",
- "type": "object",
- "description": "Call frames for assertions or error messages.",
- "properties": [
- { "name": "description", "type": "string", "optional": true, "description": "String label of this stack trace. For async traces this may be a name of the function that initiated the async call." },
- { "name": "callFrames", "type": "array", "items": { "$ref": "CallFrame" }, "description": "JavaScript function name." },
- { "name": "parent", "$ref": "StackTrace", "optional": true, "description": "Asynchronous JavaScript stack trace that preceded this stack, if available." },
- { "name": "parentId", "$ref": "StackTraceId", "optional": true, "experimental": true, "description": "Asynchronous JavaScript stack trace that preceded this stack, if available." }
- ]
- },
- {
- "id": "UniqueDebuggerId",
- "type": "string",
- "description": "Unique identifier of current debugger.",
- "experimental": true
- },
- {
- "id": "StackTraceId",
- "type": "object",
- "description": "If <code>debuggerId</code> is set stack trace comes from another debugger and can be resolved there. This allows to track cross-debugger calls. See <code>Runtime.StackTrace</code> and <code>Debugger.paused</code> for usages.",
- "properties": [
- { "name": "id", "type": "string" },
- { "name": "debuggerId", "$ref": "UniqueDebuggerId", "optional": true }
- ],
- "experimental": true
- }
- ],
- "commands": [
- {
- "name": "evaluate",
- "parameters": [
- { "name": "expression", "type": "string", "description": "Expression to evaluate." },
- { "name": "objectGroup", "type": "string", "optional": true, "description": "Symbolic group name that can be used to release multiple objects." },
- { "name": "includeCommandLineAPI", "type": "boolean", "optional": true, "description": "Determines whether Command Line API should be available during the evaluation." },
- { "name": "silent", "type": "boolean", "optional": true, "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides <code>setPauseOnException</code> state." },
- { "name": "contextId", "$ref": "ExecutionContextId", "optional": true, "description": "Specifies in which execution context to perform evaluation. If the parameter is omitted the evaluation will be performed in the context of the inspected page." },
- { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object that should be sent by value." },
- { "name": "generatePreview", "type": "boolean", "optional": true, "experimental": true, "description": "Whether preview should be generated for the result." },
- { "name": "userGesture", "type": "boolean", "optional": true, "description": "Whether execution should be treated as initiated by user in the UI." },
- { "name": "awaitPromise", "type": "boolean", "optional":true, "description": "Whether execution should <code>await</code> for resulting value and return once awaited promise is resolved." }
- ],
- "returns": [
- { "name": "result", "$ref": "RemoteObject", "description": "Evaluation result." },
- { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details."}
- ],
- "description": "Evaluates expression on global object."
- },
- {
- "name": "awaitPromise",
- "parameters": [
- { "name": "promiseObjectId", "$ref": "RemoteObjectId", "description": "Identifier of the promise." },
- { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object that should be sent by value." },
- { "name": "generatePreview", "type": "boolean", "optional": true, "description": "Whether preview should be generated for the result." }
- ],
- "returns": [
- { "name": "result", "$ref": "RemoteObject", "description": "Promise result. Will contain rejected value if promise was rejected." },
- { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details if stack strace is available."}
- ],
- "description": "Add handler to promise with given promise object id."
- },
- {
- "name": "callFunctionOn",
- "parameters": [
- { "name": "functionDeclaration", "type": "string", "description": "Declaration of the function to call." },
- { "name": "objectId", "$ref": "RemoteObjectId", "optional": true, "description": "Identifier of the object to call function on. Either objectId or executionContextId should be specified." },
- { "name": "arguments", "type": "array", "items": { "$ref": "CallArgument", "description": "Call argument." }, "optional": true, "description": "Call arguments. All call arguments must belong to the same JavaScript world as the target object." },
- { "name": "silent", "type": "boolean", "optional": true, "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides <code>setPauseOnException</code> state." },
- { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object which should be sent by value." },
- { "name": "generatePreview", "type": "boolean", "optional": true, "experimental": true, "description": "Whether preview should be generated for the result." },
- { "name": "userGesture", "type": "boolean", "optional": true, "description": "Whether execution should be treated as initiated by user in the UI." },
- { "name": "awaitPromise", "type": "boolean", "optional":true, "description": "Whether execution should <code>await</code> for resulting value and return once awaited promise is resolved." },
- { "name": "executionContextId", "$ref": "ExecutionContextId", "optional": true, "description": "Specifies execution context which global object will be used to call function on. Either executionContextId or objectId should be specified." },
- { "name": "objectGroup", "type": "string", "optional": true, "description": "Symbolic group name that can be used to release multiple objects. If objectGroup is not specified and objectId is, objectGroup will be inherited from object." }
- ],
- "returns": [
- { "name": "result", "$ref": "RemoteObject", "description": "Call result." },
- { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details."}
- ],
- "description": "Calls function with given declaration on the given object. Object group of the result is inherited from the target object."
- },
- {
- "name": "getProperties",
- "parameters": [
- { "name": "objectId", "$ref": "RemoteObjectId", "description": "Identifier of the object to return properties for." },
- { "name": "ownProperties", "optional": true, "type": "boolean", "description": "If true, returns properties belonging only to the element itself, not to its prototype chain." },
- { "name": "accessorPropertiesOnly", "optional": true, "type": "boolean", "description": "If true, returns accessor properties (with getter/setter) only; internal properties are not returned either.", "experimental": true },
- { "name": "generatePreview", "type": "boolean", "optional": true, "experimental": true, "description": "Whether preview should be generated for the results." }
- ],
- "returns": [
- { "name": "result", "type": "array", "items": { "$ref": "PropertyDescriptor" }, "description": "Object properties." },
- { "name": "internalProperties", "optional": true, "type": "array", "items": { "$ref": "InternalPropertyDescriptor" }, "description": "Internal object properties (only of the element itself)." },
- { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details."}
- ],
- "description": "Returns properties of a given object. Object group of the result is inherited from the target object."
- },
- {
- "name": "releaseObject",
- "parameters": [
- { "name": "objectId", "$ref": "RemoteObjectId", "description": "Identifier of the object to release." }
- ],
- "description": "Releases remote object with given id."
- },
- {
- "name": "releaseObjectGroup",
- "parameters": [
- { "name": "objectGroup", "type": "string", "description": "Symbolic object group name." }
- ],
- "description": "Releases all remote objects that belong to a given group."
- },
- {
- "name": "runIfWaitingForDebugger",
- "description": "Tells inspected instance to run if it was waiting for debugger to attach."
- },
- {
- "name": "enable",
- "description": "Enables reporting of execution contexts creation by means of <code>executionContextCreated</code> event. When the reporting gets enabled the event will be sent immediately for each existing execution context."
- },
- {
- "name": "disable",
- "description": "Disables reporting of execution contexts creation."
- },
- {
- "name": "discardConsoleEntries",
- "description": "Discards collected exceptions and console API calls."
- },
- {
- "name": "setCustomObjectFormatterEnabled",
- "parameters": [
- {
- "name": "enabled",
- "type": "boolean"
- }
- ],
- "experimental": true
- },
- {
- "name": "compileScript",
- "parameters": [
- { "name": "expression", "type": "string", "description": "Expression to compile." },
- { "name": "sourceURL", "type": "string", "description": "Source url to be set for the script." },
- { "name": "persistScript", "type": "boolean", "description": "Specifies whether the compiled script should be persisted." },
- { "name": "executionContextId", "$ref": "ExecutionContextId", "optional": true, "description": "Specifies in which execution context to perform script run. If the parameter is omitted the evaluation will be performed in the context of the inspected page." }
- ],
- "returns": [
- { "name": "scriptId", "$ref": "ScriptId", "optional": true, "description": "Id of the script." },
- { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details."}
- ],
- "description": "Compiles expression."
- },
- {
- "name": "runScript",
- "parameters": [
- { "name": "scriptId", "$ref": "ScriptId", "description": "Id of the script to run." },
- { "name": "executionContextId", "$ref": "ExecutionContextId", "optional": true, "description": "Specifies in which execution context to perform script run. If the parameter is omitted the evaluation will be performed in the context of the inspected page." },
- { "name": "objectGroup", "type": "string", "optional": true, "description": "Symbolic group name that can be used to release multiple objects." },
- { "name": "silent", "type": "boolean", "optional": true, "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides <code>setPauseOnException</code> state." },
- { "name": "includeCommandLineAPI", "type": "boolean", "optional": true, "description": "Determines whether Command Line API should be available during the evaluation." },
- { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object which should be sent by value." },
- { "name": "generatePreview", "type": "boolean", "optional": true, "description": "Whether preview should be generated for the result." },
- { "name": "awaitPromise", "type": "boolean", "optional": true, "description": "Whether execution should <code>await</code> for resulting value and return once awaited promise is resolved." }
- ],
- "returns": [
- { "name": "result", "$ref": "RemoteObject", "description": "Run result." },
- { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details."}
- ],
- "description": "Runs script with given id in a given context."
- },
- {
- "name": "queryObjects",
- "parameters": [
- { "name": "prototypeObjectId", "$ref": "RemoteObjectId", "description": "Identifier of the prototype to return objects for." }
- ],
- "returns": [
- { "name": "objects", "$ref": "RemoteObject", "description": "Array with objects." }
- ]
- },
- {
- "name": "globalLexicalScopeNames",
- "parameters": [
- { "name": "executionContextId", "$ref": "ExecutionContextId", "optional": true, "description": "Specifies in which execution context to lookup global scope variables." }
- ],
- "returns": [
- { "name": "names", "type": "array", "items": { "type": "string" } }
- ],
- "description": "Returns all let, const and class variables from global scope."
- }
- ],
- "events": [
- {
- "name": "executionContextCreated",
- "parameters": [
- { "name": "context", "$ref": "ExecutionContextDescription", "description": "A newly created execution context." }
- ],
- "description": "Issued when new execution context is created."
- },
- {
- "name": "executionContextDestroyed",
- "parameters": [
- { "name": "executionContextId", "$ref": "ExecutionContextId", "description": "Id of the destroyed context" }
- ],
- "description": "Issued when execution context is destroyed."
- },
- {
- "name": "executionContextsCleared",
- "description": "Issued when all executionContexts were cleared in browser"
- },
- {
- "name": "exceptionThrown",
- "description": "Issued when exception was thrown and unhandled.",
- "parameters": [
- { "name": "timestamp", "$ref": "Timestamp", "description": "Timestamp of the exception." },
- { "name": "exceptionDetails", "$ref": "ExceptionDetails" }
- ]
- },
- {
- "name": "exceptionRevoked",
- "description": "Issued when unhandled exception was revoked.",
- "parameters": [
- { "name": "reason", "type": "string", "description": "Reason describing why exception was revoked." },
- { "name": "exceptionId", "type": "integer", "description": "The id of revoked exception, as reported in <code>exceptionThrown</code>." }
- ]
- },
- {
- "name": "consoleAPICalled",
- "description": "Issued when console API was called.",
- "parameters": [
- { "name": "type", "type": "string", "enum": ["log", "debug", "info", "error", "warning", "dir", "dirxml", "table", "trace", "clear", "startGroup", "startGroupCollapsed", "endGroup", "assert", "profile", "profileEnd", "count", "timeEnd"], "description": "Type of the call." },
- { "name": "args", "type": "array", "items": { "$ref": "RemoteObject" }, "description": "Call arguments." },
- { "name": "executionContextId", "$ref": "ExecutionContextId", "description": "Identifier of the context where the call was made." },
- { "name": "timestamp", "$ref": "Timestamp", "description": "Call timestamp." },
- { "name": "stackTrace", "$ref": "StackTrace", "optional": true, "description": "Stack trace captured when the call was made." },
- { "name": "context", "type": "string", "optional": true, "experimental": true, "description": "Console context descriptor for calls on non-default console context (not console.*): 'anonymous#unique-logger-id' for call on unnamed context, 'name#unique-logger-id' for call on named context." }
- ]
- },
- {
- "name": "inspectRequested",
- "description": "Issued when object should be inspected (for example, as a result of inspect() command line API call).",
- "parameters": [
- { "name": "object", "$ref": "RemoteObject" },
- { "name": "hints", "type": "object" }
- ]
- }
- ]
- },
- {
- "domain": "Debugger",
- "description": "Debugger domain exposes JavaScript debugging capabilities. It allows setting and removing breakpoints, stepping through execution, exploring stack traces, etc.",
- "dependencies": ["Runtime"],
- "types": [
- {
- "id": "BreakpointId",
- "type": "string",
- "description": "Breakpoint identifier."
- },
- {
- "id": "CallFrameId",
- "type": "string",
- "description": "Call frame identifier."
- },
- {
- "id": "Location",
- "type": "object",
- "properties": [
- { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Script identifier as reported in the <code>Debugger.scriptParsed</code>." },
- { "name": "lineNumber", "type": "integer", "description": "Line number in the script (0-based)." },
- { "name": "columnNumber", "type": "integer", "optional": true, "description": "Column number in the script (0-based)." }
- ],
- "description": "Location in the source code."
- },
- {
- "id": "ScriptPosition",
- "experimental": true,
- "type": "object",
- "properties": [
- { "name": "lineNumber", "type": "integer" },
- { "name": "columnNumber", "type": "integer" }
- ],
- "description": "Location in the source code."
- },
- {
- "id": "CallFrame",
- "type": "object",
- "properties": [
- { "name": "callFrameId", "$ref": "CallFrameId", "description": "Call frame identifier. This identifier is only valid while the virtual machine is paused." },
- { "name": "functionName", "type": "string", "description": "Name of the JavaScript function called on this call frame." },
- { "name": "functionLocation", "$ref": "Location", "optional": true, "description": "Location in the source code." },
- { "name": "location", "$ref": "Location", "description": "Location in the source code." },
- { "name": "url", "type": "string", "description": "JavaScript script name or url." },
- { "name": "scopeChain", "type": "array", "items": { "$ref": "Scope" }, "description": "Scope chain for this call frame." },
- { "name": "this", "$ref": "Runtime.RemoteObject", "description": "<code>this</code> object for this call frame." },
- { "name": "returnValue", "$ref": "Runtime.RemoteObject", "optional": true, "description": "The value being returned, if the function is at return point." }
- ],
- "description": "JavaScript call frame. Array of call frames form the call stack."
- },
- {
- "id": "Scope",
- "type": "object",
- "properties": [
- { "name": "type", "type": "string", "enum": ["global", "local", "with", "closure", "catch", "block", "script", "eval", "module"], "description": "Scope type." },
- { "name": "object", "$ref": "Runtime.RemoteObject", "description": "Object representing the scope. For <code>global</code> and <code>with</code> scopes it represents the actual object; for the rest of the scopes, it is artificial transient object enumerating scope variables as its properties." },
- { "name": "name", "type": "string", "optional": true },
- { "name": "startLocation", "$ref": "Location", "optional": true, "description": "Location in the source code where scope starts" },
- { "name": "endLocation", "$ref": "Location", "optional": true, "description": "Location in the source code where scope ends" }
- ],
- "description": "Scope description."
- },
- {
- "id": "SearchMatch",
- "type": "object",
- "description": "Search match for resource.",
- "properties": [
- { "name": "lineNumber", "type": "number", "description": "Line number in resource content." },
- { "name": "lineContent", "type": "string", "description": "Line with match content." }
- ]
- },
- {
- "id": "BreakLocation",
- "type": "object",
- "properties": [
- { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Script identifier as reported in the <code>Debugger.scriptParsed</code>." },
- { "name": "lineNumber", "type": "integer", "description": "Line number in the script (0-based)." },
- { "name": "columnNumber", "type": "integer", "optional": true, "description": "Column number in the script (0-based)." },
- { "name": "type", "type": "string", "enum": [ "debuggerStatement", "call", "return" ], "optional": true }
- ]
- }
- ],
- "commands": [
- {
- "name": "enable",
- "returns": [
- { "name": "debuggerId", "$ref": "Runtime.UniqueDebuggerId", "experimental": true, "description": "Unique identifier of the debugger." }
- ],
- "description": "Enables debugger for the given page. Clients should not assume that the debugging has been enabled until the result for this command is received."
- },
- {
- "name": "disable",
- "description": "Disables debugger for given page."
- },
- {
- "name": "setBreakpointsActive",
- "parameters": [
- { "name": "active", "type": "boolean", "description": "New value for breakpoints active state." }
- ],
- "description": "Activates / deactivates all breakpoints on the page."
- },
- {
- "name": "setSkipAllPauses",
- "parameters": [
- { "name": "skip", "type": "boolean", "description": "New value for skip pauses state." }
- ],
- "description": "Makes page not interrupt on any pauses (breakpoint, exception, dom exception etc)."
- },
- {
- "name": "setBreakpointByUrl",
- "parameters": [
- { "name": "lineNumber", "type": "integer", "description": "Line number to set breakpoint at." },
- { "name": "url", "type": "string", "optional": true, "description": "URL of the resources to set breakpoint on." },
- { "name": "urlRegex", "type": "string", "optional": true, "description": "Regex pattern for the URLs of the resources to set breakpoints on. Either <code>url</code> or <code>urlRegex</code> must be specified." },
- { "name": "scriptHash", "type": "string", "optional": true, "description": "Script hash of the resources to set breakpoint on." },
- { "name": "columnNumber", "type": "integer", "optional": true, "description": "Offset in the line to set breakpoint at." },
- { "name": "condition", "type": "string", "optional": true, "description": "Expression to use as a breakpoint condition. When specified, debugger will only stop on the breakpoint if this expression evaluates to true." }
- ],
- "returns": [
- { "name": "breakpointId", "$ref": "BreakpointId", "description": "Id of the created breakpoint for further reference." },
- { "name": "locations", "type": "array", "items": { "$ref": "Location" }, "description": "List of the locations this breakpoint resolved into upon addition." }
- ],
- "description": "Sets JavaScript breakpoint at given location specified either by URL or URL regex. Once this command is issued, all existing parsed scripts will have breakpoints resolved and returned in <code>locations</code> property. Further matching script parsing will result in subsequent <code>breakpointResolved</code> events issued. This logical breakpoint will survive page reloads."
- },
- {
- "name": "setBreakpoint",
- "parameters": [
- { "name": "location", "$ref": "Location", "description": "Location to set breakpoint in." },
- { "name": "condition", "type": "string", "optional": true, "description": "Expression to use as a breakpoint condition. When specified, debugger will only stop on the breakpoint if this expression evaluates to true." }
- ],
- "returns": [
- { "name": "breakpointId", "$ref": "BreakpointId", "description": "Id of the created breakpoint for further reference." },
- { "name": "actualLocation", "$ref": "Location", "description": "Location this breakpoint resolved into." }
- ],
- "description": "Sets JavaScript breakpoint at a given location."
- },
- {
- "name": "removeBreakpoint",
- "parameters": [
- { "name": "breakpointId", "$ref": "BreakpointId" }
- ],
- "description": "Removes JavaScript breakpoint."
- },
- {
- "name": "getPossibleBreakpoints",
- "parameters": [
- { "name": "start", "$ref": "Location", "description": "Start of range to search possible breakpoint locations in." },
- { "name": "end", "$ref": "Location", "optional": true, "description": "End of range to search possible breakpoint locations in (excluding). When not specified, end of scripts is used as end of range." },
- { "name": "restrictToFunction", "type": "boolean", "optional": true, "description": "Only consider locations which are in the same (non-nested) function as start." }
- ],
- "returns": [
- { "name": "locations", "type": "array", "items": { "$ref": "BreakLocation" }, "description": "List of the possible breakpoint locations." }
- ],
- "description": "Returns possible locations for breakpoint. scriptId in start and end range locations should be the same."
- },
- {
- "name": "continueToLocation",
- "parameters": [
- { "name": "location", "$ref": "Location", "description": "Location to continue to." },
- { "name": "targetCallFrames", "type": "string", "enum": ["any", "current"], "optional": true }
- ],
- "description": "Continues execution until specific location is reached."
- },
- {
- "name": "pauseOnAsyncCall",
- "parameters": [
- { "name": "parentStackTraceId", "$ref": "Runtime.StackTraceId", "description": "Debugger will pause when async call with given stack trace is started." }
- ],
- "experimental": true
- },
- {
- "name": "stepOver",
- "description": "Steps over the statement."
- },
- {
- "name": "stepInto",
- "parameters": [
- { "name": "breakOnAsyncCall", "type": "boolean", "optional": true, "experimental": true, "description": "Debugger will issue additional Debugger.paused notification if any async task is scheduled before next pause." }
- ],
- "description": "Steps into the function call."
- },
- {
- "name": "stepOut",
- "description": "Steps out of the function call."
- },
- {
- "name": "pause",
- "description": "Stops on the next JavaScript statement."
- },
- {
- "name": "scheduleStepIntoAsync",
- "description": "This method is deprecated - use Debugger.stepInto with breakOnAsyncCall and Debugger.pauseOnAsyncTask instead. Steps into next scheduled async task if any is scheduled before next pause. Returns success when async task is actually scheduled, returns error if no task were scheduled or another scheduleStepIntoAsync was called.",
- "experimental": true
- },
- {
- "name": "resume",
- "description": "Resumes JavaScript execution."
- },
- {
- "name": "getStackTrace",
- "parameters": [
- { "name": "stackTraceId", "$ref": "Runtime.StackTraceId" }
- ],
- "returns": [
- { "name": "stackTrace", "$ref": "Runtime.StackTrace" }
- ],
- "description": "Returns stack trace with given <code>stackTraceId</code>.",
- "experimental": true
- },
- {
- "name": "searchInContent",
- "parameters": [
- { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Id of the script to search in." },
- { "name": "query", "type": "string", "description": "String to search for." },
- { "name": "caseSensitive", "type": "boolean", "optional": true, "description": "If true, search is case sensitive." },
- { "name": "isRegex", "type": "boolean", "optional": true, "description": "If true, treats string parameter as regex." }
- ],
- "returns": [
- { "name": "result", "type": "array", "items": { "$ref": "SearchMatch" }, "description": "List of search matches." }
- ],
- "description": "Searches for given string in script content."
- },
- {
- "name": "setScriptSource",
- "parameters": [
- { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Id of the script to edit." },
- { "name": "scriptSource", "type": "string", "description": "New content of the script." },
- { "name": "dryRun", "type": "boolean", "optional": true, "description": " If true the change will not actually be applied. Dry run may be used to get result description without actually modifying the code." }
- ],
- "returns": [
- { "name": "callFrames", "type": "array", "optional": true, "items": { "$ref": "CallFrame" }, "description": "New stack trace in case editing has happened while VM was stopped." },
- { "name": "stackChanged", "type": "boolean", "optional": true, "description": "Whether current call stack was modified after applying the changes." },
- { "name": "asyncStackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "Async stack trace, if any." },
- { "name": "asyncStackTraceId", "$ref": "Runtime.StackTraceId", "optional": true, "experimental": true, "description": "Async stack trace, if any." },
- { "name": "exceptionDetails", "optional": true, "$ref": "Runtime.ExceptionDetails", "description": "Exception details if any." }
- ],
- "description": "Edits JavaScript source live."
- },
- {
- "name": "restartFrame",
- "parameters": [
- { "name": "callFrameId", "$ref": "CallFrameId", "description": "Call frame identifier to evaluate on." }
- ],
- "returns": [
- { "name": "callFrames", "type": "array", "items": { "$ref": "CallFrame" }, "description": "New stack trace." },
- { "name": "asyncStackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "Async stack trace, if any." },
- { "name": "asyncStackTraceId", "$ref": "Runtime.StackTraceId", "optional": true, "experimental": true, "description": "Async stack trace, if any." }
- ],
- "description": "Restarts particular call frame from the beginning."
- },
- {
- "name": "getScriptSource",
- "parameters": [
- { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Id of the script to get source for." }
- ],
- "returns": [
- { "name": "scriptSource", "type": "string", "description": "Script source." }
- ],
- "description": "Returns source for the script with given id."
- },
- {
- "name": "setPauseOnExceptions",
- "parameters": [
- { "name": "state", "type": "string", "enum": ["none", "uncaught", "all"], "description": "Pause on exceptions mode." }
- ],
- "description": "Defines pause on exceptions state. Can be set to stop on all exceptions, uncaught exceptions or no exceptions. Initial pause on exceptions state is <code>none</code>."
- },
- {
- "name": "evaluateOnCallFrame",
- "parameters": [
- { "name": "callFrameId", "$ref": "CallFrameId", "description": "Call frame identifier to evaluate on." },
- { "name": "expression", "type": "string", "description": "Expression to evaluate." },
- { "name": "objectGroup", "type": "string", "optional": true, "description": "String object group name to put result into (allows rapid releasing resulting object handles using <code>releaseObjectGroup</code>)." },
- { "name": "includeCommandLineAPI", "type": "boolean", "optional": true, "description": "Specifies whether command line API should be available to the evaluated expression, defaults to false." },
- { "name": "silent", "type": "boolean", "optional": true, "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides <code>setPauseOnException</code> state." },
- { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object that should be sent by value." },
- { "name": "generatePreview", "type": "boolean", "optional": true, "experimental": true, "description": "Whether preview should be generated for the result." },
- { "name": "throwOnSideEffect", "type": "boolean", "optional": true, "description": "Whether to throw an exception if side effect cannot be ruled out during evaluation." }
- ],
- "returns": [
- { "name": "result", "$ref": "Runtime.RemoteObject", "description": "Object wrapper for the evaluation result." },
- { "name": "exceptionDetails", "$ref": "Runtime.ExceptionDetails", "optional": true, "description": "Exception details."}
- ],
- "description": "Evaluates expression on a given call frame."
- },
- {
- "name": "setVariableValue",
- "parameters": [
- { "name": "scopeNumber", "type": "integer", "description": "0-based number of scope as was listed in scope chain. Only 'local', 'closure' and 'catch' scope types are allowed. Other scopes could be manipulated manually." },
- { "name": "variableName", "type": "string", "description": "Variable name." },
- { "name": "newValue", "$ref": "Runtime.CallArgument", "description": "New variable value." },
- { "name": "callFrameId", "$ref": "CallFrameId", "description": "Id of callframe that holds variable." }
- ],
- "description": "Changes value of variable in a callframe. Object-based scopes are not supported and must be mutated manually."
- },
- {
- "name": "setReturnValue",
- "parameters": [
- { "name": "newValue", "$ref": "Runtime.CallArgument", "description": "New return value." }
- ],
- "experimental": true,
- "description": "Changes return value in top frame. Available only at return break position."
- },
- {
- "name": "setAsyncCallStackDepth",
- "parameters": [
- { "name": "maxDepth", "type": "integer", "description": "Maximum depth of async call stacks. Setting to <code>0</code> will effectively disable collecting async call stacks (default)." }
- ],
- "description": "Enables or disables async call stacks tracking."
- },
- {
- "name": "setBlackboxPatterns",
- "parameters": [
- { "name": "patterns", "type": "array", "items": { "type": "string" }, "description": "Array of regexps that will be used to check script url for blackbox state." }
- ],
- "experimental": true,
- "description": "Replace previous blackbox patterns with passed ones. Forces backend to skip stepping/pausing in scripts with url matching one of the patterns. VM will try to leave blackboxed script by performing 'step in' several times, finally resorting to 'step out' if unsuccessful."
- },
- {
- "name": "setBlackboxedRanges",
- "parameters": [
- { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Id of the script." },
- { "name": "positions", "type": "array", "items": { "$ref": "ScriptPosition" } }
- ],
- "experimental": true,
- "description": "Makes backend skip steps in the script in blackboxed ranges. VM will try leave blacklisted scripts by performing 'step in' several times, finally resorting to 'step out' if unsuccessful. Positions array contains positions where blackbox state is changed. First interval isn't blackboxed. Array should be sorted."
- }
- ],
- "events": [
- {
- "name": "scriptParsed",
- "parameters": [
- { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Identifier of the script parsed." },
- { "name": "url", "type": "string", "description": "URL or name of the script parsed (if any)." },
- { "name": "startLine", "type": "integer", "description": "Line offset of the script within the resource with given URL (for script tags)." },
- { "name": "startColumn", "type": "integer", "description": "Column offset of the script within the resource with given URL." },
- { "name": "endLine", "type": "integer", "description": "Last line of the script." },
- { "name": "endColumn", "type": "integer", "description": "Length of the last line of the script." },
- { "name": "executionContextId", "$ref": "Runtime.ExecutionContextId", "description": "Specifies script creation context." },
- { "name": "hash", "type": "string", "description": "Content hash of the script."},
- { "name": "executionContextAuxData", "type": "object", "optional": true, "description": "Embedder-specific auxiliary data." },
- { "name": "isLiveEdit", "type": "boolean", "optional": true, "description": "True, if this script is generated as a result of the live edit operation.", "experimental": true },
- { "name": "sourceMapURL", "type": "string", "optional": true, "description": "URL of source map associated with script (if any)." },
- { "name": "hasSourceURL", "type": "boolean", "optional": true, "description": "True, if this script has sourceURL." },
- { "name": "isModule", "type": "boolean", "optional": true, "description": "True, if this script is ES6 module." },
- { "name": "length", "type": "integer", "optional": true, "description": "This script length." },
- { "name": "stackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "JavaScript top stack frame of where the script parsed event was triggered if available.", "experimental": true }
- ],
- "description": "Fired when virtual machine parses script. This event is also fired for all known and uncollected scripts upon enabling debugger."
- },
- {
- "name": "scriptFailedToParse",
- "parameters": [
- { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Identifier of the script parsed." },
- { "name": "url", "type": "string", "description": "URL or name of the script parsed (if any)." },
- { "name": "startLine", "type": "integer", "description": "Line offset of the script within the resource with given URL (for script tags)." },
- { "name": "startColumn", "type": "integer", "description": "Column offset of the script within the resource with given URL." },
- { "name": "endLine", "type": "integer", "description": "Last line of the script." },
- { "name": "endColumn", "type": "integer", "description": "Length of the last line of the script." },
- { "name": "executionContextId", "$ref": "Runtime.ExecutionContextId", "description": "Specifies script creation context." },
- { "name": "hash", "type": "string", "description": "Content hash of the script."},
- { "name": "executionContextAuxData", "type": "object", "optional": true, "description": "Embedder-specific auxiliary data." },
- { "name": "sourceMapURL", "type": "string", "optional": true, "description": "URL of source map associated with script (if any)." },
- { "name": "hasSourceURL", "type": "boolean", "optional": true, "description": "True, if this script has sourceURL." },
- { "name": "isModule", "type": "boolean", "optional": true, "description": "True, if this script is ES6 module." },
- { "name": "length", "type": "integer", "optional": true, "description": "This script length." },
- { "name": "stackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "JavaScript top stack frame of where the script parsed event was triggered if available.", "experimental": true }
- ],
- "description": "Fired when virtual machine fails to parse the script."
- },
- {
- "name": "breakpointResolved",
- "parameters": [
- { "name": "breakpointId", "$ref": "BreakpointId", "description": "Breakpoint unique identifier." },
- { "name": "location", "$ref": "Location", "description": "Actual breakpoint location." }
- ],
- "description": "Fired when breakpoint is resolved to an actual script and location."
- },
- {
- "name": "paused",
- "parameters": [
- { "name": "callFrames", "type": "array", "items": { "$ref": "CallFrame" }, "description": "Call stack the virtual machine stopped on." },
- { "name": "reason", "type": "string", "enum": [ "XHR", "DOM", "EventListener", "exception", "assert", "debugCommand", "promiseRejection", "OOM", "other", "ambiguous" ], "description": "Pause reason." },
- { "name": "data", "type": "object", "optional": true, "description": "Object containing break-specific auxiliary properties." },
- { "name": "hitBreakpoints", "type": "array", "optional": true, "items": { "type": "string" }, "description": "Hit breakpoints IDs" },
- { "name": "asyncStackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "Async stack trace, if any." },
- { "name": "asyncStackTraceId", "$ref": "Runtime.StackTraceId", "optional": true, "experimental": true, "description": "Async stack trace, if any." },
- { "name": "asyncCallStackTraceId", "$ref": "Runtime.StackTraceId", "optional": true, "experimental": true, "description": "Just scheduled async call will have this stack trace as parent stack during async execution. This field is available only after <code>Debugger.stepInto</code> call with <code>breakOnAsynCall</code> flag." }
- ],
- "description": "Fired when the virtual machine stopped on breakpoint or exception or any other stop criteria."
- },
- {
- "name": "resumed",
- "description": "Fired when the virtual machine resumed execution."
- }
- ]
+ "version": {
+ "major": "1",
+ "minor": "3"
},
- {
- "domain": "Console",
- "description": "This domain is deprecated - use Runtime or Log instead.",
- "dependencies": ["Runtime"],
- "deprecated": true,
- "types": [
- {
- "id": "ConsoleMessage",
- "type": "object",
- "description": "Console message.",
- "properties": [
- { "name": "source", "type": "string", "enum": ["xml", "javascript", "network", "console-api", "storage", "appcache", "rendering", "security", "other", "deprecation", "worker"], "description": "Message source." },
- { "name": "level", "type": "string", "enum": ["log", "warning", "error", "debug", "info"], "description": "Message severity." },
- { "name": "text", "type": "string", "description": "Message text." },
- { "name": "url", "type": "string", "optional": true, "description": "URL of the message origin." },
- { "name": "line", "type": "integer", "optional": true, "description": "Line number in the resource that generated this message (1-based)." },
- { "name": "column", "type": "integer", "optional": true, "description": "Column number in the resource that generated this message (1-based)." }
- ]
- }
- ],
- "commands": [
- {
- "name": "enable",
- "description": "Enables console domain, sends the messages collected so far to the client by means of the <code>messageAdded</code> notification."
- },
- {
- "name": "disable",
- "description": "Disables console domain, prevents further console messages from being reported to the client."
- },
- {
- "name": "clearMessages",
- "description": "Does nothing."
- }
- ],
- "events": [
- {
- "name": "messageAdded",
- "parameters": [
- { "name": "message", "$ref": "ConsoleMessage", "description": "Console message that has been added." }
- ],
- "description": "Issued when new console message is added."
- }
- ]
- },
- {
- "domain": "Profiler",
- "dependencies": ["Runtime", "Debugger"],
- "types": [
- {
- "id": "ProfileNode",
- "type": "object",
- "description": "Profile node. Holds callsite information, execution statistics and child nodes.",
- "properties": [
- { "name": "id", "type": "integer", "description": "Unique id of the node." },
- { "name": "callFrame", "$ref": "Runtime.CallFrame", "description": "Function location." },
- { "name": "hitCount", "type": "integer", "optional": true, "description": "Number of samples where this node was on top of the call stack." },
- { "name": "children", "type": "array", "items": { "type": "integer" }, "optional": true, "description": "Child node ids." },
- { "name": "deoptReason", "type": "string", "optional": true, "description": "The reason of being not optimized. The function may be deoptimized or marked as don't optimize."},
- { "name": "positionTicks", "type": "array", "items": { "$ref": "PositionTickInfo" }, "optional": true, "description": "An array of source position ticks." }
- ]
- },
- {
- "id": "Profile",
- "type": "object",
- "description": "Profile.",
- "properties": [
- { "name": "nodes", "type": "array", "items": { "$ref": "ProfileNode" }, "description": "The list of profile nodes. First item is the root node." },
- { "name": "startTime", "type": "number", "description": "Profiling start timestamp in microseconds." },
- { "name": "endTime", "type": "number", "description": "Profiling end timestamp in microseconds." },
- { "name": "samples", "optional": true, "type": "array", "items": { "type": "integer" }, "description": "Ids of samples top nodes." },
- { "name": "timeDeltas", "optional": true, "type": "array", "items": { "type": "integer" }, "description": "Time intervals between adjacent samples in microseconds. The first delta is relative to the profile startTime." }
- ]
- },
- {
- "id": "PositionTickInfo",
- "type": "object",
- "description": "Specifies a number of samples attributed to a certain source position.",
- "properties": [
- { "name": "line", "type": "integer", "description": "Source line number (1-based)." },
- { "name": "ticks", "type": "integer", "description": "Number of samples attributed to the source line." }
- ]
- },
- { "id": "CoverageRange",
- "type": "object",
- "description": "Coverage data for a source range.",
- "properties": [
- { "name": "startOffset", "type": "integer", "description": "JavaScript script source offset for the range start." },
- { "name": "endOffset", "type": "integer", "description": "JavaScript script source offset for the range end." },
- { "name": "count", "type": "integer", "description": "Collected execution count of the source range." }
- ]
- },
- { "id": "FunctionCoverage",
- "type": "object",
- "description": "Coverage data for a JavaScript function.",
- "properties": [
- { "name": "functionName", "type": "string", "description": "JavaScript function name." },
- { "name": "ranges", "type": "array", "items": { "$ref": "CoverageRange" }, "description": "Source ranges inside the function with coverage data." },
- { "name": "isBlockCoverage", "type": "boolean", "description": "Whether coverage data for this function has block granularity." }
- ]
- },
- {
- "id": "ScriptCoverage",
- "type": "object",
- "description": "Coverage data for a JavaScript script.",
- "properties": [
- { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "JavaScript script id." },
- { "name": "url", "type": "string", "description": "JavaScript script name or url." },
- { "name": "functions", "type": "array", "items": { "$ref": "FunctionCoverage" }, "description": "Functions contained in the script that has coverage data." }
- ]
- },
- { "id": "TypeObject",
- "type": "object",
- "description": "Describes a type collected during runtime.",
- "properties": [
- { "name": "name", "type": "string", "description": "Name of a type collected with type profiling." }
- ],
- "experimental": true
- },
- { "id": "TypeProfileEntry",
- "type": "object",
- "description": "Source offset and types for a parameter or return value.",
- "properties": [
- { "name": "offset", "type": "integer", "description": "Source offset of the parameter or end of function for return values." },
- { "name": "types", "type": "array", "items": {"$ref": "TypeObject"}, "description": "The types for this parameter or return value."}
- ],
- "experimental": true
- },
- {
- "id": "ScriptTypeProfile",
- "type": "object",
- "description": "Type profile data collected during runtime for a JavaScript script.",
- "properties": [
- { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "JavaScript script id." },
- { "name": "url", "type": "string", "description": "JavaScript script name or url." },
- { "name": "entries", "type": "array", "items": { "$ref": "TypeProfileEntry" }, "description": "Type profile entries for parameters and return values of the functions in the script." }
- ],
- "experimental": true
- }
- ],
- "commands": [
- {
- "name": "enable"
- },
- {
- "name": "disable"
- },
- {
- "name": "setSamplingInterval",
- "parameters": [
- { "name": "interval", "type": "integer", "description": "New sampling interval in microseconds." }
- ],
- "description": "Changes CPU profiler sampling interval. Must be called before CPU profiles recording started."
- },
- {
- "name": "start"
- },
- {
- "name": "stop",
- "returns": [
- { "name": "profile", "$ref": "Profile", "description": "Recorded profile." }
- ]
- },
- {
- "name": "startPreciseCoverage",
- "parameters": [
- { "name": "callCount", "type": "boolean", "optional": true, "description": "Collect accurate call counts beyond simple 'covered' or 'not covered'." },
- { "name": "detailed", "type": "boolean", "optional": true, "description": "Collect block-based coverage." }
- ],
- "description": "Enable precise code coverage. Coverage data for JavaScript executed before enabling precise code coverage may be incomplete. Enabling prevents running optimized code and resets execution counters."
- },
- {
- "name": "stopPreciseCoverage",
- "description": "Disable precise code coverage. Disabling releases unnecessary execution count records and allows executing optimized code."
- },
- {
- "name": "takePreciseCoverage",
- "returns": [
- { "name": "result", "type": "array", "items": { "$ref": "ScriptCoverage" }, "description": "Coverage data for the current isolate." }
- ],
- "description": "Collect coverage data for the current isolate, and resets execution counters. Precise code coverage needs to have started."
- },
- {
- "name": "getBestEffortCoverage",
- "returns": [
- { "name": "result", "type": "array", "items": { "$ref": "ScriptCoverage" }, "description": "Coverage data for the current isolate." }
- ],
- "description": "Collect coverage data for the current isolate. The coverage data may be incomplete due to garbage collection."
- },
- {
- "name": "startTypeProfile",
- "description": "Enable type profile.",
- "experimental": true
- },
- {
- "name": "stopTypeProfile",
- "description": "Disable type profile. Disabling releases type profile data collected so far.",
- "experimental": true
- },
- {
- "name": "takeTypeProfile",
- "returns": [
- { "name": "result", "type": "array", "items": { "$ref": "ScriptTypeProfile" }, "description": "Type profile for all scripts since startTypeProfile() was turned on." }
- ],
- "description": "Collect type profile.",
- "experimental": true
- }
- ],
- "events": [
- {
- "name": "consoleProfileStarted",
- "parameters": [
- { "name": "id", "type": "string" },
- { "name": "location", "$ref": "Debugger.Location", "description": "Location of console.profile()." },
- { "name": "title", "type": "string", "optional": true, "description": "Profile title passed as an argument to console.profile()." }
- ],
- "description": "Sent when new profile recording is started using console.profile() call."
- },
- {
- "name": "consoleProfileFinished",
- "parameters": [
- { "name": "id", "type": "string" },
- { "name": "location", "$ref": "Debugger.Location", "description": "Location of console.profileEnd()." },
- { "name": "profile", "$ref": "Profile" },
- { "name": "title", "type": "string", "optional": true, "description": "Profile title passed as an argument to console.profile()." }
- ]
- }
- ]
- },
- {
- "domain": "HeapProfiler",
- "dependencies": ["Runtime"],
- "experimental": true,
- "types": [
- {
- "id": "HeapSnapshotObjectId",
- "type": "string",
- "description": "Heap snapshot object id."
- },
- {
- "id": "SamplingHeapProfileNode",
- "type": "object",
- "description": "Sampling Heap Profile node. Holds callsite information, allocation statistics and child nodes.",
- "properties": [
- { "name": "callFrame", "$ref": "Runtime.CallFrame", "description": "Function location." },
- { "name": "selfSize", "type": "number", "description": "Allocations size in bytes for the node excluding children." },
- { "name": "children", "type": "array", "items": { "$ref": "SamplingHeapProfileNode" }, "description": "Child nodes." }
- ]
- },
- {
- "id": "SamplingHeapProfile",
- "type": "object",
- "description": "Profile.",
- "properties": [
- { "name": "head", "$ref": "SamplingHeapProfileNode" }
- ]
- }
- ],
- "commands": [
- {
- "name": "enable"
- },
- {
- "name": "disable"
- },
- {
- "name": "startTrackingHeapObjects",
- "parameters": [
- { "name": "trackAllocations", "type": "boolean", "optional": true }
- ]
- },
- {
- "name": "stopTrackingHeapObjects",
- "parameters": [
- { "name": "reportProgress", "type": "boolean", "optional": true, "description": "If true 'reportHeapSnapshotProgress' events will be generated while snapshot is being taken when the tracking is stopped." }
- ]
- },
- {
- "name": "takeHeapSnapshot",
- "parameters": [
- { "name": "reportProgress", "type": "boolean", "optional": true, "description": "If true 'reportHeapSnapshotProgress' events will be generated while snapshot is being taken." }
- ]
- },
- {
- "name": "collectGarbage"
- },
- {
- "name": "getObjectByHeapObjectId",
- "parameters": [
- { "name": "objectId", "$ref": "HeapSnapshotObjectId" },
- { "name": "objectGroup", "type": "string", "optional": true, "description": "Symbolic group name that can be used to release multiple objects." }
- ],
- "returns": [
- { "name": "result", "$ref": "Runtime.RemoteObject", "description": "Evaluation result." }
- ]
- },
- {
- "name": "addInspectedHeapObject",
- "parameters": [
- { "name": "heapObjectId", "$ref": "HeapSnapshotObjectId", "description": "Heap snapshot object id to be accessible by means of $x command line API." }
- ],
- "description": "Enables console to refer to the node with given id via $x (see Command Line API for more details $x functions)."
- },
- {
- "name": "getHeapObjectId",
- "parameters": [
- { "name": "objectId", "$ref": "Runtime.RemoteObjectId", "description": "Identifier of the object to get heap object id for." }
- ],
- "returns": [
- { "name": "heapSnapshotObjectId", "$ref": "HeapSnapshotObjectId", "description": "Id of the heap snapshot object corresponding to the passed remote object id." }
- ]
- },
- {
- "name": "startSampling",
- "parameters": [
- { "name": "samplingInterval", "type": "number", "optional": true, "description": "Average sample interval in bytes. Poisson distribution is used for the intervals. The default value is 32768 bytes." }
- ]
- },
- {
- "name": "stopSampling",
- "returns": [
- { "name": "profile", "$ref": "SamplingHeapProfile", "description": "Recorded sampling heap profile." }
- ]
- },
- {
- "name": "getSamplingProfile",
- "returns": [
- { "name": "profile", "$ref": "SamplingHeapProfile", "description": "Return the sampling profile being collected." }
- ]
- }
- ],
- "events": [
- {
- "name": "addHeapSnapshotChunk",
- "parameters": [
- { "name": "chunk", "type": "string" }
- ]
- },
- {
- "name": "resetProfiles"
- },
- {
- "name": "reportHeapSnapshotProgress",
- "parameters": [
- { "name": "done", "type": "integer" },
- { "name": "total", "type": "integer" },
- { "name": "finished", "type": "boolean", "optional": true }
- ]
- },
- {
- "name": "lastSeenObjectId",
- "description": "If heap objects tracking has been started then backend regularly sends a current value for last seen object id and corresponding timestamp. If the were changes in the heap since last event then one or more heapStatsUpdate events will be sent before a new lastSeenObjectId event.",
- "parameters": [
- { "name": "lastSeenObjectId", "type": "integer" },
- { "name": "timestamp", "type": "number" }
- ]
- },
- {
- "name": "heapStatsUpdate",
- "description": "If heap objects tracking has been started then backend may send update for one or more fragments",
- "parameters": [
- { "name": "statsUpdate", "type": "array", "items": { "type": "integer" }, "description": "An array of triplets. Each triplet describes a fragment. The first integer is the fragment index, the second integer is a total count of objects for the fragment, the third integer is a total size of the objects for the fragment."}
- ]
- }
- ]
- }]
-}
+ "domains": [
+ {
+ "domain": "Console",
+ "description": "This domain is deprecated - use Runtime or Log instead.",
+ "deprecated": true,
+ "dependencies": [
+ "Runtime"
+ ],
+ "types": [
+ {
+ "id": "ConsoleMessage",
+ "description": "Console message.",
+ "type": "object",
+ "properties": [
+ {
+ "name": "source",
+ "description": "Message source.",
+ "type": "string",
+ "enum": [
+ "xml",
+ "javascript",
+ "network",
+ "console-api",
+ "storage",
+ "appcache",
+ "rendering",
+ "security",
+ "other",
+ "deprecation",
+ "worker"
+ ]
+ },
+ {
+ "name": "level",
+ "description": "Message severity.",
+ "type": "string",
+ "enum": [
+ "log",
+ "warning",
+ "error",
+ "debug",
+ "info"
+ ]
+ },
+ {
+ "name": "text",
+ "description": "Message text.",
+ "type": "string"
+ },
+ {
+ "name": "url",
+ "description": "URL of the message origin.",
+ "optional": true,
+ "type": "string"
+ },
+ {
+ "name": "line",
+ "description": "Line number in the resource that generated this message (1-based).",
+ "optional": true,
+ "type": "integer"
+ },
+ {
+ "name": "column",
+ "description": "Column number in the resource that generated this message (1-based).",
+ "optional": true,
+ "type": "integer"
+ }
+ ]
+ }
+ ],
+ "commands": [
+ {
+ "name": "clearMessages",
+ "description": "Does nothing."
+ },
+ {
+ "name": "disable",
+ "description": "Disables console domain, prevents further console messages from being reported to the client."
+ },
+ {
+ "name": "enable",
+ "description": "Enables console domain, sends the messages collected so far to the client by means of the\n`messageAdded` notification."
+ }
+ ],
+ "events": [
+ {
+ "name": "messageAdded",
+ "description": "Issued when new console message is added.",
+ "parameters": [
+ {
+ "name": "message",
+ "description": "Console message that has been added.",
+ "$ref": "ConsoleMessage"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "domain": "Debugger",
+ "description": "Debugger domain exposes JavaScript debugging capabilities. It allows setting and removing\nbreakpoints, stepping through execution, exploring stack traces, etc.",
+ "dependencies": [
+ "Runtime"
+ ],
+ "types": [
+ {
+ "id": "BreakpointId",
+ "description": "Breakpoint identifier.",
+ "type": "string"
+ },
+ {
+ "id": "CallFrameId",
+ "description": "Call frame identifier.",
+ "type": "string"
+ },
+ {
+ "id": "Location",
+ "description": "Location in the source code.",
+ "type": "object",
+ "properties": [
+ {
+ "name": "scriptId",
+ "description": "Script identifier as reported in the `Debugger.scriptParsed`.",
+ "$ref": "Runtime.ScriptId"
+ },
+ {
+ "name": "lineNumber",
+ "description": "Line number in the script (0-based).",
+ "type": "integer"
+ },
+ {
+ "name": "columnNumber",
+ "description": "Column number in the script (0-based).",
+ "optional": true,
+ "type": "integer"
+ }
+ ]
+ },
+ {
+ "id": "ScriptPosition",
+ "description": "Location in the source code.",
+ "experimental": true,
+ "type": "object",
+ "properties": [
+ {
+ "name": "lineNumber",
+ "type": "integer"
+ },
+ {
+ "name": "columnNumber",
+ "type": "integer"
+ }
+ ]
+ },
+ {
+ "id": "CallFrame",
+ "description": "JavaScript call frame. Array of call frames form the call stack.",
+ "type": "object",
+ "properties": [
+ {
+ "name": "callFrameId",
+ "description": "Call frame identifier. This identifier is only valid while the virtual machine is paused.",
+ "$ref": "CallFrameId"
+ },
+ {
+ "name": "functionName",
+ "description": "Name of the JavaScript function called on this call frame.",
+ "type": "string"
+ },
+ {
+ "name": "functionLocation",
+ "description": "Location in the source code.",
+ "optional": true,
+ "$ref": "Location"
+ },
+ {
+ "name": "location",
+ "description": "Location in the source code.",
+ "$ref": "Location"
+ },
+ {
+ "name": "url",
+ "description": "JavaScript script name or url.",
+ "type": "string"
+ },
+ {
+ "name": "scopeChain",
+ "description": "Scope chain for this call frame.",
+ "type": "array",
+ "items": {
+ "$ref": "Scope"
+ }
+ },
+ {
+ "name": "this",
+ "description": "`this` object for this call frame.",
+ "$ref": "Runtime.RemoteObject"
+ },
+ {
+ "name": "returnValue",
+ "description": "The value being returned, if the function is at return point.",
+ "optional": true,
+ "$ref": "Runtime.RemoteObject"
+ }
+ ]
+ },
+ {
+ "id": "Scope",
+ "description": "Scope description.",
+ "type": "object",
+ "properties": [
+ {
+ "name": "type",
+ "description": "Scope type.",
+ "type": "string",
+ "enum": [
+ "global",
+ "local",
+ "with",
+ "closure",
+ "catch",
+ "block",
+ "script",
+ "eval",
+ "module"
+ ]
+ },
+ {
+ "name": "object",
+ "description": "Object representing the scope. For `global` and `with` scopes it represents the actual\nobject; for the rest of the scopes, it is artificial transient object enumerating scope\nvariables as its properties.",
+ "$ref": "Runtime.RemoteObject"
+ },
+ {
+ "name": "name",
+ "optional": true,
+ "type": "string"
+ },
+ {
+ "name": "startLocation",
+ "description": "Location in the source code where scope starts",
+ "optional": true,
+ "$ref": "Location"
+ },
+ {
+ "name": "endLocation",
+ "description": "Location in the source code where scope ends",
+ "optional": true,
+ "$ref": "Location"
+ }
+ ]
+ },
+ {
+ "id": "SearchMatch",
+ "description": "Search match for resource.",
+ "type": "object",
+ "properties": [
+ {
+ "name": "lineNumber",
+ "description": "Line number in resource content.",
+ "type": "number"
+ },
+ {
+ "name": "lineContent",
+ "description": "Line with match content.",
+ "type": "string"
+ }
+ ]
+ },
+ {
+ "id": "BreakLocation",
+ "type": "object",
+ "properties": [
+ {
+ "name": "scriptId",
+ "description": "Script identifier as reported in the `Debugger.scriptParsed`.",
+ "$ref": "Runtime.ScriptId"
+ },
+ {
+ "name": "lineNumber",
+ "description": "Line number in the script (0-based).",
+ "type": "integer"
+ },
+ {
+ "name": "columnNumber",
+ "description": "Column number in the script (0-based).",
+ "optional": true,
+ "type": "integer"
+ },
+ {
+ "name": "type",
+ "optional": true,
+ "type": "string",
+ "enum": [
+ "debuggerStatement",
+ "call",
+ "return"
+ ]
+ }
+ ]
+ }
+ ],
+ "commands": [
+ {
+ "name": "continueToLocation",
+ "description": "Continues execution until specific location is reached.",
+ "parameters": [
+ {
+ "name": "location",
+ "description": "Location to continue to.",
+ "$ref": "Location"
+ },
+ {
+ "name": "targetCallFrames",
+ "optional": true,
+ "type": "string",
+ "enum": [
+ "any",
+ "current"
+ ]
+ }
+ ]
+ },
+ {
+ "name": "disable",
+ "description": "Disables debugger for given page."
+ },
+ {
+ "name": "enable",
+ "description": "Enables debugger for the given page. Clients should not assume that the debugging has been\nenabled until the result for this command is received.",
+ "returns": [
+ {
+ "name": "debuggerId",
+ "description": "Unique identifier of the debugger.",
+ "experimental": true,
+ "$ref": "Runtime.UniqueDebuggerId"
+ }
+ ]
+ },
+ {
+ "name": "evaluateOnCallFrame",
+ "description": "Evaluates expression on a given call frame.",
+ "parameters": [
+ {
+ "name": "callFrameId",
+ "description": "Call frame identifier to evaluate on.",
+ "$ref": "CallFrameId"
+ },
+ {
+ "name": "expression",
+ "description": "Expression to evaluate.",
+ "type": "string"
+ },
+ {
+ "name": "objectGroup",
+ "description": "String object group name to put result into (allows rapid releasing resulting object handles\nusing `releaseObjectGroup`).",
+ "optional": true,
+ "type": "string"
+ },
+ {
+ "name": "includeCommandLineAPI",
+ "description": "Specifies whether command line API should be available to the evaluated expression, defaults\nto false.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "silent",
+ "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause\nexecution. Overrides `setPauseOnException` state.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "returnByValue",
+ "description": "Whether the result is expected to be a JSON object that should be sent by value.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "generatePreview",
+ "description": "Whether preview should be generated for the result.",
+ "experimental": true,
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "throwOnSideEffect",
+ "description": "Whether to throw an exception if side effect cannot be ruled out during evaluation.",
+ "optional": true,
+ "type": "boolean"
+ }
+ ],
+ "returns": [
+ {
+ "name": "result",
+ "description": "Object wrapper for the evaluation result.",
+ "$ref": "Runtime.RemoteObject"
+ },
+ {
+ "name": "exceptionDetails",
+ "description": "Exception details.",
+ "optional": true,
+ "$ref": "Runtime.ExceptionDetails"
+ }
+ ]
+ },
+ {
+ "name": "getPossibleBreakpoints",
+ "description": "Returns possible locations for breakpoint. scriptId in start and end range locations should be\nthe same.",
+ "parameters": [
+ {
+ "name": "start",
+ "description": "Start of range to search possible breakpoint locations in.",
+ "$ref": "Location"
+ },
+ {
+ "name": "end",
+ "description": "End of range to search possible breakpoint locations in (excluding). When not specified, end\nof scripts is used as end of range.",
+ "optional": true,
+ "$ref": "Location"
+ },
+ {
+ "name": "restrictToFunction",
+ "description": "Only consider locations which are in the same (non-nested) function as start.",
+ "optional": true,
+ "type": "boolean"
+ }
+ ],
+ "returns": [
+ {
+ "name": "locations",
+ "description": "List of the possible breakpoint locations.",
+ "type": "array",
+ "items": {
+ "$ref": "BreakLocation"
+ }
+ }
+ ]
+ },
+ {
+ "name": "getScriptSource",
+ "description": "Returns source for the script with given id.",
+ "parameters": [
+ {
+ "name": "scriptId",
+ "description": "Id of the script to get source for.",
+ "$ref": "Runtime.ScriptId"
+ }
+ ],
+ "returns": [
+ {
+ "name": "scriptSource",
+ "description": "Script source.",
+ "type": "string"
+ }
+ ]
+ },
+ {
+ "name": "getStackTrace",
+ "description": "Returns stack trace with given `stackTraceId`.",
+ "experimental": true,
+ "parameters": [
+ {
+ "name": "stackTraceId",
+ "$ref": "Runtime.StackTraceId"
+ }
+ ],
+ "returns": [
+ {
+ "name": "stackTrace",
+ "$ref": "Runtime.StackTrace"
+ }
+ ]
+ },
+ {
+ "name": "pause",
+ "description": "Stops on the next JavaScript statement."
+ },
+ {
+ "name": "pauseOnAsyncCall",
+ "experimental": true,
+ "parameters": [
+ {
+ "name": "parentStackTraceId",
+ "description": "Debugger will pause when async call with given stack trace is started.",
+ "$ref": "Runtime.StackTraceId"
+ }
+ ]
+ },
+ {
+ "name": "removeBreakpoint",
+ "description": "Removes JavaScript breakpoint.",
+ "parameters": [
+ {
+ "name": "breakpointId",
+ "$ref": "BreakpointId"
+ }
+ ]
+ },
+ {
+ "name": "restartFrame",
+ "description": "Restarts particular call frame from the beginning.",
+ "parameters": [
+ {
+ "name": "callFrameId",
+ "description": "Call frame identifier to evaluate on.",
+ "$ref": "CallFrameId"
+ }
+ ],
+ "returns": [
+ {
+ "name": "callFrames",
+ "description": "New stack trace.",
+ "type": "array",
+ "items": {
+ "$ref": "CallFrame"
+ }
+ },
+ {
+ "name": "asyncStackTrace",
+ "description": "Async stack trace, if any.",
+ "optional": true,
+ "$ref": "Runtime.StackTrace"
+ },
+ {
+ "name": "asyncStackTraceId",
+ "description": "Async stack trace, if any.",
+ "experimental": true,
+ "optional": true,
+ "$ref": "Runtime.StackTraceId"
+ }
+ ]
+ },
+ {
+ "name": "resume",
+ "description": "Resumes JavaScript execution."
+ },
+ {
+ "name": "scheduleStepIntoAsync",
+ "description": "This method is deprecated - use Debugger.stepInto with breakOnAsyncCall and\nDebugger.pauseOnAsyncTask instead. Steps into next scheduled async task if any is scheduled\nbefore next pause. Returns success when async task is actually scheduled, returns error if no\ntask were scheduled or another scheduleStepIntoAsync was called.",
+ "experimental": true
+ },
+ {
+ "name": "searchInContent",
+ "description": "Searches for given string in script content.",
+ "parameters": [
+ {
+ "name": "scriptId",
+ "description": "Id of the script to search in.",
+ "$ref": "Runtime.ScriptId"
+ },
+ {
+ "name": "query",
+ "description": "String to search for.",
+ "type": "string"
+ },
+ {
+ "name": "caseSensitive",
+ "description": "If true, search is case sensitive.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "isRegex",
+ "description": "If true, treats string parameter as regex.",
+ "optional": true,
+ "type": "boolean"
+ }
+ ],
+ "returns": [
+ {
+ "name": "result",
+ "description": "List of search matches.",
+ "type": "array",
+ "items": {
+ "$ref": "SearchMatch"
+ }
+ }
+ ]
+ },
+ {
+ "name": "setAsyncCallStackDepth",
+ "description": "Enables or disables async call stacks tracking.",
+ "parameters": [
+ {
+ "name": "maxDepth",
+ "description": "Maximum depth of async call stacks. Setting to `0` will effectively disable collecting async\ncall stacks (default).",
+ "type": "integer"
+ }
+ ]
+ },
+ {
+ "name": "setBlackboxPatterns",
+ "description": "Replace previous blackbox patterns with passed ones. Forces backend to skip stepping/pausing in\nscripts with url matching one of the patterns. VM will try to leave blackboxed script by\nperforming 'step in' several times, finally resorting to 'step out' if unsuccessful.",
+ "experimental": true,
+ "parameters": [
+ {
+ "name": "patterns",
+ "description": "Array of regexps that will be used to check script url for blackbox state.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ }
+ ]
+ },
+ {
+ "name": "setBlackboxedRanges",
+ "description": "Makes backend skip steps in the script in blackboxed ranges. VM will try leave blacklisted\nscripts by performing 'step in' several times, finally resorting to 'step out' if unsuccessful.\nPositions array contains positions where blackbox state is changed. First interval isn't\nblackboxed. Array should be sorted.",
+ "experimental": true,
+ "parameters": [
+ {
+ "name": "scriptId",
+ "description": "Id of the script.",
+ "$ref": "Runtime.ScriptId"
+ },
+ {
+ "name": "positions",
+ "type": "array",
+ "items": {
+ "$ref": "ScriptPosition"
+ }
+ }
+ ]
+ },
+ {
+ "name": "setBreakpoint",
+ "description": "Sets JavaScript breakpoint at a given location.",
+ "parameters": [
+ {
+ "name": "location",
+ "description": "Location to set breakpoint in.",
+ "$ref": "Location"
+ },
+ {
+ "name": "condition",
+ "description": "Expression to use as a breakpoint condition. When specified, debugger will only stop on the\nbreakpoint if this expression evaluates to true.",
+ "optional": true,
+ "type": "string"
+ }
+ ],
+ "returns": [
+ {
+ "name": "breakpointId",
+ "description": "Id of the created breakpoint for further reference.",
+ "$ref": "BreakpointId"
+ },
+ {
+ "name": "actualLocation",
+ "description": "Location this breakpoint resolved into.",
+ "$ref": "Location"
+ }
+ ]
+ },
+ {
+ "name": "setBreakpointByUrl",
+ "description": "Sets JavaScript breakpoint at given location specified either by URL or URL regex. Once this\ncommand is issued, all existing parsed scripts will have breakpoints resolved and returned in\n`locations` property. Further matching script parsing will result in subsequent\n`breakpointResolved` events issued. This logical breakpoint will survive page reloads.",
+ "parameters": [
+ {
+ "name": "lineNumber",
+ "description": "Line number to set breakpoint at.",
+ "type": "integer"
+ },
+ {
+ "name": "url",
+ "description": "URL of the resources to set breakpoint on.",
+ "optional": true,
+ "type": "string"
+ },
+ {
+ "name": "urlRegex",
+ "description": "Regex pattern for the URLs of the resources to set breakpoints on. Either `url` or\n`urlRegex` must be specified.",
+ "optional": true,
+ "type": "string"
+ },
+ {
+ "name": "scriptHash",
+ "description": "Script hash of the resources to set breakpoint on.",
+ "optional": true,
+ "type": "string"
+ },
+ {
+ "name": "columnNumber",
+ "description": "Offset in the line to set breakpoint at.",
+ "optional": true,
+ "type": "integer"
+ },
+ {
+ "name": "condition",
+ "description": "Expression to use as a breakpoint condition. When specified, debugger will only stop on the\nbreakpoint if this expression evaluates to true.",
+ "optional": true,
+ "type": "string"
+ }
+ ],
+ "returns": [
+ {
+ "name": "breakpointId",
+ "description": "Id of the created breakpoint for further reference.",
+ "$ref": "BreakpointId"
+ },
+ {
+ "name": "locations",
+ "description": "List of the locations this breakpoint resolved into upon addition.",
+ "type": "array",
+ "items": {
+ "$ref": "Location"
+ }
+ }
+ ]
+ },
+ {
+ "name": "setBreakpointsActive",
+ "description": "Activates / deactivates all breakpoints on the page.",
+ "parameters": [
+ {
+ "name": "active",
+ "description": "New value for breakpoints active state.",
+ "type": "boolean"
+ }
+ ]
+ },
+ {
+ "name": "setPauseOnExceptions",
+ "description": "Defines pause on exceptions state. Can be set to stop on all exceptions, uncaught exceptions or\nno exceptions. Initial pause on exceptions state is `none`.",
+ "parameters": [
+ {
+ "name": "state",
+ "description": "Pause on exceptions mode.",
+ "type": "string",
+ "enum": [
+ "none",
+ "uncaught",
+ "all"
+ ]
+ }
+ ]
+ },
+ {
+ "name": "setReturnValue",
+ "description": "Changes return value in top frame. Available only at return break position.",
+ "experimental": true,
+ "parameters": [
+ {
+ "name": "newValue",
+ "description": "New return value.",
+ "$ref": "Runtime.CallArgument"
+ }
+ ]
+ },
+ {
+ "name": "setScriptSource",
+ "description": "Edits JavaScript source live.",
+ "parameters": [
+ {
+ "name": "scriptId",
+ "description": "Id of the script to edit.",
+ "$ref": "Runtime.ScriptId"
+ },
+ {
+ "name": "scriptSource",
+ "description": "New content of the script.",
+ "type": "string"
+ },
+ {
+ "name": "dryRun",
+ "description": "If true the change will not actually be applied. Dry run may be used to get result\ndescription without actually modifying the code.",
+ "optional": true,
+ "type": "boolean"
+ }
+ ],
+ "returns": [
+ {
+ "name": "callFrames",
+ "description": "New stack trace in case editing has happened while VM was stopped.",
+ "optional": true,
+ "type": "array",
+ "items": {
+ "$ref": "CallFrame"
+ }
+ },
+ {
+ "name": "stackChanged",
+ "description": "Whether current call stack was modified after applying the changes.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "asyncStackTrace",
+ "description": "Async stack trace, if any.",
+ "optional": true,
+ "$ref": "Runtime.StackTrace"
+ },
+ {
+ "name": "asyncStackTraceId",
+ "description": "Async stack trace, if any.",
+ "experimental": true,
+ "optional": true,
+ "$ref": "Runtime.StackTraceId"
+ },
+ {
+ "name": "exceptionDetails",
+ "description": "Exception details if any.",
+ "optional": true,
+ "$ref": "Runtime.ExceptionDetails"
+ }
+ ]
+ },
+ {
+ "name": "setSkipAllPauses",
+ "description": "Makes page not interrupt on any pauses (breakpoint, exception, dom exception etc).",
+ "parameters": [
+ {
+ "name": "skip",
+ "description": "New value for skip pauses state.",
+ "type": "boolean"
+ }
+ ]
+ },
+ {
+ "name": "setVariableValue",
+ "description": "Changes value of variable in a callframe. Object-based scopes are not supported and must be\nmutated manually.",
+ "parameters": [
+ {
+ "name": "scopeNumber",
+ "description": "0-based number of scope as was listed in scope chain. Only 'local', 'closure' and 'catch'\nscope types are allowed. Other scopes could be manipulated manually.",
+ "type": "integer"
+ },
+ {
+ "name": "variableName",
+ "description": "Variable name.",
+ "type": "string"
+ },
+ {
+ "name": "newValue",
+ "description": "New variable value.",
+ "$ref": "Runtime.CallArgument"
+ },
+ {
+ "name": "callFrameId",
+ "description": "Id of callframe that holds variable.",
+ "$ref": "CallFrameId"
+ }
+ ]
+ },
+ {
+ "name": "stepInto",
+ "description": "Steps into the function call.",
+ "parameters": [
+ {
+ "name": "breakOnAsyncCall",
+ "description": "Debugger will issue additional Debugger.paused notification if any async task is scheduled\nbefore next pause.",
+ "experimental": true,
+ "optional": true,
+ "type": "boolean"
+ }
+ ]
+ },
+ {
+ "name": "stepOut",
+ "description": "Steps out of the function call."
+ },
+ {
+ "name": "stepOver",
+ "description": "Steps over the statement."
+ }
+ ],
+ "events": [
+ {
+ "name": "breakpointResolved",
+ "description": "Fired when breakpoint is resolved to an actual script and location.",
+ "parameters": [
+ {
+ "name": "breakpointId",
+ "description": "Breakpoint unique identifier.",
+ "$ref": "BreakpointId"
+ },
+ {
+ "name": "location",
+ "description": "Actual breakpoint location.",
+ "$ref": "Location"
+ }
+ ]
+ },
+ {
+ "name": "paused",
+ "description": "Fired when the virtual machine stopped on breakpoint or exception or any other stop criteria.",
+ "parameters": [
+ {
+ "name": "callFrames",
+ "description": "Call stack the virtual machine stopped on.",
+ "type": "array",
+ "items": {
+ "$ref": "CallFrame"
+ }
+ },
+ {
+ "name": "reason",
+ "description": "Pause reason.",
+ "type": "string",
+ "enum": [
+ "XHR",
+ "DOM",
+ "EventListener",
+ "exception",
+ "assert",
+ "debugCommand",
+ "promiseRejection",
+ "OOM",
+ "other",
+ "ambiguous"
+ ]
+ },
+ {
+ "name": "data",
+ "description": "Object containing break-specific auxiliary properties.",
+ "optional": true,
+ "type": "object"
+ },
+ {
+ "name": "hitBreakpoints",
+ "description": "Hit breakpoints IDs",
+ "optional": true,
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "asyncStackTrace",
+ "description": "Async stack trace, if any.",
+ "optional": true,
+ "$ref": "Runtime.StackTrace"
+ },
+ {
+ "name": "asyncStackTraceId",
+ "description": "Async stack trace, if any.",
+ "experimental": true,
+ "optional": true,
+ "$ref": "Runtime.StackTraceId"
+ },
+ {
+ "name": "asyncCallStackTraceId",
+ "description": "Just scheduled async call will have this stack trace as parent stack during async execution.\nThis field is available only after `Debugger.stepInto` call with `breakOnAsynCall` flag.",
+ "experimental": true,
+ "optional": true,
+ "$ref": "Runtime.StackTraceId"
+ }
+ ]
+ },
+ {
+ "name": "resumed",
+ "description": "Fired when the virtual machine resumed execution."
+ },
+ {
+ "name": "scriptFailedToParse",
+ "description": "Fired when virtual machine fails to parse the script.",
+ "parameters": [
+ {
+ "name": "scriptId",
+ "description": "Identifier of the script parsed.",
+ "$ref": "Runtime.ScriptId"
+ },
+ {
+ "name": "url",
+ "description": "URL or name of the script parsed (if any).",
+ "type": "string"
+ },
+ {
+ "name": "startLine",
+ "description": "Line offset of the script within the resource with given URL (for script tags).",
+ "type": "integer"
+ },
+ {
+ "name": "startColumn",
+ "description": "Column offset of the script within the resource with given URL.",
+ "type": "integer"
+ },
+ {
+ "name": "endLine",
+ "description": "Last line of the script.",
+ "type": "integer"
+ },
+ {
+ "name": "endColumn",
+ "description": "Length of the last line of the script.",
+ "type": "integer"
+ },
+ {
+ "name": "executionContextId",
+ "description": "Specifies script creation context.",
+ "$ref": "Runtime.ExecutionContextId"
+ },
+ {
+ "name": "hash",
+ "description": "Content hash of the script.",
+ "type": "string"
+ },
+ {
+ "name": "executionContextAuxData",
+ "description": "Embedder-specific auxiliary data.",
+ "optional": true,
+ "type": "object"
+ },
+ {
+ "name": "sourceMapURL",
+ "description": "URL of source map associated with script (if any).",
+ "optional": true,
+ "type": "string"
+ },
+ {
+ "name": "hasSourceURL",
+ "description": "True, if this script has sourceURL.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "isModule",
+ "description": "True, if this script is ES6 module.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "length",
+ "description": "This script length.",
+ "optional": true,
+ "type": "integer"
+ },
+ {
+ "name": "stackTrace",
+ "description": "JavaScript top stack frame of where the script parsed event was triggered if available.",
+ "experimental": true,
+ "optional": true,
+ "$ref": "Runtime.StackTrace"
+ }
+ ]
+ },
+ {
+ "name": "scriptParsed",
+ "description": "Fired when virtual machine parses script. This event is also fired for all known and uncollected\nscripts upon enabling debugger.",
+ "parameters": [
+ {
+ "name": "scriptId",
+ "description": "Identifier of the script parsed.",
+ "$ref": "Runtime.ScriptId"
+ },
+ {
+ "name": "url",
+ "description": "URL or name of the script parsed (if any).",
+ "type": "string"
+ },
+ {
+ "name": "startLine",
+ "description": "Line offset of the script within the resource with given URL (for script tags).",
+ "type": "integer"
+ },
+ {
+ "name": "startColumn",
+ "description": "Column offset of the script within the resource with given URL.",
+ "type": "integer"
+ },
+ {
+ "name": "endLine",
+ "description": "Last line of the script.",
+ "type": "integer"
+ },
+ {
+ "name": "endColumn",
+ "description": "Length of the last line of the script.",
+ "type": "integer"
+ },
+ {
+ "name": "executionContextId",
+ "description": "Specifies script creation context.",
+ "$ref": "Runtime.ExecutionContextId"
+ },
+ {
+ "name": "hash",
+ "description": "Content hash of the script.",
+ "type": "string"
+ },
+ {
+ "name": "executionContextAuxData",
+ "description": "Embedder-specific auxiliary data.",
+ "optional": true,
+ "type": "object"
+ },
+ {
+ "name": "isLiveEdit",
+ "description": "True, if this script is generated as a result of the live edit operation.",
+ "experimental": true,
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "sourceMapURL",
+ "description": "URL of source map associated with script (if any).",
+ "optional": true,
+ "type": "string"
+ },
+ {
+ "name": "hasSourceURL",
+ "description": "True, if this script has sourceURL.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "isModule",
+ "description": "True, if this script is ES6 module.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "length",
+ "description": "This script length.",
+ "optional": true,
+ "type": "integer"
+ },
+ {
+ "name": "stackTrace",
+ "description": "JavaScript top stack frame of where the script parsed event was triggered if available.",
+ "experimental": true,
+ "optional": true,
+ "$ref": "Runtime.StackTrace"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "domain": "HeapProfiler",
+ "experimental": true,
+ "dependencies": [
+ "Runtime"
+ ],
+ "types": [
+ {
+ "id": "HeapSnapshotObjectId",
+ "description": "Heap snapshot object id.",
+ "type": "string"
+ },
+ {
+ "id": "SamplingHeapProfileNode",
+ "description": "Sampling Heap Profile node. Holds callsite information, allocation statistics and child nodes.",
+ "type": "object",
+ "properties": [
+ {
+ "name": "callFrame",
+ "description": "Function location.",
+ "$ref": "Runtime.CallFrame"
+ },
+ {
+ "name": "selfSize",
+ "description": "Allocations size in bytes for the node excluding children.",
+ "type": "number"
+ },
+ {
+ "name": "children",
+ "description": "Child nodes.",
+ "type": "array",
+ "items": {
+ "$ref": "SamplingHeapProfileNode"
+ }
+ }
+ ]
+ },
+ {
+ "id": "SamplingHeapProfile",
+ "description": "Profile.",
+ "type": "object",
+ "properties": [
+ {
+ "name": "head",
+ "$ref": "SamplingHeapProfileNode"
+ }
+ ]
+ }
+ ],
+ "commands": [
+ {
+ "name": "addInspectedHeapObject",
+ "description": "Enables console to refer to the node with given id via $x (see Command Line API for more details\n$x functions).",
+ "parameters": [
+ {
+ "name": "heapObjectId",
+ "description": "Heap snapshot object id to be accessible by means of $x command line API.",
+ "$ref": "HeapSnapshotObjectId"
+ }
+ ]
+ },
+ {
+ "name": "collectGarbage"
+ },
+ {
+ "name": "disable"
+ },
+ {
+ "name": "enable"
+ },
+ {
+ "name": "getHeapObjectId",
+ "parameters": [
+ {
+ "name": "objectId",
+ "description": "Identifier of the object to get heap object id for.",
+ "$ref": "Runtime.RemoteObjectId"
+ }
+ ],
+ "returns": [
+ {
+ "name": "heapSnapshotObjectId",
+ "description": "Id of the heap snapshot object corresponding to the passed remote object id.",
+ "$ref": "HeapSnapshotObjectId"
+ }
+ ]
+ },
+ {
+ "name": "getObjectByHeapObjectId",
+ "parameters": [
+ {
+ "name": "objectId",
+ "$ref": "HeapSnapshotObjectId"
+ },
+ {
+ "name": "objectGroup",
+ "description": "Symbolic group name that can be used to release multiple objects.",
+ "optional": true,
+ "type": "string"
+ }
+ ],
+ "returns": [
+ {
+ "name": "result",
+ "description": "Evaluation result.",
+ "$ref": "Runtime.RemoteObject"
+ }
+ ]
+ },
+ {
+ "name": "getSamplingProfile",
+ "returns": [
+ {
+ "name": "profile",
+ "description": "Return the sampling profile being collected.",
+ "$ref": "SamplingHeapProfile"
+ }
+ ]
+ },
+ {
+ "name": "startSampling",
+ "parameters": [
+ {
+ "name": "samplingInterval",
+ "description": "Average sample interval in bytes. Poisson distribution is used for the intervals. The\ndefault value is 32768 bytes.",
+ "optional": true,
+ "type": "number"
+ }
+ ]
+ },
+ {
+ "name": "startTrackingHeapObjects",
+ "parameters": [
+ {
+ "name": "trackAllocations",
+ "optional": true,
+ "type": "boolean"
+ }
+ ]
+ },
+ {
+ "name": "stopSampling",
+ "returns": [
+ {
+ "name": "profile",
+ "description": "Recorded sampling heap profile.",
+ "$ref": "SamplingHeapProfile"
+ }
+ ]
+ },
+ {
+ "name": "stopTrackingHeapObjects",
+ "parameters": [
+ {
+ "name": "reportProgress",
+ "description": "If true 'reportHeapSnapshotProgress' events will be generated while snapshot is being taken\nwhen the tracking is stopped.",
+ "optional": true,
+ "type": "boolean"
+ }
+ ]
+ },
+ {
+ "name": "takeHeapSnapshot",
+ "parameters": [
+ {
+ "name": "reportProgress",
+ "description": "If true 'reportHeapSnapshotProgress' events will be generated while snapshot is being taken.",
+ "optional": true,
+ "type": "boolean"
+ }
+ ]
+ }
+ ],
+ "events": [
+ {
+ "name": "addHeapSnapshotChunk",
+ "parameters": [
+ {
+ "name": "chunk",
+ "type": "string"
+ }
+ ]
+ },
+ {
+ "name": "heapStatsUpdate",
+ "description": "If heap objects tracking has been started then backend may send update for one or more fragments",
+ "parameters": [
+ {
+ "name": "statsUpdate",
+ "description": "An array of triplets. Each triplet describes a fragment. The first integer is the fragment\nindex, the second integer is a total count of objects for the fragment, the third integer is\na total size of the objects for the fragment.",
+ "type": "array",
+ "items": {
+ "type": "integer"
+ }
+ }
+ ]
+ },
+ {
+ "name": "lastSeenObjectId",
+ "description": "If heap objects tracking has been started then backend regularly sends a current value for last\nseen object id and corresponding timestamp. If the were changes in the heap since last event\nthen one or more heapStatsUpdate events will be sent before a new lastSeenObjectId event.",
+ "parameters": [
+ {
+ "name": "lastSeenObjectId",
+ "type": "integer"
+ },
+ {
+ "name": "timestamp",
+ "type": "number"
+ }
+ ]
+ },
+ {
+ "name": "reportHeapSnapshotProgress",
+ "parameters": [
+ {
+ "name": "done",
+ "type": "integer"
+ },
+ {
+ "name": "total",
+ "type": "integer"
+ },
+ {
+ "name": "finished",
+ "optional": true,
+ "type": "boolean"
+ }
+ ]
+ },
+ {
+ "name": "resetProfiles"
+ }
+ ]
+ },
+ {
+ "domain": "Profiler",
+ "dependencies": [
+ "Runtime",
+ "Debugger"
+ ],
+ "types": [
+ {
+ "id": "ProfileNode",
+ "description": "Profile node. Holds callsite information, execution statistics and child nodes.",
+ "type": "object",
+ "properties": [
+ {
+ "name": "id",
+ "description": "Unique id of the node.",
+ "type": "integer"
+ },
+ {
+ "name": "callFrame",
+ "description": "Function location.",
+ "$ref": "Runtime.CallFrame"
+ },
+ {
+ "name": "hitCount",
+ "description": "Number of samples where this node was on top of the call stack.",
+ "optional": true,
+ "type": "integer"
+ },
+ {
+ "name": "children",
+ "description": "Child node ids.",
+ "optional": true,
+ "type": "array",
+ "items": {
+ "type": "integer"
+ }
+ },
+ {
+ "name": "deoptReason",
+ "description": "The reason of being not optimized. The function may be deoptimized or marked as don't\noptimize.",
+ "optional": true,
+ "type": "string"
+ },
+ {
+ "name": "positionTicks",
+ "description": "An array of source position ticks.",
+ "optional": true,
+ "type": "array",
+ "items": {
+ "$ref": "PositionTickInfo"
+ }
+ }
+ ]
+ },
+ {
+ "id": "Profile",
+ "description": "Profile.",
+ "type": "object",
+ "properties": [
+ {
+ "name": "nodes",
+ "description": "The list of profile nodes. First item is the root node.",
+ "type": "array",
+ "items": {
+ "$ref": "ProfileNode"
+ }
+ },
+ {
+ "name": "startTime",
+ "description": "Profiling start timestamp in microseconds.",
+ "type": "number"
+ },
+ {
+ "name": "endTime",
+ "description": "Profiling end timestamp in microseconds.",
+ "type": "number"
+ },
+ {
+ "name": "samples",
+ "description": "Ids of samples top nodes.",
+ "optional": true,
+ "type": "array",
+ "items": {
+ "type": "integer"
+ }
+ },
+ {
+ "name": "timeDeltas",
+ "description": "Time intervals between adjacent samples in microseconds. The first delta is relative to the\nprofile startTime.",
+ "optional": true,
+ "type": "array",
+ "items": {
+ "type": "integer"
+ }
+ }
+ ]
+ },
+ {
+ "id": "PositionTickInfo",
+ "description": "Specifies a number of samples attributed to a certain source position.",
+ "type": "object",
+ "properties": [
+ {
+ "name": "line",
+ "description": "Source line number (1-based).",
+ "type": "integer"
+ },
+ {
+ "name": "ticks",
+ "description": "Number of samples attributed to the source line.",
+ "type": "integer"
+ }
+ ]
+ },
+ {
+ "id": "CoverageRange",
+ "description": "Coverage data for a source range.",
+ "type": "object",
+ "properties": [
+ {
+ "name": "startOffset",
+ "description": "JavaScript script source offset for the range start.",
+ "type": "integer"
+ },
+ {
+ "name": "endOffset",
+ "description": "JavaScript script source offset for the range end.",
+ "type": "integer"
+ },
+ {
+ "name": "count",
+ "description": "Collected execution count of the source range.",
+ "type": "integer"
+ }
+ ]
+ },
+ {
+ "id": "FunctionCoverage",
+ "description": "Coverage data for a JavaScript function.",
+ "type": "object",
+ "properties": [
+ {
+ "name": "functionName",
+ "description": "JavaScript function name.",
+ "type": "string"
+ },
+ {
+ "name": "ranges",
+ "description": "Source ranges inside the function with coverage data.",
+ "type": "array",
+ "items": {
+ "$ref": "CoverageRange"
+ }
+ },
+ {
+ "name": "isBlockCoverage",
+ "description": "Whether coverage data for this function has block granularity.",
+ "type": "boolean"
+ }
+ ]
+ },
+ {
+ "id": "ScriptCoverage",
+ "description": "Coverage data for a JavaScript script.",
+ "type": "object",
+ "properties": [
+ {
+ "name": "scriptId",
+ "description": "JavaScript script id.",
+ "$ref": "Runtime.ScriptId"
+ },
+ {
+ "name": "url",
+ "description": "JavaScript script name or url.",
+ "type": "string"
+ },
+ {
+ "name": "functions",
+ "description": "Functions contained in the script that has coverage data.",
+ "type": "array",
+ "items": {
+ "$ref": "FunctionCoverage"
+ }
+ }
+ ]
+ },
+ {
+ "id": "TypeObject",
+ "description": "Describes a type collected during runtime.",
+ "experimental": true,
+ "type": "object",
+ "properties": [
+ {
+ "name": "name",
+ "description": "Name of a type collected with type profiling.",
+ "type": "string"
+ }
+ ]
+ },
+ {
+ "id": "TypeProfileEntry",
+ "description": "Source offset and types for a parameter or return value.",
+ "experimental": true,
+ "type": "object",
+ "properties": [
+ {
+ "name": "offset",
+ "description": "Source offset of the parameter or end of function for return values.",
+ "type": "integer"
+ },
+ {
+ "name": "types",
+ "description": "The types for this parameter or return value.",
+ "type": "array",
+ "items": {
+ "$ref": "TypeObject"
+ }
+ }
+ ]
+ },
+ {
+ "id": "ScriptTypeProfile",
+ "description": "Type profile data collected during runtime for a JavaScript script.",
+ "experimental": true,
+ "type": "object",
+ "properties": [
+ {
+ "name": "scriptId",
+ "description": "JavaScript script id.",
+ "$ref": "Runtime.ScriptId"
+ },
+ {
+ "name": "url",
+ "description": "JavaScript script name or url.",
+ "type": "string"
+ },
+ {
+ "name": "entries",
+ "description": "Type profile entries for parameters and return values of the functions in the script.",
+ "type": "array",
+ "items": {
+ "$ref": "TypeProfileEntry"
+ }
+ }
+ ]
+ }
+ ],
+ "commands": [
+ {
+ "name": "disable"
+ },
+ {
+ "name": "enable"
+ },
+ {
+ "name": "getBestEffortCoverage",
+ "description": "Collect coverage data for the current isolate. The coverage data may be incomplete due to\ngarbage collection.",
+ "returns": [
+ {
+ "name": "result",
+ "description": "Coverage data for the current isolate.",
+ "type": "array",
+ "items": {
+ "$ref": "ScriptCoverage"
+ }
+ }
+ ]
+ },
+ {
+ "name": "setSamplingInterval",
+ "description": "Changes CPU profiler sampling interval. Must be called before CPU profiles recording started.",
+ "parameters": [
+ {
+ "name": "interval",
+ "description": "New sampling interval in microseconds.",
+ "type": "integer"
+ }
+ ]
+ },
+ {
+ "name": "start"
+ },
+ {
+ "name": "startPreciseCoverage",
+ "description": "Enable precise code coverage. Coverage data for JavaScript executed before enabling precise code\ncoverage may be incomplete. Enabling prevents running optimized code and resets execution\ncounters.",
+ "parameters": [
+ {
+ "name": "callCount",
+ "description": "Collect accurate call counts beyond simple 'covered' or 'not covered'.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "detailed",
+ "description": "Collect block-based coverage.",
+ "optional": true,
+ "type": "boolean"
+ }
+ ]
+ },
+ {
+ "name": "startTypeProfile",
+ "description": "Enable type profile.",
+ "experimental": true
+ },
+ {
+ "name": "stop",
+ "returns": [
+ {
+ "name": "profile",
+ "description": "Recorded profile.",
+ "$ref": "Profile"
+ }
+ ]
+ },
+ {
+ "name": "stopPreciseCoverage",
+ "description": "Disable precise code coverage. Disabling releases unnecessary execution count records and allows\nexecuting optimized code."
+ },
+ {
+ "name": "stopTypeProfile",
+ "description": "Disable type profile. Disabling releases type profile data collected so far.",
+ "experimental": true
+ },
+ {
+ "name": "takePreciseCoverage",
+ "description": "Collect coverage data for the current isolate, and resets execution counters. Precise code\ncoverage needs to have started.",
+ "returns": [
+ {
+ "name": "result",
+ "description": "Coverage data for the current isolate.",
+ "type": "array",
+ "items": {
+ "$ref": "ScriptCoverage"
+ }
+ }
+ ]
+ },
+ {
+ "name": "takeTypeProfile",
+ "description": "Collect type profile.",
+ "experimental": true,
+ "returns": [
+ {
+ "name": "result",
+ "description": "Type profile for all scripts since startTypeProfile() was turned on.",
+ "type": "array",
+ "items": {
+ "$ref": "ScriptTypeProfile"
+ }
+ }
+ ]
+ }
+ ],
+ "events": [
+ {
+ "name": "consoleProfileFinished",
+ "parameters": [
+ {
+ "name": "id",
+ "type": "string"
+ },
+ {
+ "name": "location",
+ "description": "Location of console.profileEnd().",
+ "$ref": "Debugger.Location"
+ },
+ {
+ "name": "profile",
+ "$ref": "Profile"
+ },
+ {
+ "name": "title",
+ "description": "Profile title passed as an argument to console.profile().",
+ "optional": true,
+ "type": "string"
+ }
+ ]
+ },
+ {
+ "name": "consoleProfileStarted",
+ "description": "Sent when new profile recording is started using console.profile() call.",
+ "parameters": [
+ {
+ "name": "id",
+ "type": "string"
+ },
+ {
+ "name": "location",
+ "description": "Location of console.profile().",
+ "$ref": "Debugger.Location"
+ },
+ {
+ "name": "title",
+ "description": "Profile title passed as an argument to console.profile().",
+ "optional": true,
+ "type": "string"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "domain": "Runtime",
+ "description": "Runtime domain exposes JavaScript runtime by means of remote evaluation and mirror objects.\nEvaluation results are returned as mirror object that expose object type, string representation\nand unique identifier that can be used for further object reference. Original objects are\nmaintained in memory unless they are either explicitly released or are released along with the\nother objects in their object group.",
+ "types": [
+ {
+ "id": "ScriptId",
+ "description": "Unique script identifier.",
+ "type": "string"
+ },
+ {
+ "id": "RemoteObjectId",
+ "description": "Unique object identifier.",
+ "type": "string"
+ },
+ {
+ "id": "UnserializableValue",
+ "description": "Primitive value which cannot be JSON-stringified.",
+ "type": "string",
+ "enum": [
+ "Infinity",
+ "NaN",
+ "-Infinity",
+ "-0"
+ ]
+ },
+ {
+ "id": "RemoteObject",
+ "description": "Mirror object referencing original JavaScript object.",
+ "type": "object",
+ "properties": [
+ {
+ "name": "type",
+ "description": "Object type.",
+ "type": "string",
+ "enum": [
+ "object",
+ "function",
+ "undefined",
+ "string",
+ "number",
+ "boolean",
+ "symbol"
+ ]
+ },
+ {
+ "name": "subtype",
+ "description": "Object subtype hint. Specified for `object` type values only.",
+ "optional": true,
+ "type": "string",
+ "enum": [
+ "array",
+ "null",
+ "node",
+ "regexp",
+ "date",
+ "map",
+ "set",
+ "weakmap",
+ "weakset",
+ "iterator",
+ "generator",
+ "error",
+ "proxy",
+ "promise",
+ "typedarray"
+ ]
+ },
+ {
+ "name": "className",
+ "description": "Object class (constructor) name. Specified for `object` type values only.",
+ "optional": true,
+ "type": "string"
+ },
+ {
+ "name": "value",
+ "description": "Remote object value in case of primitive values or JSON values (if it was requested).",
+ "optional": true,
+ "type": "any"
+ },
+ {
+ "name": "unserializableValue",
+ "description": "Primitive value which can not be JSON-stringified does not have `value`, but gets this\nproperty.",
+ "optional": true,
+ "$ref": "UnserializableValue"
+ },
+ {
+ "name": "description",
+ "description": "String representation of the object.",
+ "optional": true,
+ "type": "string"
+ },
+ {
+ "name": "objectId",
+ "description": "Unique object identifier (for non-primitive values).",
+ "optional": true,
+ "$ref": "RemoteObjectId"
+ },
+ {
+ "name": "preview",
+ "description": "Preview containing abbreviated property values. Specified for `object` type values only.",
+ "experimental": true,
+ "optional": true,
+ "$ref": "ObjectPreview"
+ },
+ {
+ "name": "customPreview",
+ "experimental": true,
+ "optional": true,
+ "$ref": "CustomPreview"
+ }
+ ]
+ },
+ {
+ "id": "CustomPreview",
+ "experimental": true,
+ "type": "object",
+ "properties": [
+ {
+ "name": "header",
+ "type": "string"
+ },
+ {
+ "name": "hasBody",
+ "type": "boolean"
+ },
+ {
+ "name": "formatterObjectId",
+ "$ref": "RemoteObjectId"
+ },
+ {
+ "name": "bindRemoteObjectFunctionId",
+ "$ref": "RemoteObjectId"
+ },
+ {
+ "name": "configObjectId",
+ "optional": true,
+ "$ref": "RemoteObjectId"
+ }
+ ]
+ },
+ {
+ "id": "ObjectPreview",
+ "description": "Object containing abbreviated remote object value.",
+ "experimental": true,
+ "type": "object",
+ "properties": [
+ {
+ "name": "type",
+ "description": "Object type.",
+ "type": "string",
+ "enum": [
+ "object",
+ "function",
+ "undefined",
+ "string",
+ "number",
+ "boolean",
+ "symbol"
+ ]
+ },
+ {
+ "name": "subtype",
+ "description": "Object subtype hint. Specified for `object` type values only.",
+ "optional": true,
+ "type": "string",
+ "enum": [
+ "array",
+ "null",
+ "node",
+ "regexp",
+ "date",
+ "map",
+ "set",
+ "weakmap",
+ "weakset",
+ "iterator",
+ "generator",
+ "error"
+ ]
+ },
+ {
+ "name": "description",
+ "description": "String representation of the object.",
+ "optional": true,
+ "type": "string"
+ },
+ {
+ "name": "overflow",
+ "description": "True iff some of the properties or entries of the original object did not fit.",
+ "type": "boolean"
+ },
+ {
+ "name": "properties",
+ "description": "List of the properties.",
+ "type": "array",
+ "items": {
+ "$ref": "PropertyPreview"
+ }
+ },
+ {
+ "name": "entries",
+ "description": "List of the entries. Specified for `map` and `set` subtype values only.",
+ "optional": true,
+ "type": "array",
+ "items": {
+ "$ref": "EntryPreview"
+ }
+ }
+ ]
+ },
+ {
+ "id": "PropertyPreview",
+ "experimental": true,
+ "type": "object",
+ "properties": [
+ {
+ "name": "name",
+ "description": "Property name.",
+ "type": "string"
+ },
+ {
+ "name": "type",
+ "description": "Object type. Accessor means that the property itself is an accessor property.",
+ "type": "string",
+ "enum": [
+ "object",
+ "function",
+ "undefined",
+ "string",
+ "number",
+ "boolean",
+ "symbol",
+ "accessor"
+ ]
+ },
+ {
+ "name": "value",
+ "description": "User-friendly property value string.",
+ "optional": true,
+ "type": "string"
+ },
+ {
+ "name": "valuePreview",
+ "description": "Nested value preview.",
+ "optional": true,
+ "$ref": "ObjectPreview"
+ },
+ {
+ "name": "subtype",
+ "description": "Object subtype hint. Specified for `object` type values only.",
+ "optional": true,
+ "type": "string",
+ "enum": [
+ "array",
+ "null",
+ "node",
+ "regexp",
+ "date",
+ "map",
+ "set",
+ "weakmap",
+ "weakset",
+ "iterator",
+ "generator",
+ "error"
+ ]
+ }
+ ]
+ },
+ {
+ "id": "EntryPreview",
+ "experimental": true,
+ "type": "object",
+ "properties": [
+ {
+ "name": "key",
+ "description": "Preview of the key. Specified for map-like collection entries.",
+ "optional": true,
+ "$ref": "ObjectPreview"
+ },
+ {
+ "name": "value",
+ "description": "Preview of the value.",
+ "$ref": "ObjectPreview"
+ }
+ ]
+ },
+ {
+ "id": "PropertyDescriptor",
+ "description": "Object property descriptor.",
+ "type": "object",
+ "properties": [
+ {
+ "name": "name",
+ "description": "Property name or symbol description.",
+ "type": "string"
+ },
+ {
+ "name": "value",
+ "description": "The value associated with the property.",
+ "optional": true,
+ "$ref": "RemoteObject"
+ },
+ {
+ "name": "writable",
+ "description": "True if the value associated with the property may be changed (data descriptors only).",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "get",
+ "description": "A function which serves as a getter for the property, or `undefined` if there is no getter\n(accessor descriptors only).",
+ "optional": true,
+ "$ref": "RemoteObject"
+ },
+ {
+ "name": "set",
+ "description": "A function which serves as a setter for the property, or `undefined` if there is no setter\n(accessor descriptors only).",
+ "optional": true,
+ "$ref": "RemoteObject"
+ },
+ {
+ "name": "configurable",
+ "description": "True if the type of this property descriptor may be changed and if the property may be\ndeleted from the corresponding object.",
+ "type": "boolean"
+ },
+ {
+ "name": "enumerable",
+ "description": "True if this property shows up during enumeration of the properties on the corresponding\nobject.",
+ "type": "boolean"
+ },
+ {
+ "name": "wasThrown",
+ "description": "True if the result was thrown during the evaluation.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "isOwn",
+ "description": "True if the property is owned for the object.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "symbol",
+ "description": "Property symbol object, if the property is of the `symbol` type.",
+ "optional": true,
+ "$ref": "RemoteObject"
+ }
+ ]
+ },
+ {
+ "id": "InternalPropertyDescriptor",
+ "description": "Object internal property descriptor. This property isn't normally visible in JavaScript code.",
+ "type": "object",
+ "properties": [
+ {
+ "name": "name",
+ "description": "Conventional property name.",
+ "type": "string"
+ },
+ {
+ "name": "value",
+ "description": "The value associated with the property.",
+ "optional": true,
+ "$ref": "RemoteObject"
+ }
+ ]
+ },
+ {
+ "id": "CallArgument",
+ "description": "Represents function call argument. Either remote object id `objectId`, primitive `value`,\nunserializable primitive value or neither of (for undefined) them should be specified.",
+ "type": "object",
+ "properties": [
+ {
+ "name": "value",
+ "description": "Primitive value or serializable javascript object.",
+ "optional": true,
+ "type": "any"
+ },
+ {
+ "name": "unserializableValue",
+ "description": "Primitive value which can not be JSON-stringified.",
+ "optional": true,
+ "$ref": "UnserializableValue"
+ },
+ {
+ "name": "objectId",
+ "description": "Remote object handle.",
+ "optional": true,
+ "$ref": "RemoteObjectId"
+ }
+ ]
+ },
+ {
+ "id": "ExecutionContextId",
+ "description": "Id of an execution context.",
+ "type": "integer"
+ },
+ {
+ "id": "ExecutionContextDescription",
+ "description": "Description of an isolated world.",
+ "type": "object",
+ "properties": [
+ {
+ "name": "id",
+ "description": "Unique id of the execution context. It can be used to specify in which execution context\nscript evaluation should be performed.",
+ "$ref": "ExecutionContextId"
+ },
+ {
+ "name": "origin",
+ "description": "Execution context origin.",
+ "type": "string"
+ },
+ {
+ "name": "name",
+ "description": "Human readable name describing given context.",
+ "type": "string"
+ },
+ {
+ "name": "auxData",
+ "description": "Embedder-specific auxiliary data.",
+ "optional": true,
+ "type": "object"
+ }
+ ]
+ },
+ {
+ "id": "ExceptionDetails",
+ "description": "Detailed information about exception (or error) that was thrown during script compilation or\nexecution.",
+ "type": "object",
+ "properties": [
+ {
+ "name": "exceptionId",
+ "description": "Exception id.",
+ "type": "integer"
+ },
+ {
+ "name": "text",
+ "description": "Exception text, which should be used together with exception object when available.",
+ "type": "string"
+ },
+ {
+ "name": "lineNumber",
+ "description": "Line number of the exception location (0-based).",
+ "type": "integer"
+ },
+ {
+ "name": "columnNumber",
+ "description": "Column number of the exception location (0-based).",
+ "type": "integer"
+ },
+ {
+ "name": "scriptId",
+ "description": "Script ID of the exception location.",
+ "optional": true,
+ "$ref": "ScriptId"
+ },
+ {
+ "name": "url",
+ "description": "URL of the exception location, to be used when the script was not reported.",
+ "optional": true,
+ "type": "string"
+ },
+ {
+ "name": "stackTrace",
+ "description": "JavaScript stack trace if available.",
+ "optional": true,
+ "$ref": "StackTrace"
+ },
+ {
+ "name": "exception",
+ "description": "Exception object if available.",
+ "optional": true,
+ "$ref": "RemoteObject"
+ },
+ {
+ "name": "executionContextId",
+ "description": "Identifier of the context where exception happened.",
+ "optional": true,
+ "$ref": "ExecutionContextId"
+ }
+ ]
+ },
+ {
+ "id": "Timestamp",
+ "description": "Number of milliseconds since epoch.",
+ "type": "number"
+ },
+ {
+ "id": "CallFrame",
+ "description": "Stack entry for runtime errors and assertions.",
+ "type": "object",
+ "properties": [
+ {
+ "name": "functionName",
+ "description": "JavaScript function name.",
+ "type": "string"
+ },
+ {
+ "name": "scriptId",
+ "description": "JavaScript script id.",
+ "$ref": "ScriptId"
+ },
+ {
+ "name": "url",
+ "description": "JavaScript script name or url.",
+ "type": "string"
+ },
+ {
+ "name": "lineNumber",
+ "description": "JavaScript script line number (0-based).",
+ "type": "integer"
+ },
+ {
+ "name": "columnNumber",
+ "description": "JavaScript script column number (0-based).",
+ "type": "integer"
+ }
+ ]
+ },
+ {
+ "id": "StackTrace",
+ "description": "Call frames for assertions or error messages.",
+ "type": "object",
+ "properties": [
+ {
+ "name": "description",
+ "description": "String label of this stack trace. For async traces this may be a name of the function that\ninitiated the async call.",
+ "optional": true,
+ "type": "string"
+ },
+ {
+ "name": "callFrames",
+ "description": "JavaScript function name.",
+ "type": "array",
+ "items": {
+ "$ref": "CallFrame"
+ }
+ },
+ {
+ "name": "parent",
+ "description": "Asynchronous JavaScript stack trace that preceded this stack, if available.",
+ "optional": true,
+ "$ref": "StackTrace"
+ },
+ {
+ "name": "parentId",
+ "description": "Asynchronous JavaScript stack trace that preceded this stack, if available.",
+ "experimental": true,
+ "optional": true,
+ "$ref": "StackTraceId"
+ }
+ ]
+ },
+ {
+ "id": "UniqueDebuggerId",
+ "description": "Unique identifier of current debugger.",
+ "experimental": true,
+ "type": "string"
+ },
+ {
+ "id": "StackTraceId",
+ "description": "If `debuggerId` is set stack trace comes from another debugger and can be resolved there. This\nallows to track cross-debugger calls. See `Runtime.StackTrace` and `Debugger.paused` for usages.",
+ "experimental": true,
+ "type": "object",
+ "properties": [
+ {
+ "name": "id",
+ "type": "string"
+ },
+ {
+ "name": "debuggerId",
+ "optional": true,
+ "$ref": "UniqueDebuggerId"
+ }
+ ]
+ }
+ ],
+ "commands": [
+ {
+ "name": "awaitPromise",
+ "description": "Add handler to promise with given promise object id.",
+ "parameters": [
+ {
+ "name": "promiseObjectId",
+ "description": "Identifier of the promise.",
+ "$ref": "RemoteObjectId"
+ },
+ {
+ "name": "returnByValue",
+ "description": "Whether the result is expected to be a JSON object that should be sent by value.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "generatePreview",
+ "description": "Whether preview should be generated for the result.",
+ "optional": true,
+ "type": "boolean"
+ }
+ ],
+ "returns": [
+ {
+ "name": "result",
+ "description": "Promise result. Will contain rejected value if promise was rejected.",
+ "$ref": "RemoteObject"
+ },
+ {
+ "name": "exceptionDetails",
+ "description": "Exception details if stack strace is available.",
+ "optional": true,
+ "$ref": "ExceptionDetails"
+ }
+ ]
+ },
+ {
+ "name": "callFunctionOn",
+ "description": "Calls function with given declaration on the given object. Object group of the result is\ninherited from the target object.",
+ "parameters": [
+ {
+ "name": "functionDeclaration",
+ "description": "Declaration of the function to call.",
+ "type": "string"
+ },
+ {
+ "name": "objectId",
+ "description": "Identifier of the object to call function on. Either objectId or executionContextId should\nbe specified.",
+ "optional": true,
+ "$ref": "RemoteObjectId"
+ },
+ {
+ "name": "arguments",
+ "description": "Call arguments. All call arguments must belong to the same JavaScript world as the target\nobject.",
+ "optional": true,
+ "type": "array",
+ "items": {
+ "$ref": "CallArgument"
+ }
+ },
+ {
+ "name": "silent",
+ "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause\nexecution. Overrides `setPauseOnException` state.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "returnByValue",
+ "description": "Whether the result is expected to be a JSON object which should be sent by value.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "generatePreview",
+ "description": "Whether preview should be generated for the result.",
+ "experimental": true,
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "userGesture",
+ "description": "Whether execution should be treated as initiated by user in the UI.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "awaitPromise",
+ "description": "Whether execution should `await` for resulting value and return once awaited promise is\nresolved.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "executionContextId",
+ "description": "Specifies execution context which global object will be used to call function on. Either\nexecutionContextId or objectId should be specified.",
+ "optional": true,
+ "$ref": "ExecutionContextId"
+ },
+ {
+ "name": "objectGroup",
+ "description": "Symbolic group name that can be used to release multiple objects. If objectGroup is not\nspecified and objectId is, objectGroup will be inherited from object.",
+ "optional": true,
+ "type": "string"
+ }
+ ],
+ "returns": [
+ {
+ "name": "result",
+ "description": "Call result.",
+ "$ref": "RemoteObject"
+ },
+ {
+ "name": "exceptionDetails",
+ "description": "Exception details.",
+ "optional": true,
+ "$ref": "ExceptionDetails"
+ }
+ ]
+ },
+ {
+ "name": "compileScript",
+ "description": "Compiles expression.",
+ "parameters": [
+ {
+ "name": "expression",
+ "description": "Expression to compile.",
+ "type": "string"
+ },
+ {
+ "name": "sourceURL",
+ "description": "Source url to be set for the script.",
+ "type": "string"
+ },
+ {
+ "name": "persistScript",
+ "description": "Specifies whether the compiled script should be persisted.",
+ "type": "boolean"
+ },
+ {
+ "name": "executionContextId",
+ "description": "Specifies in which execution context to perform script run. If the parameter is omitted the\nevaluation will be performed in the context of the inspected page.",
+ "optional": true,
+ "$ref": "ExecutionContextId"
+ }
+ ],
+ "returns": [
+ {
+ "name": "scriptId",
+ "description": "Id of the script.",
+ "optional": true,
+ "$ref": "ScriptId"
+ },
+ {
+ "name": "exceptionDetails",
+ "description": "Exception details.",
+ "optional": true,
+ "$ref": "ExceptionDetails"
+ }
+ ]
+ },
+ {
+ "name": "disable",
+ "description": "Disables reporting of execution contexts creation."
+ },
+ {
+ "name": "discardConsoleEntries",
+ "description": "Discards collected exceptions and console API calls."
+ },
+ {
+ "name": "enable",
+ "description": "Enables reporting of execution contexts creation by means of `executionContextCreated` event.\nWhen the reporting gets enabled the event will be sent immediately for each existing execution\ncontext."
+ },
+ {
+ "name": "evaluate",
+ "description": "Evaluates expression on global object.",
+ "parameters": [
+ {
+ "name": "expression",
+ "description": "Expression to evaluate.",
+ "type": "string"
+ },
+ {
+ "name": "objectGroup",
+ "description": "Symbolic group name that can be used to release multiple objects.",
+ "optional": true,
+ "type": "string"
+ },
+ {
+ "name": "includeCommandLineAPI",
+ "description": "Determines whether Command Line API should be available during the evaluation.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "silent",
+ "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause\nexecution. Overrides `setPauseOnException` state.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "contextId",
+ "description": "Specifies in which execution context to perform evaluation. If the parameter is omitted the\nevaluation will be performed in the context of the inspected page.",
+ "optional": true,
+ "$ref": "ExecutionContextId"
+ },
+ {
+ "name": "returnByValue",
+ "description": "Whether the result is expected to be a JSON object that should be sent by value.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "generatePreview",
+ "description": "Whether preview should be generated for the result.",
+ "experimental": true,
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "userGesture",
+ "description": "Whether execution should be treated as initiated by user in the UI.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "awaitPromise",
+ "description": "Whether execution should `await` for resulting value and return once awaited promise is\nresolved.",
+ "optional": true,
+ "type": "boolean"
+ }
+ ],
+ "returns": [
+ {
+ "name": "result",
+ "description": "Evaluation result.",
+ "$ref": "RemoteObject"
+ },
+ {
+ "name": "exceptionDetails",
+ "description": "Exception details.",
+ "optional": true,
+ "$ref": "ExceptionDetails"
+ }
+ ]
+ },
+ {
+ "name": "getProperties",
+ "description": "Returns properties of a given object. Object group of the result is inherited from the target\nobject.",
+ "parameters": [
+ {
+ "name": "objectId",
+ "description": "Identifier of the object to return properties for.",
+ "$ref": "RemoteObjectId"
+ },
+ {
+ "name": "ownProperties",
+ "description": "If true, returns properties belonging only to the element itself, not to its prototype\nchain.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "accessorPropertiesOnly",
+ "description": "If true, returns accessor properties (with getter/setter) only; internal properties are not\nreturned either.",
+ "experimental": true,
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "generatePreview",
+ "description": "Whether preview should be generated for the results.",
+ "experimental": true,
+ "optional": true,
+ "type": "boolean"
+ }
+ ],
+ "returns": [
+ {
+ "name": "result",
+ "description": "Object properties.",
+ "type": "array",
+ "items": {
+ "$ref": "PropertyDescriptor"
+ }
+ },
+ {
+ "name": "internalProperties",
+ "description": "Internal object properties (only of the element itself).",
+ "optional": true,
+ "type": "array",
+ "items": {
+ "$ref": "InternalPropertyDescriptor"
+ }
+ },
+ {
+ "name": "exceptionDetails",
+ "description": "Exception details.",
+ "optional": true,
+ "$ref": "ExceptionDetails"
+ }
+ ]
+ },
+ {
+ "name": "globalLexicalScopeNames",
+ "description": "Returns all let, const and class variables from global scope.",
+ "parameters": [
+ {
+ "name": "executionContextId",
+ "description": "Specifies in which execution context to lookup global scope variables.",
+ "optional": true,
+ "$ref": "ExecutionContextId"
+ }
+ ],
+ "returns": [
+ {
+ "name": "names",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ }
+ ]
+ },
+ {
+ "name": "queryObjects",
+ "parameters": [
+ {
+ "name": "prototypeObjectId",
+ "description": "Identifier of the prototype to return objects for.",
+ "$ref": "RemoteObjectId"
+ }
+ ],
+ "returns": [
+ {
+ "name": "objects",
+ "description": "Array with objects.",
+ "$ref": "RemoteObject"
+ }
+ ]
+ },
+ {
+ "name": "releaseObject",
+ "description": "Releases remote object with given id.",
+ "parameters": [
+ {
+ "name": "objectId",
+ "description": "Identifier of the object to release.",
+ "$ref": "RemoteObjectId"
+ }
+ ]
+ },
+ {
+ "name": "releaseObjectGroup",
+ "description": "Releases all remote objects that belong to a given group.",
+ "parameters": [
+ {
+ "name": "objectGroup",
+ "description": "Symbolic object group name.",
+ "type": "string"
+ }
+ ]
+ },
+ {
+ "name": "runIfWaitingForDebugger",
+ "description": "Tells inspected instance to run if it was waiting for debugger to attach."
+ },
+ {
+ "name": "runScript",
+ "description": "Runs script with given id in a given context.",
+ "parameters": [
+ {
+ "name": "scriptId",
+ "description": "Id of the script to run.",
+ "$ref": "ScriptId"
+ },
+ {
+ "name": "executionContextId",
+ "description": "Specifies in which execution context to perform script run. If the parameter is omitted the\nevaluation will be performed in the context of the inspected page.",
+ "optional": true,
+ "$ref": "ExecutionContextId"
+ },
+ {
+ "name": "objectGroup",
+ "description": "Symbolic group name that can be used to release multiple objects.",
+ "optional": true,
+ "type": "string"
+ },
+ {
+ "name": "silent",
+ "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause\nexecution. Overrides `setPauseOnException` state.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "includeCommandLineAPI",
+ "description": "Determines whether Command Line API should be available during the evaluation.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "returnByValue",
+ "description": "Whether the result is expected to be a JSON object which should be sent by value.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "generatePreview",
+ "description": "Whether preview should be generated for the result.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "awaitPromise",
+ "description": "Whether execution should `await` for resulting value and return once awaited promise is\nresolved.",
+ "optional": true,
+ "type": "boolean"
+ }
+ ],
+ "returns": [
+ {
+ "name": "result",
+ "description": "Run result.",
+ "$ref": "RemoteObject"
+ },
+ {
+ "name": "exceptionDetails",
+ "description": "Exception details.",
+ "optional": true,
+ "$ref": "ExceptionDetails"
+ }
+ ]
+ },
+ {
+ "name": "setCustomObjectFormatterEnabled",
+ "experimental": true,
+ "parameters": [
+ {
+ "name": "enabled",
+ "type": "boolean"
+ }
+ ]
+ }
+ ],
+ "events": [
+ {
+ "name": "consoleAPICalled",
+ "description": "Issued when console API was called.",
+ "parameters": [
+ {
+ "name": "type",
+ "description": "Type of the call.",
+ "type": "string",
+ "enum": [
+ "log",
+ "debug",
+ "info",
+ "error",
+ "warning",
+ "dir",
+ "dirxml",
+ "table",
+ "trace",
+ "clear",
+ "startGroup",
+ "startGroupCollapsed",
+ "endGroup",
+ "assert",
+ "profile",
+ "profileEnd",
+ "count",
+ "timeEnd"
+ ]
+ },
+ {
+ "name": "args",
+ "description": "Call arguments.",
+ "type": "array",
+ "items": {
+ "$ref": "RemoteObject"
+ }
+ },
+ {
+ "name": "executionContextId",
+ "description": "Identifier of the context where the call was made.",
+ "$ref": "ExecutionContextId"
+ },
+ {
+ "name": "timestamp",
+ "description": "Call timestamp.",
+ "$ref": "Timestamp"
+ },
+ {
+ "name": "stackTrace",
+ "description": "Stack trace captured when the call was made.",
+ "optional": true,
+ "$ref": "StackTrace"
+ },
+ {
+ "name": "context",
+ "description": "Console context descriptor for calls on non-default console context (not console.*):\n'anonymous#unique-logger-id' for call on unnamed context, 'name#unique-logger-id' for call\non named context.",
+ "experimental": true,
+ "optional": true,
+ "type": "string"
+ }
+ ]
+ },
+ {
+ "name": "exceptionRevoked",
+ "description": "Issued when unhandled exception was revoked.",
+ "parameters": [
+ {
+ "name": "reason",
+ "description": "Reason describing why exception was revoked.",
+ "type": "string"
+ },
+ {
+ "name": "exceptionId",
+ "description": "The id of revoked exception, as reported in `exceptionThrown`.",
+ "type": "integer"
+ }
+ ]
+ },
+ {
+ "name": "exceptionThrown",
+ "description": "Issued when exception was thrown and unhandled.",
+ "parameters": [
+ {
+ "name": "timestamp",
+ "description": "Timestamp of the exception.",
+ "$ref": "Timestamp"
+ },
+ {
+ "name": "exceptionDetails",
+ "$ref": "ExceptionDetails"
+ }
+ ]
+ },
+ {
+ "name": "executionContextCreated",
+ "description": "Issued when new execution context is created.",
+ "parameters": [
+ {
+ "name": "context",
+ "description": "A newly created execution context.",
+ "$ref": "ExecutionContextDescription"
+ }
+ ]
+ },
+ {
+ "name": "executionContextDestroyed",
+ "description": "Issued when execution context is destroyed.",
+ "parameters": [
+ {
+ "name": "executionContextId",
+ "description": "Id of the destroyed context",
+ "$ref": "ExecutionContextId"
+ }
+ ]
+ },
+ {
+ "name": "executionContextsCleared",
+ "description": "Issued when all executionContexts were cleared in browser"
+ },
+ {
+ "name": "inspectRequested",
+ "description": "Issued when object should be inspected (for example, as a result of inspect() command line API\ncall).",
+ "parameters": [
+ {
+ "name": "object",
+ "$ref": "RemoteObject"
+ },
+ {
+ "name": "hints",
+ "type": "object"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "domain": "Schema",
+ "description": "This domain is deprecated.",
+ "deprecated": true,
+ "types": [
+ {
+ "id": "Domain",
+ "description": "Description of the protocol domain.",
+ "type": "object",
+ "properties": [
+ {
+ "name": "name",
+ "description": "Domain name.",
+ "type": "string"
+ },
+ {
+ "name": "version",
+ "description": "Domain version.",
+ "type": "string"
+ }
+ ]
+ }
+ ],
+ "commands": [
+ {
+ "name": "getDomains",
+ "description": "Returns supported domains.",
+ "returns": [
+ {
+ "name": "domains",
+ "description": "List of supported domains.",
+ "type": "array",
+ "items": {
+ "$ref": "Domain"
+ }
+ }
+ ]
+ }
+ ]
+ }
+ ]
+}
\ No newline at end of file
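For orientation (not part of the vendored files): the JSON above is the machine-readable form of the V8 inspector protocol. A minimal sketch of driving its Runtime domain through Node's bundled `inspector` module, which speaks this protocol against the local process; the expression and logging are illustrative only.

```ts
import { Session } from "inspector";

const session = new Session();
session.connect();

// Runtime.evaluate: expression in, RemoteObject out. `returnByValue` asks for
// the JSON value directly instead of an objectId handle.
session.post(
  "Runtime.evaluate",
  { expression: "6 * 7", returnByValue: true },
  (err, res) => {
    if (err) throw err;
    console.log(res.result.type, res.result.value); // "number" 42
    session.disconnect();
  }
);
```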
diff --git a/deps/v8/src/inspector/js_protocol.pdl b/deps/v8/src/inspector/js_protocol.pdl
new file mode 100644
index 0000000000..5a23199e4a
--- /dev/null
+++ b/deps/v8/src/inspector/js_protocol.pdl
@@ -0,0 +1,1370 @@
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+version
+ major 1
+ minor 3
+
+# This domain is deprecated - use Runtime or Log instead.
+deprecated domain Console
+ depends on Runtime
+
+ # Console message.
+ type ConsoleMessage extends object
+ properties
+ # Message source.
+ enum source
+ xml
+ javascript
+ network
+ console-api
+ storage
+ appcache
+ rendering
+ security
+ other
+ deprecation
+ worker
+ # Message severity.
+ enum level
+ log
+ warning
+ error
+ debug
+ info
+ # Message text.
+ string text
+ # URL of the message origin.
+ optional string url
+ # Line number in the resource that generated this message (1-based).
+ optional integer line
+ # Column number in the resource that generated this message (1-based).
+ optional integer column
+
+ # Does nothing.
+ command clearMessages
+
+ # Disables console domain, prevents further console messages from being reported to the client.
+ command disable
+
+ # Enables console domain, sends the messages collected so far to the client by means of the
+ # `messageAdded` notification.
+ command enable
+
+ # Issued when new console message is added.
+ event messageAdded
+ parameters
+ # Console message that has been added.
+ ConsoleMessage message
+
+# Debugger domain exposes JavaScript debugging capabilities. It allows setting and removing
+# breakpoints, stepping through execution, exploring stack traces, etc.
+domain Debugger
+ depends on Runtime
+
+ # Breakpoint identifier.
+ type BreakpointId extends string
+
+ # Call frame identifier.
+ type CallFrameId extends string
+
+ # Location in the source code.
+ type Location extends object
+ properties
+ # Script identifier as reported in the `Debugger.scriptParsed`.
+ Runtime.ScriptId scriptId
+ # Line number in the script (0-based).
+ integer lineNumber
+ # Column number in the script (0-based).
+ optional integer columnNumber
+
+ # Location in the source code.
+ experimental type ScriptPosition extends object
+ properties
+ integer lineNumber
+ integer columnNumber
+
+ # JavaScript call frame. Array of call frames form the call stack.
+ type CallFrame extends object
+ properties
+ # Call frame identifier. This identifier is only valid while the virtual machine is paused.
+ CallFrameId callFrameId
+ # Name of the JavaScript function called on this call frame.
+ string functionName
+ # Location in the source code.
+ optional Location functionLocation
+ # Location in the source code.
+ Location location
+ # JavaScript script name or url.
+ string url
+ # Scope chain for this call frame.
+ array of Scope scopeChain
+ # `this` object for this call frame.
+ Runtime.RemoteObject this
+ # The value being returned, if the function is at a return point.
+ optional Runtime.RemoteObject returnValue
+
+ # Scope description.
+ type Scope extends object
+ properties
+ # Scope type.
+ enum type
+ global
+ local
+ with
+ closure
+ catch
+ block
+ script
+ eval
+ module
+ # Object representing the scope. For `global` and `with` scopes it represents the actual
+ # object; for the rest of the scopes, it is an artificial transient object enumerating scope
+ # variables as its properties.
+ Runtime.RemoteObject object
+ optional string name
+ # Location in the source code where scope starts
+ optional Location startLocation
+ # Location in the source code where scope ends
+ optional Location endLocation
+
+ # Search match for resource.
+ type SearchMatch extends object
+ properties
+ # Line number in resource content.
+ number lineNumber
+ # Line with match content.
+ string lineContent
+
+ type BreakLocation extends object
+ properties
+ # Script identifier as reported in the `Debugger.scriptParsed`.
+ Runtime.ScriptId scriptId
+ # Line number in the script (0-based).
+ integer lineNumber
+ # Column number in the script (0-based).
+ optional integer columnNumber
+ optional enum type
+ debuggerStatement
+ call
+ return
+
+ # Continues execution until a specific location is reached.
+ command continueToLocation
+ parameters
+ # Location to continue to.
+ Location location
+ optional enum targetCallFrames
+ any
+ current
+
+ # Disables debugger for given page.
+ command disable
+
+ # Enables debugger for the given page. Clients should not assume that the debugging has been
+ # enabled until the result for this command is received.
+ command enable
+ returns
+ # Unique identifier of the debugger.
+ experimental Runtime.UniqueDebuggerId debuggerId
+
+ # Evaluates expression on a given call frame.
+ command evaluateOnCallFrame
+ parameters
+ # Call frame identifier to evaluate on.
+ CallFrameId callFrameId
+ # Expression to evaluate.
+ string expression
+ # String object group name to put result into (allows rapid releasing resulting object handles
+ # using `releaseObjectGroup`).
+ optional string objectGroup
+ # Specifies whether command line API should be available to the evaluated expression, defaults
+ # to false.
+ optional boolean includeCommandLineAPI
+ # In silent mode exceptions thrown during evaluation are not reported and do not pause
+ # execution. Overrides `setPauseOnException` state.
+ optional boolean silent
+ # Whether the result is expected to be a JSON object that should be sent by value.
+ optional boolean returnByValue
+ # Whether preview should be generated for the result.
+ experimental optional boolean generatePreview
+ # Whether to throw an exception if side effect cannot be ruled out during evaluation.
+ optional boolean throwOnSideEffect
+ returns
+ # Object wrapper for the evaluation result.
+ Runtime.RemoteObject result
+ # Exception details.
+ optional Runtime.ExceptionDetails exceptionDetails
+
+ # Returns possible locations for breakpoint. scriptId in start and end range locations should be
+ # the same.
+ command getPossibleBreakpoints
+ parameters
+ # Start of range to search possible breakpoint locations in.
+ Location start
+ # End of range to search possible breakpoint locations in (exclusive). When not specified, the
+ # end of the script is used as the end of the range.
+ optional Location end
+ # Only consider locations which are in the same (non-nested) function as start.
+ optional boolean restrictToFunction
+ returns
+ # List of the possible breakpoint locations.
+ array of BreakLocation locations
+
+ # Returns source for the script with given id.
+ command getScriptSource
+ parameters
+ # Id of the script to get source for.
+ Runtime.ScriptId scriptId
+ returns
+ # Script source.
+ string scriptSource
+
+ # Returns stack trace with given `stackTraceId`.
+ experimental command getStackTrace
+ parameters
+ Runtime.StackTraceId stackTraceId
+ returns
+ Runtime.StackTrace stackTrace
+
+ # Stops on the next JavaScript statement.
+ command pause
+
+ experimental command pauseOnAsyncCall
+ parameters
+ # Debugger will pause when async call with given stack trace is started.
+ Runtime.StackTraceId parentStackTraceId
+
+ # Removes JavaScript breakpoint.
+ command removeBreakpoint
+ parameters
+ BreakpointId breakpointId
+
+ # Restarts particular call frame from the beginning.
+ command restartFrame
+ parameters
+ # Call frame identifier to evaluate on.
+ CallFrameId callFrameId
+ returns
+ # New stack trace.
+ array of CallFrame callFrames
+ # Async stack trace, if any.
+ optional Runtime.StackTrace asyncStackTrace
+ # Async stack trace, if any.
+ experimental optional Runtime.StackTraceId asyncStackTraceId
+
+ # Resumes JavaScript execution.
+ command resume
+
+ # This method is deprecated - use Debugger.stepInto with breakOnAsyncCall and
+ # Debugger.pauseOnAsyncCall instead. Steps into the next scheduled async task if any is scheduled
+ # before next pause. Returns success when async task is actually scheduled, returns error if no
+ # task was scheduled or another scheduleStepIntoAsync was called.
+ experimental command scheduleStepIntoAsync
+
+ # Searches for given string in script content.
+ command searchInContent
+ parameters
+ # Id of the script to search in.
+ Runtime.ScriptId scriptId
+ # String to search for.
+ string query
+ # If true, search is case sensitive.
+ optional boolean caseSensitive
+ # If true, treats string parameter as regex.
+ optional boolean isRegex
+ returns
+ # List of search matches.
+ array of SearchMatch result
+
+ # Enables or disables async call stacks tracking.
+ command setAsyncCallStackDepth
+ parameters
+ # Maximum depth of async call stacks. Setting to `0` will effectively disable collecting async
+ # call stacks (default).
+ integer maxDepth
+
+ # Replace previous blackbox patterns with passed ones. Forces backend to skip stepping/pausing in
+ # scripts with url matching one of the patterns. VM will try to leave blackboxed script by
+ # performing 'step in' several times, finally resorting to 'step out' if unsuccessful.
+ experimental command setBlackboxPatterns
+ parameters
+ # Array of regexps that will be used to check script url for blackbox state.
+ array of string patterns
+
+ # Makes backend skip steps in the script in blackboxed ranges. VM will try to leave blackboxed
+ # scripts by performing 'step in' several times, finally resorting to 'step out' if unsuccessful.
+ # Positions array contains positions where blackbox state is changed. First interval isn't
+ # blackboxed. Array should be sorted.
+ experimental command setBlackboxedRanges
+ parameters
+ # Id of the script.
+ Runtime.ScriptId scriptId
+ array of ScriptPosition positions
+
+ # Sets JavaScript breakpoint at a given location.
+ command setBreakpoint
+ parameters
+ # Location to set breakpoint in.
+ Location location
+ # Expression to use as a breakpoint condition. When specified, debugger will only stop on the
+ # breakpoint if this expression evaluates to true.
+ optional string condition
+ returns
+ # Id of the created breakpoint for further reference.
+ BreakpointId breakpointId
+ # Location this breakpoint resolved into.
+ Location actualLocation
+
+ # Sets JavaScript breakpoint at given location specified either by URL or URL regex. Once this
+ # command is issued, all existing parsed scripts will have breakpoints resolved and returned in
+ # `locations` property. Further matching script parsing will result in subsequent
+ # `breakpointResolved` events issued. This logical breakpoint will survive page reloads.
+ command setBreakpointByUrl
+ parameters
+ # Line number to set breakpoint at.
+ integer lineNumber
+ # URL of the resources to set breakpoint on.
+ optional string url
+ # Regex pattern for the URLs of the resources to set breakpoints on. Either `url` or
+ # `urlRegex` must be specified.
+ optional string urlRegex
+ # Script hash of the resources to set breakpoint on.
+ optional string scriptHash
+ # Offset in the line to set breakpoint at.
+ optional integer columnNumber
+ # Expression to use as a breakpoint condition. When specified, debugger will only stop on the
+ # breakpoint if this expression evaluates to true.
+ optional string condition
+ returns
+ # Id of the created breakpoint for further reference.
+ BreakpointId breakpointId
+ # List of the locations this breakpoint resolved into upon addition.
+ array of Location locations
+
+ # Activates / deactivates all breakpoints on the page.
+ command setBreakpointsActive
+ parameters
+ # New value for breakpoints active state.
+ boolean active
+
+ # Defines pause on exceptions state. Can be set to stop on all exceptions, uncaught exceptions or
+ # no exceptions. Initial pause on exceptions state is `none`.
+ command setPauseOnExceptions
+ parameters
+ # Pause on exceptions mode.
+ enum state
+ none
+ uncaught
+ all
+
+ # Changes return value in top frame. Available only at return break position.
+ experimental command setReturnValue
+ parameters
+ # New return value.
+ Runtime.CallArgument newValue
+
+ # Edits JavaScript source live.
+ command setScriptSource
+ parameters
+ # Id of the script to edit.
+ Runtime.ScriptId scriptId
+ # New content of the script.
+ string scriptSource
+ # If true the change will not actually be applied. Dry run may be used to get result
+ # description without actually modifying the code.
+ optional boolean dryRun
+ returns
+ # New stack trace in case editing has happened while VM was stopped.
+ optional array of CallFrame callFrames
+ # Whether current call stack was modified after applying the changes.
+ optional boolean stackChanged
+ # Async stack trace, if any.
+ optional Runtime.StackTrace asyncStackTrace
+ # Async stack trace, if any.
+ experimental optional Runtime.StackTraceId asyncStackTraceId
+ # Exception details if any.
+ optional Runtime.ExceptionDetails exceptionDetails
+
+ # Makes page not interrupt on any pauses (breakpoint, exception, DOM exception etc.).
+ command setSkipAllPauses
+ parameters
+ # New value for skip pauses state.
+ boolean skip
+
+ # Changes value of variable in a callframe. Object-based scopes are not supported and must be
+ # mutated manually.
+ command setVariableValue
+ parameters
+ # 0-based number of the scope as listed in the scope chain. Only 'local', 'closure' and 'catch'
+ # scope types are allowed. Other scopes could be manipulated manually.
+ integer scopeNumber
+ # Variable name.
+ string variableName
+ # New variable value.
+ Runtime.CallArgument newValue
+ # Id of callframe that holds variable.
+ CallFrameId callFrameId
+
+ # Steps into the function call.
+ command stepInto
+ parameters
+ # Debugger will issue additional Debugger.paused notification if any async task is scheduled
+ # before next pause.
+ experimental optional boolean breakOnAsyncCall
+
+ # Steps out of the function call.
+ command stepOut
+
+ # Steps over the statement.
+ command stepOver
+
+ # Fired when breakpoint is resolved to an actual script and location.
+ event breakpointResolved
+ parameters
+ # Breakpoint unique identifier.
+ BreakpointId breakpointId
+ # Actual breakpoint location.
+ Location location
+
+ # Fired when the virtual machine stops on a breakpoint or exception or any other stop criterion.
+ event paused
+ parameters
+ # Call stack the virtual machine stopped on.
+ array of CallFrame callFrames
+ # Pause reason.
+ enum reason
+ XHR
+ DOM
+ EventListener
+ exception
+ assert
+ debugCommand
+ promiseRejection
+ OOM
+ other
+ ambiguous
+ # Object containing break-specific auxiliary properties.
+ optional object data
+ # Hit breakpoint IDs.
+ optional array of string hitBreakpoints
+ # Async stack trace, if any.
+ optional Runtime.StackTrace asyncStackTrace
+ # Async stack trace, if any.
+ experimental optional Runtime.StackTraceId asyncStackTraceId
+ # Just scheduled async call will have this stack trace as parent stack during async execution.
+ # This field is available only after `Debugger.stepInto` call with `breakOnAsyncCall` flag.
+ experimental optional Runtime.StackTraceId asyncCallStackTraceId
+
+ # Fired when the virtual machine resumed execution.
+ event resumed
+
+ # Fired when virtual machine fails to parse the script.
+ event scriptFailedToParse
+ parameters
+ # Identifier of the script parsed.
+ Runtime.ScriptId scriptId
+ # URL or name of the script parsed (if any).
+ string url
+ # Line offset of the script within the resource with given URL (for script tags).
+ integer startLine
+ # Column offset of the script within the resource with given URL.
+ integer startColumn
+ # Last line of the script.
+ integer endLine
+ # Length of the last line of the script.
+ integer endColumn
+ # Specifies script creation context.
+ Runtime.ExecutionContextId executionContextId
+ # Content hash of the script.
+ string hash
+ # Embedder-specific auxiliary data.
+ optional object executionContextAuxData
+ # URL of source map associated with script (if any).
+ optional string sourceMapURL
+ # True, if this script has sourceURL.
+ optional boolean hasSourceURL
+ # True, if this script is ES6 module.
+ optional boolean isModule
+ # This script length.
+ optional integer length
+ # JavaScript top stack frame of where the script parsed event was triggered if available.
+ experimental optional Runtime.StackTrace stackTrace
+
+ # Fired when virtual machine parses script. This event is also fired for all known and uncollected
+ # scripts upon enabling debugger.
+ event scriptParsed
+ parameters
+ # Identifier of the script parsed.
+ Runtime.ScriptId scriptId
+ # URL or name of the script parsed (if any).
+ string url
+ # Line offset of the script within the resource with given URL (for script tags).
+ integer startLine
+ # Column offset of the script within the resource with given URL.
+ integer startColumn
+ # Last line of the script.
+ integer endLine
+ # Length of the last line of the script.
+ integer endColumn
+ # Specifies script creation context.
+ Runtime.ExecutionContextId executionContextId
+ # Content hash of the script.
+ string hash
+ # Embedder-specific auxiliary data.
+ optional object executionContextAuxData
+ # True, if this script is generated as a result of the live edit operation.
+ experimental optional boolean isLiveEdit
+ # URL of source map associated with script (if any).
+ optional string sourceMapURL
+ # True, if this script has sourceURL.
+ optional boolean hasSourceURL
+ # True, if this script is ES6 module.
+ optional boolean isModule
+ # This script length.
+ optional integer length
+ # JavaScript top stack frame of where the script parsed event was triggered if available.
+ experimental optional Runtime.StackTrace stackTrace
+
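As a hedged illustration of the Debugger domain above (a sketch, again via Node's in-process `inspector` session; the script URL and line number are hypothetical):

```ts
import { Session } from "inspector";

const session = new Session();
session.connect();

// Protocol events are surfaced as session events named after the method;
// `params` matches the `paused` event above: callFrames, reason, hitBreakpoints...
session.on("Debugger.paused", ({ params }) => {
  console.log(params.reason, params.hitBreakpoints);
  session.post("Debugger.resume");
});

session.post("Debugger.enable", () => {
  // setBreakpointByUrl creates a logical breakpoint keyed by URL that is
  // resolved against parsed scripts and survives reloads.
  session.post(
    "Debugger.setBreakpointByUrl",
    { lineNumber: 0, url: "file:///tmp/example.js" }, // hypothetical target
    (err, res) => {
      if (err) throw err;
      console.log(res.breakpointId, res.locations);
    }
  );
});
```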
+experimental domain HeapProfiler
+ depends on Runtime
+
+ # Heap snapshot object id.
+ type HeapSnapshotObjectId extends string
+
+ # Sampling Heap Profile node. Holds callsite information, allocation statistics and child nodes.
+ type SamplingHeapProfileNode extends object
+ properties
+ # Function location.
+ Runtime.CallFrame callFrame
+ # Allocations size in bytes for the node excluding children.
+ number selfSize
+ # Child nodes.
+ array of SamplingHeapProfileNode children
+
+ # Profile.
+ type SamplingHeapProfile extends object
+ properties
+ SamplingHeapProfileNode head
+
+ # Enables console to refer to the node with given id via $x (see Command Line API for more details
+ # on $x functions).
+ command addInspectedHeapObject
+ parameters
+ # Heap snapshot object id to be accessible by means of $x command line API.
+ HeapSnapshotObjectId heapObjectId
+
+ command collectGarbage
+
+ command disable
+
+ command enable
+
+ command getHeapObjectId
+ parameters
+ # Identifier of the object to get heap object id for.
+ Runtime.RemoteObjectId objectId
+ returns
+ # Id of the heap snapshot object corresponding to the passed remote object id.
+ HeapSnapshotObjectId heapSnapshotObjectId
+
+ command getObjectByHeapObjectId
+ parameters
+ HeapSnapshotObjectId objectId
+ # Symbolic group name that can be used to release multiple objects.
+ optional string objectGroup
+ returns
+ # Evaluation result.
+ Runtime.RemoteObject result
+
+ command getSamplingProfile
+ returns
+ # Return the sampling profile being collected.
+ SamplingHeapProfile profile
+
+ command startSampling
+ parameters
+ # Average sample interval in bytes. Poisson distribution is used for the intervals. The
+ # default value is 32768 bytes.
+ optional number samplingInterval
+
+ command startTrackingHeapObjects
+ parameters
+ optional boolean trackAllocations
+
+ command stopSampling
+ returns
+ # Recorded sampling heap profile.
+ SamplingHeapProfile profile
+
+ command stopTrackingHeapObjects
+ parameters
+ # If true 'reportHeapSnapshotProgress' events will be generated while snapshot is being taken
+ # when the tracking is stopped.
+ optional boolean reportProgress
+
+ command takeHeapSnapshot
+ parameters
+ # If true 'reportHeapSnapshotProgress' events will be generated while snapshot is being taken.
+ optional boolean reportProgress
+
+ event addHeapSnapshotChunk
+ parameters
+ string chunk
+
+ # If heap objects tracking has been started then backend may send updates for one or more fragments.
+ event heapStatsUpdate
+ parameters
+ # An array of triplets. Each triplet describes a fragment. The first integer is the fragment
+ # index, the second integer is a total count of objects for the fragment, the third integer is
+ # a total size of the objects for the fragment.
+ array of integer statsUpdate
+
+ # If heap objects tracking has been started then backend regularly sends a current value for last
+ # seen object id and corresponding timestamp. If there were changes in the heap since the last event
+ # then one or more heapStatsUpdate events will be sent before a new lastSeenObjectId event.
+ event lastSeenObjectId
+ parameters
+ integer lastSeenObjectId
+ number timestamp
+
+ event reportHeapSnapshotProgress
+ parameters
+ integer done
+ integer total
+ optional boolean finished
+
+ event resetProfiles
+
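A minimal sketch of the sampling flow the HeapProfiler domain above defines (startSampling, allocate, stopSampling), assuming Node's in-process `inspector` session; the workload is a placeholder so the profile has allocations to report:

```ts
import { Session } from "inspector";

const session = new Session();
session.connect();

session.post("HeapProfiler.enable", () => {
  // Poisson-distributed sampling; 32768 bytes is the documented default interval.
  session.post("HeapProfiler.startSampling", { samplingInterval: 32768 }, () => {
    const retained: number[][] = [];
    for (let i = 0; i < 1000; i++) retained.push(new Array(1024).fill(i));

    session.post("HeapProfiler.stopSampling", (err, res) => {
      if (err) throw err;
      // `profile.head` is the root SamplingHeapProfileNode; selfSize excludes children.
      console.log(res.profile.head.selfSize, res.profile.head.children.length);
      session.disconnect();
    });
  });
});
```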
+domain Profiler
+ depends on Runtime
+ depends on Debugger
+
+ # Profile node. Holds callsite information, execution statistics and child nodes.
+ type ProfileNode extends object
+ properties
+ # Unique id of the node.
+ integer id
+ # Function location.
+ Runtime.CallFrame callFrame
+ # Number of samples where this node was on top of the call stack.
+ optional integer hitCount
+ # Child node ids.
+ optional array of integer children
+ # The reason the function was not optimized. The function may be deoptimized or marked as
+ # 'don't optimize'.
+ optional string deoptReason
+ # An array of source position ticks.
+ optional array of PositionTickInfo positionTicks
+
+ # Profile.
+ type Profile extends object
+ properties
+ # The list of profile nodes. First item is the root node.
+ array of ProfileNode nodes
+ # Profiling start timestamp in microseconds.
+ number startTime
+ # Profiling end timestamp in microseconds.
+ number endTime
+ # Ids of the samples' top nodes.
+ optional array of integer samples
+ # Time intervals between adjacent samples in microseconds. The first delta is relative to the
+ # profile startTime.
+ optional array of integer timeDeltas
+
+ # Specifies a number of samples attributed to a certain source position.
+ type PositionTickInfo extends object
+ properties
+ # Source line number (1-based).
+ integer line
+ # Number of samples attributed to the source line.
+ integer ticks
+
+ # Coverage data for a source range.
+ type CoverageRange extends object
+ properties
+ # JavaScript script source offset for the range start.
+ integer startOffset
+ # JavaScript script source offset for the range end.
+ integer endOffset
+ # Collected execution count of the source range.
+ integer count
+
+ # Coverage data for a JavaScript function.
+ type FunctionCoverage extends object
+ properties
+ # JavaScript function name.
+ string functionName
+ # Source ranges inside the function with coverage data.
+ array of CoverageRange ranges
+ # Whether coverage data for this function has block granularity.
+ boolean isBlockCoverage
+
+ # Coverage data for a JavaScript script.
+ type ScriptCoverage extends object
+ properties
+ # JavaScript script id.
+ Runtime.ScriptId scriptId
+ # JavaScript script name or url.
+ string url
+ # Functions contained in the script that have coverage data.
+ array of FunctionCoverage functions
+
+ # Describes a type collected during runtime.
+ experimental type TypeObject extends object
+ properties
+ # Name of a type collected with type profiling.
+ string name
+
+ # Source offset and types for a parameter or return value.
+ experimental type TypeProfileEntry extends object
+ properties
+ # Source offset of the parameter or end of function for return values.
+ integer offset
+ # The types for this parameter or return value.
+ array of TypeObject types
+
+ # Type profile data collected during runtime for a JavaScript script.
+ experimental type ScriptTypeProfile extends object
+ properties
+ # JavaScript script id.
+ Runtime.ScriptId scriptId
+ # JavaScript script name or url.
+ string url
+ # Type profile entries for parameters and return values of the functions in the script.
+ array of TypeProfileEntry entries
+
+ command disable
+
+ command enable
+
+ # Collect coverage data for the current isolate. The coverage data may be incomplete due to
+ # garbage collection.
+ command getBestEffortCoverage
+ returns
+ # Coverage data for the current isolate.
+ array of ScriptCoverage result
+
+ # Changes CPU profiler sampling interval. Must be called before CPU profile recording is started.
+ command setSamplingInterval
+ parameters
+ # New sampling interval in microseconds.
+ integer interval
+
+ command start
+
+ # Enable precise code coverage. Coverage data for JavaScript executed before enabling precise code
+ # coverage may be incomplete. Enabling prevents running optimized code and resets execution
+ # counters.
+ command startPreciseCoverage
+ parameters
+ # Collect accurate call counts beyond simple 'covered' or 'not covered'.
+ optional boolean callCount
+ # Collect block-based coverage.
+ optional boolean detailed
+
+ # Enable type profile.
+ experimental command startTypeProfile
+
+ command stop
+ returns
+ # Recorded profile.
+ Profile profile
+
+ # Disable precise code coverage. Disabling releases unnecessary execution count records and allows
+ # executing optimized code.
+ command stopPreciseCoverage
+
+ # Disable type profile. Disabling releases type profile data collected so far.
+ experimental command stopTypeProfile
+
+ # Collect coverage data for the current isolate, and reset execution counters. Precise code
+ # coverage needs to have started.
+ command takePreciseCoverage
+ returns
+ # Coverage data for the current isolate.
+ array of ScriptCoverage result
+
+ # Collect type profile.
+ experimental command takeTypeProfile
+ returns
+ # Type profile for all scripts since startTypeProfile() was turned on.
+ array of ScriptTypeProfile result
+
+ event consoleProfileFinished
+ parameters
+ string id
+ # Location of console.profileEnd().
+ Debugger.Location location
+ Profile profile
+ # Profile title passed as an argument to console.profile().
+ optional string title
+
+ # Sent when new profile recording is started using console.profile() call.
+ event consoleProfileStarted
+ parameters
+ string id
+ # Location of console.profile().
+ Debugger.Location location
+ # Profile title passed as an argument to console.profile().
+ optional string title
+
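A sketch of the precise-coverage lifecycle from the Profiler domain above (startPreciseCoverage, takePreciseCoverage, stopPreciseCoverage), again via Node's in-process `inspector` session; `exercised` is a hypothetical workload:

```ts
import { Session } from "inspector";

const session = new Session();
session.connect();

session.post("Profiler.enable", () => {
  // callCount: real execution counts; detailed: block (not just function) ranges.
  session.post("Profiler.startPreciseCoverage", { callCount: true, detailed: true }, () => {
    exercised();
    session.post("Profiler.takePreciseCoverage", (err, res) => {
      if (err) throw err;
      for (const script of res.result) {
        for (const fn of script.functions) {
          // Each FunctionCoverage carries CoverageRange offsets and counts.
          console.log(script.url, fn.functionName, fn.ranges[0].count);
        }
      }
      session.post("Profiler.stopPreciseCoverage", () => session.disconnect());
    });
  });
});

// Hypothetical workload so the coverage data has something to report.
function exercised(): void {
  for (let i = 0; i < 3; i++) Math.sqrt(i);
}
```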
+# Runtime domain exposes JavaScript runtime by means of remote evaluation and mirror objects.
+# Evaluation results are returned as mirror objects that expose object type, string representation
+# and unique identifier that can be used for further object reference. Original objects are
+# maintained in memory unless they are either explicitly released or are released along with the
+# other objects in their object group.
+domain Runtime
+
+ # Unique script identifier.
+ type ScriptId extends string
+
+ # Unique object identifier.
+ type RemoteObjectId extends string
+
+ # Primitive value which cannot be JSON-stringified.
+ type UnserializableValue extends string
+ enum
+ Infinity
+ NaN
+ -Infinity
+ -0
+
+ # Mirror object referencing original JavaScript object.
+ type RemoteObject extends object
+ properties
+ # Object type.
+ enum type
+ object
+ function
+ undefined
+ string
+ number
+ boolean
+ symbol
+ # Object subtype hint. Specified for `object` type values only.
+ optional enum subtype
+ array
+ null
+ node
+ regexp
+ date
+ map
+ set
+ weakmap
+ weakset
+ iterator
+ generator
+ error
+ proxy
+ promise
+ typedarray
+ # Object class (constructor) name. Specified for `object` type values only.
+ optional string className
+ # Remote object value in case of primitive values or JSON values (if it was requested).
+ optional any value
+      # Primitive value which cannot be JSON-stringified does not have `value`, but gets this
+ # property.
+ optional UnserializableValue unserializableValue
+ # String representation of the object.
+ optional string description
+ # Unique object identifier (for non-primitive values).
+ optional RemoteObjectId objectId
+ # Preview containing abbreviated property values. Specified for `object` type values only.
+ experimental optional ObjectPreview preview
+ experimental optional CustomPreview customPreview
+
+ experimental type CustomPreview extends object
+ properties
+ string header
+ boolean hasBody
+ RemoteObjectId formatterObjectId
+ RemoteObjectId bindRemoteObjectFunctionId
+ optional RemoteObjectId configObjectId
+
+ # Object containing abbreviated remote object value.
+ experimental type ObjectPreview extends object
+ properties
+ # Object type.
+ enum type
+ object
+ function
+ undefined
+ string
+ number
+ boolean
+ symbol
+ # Object subtype hint. Specified for `object` type values only.
+ optional enum subtype
+ array
+ null
+ node
+ regexp
+ date
+ map
+ set
+ weakmap
+ weakset
+ iterator
+ generator
+ error
+ # String representation of the object.
+ optional string description
+ # True iff some of the properties or entries of the original object did not fit.
+ boolean overflow
+ # List of the properties.
+ array of PropertyPreview properties
+ # List of the entries. Specified for `map` and `set` subtype values only.
+ optional array of EntryPreview entries
+
+ experimental type PropertyPreview extends object
+ properties
+ # Property name.
+ string name
+ # Object type. Accessor means that the property itself is an accessor property.
+ enum type
+ object
+ function
+ undefined
+ string
+ number
+ boolean
+ symbol
+ accessor
+ # User-friendly property value string.
+ optional string value
+ # Nested value preview.
+ optional ObjectPreview valuePreview
+ # Object subtype hint. Specified for `object` type values only.
+ optional enum subtype
+ array
+ null
+ node
+ regexp
+ date
+ map
+ set
+ weakmap
+ weakset
+ iterator
+ generator
+ error
+
+ experimental type EntryPreview extends object
+ properties
+ # Preview of the key. Specified for map-like collection entries.
+ optional ObjectPreview key
+ # Preview of the value.
+ ObjectPreview value
+
+ # Object property descriptor.
+ type PropertyDescriptor extends object
+ properties
+ # Property name or symbol description.
+ string name
+ # The value associated with the property.
+ optional RemoteObject value
+ # True if the value associated with the property may be changed (data descriptors only).
+ optional boolean writable
+ # A function which serves as a getter for the property, or `undefined` if there is no getter
+ # (accessor descriptors only).
+ optional RemoteObject get
+ # A function which serves as a setter for the property, or `undefined` if there is no setter
+ # (accessor descriptors only).
+ optional RemoteObject set
+ # True if the type of this property descriptor may be changed and if the property may be
+ # deleted from the corresponding object.
+ boolean configurable
+ # True if this property shows up during enumeration of the properties on the corresponding
+ # object.
+ boolean enumerable
+ # True if the result was thrown during the evaluation.
+ optional boolean wasThrown
+      # True if the property is owned by the object.
+ optional boolean isOwn
+ # Property symbol object, if the property is of the `symbol` type.
+ optional RemoteObject symbol
+
+ # Object internal property descriptor. This property isn't normally visible in JavaScript code.
+ type InternalPropertyDescriptor extends object
+ properties
+ # Conventional property name.
+ string name
+ # The value associated with the property.
+ optional RemoteObject value
+
+  # Represents function call argument. Either remote object id `objectId`, primitive `value`,
+  # unserializable primitive value or none of them (for undefined) should be specified.
+ type CallArgument extends object
+ properties
+ # Primitive value or serializable javascript object.
+ optional any value
+      # Primitive value which cannot be JSON-stringified.
+ optional UnserializableValue unserializableValue
+ # Remote object handle.
+ optional RemoteObjectId objectId
+
+ # Id of an execution context.
+ type ExecutionContextId extends integer
+
+ # Description of an isolated world.
+ type ExecutionContextDescription extends object
+ properties
+ # Unique id of the execution context. It can be used to specify in which execution context
+ # script evaluation should be performed.
+ ExecutionContextId id
+ # Execution context origin.
+ string origin
+ # Human readable name describing given context.
+ string name
+ # Embedder-specific auxiliary data.
+ optional object auxData
+
+ # Detailed information about exception (or error) that was thrown during script compilation or
+ # execution.
+ type ExceptionDetails extends object
+ properties
+ # Exception id.
+ integer exceptionId
+ # Exception text, which should be used together with exception object when available.
+ string text
+ # Line number of the exception location (0-based).
+ integer lineNumber
+ # Column number of the exception location (0-based).
+ integer columnNumber
+ # Script ID of the exception location.
+ optional ScriptId scriptId
+ # URL of the exception location, to be used when the script was not reported.
+ optional string url
+ # JavaScript stack trace if available.
+ optional StackTrace stackTrace
+ # Exception object if available.
+ optional RemoteObject exception
+ # Identifier of the context where exception happened.
+ optional ExecutionContextId executionContextId
+
+ # Number of milliseconds since epoch.
+ type Timestamp extends number
+
+ # Stack entry for runtime errors and assertions.
+ type CallFrame extends object
+ properties
+ # JavaScript function name.
+ string functionName
+ # JavaScript script id.
+ ScriptId scriptId
+ # JavaScript script name or url.
+ string url
+ # JavaScript script line number (0-based).
+ integer lineNumber
+ # JavaScript script column number (0-based).
+ integer columnNumber
+
+ # Call frames for assertions or error messages.
+ type StackTrace extends object
+ properties
+ # String label of this stack trace. For async traces this may be a name of the function that
+ # initiated the async call.
+ optional string description
+      # JavaScript call frames.
+ array of CallFrame callFrames
+ # Asynchronous JavaScript stack trace that preceded this stack, if available.
+ optional StackTrace parent
+ # Asynchronous JavaScript stack trace that preceded this stack, if available.
+ experimental optional StackTraceId parentId
+
+ # Unique identifier of current debugger.
+ experimental type UniqueDebuggerId extends string
+
+  # If `debuggerId` is set, the stack trace comes from another debugger and can be resolved there.
+  # This allows tracking cross-debugger calls. See `Runtime.StackTrace` and `Debugger.paused` for
+  # usages.
+ experimental type StackTraceId extends object
+ properties
+ string id
+ optional UniqueDebuggerId debuggerId
+
+ # Add handler to promise with given promise object id.
+ command awaitPromise
+ parameters
+ # Identifier of the promise.
+ RemoteObjectId promiseObjectId
+ # Whether the result is expected to be a JSON object that should be sent by value.
+ optional boolean returnByValue
+ # Whether preview should be generated for the result.
+ optional boolean generatePreview
+ returns
+ # Promise result. Will contain rejected value if promise was rejected.
+ RemoteObject result
+      # Exception details if stack trace is available.
+ optional ExceptionDetails exceptionDetails
+
+ # Calls function with given declaration on the given object. Object group of the result is
+ # inherited from the target object.
+ command callFunctionOn
+ parameters
+ # Declaration of the function to call.
+ string functionDeclaration
+ # Identifier of the object to call function on. Either objectId or executionContextId should
+ # be specified.
+ optional RemoteObjectId objectId
+ # Call arguments. All call arguments must belong to the same JavaScript world as the target
+ # object.
+ optional array of CallArgument arguments
+ # In silent mode exceptions thrown during evaluation are not reported and do not pause
+ # execution. Overrides `setPauseOnException` state.
+ optional boolean silent
+ # Whether the result is expected to be a JSON object which should be sent by value.
+ optional boolean returnByValue
+ # Whether preview should be generated for the result.
+ experimental optional boolean generatePreview
+ # Whether execution should be treated as initiated by user in the UI.
+ optional boolean userGesture
+    # Whether execution should `await` the resulting value and return once the awaited promise is
+ # resolved.
+ optional boolean awaitPromise
+    # Specifies the execution context whose global object will be used to call the function on.
+    # Either executionContextId or objectId should be specified.
+ optional ExecutionContextId executionContextId
+ # Symbolic group name that can be used to release multiple objects. If objectGroup is not
+ # specified and objectId is, objectGroup will be inherited from object.
+ optional string objectGroup
+ returns
+ # Call result.
+ RemoteObject result
+ # Exception details.
+ optional ExceptionDetails exceptionDetails
+
+ # Compiles expression.
+ command compileScript
+ parameters
+ # Expression to compile.
+ string expression
+ # Source url to be set for the script.
+ string sourceURL
+ # Specifies whether the compiled script should be persisted.
+ boolean persistScript
+ # Specifies in which execution context to perform script run. If the parameter is omitted the
+ # evaluation will be performed in the context of the inspected page.
+ optional ExecutionContextId executionContextId
+ returns
+ # Id of the script.
+ optional ScriptId scriptId
+ # Exception details.
+ optional ExceptionDetails exceptionDetails
+
+ # Disables reporting of execution contexts creation.
+ command disable
+
+ # Discards collected exceptions and console API calls.
+ command discardConsoleEntries
+
+ # Enables reporting of execution contexts creation by means of `executionContextCreated` event.
+ # When the reporting gets enabled the event will be sent immediately for each existing execution
+ # context.
+ command enable
+
+ # Evaluates expression on global object.
+ command evaluate
+ parameters
+ # Expression to evaluate.
+ string expression
+ # Symbolic group name that can be used to release multiple objects.
+ optional string objectGroup
+ # Determines whether Command Line API should be available during the evaluation.
+ optional boolean includeCommandLineAPI
+ # In silent mode exceptions thrown during evaluation are not reported and do not pause
+ # execution. Overrides `setPauseOnException` state.
+ optional boolean silent
+ # Specifies in which execution context to perform evaluation. If the parameter is omitted the
+ # evaluation will be performed in the context of the inspected page.
+ optional ExecutionContextId contextId
+ # Whether the result is expected to be a JSON object that should be sent by value.
+ optional boolean returnByValue
+ # Whether preview should be generated for the result.
+ experimental optional boolean generatePreview
+ # Whether execution should be treated as initiated by user in the UI.
+ optional boolean userGesture
+    # Whether execution should `await` the resulting value and return once the awaited promise is
+ # resolved.
+ optional boolean awaitPromise
+ returns
+ # Evaluation result.
+ RemoteObject result
+ # Exception details.
+ optional ExceptionDetails exceptionDetails
+
+ # Returns properties of a given object. Object group of the result is inherited from the target
+ # object.
+ command getProperties
+ parameters
+ # Identifier of the object to return properties for.
+ RemoteObjectId objectId
+ # If true, returns properties belonging only to the element itself, not to its prototype
+ # chain.
+ optional boolean ownProperties
+ # If true, returns accessor properties (with getter/setter) only; internal properties are not
+ # returned either.
+ experimental optional boolean accessorPropertiesOnly
+ # Whether preview should be generated for the results.
+ experimental optional boolean generatePreview
+ returns
+ # Object properties.
+ array of PropertyDescriptor result
+ # Internal object properties (only of the element itself).
+ optional array of InternalPropertyDescriptor internalProperties
+ # Exception details.
+ optional ExceptionDetails exceptionDetails
+
+ # Returns all let, const and class variables from global scope.
+ command globalLexicalScopeNames
+ parameters
+ # Specifies in which execution context to lookup global scope variables.
+ optional ExecutionContextId executionContextId
+ returns
+ array of string names
+
+ command queryObjects
+ parameters
+ # Identifier of the prototype to return objects for.
+ RemoteObjectId prototypeObjectId
+ returns
+ # Array with objects.
+ RemoteObject objects
+
+ # Releases remote object with given id.
+ command releaseObject
+ parameters
+ # Identifier of the object to release.
+ RemoteObjectId objectId
+
+ # Releases all remote objects that belong to a given group.
+ command releaseObjectGroup
+ parameters
+ # Symbolic object group name.
+ string objectGroup
+
+ # Tells inspected instance to run if it was waiting for debugger to attach.
+ command runIfWaitingForDebugger
+
+ # Runs script with given id in a given context.
+ command runScript
+ parameters
+ # Id of the script to run.
+ ScriptId scriptId
+ # Specifies in which execution context to perform script run. If the parameter is omitted the
+ # evaluation will be performed in the context of the inspected page.
+ optional ExecutionContextId executionContextId
+ # Symbolic group name that can be used to release multiple objects.
+ optional string objectGroup
+ # In silent mode exceptions thrown during evaluation are not reported and do not pause
+ # execution. Overrides `setPauseOnException` state.
+ optional boolean silent
+ # Determines whether Command Line API should be available during the evaluation.
+ optional boolean includeCommandLineAPI
+ # Whether the result is expected to be a JSON object which should be sent by value.
+ optional boolean returnByValue
+ # Whether preview should be generated for the result.
+ optional boolean generatePreview
+    # Whether execution should `await` the resulting value and return once the awaited promise is
+ # resolved.
+ optional boolean awaitPromise
+ returns
+ # Run result.
+ RemoteObject result
+ # Exception details.
+ optional ExceptionDetails exceptionDetails
+
+ experimental command setCustomObjectFormatterEnabled
+ parameters
+ boolean enabled
+
+ # Issued when console API was called.
+ event consoleAPICalled
+ parameters
+ # Type of the call.
+ enum type
+ log
+ debug
+ info
+ error
+ warning
+ dir
+ dirxml
+ table
+ trace
+ clear
+ startGroup
+ startGroupCollapsed
+ endGroup
+ assert
+ profile
+ profileEnd
+ count
+ timeEnd
+ # Call arguments.
+ array of RemoteObject args
+ # Identifier of the context where the call was made.
+ ExecutionContextId executionContextId
+ # Call timestamp.
+ Timestamp timestamp
+ # Stack trace captured when the call was made.
+ optional StackTrace stackTrace
+ # Console context descriptor for calls on non-default console context (not console.*):
+ # 'anonymous#unique-logger-id' for call on unnamed context, 'name#unique-logger-id' for call
+ # on named context.
+ experimental optional string context
+
+ # Issued when unhandled exception was revoked.
+ event exceptionRevoked
+ parameters
+ # Reason describing why exception was revoked.
+ string reason
+ # The id of revoked exception, as reported in `exceptionThrown`.
+ integer exceptionId
+
+ # Issued when exception was thrown and unhandled.
+ event exceptionThrown
+ parameters
+ # Timestamp of the exception.
+ Timestamp timestamp
+ ExceptionDetails exceptionDetails
+
+ # Issued when new execution context is created.
+ event executionContextCreated
+ parameters
+ # A newly created execution context.
+ ExecutionContextDescription context
+
+ # Issued when execution context is destroyed.
+ event executionContextDestroyed
+ parameters
+ # Id of the destroyed context
+ ExecutionContextId executionContextId
+
+ # Issued when all executionContexts were cleared in browser
+ event executionContextsCleared
+
+ # Issued when object should be inspected (for example, as a result of inspect() command line API
+ # call).
+ event inspectRequested
+ parameters
+ RemoteObject object
+ object hints
+
+# This domain is deprecated.
+deprecated domain Schema
+
+ # Description of the protocol domain.
+ type Domain extends object
+ properties
+ # Domain name.
+ string name
+ # Domain version.
+ string version
+
+ # Returns supported domains.
+ command getDomains
+ returns
+ # List of supported domains.
+ array of Domain domains
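
As a hedged sketch of the evaluate/release life cycle defined above, the following TypeScript speaks raw JSON over the same kind of DevTools WebSocket; the endpoint is an assumption, while the methods, parameters and result shapes are taken from the Runtime domain.

    const ws = new WebSocket("ws://127.0.0.1:9229/some-session-id"); // assumed endpoint

    ws.onopen = () => {
      ws.send(JSON.stringify({
        id: 1,
        method: "Runtime.evaluate",
        params: {
          expression: "({ answer: 42 })",
          objectGroup: "demo",  // released in one call below
          returnByValue: false, // keep a RemoteObject handle rather than a JSON value
          awaitPromise: true,
        },
      }));
    };

    ws.onmessage = (event) => {
      const msg = JSON.parse(event.data as string);
      if (msg.id !== 1) return;
      if (msg.error) {
        console.error("protocol error:", msg.error.message);
        return;
      }
      if (msg.result.exceptionDetails) {
        console.error("threw:", msg.result.exceptionDetails.text);
      } else {
        // Non-primitive results carry an objectId that can be passed to
        // Runtime.getProperties or Runtime.releaseObject.
        console.log("objectId:", msg.result.result.objectId);
      }
      ws.send(JSON.stringify({
        id: 2,
        method: "Runtime.releaseObjectGroup",
        params: { objectGroup: "demo" },
      }));
    };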
diff --git a/deps/v8/src/inspector/string-16.cc b/deps/v8/src/inspector/string-16.cc
index 36a0cca26c..dc753fee40 100644
--- a/deps/v8/src/inspector/string-16.cc
+++ b/deps/v8/src/inspector/string-16.cc
@@ -162,15 +162,15 @@ ConversionResult convertUTF16ToUTF8(const UChar** sourceStart,
* @return TRUE or FALSE
* @stable ICU 2.8
*/
-#define U_IS_BMP(c) ((uint32_t)(c) <= 0xffff)
+#define U_IS_BMP(c) ((uint32_t)(c) <= 0xFFFF)
/**
- * Is this code point a supplementary code point (U+10000..U+10ffff)?
+ * Is this code point a supplementary code point (U+010000..U+10FFFF)?
* @param c 32-bit code point
* @return TRUE or FALSE
* @stable ICU 2.8
*/
-#define U_IS_SUPPLEMENTARY(c) ((uint32_t)((c)-0x10000) <= 0xfffff)
+#define U_IS_SUPPLEMENTARY(c) ((uint32_t)((c)-0x010000) <= 0xFFFFF)
/**
* Is this code point a surrogate (U+d800..U+dfff)?
@@ -178,25 +178,25 @@ ConversionResult convertUTF16ToUTF8(const UChar** sourceStart,
* @return TRUE or FALSE
* @stable ICU 2.4
*/
-#define U_IS_SURROGATE(c) (((c)&0xfffff800) == 0xd800)
+#define U_IS_SURROGATE(c) (((c)&0xFFFFF800) == 0xD800)
/**
- * Get the lead surrogate (0xd800..0xdbff) for a
- * supplementary code point (0x10000..0x10ffff).
- * @param supplementary 32-bit code point (U+10000..U+10ffff)
- * @return lead surrogate (U+d800..U+dbff) for supplementary
+ * Get the lead surrogate (0xD800..0xDBFF) for a
+ * supplementary code point (0x010000..0x10FFFF).
+ * @param supplementary 32-bit code point (U+010000..U+10FFFF)
+ * @return lead surrogate (U+D800..U+DBFF) for supplementary
* @stable ICU 2.4
*/
-#define U16_LEAD(supplementary) (UChar)(((supplementary) >> 10) + 0xd7c0)
+#define U16_LEAD(supplementary) (UChar)(((supplementary) >> 10) + 0xD7C0)
/**
- * Get the trail surrogate (0xdc00..0xdfff) for a
- * supplementary code point (0x10000..0x10ffff).
- * @param supplementary 32-bit code point (U+10000..U+10ffff)
- * @return trail surrogate (U+dc00..U+dfff) for supplementary
+ * Get the trail surrogate (0xDC00..0xDFFF) for a
+ * supplementary code point (0x010000..0x10FFFF).
+ * @param supplementary 32-bit code point (U+010000..U+10FFFF)
+ * @return trail surrogate (U+DC00..U+DFFF) for supplementary
* @stable ICU 2.4
*/
-#define U16_TRAIL(supplementary) (UChar)(((supplementary)&0x3ff) | 0xdc00)
+#define U16_TRAIL(supplementary) (UChar)(((supplementary)&0x3FF) | 0xDC00)
// This must be called with the length pre-determined by the first byte.
// If presented with a length > 4, this returns false. The Unicode
@@ -329,7 +329,7 @@ ConversionResult convertUTF8ToUTF16(const char** sourceStart,
}
*target++ = U16_LEAD(character);
*target++ = U16_TRAIL(character);
- orAllData = 0xffff;
+ orAllData = 0xFFFF;
} else {
if (strict) {
source -= utf8SequenceLength; // return to the start
@@ -344,7 +344,7 @@ ConversionResult convertUTF8ToUTF16(const char** sourceStart,
*sourceStart = source;
*targetStart = target;
- if (sourceAllASCII) *sourceAllASCII = !(orAllData & ~0x7f);
+ if (sourceAllASCII) *sourceAllASCII = !(orAllData & ~0x7F);
return result;
}
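
The hex-case changes above are behavior-preserving renames. As a sanity check, the same surrogate arithmetic written in TypeScript reproduces JavaScript's own UTF-16 encoding of a supplementary code point:

    const cp = 0x1F600; // supplementary code point (U+010000..U+10FFFF)
    const lead = (cp >> 10) + 0xD7C0;    // U16_LEAD  -> 0xD83D
    const trail = (cp & 0x3FF) | 0xDC00; // U16_TRAIL -> 0xDE00
    console.log(lead.toString(16), trail.toString(16)); // d83d de00
    console.log(String.fromCharCode(lead, trail) === String.fromCodePoint(cp)); // true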
diff --git a/deps/v8/src/inspector/v8-console-message.cc b/deps/v8/src/inspector/v8-console-message.cc
index 1129eac676..e96e89c0eb 100644
--- a/deps/v8/src/inspector/v8-console-message.cc
+++ b/deps/v8/src/inspector/v8-console-message.cc
@@ -528,6 +528,11 @@ double V8ConsoleMessageStorage::timeEnd(int contextId, const String16& id) {
return elapsed;
}
+bool V8ConsoleMessageStorage::hasTimer(int contextId, const String16& id) {
+ const std::map<String16, double>& time = m_data[contextId].m_time;
+ return time.find(id) != time.end();
+}
+
void V8ConsoleMessageStorage::contextDestroyed(int contextId) {
m_estimatedSize = 0;
for (size_t i = 0; i < m_messages.size(); ++i) {
diff --git a/deps/v8/src/inspector/v8-console-message.h b/deps/v8/src/inspector/v8-console-message.h
index 57f692f6db..f82f8e5a13 100644
--- a/deps/v8/src/inspector/v8-console-message.h
+++ b/deps/v8/src/inspector/v8-console-message.h
@@ -120,6 +120,7 @@ class V8ConsoleMessageStorage {
int count(int contextId, const String16& id);
void time(int contextId, const String16& id);
double timeEnd(int contextId, const String16& id);
+ bool hasTimer(int contextId, const String16& id);
private:
V8InspectorImpl* m_inspector;
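
Sketched in TypeScript, and simplified to a single context with a wall-clock timer (the real storage is keyed per contextId, as m_data[contextId].m_time above), the bookkeeping that hasTimer() consults amounts to:

    class TimerStorage {
      private m_time = new Map<string, number>(); // timerId -> start timestamp

      time(id: string): void {
        this.m_time.set(id, Date.now());
      }

      hasTimer(id: string): boolean {
        return this.m_time.has(id); // mirrors time.find(id) != time.end()
      }

      timeEnd(id: string): number {
        const elapsed = Date.now() - (this.m_time.get(id) ?? Date.now());
        this.m_time.delete(id);
        return elapsed;
      }
    }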
diff --git a/deps/v8/src/inspector/v8-console.cc b/deps/v8/src/inspector/v8-console.cc
index 7a0caf08a1..fa04209dec 100644
--- a/deps/v8/src/inspector/v8-console.cc
+++ b/deps/v8/src/inspector/v8-console.cc
@@ -284,7 +284,7 @@ void V8Console::Clear(const v8::debug::ConsoleCallArguments& info,
void V8Console::Count(const v8::debug::ConsoleCallArguments& info,
const v8::debug::ConsoleContext& consoleContext) {
ConsoleHelper helper(info, consoleContext, m_inspector);
- String16 title = helper.firstArgToString(String16());
+ String16 title = helper.firstArgToString(String16("default"), false);
String16 identifier;
if (title.isEmpty()) {
std::unique_ptr<V8StackTraceImpl> stackTrace =
@@ -354,10 +354,16 @@ static void timeFunction(const v8::debug::ConsoleCallArguments& info,
ConsoleHelper helper(info, consoleContext, inspector);
String16 protocolTitle = helper.firstArgToString("default", false);
if (timelinePrefix) protocolTitle = "Timeline '" + protocolTitle + "'";
+ const String16& timerId =
+ protocolTitle + "@" + consoleContextToString(consoleContext);
+ if (helper.consoleMessageStorage()->hasTimer(helper.contextId(), timerId)) {
+ helper.reportCallWithArgument(
+ ConsoleAPIType::kWarning,
+ "Timer '" + protocolTitle + "' already exists");
+ return;
+ }
inspector->client()->consoleTime(toStringView(protocolTitle));
- helper.consoleMessageStorage()->time(
- helper.contextId(),
- protocolTitle + "@" + consoleContextToString(consoleContext));
+ helper.consoleMessageStorage()->time(helper.contextId(), timerId);
}
static void timeEndFunction(const v8::debug::ConsoleCallArguments& info,
@@ -366,6 +372,14 @@ static void timeEndFunction(const v8::debug::ConsoleCallArguments& info,
ConsoleHelper helper(info, consoleContext, inspector);
String16 protocolTitle = helper.firstArgToString("default", false);
if (timelinePrefix) protocolTitle = "Timeline '" + protocolTitle + "'";
+ const String16& timerId =
+ protocolTitle + "@" + consoleContextToString(consoleContext);
+ if (!helper.consoleMessageStorage()->hasTimer(helper.contextId(), timerId)) {
+ helper.reportCallWithArgument(
+ ConsoleAPIType::kWarning,
+ "Timer '" + protocolTitle + "' does not exist");
+ return;
+ }
inspector->client()->consoleTimeEnd(toStringView(protocolTitle));
double elapsed = helper.consoleMessageStorage()->timeEnd(
helper.contextId(),
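
The observable effect of the two new guards, with the warning strings as they appear in the patch:

    console.time("load");
    console.time("load");    // warning: Timer 'load' already exists
    console.timeEnd("load"); // reports the elapsed time
    console.timeEnd("load"); // warning: Timer 'load' does not exist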
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.cc b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
index 8e5142d36e..7bfde09b71 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
@@ -553,7 +553,7 @@ Response V8DebuggerAgentImpl::setBreakpointByUrl(
}
std::unique_ptr<protocol::Debugger::Location> location = setBreakpointImpl(
breakpointId, script.first, condition, lineNumber, columnNumber);
- if (type != BreakpointType::kByUrlRegex) {
+ if (location && type != BreakpointType::kByUrlRegex) {
hint = breakpointHint(*script.second, lineNumber, columnNumber);
}
if (location) (*locations)->addItem(std::move(location));
@@ -1330,6 +1330,24 @@ V8DebuggerAgentImpl::currentExternalStackTrace() {
.build();
}
+std::unique_ptr<protocol::Runtime::StackTraceId>
+V8DebuggerAgentImpl::currentScheduledAsyncCall() {
+ v8_inspector::V8StackTraceId scheduledAsyncCall =
+ m_debugger->scheduledAsyncCall();
+ if (scheduledAsyncCall.IsInvalid()) return nullptr;
+ std::unique_ptr<protocol::Runtime::StackTraceId> asyncCallStackTrace =
+ protocol::Runtime::StackTraceId::create()
+ .setId(stackTraceIdToString(scheduledAsyncCall.id))
+ .build();
+ // TODO(kozyatinskiy): extract this check to IsLocal function.
+ if (scheduledAsyncCall.debugger_id.first ||
+ scheduledAsyncCall.debugger_id.second) {
+ asyncCallStackTrace->setDebuggerId(
+ debuggerIdToString(scheduledAsyncCall.debugger_id));
+ }
+ return asyncCallStackTrace;
+}
+
bool V8DebuggerAgentImpl::isPaused() const {
return m_debugger->isPausedInContextGroup(m_session->contextGroupId());
}
@@ -1532,22 +1550,10 @@ void V8DebuggerAgentImpl::didPause(
Response response = currentCallFrames(&protocolCallFrames);
if (!response.isSuccess()) protocolCallFrames = Array<CallFrame>::create();
- Maybe<protocol::Runtime::StackTraceId> asyncCallStackTrace;
- void* rawScheduledAsyncTask = m_debugger->scheduledAsyncTask();
- if (rawScheduledAsyncTask) {
- asyncCallStackTrace =
- protocol::Runtime::StackTraceId::create()
- .setId(stackTraceIdToString(
- reinterpret_cast<uintptr_t>(rawScheduledAsyncTask)))
- .setDebuggerId(debuggerIdToString(
- m_debugger->debuggerIdFor(m_session->contextGroupId())))
- .build();
- }
-
m_frontend.paused(std::move(protocolCallFrames), breakReason,
std::move(breakAuxData), std::move(hitBreakpointIds),
currentAsyncStackTrace(), currentExternalStackTrace(),
- std::move(asyncCallStackTrace));
+ currentScheduledAsyncCall());
}
void V8DebuggerAgentImpl::didContinue() {
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.h b/deps/v8/src/inspector/v8-debugger-agent-impl.h
index e697b700e9..168c5a7724 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.h
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.h
@@ -156,6 +156,7 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
std::unique_ptr<protocol::Array<protocol::Debugger::CallFrame>>*);
std::unique_ptr<protocol::Runtime::StackTrace> currentAsyncStackTrace();
std::unique_ptr<protocol::Runtime::StackTraceId> currentExternalStackTrace();
+ std::unique_ptr<protocol::Runtime::StackTraceId> currentScheduledAsyncCall();
void setPauseOnExceptionsImpl(int);
diff --git a/deps/v8/src/inspector/v8-debugger.cc b/deps/v8/src/inspector/v8-debugger.cc
index 8f843b54b2..c86f320252 100644
--- a/deps/v8/src/inspector/v8-debugger.cc
+++ b/deps/v8/src/inspector/v8-debugger.cc
@@ -331,11 +331,7 @@ void V8Debugger::pauseOnAsyncCall(int targetContextGroupId, uintptr_t task,
m_targetContextGroupId = targetContextGroupId;
m_taskWithScheduledBreak = reinterpret_cast<void*>(task);
- String16 currentDebuggerId =
- debuggerIdToString(debuggerIdFor(targetContextGroupId));
- if (currentDebuggerId != debuggerId) {
- m_taskWithScheduledBreakDebuggerId = debuggerId;
- }
+ m_taskWithScheduledBreakDebuggerId = debuggerId;
}
Response V8Debugger::continueToLocation(
@@ -542,19 +538,18 @@ void V8Debugger::PromiseEventOccurred(v8::debug::PromiseDebugActionType type,
switch (type) {
case v8::debug::kDebugAsyncFunctionPromiseCreated:
asyncTaskScheduledForStack("async function", task, true);
- if (!isBlackboxed) asyncTaskCandidateForStepping(task);
break;
case v8::debug::kDebugPromiseThen:
asyncTaskScheduledForStack("Promise.then", task, false);
- if (!isBlackboxed) asyncTaskCandidateForStepping(task);
+ if (!isBlackboxed) asyncTaskCandidateForStepping(task, true);
break;
case v8::debug::kDebugPromiseCatch:
asyncTaskScheduledForStack("Promise.catch", task, false);
- if (!isBlackboxed) asyncTaskCandidateForStepping(task);
+ if (!isBlackboxed) asyncTaskCandidateForStepping(task, true);
break;
case v8::debug::kDebugPromiseFinally:
asyncTaskScheduledForStack("Promise.finally", task, false);
- if (!isBlackboxed) asyncTaskCandidateForStepping(task);
+ if (!isBlackboxed) asyncTaskCandidateForStepping(task, true);
break;
case v8::debug::kDebugWillHandle:
asyncTaskStartedForStack(task);
@@ -767,7 +762,7 @@ V8StackTraceId V8Debugger::storeCurrentStackTrace(
++m_asyncStacksCount;
collectOldAsyncStacksIfNeeded();
- asyncTaskCandidateForStepping(reinterpret_cast<void*>(id));
+ asyncTaskCandidateForStepping(reinterpret_cast<void*>(id), false);
return V8StackTraceId(id, debuggerIdFor(contextGroupId));
}
@@ -816,7 +811,7 @@ void V8Debugger::externalAsyncTaskFinished(const V8StackTraceId& parent) {
void V8Debugger::asyncTaskScheduled(const StringView& taskName, void* task,
bool recurring) {
asyncTaskScheduledForStack(toString16(taskName), task, recurring);
- asyncTaskCandidateForStepping(task);
+ asyncTaskCandidateForStepping(task, true);
}
void V8Debugger::asyncTaskCanceled(void* task) {
@@ -890,16 +885,23 @@ void V8Debugger::asyncTaskFinishedForStack(void* task) {
}
}
-void V8Debugger::asyncTaskCandidateForStepping(void* task) {
- if (m_pauseOnAsyncCall) {
- m_scheduledAsyncTask = task;
+void V8Debugger::asyncTaskCandidateForStepping(void* task, bool isLocal) {
+ int contextGroupId = currentContextGroupId();
+ if (m_pauseOnAsyncCall && contextGroupId) {
+ if (isLocal) {
+ m_scheduledAsyncCall = v8_inspector::V8StackTraceId(
+ reinterpret_cast<uintptr_t>(task), std::make_pair(0, 0));
+ } else {
+ m_scheduledAsyncCall = v8_inspector::V8StackTraceId(
+ reinterpret_cast<uintptr_t>(task), debuggerIdFor(contextGroupId));
+ }
breakProgram(m_targetContextGroupId);
- m_scheduledAsyncTask = nullptr;
+ m_scheduledAsyncCall = v8_inspector::V8StackTraceId();
return;
}
if (!m_stepIntoAsyncCallback) return;
DCHECK(m_targetContextGroupId);
- if (currentContextGroupId() != m_targetContextGroupId) return;
+ if (contextGroupId != m_targetContextGroupId) return;
m_taskWithScheduledBreak = task;
v8::debug::ClearStepping(m_isolate);
m_stepIntoAsyncCallback->sendSuccess();
@@ -1031,6 +1033,7 @@ std::pair<int64_t, int64_t> V8Debugger::debuggerIdFor(int contextGroupId) {
std::pair<int64_t, int64_t> debuggerId(
v8::debug::GetNextRandomInt64(m_isolate),
v8::debug::GetNextRandomInt64(m_isolate));
+ if (!debuggerId.first && !debuggerId.second) ++debuggerId.first;
m_contextGroupIdToDebuggerId.insert(
it, std::make_pair(contextGroupId, debuggerId));
m_serializedDebuggerIdToDebuggerId.insert(
diff --git a/deps/v8/src/inspector/v8-debugger.h b/deps/v8/src/inspector/v8-debugger.h
index 455bb5952d..4828fcad52 100644
--- a/deps/v8/src/inspector/v8-debugger.h
+++ b/deps/v8/src/inspector/v8-debugger.h
@@ -117,7 +117,9 @@ class V8Debugger : public v8::debug::DebugDelegate {
void setMaxAsyncTaskStacksForTest(int limit);
void dumpAsyncTaskStacksStateForTest();
- void* scheduledAsyncTask() { return m_scheduledAsyncTask; }
+ v8_inspector::V8StackTraceId scheduledAsyncCall() {
+ return m_scheduledAsyncCall;
+ }
std::pair<int64_t, int64_t> debuggerIdFor(int contextGroupId);
std::pair<int64_t, int64_t> debuggerIdFor(
@@ -155,7 +157,7 @@ class V8Debugger : public v8::debug::DebugDelegate {
void asyncTaskStartedForStack(void* task);
void asyncTaskFinishedForStack(void* task);
- void asyncTaskCandidateForStepping(void* task);
+ void asyncTaskCandidateForStepping(void* task, bool isLocal);
void asyncTaskStartedForStepping(void* task);
void asyncTaskFinishedForStepping(void* task);
void asyncTaskCanceledForStepping(void* task);
@@ -219,7 +221,7 @@ class V8Debugger : public v8::debug::DebugDelegate {
v8::debug::ExceptionBreakState m_pauseOnExceptionsState;
bool m_pauseOnAsyncCall = false;
- void* m_scheduledAsyncTask = nullptr;
+ v8_inspector::V8StackTraceId m_scheduledAsyncCall;
using StackTraceIdToStackTrace =
protocol::HashMap<uintptr_t, std::weak_ptr<AsyncStackTrace>>;
diff --git a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
index 8af3edf7e1..b876a956b2 100644
--- a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
@@ -63,7 +63,7 @@ class GlobalObjectNameResolver final
if (m_offset + length + 1 >= m_strings.size()) return "";
for (size_t i = 0; i < length; ++i) {
UChar ch = name[i];
- m_strings[m_offset + i] = ch > 0xff ? '?' : static_cast<char>(ch);
+ m_strings[m_offset + i] = ch > 0xFF ? '?' : static_cast<char>(ch);
}
m_strings[m_offset + length] = '\0';
char* result = &*m_strings.begin() + m_offset;
diff --git a/deps/v8/src/inspector/v8-injected-script-host.cc b/deps/v8/src/inspector/v8-injected-script-host.cc
index ef978ceda3..1455cf6dbc 100644
--- a/deps/v8/src/inspector/v8-injected-script-host.cc
+++ b/deps/v8/src/inspector/v8-injected-script-host.cc
@@ -44,6 +44,15 @@ V8InspectorImpl* unwrapInspector(
return inspector;
}
+template <typename TypedArray>
+void addTypedArrayProperty(std::vector<v8::Local<v8::Value>>* props,
+ v8::Isolate* isolate,
+ v8::Local<v8::ArrayBuffer> arraybuffer,
+ String16 name, size_t length) {
+ props->push_back(toV8String(isolate, name));
+ props->push_back(TypedArray::New(arraybuffer, 0, length));
+}
+
} // namespace
v8::Local<v8::Object> V8InjectedScriptHost::create(
@@ -84,6 +93,9 @@ v8::Local<v8::Object> V8InjectedScriptHost::create(
setFunctionProperty(context, injectedScriptHost, "nativeAccessorDescriptor",
V8InjectedScriptHost::nativeAccessorDescriptorCallback,
debuggerExternal);
+ setFunctionProperty(context, injectedScriptHost, "typedArrayProperties",
+ V8InjectedScriptHost::typedArrayPropertiesCallback,
+ debuggerExternal);
createDataProperty(context, injectedScriptHost,
toV8StringInternalized(isolate, "keys"),
v8::debug::GetBuiltin(isolate, v8::debug::kObjectKeys));
@@ -335,7 +347,7 @@ void V8InjectedScriptHost::proxyTargetValueCallback(
UNREACHABLE();
return;
}
- v8::Local<v8::Object> target = info[0].As<v8::Proxy>();
+ v8::Local<v8::Value> target = info[0].As<v8::Proxy>();
while (target->IsProxy())
target = v8::Local<v8::Proxy>::Cast(target)->GetTarget();
info.GetReturnValue().Set(target);
@@ -374,4 +386,40 @@ void V8InjectedScriptHost::nativeAccessorDescriptorCallback(
info.GetReturnValue().Set(result);
}
+void V8InjectedScriptHost::typedArrayPropertiesCallback(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ v8::Isolate* isolate = info.GetIsolate();
+ if (info.Length() != 1 || !info[0]->IsArrayBuffer()) return;
+
+ v8::TryCatch tryCatch(isolate);
+ v8::Isolate::DisallowJavascriptExecutionScope throwJs(
+ isolate, v8::Isolate::DisallowJavascriptExecutionScope::THROW_ON_FAILURE);
+ v8::Local<v8::ArrayBuffer> arrayBuffer = info[0].As<v8::ArrayBuffer>();
+ size_t length = arrayBuffer->ByteLength();
+ if (length == 0) return;
+ std::vector<v8::Local<v8::Value>> arrays_vector;
+ addTypedArrayProperty<v8::Int8Array>(&arrays_vector, isolate, arrayBuffer,
+ "[[Int8Array]]", length);
+ addTypedArrayProperty<v8::Uint8Array>(&arrays_vector, isolate, arrayBuffer,
+ "[[Uint8Array]]", length);
+
+ if (length % 2 == 0) {
+ addTypedArrayProperty<v8::Int16Array>(&arrays_vector, isolate, arrayBuffer,
+ "[[Int16Array]]", length / 2);
+ }
+ if (length % 4 == 0) {
+ addTypedArrayProperty<v8::Int32Array>(&arrays_vector, isolate, arrayBuffer,
+ "[[Int32Array]]", length / 4);
+ }
+
+ if (tryCatch.HasCaught()) return;
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ v8::Local<v8::Array> arrays =
+ v8::Array::New(isolate, static_cast<uint32_t>(arrays_vector.size()));
+ for (uint32_t i = 0; i < static_cast<uint32_t>(arrays_vector.size()); i++)
+ createDataProperty(context, arrays, i, arrays_vector[i]);
+ if (tryCatch.HasCaught()) return;
+ info.GetReturnValue().Set(arrays);
+}
+
} // namespace v8_inspector
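
The divisibility rules in typedArrayPropertiesCallback translate directly into a minimal TypeScript sketch; the [[...]] keys match the internal property names used above:

    function typedArrayViews(buffer: ArrayBuffer): Record<string, ArrayBufferView> {
      const length = buffer.byteLength;
      const views: Record<string, ArrayBufferView> = {};
      if (length === 0) return views;
      views["[[Int8Array]]"] = new Int8Array(buffer, 0, length);
      views["[[Uint8Array]]"] = new Uint8Array(buffer, 0, length);
      // Wider element types only fit when the byte length divides evenly.
      if (length % 2 === 0) views["[[Int16Array]]"] = new Int16Array(buffer, 0, length / 2);
      if (length % 4 === 0) views["[[Int32Array]]"] = new Int32Array(buffer, 0, length / 4);
      return views;
    }

    console.log(Object.keys(typedArrayViews(new ArrayBuffer(6))));
    // [ '[[Int8Array]]', '[[Uint8Array]]', '[[Int16Array]]' ]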
diff --git a/deps/v8/src/inspector/v8-injected-script-host.h b/deps/v8/src/inspector/v8-injected-script-host.h
index 491a157ea8..18f9139d63 100644
--- a/deps/v8/src/inspector/v8-injected-script-host.h
+++ b/deps/v8/src/inspector/v8-injected-script-host.h
@@ -44,6 +44,8 @@ class V8InjectedScriptHost {
const v8::FunctionCallbackInfo<v8::Value>&);
static void nativeAccessorDescriptorCallback(
const v8::FunctionCallbackInfo<v8::Value>&);
+ static void typedArrayPropertiesCallback(
+ const v8::FunctionCallbackInfo<v8::Value>&);
};
} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-inspector-session-impl.cc b/deps/v8/src/inspector/v8-inspector-session-impl.cc
index 6fba10ff11..d580c41e30 100644
--- a/deps/v8/src/inspector/v8-inspector-session-impl.cc
+++ b/deps/v8/src/inspector/v8-inspector-session-impl.cc
@@ -262,8 +262,9 @@ Response V8InspectorSessionImpl::unwrapObject(const String16& objectId,
std::unique_ptr<protocol::Runtime::API::RemoteObject>
V8InspectorSessionImpl::wrapObject(v8::Local<v8::Context> context,
v8::Local<v8::Value> value,
- const StringView& groupName) {
- return wrapObject(context, value, toString16(groupName), false);
+ const StringView& groupName,
+ bool generatePreview) {
+ return wrapObject(context, value, toString16(groupName), generatePreview);
}
std::unique_ptr<protocol::Runtime::RemoteObject>
diff --git a/deps/v8/src/inspector/v8-inspector-session-impl.h b/deps/v8/src/inspector/v8-inspector-session-impl.h
index adac6f1a85..4fb924f749 100644
--- a/deps/v8/src/inspector/v8-inspector-session-impl.h
+++ b/deps/v8/src/inspector/v8-inspector-session-impl.h
@@ -85,8 +85,8 @@ class V8InspectorSessionImpl : public V8InspectorSession,
v8::Local<v8::Value>*, v8::Local<v8::Context>*,
std::unique_ptr<StringBuffer>* objectGroup) override;
std::unique_ptr<protocol::Runtime::API::RemoteObject> wrapObject(
- v8::Local<v8::Context>, v8::Local<v8::Value>,
- const StringView& groupName) override;
+ v8::Local<v8::Context>, v8::Local<v8::Value>, const StringView& groupName,
+ bool generatePreview) override;
V8InspectorSession::Inspectable* inspectedObject(unsigned num);
static const unsigned kInspectedObjectBufferSize = 5;
diff --git a/deps/v8/src/inspector/v8-stack-trace-impl.cc b/deps/v8/src/inspector/v8-stack-trace-impl.cc
index a8aaa1158b..8c208aaf8a 100644
--- a/deps/v8/src/inspector/v8-stack-trace-impl.cc
+++ b/deps/v8/src/inspector/v8-stack-trace-impl.cc
@@ -42,6 +42,7 @@ void calculateAsyncChain(V8Debugger* debugger, int contextGroupId,
// not happen if we have proper instrumentation, but let's double-check to be
// safe.
if (contextGroupId && *asyncParent &&
+ (*asyncParent)->externalParent().IsInvalid() &&
(*asyncParent)->contextGroupId() != contextGroupId) {
asyncParent->reset();
*externalParent = V8StackTraceId();
@@ -338,14 +339,15 @@ std::shared_ptr<AsyncStackTrace> AsyncStackTrace::capture(
// but doesn't synchronous we can merge them together. e.g. Promise
// ThenableJob.
if (asyncParent && frames.empty() &&
- asyncParent->m_description == description) {
+ (asyncParent->m_description == description || description.isEmpty())) {
return asyncParent;
}
- DCHECK(contextGroupId || asyncParent);
+ DCHECK(contextGroupId || asyncParent || !externalParent.IsInvalid());
if (!contextGroupId && asyncParent) {
contextGroupId = asyncParent->m_contextGroupId;
}
+
return std::shared_ptr<AsyncStackTrace>(
new AsyncStackTrace(contextGroupId, description, std::move(frames),
asyncParent, externalParent));
@@ -362,7 +364,7 @@ AsyncStackTrace::AsyncStackTrace(
m_frames(std::move(frames)),
m_asyncParent(asyncParent),
m_externalParent(externalParent) {
- DCHECK(m_contextGroupId);
+ DCHECK(m_contextGroupId || (!externalParent.IsInvalid() && m_frames.empty()));
}
std::unique_ptr<protocol::Runtime::StackTrace>
diff --git a/deps/v8/src/inspector/v8-stack-trace-impl.h b/deps/v8/src/inspector/v8-stack-trace-impl.h
index b8314c8fc4..08d98110ae 100644
--- a/deps/v8/src/inspector/v8-stack-trace-impl.h
+++ b/deps/v8/src/inspector/v8-stack-trace-impl.h
@@ -120,6 +120,7 @@ class AsyncStackTrace {
const String16& description() const;
std::weak_ptr<AsyncStackTrace> parent() const;
bool isEmpty() const;
+ const V8StackTraceId& externalParent() const { return m_externalParent; }
const std::vector<std::shared_ptr<StackFrame>>& frames() const {
return m_frames;
diff --git a/deps/v8/src/inspector/v8-value-utils.cc b/deps/v8/src/inspector/v8-value-utils.cc
index f32369df36..3835f34f6d 100644
--- a/deps/v8/src/inspector/v8-value-utils.cc
+++ b/deps/v8/src/inspector/v8-value-utils.cc
@@ -85,6 +85,7 @@ protocol::Response toProtocolValue(v8::Local<v8::Context> context,
v8::Local<v8::Value> property;
if (!object->Get(context, name).ToLocal(&property))
return Response::InternalError();
+ if (property->IsUndefined()) continue;
std::unique_ptr<protocol::Value> propertyValue;
Response response =
toProtocolValue(context, property, maxDepth, &propertyValue);
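
Skipping undefined-valued properties brings toProtocolValue in line with ordinary JSON semantics, where such properties simply vanish:

    const value = { kept: 1, dropped: undefined };
    console.log(JSON.stringify(value)); // {"kept":1}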
diff --git a/deps/v8/src/interface-descriptors.cc b/deps/v8/src/interface-descriptors.cc
index 970a4ad3ad..3b466aceb9 100644
--- a/deps/v8/src/interface-descriptors.cc
+++ b/deps/v8/src/interface-descriptors.cc
@@ -170,6 +170,42 @@ void LoadGlobalWithVectorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void StoreGlobalDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kName, kValue, kSlot
+ MachineType machine_types[] = {MachineType::AnyTagged(),
+ MachineType::AnyTagged(),
+ MachineType::TaggedSigned()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
+}
+
+void StoreGlobalDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {NameRegister(), ValueRegister(), SlotRegister()};
+
+ int len = arraysize(registers) - kStackArgumentsCount;
+ data->InitializePlatformSpecific(len, registers);
+}
+
+void StoreGlobalWithVectorDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kName, kValue, kSlot, kVector
+ MachineType machine_types[] = {
+ MachineType::AnyTagged(), MachineType::AnyTagged(),
+ MachineType::TaggedSigned(), MachineType::AnyTagged()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
+}
+
+void StoreGlobalWithVectorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {NameRegister(), ValueRegister(), SlotRegister(),
+ VectorRegister()};
+ int len = arraysize(registers) - kStackArgumentsCount;
+ data->InitializePlatformSpecific(len, registers);
+}
+
void StoreDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
// kReceiver, kName, kValue, kSlot
@@ -233,21 +269,7 @@ void StoreNamedTransitionDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(len, registers);
}
-void StringCharAtDescriptor::InitializePlatformIndependent(
- CallInterfaceDescriptorData* data) {
- // kReceiver, kPosition
- MachineType machine_types[] = {MachineType::AnyTagged(),
- MachineType::IntPtr()};
- data->InitializePlatformIndependent(arraysize(machine_types), 0,
- machine_types);
-}
-
-void StringCharAtDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
-void StringCharCodeAtDescriptor::InitializePlatformIndependent(
+void StringAtDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
// kReceiver, kPosition
// TODO(turbofan): Allow builtins to return untagged values.
@@ -257,7 +279,7 @@ void StringCharCodeAtDescriptor::InitializePlatformIndependent(
machine_types);
}
-void StringCharCodeAtDescriptor::InitializePlatformSpecific(
+void StringAtDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
DefaultInitializePlatformSpecific(data, kParameterCount);
}
@@ -320,24 +342,6 @@ void LoadWithVectorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void LoadICProtoArrayDescriptor::InitializePlatformIndependent(
- CallInterfaceDescriptorData* data) {
- // kReceiver, kName, kSlot, kVector, kHandler
- MachineType machine_types[] = {
- MachineType::AnyTagged(), MachineType::AnyTagged(),
- MachineType::TaggedSigned(), MachineType::AnyTagged(),
- MachineType::AnyTagged()};
- data->InitializePlatformIndependent(arraysize(machine_types), 0,
- machine_types);
-}
-
-void LoadICProtoArrayDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister(), NameRegister(), SlotRegister(),
- VectorRegister(), HandlerRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
void StoreWithVectorDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
// kReceiver, kName, kValue, kSlot, kVector
diff --git a/deps/v8/src/interface-descriptors.h b/deps/v8/src/interface-descriptors.h
index 49c047333a..12b25a510a 100644
--- a/deps/v8/src/interface-descriptors.h
+++ b/deps/v8/src/interface-descriptors.h
@@ -22,13 +22,14 @@ class PlatformInterfaceDescriptor;
V(Load) \
V(LoadWithVector) \
V(LoadField) \
- V(LoadICProtoArray) \
V(LoadGlobal) \
V(LoadGlobalWithVector) \
V(Store) \
V(StoreWithVector) \
V(StoreNamedTransition) \
V(StoreTransition) \
+ V(StoreGlobal) \
+ V(StoreGlobalWithVector) \
V(FastNewClosure) \
V(FastNewFunctionContext) \
V(FastNewObject) \
@@ -50,6 +51,7 @@ class PlatformInterfaceDescriptor;
V(ConstructWithArrayLike) \
V(ConstructTrampoline) \
V(TransitionElementsKind) \
+ V(AbortJS) \
V(AllocateHeapNumber) \
V(Builtin) \
V(ArrayConstructor) \
@@ -60,8 +62,7 @@ class PlatformInterfaceDescriptor;
V(Compare) \
V(BinaryOp) \
V(StringAdd) \
- V(StringCharAt) \
- V(StringCharCodeAt) \
+ V(StringAt) \
V(ForInPrepare) \
V(GetProperty) \
V(ArgumentAdaptor) \
@@ -78,6 +79,7 @@ class PlatformInterfaceDescriptor;
V(ResumeGenerator) \
V(FrameDropperTrampoline) \
V(WasmRuntimeCall) \
+ V(RunMicrotasks) \
BUILTIN_LIST_TFS(V)
class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
@@ -454,6 +456,44 @@ class StoreWithVectorDescriptor : public StoreDescriptor {
static const int kStackArgumentsCount = kPassLastArgsOnStack ? 3 : 0;
};
+class StoreGlobalDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kName, kValue, kSlot)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(StoreGlobalDescriptor,
+ CallInterfaceDescriptor)
+
+ static const bool kPassLastArgsOnStack =
+ StoreDescriptor::kPassLastArgsOnStack;
+ // Pass value and slot through the stack.
+ static const int kStackArgumentsCount = kPassLastArgsOnStack ? 2 : 0;
+
+ static const Register NameRegister() {
+ return StoreDescriptor::NameRegister();
+ }
+
+ static const Register ValueRegister() {
+ return StoreDescriptor::ValueRegister();
+ }
+
+ static const Register SlotRegister() {
+ return StoreDescriptor::SlotRegister();
+ }
+};
+
+class StoreGlobalWithVectorDescriptor : public StoreGlobalDescriptor {
+ public:
+ DEFINE_PARAMETERS(kName, kValue, kSlot, kVector)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(StoreGlobalWithVectorDescriptor,
+ StoreGlobalDescriptor)
+
+ static const Register VectorRegister() {
+ return StoreWithVectorDescriptor::VectorRegister();
+ }
+
+ // Pass value, slot and vector through the stack.
+ static const int kStackArgumentsCount = kPassLastArgsOnStack ? 3 : 0;
+};
+
class LoadWithVectorDescriptor : public LoadDescriptor {
public:
DEFINE_PARAMETERS(kReceiver, kName, kSlot, kVector)
@@ -463,15 +503,6 @@ class LoadWithVectorDescriptor : public LoadDescriptor {
static const Register VectorRegister();
};
-class LoadICProtoArrayDescriptor : public LoadWithVectorDescriptor {
- public:
- DEFINE_PARAMETERS(kReceiver, kName, kSlot, kVector, kHandler)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(LoadICProtoArrayDescriptor,
- LoadWithVectorDescriptor)
-
- static const Register HandlerRegister();
-};
-
class LoadGlobalWithVectorDescriptor : public LoadGlobalDescriptor {
public:
DEFINE_PARAMETERS(kName, kSlot, kVector)
@@ -650,6 +681,11 @@ class TransitionElementsKindDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(TransitionElementsKindDescriptor, CallInterfaceDescriptor)
};
+class AbortJSDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kObject)
+ DECLARE_DESCRIPTOR(AbortJSDescriptor, CallInterfaceDescriptor)
+};
class AllocateHeapNumberDescriptor : public CallInterfaceDescriptor {
public:
@@ -725,17 +761,12 @@ class StringAddDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(StringAddDescriptor, CallInterfaceDescriptor)
};
-class StringCharAtDescriptor final : public CallInterfaceDescriptor {
+// This descriptor is shared among String.p.charAt/charCodeAt/codePointAt
+// as they all have the same interface.
+class StringAtDescriptor final : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kReceiver, kPosition)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(StringCharAtDescriptor,
- CallInterfaceDescriptor)
-};
-
-class StringCharCodeAtDescriptor final : public CallInterfaceDescriptor {
- public:
- DEFINE_PARAMETERS(kReceiver, kPosition)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(StringCharCodeAtDescriptor,
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(StringAtDescriptor,
CallInterfaceDescriptor)
};
@@ -846,6 +877,13 @@ class WasmRuntimeCallDescriptor final : public CallInterfaceDescriptor {
0)
};
+class RunMicrotasksDescriptor final : public CallInterfaceDescriptor {
+ public:
+ DEFINE_EMPTY_PARAMETERS()
+ DECLARE_DEFAULT_DESCRIPTOR(RunMicrotasksDescriptor, CallInterfaceDescriptor,
+ 0)
+};
+
#define DEFINE_TFS_BUILTIN_DESCRIPTOR(Name, ...) \
class Name##Descriptor : public CallInterfaceDescriptor { \
public: \
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.cc b/deps/v8/src/interpreter/bytecode-array-builder.cc
index 5be818eb2d..dcbe8029f9 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.cc
+++ b/deps/v8/src/interpreter/bytecode-array-builder.cc
@@ -701,14 +701,9 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadGlobal(const AstRawString* name,
}
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreGlobal(
- const AstRawString* name, int feedback_slot, LanguageMode language_mode) {
+ const AstRawString* name, int feedback_slot) {
size_t name_index = GetConstantPoolEntry(name);
- if (language_mode == LanguageMode::kSloppy) {
- OutputStaGlobalSloppy(name_index, feedback_slot);
- } else {
- DCHECK_EQ(language_mode, LanguageMode::kStrict);
- OutputStaGlobalStrict(name_index, feedback_slot);
- }
+ OutputStaGlobal(name_index, feedback_slot);
return *this;
}
@@ -1185,8 +1180,10 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::ReThrow() {
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::Abort(BailoutReason reason) {
- OutputAbort(reason);
+BytecodeArrayBuilder& BytecodeArrayBuilder::Abort(AbortReason reason) {
+ DCHECK_LT(reason, AbortReason::kLastErrorMessage);
+ DCHECK_GE(reason, AbortReason::kNoReason);
+ OutputAbort(static_cast<int>(reason));
return *this;
}
@@ -1280,10 +1277,10 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::RestoreGeneratorState(
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::RestoreGeneratorRegisters(
- Register generator, RegisterList registers) {
- OutputRestoreGeneratorRegisters(generator, registers,
- registers.register_count());
+BytecodeArrayBuilder& BytecodeArrayBuilder::ResumeGenerator(
+ Register generator, Register generator_state, RegisterList registers) {
+ OutputResumeGenerator(generator, generator_state, registers,
+ registers.register_count());
return *this;
}
@@ -1389,7 +1386,7 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntime(
BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntime(
Runtime::FunctionId function_id, Register arg) {
- return CallRuntime(function_id, RegisterList(arg.index(), 1));
+ return CallRuntime(function_id, RegisterList(arg));
}
BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntime(
@@ -1411,8 +1408,7 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntimeForPair(
BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntimeForPair(
Runtime::FunctionId function_id, Register arg, RegisterList return_pair) {
- return CallRuntimeForPair(function_id, RegisterList(arg.index(), 1),
- return_pair);
+ return CallRuntimeForPair(function_id, RegisterList(arg), return_pair);
}
BytecodeArrayBuilder& BytecodeArrayBuilder::CallJSRuntime(int context_index,
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.h b/deps/v8/src/interpreter/bytecode-array-builder.h
index 4063791a18..021222abe5 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.h
+++ b/deps/v8/src/interpreter/bytecode-array-builder.h
@@ -85,8 +85,8 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
// Global loads to the accumulator and stores from the accumulator.
BytecodeArrayBuilder& LoadGlobal(const AstRawString* name, int feedback_slot,
TypeofMode typeof_mode);
- BytecodeArrayBuilder& StoreGlobal(const AstRawString* name, int feedback_slot,
- LanguageMode language_mode);
+ BytecodeArrayBuilder& StoreGlobal(const AstRawString* name,
+ int feedback_slot);
// Load the object at |slot_index| at |depth| in the context chain starting
// with |context| into the accumulator.
@@ -404,7 +404,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
BytecodeArrayBuilder& Throw();
BytecodeArrayBuilder& ReThrow();
- BytecodeArrayBuilder& Abort(BailoutReason reason);
+ BytecodeArrayBuilder& Abort(AbortReason reason);
BytecodeArrayBuilder& Return();
BytecodeArrayBuilder& ThrowReferenceErrorIfHole(const AstRawString* name);
BytecodeArrayBuilder& ThrowSuperNotCalledIfHole();
@@ -431,8 +431,9 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
RegisterList registers,
int suspend_id);
BytecodeArrayBuilder& RestoreGeneratorState(Register generator);
- BytecodeArrayBuilder& RestoreGeneratorRegisters(Register generator,
- RegisterList registers);
+ BytecodeArrayBuilder& ResumeGenerator(Register generator,
+ Register generator_state,
+ RegisterList registers);
// Exception handling.
BytecodeArrayBuilder& MarkHandler(int handler_id,
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index 45f0d1eca9..ee94e7a2e2 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -835,6 +835,24 @@ class BytecodeGenerator::FeedbackSlotCache : public ZoneObject {
ZoneMap<Key, FeedbackSlot> map_;
};
+class BytecodeGenerator::IteratorRecord final {
+ public:
+ IteratorRecord(Register object_register, Register next_register,
+ IteratorType type = IteratorType::kNormal)
+ : type_(type), object_(object_register), next_(next_register) {
+ DCHECK(object_.is_valid() && next_.is_valid());
+ }
+
+ inline IteratorType type() const { return type_; }
+ inline Register object() const { return object_; }
+ inline Register next() const { return next_; }
+
+ private:
+ IteratorType type_;
+ Register object_;
+ Register next_;
+};
+
BytecodeGenerator::BytecodeGenerator(
CompilationInfo* info, const AstStringConstants* ast_string_constants)
: zone_(info->zone()),
@@ -1130,7 +1148,7 @@ void BytecodeGenerator::VisitIterationHeader(int first_suspend_id,
.JumpIfTrue(ToBooleanMode::kAlreadyBoolean, &not_resuming);
// Otherwise this is an error.
- builder()->Abort(BailoutReason::kInvalidJumpTableIndex);
+ builder()->Abort(AbortReason::kInvalidJumpTableIndex);
builder()->Bind(&not_resuming);
}
@@ -1162,7 +1180,7 @@ void BytecodeGenerator::BuildGeneratorPrologue() {
}
// We fall through when the generator state is not in the jump table.
// TODO(leszeks): Only generate this for debug builds.
- builder()->Abort(BailoutReason::kInvalidJumpTableIndex);
+ builder()->Abort(AbortReason::kInvalidJumpTableIndex);
// This is a regular call.
builder()
@@ -1674,6 +1692,7 @@ void BytecodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
builder()->SetExpressionAsStatementPosition(stmt->assign_iterator());
VisitForEffect(stmt->assign_iterator());
+ VisitForEffect(stmt->assign_next());
VisitIterationHeader(stmt, &loop_builder);
builder()->SetExpressionAsStatementPosition(stmt->next_result());
@@ -1712,9 +1731,11 @@ void BytecodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
}
try_control_builder.EndTry();
- // Create a catch scope that binds the exception.
- BuildNewLocalCatchContext(stmt->scope());
- builder()->StoreAccumulatorInRegister(context);
+ if (stmt->scope()) {
+ // Create a catch scope that binds the exception.
+ BuildNewLocalCatchContext(stmt->scope());
+ builder()->StoreAccumulatorInRegister(context);
+ }
// If requested, clear message object as we enter the catch block.
if (stmt->ShouldClearPendingException(outer_catch_prediction)) {
@@ -1725,7 +1746,11 @@ void BytecodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
builder()->LoadAccumulatorWithRegister(context);
// Evaluate the catch-block.
- VisitInScope(stmt->catch_block(), stmt->scope());
+ if (stmt->scope()) {
+ VisitInScope(stmt->catch_block(), stmt->scope());
+ } else {
+ VisitBlock(stmt->catch_block());
+ }
try_control_builder.EndCatch();
}
@@ -2069,6 +2094,8 @@ void BytecodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
}
void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+ expr->InitDepthAndFlags();
+
// Fast path for the empty object literal which doesn't need an
// AllocationSite.
if (expr->IsEmptyObjectLiteral()) {
@@ -2275,6 +2302,8 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
void BytecodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+ expr->InitDepthAndFlags();
+
// Deep-copy the literal boilerplate.
int literal_index = feedback_index(feedback_spec()->AddLiteralSlot());
if (expr->is_empty()) {
@@ -2290,31 +2319,25 @@ void BytecodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
builder()->CreateArrayLiteral(entry, literal_index, flags);
array_literals_.push_back(std::make_pair(expr, entry));
- Register index, literal;
+ Register index = register_allocator()->NewRegister();
+ Register literal = register_allocator()->NewRegister();
+ builder()->StoreAccumulatorInRegister(literal);
// We'll reuse the same literal slot for all of the non-constant
// subexpressions that use a keyed store IC.
// Evaluate all the non-constant subexpressions and store them into the
// newly cloned array.
- bool literal_in_accumulator = true;
FeedbackSlot slot;
- for (int array_index = 0; array_index < expr->values()->length();
- array_index++) {
- Expression* subexpr = expr->values()->at(array_index);
- if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
+ int array_index = 0;
+ ZoneList<Expression*>::iterator iter = expr->BeginValue();
+ for (; iter != expr->FirstSpreadOrEndValue(); ++iter, array_index++) {
+ Expression* subexpr = *iter;
DCHECK(!subexpr->IsSpread());
-
- if (literal_in_accumulator) {
- index = register_allocator()->NewRegister();
- literal = register_allocator()->NewRegister();
- builder()->StoreAccumulatorInRegister(literal);
- literal_in_accumulator = false;
- }
+ if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
if (slot.IsInvalid()) {
slot = feedback_spec()->AddKeyedStoreICSlot(language_mode());
}
-
builder()
->LoadLiteral(Smi::FromInt(array_index))
.StoreAccumulatorInRegister(index);
@@ -2323,10 +2346,68 @@ void BytecodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
language_mode());
}
- if (!literal_in_accumulator) {
- // Restore literal array into accumulator.
- builder()->LoadAccumulatorWithRegister(literal);
+ // Handle spread elements and elements following.
+ for (; iter != expr->EndValue(); ++iter) {
+ Expression* subexpr = *iter;
+ if (subexpr->IsSpread()) {
+ BuildArrayLiteralSpread(subexpr->AsSpread(), literal);
+ } else if (!subexpr->IsTheHoleLiteral()) {
+ // Perform %AppendElement(array, <subexpr>)
+ RegisterAllocationScope register_scope(this);
+ RegisterList args = register_allocator()->NewRegisterList(2);
+ builder()->MoveRegister(literal, args[0]);
+ VisitForRegisterValue(subexpr, args[1]);
+ builder()->CallRuntime(Runtime::kAppendElement, args);
+ } else {
+      // Perform ++<array>.length;
+      // TODO(caitp): Why can't we just %AppendElement(array, <The Hole>)?
+ auto length = ast_string_constants()->length_string();
+ builder()->LoadNamedProperty(
+ literal, length, feedback_index(feedback_spec()->AddLoadICSlot()));
+ builder()->UnaryOperation(
+ Token::INC, feedback_index(feedback_spec()->AddBinaryOpICSlot()));
+ builder()->StoreNamedProperty(
+ literal, length,
+ feedback_index(
+ feedback_spec()->AddStoreICSlot(LanguageMode::kStrict)),
+ LanguageMode::kStrict);
+ }
}
+
+ // Restore literal array into accumulator.
+ builder()->LoadAccumulatorWithRegister(literal);
+}
+
+void BytecodeGenerator::BuildArrayLiteralSpread(Spread* spread,
+ Register array) {
+ RegisterAllocationScope register_scope(this);
+ RegisterList args = register_allocator()->NewRegisterList(2);
+ builder()->MoveRegister(array, args[0]);
+ Register next_result = args[1];
+
+ builder()->SetExpressionAsStatementPosition(spread->expression());
+ IteratorRecord iterator =
+ BuildGetIteratorRecord(spread->expression(), IteratorType::kNormal);
+ LoopBuilder loop_builder(builder(), nullptr, nullptr);
+ loop_builder.LoopHeader();
+
+ // Call the iterator's .next() method. Break from the loop if the `done`
+ // property is truthy, otherwise load the value from the iterator result and
+ // append the argument.
+ BuildIteratorNext(iterator, next_result);
+ builder()->LoadNamedProperty(
+ next_result, ast_string_constants()->done_string(),
+ feedback_index(feedback_spec()->AddLoadICSlot()));
+ loop_builder.BreakIfTrue(ToBooleanMode::kConvertToBoolean);
+
+ loop_builder.LoopBody();
+ builder()
+ ->LoadNamedProperty(next_result, ast_string_constants()->value_string(),
+ feedback_index(feedback_spec()->AddLoadICSlot()))
+ .StoreAccumulatorInRegister(args[1])
+ .CallRuntime(Runtime::kAppendElement, args);
+ loop_builder.BindContinueTarget();
+ loop_builder.JumpToHeader(loop_depth_);
}
void BytecodeGenerator::VisitVariableProxy(VariableProxy* proxy) {
@@ -2557,8 +2638,7 @@ void BytecodeGenerator::BuildVariableAssignment(
// TODO(ishell): consider using FeedbackSlotCache for variables here.
FeedbackSlot slot =
feedback_spec()->AddStoreGlobalICSlot(language_mode());
- builder()->StoreGlobal(variable->raw_name(), feedback_index(slot),
- language_mode());
+ builder()->StoreGlobal(variable->raw_name(), feedback_index(slot));
break;
}
case VariableLocation::CONTEXT: {
@@ -2787,7 +2867,7 @@ void BytecodeGenerator::VisitCompoundAssignment(CompoundAssignment* expr) {
// accumulator. When the generator is resumed, the sent value is loaded in the
// accumulator.
void BytecodeGenerator::BuildSuspendPoint(int suspend_id) {
- RegisterList registers(0, register_allocator()->next_register_index());
+ RegisterList registers = register_allocator()->AllLiveRegisters();
// Save context, registers, and state. Then return.
builder()->SuspendGenerator(generator_object(), registers, suspend_id);
@@ -2798,19 +2878,10 @@ void BytecodeGenerator::BuildSuspendPoint(int suspend_id) {
// Upon resume, we continue here.
builder()->Bind(generator_jump_table_, suspend_id);
- // Clobbers all registers.
- builder()->RestoreGeneratorRegisters(generator_object(), registers);
-
- // Update state to indicate that we have finished resuming. Loop headers
- // rely on this.
- builder()
- ->LoadLiteral(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting))
- .StoreAccumulatorInRegister(generator_state_);
-
- // When resuming execution of a generator, module or async function, the sent
- // value is in the [[input_or_debug_pos]] slot.
- builder()->CallRuntime(Runtime::kInlineGeneratorGetInputOrDebugPos,
- generator_object());
+ // Clobbers all registers, updating the state to indicate that we have
+ // finished resuming and setting the accumulator to the [[input_or_debug_pos]]
+ // slot of the generator object.
+ builder()->ResumeGenerator(generator_object(), generator_state_, registers);
}
void BytecodeGenerator::VisitYield(Yield* expr) {
@@ -2903,7 +2974,9 @@ void BytecodeGenerator::VisitYield(Yield* expr) {
//
// let output; // uninitialized
//
-// let iterator = GetIterator(iterable);
+// let iteratorRecord = GetIterator(iterable);
+// let iterator = iteratorRecord.[[Iterator]];
+// let next = iteratorRecord.[[NextMethod]];
// let input = undefined;
// let resumeMode = kNext;
//
@@ -2912,25 +2985,25 @@ void BytecodeGenerator::VisitYield(Yield* expr) {
// // Forward input according to resumeMode and obtain output.
// switch (resumeMode) {
// case kNext:
-// output = iterator.next(input);
+//       output = next.[[Call]](iterator, « »);
// break;
// case kReturn:
// let iteratorReturn = iterator.return;
// if (IS_NULL_OR_UNDEFINED(iteratorReturn)) return input;
-// output = %_Call(iteratorReturn, iterator, input);
+// output = iteratorReturn.[[Call]](iterator, «input»);
// break;
// case kThrow:
// let iteratorThrow = iterator.throw;
// if (IS_NULL_OR_UNDEFINED(iteratorThrow)) {
// let iteratorReturn = iterator.return;
// if (!IS_NULL_OR_UNDEFINED(iteratorReturn)) {
-// output = %_Call(iteratorReturn, iterator);
+// output = iteratorReturn.[[Call]](iterator, « »);
// if (IS_ASYNC_GENERATOR) output = await output;
// if (!IS_RECEIVER(output)) %ThrowIterResultNotAnObject(output);
// }
// throw MakeTypeError(kThrowMethodMissing);
// }
-// output = %_Call(iteratorThrow, iterator, input);
+// output = iteratorThrow.[[Call]](iterator, «input»);
// break;
// }
//
@@ -2963,13 +3036,12 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) {
{
RegisterAllocationScope register_scope(this);
-
RegisterList iterator_and_input = register_allocator()->NewRegisterList(2);
+ IteratorRecord iterator = BuildGetIteratorRecord(
+ expr->expression(),
+ register_allocator()->NewRegister() /* next method */,
+ iterator_and_input[0], iterator_type);
- Register iterator = iterator_and_input[0];
-
- BuildGetIterator(expr->expression(), iterator_type);
- builder()->StoreAccumulatorInRegister(iterator);
Register input = iterator_and_input[1];
builder()->LoadUndefined().StoreAccumulatorInRegister(input);
builder()
@@ -3000,109 +3072,46 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) {
// {JSGeneratorObject::kNext} in this case.
STATIC_ASSERT(JSGeneratorObject::kNext == 0);
{
- RegisterAllocationScope register_scope(this);
- // output = iterator.next(input);
- Register iterator_next = register_allocator()->NewRegister();
- FeedbackSlot load_slot = feedback_spec()->AddLoadICSlot();
- FeedbackSlot call_slot = feedback_spec()->AddCallICSlot();
- builder()
- ->LoadNamedProperty(iterator,
- ast_string_constants()->next_string(),
- feedback_index(load_slot))
- .StoreAccumulatorInRegister(iterator_next)
- .CallProperty(iterator_next, iterator_and_input,
- feedback_index(call_slot))
- .Jump(after_switch.New());
+ FeedbackSlot slot = feedback_spec()->AddCallICSlot();
+ builder()->CallProperty(iterator.next(), iterator_and_input,
+ feedback_index(slot));
+ builder()->Jump(after_switch.New());
}
STATIC_ASSERT(JSGeneratorObject::kReturn == 1);
builder()->Bind(switch_jump_table, JSGeneratorObject::kReturn);
{
- RegisterAllocationScope register_scope(this);
- BytecodeLabels return_input(zone());
- // Trigger return from within the inner iterator.
- Register iterator_return = register_allocator()->NewRegister();
- FeedbackSlot load_slot = feedback_spec()->AddLoadICSlot();
- FeedbackSlot call_slot = feedback_spec()->AddCallICSlot();
- builder()
- ->LoadNamedProperty(iterator,
- ast_string_constants()->return_string(),
- feedback_index(load_slot))
- .JumpIfUndefined(return_input.New())
- .JumpIfNull(return_input.New())
- .StoreAccumulatorInRegister(iterator_return)
- .CallProperty(iterator_return, iterator_and_input,
- feedback_index(call_slot))
- .Jump(after_switch.New());
-
- return_input.Bind(builder());
- {
- builder()->LoadAccumulatorWithRegister(input);
- if (iterator_type == IteratorType::kAsync) {
- execution_control()->AsyncReturnAccumulator();
- } else {
- execution_control()->ReturnAccumulator();
- }
+ const AstRawString* return_string =
+ ast_string_constants()->return_string();
+ BytecodeLabels no_return_method(zone());
+
+ BuildCallIteratorMethod(iterator.object(), return_string,
+ iterator_and_input, after_switch.New(),
+ &no_return_method);
+ no_return_method.Bind(builder());
+ builder()->LoadAccumulatorWithRegister(input);
+ if (iterator_type == IteratorType::kAsync) {
+ execution_control()->AsyncReturnAccumulator();
+ } else {
+ execution_control()->ReturnAccumulator();
}
}
STATIC_ASSERT(JSGeneratorObject::kThrow == 2);
builder()->Bind(switch_jump_table, JSGeneratorObject::kThrow);
{
- BytecodeLabels iterator_throw_is_undefined(zone());
- {
- RegisterAllocationScope register_scope(this);
- // If the inner iterator has a throw method, use it to trigger an
- // exception inside.
- Register iterator_throw = register_allocator()->NewRegister();
- FeedbackSlot load_slot = feedback_spec()->AddLoadICSlot();
- FeedbackSlot call_slot = feedback_spec()->AddCallICSlot();
- builder()
- ->LoadNamedProperty(iterator,
- ast_string_constants()->throw_string(),
- feedback_index(load_slot))
- .JumpIfUndefined(iterator_throw_is_undefined.New())
- .JumpIfNull(iterator_throw_is_undefined.New())
- .StoreAccumulatorInRegister(iterator_throw);
- builder()
- ->CallProperty(iterator_throw, iterator_and_input,
- feedback_index(call_slot))
- .Jump(after_switch.New());
- }
-
- iterator_throw_is_undefined.Bind(builder());
- {
- RegisterAllocationScope register_scope(this);
- BytecodeLabels throw_throw_method_missing(zone());
- Register iterator_return = register_allocator()->NewRegister();
- // If iterator.throw does not exist, try to use iterator.return to
- // inform the iterator that it should stop.
- FeedbackSlot load_slot = feedback_spec()->AddLoadICSlot();
- FeedbackSlot call_slot = feedback_spec()->AddCallICSlot();
- builder()
- ->LoadNamedProperty(iterator,
- ast_string_constants()->return_string(),
- feedback_index(load_slot))
- .StoreAccumulatorInRegister(iterator_return);
- builder()
- ->JumpIfUndefined(throw_throw_method_missing.New())
- .JumpIfNull(throw_throw_method_missing.New())
- .CallProperty(iterator_return, RegisterList(iterator),
- feedback_index(call_slot));
-
- if (iterator_type == IteratorType::kAsync) {
- // For async generators, await the result of the .return() call.
- BuildAwait(expr->await_iterator_close_suspend_id());
- builder()->StoreAccumulatorInRegister(output);
- }
-
- builder()
- ->JumpIfJSReceiver(throw_throw_method_missing.New())
- .CallRuntime(Runtime::kThrowIteratorResultNotAnObject, output);
-
- throw_throw_method_missing.Bind(builder());
- builder()->CallRuntime(Runtime::kThrowThrowMethodMissing);
- }
+ const AstRawString* throw_string =
+ ast_string_constants()->throw_string();
+ BytecodeLabels no_throw_method(zone());
+ BuildCallIteratorMethod(iterator.object(), throw_string,
+ iterator_and_input, after_switch.New(),
+ &no_throw_method);
+
+ // If there is no "throw" method, perform IteratorClose, and finally
+ // throw a TypeError.
+ no_throw_method.Bind(builder());
+ BuildIteratorClose(iterator, expr->await_iterator_close_suspend_id());
+ builder()->CallRuntime(Runtime::kThrowThrowMethodMissing);
}
after_switch.Bind(builder());
@@ -3355,6 +3364,11 @@ void BytecodeGenerator::VisitProperty(Property* expr) {
}
}
+void BytecodeGenerator::VisitResolvedProperty(ResolvedProperty* expr) {
+ // Handled by VisitCall().
+ UNREACHABLE();
+}
+
void BytecodeGenerator::VisitArguments(ZoneList<Expression*>* args,
RegisterList* arg_regs) {
// Visit arguments.
@@ -3397,6 +3411,13 @@ void BytecodeGenerator::VisitCall(Call* expr) {
VisitPropertyLoadForRegister(args.last_register(), property, callee);
break;
}
+ case Call::RESOLVED_PROPERTY_CALL: {
+ ResolvedProperty* resolved = callee_expr->AsResolvedProperty();
+ VisitAndPushIntoRegisterList(resolved->object(), &args);
+ VisitForAccumulatorValue(resolved->property());
+ builder()->StoreAccumulatorInRegister(callee);
+ break;
+ }
case Call::GLOBAL_CALL: {
// Receiver is undefined for global calls.
if (!is_spread_call) {
@@ -3421,16 +3442,15 @@ void BytecodeGenerator::VisitCall(Call* expr) {
Register name = register_allocator()->NewRegister();
// Call %LoadLookupSlotForCall to get the callee and receiver.
- DCHECK(Register::AreContiguous(callee, receiver));
- RegisterList result_pair(callee.index(), 2);
- USE(receiver);
-
+ RegisterList result_pair = register_allocator()->NewRegisterList(2);
Variable* variable = callee_expr->AsVariableProxy()->var();
builder()
->LoadLiteral(variable->raw_name())
.StoreAccumulatorInRegister(name)
.CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, name,
- result_pair);
+ result_pair)
+ .MoveRegister(result_pair[0], callee)
+ .MoveRegister(result_pair[1], receiver);
}
break;
}
@@ -3506,7 +3526,8 @@ void BytecodeGenerator::VisitCall(Call* expr) {
DCHECK(!implicit_undefined_receiver);
builder()->CallWithSpread(callee, args, feedback_slot_index);
} else if (call_type == Call::NAMED_PROPERTY_CALL ||
- call_type == Call::KEYED_PROPERTY_CALL) {
+ call_type == Call::KEYED_PROPERTY_CALL ||
+ call_type == Call::RESOLVED_PROPERTY_CALL) {
DCHECK(!implicit_undefined_receiver);
builder()->CallProperty(callee, args, feedback_slot_index);
} else if (implicit_undefined_receiver) {
@@ -4075,6 +4096,91 @@ void BytecodeGenerator::BuildGetIterator(Expression* iterable,
}
}
+// Returns an IteratorRecord which is valid for the lifetime of the current
+// register_allocation_scope.
+BytecodeGenerator::IteratorRecord BytecodeGenerator::BuildGetIteratorRecord(
+ Expression* iterable, Register next, Register object, IteratorType hint) {
+ DCHECK(next.is_valid() && object.is_valid());
+ BuildGetIterator(iterable, hint);
+
+ builder()
+ ->StoreAccumulatorInRegister(object)
+ .LoadNamedProperty(object, ast_string_constants()->next_string(),
+ feedback_index(feedback_spec()->AddLoadICSlot()))
+ .StoreAccumulatorInRegister(next);
+ return IteratorRecord(object, next, hint);
+}
+
+BytecodeGenerator::IteratorRecord BytecodeGenerator::BuildGetIteratorRecord(
+ Expression* iterable, IteratorType hint) {
+ Register next = register_allocator()->NewRegister();
+ Register object = register_allocator()->NewRegister();
+ return BuildGetIteratorRecord(iterable, next, object, hint);
+}
+
+void BytecodeGenerator::BuildIteratorNext(const IteratorRecord& iterator,
+ Register next_result) {
+ DCHECK(next_result.is_valid());
+ builder()->CallProperty(iterator.next(), RegisterList(iterator.object()),
+ feedback_index(feedback_spec()->AddCallICSlot()));
+
+ // TODO(caitp): support async IteratorNext here.
+
+ BytecodeLabel is_object;
+ builder()
+ ->StoreAccumulatorInRegister(next_result)
+ .JumpIfJSReceiver(&is_object)
+ .CallRuntime(Runtime::kThrowIteratorResultNotAnObject, next_result)
+ .Bind(&is_object);
+}
+
+void BytecodeGenerator::BuildCallIteratorMethod(Register iterator,
+ const AstRawString* method_name,
+ RegisterList receiver_and_args,
+ BytecodeLabel* if_called,
+ BytecodeLabels* if_notcalled) {
+ RegisterAllocationScope register_scope(this);
+
+ Register method = register_allocator()->NewRegister();
+ FeedbackSlot slot = feedback_spec()->AddLoadICSlot();
+ builder()
+ ->LoadNamedProperty(iterator, method_name, feedback_index(slot))
+ .JumpIfUndefined(if_notcalled->New())
+ .JumpIfNull(if_notcalled->New())
+ .StoreAccumulatorInRegister(method)
+ .CallProperty(method, receiver_and_args,
+ feedback_index(feedback_spec()->AddCallICSlot()))
+ .Jump(if_called);
+}
+
+void BytecodeGenerator::BuildIteratorClose(const IteratorRecord& iterator,
+ int suspend_id) {
+ RegisterAllocationScope register_scope(this);
+ BytecodeLabels done(zone());
+ BytecodeLabel if_called;
+ RegisterList args = RegisterList(iterator.object());
+ BuildCallIteratorMethod(iterator.object(),
+ ast_string_constants()->return_string(), args,
+ &if_called, &done);
+ builder()->Bind(&if_called);
+
+ if (iterator.type() == IteratorType::kAsync) {
+ DCHECK_GE(suspend_id, 0);
+ BuildAwait(suspend_id);
+ }
+
+ builder()->JumpIfJSReceiver(done.New());
+ {
+ RegisterAllocationScope register_scope(this);
+ Register return_result = register_allocator()->NewRegister();
+ builder()
+ ->StoreAccumulatorInRegister(return_result)
+ .CallRuntime(Runtime::kThrowIteratorResultNotAnObject, return_result);
+ }
+
+ done.Bind(builder());
+}
+
void BytecodeGenerator::VisitGetIterator(GetIterator* expr) {
builder()->SetExpressionPosition(expr);
BuildGetIterator(expr->iterable(), expr->hint());
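[Editor's note: a standalone sketch (plain C++, not V8 types) of the control flow that the new BuildArrayLiteralSpread emits, per the hunks above: fetch an IteratorRecord once so the next method is loaded a single time, then loop calling that cached method, breaking when `done` is truthy and appending `value` otherwise.]

#include <functional>
#include <vector>

struct IterResult {
  bool done;
  int value;
};

// Mirrors the (object, next method) pair the generated bytecode keeps in
// registers; the object is folded into the bound next callable here.
struct IteratorRecord {
  std::function<IterResult()> next;
};

void AppendSpread(std::vector<int>& array, const IteratorRecord& iterator) {
  for (;;) {
    IterResult result = iterator.next();  // analogue of BuildIteratorNext
    if (result.done) break;               // BreakIfTrue on the `done` load
    array.push_back(result.value);        // Runtime::kAppendElement
  }
}

int main() {
  int i = 0;
  IteratorRecord iterator{[&] { return IterResult{i == 3, i++}; }};
  std::vector<int> array{10, 20};
  AppendSpread(array, iterator);  // array is now {10, 20, 0, 1, 2}
}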
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
index 9b7b572db3..f9de9550fe 100644
--- a/deps/v8/src/interpreter/bytecode-generator.h
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -56,6 +56,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
class EffectResultScope;
class FeedbackSlotCache;
class GlobalDeclarationsBuilder;
+ class IteratorRecord;
class NaryCodeCoverageSlots;
class RegisterAllocationScope;
class TestResultScope;
@@ -151,6 +152,26 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void BuildGetIterator(Expression* iterable, IteratorType hint);
+ // Create an IteratorRecord with pre-allocated registers holding the next
+ // method and iterator object.
+ IteratorRecord BuildGetIteratorRecord(Expression* iterable,
+ Register iterator_next,
+ Register iterator_object,
+ IteratorType hint);
+
+ // Create an IteratorRecord allocating new registers to hold the next method
+ // and iterator object.
+ IteratorRecord BuildGetIteratorRecord(Expression* iterable,
+ IteratorType hint);
+ void BuildIteratorNext(const IteratorRecord& iterator, Register next_result);
+ void BuildIteratorClose(const IteratorRecord& iterator, int suspend_id = -1);
+ void BuildCallIteratorMethod(Register iterator, const AstRawString* method,
+ RegisterList receiver_and_args,
+ BytecodeLabel* if_called,
+ BytecodeLabels* if_notcalled);
+
+ void BuildArrayLiteralSpread(Spread* spread, Register array);
+
void AllocateTopLevelRegisters();
void VisitArgumentsObject(Variable* variable);
void VisitRestArgumentsArray(Variable* rest);
diff --git a/deps/v8/src/interpreter/bytecode-register-allocator.h b/deps/v8/src/interpreter/bytecode-register-allocator.h
index ff335d6f20..8509bd43e0 100644
--- a/deps/v8/src/interpreter/bytecode-register-allocator.h
+++ b/deps/v8/src/interpreter/bytecode-register-allocator.h
@@ -87,6 +87,11 @@ class BytecodeRegisterAllocator final {
return reg.index() < next_register_index_;
}
+ // Returns a register list for all currently live registers.
+ RegisterList AllLiveRegisters() const {
+ return RegisterList(0, next_register_index());
+ }
+
void set_observer(Observer* observer) { observer_ = observer; }
int next_register_index() const { return next_register_index_; }
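[Editor's note: a minimal model (not V8's allocator) of the AllLiveRegisters() helper added above. Because the allocator is a bump pointer, the live registers are exactly the prefix [0, next_register_index), so the list can be rebuilt from that single index.]

#include <cassert>

struct RegisterList {
  int first_index;
  int count;
};

class BytecodeRegisterAllocator {
 public:
  int NewRegister() { return next_register_index_++; }
  RegisterList AllLiveRegisters() const {
    return RegisterList{0, next_register_index_};
  }

 private:
  int next_register_index_ = 0;
};

int main() {
  BytecodeRegisterAllocator allocator;
  allocator.NewRegister();
  allocator.NewRegister();
  RegisterList live = allocator.AllLiveRegisters();
  assert(live.first_index == 0 && live.count == 2);
}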
diff --git a/deps/v8/src/interpreter/bytecode-register-optimizer.cc b/deps/v8/src/interpreter/bytecode-register-optimizer.cc
index 94dc930920..d75e45967b 100644
--- a/deps/v8/src/interpreter/bytecode-register-optimizer.cc
+++ b/deps/v8/src/interpreter/bytecode-register-optimizer.cc
@@ -448,7 +448,7 @@ RegisterList BytecodeRegisterOptimizer::GetInputRegisterList(
if (reg_list.register_count() == 1) {
// If there is only a single register, treat it as a normal input register.
Register reg(GetInputRegister(reg_list.first_register()));
- return RegisterList(reg.index(), 1);
+ return RegisterList(reg);
} else {
int start_index = reg_list.first_register().index();
for (int i = 0; i < reg_list.register_count(); ++i) {
diff --git a/deps/v8/src/interpreter/bytecode-register-optimizer.h b/deps/v8/src/interpreter/bytecode-register-optimizer.h
index fababcf19e..92673d9cac 100644
--- a/deps/v8/src/interpreter/bytecode-register-optimizer.h
+++ b/deps/v8/src/interpreter/bytecode-register-optimizer.h
@@ -67,7 +67,7 @@ class V8_EXPORT_PRIVATE BytecodeRegisterOptimizer final
if (Bytecodes::IsJump(bytecode) || Bytecodes::IsSwitch(bytecode) ||
bytecode == Bytecode::kDebugger ||
bytecode == Bytecode::kSuspendGenerator ||
- bytecode == Bytecode::kRestoreGeneratorRegisters) {
+ bytecode == Bytecode::kResumeGenerator) {
// All state must be flushed before emitting
// - a jump bytecode (as the register equivalents at the jump target
// aren't known)
diff --git a/deps/v8/src/interpreter/bytecode-register.h b/deps/v8/src/interpreter/bytecode-register.h
index 07ed756522..b5420f7e72 100644
--- a/deps/v8/src/interpreter/bytecode-register.h
+++ b/deps/v8/src/interpreter/bytecode-register.h
@@ -61,9 +61,9 @@ class V8_EXPORT_PRIVATE Register final {
}
static bool AreContiguous(Register reg1, Register reg2,
- Register reg3 = Register(),
- Register reg4 = Register(),
- Register reg5 = Register());
+ Register reg3 = invalid_value(),
+ Register reg4 = invalid_value(),
+ Register reg5 = invalid_value());
std::string ToString(int parameter_count) const;
@@ -98,14 +98,11 @@ class V8_EXPORT_PRIVATE Register final {
class RegisterList {
public:
- RegisterList() : first_reg_index_(Register().index()), register_count_(0) {}
- RegisterList(int first_reg_index, int register_count)
- : first_reg_index_(first_reg_index), register_count_(register_count) {}
+ RegisterList()
+ : first_reg_index_(Register::invalid_value().index()),
+ register_count_(0) {}
explicit RegisterList(Register r) : RegisterList(r.index(), 1) {}
- // Increases the size of the register list by one.
- void IncrementRegisterCount() { register_count_++; }
-
// Returns a new RegisterList which is a truncated version of this list, with
// |count| registers.
const RegisterList Truncate(int new_count) {
@@ -130,6 +127,17 @@ class RegisterList {
int register_count() const { return register_count_; }
private:
+ friend class BytecodeRegisterAllocator;
+ friend class BytecodeDecoder;
+ friend class InterpreterTester;
+ friend class BytecodeUtils;
+
+ RegisterList(int first_reg_index, int register_count)
+ : first_reg_index_(first_reg_index), register_count_(register_count) {}
+
+ // Increases the size of the register list by one.
+ void IncrementRegisterCount() { register_count_++; }
+
int first_reg_index_;
int register_count_;
};
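[Editor's note: a condensed illustration of the encapsulation change above, in plain C++. Making the (index, count) constructor private with a short friend list means arbitrary code can no longer fabricate a RegisterList over registers it does not own; presumably only the befriended allocator-side classes can. The Allocator name below is illustrative.]

class Allocator;

class RegisterList {
 public:
  RegisterList() : first_reg_index_(-1), register_count_(0) {}
  explicit RegisterList(int r) : RegisterList(r, 1) {}

 private:
  friend class Allocator;
  RegisterList(int first_reg_index, int register_count)
      : first_reg_index_(first_reg_index), register_count_(register_count) {}
  int first_reg_index_;
  int register_count_;
};

class Allocator {
 public:
  RegisterList NewRegisterList(int count) {
    RegisterList list(next_, count);  // OK: Allocator is a friend
    next_ += count;
    return list;
  }

 private:
  int next_ = 0;
};

int main() {
  Allocator allocator;
  RegisterList args = allocator.NewRegisterList(2);
  (void)args;
  // RegisterList bad(3, 5);  // would not compile: the constructor is private
}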
diff --git a/deps/v8/src/interpreter/bytecodes.h b/deps/v8/src/interpreter/bytecodes.h
index 2d3fc2c96e..ce01566d52 100644
--- a/deps/v8/src/interpreter/bytecodes.h
+++ b/deps/v8/src/interpreter/bytecodes.h
@@ -42,10 +42,7 @@ namespace interpreter {
V(LdaGlobal, AccumulatorUse::kWrite, OperandType::kIdx, OperandType::kIdx) \
V(LdaGlobalInsideTypeof, AccumulatorUse::kWrite, OperandType::kIdx, \
OperandType::kIdx) \
- V(StaGlobalSloppy, AccumulatorUse::kRead, OperandType::kIdx, \
- OperandType::kIdx) \
- V(StaGlobalStrict, AccumulatorUse::kRead, OperandType::kIdx, \
- OperandType::kIdx) \
+ V(StaGlobal, AccumulatorUse::kRead, OperandType::kIdx, OperandType::kIdx) \
\
/* Context operations */ \
V(PushContext, AccumulatorUse::kRead, OperandType::kRegOut) \
@@ -320,8 +317,8 @@ namespace interpreter {
V(RestoreGeneratorState, AccumulatorUse::kWrite, OperandType::kReg) \
V(SuspendGenerator, AccumulatorUse::kNone, OperandType::kReg, \
OperandType::kRegList, OperandType::kRegCount, OperandType::kUImm) \
- V(RestoreGeneratorRegisters, AccumulatorUse::kNone, OperandType::kReg, \
- OperandType::kRegOutList, OperandType::kRegCount) \
+ V(ResumeGenerator, AccumulatorUse::kWrite, OperandType::kReg, \
+ OperandType::kRegOut, OperandType::kRegOutList, OperandType::kRegCount) \
\
/* Debugger */ \
V(Debugger, AccumulatorUse::kNone) \
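[Editor's note: the bytecode list edited above is an X-macro: one V(...) list expanded several times so the enum, names, and operand metadata stay in sync, which is why StaGlobal and ResumeGenerator only need a one-line change each. A toy version of the same idiom, with made-up bytecode names:]

#include <cstdio>

#define TOY_BYTECODE_LIST(V) \
  V(LdaGlobal)               \
  V(StaGlobal)               \
  V(ResumeGenerator)

enum class ToyBytecode {
#define DECLARE(Name) k##Name,
  TOY_BYTECODE_LIST(DECLARE)
#undef DECLARE
};

const char* ToyBytecodeName(ToyBytecode b) {
  switch (b) {
#define CASE(Name)           \
  case ToyBytecode::k##Name: \
    return #Name;
    TOY_BYTECODE_LIST(CASE)
#undef CASE
  }
  return "";
}

int main() { std::printf("%s\n", ToyBytecodeName(ToyBytecode::kStaGlobal)); }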
diff --git a/deps/v8/src/interpreter/constant-array-builder.cc b/deps/v8/src/interpreter/constant-array-builder.cc
index 70b8bc5c1a..47bb955374 100644
--- a/deps/v8/src/interpreter/constant-array-builder.cc
+++ b/deps/v8/src/interpreter/constant-array-builder.cc
@@ -115,7 +115,7 @@ void ConstantArrayBuilder::ConstantArraySlice::CheckAllElementsAreUnique(
for (const Entry& prev_entry : constants_) {
os << i++ << ": " << Brief(*prev_entry.ToHandle(isolate)) << std::endl;
}
- FATAL(os.str().c_str());
+ FATAL("%s", os.str().c_str());
}
}
}
diff --git a/deps/v8/src/interpreter/handler-table-builder.cc b/deps/v8/src/interpreter/handler-table-builder.cc
index 2ff7f2130a..4b6c44b95d 100644
--- a/deps/v8/src/interpreter/handler-table-builder.cc
+++ b/deps/v8/src/interpreter/handler-table-builder.cc
@@ -34,7 +34,7 @@ Handle<HandlerTable> HandlerTableBuilder::ToHandlerTable(Isolate* isolate) {
int HandlerTableBuilder::NewHandlerEntry() {
int handler_id = static_cast<int>(entries_.size());
- Entry entry = {0, 0, 0, Register(), HandlerTable::UNCAUGHT};
+ Entry entry = {0, 0, 0, Register::invalid_value(), HandlerTable::UNCAUGHT};
entries_.push_back(entry);
return handler_id;
}
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
index e4cc104b76..846b69281e 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.cc
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -577,7 +577,7 @@ void InterpreterAssembler::CallEpilogue() {
Node* stack_pointer_before_call = stack_pointer_before_call_;
stack_pointer_before_call_ = nullptr;
AbortIfWordNotEqual(stack_pointer_before_call, stack_pointer_after_call,
- kUnexpectedStackPointer);
+ AbortReason::kUnexpectedStackPointer);
}
}
@@ -586,7 +586,11 @@ void InterpreterAssembler::IncrementCallCount(Node* feedback_vector,
Comment("increment call count");
Node* call_count =
LoadFeedbackVectorSlot(feedback_vector, slot_id, kPointerSize);
- Node* new_count = SmiAdd(call_count, SmiConstant(1));
+ // The lowest {CallICNexus::CallCountField::kShift} bits of the call
+ // count are used as flags. To increment the call count by 1 we hence
+ // have to increment by 1 << {CallICNexus::CallCountField::kShift}.
+ Node* new_count =
+ SmiAdd(call_count, SmiConstant(1 << CallICNexus::CallCountField::kShift));
// Count is Smi, so we don't need a write barrier.
StoreFeedbackVectorSlot(feedback_vector, slot_id, new_count,
SKIP_WRITE_BARRIER, kPointerSize);
@@ -1309,7 +1313,6 @@ void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
break;
default:
UNREACHABLE();
- base_index = nullptr;
}
Node* target_index = IntPtrAdd(base_index, next_bytecode);
Node* target_code_entry =
@@ -1350,20 +1353,20 @@ Node* InterpreterAssembler::LoadOSRNestingLevel() {
MachineType::Int8());
}
-void InterpreterAssembler::Abort(BailoutReason bailout_reason) {
+void InterpreterAssembler::Abort(AbortReason abort_reason) {
disable_stack_check_across_call_ = true;
- Node* abort_id = SmiConstant(bailout_reason);
+ Node* abort_id = SmiConstant(abort_reason);
CallRuntime(Runtime::kAbort, GetContext(), abort_id);
disable_stack_check_across_call_ = false;
}
void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs,
- BailoutReason bailout_reason) {
+ AbortReason abort_reason) {
Label ok(this), abort(this, Label::kDeferred);
Branch(WordEqual(lhs, rhs), &ok, &abort);
BIND(&abort);
- Abort(bailout_reason);
+ Abort(abort_reason);
Goto(&ok);
BIND(&ok);
@@ -1383,7 +1386,7 @@ void InterpreterAssembler::MaybeDropFrames(Node* context) {
// We don't expect this call to return since the frame dropper tears down
// the stack and jumps into the function on the target frame to restart it.
CallStub(CodeFactory::FrameDropperTrampoline(isolate()), context, restart_fp);
- Abort(kUnexpectedReturnFromFrameDropper);
+ Abort(AbortReason::kUnexpectedReturnFromFrameDropper);
Goto(&ok);
BIND(&ok);
@@ -1442,7 +1445,7 @@ void InterpreterAssembler::AbortIfRegisterCountInvalid(Node* register_file,
Branch(UintPtrLessThanOrEqual(register_count, array_size), &ok, &abort);
BIND(&abort);
- Abort(kInvalidRegisterFileInGenerator);
+ Abort(AbortReason::kInvalidRegisterFileInGenerator);
Goto(&ok);
BIND(&ok);
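[Editor's note: a worked standalone example of the IncrementCallCount comment above: when the low kShift bits of the stored value are flag bits, "count + 1" must be encoded as "stored + (1 << kShift)". The field layout mirrors what the comment says about CallICNexus::CallCountField, but the shift width below is an assumption for illustration and the code is plain C++.]

#include <cassert>
#include <cstdint>

constexpr int kShift = 1;  // assumed width of the flag area
constexpr uint32_t kFlagMask = (1u << kShift) - 1;

uint32_t IncrementCallCount(uint32_t stored) {
  return stored + (1u << kShift);  // leaves the flag bits untouched
}

uint32_t DecodeCallCount(uint32_t stored) { return stored >> kShift; }

int main() {
  uint32_t stored = (3u << kShift) | 1u;  // count 3, one flag bit set
  stored = IncrementCallCount(stored);
  assert(DecodeCallCount(stored) == 4);
  assert((stored & kFlagMask) == 1u);  // flag preserved
}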
diff --git a/deps/v8/src/interpreter/interpreter-assembler.h b/deps/v8/src/interpreter/interpreter-assembler.h
index 2b38508441..63d1709145 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.h
+++ b/deps/v8/src/interpreter/interpreter-assembler.h
@@ -225,10 +225,10 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// Dispatch bytecode as wide operand variant.
void DispatchWide(OperandScale operand_scale);
- // Abort with the given bailout reason.
- void Abort(BailoutReason bailout_reason);
+ // Abort with the given abort reason.
+ void Abort(AbortReason abort_reason);
void AbortIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
- BailoutReason bailout_reason);
+ AbortReason abort_reason);
// Abort if |register_count| is invalid for given register file array.
void AbortIfRegisterCountInvalid(compiler::Node* register_file,
compiler::Node* register_count);
diff --git a/deps/v8/src/interpreter/interpreter-generator.cc b/deps/v8/src/interpreter/interpreter-generator.cc
index 1665aff29b..5dabc13ea0 100644
--- a/deps/v8/src/interpreter/interpreter-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-generator.cc
@@ -158,68 +158,29 @@ class InterpreterLoadGlobalAssembler : public InterpreterAssembler {
void LdaGlobal(int slot_operand_index, int name_operand_index,
TypeofMode typeof_mode) {
- // Must be kept in sync with AccessorAssembler::LoadGlobalIC.
-
- // Load the global via the LoadGlobalIC.
- Node* feedback_vector = LoadFeedbackVector();
+ TNode<FeedbackVector> feedback_vector = CAST(LoadFeedbackVector());
Node* feedback_slot = BytecodeOperandIdx(slot_operand_index);
AccessorAssembler accessor_asm(state());
+ Label done(this);
+ Variable var_result(this, MachineRepresentation::kTagged);
+ ExitPoint exit_point(this, &done, &var_result);
- Label try_handler(this, Label::kDeferred), miss(this, Label::kDeferred);
-
- // Fast path without frame construction for the data case.
- {
- Label done(this);
- Variable var_result(this, MachineRepresentation::kTagged);
- ExitPoint exit_point(this, &done, &var_result);
+ LazyNode<Context> lazy_context = [=] { return CAST(GetContext()); };
- accessor_asm.LoadGlobalIC_TryPropertyCellCase(
- feedback_vector, feedback_slot, &exit_point, &try_handler, &miss,
- CodeStubAssembler::INTPTR_PARAMETERS);
-
- BIND(&done);
- SetAccumulator(var_result.value());
- Dispatch();
- }
-
- // Slow path with frame construction.
- {
- Label done(this);
- Variable var_result(this, MachineRepresentation::kTagged);
- ExitPoint exit_point(this, &done, &var_result);
-
- BIND(&try_handler);
- {
- Node* context = GetContext();
- Node* smi_slot = SmiTag(feedback_slot);
- Node* name_index = BytecodeOperandIdx(name_operand_index);
- Node* name = LoadConstantPoolEntry(name_index);
-
- AccessorAssembler::LoadICParameters params(context, nullptr, name,
- smi_slot, feedback_vector);
- accessor_asm.LoadGlobalIC_TryHandlerCase(&params, typeof_mode,
- &exit_point, &miss);
- }
+ LazyNode<Name> lazy_name = [=] {
+ Node* name_index = BytecodeOperandIdx(name_operand_index);
+ Node* name = LoadConstantPoolEntry(name_index);
+ return CAST(name);
+ };
- BIND(&miss);
- {
- Node* context = GetContext();
- Node* smi_slot = SmiTag(feedback_slot);
- Node* name_index = BytecodeOperandIdx(name_operand_index);
- Node* name = LoadConstantPoolEntry(name_index);
-
- AccessorAssembler::LoadICParameters params(context, nullptr, name,
- smi_slot, feedback_vector);
- accessor_asm.LoadGlobalIC_MissCase(&params, &exit_point);
- }
+ accessor_asm.LoadGlobalIC(feedback_vector, feedback_slot, lazy_context,
+ lazy_name, typeof_mode, &exit_point,
+ CodeStubAssembler::INTPTR_PARAMETERS);
- BIND(&done);
- {
- SetAccumulator(var_result.value());
- Dispatch();
- }
- }
+ BIND(&done);
+ SetAccumulator(var_result.value());
+ Dispatch();
}
};
@@ -245,50 +206,23 @@ IGNITION_HANDLER(LdaGlobalInsideTypeof, InterpreterLoadGlobalAssembler) {
LdaGlobal(kSlotOperandIndex, kNameOperandIndex, INSIDE_TYPEOF);
}
-class InterpreterStoreGlobalAssembler : public InterpreterAssembler {
- public:
- InterpreterStoreGlobalAssembler(CodeAssemblerState* state, Bytecode bytecode,
- OperandScale operand_scale)
- : InterpreterAssembler(state, bytecode, operand_scale) {}
-
- void StaGlobal(Callable ic) {
- // Get the global object.
- Node* context = GetContext();
- Node* native_context = LoadNativeContext(context);
- Node* global = LoadContextElement(native_context, Context::EXTENSION_INDEX);
-
- // Store the global via the StoreIC.
- Node* code_target = HeapConstant(ic.code());
- Node* constant_index = BytecodeOperandIdx(0);
- Node* name = LoadConstantPoolEntry(constant_index);
- Node* value = GetAccumulator();
- Node* raw_slot = BytecodeOperandIdx(1);
- Node* smi_slot = SmiTag(raw_slot);
- Node* feedback_vector = LoadFeedbackVector();
- CallStub(ic.descriptor(), code_target, context, global, name, value,
- smi_slot, feedback_vector);
- Dispatch();
- }
-};
-
-// StaGlobalSloppy <name_index> <slot>
+// StaGlobal <name_index> <slot>
//
// Store the value in the accumulator into the global with name in constant pool
-// entry <name_index> using FeedBackVector slot <slot> in sloppy mode.
-IGNITION_HANDLER(StaGlobalSloppy, InterpreterStoreGlobalAssembler) {
- Callable ic = CodeFactory::StoreGlobalICInOptimizedCode(
- isolate(), LanguageMode::kSloppy);
- StaGlobal(ic);
-}
+// entry <name_index> using FeedbackVector slot <slot>.
+IGNITION_HANDLER(StaGlobal, InterpreterAssembler) {
+ Node* context = GetContext();
-// StaGlobalStrict <name_index> <slot>
-//
-// Store the value in the accumulator into the global with name in constant pool
-// entry <name_index> using FeedBackVector slot <slot> in strict mode.
-IGNITION_HANDLER(StaGlobalStrict, InterpreterStoreGlobalAssembler) {
- Callable ic = CodeFactory::StoreGlobalICInOptimizedCode(
- isolate(), LanguageMode::kStrict);
- StaGlobal(ic);
+ // Store the global via the StoreGlobalIC.
+ Node* constant_index = BytecodeOperandIdx(0);
+ Node* name = LoadConstantPoolEntry(constant_index);
+ Node* value = GetAccumulator();
+ Node* raw_slot = BytecodeOperandIdx(1);
+ Node* smi_slot = SmiTag(raw_slot);
+ Node* feedback_vector = LoadFeedbackVector();
+ Callable ic = Builtins::CallableFor(isolate(), Builtins::kStoreGlobalIC);
+ CallStub(ic, context, name, value, smi_slot, feedback_vector);
+ Dispatch();
}
// LdaContextSlot <context> <slot_index> <depth>
@@ -802,7 +736,7 @@ IGNITION_HANDLER(StaModuleVariable, InterpreterAssembler) {
BIND(&if_import);
{
// Not supported (probably never).
- Abort(kUnsupportedModuleOperation);
+ Abort(AbortReason::kUnsupportedModuleOperation);
Goto(&end);
}
@@ -1245,8 +1179,7 @@ class UnaryNumericOpAssembler : public InterpreterAssembler {
BIND(&if_bigint);
{
var_result.Bind(BigIntOp(value));
- CombineFeedback(&var_feedback,
- SmiConstant(BinaryOperationFeedback::kBigInt));
+ CombineFeedback(&var_feedback, BinaryOperationFeedback::kBigInt);
Goto(&end);
}
@@ -1257,8 +1190,8 @@ class UnaryNumericOpAssembler : public InterpreterAssembler {
// only reach this path on the first pass when the feedback is kNone.
CSA_ASSERT(this, SmiEqual(var_feedback.value(),
SmiConstant(BinaryOperationFeedback::kNone)));
- var_feedback.Bind(
- SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
+ OverwriteFeedback(&var_feedback,
+ BinaryOperationFeedback::kNumberOrOddball);
var_value.Bind(LoadObjectField(value, Oddball::kToNumberOffset));
Goto(&start);
}
@@ -1270,7 +1203,7 @@ class UnaryNumericOpAssembler : public InterpreterAssembler {
// only reach this path on the first pass when the feedback is kNone.
CSA_ASSERT(this, SmiEqual(var_feedback.value(),
SmiConstant(BinaryOperationFeedback::kNone)));
- var_feedback.Bind(SmiConstant(BinaryOperationFeedback::kAny));
+ OverwriteFeedback(&var_feedback, BinaryOperationFeedback::kAny);
var_value.Bind(
CallBuiltin(Builtins::kNonNumberToNumeric, GetContext(), value));
Goto(&start);
@@ -1279,8 +1212,7 @@ class UnaryNumericOpAssembler : public InterpreterAssembler {
BIND(&do_float_op);
{
- CombineFeedback(&var_feedback,
- SmiConstant(BinaryOperationFeedback::kNumber));
+ CombineFeedback(&var_feedback, BinaryOperationFeedback::kNumber);
var_result.Bind(
AllocateHeapNumberWithValue(FloatOp(var_float_value.value())));
Goto(&end);
@@ -1310,14 +1242,12 @@ class NegateAssemblerImpl : public UnaryNumericOpAssembler {
GotoIf(SmiEqual(smi_value, SmiConstant(Smi::kMinValue)), &if_min_smi);
// Else simply subtract operand from 0.
- CombineFeedback(var_feedback,
- SmiConstant(BinaryOperationFeedback::kSignedSmall));
+ CombineFeedback(var_feedback, BinaryOperationFeedback::kSignedSmall);
var_result.Bind(SmiSub(SmiConstant(0), smi_value));
Goto(&end);
BIND(&if_zero);
- CombineFeedback(var_feedback,
- SmiConstant(BinaryOperationFeedback::kNumber));
+ CombineFeedback(var_feedback, BinaryOperationFeedback::kNumber);
var_result.Bind(MinusZeroConstant());
Goto(&end);
@@ -1412,8 +1342,7 @@ class IncDecAssembler : public UnaryNumericOpAssembler {
}
BIND(&if_notoverflow);
- CombineFeedback(var_feedback,
- SmiConstant(BinaryOperationFeedback::kSignedSmall));
+ CombineFeedback(var_feedback, BinaryOperationFeedback::kSignedSmall);
return BitcastWordToTaggedSigned(Projection(0, pair));
}
@@ -2076,11 +2005,11 @@ IGNITION_HANDLER(TestTypeOf, InterpreterAssembler) {
GotoIf(TaggedIsSmi(object), &if_false);
// Check if callable bit is set and not undetectable.
Node* map_bitfield = LoadMapBitField(LoadMap(object));
- Node* callable_undetectable = Word32And(
- map_bitfield,
- Int32Constant(1 << Map::kIsUndetectable | 1 << Map::kIsCallable));
+ Node* callable_undetectable =
+ Word32And(map_bitfield, Int32Constant(Map::IsUndetectableBit::kMask |
+ Map::IsCallableBit::kMask));
Branch(Word32Equal(callable_undetectable,
- Int32Constant(1 << Map::kIsCallable)),
+ Int32Constant(Map::IsCallableBit::kMask)),
&if_true, &if_false);
}
BIND(&if_object);
@@ -2095,9 +2024,9 @@ IGNITION_HANDLER(TestTypeOf, InterpreterAssembler) {
Node* map = LoadMap(object);
GotoIfNot(IsJSReceiverMap(map), &if_false);
Node* map_bitfield = LoadMapBitField(map);
- Node* callable_undetectable = Word32And(
- map_bitfield,
- Int32Constant(1 << Map::kIsUndetectable | 1 << Map::kIsCallable));
+ Node* callable_undetectable =
+ Word32And(map_bitfield, Int32Constant(Map::IsUndetectableBit::kMask |
+ Map::IsCallableBit::kMask));
Branch(Word32Equal(callable_undetectable, Int32Constant(0)), &if_true,
&if_false);
}
@@ -2798,7 +2727,7 @@ IGNITION_HANDLER(Throw, InterpreterAssembler) {
Node* context = GetContext();
CallRuntime(Runtime::kThrow, context, exception);
// We shouldn't ever return from a throw.
- Abort(kUnexpectedReturnFromThrow);
+ Abort(AbortReason::kUnexpectedReturnFromThrow);
}
// ReThrow
@@ -2809,10 +2738,10 @@ IGNITION_HANDLER(ReThrow, InterpreterAssembler) {
Node* context = GetContext();
CallRuntime(Runtime::kReThrow, context, exception);
// We shouldn't ever return from a throw.
- Abort(kUnexpectedReturnFromThrow);
+ Abort(AbortReason::kUnexpectedReturnFromThrow);
}
-// Abort <bailout_reason>
+// Abort <abort_reason>
//
// Aborts execution (via a call to the runtime function).
IGNITION_HANDLER(Abort, InterpreterAssembler) {
@@ -2845,7 +2774,7 @@ IGNITION_HANDLER(ThrowReferenceErrorIfHole, InterpreterAssembler) {
Node* name = LoadConstantPoolEntry(BytecodeOperandIdx(0));
CallRuntime(Runtime::kThrowReferenceError, GetContext(), name);
// We shouldn't ever return from a throw.
- Abort(kUnexpectedReturnFromThrow);
+ Abort(AbortReason::kUnexpectedReturnFromThrow);
}
}
@@ -2863,7 +2792,7 @@ IGNITION_HANDLER(ThrowSuperNotCalledIfHole, InterpreterAssembler) {
{
CallRuntime(Runtime::kThrowSuperNotCalled, GetContext());
// We shouldn't ever return from a throw.
- Abort(kUnexpectedReturnFromThrow);
+ Abort(AbortReason::kUnexpectedReturnFromThrow);
}
}
@@ -2882,7 +2811,7 @@ IGNITION_HANDLER(ThrowSuperAlreadyCalledIfNotHole, InterpreterAssembler) {
{
CallRuntime(Runtime::kThrowSuperAlreadyCalledError, GetContext());
// We shouldn't ever return from a throw.
- Abort(kUnexpectedReturnFromThrow);
+ Abort(AbortReason::kUnexpectedReturnFromThrow);
}
}
@@ -3140,7 +3069,9 @@ IGNITION_HANDLER(ExtraWide, InterpreterAssembler) {
// Illegal
//
// An invalid bytecode aborting execution if dispatched.
-IGNITION_HANDLER(Illegal, InterpreterAssembler) { Abort(kInvalidBytecode); }
+IGNITION_HANDLER(Illegal, InterpreterAssembler) {
+ Abort(AbortReason::kInvalidBytecode);
+}
// SuspendGenerator <generator> <first input register> <register count>
// <suspend_id>
@@ -3212,18 +3143,20 @@ IGNITION_HANDLER(RestoreGeneratorState, InterpreterAssembler) {
Dispatch();
}
-// RestoreGeneratorRegisters <generator> <first output register> <register
-// count>
+// ResumeGenerator <generator> <generator_state> <first output
+// register> <register count>
//
-// Imports the register file stored in the generator.
-IGNITION_HANDLER(RestoreGeneratorRegisters, InterpreterAssembler) {
+// Imports the register file stored in the generator and marks the generator
+// state as executing.
+IGNITION_HANDLER(ResumeGenerator, InterpreterAssembler) {
Node* generator_reg = BytecodeOperandReg(0);
- // Bytecode operand 1 is the start register. It should always be 0, so let's
+ Node* generator_state_reg = BytecodeOperandReg(1);
+ // Bytecode operand 2 is the start register. It should always be 0, so let's
// ignore it.
- CSA_ASSERT(this, WordEqual(BytecodeOperandReg(1),
+ CSA_ASSERT(this, WordEqual(BytecodeOperandReg(2),
IntPtrConstant(Register(0).ToOperand())));
- // Bytecode operand 2 is the number of registers to store to the generator.
- Node* register_count = ChangeUint32ToWord(BytecodeOperandCount(2));
+ // Bytecode operand 3 is the number of registers to store to the generator.
+ Node* register_count = ChangeUint32ToWord(BytecodeOperandCount(3));
Node* generator = LoadRegister(generator_reg);
@@ -3231,6 +3164,15 @@ IGNITION_HANDLER(RestoreGeneratorRegisters, InterpreterAssembler) {
LoadObjectField(generator, JSGeneratorObject::kRegisterFileOffset),
register_count);
+ // Since we're resuming, update the generator state to indicate that the
+ // generator is now executing.
+ StoreRegister(SmiConstant(JSGeneratorObject::kGeneratorExecuting),
+ generator_state_reg);
+
+ // Return the generator's input_or_debug_pos in the accumulator.
+ SetAccumulator(
+ LoadObjectField(generator, JSGeneratorObject::kInputOrDebugPosOffset));
+
Dispatch();
}
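[Editor's note: a plain-C++ model of what the new ResumeGenerator handler does in a single dispatch, where the old sequence needed RestoreGeneratorRegisters plus separate bytecodes for the state update and the input load: copy the saved register file back, mark the generator as executing, and hand the resume value to the accumulator. Types and the state constant are illustrative, not V8's.]

#include <cassert>
#include <vector>

constexpr int kGeneratorExecuting = -2;  // assumed sentinel value

struct Generator {
  std::vector<int> register_file;
  int input_or_debug_pos = 0;
};

struct Frame {
  std::vector<int> registers;
  int generator_state = 0;
  int accumulator = 0;
};

void ResumeGenerator(Frame& frame, const Generator& generator) {
  // Import the register file stored in the generator.
  frame.registers = generator.register_file;
  // Mark the generator as executing; loop headers rely on this.
  frame.generator_state = kGeneratorExecuting;
  // The sent value lives in the [[input_or_debug_pos]] slot.
  frame.accumulator = generator.input_or_debug_pos;
}

int main() {
  Generator generator{{7, 8, 9}, 42};
  Frame frame;
  ResumeGenerator(frame, generator);
  assert(frame.accumulator == 42);
  assert(frame.generator_state == kGeneratorExecuting);
}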
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
index 39cb45c96c..7ad8d49b63 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
@@ -107,7 +107,7 @@ Node* IntrinsicsGenerator::InvokeIntrinsic(Node* function_id, Node* context,
__ BIND(&abort);
{
- __ Abort(BailoutReason::kUnexpectedFunctionIDForInvokeIntrinsic);
+ __ Abort(AbortReason::kUnexpectedFunctionIDForInvokeIntrinsic);
result.Bind(__ UndefinedConstant());
__ Goto(&end);
}
@@ -331,7 +331,7 @@ Node* IntrinsicsGenerator::Call(Node* args_reg, Node* arg_count,
InterpreterAssembler::Label arg_count_positive(assembler_);
Node* comparison = __ Int32LessThan(target_args_count, __ Int32Constant(0));
__ GotoIfNot(comparison, &arg_count_positive);
- __ Abort(kWrongArgumentCountForInvokeIntrinsic);
+ __ Abort(AbortReason::kWrongArgumentCountForInvokeIntrinsic);
__ Goto(&arg_count_positive);
__ BIND(&arg_count_positive);
}
@@ -472,7 +472,7 @@ void IntrinsicsGenerator::AbortIfArgCountMismatch(int expected, Node* actual) {
InterpreterAssembler::Label match(assembler_);
Node* comparison = __ Word32Equal(actual, __ Int32Constant(expected));
__ GotoIf(comparison, &match);
- __ Abort(kWrongArgumentCountForInvokeIntrinsic);
+ __ Abort(AbortReason::kWrongArgumentCountForInvokeIntrinsic);
__ Goto(&match);
__ BIND(&match);
}
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index 1f359f1a0f..fb74d37df4 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -180,8 +180,8 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::ExecuteJobImpl() {
RuntimeCallTimerScope runtimeTimerScope(
parse_info()->runtime_call_stats(),
parse_info()->on_background_thread()
- ? &RuntimeCallStats::CompileBackgroundIgnition
- : &RuntimeCallStats::CompileIgnition);
+ ? RuntimeCallCounterId::kCompileBackgroundIgnition
+ : RuntimeCallCounterId::kCompileIgnition);
// TODO(lpy): add support for background compilation RCS trace.
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileIgnition");
@@ -201,7 +201,7 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl(
Isolate* isolate) {
RuntimeCallTimerScope runtimeTimerScope(
parse_info()->runtime_call_stats(),
- &RuntimeCallStats::CompileIgnitionFinalization);
+ RuntimeCallCounterId::kCompileIgnitionFinalization);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompileIgnitionFinalization");
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index e3ee968f79..7165d88d34 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -54,7 +54,8 @@
#include "src/visitors.h"
#include "src/vm-state-inl.h"
#include "src/wasm/compilation-manager.h"
-#include "src/wasm/wasm-heap.h"
+#include "src/wasm/wasm-code-manager.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-objects.h"
#include "src/zone/accounting-allocator.h"
@@ -109,6 +110,8 @@ void ThreadLocalTop::InitializeInternal() {
rethrowing_message_ = false;
pending_message_obj_ = nullptr;
scheduled_exception_ = nullptr;
+ microtask_queue_bailout_index_ = -1;
+ microtask_queue_bailout_count_ = 0;
}
@@ -332,8 +335,8 @@ void Isolate::PushStackTraceAndDie(unsigned int magic1, void* ptr1, void* ptr2,
"ptr6=%p ptr7=%p ptr8=%p\n\n%s",
magic1, magic2, ptr1, ptr2, ptr3, ptr4, ptr5, ptr6, ptr7, ptr8,
reinterpret_cast<char*>(buffer));
- PushCodeObjectsAndDie(0xdeadc0de, ptr1, ptr2, ptr3, ptr4, ptr5, ptr6, ptr7,
- ptr8, 0xdeadc0de);
+ PushCodeObjectsAndDie(0xDEADC0DE, ptr1, ptr2, ptr3, ptr4, ptr5, ptr6, ptr7,
+ ptr8, 0xDEADC0DE);
}
void Isolate::PushCodeObjectsAndDie(unsigned int magic1, void* ptr1, void* ptr2,
@@ -429,12 +432,12 @@ class FrameArrayBuilder {
//====================================================================
const auto& summary = summ.AsWasmCompiled();
if (!summary.code().IsCodeObject() &&
- summary.code().GetWasmCode()->kind() != wasm::WasmCode::Function) {
+ summary.code().GetWasmCode()->kind() != wasm::WasmCode::kFunction) {
continue;
}
Handle<WasmInstanceObject> instance = summary.wasm_instance();
int flags = 0;
- if (instance->compiled_module()->is_asm_js()) {
+ if (instance->compiled_module()->shared()->is_asm_js()) {
flags |= FrameArray::kIsAsmJsWasmFrame;
if (WasmCompiledFrame::cast(frame)->at_to_number_conversion()) {
flags |= FrameArray::kAsmJsAtNumberConversion;
@@ -453,7 +456,7 @@ class FrameArrayBuilder {
const auto& summary = summ.AsWasmInterpreted();
Handle<WasmInstanceObject> instance = summary.wasm_instance();
int flags = FrameArray::kIsWasmInterpretedFrame;
- DCHECK(!instance->compiled_module()->is_asm_js());
+ DCHECK(!instance->compiled_module()->shared()->is_asm_js());
elements_ = FrameArray::AppendWasmFrame(elements_, instance,
summary.function_index(), {},
summary.byte_offset(), flags);
@@ -667,6 +670,11 @@ Handle<FixedArray> Isolate::GetDetailedStackTrace(
Address Isolate::GetAbstractPC(int* line, int* column) {
JavaScriptFrameIterator it(this);
+ if (it.done()) {
+ *line = -1;
+ *column = -1;
+ return nullptr;
+ }
JavaScriptFrame* frame = it.frame();
DCHECK(!frame->is_builtin());
int position = frame->position();
@@ -764,10 +772,10 @@ class CaptureStackTraceHelper {
const FrameSummary::WasmFrameSummary& summ) {
Handle<StackFrameInfo> info = factory()->NewStackFrameInfo();
- Handle<WasmCompiledModule> compiled_module(
- summ.wasm_instance()->compiled_module(), isolate_);
- Handle<String> name = WasmCompiledModule::GetFunctionName(
- isolate_, compiled_module, summ.function_index());
+ Handle<WasmSharedModuleData> shared(
+ summ.wasm_instance()->compiled_module()->shared(), isolate_);
+ Handle<String> name = WasmSharedModuleData::GetFunctionName(
+ isolate_, shared, summ.function_index());
info->set_function_name(*name);
// Encode the function index as line number (1-based).
info->set_line_number(summ.function_index() + 1);
@@ -1029,7 +1037,7 @@ void Isolate::RequestInterrupt(InterruptCallback callback, void* data) {
void Isolate::InvokeApiInterruptCallbacks() {
RuntimeCallTimerScope runtimeTimer(
- this, &RuntimeCallStats::InvokeApiInterruptCallbacks);
+ this, RuntimeCallCounterId::kInvokeApiInterruptCallbacks);
// Note: callback below should be called outside of execution access lock.
while (true) {
InterruptEntry entry;
@@ -1133,7 +1141,7 @@ Object* Isolate::Throw(Object* exception, MessageLocation* location) {
#ifdef DEBUG
if (AllowHeapAllocation::IsAllowed()) {
#else
- if (false) {
+ if ((false)) {
#endif
printf(", %d:%d - %d:%d\n",
Script::GetLineNumber(script, location->start_pos()) + 1,
@@ -1301,7 +1309,7 @@ Object* Isolate::UnwindAndFindHandler() {
set_wasm_caught_exception(exception);
if (FLAG_wasm_jit_to_native) {
wasm::WasmCode* wasm_code =
- wasm_code_manager()->LookupCode(frame->pc());
+ wasm_engine()->code_manager()->LookupCode(frame->pc());
return FoundHandler(nullptr, wasm_code->instructions().start(),
offset, wasm_code->constant_pool(), return_sp,
frame->fp());
@@ -1683,8 +1691,7 @@ bool Isolate::ComputeLocationFromStackTrace(MessageLocation* target,
for (int i = 0; i < frame_count; i++) {
if (elements->IsWasmFrame(i) || elements->IsAsmJsWasmFrame(i)) {
Handle<WasmCompiledModule> compiled_module(
- WasmInstanceObject::cast(elements->WasmInstance(i))
- ->compiled_module());
+ elements->WasmInstance(i)->compiled_module());
uint32_t func_index =
static_cast<uint32_t>(elements->WasmFunctionIndex(i)->value());
int code_offset = elements->Offset(i)->value();
@@ -1701,9 +1708,10 @@ bool Isolate::ComputeLocationFromStackTrace(MessageLocation* target,
bool is_at_number_conversion =
elements->IsAsmJsWasmFrame(i) &&
elements->Flags(i)->value() & FrameArray::kAsmJsAtNumberConversion;
- int pos = WasmCompiledModule::GetSourcePosition(
- compiled_module, func_index, byte_offset, is_at_number_conversion);
- Handle<Script> script(compiled_module->script());
+ int pos = WasmSharedModuleData::GetSourcePosition(
+ handle(compiled_module->shared(), this), func_index, byte_offset,
+ is_at_number_conversion);
+ Handle<Script> script(compiled_module->shared()->script());
*target = MessageLocation(script, pos, pos + 1);
return true;
@@ -1810,21 +1818,9 @@ bool Isolate::IsExternalHandlerOnTop(Object* exception) {
return (entry_handler > external_handler);
}
-
-void Isolate::ReportPendingMessages() {
- DCHECK(AllowExceptions::IsAllowed(this));
-
- // The embedder might run script in response to an exception.
- AllowJavascriptExecutionDebugOnly allow_script(this);
-
+void Isolate::ReportPendingMessagesImpl(bool report_externally) {
Object* exception = pending_exception();
- // Try to propagate the exception to an external v8::TryCatch handler. If
- // propagation was unsuccessful, then we will get another chance at reporting
- // the pending message if the exception is re-thrown.
- bool has_been_propagated = PropagatePendingExceptionToExternalTryCatch();
- if (!has_been_propagated) return;
-
// Clear the pending message object early to avoid endless recursion.
Object* message_obj = thread_local_top_.pending_message_obj_;
clear_pending_message();
@@ -1837,7 +1833,7 @@ void Isolate::ReportPendingMessages() {
// depending on whether and external v8::TryCatch or an internal JavaScript
// handler is on top.
bool should_report_exception;
- if (IsExternalHandlerOnTop(exception)) {
+ if (report_externally) {
// Only report the exception if the external handler is verbose.
should_report_exception = try_catch_handler()->is_verbose_;
} else {
@@ -1858,6 +1854,85 @@ void Isolate::ReportPendingMessages() {
}
}
+void Isolate::ReportPendingMessages() {
+ DCHECK(AllowExceptions::IsAllowed(this));
+
+ // The embedder might run script in response to an exception.
+ AllowJavascriptExecutionDebugOnly allow_script(this);
+
+ Object* exception = pending_exception();
+
+ // Try to propagate the exception to an external v8::TryCatch handler. If
+ // propagation was unsuccessful, then we will get another chance at reporting
+ // the pending message if the exception is re-thrown.
+ bool has_been_propagated = PropagatePendingExceptionToExternalTryCatch();
+ if (!has_been_propagated) return;
+
+ ReportPendingMessagesImpl(IsExternalHandlerOnTop(exception));
+}
+
+void Isolate::ReportPendingMessagesFromJavaScript() {
+ DCHECK(AllowExceptions::IsAllowed(this));
+
+ auto IsHandledByJavaScript = [=]() {
+ // In this situation, the exception is always a non-terminating exception.
+
+ // Get the top-most JS_ENTRY handler, cannot be on top if it doesn't exist.
+ Address entry_handler = Isolate::handler(thread_local_top());
+ DCHECK_NOT_NULL(entry_handler);
+ entry_handler =
+ reinterpret_cast<StackHandler*>(entry_handler)->next()->address();
+
+ // Get the address of the external handler so we can compare the address to
+ // determine which one is closer to the top of the stack.
+ Address external_handler = thread_local_top()->try_catch_handler_address();
+ if (external_handler == nullptr) return true;
+
+ return (entry_handler < external_handler);
+ };
+
+ auto IsHandledExternally = [=]() {
+ Address external_handler = thread_local_top()->try_catch_handler_address();
+ if (external_handler == nullptr) return false;
+
+    // Get the top-most JS_ENTRY handler; it cannot be on top if absent.
+ Address entry_handler = Isolate::handler(thread_local_top());
+ DCHECK_NOT_NULL(entry_handler);
+ entry_handler =
+ reinterpret_cast<StackHandler*>(entry_handler)->next()->address();
+ return (entry_handler > external_handler);
+ };
+
+ auto PropagateToExternalHandler = [=]() {
+ if (IsHandledByJavaScript()) {
+ thread_local_top_.external_caught_exception_ = false;
+ return false;
+ }
+
+ if (!IsHandledExternally()) {
+ thread_local_top_.external_caught_exception_ = false;
+ return true;
+ }
+
+ thread_local_top_.external_caught_exception_ = true;
+ v8::TryCatch* handler = try_catch_handler();
+ DCHECK(thread_local_top_.pending_message_obj_->IsJSMessageObject() ||
+ thread_local_top_.pending_message_obj_->IsTheHole(this));
+ handler->can_continue_ = true;
+ handler->has_terminated_ = false;
+ handler->exception_ = pending_exception();
+ // Propagate to the external try-catch only if we got an actual message.
+ if (thread_local_top_.pending_message_obj_->IsTheHole(this)) return true;
+
+ handler->message_obj_ = thread_local_top_.pending_message_obj_;
+ return true;
+ };
+
+ // Try to propagate to an external v8::TryCatch handler.
+ if (!PropagateToExternalHandler()) return;
+
+ ReportPendingMessagesImpl(true);
+}
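
Both lambdas above rank the JS_ENTRY handler against the external v8::TryCatch by raw address: on the downward-growing stacks V8 runs on, the most recently pushed handler has the numerically smaller address, so entry_handler < external_handler means JavaScript catches first. A hedged illustration with made-up addresses:

    #include <cstdint>
    #include <iostream>

    // Sketch: "closer to the top of a downward-growing stack" is simply
    // "numerically smaller address". Zero models a missing external handler.
    bool HandledByJavaScript(uint64_t entry_handler, uint64_t external_handler) {
      if (external_handler == 0) return true;  // nothing external registered
      return entry_handler < external_handler;
    }

    int main() {
      std::cout << HandledByJavaScript(0x1000, 0x2000) << "\n";  // 1: JS wins
      std::cout << HandledByJavaScript(0x3000, 0x2000) << "\n";  // 0: TryCatch
    }
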
MessageLocation Isolate::GetMessageLocation() {
DCHECK(has_pending_exception());
@@ -2041,8 +2116,23 @@ Handle<Object> Isolate::GetPromiseOnStackOnThrow() {
}
Handle<Object> retval = undefined;
PromiseOnStack* promise_on_stack = tltop->promise_on_stack_;
- for (JavaScriptFrameIterator it(this); !it.done(); it.Advance()) {
- switch (PredictException(it.frame())) {
+ for (StackFrameIterator it(this); !it.done(); it.Advance()) {
+ StackFrame* frame = it.frame();
+ HandlerTable::CatchPrediction catch_prediction;
+ if (frame->is_java_script()) {
+ catch_prediction = PredictException(JavaScriptFrame::cast(frame));
+ } else if (frame->type() == StackFrame::STUB) {
+ Code* code = frame->LookupCode();
+ if (!code->IsCode() || code->kind() != Code::BUILTIN ||
+ !code->handler_table()->length() || !code->is_turbofanned()) {
+ continue;
+ }
+ catch_prediction = code->GetBuiltinCatchPrediction();
+ } else {
+ continue;
+ }
+
+ switch (catch_prediction) {
case HandlerTable::UNCAUGHT:
continue;
case HandlerTable::CAUGHT:
@@ -2392,9 +2482,9 @@ Isolate::Isolate(bool enable_serializer)
descriptor_lookup_cache_(nullptr),
handle_scope_implementer_(nullptr),
unicode_cache_(nullptr),
- allocator_(FLAG_trace_gc_object_stats ? new VerboseAccountingAllocator(
- &heap_, 256 * KB, 128 * KB)
- : new AccountingAllocator()),
+ allocator_(FLAG_trace_zone_stats ? new VerboseAccountingAllocator(
+ &heap_, 256 * KB, 128 * KB)
+ : new AccountingAllocator()),
inner_pointer_to_code_cache_(nullptr),
global_handles_(nullptr),
eternal_handles_(nullptr),
@@ -2432,7 +2522,6 @@ Isolate::Isolate(bool enable_serializer)
use_counter_callback_(nullptr),
basic_block_profiler_(nullptr),
cancelable_task_manager_(new CancelableTaskManager()),
- wasm_compilation_manager_(new wasm::CompilationManager()),
abort_on_uncaught_exception_callback_(nullptr),
total_regexp_code_generated_(0) {
{
@@ -2454,9 +2543,6 @@ Isolate::Isolate(bool enable_serializer)
thread_manager_->isolate_ = this;
#ifdef DEBUG
- // heap_histograms_ initializes itself.
- memset(&js_spill_information_, 0, sizeof(js_spill_information_));
-
non_disposed_isolates_.Increment(1);
#endif // DEBUG
@@ -2482,6 +2568,10 @@ Isolate::Isolate(bool enable_serializer)
void Isolate::TearDown() {
TRACE_ISOLATE(tear_down);
+ if (FLAG_stress_sampling_allocation_profiler > 0) {
+ heap_profiler()->StopSamplingHeapProfiler();
+ }
+
// Temporarily set this isolate as current so that various parts of
// the isolate can access it in their destructors without having a
// direct pointer. We don't use Enter/Exit here to avoid
@@ -2535,7 +2625,7 @@ void Isolate::Deinit() {
optimizing_compile_dispatcher_ = nullptr;
}
- wasm_compilation_manager_->TearDown();
+ wasm_engine()->compilation_manager()->TearDown();
heap_.mark_compact_collector()->EnsureSweepingCompleted();
heap_.memory_allocator()->unmapper()->WaitUntilCompleted();
@@ -2686,9 +2776,8 @@ Isolate::~Isolate() {
allocator_ = nullptr;
#if USE_SIMULATOR
- Simulator::TearDown(simulator_i_cache_, simulator_redirection_);
+ Simulator::TearDown(simulator_i_cache_);
simulator_i_cache_ = nullptr;
- simulator_redirection_ = nullptr;
#endif
}
@@ -2816,11 +2905,8 @@ bool Isolate::Init(StartupDeserializer* des) {
// Initialize other runtime facilities
#if defined(USE_SIMULATOR)
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
- V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390
Simulator::Initialize(this);
#endif
-#endif
{ // NOLINT
// Ensure that the thread has a valid stack guard. The v8::Locker object
@@ -2837,16 +2923,15 @@ bool Isolate::Init(StartupDeserializer* des) {
return false;
}
- // Setup the wasm code manager. Currently, there's one per Isolate.
- if (!wasm_code_manager_) {
- size_t max_code_size = kMaxWasmCodeMemory;
- if (kRequiresCodeRange) {
- max_code_size = std::min(max_code_size,
- heap_.memory_allocator()->code_range()->size());
- }
- wasm_code_manager_.reset(new wasm::WasmCodeManager(
- reinterpret_cast<v8::Isolate*>(this), max_code_size));
- }
+ // Setup the wasm engine. Currently, there's one per Isolate.
+ const size_t max_code_size =
+ kRequiresCodeRange
+ ? std::min(kMaxWasmCodeMemory,
+ heap_.memory_allocator()->code_range()->size())
+ : kMaxWasmCodeMemory;
+ wasm_engine_.reset(new wasm::WasmEngine(
+ std::unique_ptr<wasm::WasmCodeManager>(new wasm::WasmCodeManager(
+ reinterpret_cast<v8::Isolate*>(this), max_code_size))));
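
The replacement folds the old lazily-created WasmCodeManager into a WasmEngine and expresses the size cap as one conditional: Wasm code space is limited by kMaxWasmCodeMemory, and additionally by the code range when the platform requires one. The clamp in isolation (the cap value here is a hypothetical stand-in):

    #include <algorithm>
    #include <cstddef>

    // Hypothetical cap standing in for V8's kMaxWasmCodeMemory.
    constexpr size_t kMaxWasmCodeMemory = size_t{1} << 30;

    size_t MaxWasmCodeSize(bool requires_code_range, size_t code_range_size) {
      // With a code range, generated Wasm code must also fit inside it.
      return requires_code_range
                 ? std::min(kMaxWasmCodeMemory, code_range_size)
                 : kMaxWasmCodeMemory;
    }
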
// Initialize the interface descriptors ahead of time.
#define INTERFACE_DESCRIPTOR(Name, ...) \
@@ -2953,6 +3038,15 @@ bool Isolate::Init(StartupDeserializer* des) {
if (!FLAG_inline_new) heap_.DisableInlineAllocation();
+ if (FLAG_stress_sampling_allocation_profiler > 0) {
+ uint64_t sample_interval = FLAG_stress_sampling_allocation_profiler;
+ int stack_depth = 128;
+ v8::HeapProfiler::SamplingFlags sampling_flags =
+ v8::HeapProfiler::SamplingFlags::kSamplingForceGC;
+ heap_profiler()->StartSamplingHeapProfiler(sample_interval, stack_depth,
+ sampling_flags);
+ }
+
return true;
}
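
The flag-driven block above is essentially a stress harness around the public sampling heap profiler. A hedged sketch of the embedder-facing equivalent, using the v8::HeapProfiler API with the same stack depth and force-GC flag:

    #include "v8-profiler.h"  // assumes an embedder build with V8 headers

    // Sketch: start sampling on an initialized isolate, run a workload, stop.
    void RunWithHeapSampling(v8::Isolate* isolate, uint64_t sample_interval) {
      v8::HeapProfiler* profiler = isolate->GetHeapProfiler();
      // Force a GC when taking samples so the profile is less noisy.
      profiler->StartSamplingHeapProfiler(
          sample_interval, /*stack_depth=*/128,
          v8::HeapProfiler::SamplingFlags::kSamplingForceGC);
      // ... execute scripts here ...
      profiler->StopSamplingHeapProfiler();  // mirrored by Isolate::TearDown()
    }
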
@@ -3090,7 +3184,7 @@ bool Isolate::use_optimizer() {
bool Isolate::NeedsSourcePositionsForProfiling() const {
return FLAG_trace_deopt || FLAG_trace_turbo || FLAG_trace_turbo_graph ||
FLAG_turbo_profiling || FLAG_perf_prof || is_profiling() ||
- debug_->is_active() || logger_->is_logging();
+ debug_->is_active() || logger_->is_logging() || FLAG_trace_maps;
}
void Isolate::SetFeedbackVectorsForProfilingTools(Object* value) {
@@ -3098,27 +3192,32 @@ void Isolate::SetFeedbackVectorsForProfilingTools(Object* value) {
heap()->set_feedback_vectors_for_profiling_tools(value);
}
-void Isolate::InitializeVectorListFromHeap() {
+void Isolate::MaybeInitializeVectorListFromHeap() {
+ if (!heap()->feedback_vectors_for_profiling_tools()->IsUndefined(this)) {
+ // Already initialized, return early.
+ DCHECK(heap()->feedback_vectors_for_profiling_tools()->IsArrayList());
+ return;
+ }
+
// Collect existing feedback vectors.
std::vector<Handle<FeedbackVector>> vectors;
+
{
HeapIterator heap_iterator(heap());
while (HeapObject* current_obj = heap_iterator.next()) {
- if (current_obj->IsSharedFunctionInfo()) {
- SharedFunctionInfo* shared = SharedFunctionInfo::cast(current_obj);
- shared->set_has_reported_binary_coverage(false);
- } else if (current_obj->IsFeedbackVector()) {
- FeedbackVector* vector = FeedbackVector::cast(current_obj);
- SharedFunctionInfo* shared = vector->shared_function_info();
- if (!shared->IsSubjectToDebugging()) continue;
- vector->clear_invocation_count();
- vectors.emplace_back(vector, this);
- }
+ if (!current_obj->IsFeedbackVector()) continue;
+
+ FeedbackVector* vector = FeedbackVector::cast(current_obj);
+ SharedFunctionInfo* shared = vector->shared_function_info();
+
+ // No need to preserve the feedback vector for non-user-visible functions.
+ if (!shared->IsSubjectToDebugging()) continue;
+
+ vectors.emplace_back(vector, this);
}
}
- // Add collected feedback vectors to the root list lest we lose them to
- // GC.
+ // Add collected feedback vectors to the root list lest we lose them to GC.
Handle<ArrayList> list =
ArrayList::New(this, static_cast<int>(vectors.size()));
for (const auto& vector : vectors) list = ArrayList::Add(list, vector);
@@ -3356,7 +3455,16 @@ base::RandomNumberGenerator* Isolate::random_number_generator() {
}
base::RandomNumberGenerator* Isolate::fuzzer_rng() {
- return ensure_rng_exists(&fuzzer_rng_, FLAG_fuzzer_random_seed);
+ if (fuzzer_rng_ == nullptr) {
+ int64_t seed = FLAG_fuzzer_random_seed;
+ if (seed == 0) {
+ seed = random_number_generator()->initial_seed();
+ }
+
+ fuzzer_rng_ = new base::RandomNumberGenerator(seed);
+ }
+
+ return fuzzer_rng_;
}
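
The rewrite inlines the old ensure_rng_exists() helper and adds a fallback: a zero --fuzzer-random-seed now borrows the main RNG's initial seed, so a single externally supplied seed reproduces both random streams. The lazy-init-with-fallback pattern, restated with standard-library types:

    #include <cstdint>
    #include <memory>
    #include <random>

    // Sketch: both RNGs become reproducible from one primary seed.
    class IsolateRngs {
     public:
      explicit IsolateRngs(int64_t primary_seed)
          : primary_seed_(primary_seed) {}

      std::mt19937_64& fuzzer_rng(int64_t flag_seed) {
        if (!fuzzer_rng_) {
          const int64_t seed = flag_seed != 0 ? flag_seed : primary_seed_;
          fuzzer_rng_ = std::make_unique<std::mt19937_64>(seed);
        }
        return *fuzzer_rng_;
      }

     private:
      int64_t primary_seed_;
      std::unique_ptr<std::mt19937_64> fuzzer_rng_;
    };
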
int Isolate::GenerateIdentityHash(uint32_t mask) {
@@ -3688,72 +3796,89 @@ void Isolate::RunMicrotasksInternal() {
if (!pending_microtask_count()) return;
TRACE_EVENT0("v8.execute", "RunMicrotasks");
TRACE_EVENT_CALL_STATS_SCOPED(this, "v8", "V8.RunMicrotasks");
- while (pending_microtask_count() > 0) {
- HandleScope scope(this);
- int num_tasks = pending_microtask_count();
- // Do not use factory()->microtask_queue() here; we need a fresh handle!
- Handle<FixedArray> queue(heap()->microtask_queue(), this);
- DCHECK(num_tasks <= queue->length());
- set_pending_microtask_count(0);
- heap()->set_microtask_queue(heap()->empty_fixed_array());
- Isolate* isolate = this;
- FOR_WITH_HANDLE_SCOPE(isolate, int, i = 0, i, i < num_tasks, i++, {
- Handle<Object> microtask(queue->get(i), this);
-
- if (microtask->IsCallHandlerInfo()) {
- Handle<CallHandlerInfo> callback_info =
- Handle<CallHandlerInfo>::cast(microtask);
- v8::MicrotaskCallback callback =
- v8::ToCData<v8::MicrotaskCallback>(callback_info->callback());
- void* data = v8::ToCData<void*>(callback_info->data());
- callback(data);
- } else {
- SaveContext save(this);
- Context* context;
- if (microtask->IsJSFunction()) {
- context = Handle<JSFunction>::cast(microtask)->context();
- } else if (microtask->IsPromiseResolveThenableJobInfo()) {
- context =
- Handle<PromiseResolveThenableJobInfo>::cast(microtask)->context();
- } else {
- context = Handle<PromiseReactionJobInfo>::cast(microtask)->context();
- }
+ do {
+ HandleScope handle_scope(this);
+ set_microtask_queue_bailout_index(-1);
+ set_microtask_queue_bailout_count(-1);
+ MaybeHandle<Object> maybe_exception;
+ MaybeHandle<Object> maybe_result = Execution::RunMicrotasks(
+ this, Execution::MessageHandling::kReport, &maybe_exception);
+ if (maybe_result.is_null() && maybe_exception.is_null()) {
+ heap()->set_microtask_queue(heap()->empty_fixed_array());
+ set_pending_microtask_count(0);
+ return;
+ }
- set_context(context->native_context());
- handle_scope_implementer_->EnterMicrotaskContext(
- Handle<Context>(context, this));
-
- MaybeHandle<Object> result;
- MaybeHandle<Object> maybe_exception;
-
- if (microtask->IsJSFunction()) {
- Handle<JSFunction> microtask_function =
- Handle<JSFunction>::cast(microtask);
- result = Execution::TryCall(
- this, microtask_function, factory()->undefined_value(), 0,
- nullptr, Execution::MessageHandling::kReport, &maybe_exception);
- } else if (microtask->IsPromiseResolveThenableJobInfo()) {
- PromiseResolveThenableJob(
- Handle<PromiseResolveThenableJobInfo>::cast(microtask), &result,
- &maybe_exception);
- } else {
- PromiseReactionJob(Handle<PromiseReactionJobInfo>::cast(microtask),
- &result, &maybe_exception);
- }
+ Handle<Object> result = maybe_result.ToHandleChecked();
+ if (result->IsUndefined(this)) return;
- handle_scope_implementer_->LeaveMicrotaskContext();
+ Handle<FixedArray> queue = Handle<FixedArray>::cast(result);
+ int num_tasks = microtask_queue_bailout_count();
+ DCHECK_GE(microtask_queue_bailout_index(), 0);
- // If execution is terminating, just bail out.
- if (result.is_null() && maybe_exception.is_null()) {
- // Clear out any remaining callbacks in the queue.
- heap()->set_microtask_queue(heap()->empty_fixed_array());
- set_pending_microtask_count(0);
- return;
- }
- }
- });
- }
+ Isolate* isolate = this;
+ FOR_WITH_HANDLE_SCOPE(
+ isolate, int, i = microtask_queue_bailout_index(), i, i < num_tasks,
+ i++, {
+ Handle<Object> microtask(queue->get(i), this);
+
+ if (microtask->IsCallHandlerInfo()) {
+ Handle<CallHandlerInfo> callback_info =
+ Handle<CallHandlerInfo>::cast(microtask);
+ v8::MicrotaskCallback callback =
+ v8::ToCData<v8::MicrotaskCallback>(callback_info->callback());
+ void* data = v8::ToCData<void*>(callback_info->data());
+ callback(data);
+ } else {
+ SaveContext save(this);
+ Context* context;
+ if (microtask->IsJSFunction()) {
+ context = Handle<JSFunction>::cast(microtask)->context();
+ } else if (microtask->IsPromiseResolveThenableJobInfo()) {
+ context = Handle<PromiseResolveThenableJobInfo>::cast(microtask)
+ ->context();
+ } else {
+ context =
+ Handle<PromiseReactionJobInfo>::cast(microtask)->context();
+ }
+
+ set_context(context->native_context());
+ handle_scope_implementer_->EnterMicrotaskContext(
+ Handle<Context>(context, this));
+
+ MaybeHandle<Object> result;
+ MaybeHandle<Object> maybe_exception;
+
+ if (microtask->IsJSFunction()) {
+ Handle<JSFunction> microtask_function =
+ Handle<JSFunction>::cast(microtask);
+ result = Execution::TryCall(
+ this, microtask_function, factory()->undefined_value(), 0,
+ nullptr, Execution::MessageHandling::kReport,
+ &maybe_exception);
+ } else if (microtask->IsPromiseResolveThenableJobInfo()) {
+ PromiseResolveThenableJob(
+ Handle<PromiseResolveThenableJobInfo>::cast(microtask),
+ &result, &maybe_exception);
+ } else {
+ PromiseReactionJob(
+ Handle<PromiseReactionJobInfo>::cast(microtask), &result,
+ &maybe_exception);
+ }
+
+ handle_scope_implementer_->LeaveMicrotaskContext();
+
+ // If execution is terminating, just bail out.
+ if (result.is_null() && maybe_exception.is_null()) {
+ // Clear out any remaining callbacks in the queue.
+ heap()->set_microtask_queue(heap()->empty_fixed_array());
+ set_pending_microtask_count(0);
+ return;
+ }
+ }
+ });
+ } while (pending_microtask_count() > 0);
}
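
The loop's change from while to do/while matters: Execution::RunMicrotasks() may bail out partway through a batch (recording the bailout index and count), the C++ slow path finishes that batch, and any tasks enqueued meanwhile force another pass. A reduced sketch of that drain-with-bailout structure, with hypothetical queue types:

    #include <deque>
    #include <functional>

    using Task = std::function<void()>;

    // fast_path runs a prefix of the batch and returns the index at which it
    // stopped (batch.size() if it finished everything).
    void DrainMicrotasks(std::deque<Task>& queue,
                         size_t (*fast_path)(std::deque<Task>&)) {
      do {
        std::deque<Task> batch;
        batch.swap(queue);  // tasks may re-enqueue into `queue` while running
        const size_t resume_at = fast_path(batch);
        for (size_t i = resume_at; i < batch.size(); ++i) {
          batch[i]();  // slow path: run the remainder one task at a time
        }
      } while (!queue.empty());
    }
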
void Isolate::SetUseCounterCallback(v8::Isolate::UseCounterCallback callback) {
@@ -3884,10 +4009,6 @@ void Isolate::PrintWithTimestamp(const char* format, ...) {
va_end(arguments);
}
-wasm::WasmCodeManager* Isolate::wasm_code_manager() {
- return wasm_code_manager_.get();
-}
-
bool StackLimitCheck::JsHasOverflowed(uintptr_t gap) const {
StackGuard* stack_guard = isolate_->stack_guard();
#ifdef USE_SIMULATOR
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index 9e3de53675..8eca55ffd6 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -109,8 +109,7 @@ class Interpreter;
}
namespace wasm {
-class CompilationManager;
-class WasmCodeManager;
+class WasmEngine;
}
#define RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate) \
@@ -331,7 +330,7 @@ class ThreadLocalTop BASE_EMBEDDED {
Object* pending_exception_;
// TODO(kschimpf): Change this to a stack of caught exceptions (rather than
// just innermost catching try block).
- Object* wasm_caught_exception_;
+ Object* wasm_caught_exception_ = nullptr;
// Communication channel between Isolate::FindHandler and the CEntryStub.
Context* pending_handler_context_;
@@ -373,6 +372,9 @@ class ThreadLocalTop BASE_EMBEDDED {
// Call back function to report unsafe JS accesses.
v8::FailedAccessCheckCallback failed_access_check_callback_;
+ int microtask_queue_bailout_index_;
+ int microtask_queue_bailout_count_;
+
private:
void InitializeInternal();
@@ -382,10 +384,8 @@ class ThreadLocalTop BASE_EMBEDDED {
#if USE_SIMULATOR
-#define ISOLATE_INIT_SIMULATOR_LIST(V) \
- V(bool, simulator_initialized, false) \
- V(base::CustomMatcherHashMap*, simulator_i_cache, nullptr) \
- V(Redirection*, simulator_redirection, nullptr)
+#define ISOLATE_INIT_SIMULATOR_LIST(V) \
+ V(base::CustomMatcherHashMap*, simulator_i_cache, nullptr)
#else
#define ISOLATE_INIT_SIMULATOR_LIST(V)
@@ -675,6 +675,18 @@ class Isolate {
return &thread_local_top_.js_entry_sp_;
}
+ THREAD_LOCAL_TOP_ACCESSOR(int, microtask_queue_bailout_index)
+ Address microtask_queue_bailout_index_address() {
+ return reinterpret_cast<Address>(
+ &thread_local_top_.microtask_queue_bailout_index_);
+ }
+
+ THREAD_LOCAL_TOP_ACCESSOR(int, microtask_queue_bailout_count)
+ Address microtask_queue_bailout_count_address() {
+ return reinterpret_cast<Address>(
+ &thread_local_top_.microtask_queue_bailout_count_);
+ }
+
// Returns the global object of the current context. It could be
// a builtin object, or a JS global object.
inline Handle<JSGlobalObject> global_object();
@@ -808,6 +820,11 @@ class Isolate {
// Un-schedule an exception that was caught by a TryCatch handler.
void CancelScheduledExceptionFromTryCatch(v8::TryCatch* handler);
void ReportPendingMessages();
+ void ReportPendingMessagesFromJavaScript();
+
+  // Implements code shared between the two methods above.
+ void ReportPendingMessagesImpl(bool report_externally);
+
// Return pending location if any or unfilled structure.
MessageLocation GetMessageLocation();
@@ -906,7 +923,7 @@ class Isolate {
}
StackGuard* stack_guard() { return &stack_guard_; }
Heap* heap() { return &heap_; }
- V8_EXPORT_PRIVATE wasm::WasmCodeManager* wasm_code_manager();
+ wasm::WasmEngine* wasm_engine() const { return wasm_engine_.get(); }
StubCache* load_stub_cache() { return load_stub_cache_; }
StubCache* store_stub_cache() { return store_stub_cache_; }
DeoptimizerData* deoptimizer_data() { return deoptimizer_data_; }
@@ -991,12 +1008,6 @@ class Isolate {
static size_t non_disposed_isolates() {
return non_disposed_isolates_.Value();
}
-
- HistogramInfo* heap_histograms() { return heap_histograms_; }
-
- JSObject::SpillInformation* js_spill_information() {
- return &js_spill_information_;
- }
#endif
Factory* factory() { return reinterpret_cast<Factory*>(this); }
@@ -1065,7 +1076,7 @@ class Isolate {
// memory usage is expected.
void SetFeedbackVectorsForProfilingTools(Object* value);
- void InitializeVectorListFromHeap();
+ void MaybeInitializeVectorListFromHeap();
double time_millis_since_init() {
return heap_.MonotonicallyIncreasingTimeInMs() - time_millis_at_init_;
@@ -1210,6 +1221,7 @@ class Isolate {
void PromiseResolveThenableJob(Handle<PromiseResolveThenableJobInfo> info,
MaybeHandle<Object>* result,
MaybeHandle<Object>* maybe_exception);
+
void EnqueueMicrotask(Handle<Object> microtask);
void RunMicrotasks();
bool IsRunningMicrotasks() const { return is_running_microtasks_; }
@@ -1233,6 +1245,14 @@ class Isolate {
return reinterpret_cast<Address>(&promise_hook_or_debug_is_active_);
}
+ Address pending_microtask_count_address() {
+ return reinterpret_cast<Address>(&pending_microtask_count_);
+ }
+
+ Address handle_scope_implementer_address() {
+ return reinterpret_cast<Address>(&handle_scope_implementer_);
+ }
+
void DebugStateUpdated();
void SetPromiseHook(PromiseHook hook);
@@ -1259,10 +1279,6 @@ class Isolate {
return cancelable_task_manager_;
}
- wasm::CompilationManager* wasm_compilation_manager() {
- return wasm_compilation_manager_.get();
- }
-
const AstStringConstants* ast_string_constants() const {
return ast_string_constants_;
}
@@ -1303,9 +1319,6 @@ class Isolate {
#ifdef USE_SIMULATOR
base::Mutex* simulator_i_cache_mutex() { return &simulator_i_cache_mutex_; }
- base::Mutex* simulator_redirection_mutex() {
- return &simulator_redirection_mutex_;
- }
#endif
void set_allow_atomics_wait(bool set) { allow_atomics_wait_ = set; }
@@ -1558,8 +1571,6 @@ class Isolate {
#ifdef DEBUG
static base::AtomicNumber<size_t> non_disposed_isolates_;
- // A static array of histogram info for each type.
- HistogramInfo heap_histograms_[LAST_TYPE + 1];
JSObject::SpillInformation js_spill_information_;
#endif
@@ -1634,8 +1645,6 @@ class Isolate {
CancelableTaskManager* cancelable_task_manager_;
- std::unique_ptr<wasm::CompilationManager> wasm_compilation_manager_;
-
debug::ConsoleDelegate* console_delegate_ = nullptr;
v8::Isolate::AbortOnUncaughtExceptionCallback
@@ -1643,7 +1652,6 @@ class Isolate {
#ifdef USE_SIMULATOR
base::Mutex simulator_i_cache_mutex_;
- base::Mutex simulator_redirection_mutex_;
#endif
bool allow_atomics_wait_;
@@ -1654,7 +1662,7 @@ class Isolate {
size_t elements_deletion_counter_ = 0;
- std::unique_ptr<wasm::WasmCodeManager> wasm_code_manager_;
+ std::unique_ptr<wasm::WasmEngine> wasm_engine_;
// The top entry of the v8::Context::BackupIncumbentScope stack.
const v8::Context::BackupIncumbentScope* top_backup_incumbent_scope_ =
diff --git a/deps/v8/src/js/array.js b/deps/v8/src/js/array.js
index 80fd250d22..7605fc1a7d 100644
--- a/deps/v8/src/js/array.js
+++ b/deps/v8/src/js/array.js
@@ -1118,64 +1118,6 @@ DEFINE_METHOD_LEN(
);
-function InnerArrayFind(predicate, thisArg, array, length) {
- if (!IS_CALLABLE(predicate)) {
- throw %make_type_error(kCalledNonCallable, predicate);
- }
-
- for (var i = 0; i < length; i++) {
- var element = array[i];
- if (%_Call(predicate, thisArg, element, i, array)) {
- return element;
- }
- }
-
- return;
-}
-
-
-// ES6 draft 07-15-13, section 15.4.3.23
-DEFINE_METHOD_LEN(
- GlobalArray.prototype,
- find(predicate, thisArg) {
- var array = TO_OBJECT(this);
- var length = TO_INTEGER(array.length);
-
- return InnerArrayFind(predicate, thisArg, array, length);
- },
- 1 /* Set function length */
-);
-
-
-function InnerArrayFindIndex(predicate, thisArg, array, length) {
- if (!IS_CALLABLE(predicate)) {
- throw %make_type_error(kCalledNonCallable, predicate);
- }
-
- for (var i = 0; i < length; i++) {
- var element = array[i];
- if (%_Call(predicate, thisArg, element, i, array)) {
- return i;
- }
- }
-
- return -1;
-}
-
-
-// ES6 draft 07-15-13, section 15.4.3.24
-DEFINE_METHOD_LEN(
- GlobalArray.prototype,
- findIndex(predicate, thisArg) {
- var array = TO_OBJECT(this);
- var length = TO_INTEGER(array.length);
-
- return InnerArrayFindIndex(predicate, thisArg, array, length);
- },
- 1 /* Set function length */
-);
-
-
// ES6, draft 04-05-14, section 22.1.3.6
DEFINE_METHOD_LEN(
GlobalArray.prototype,
diff --git a/deps/v8/src/js/prologue.js b/deps/v8/src/js/prologue.js
index 08ef3ba520..32f826691d 100644
--- a/deps/v8/src/js/prologue.js
+++ b/deps/v8/src/js/prologue.js
@@ -99,6 +99,42 @@ function PostNatives(utils) {
utils.PostNatives = UNDEFINED;
}
+// ----------------------------------------------------------------------------
+// Object
+
+var iteratorSymbol = ImportNow("iterator_symbol");
+
+// ES6 7.3.9
+function GetMethod(obj, p) {
+ var func = obj[p];
+ if (IS_NULL_OR_UNDEFINED(func)) return UNDEFINED;
+ if (IS_CALLABLE(func)) return func;
+ throw %make_type_error(kCalledNonCallable, typeof func);
+}
+
+// ----------------------------------------------------------------------------
+// Iterator related spec functions.
+
+// ES6 7.4.1 GetIterator(obj, method)
+function GetIterator(obj, method) {
+ if (IS_UNDEFINED(method)) {
+ method = obj[iteratorSymbol];
+ }
+ if (!IS_CALLABLE(method)) {
+ throw %make_type_error(kNotIterable, obj);
+ }
+ var iterator = %_Call(method, obj);
+ if (!IS_RECEIVER(iterator)) {
+ throw %make_type_error(kNotAnIterator, iterator);
+ }
+ return iterator;
+}
+
+
+exports_container.GetIterator = GetIterator;
+exports_container.GetMethod = GetMethod;
+
+
// -----------------------------------------------------------------------
%OptimizeObjectForAddingMultipleProperties(utils, 14);
diff --git a/deps/v8/src/js/proxy.js b/deps/v8/src/js/proxy.js
deleted file mode 100644
index 4b6255a8ff..0000000000
--- a/deps/v8/src/js/proxy.js
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-// ----------------------------------------------------------------------------
-// Imports
-//
-var GlobalProxy = global.Proxy;
-
-//----------------------------------------------------------------------------
-
-//Set up non-enumerable properties of the Proxy object.
-DEFINE_METHOD(
- GlobalProxy,
- revocable(target, handler) {
- var p = new GlobalProxy(target, handler);
- return {proxy: p, revoke: () => %JSProxyRevoke(p)};
- }
-);
-
-})
diff --git a/deps/v8/src/js/v8natives.js b/deps/v8/src/js/v8natives.js
deleted file mode 100644
index 26dada3759..0000000000
--- a/deps/v8/src/js/v8natives.js
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-// ----------------------------------------------------------------------------
-// Imports
-
-var GlobalObject = global.Object;
-var iteratorSymbol = utils.ImportNow("iterator_symbol");
-
-// ----------------------------------------------------------------------------
-// Object
-
-// Set up non-enumerable functions on the Object.prototype object.
-DEFINE_METHOD(
- GlobalObject.prototype,
- // ES6 19.1.3.5 Object.prototype.toLocaleString([reserved1 [,reserved2]])
- toLocaleString() {
- REQUIRE_OBJECT_COERCIBLE(this, "Object.prototype.toLocaleString");
- return this.toString();
- }
-);
-
-// ES6 7.3.9
-function GetMethod(obj, p) {
- var func = obj[p];
- if (IS_NULL_OR_UNDEFINED(func)) return UNDEFINED;
- if (IS_CALLABLE(func)) return func;
- throw %make_type_error(kCalledNonCallable, typeof func);
-}
-
-// ----------------------------------------------------------------------------
-// Iterator related spec functions.
-
-// ES6 7.4.1 GetIterator(obj, method)
-function GetIterator(obj, method) {
- if (IS_UNDEFINED(method)) {
- method = obj[iteratorSymbol];
- }
- if (!IS_CALLABLE(method)) {
- throw %make_type_error(kNotIterable, obj);
- }
- var iterator = %_Call(method, obj);
- if (!IS_RECEIVER(iterator)) {
- throw %make_type_error(kNotAnIterator, iterator);
- }
- return iterator;
-}
-
-// ----------------------------------------------------------------------------
-// Exports
-
-utils.Export(function(to) {
- to.GetIterator = GetIterator;
- to.GetMethod = GetMethod;
-});
-
-})
diff --git a/deps/v8/src/json-parser.cc b/deps/v8/src/json-parser.cc
index 13f65705a3..57e7fff8c5 100644
--- a/deps/v8/src/json-parser.cc
+++ b/deps/v8/src/json-parser.cc
@@ -20,6 +20,38 @@
namespace v8 {
namespace internal {
+namespace {
+
+// A vector-like data structure that uses a larger vector for allocation, and
+// provides limited utility access. The original vector must not be used for the
+// duration, and it may even be reallocated. This allows vector storage to be
+// reused for the properties of sibling objects.
+template <typename Container>
+class VectorSegment {
+ public:
+ using value_type = typename Container::value_type;
+
+ explicit VectorSegment(Container* container)
+ : container_(*container), begin_(container->size()) {}
+ ~VectorSegment() { container_.resize(begin_); }
+
+ Vector<const value_type> GetVector() const {
+ return Vector<const value_type>(container_.data() + begin_,
+ container_.size() - begin_);
+ }
+
+ template <typename T>
+ void push_back(T&& value) {
+ container_.push_back(std::forward<T>(value));
+ }
+
+ private:
+ Container& container_;
+ const typename Container::size_type begin_;
+};
+
+} // namespace
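
VectorSegment is an RAII window over a shared vector: it records the vector's size at construction and truncates back to it at destruction, so each object literal can append its property handles and release them automatically while siblings reuse the same allocation. A self-contained restatement of the idea, outside of V8's types:

    #include <cassert>
    #include <string>
    #include <utility>
    #include <vector>

    // Illustration only; mirrors the class above.
    template <typename Container>
    class Segment {
     public:
      explicit Segment(Container* c) : c_(*c), begin_(c->size()) {}
      ~Segment() { c_.resize(begin_); }
      template <typename T>
      void push_back(T&& v) { c_.push_back(std::forward<T>(v)); }

     private:
      Container& c_;
      const typename Container::size_type begin_;
    };

    int main() {
      std::vector<std::string> storage;  // one backing allocation
      {
        Segment<std::vector<std::string>> outer(&storage);
        outer.push_back("a");
        {
          Segment<std::vector<std::string>> inner(&storage);
          inner.push_back("b");  // nested object borrows the same storage
        }  // inner's entries truncated here
        assert(storage.size() == 1);
      }
      assert(storage.empty());  // outer's entries released too
    }
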
+
MaybeHandle<Object> JsonParseInternalizer::Internalize(Isolate* isolate,
Handle<Object> object,
Handle<Object> reviver) {
@@ -107,11 +139,11 @@ JsonParser<seq_one_byte>::JsonParser(Isolate* isolate, Handle<String> source)
: source_(source),
source_length_(source->length()),
isolate_(isolate),
- factory_(isolate_->factory()),
zone_(isolate_->allocator(), ZONE_NAME),
object_constructor_(isolate_->native_context()->object_function(),
isolate_),
- position_(-1) {
+ position_(-1),
+ properties_(&zone_) {
source_ = String::Flatten(source_);
pretenure_ = (source_length_ >= kPretenureTreshold) ? TENURED : NOT_TENURED;
@@ -164,6 +196,9 @@ MaybeHandle<Object> JsonParser<seq_one_byte>::ParseJson() {
}
Handle<Script> script(factory->NewScript(source_));
+ if (isolate()->NeedsSourcePositionsForProfiling()) {
+ Script::InitLineEnds(script);
+ }
// We send a compile error event because we compile the JSON object in a
// separate source file.
isolate()->debug()->OnCompileError(script);
@@ -333,7 +368,7 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonObject() {
factory()->NewJSObject(object_constructor(), pretenure_);
Handle<Map> map(json_object->map());
int descriptor = 0;
- ZoneVector<Handle<Object>> properties(zone());
+ VectorSegment<ZoneVector<Handle<Object>>> properties(&properties_);
DCHECK_EQ(c0_, '{');
bool transitioning = true;
@@ -424,7 +459,7 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonObject() {
DCHECK(!transitioning);
// Commit the intermediate state to the object and stop transitioning.
- CommitStateToJsonObject(json_object, map, &properties);
+ CommitStateToJsonObject(json_object, map, properties.GetVector());
JSObject::DefinePropertyOrElementIgnoreAttributes(json_object, key, value)
.Check();
@@ -432,7 +467,7 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonObject() {
// If we transitioned until the very end, transition the map now.
if (transitioning) {
- CommitStateToJsonObject(json_object, map, &properties);
+ CommitStateToJsonObject(json_object, map, properties.GetVector());
} else {
while (MatchSkipWhiteSpace(',')) {
HandleScope local_scope(isolate());
@@ -480,15 +515,14 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonObject() {
template <bool seq_one_byte>
void JsonParser<seq_one_byte>::CommitStateToJsonObject(
Handle<JSObject> json_object, Handle<Map> map,
- ZoneVector<Handle<Object>>* properties) {
+ Vector<const Handle<Object>> properties) {
JSObject::AllocateStorageForMap(json_object, map);
DCHECK(!json_object->map()->is_dictionary_map());
DisallowHeapAllocation no_gc;
DescriptorArray* descriptors = json_object->map()->instance_descriptors();
- int length = static_cast<int>(properties->size());
- for (int i = 0; i < length; i++) {
- Handle<Object> value = (*properties)[i];
+ for (int i = 0; i < properties.length(); i++) {
+ Handle<Object> value = properties[i];
// Initializing store.
json_object->WriteToField(i, descriptors->GetDetails(i), *value);
}
@@ -697,7 +731,7 @@ Handle<String> JsonParser<seq_one_byte>::SlowScanJsonString(
String::WriteToFlat(*prefix, dest, start, end);
while (c0_ != '"') {
- // Check for control character (0x00-0x1f) or unterminated string (<0).
+ // Check for control character (0x00-0x1F) or unterminated string (<0).
if (c0_ < 0x20) return Handle<String>::null();
if (count >= length) {
// We need to create a longer sequential string for the result.
@@ -728,13 +762,13 @@ Handle<String> JsonParser<seq_one_byte>::SlowScanJsonString(
SeqStringSet(seq_string, count++, '\x08');
break;
case 'f':
- SeqStringSet(seq_string, count++, '\x0c');
+ SeqStringSet(seq_string, count++, '\x0C');
break;
case 'n':
- SeqStringSet(seq_string, count++, '\x0a');
+ SeqStringSet(seq_string, count++, '\x0A');
break;
case 'r':
- SeqStringSet(seq_string, count++, '\x0d');
+ SeqStringSet(seq_string, count++, '\x0D');
break;
case 't':
SeqStringSet(seq_string, count++, '\x09');
@@ -862,7 +896,7 @@ Handle<String> JsonParser<seq_one_byte>::ScanJsonString() {
int beg_pos = position_;
// Fast case for Latin1 only without escape characters.
do {
- // Check for control character (0x00-0x1f) or unterminated string (<0).
+ // Check for control character (0x00-0x1F) or unterminated string (<0).
if (c0_ < 0x20) return Handle<String>::null();
if (c0_ != '\\') {
if (seq_one_byte || c0_ <= String::kMaxOneByteCharCode) {
diff --git a/deps/v8/src/json-parser.h b/deps/v8/src/json-parser.h
index cab094591f..6566c92e40 100644
--- a/deps/v8/src/json-parser.h
+++ b/deps/v8/src/json-parser.h
@@ -135,7 +135,7 @@ class JsonParser BASE_EMBEDDED {
}
inline Isolate* isolate() { return isolate_; }
- inline Factory* factory() { return factory_; }
+ inline Factory* factory() { return isolate_->factory(); }
inline Handle<JSFunction> object_constructor() { return object_constructor_; }
static const int kInitialSpecialStringLength = 32;
@@ -145,7 +145,7 @@ class JsonParser BASE_EMBEDDED {
Zone* zone() { return &zone_; }
void CommitStateToJsonObject(Handle<JSObject> json_object, Handle<Map> map,
- ZoneVector<Handle<Object>>* properties);
+ Vector<const Handle<Object>> properties);
Handle<String> source_;
int source_length_;
@@ -153,11 +153,13 @@ class JsonParser BASE_EMBEDDED {
PretenureFlag pretenure_;
Isolate* isolate_;
- Factory* factory_;
Zone zone_;
Handle<JSFunction> object_constructor_;
uc32 c0_;
int position_;
+
+ // Property handles are stored here inside ParseJsonObject.
+ ZoneVector<Handle<Object>> properties_;
};
} // namespace internal
diff --git a/deps/v8/src/json-stringifier.cc b/deps/v8/src/json-stringifier.cc
index c2b53a85bd..d77a761b13 100644
--- a/deps/v8/src/json-stringifier.cc
+++ b/deps/v8/src/json-stringifier.cc
@@ -47,39 +47,39 @@ const char* const JsonStringifier::JsonEscapeTable =
"p\0 q\0 r\0 s\0 "
"t\0 u\0 v\0 w\0 "
"x\0 y\0 z\0 {\0 "
- "|\0 }\0 ~\0 \177\0 "
- "\200\0 \201\0 \202\0 \203\0 "
- "\204\0 \205\0 \206\0 \207\0 "
- "\210\0 \211\0 \212\0 \213\0 "
- "\214\0 \215\0 \216\0 \217\0 "
- "\220\0 \221\0 \222\0 \223\0 "
- "\224\0 \225\0 \226\0 \227\0 "
- "\230\0 \231\0 \232\0 \233\0 "
- "\234\0 \235\0 \236\0 \237\0 "
- "\240\0 \241\0 \242\0 \243\0 "
- "\244\0 \245\0 \246\0 \247\0 "
- "\250\0 \251\0 \252\0 \253\0 "
- "\254\0 \255\0 \256\0 \257\0 "
- "\260\0 \261\0 \262\0 \263\0 "
- "\264\0 \265\0 \266\0 \267\0 "
- "\270\0 \271\0 \272\0 \273\0 "
- "\274\0 \275\0 \276\0 \277\0 "
- "\300\0 \301\0 \302\0 \303\0 "
- "\304\0 \305\0 \306\0 \307\0 "
- "\310\0 \311\0 \312\0 \313\0 "
- "\314\0 \315\0 \316\0 \317\0 "
- "\320\0 \321\0 \322\0 \323\0 "
- "\324\0 \325\0 \326\0 \327\0 "
- "\330\0 \331\0 \332\0 \333\0 "
- "\334\0 \335\0 \336\0 \337\0 "
- "\340\0 \341\0 \342\0 \343\0 "
- "\344\0 \345\0 \346\0 \347\0 "
- "\350\0 \351\0 \352\0 \353\0 "
- "\354\0 \355\0 \356\0 \357\0 "
- "\360\0 \361\0 \362\0 \363\0 "
- "\364\0 \365\0 \366\0 \367\0 "
- "\370\0 \371\0 \372\0 \373\0 "
- "\374\0 \375\0 \376\0 \377\0 ";
+ "|\0 }\0 ~\0 \x7F\0 "
+ "\x80\0 \x81\0 \x82\0 \x83\0 "
+ "\x84\0 \x85\0 \x86\0 \x87\0 "
+ "\x88\0 \x89\0 \x8A\0 \x8B\0 "
+ "\x8C\0 \x8D\0 \x8E\0 \x8F\0 "
+ "\x90\0 \x91\0 \x92\0 \x93\0 "
+ "\x94\0 \x95\0 \x96\0 \x97\0 "
+ "\x98\0 \x99\0 \x9A\0 \x9B\0 "
+ "\x9C\0 \x9D\0 \x9E\0 \x9F\0 "
+ "\xA0\0 \xA1\0 \xA2\0 \xA3\0 "
+ "\xA4\0 \xA5\0 \xA6\0 \xA7\0 "
+ "\xA8\0 \xA9\0 \xAA\0 \xAB\0 "
+ "\xAC\0 \xAD\0 \xAE\0 \xAF\0 "
+ "\xB0\0 \xB1\0 \xB2\0 \xB3\0 "
+ "\xB4\0 \xB5\0 \xB6\0 \xB7\0 "
+ "\xB8\0 \xB9\0 \xBA\0 \xBB\0 "
+ "\xBC\0 \xBD\0 \xBE\0 \xBF\0 "
+ "\xC0\0 \xC1\0 \xC2\0 \xC3\0 "
+ "\xC4\0 \xC5\0 \xC6\0 \xC7\0 "
+ "\xC8\0 \xC9\0 \xCA\0 \xCB\0 "
+ "\xCC\0 \xCD\0 \xCE\0 \xCF\0 "
+ "\xD0\0 \xD1\0 \xD2\0 \xD3\0 "
+ "\xD4\0 \xD5\0 \xD6\0 \xD7\0 "
+ "\xD8\0 \xD9\0 \xDA\0 \xDB\0 "
+ "\xDC\0 \xDD\0 \xDE\0 \xDF\0 "
+ "\xE0\0 \xE1\0 \xE2\0 \xE3\0 "
+ "\xE4\0 \xE5\0 \xE6\0 \xE7\0 "
+ "\xE8\0 \xE9\0 \xEA\0 \xEB\0 "
+ "\xEC\0 \xED\0 \xEE\0 \xEF\0 "
+ "\xF0\0 \xF1\0 \xF2\0 \xF3\0 "
+ "\xF4\0 \xF5\0 \xF6\0 \xF7\0 "
+ "\xF8\0 \xF9\0 \xFA\0 \xFB\0 "
+ "\xFC\0 \xFD\0 \xFE\0 \xFF\0 ";
JsonStringifier::JsonStringifier(Isolate* isolate)
: isolate_(isolate), builder_(isolate), gap_(nullptr), indent_(0) {
@@ -682,7 +682,7 @@ bool JsonStringifier::DoNotEscape(uint8_t c) {
template <>
bool JsonStringifier::DoNotEscape(uint16_t c) {
- return c >= '#' && c != '\\' && c != 0x7f;
+ return c >= '#' && c != '\\' && c != 0x7F;
}
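
The '#' cutoff is doing real work: '#' is 0x23, the first code unit above the double quote (0x22), so a single comparison conservatively routes every control character (0x00-0x1F), space, '!' and '"' to the escape-table slow path (which maps harmless characters like space back to themselves), leaving backslash and DEL (0x7F) as the only higher code units that still need it. A quick check of that claim:

    #include <cassert>
    #include <cstdint>

    // Same predicate as the specialization above, restated for testing.
    bool DoNotEscape(uint16_t c) { return c >= '#' && c != '\\' && c != 0x7F; }

    int main() {
      // Everything up to '"' (0x22) takes the slow path, controls included.
      for (uint16_t c = 0x00; c <= 0x22; ++c) assert(!DoNotEscape(c));
      assert(!DoNotEscape('\\') && !DoNotEscape(0x7F));  // the two outliers
      assert(DoNotEscape('#') && DoNotEscape('A') && DoNotEscape(0x20AC));
    }
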
void JsonStringifier::NewLine() {
diff --git a/deps/v8/src/keys.cc b/deps/v8/src/keys.cc
index 9ac0079ac2..4f59c2553c 100644
--- a/deps/v8/src/keys.cc
+++ b/deps/v8/src/keys.cc
@@ -479,14 +479,11 @@ void FilterForEnumerableProperties(Handle<JSReceiver> receiver,
if (type == kIndexed) {
uint32_t number;
CHECK(element->ToUint32(&number));
- attributes = args.Call(
- v8::ToCData<v8::IndexedPropertyQueryCallback>(interceptor->query()),
- number);
+ attributes = args.CallIndexedQuery(interceptor, number);
} else {
CHECK(element->IsName());
- attributes = args.Call(v8::ToCData<v8::GenericNamedPropertyQueryCallback>(
- interceptor->query()),
- Handle<Name>::cast(element));
+ attributes =
+ args.CallNamedQuery(interceptor, Handle<Name>::cast(element));
}
if (!attributes.is_null()) {
@@ -512,20 +509,10 @@ Maybe<bool> CollectInterceptorKeysInternal(Handle<JSReceiver> receiver,
Handle<JSObject> result;
if (!interceptor->enumerator()->IsUndefined(isolate)) {
if (type == kIndexed) {
- v8::IndexedPropertyEnumeratorCallback enum_fun =
- v8::ToCData<v8::IndexedPropertyEnumeratorCallback>(
- interceptor->enumerator());
- const char* log_tag = "interceptor-indexed-enum";
- LOG(isolate, ApiObjectAccess(log_tag, *object));
- result = enum_args.Call(enum_fun);
+ result = enum_args.CallIndexedEnumerator(interceptor);
} else {
DCHECK_EQ(type, kNamed);
- v8::GenericNamedPropertyEnumeratorCallback enum_fun =
- v8::ToCData<v8::GenericNamedPropertyEnumeratorCallback>(
- interceptor->enumerator());
- const char* log_tag = "interceptor-named-enum";
- LOG(isolate, ApiObjectAccess(log_tag, *object));
- result = enum_args.Call(enum_fun);
+ result = enum_args.CallNamedEnumerator(interceptor);
}
}
RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
@@ -790,7 +777,7 @@ Maybe<bool> KeyAccumulator::CollectOwnJSProxyKeys(Handle<JSReceiver> receiver,
return Nothing<bool>();
}
// 4. Let target be the value of the [[ProxyTarget]] internal slot of O.
- Handle<JSReceiver> target(proxy->target(), isolate_);
+ Handle<JSReceiver> target(JSReceiver::cast(proxy->target()), isolate_);
// 5. Let trap be ? GetMethod(handler, "ownKeys").
Handle<Object> trap;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
diff --git a/deps/v8/src/label.h b/deps/v8/src/label.h
index 1dc8849812..eb93397518 100644
--- a/deps/v8/src/label.h
+++ b/deps/v8/src/label.h
@@ -18,7 +18,10 @@ namespace internal {
class Label {
public:
- enum Distance { kNear, kFar };
+ enum Distance {
+ kNear, // near jump: 8 bit displacement (signed)
+ kFar // far jump: 32 bit displacement (signed)
+ };
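
The new comments pin down the encodings: a near jump carries a signed 8-bit displacement, so it can only reach targets within -128..127 bytes, while a far jump carries a signed 32-bit displacement. A sketch of the classification an assembler might perform (not V8's emitter):

    #include <cstdint>
    #include <limits>

    enum class Distance { kNear, kFar };

    // Sketch: choose the encoding from the branch displacement in bytes.
    Distance ClassifyJump(int64_t displacement) {
      if (displacement >= std::numeric_limits<int8_t>::min() &&
          displacement <= std::numeric_limits<int8_t>::max()) {
        return Distance::kNear;  // fits in a signed byte: [-128, 127]
      }
      return Distance::kFar;  // needs the signed 32-bit form
    }
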
Label() = default;
diff --git a/deps/v8/src/layout-descriptor-inl.h b/deps/v8/src/layout-descriptor-inl.h
index c75eea6fd8..93818ef710 100644
--- a/deps/v8/src/layout-descriptor-inl.h
+++ b/deps/v8/src/layout-descriptor-inl.h
@@ -67,10 +67,7 @@ LayoutDescriptor* LayoutDescriptor::SetTagged(int field_index, bool tagged) {
int layout_word_index = 0;
int layout_bit_index = 0;
- if (!GetIndexes(field_index, &layout_word_index, &layout_bit_index)) {
- CHECK(false);
- return this;
- }
+ CHECK(GetIndexes(field_index, &layout_word_index, &layout_bit_index));
uint32_t layout_mask = static_cast<uint32_t>(1) << layout_bit_index;
if (IsSlowLayout()) {
diff --git a/deps/v8/src/layout-descriptor.h b/deps/v8/src/layout-descriptor.h
index 7f8b311f3c..0e6869805c 100644
--- a/deps/v8/src/layout-descriptor.h
+++ b/deps/v8/src/layout-descriptor.h
@@ -7,7 +7,7 @@
#include <iosfwd>
-#include "src/objects.h"
+#include "src/objects/fixed-array.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/libplatform/default-platform.cc b/deps/v8/src/libplatform/default-platform.cc
index 833e37a290..39d9525eff 100644
--- a/deps/v8/src/libplatform/default-platform.cc
+++ b/deps/v8/src/libplatform/default-platform.cc
@@ -10,6 +10,7 @@
#include "include/libplatform/libplatform.h"
#include "src/base/debug/stack_trace.h"
#include "src/base/logging.h"
+#include "src/base/page-allocator.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/time.h"
#include "src/base/sys-info.h"
@@ -84,6 +85,7 @@ DefaultPlatform::DefaultPlatform(
: thread_pool_size_(0),
idle_task_support_(idle_task_support),
tracing_controller_(std::move(tracing_controller)),
+ page_allocator_(new v8::base::PageAllocator()),
time_function_for_testing_(nullptr) {
if (!tracing_controller_) {
tracing::TracingController* controller = new tracing::TracingController();
@@ -254,5 +256,9 @@ Platform::StackTracePrinter DefaultPlatform::GetStackTracePrinter() {
return PrintStackTrace;
}
+v8::PageAllocator* DefaultPlatform::GetPageAllocator() {
+ return page_allocator_.get();
+}
+
} // namespace platform
} // namespace v8
diff --git a/deps/v8/src/libplatform/default-platform.h b/deps/v8/src/libplatform/default-platform.h
index 3280a7aa7c..b73f38a5fe 100644
--- a/deps/v8/src/libplatform/default-platform.h
+++ b/deps/v8/src/libplatform/default-platform.h
@@ -27,6 +27,7 @@ class Thread;
class WorkerThread;
class DefaultForegroundTaskRunner;
class DefaultBackgroundTaskRunner;
+class DefaultPageAllocator;
class V8_PLATFORM_EXPORT DefaultPlatform : public NON_EXPORTED_BASE(Platform) {
public:
@@ -70,6 +71,7 @@ class V8_PLATFORM_EXPORT DefaultPlatform : public NON_EXPORTED_BASE(Platform) {
double CurrentClockTimeMillis() override;
v8::TracingController* GetTracingController() override;
StackTracePrinter GetStackTracePrinter() override;
+ v8::PageAllocator* GetPageAllocator() override;
private:
static const int kMaxThreadPoolSize;
@@ -82,6 +84,7 @@ class V8_PLATFORM_EXPORT DefaultPlatform : public NON_EXPORTED_BASE(Platform) {
foreground_task_runner_map_;
std::unique_ptr<TracingController> tracing_controller_;
+ std::unique_ptr<PageAllocator> page_allocator_;
TimeFunction time_function_for_testing_;
DISALLOW_COPY_AND_ASSIGN(DefaultPlatform);
diff --git a/deps/v8/src/log-utils.cc b/deps/v8/src/log-utils.cc
index b2eb44796d..938a84bffd 100644
--- a/deps/v8/src/log-utils.cc
+++ b/deps/v8/src/log-utils.cc
@@ -42,7 +42,6 @@ Log::Log(Logger* logger, const char* file_name)
if (FLAG_log_all) {
FLAG_log_api = true;
FLAG_log_code = true;
- FLAG_log_gc = true;
FLAG_log_suspect = true;
FLAG_log_handles = true;
FLAG_log_internal_timer_events = true;
@@ -152,7 +151,7 @@ void Log::MessageBuilder::AppendStringPart(String* str, int len) {
// TODO(cbruni): unify escaping.
for (int i = 0; i < len; i++) {
uc32 c = str->Get(i);
- if (c <= 0xff) {
+ if (c <= 0xFF) {
AppendCharacter(static_cast<char>(c));
} else {
// Escape any non-ascii range characters.
@@ -174,7 +173,7 @@ void Log::MessageBuilder::AppendCharacter(char c) {
if (c >= 32 && c <= 126) {
if (c == ',') {
// Escape commas (log field separator) directly.
- os << "\x2c";
+ os << "\\x2C";
} else {
// Directly append any printable ascii character.
os << c;
diff --git a/deps/v8/src/log-utils.h b/deps/v8/src/log-utils.h
index 99ed03f34a..feb14ea1a0 100644
--- a/deps/v8/src/log-utils.h
+++ b/deps/v8/src/log-utils.h
@@ -30,9 +30,9 @@ class Log {
void stop() { is_stopped_ = true; }
static bool InitLogAtStart() {
- return FLAG_log || FLAG_log_api || FLAG_log_code || FLAG_log_gc ||
- FLAG_log_handles || FLAG_log_suspect || FLAG_ll_prof ||
- FLAG_perf_basic_prof || FLAG_perf_prof || FLAG_log_source_code ||
+ return FLAG_log || FLAG_log_api || FLAG_log_code || FLAG_log_handles ||
+ FLAG_log_suspect || FLAG_ll_prof || FLAG_perf_basic_prof ||
+ FLAG_perf_prof || FLAG_log_source_code ||
FLAG_log_internal_timer_events || FLAG_prof_cpp || FLAG_trace_ic ||
FLAG_log_function_events;
}
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index b529df7bbe..f5d5be6848 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -1049,27 +1049,25 @@ void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
// Make sure the script is written to the log file.
Script* script = Script::cast(script_object);
int script_id = script->id();
- if (logged_source_code_.find(script_id) != logged_source_code_.end()) {
- return;
- }
-
- // This script has not been logged yet.
- logged_source_code_.insert(script_id);
- Object* source_object = script->source();
- if (source_object->IsString()) {
- String* source_code = String::cast(source_object);
- msg << "script" << kNext << script_id << kNext;
+ if (logged_source_code_.find(script_id) == logged_source_code_.end()) {
+ // This script has not been logged yet.
+ logged_source_code_.insert(script_id);
+ Object* source_object = script->source();
+ if (source_object->IsString()) {
+ String* source_code = String::cast(source_object);
+ msg << "script" << kNext << script_id << kNext;
+
+ // Log the script name.
+ if (script->name()->IsString()) {
+ msg << String::cast(script->name()) << kNext;
+ } else {
+ msg << "<unknown>" << kNext;
+ }
- // Log the script name.
- if (script->name()->IsString()) {
- msg << String::cast(script->name()) << kNext;
- } else {
- msg << "<unknown>" << kNext;
+ // Log the source code.
+ msg << source_code;
+ msg.WriteToLogFile();
}
-
- // Log the source code.
- msg << source_code;
- msg.WriteToLogFile();
}
// We log source code information in the form:
@@ -1294,34 +1292,6 @@ void Logger::FunctionEvent(const char* reason, Script* script, int script_id,
msg.WriteToLogFile();
}
-void Logger::HeapSampleBeginEvent(const char* space, const char* kind) {
- if (!log_->IsEnabled() || !FLAG_log_gc) return;
- Log::MessageBuilder msg(log_);
- // Using non-relative system time in order to be able to synchronize with
- // external memory profiling events (e.g. DOM memory size).
- msg << "heap-sample-begin" << kNext << space << kNext << kind << kNext;
- msg.Append("%.0f", V8::GetCurrentPlatform()->CurrentClockTimeMillis());
- msg.WriteToLogFile();
-}
-
-
-void Logger::HeapSampleEndEvent(const char* space, const char* kind) {
- if (!log_->IsEnabled() || !FLAG_log_gc) return;
- Log::MessageBuilder msg(log_);
- msg << "heap-sample-end" << kNext << space << kNext << kind;
- msg.WriteToLogFile();
-}
-
-
-void Logger::HeapSampleItemEvent(const char* type, int number, int bytes) {
- if (!log_->IsEnabled() || !FLAG_log_gc) return;
- Log::MessageBuilder msg(log_);
- msg << "heap-sample-item" << kNext << type << kNext << number << kNext
- << bytes;
- msg.WriteToLogFile();
-}
-
-
void Logger::RuntimeCallTimerEvent() {
RuntimeCallStats* stats = isolate_->counters()->runtime_call_stats();
RuntimeCallCounter* counter = stats->current_counter();
@@ -1389,6 +1359,7 @@ void Logger::MapEvent(const char* type, Map* from, Map* to, const char* reason,
int line = -1;
int column = -1;
Address pc = 0;
+
if (!isolate_->bootstrapper()->IsActive()) {
pc = isolate_->GetAbstractPC(&line, &column);
}
@@ -1412,6 +1383,15 @@ void Logger::MapEvent(const char* type, Map* from, Map* to, const char* reason,
msg.WriteToLogFile();
}
+void Logger::MapCreate(Map* map) {
+ if (!log_->IsEnabled() || !FLAG_trace_maps) return;
+ DisallowHeapAllocation no_gc;
+ Log::MessageBuilder msg(log_);
+ msg << "map-create" << kNext << timer_.Elapsed().InMicroseconds() << kNext
+ << reinterpret_cast<void*>(map);
+ msg.WriteToLogFile();
+}
+
void Logger::MapDetails(Map* map) {
if (!log_->IsEnabled() || !FLAG_trace_maps) return;
// Disable logging Map details during bootstrapping since we use LogMaps() to
@@ -1421,9 +1401,11 @@ void Logger::MapDetails(Map* map) {
Log::MessageBuilder msg(log_);
msg << "map-details" << kNext << timer_.Elapsed().InMicroseconds() << kNext
<< reinterpret_cast<void*>(map) << kNext;
- std::ostringstream buffer;
- map->PrintMapDetails(buffer);
- msg << buffer.str().c_str();
+ if (FLAG_trace_maps_details) {
+ std::ostringstream buffer;
+ map->PrintMapDetails(buffer);
+ msg << buffer.str().c_str();
+ }
msg.WriteToLogFile();
}
@@ -1575,6 +1557,16 @@ void Logger::LogCodeObjects() {
}
}
+void Logger::LogBytecodeHandler(interpreter::Bytecode bytecode,
+ interpreter::OperandScale operand_scale,
+ Code* code) {
+ std::string bytecode_name =
+ interpreter::Bytecodes::ToString(bytecode, operand_scale);
+ PROFILE(isolate_,
+ CodeCreateEvent(CodeEventListener::BYTECODE_HANDLER_TAG,
+ AbstractCode::cast(code), bytecode_name.c_str()));
+}
+
void Logger::LogBytecodeHandlers() {
const interpreter::OperandScale kOperandScales[] = {
#define VALUE(Name, _) interpreter::OperandScale::k##Name,
@@ -1590,11 +1582,7 @@ void Logger::LogBytecodeHandlers() {
if (interpreter::Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) {
Code* code = interpreter->GetBytecodeHandler(bytecode, operand_scale);
if (isolate_->heap()->IsDeserializeLazyHandler(code)) continue;
- std::string bytecode_name =
- interpreter::Bytecodes::ToString(bytecode, operand_scale);
- PROFILE(isolate_, CodeCreateEvent(
- CodeEventListener::BYTECODE_HANDLER_TAG,
- AbstractCode::cast(code), bytecode_name.c_str()));
+ LogBytecodeHandler(bytecode, operand_scale, code);
}
}
}
diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h
index 7efa50b8de..8305eb1001 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/log.h
@@ -37,7 +37,7 @@ namespace internal {
//
// --log-all
// Log all events to the file, default is off. This is the same as combining
-// --log-api, --log-code, --log-gc, and --log-regexp.
+// --log-api, --log-code, and --log-regexp.
//
// --log-api
// Log API events to the logfile, default is off. --log-api implies --log.
@@ -46,10 +46,6 @@ namespace internal {
// Log code (create, move, and delete) events to the logfile, default is off.
// --log-code implies --log.
//
-// --log-gc
-// Log GC heap samples after each GC that can be processed by hp2ps, default
-// is off. --log-gc implies --log.
-//
// --log-regexp
// Log creation and use of regular expressions, Default is off.
// --log-regexp implies --log.
@@ -205,21 +201,9 @@ class Logger : public CodeEventListener {
void MapEvent(const char* type, Map* from, Map* to,
const char* reason = nullptr,
HeapObject* name_or_sfi = nullptr);
+ void MapCreate(Map* map);
void MapDetails(Map* map);
- // ==== Events logged by --log-gc. ====
- // Heap sampling events: start, end, and individual types.
- void HeapSampleBeginEvent(const char* space, const char* kind);
- void HeapSampleEndEvent(const char* space, const char* kind);
- void HeapSampleItemEvent(const char* type, int number, int bytes);
- void HeapSampleJSConstructorEvent(const char* constructor,
- int number, int bytes);
- void HeapSampleJSRetainersEvent(const char* constructor,
- const char* event);
- void HeapSampleJSProducerEvent(const char* constructor,
- Address* stack);
- void HeapSampleStats(const char* space, const char* kind,
- intptr_t capacity, intptr_t used);
void SharedLibraryEvent(const std::string& library_path, uintptr_t start,
uintptr_t end, intptr_t aslr_slide);
@@ -258,6 +242,8 @@ class Logger : public CodeEventListener {
void LogCodeObjects();
// Used for logging bytecode handlers found in the snapshot.
void LogBytecodeHandlers();
+ void LogBytecodeHandler(interpreter::Bytecode bytecode,
+ interpreter::OperandScale operand_scale, Code* code);
// Logs all Maps found in the heap.
void LogMaps();
diff --git a/deps/v8/src/lookup.cc b/deps/v8/src/lookup.cc
index 2d3cc3253e..71902dff84 100644
--- a/deps/v8/src/lookup.cc
+++ b/deps/v8/src/lookup.cc
@@ -211,7 +211,7 @@ Handle<JSReceiver> LookupIterator::GetRootForNonJSReceiver(
auto root =
handle(receiver->GetPrototypeChainRootMap(isolate)->prototype(), isolate);
if (root->IsNull(isolate)) {
- unsigned int magic = 0xbbbbbbbb;
+ unsigned int magic = 0xBBBBBBBB;
isolate->PushStackTraceAndDie(magic, *receiver, nullptr, magic);
}
return Handle<JSReceiver>::cast(root);
@@ -237,22 +237,45 @@ void LookupIterator::ReloadPropertyInformation() {
DCHECK(IsFound() || !holder_->HasFastProperties());
}
+namespace {
+bool IsTypedArrayFunctionInAnyContext(Isolate* isolate, JSReceiver* holder) {
+ static uint32_t context_slots[] = {
+#define TYPED_ARRAY_CONTEXT_SLOTS(Type, type, TYPE, ctype, size) \
+ Context::TYPE##_ARRAY_FUN_INDEX,
+
+ TYPED_ARRAYS(TYPED_ARRAY_CONTEXT_SLOTS)
+#undef TYPED_ARRAY_CONTEXT_SLOTS
+ };
+
+ if (!holder->IsJSFunction()) return false;
+
+ return std::any_of(
+ std::begin(context_slots), std::end(context_slots),
+ [=](uint32_t slot) { return isolate->IsInAnyContext(holder, slot); });
+}
+} // namespace
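
The helper expands the per-type constructor slots with an X-macro into a static table, then lets std::any_of answer whether the holder is one of the TypedArray constructors in any context. The same shape in a freestanding example, with hypothetical slot values and a dummy membership test:

    #include <algorithm>
    #include <cstdint>
    #include <iterator>

    // Hypothetical slot table; the real one is generated by TYPED_ARRAYS(...).
    constexpr uint32_t kTypedArrayFunSlots[] = {11, 17, 23};

    // Dummy membership test standing in for Isolate::IsInAnyContext().
    bool IsInAnyContext(const void* holder, uint32_t slot) {
      return holder != nullptr && slot == 17;
    }

    bool IsTypedArrayFunction(const void* holder) {
      return std::any_of(
          std::begin(kTypedArrayFunSlots), std::end(kTypedArrayFunSlots),
          [=](uint32_t slot) { return IsInAnyContext(holder, slot); });
    }
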
+
void LookupIterator::InternalUpdateProtector() {
if (isolate_->bootstrapper()->IsActive()) return;
if (*name_ == heap()->constructor_string()) {
if (!isolate_->IsArraySpeciesLookupChainIntact()) return;
// Setting the constructor property could change an instance's @@species
- if (holder_->IsJSArray()) {
+ if (holder_->IsJSArray() || holder_->IsJSTypedArray()) {
isolate_->CountUsage(
v8::Isolate::UseCounterFeature::kArrayInstanceConstructorModified);
isolate_->InvalidateArraySpeciesProtector();
} else if (holder_->map()->is_prototype_map()) {
DisallowHeapAllocation no_gc;
- // Setting the constructor of Array.prototype of any realm also needs
- // to invalidate the species protector
+ // Setting the constructor of Array.prototype or %TypedArray%.prototype of
+ // any realm also needs to invalidate the species protector.
+      // For typed arrays, we check the prototype of this holder since
+      // TypedArrays have different prototypes for each type, and their parent
+      // prototype points to the same TYPED_ARRAY_PROTOTYPE.
if (isolate_->IsInAnyContext(*holder_,
- Context::INITIAL_ARRAY_PROTOTYPE_INDEX)) {
+ Context::INITIAL_ARRAY_PROTOTYPE_INDEX) ||
+ isolate_->IsInAnyContext(holder_->map()->prototype(),
+ Context::TYPED_ARRAY_PROTOTYPE_INDEX)) {
isolate_->CountUsage(v8::Isolate::UseCounterFeature::
kArrayPrototypeConstructorModified);
isolate_->InvalidateArraySpeciesProtector();
@@ -260,9 +283,10 @@ void LookupIterator::InternalUpdateProtector() {
}
} else if (*name_ == heap()->species_symbol()) {
if (!isolate_->IsArraySpeciesLookupChainIntact()) return;
- // Setting the Symbol.species property of any Array constructor invalidates
- // the species protector
- if (isolate_->IsInAnyContext(*holder_, Context::ARRAY_FUNCTION_INDEX)) {
+ // Setting the Symbol.species property of any Array or TypedArray
+ // constructor invalidates the species protector
+ if (isolate_->IsInAnyContext(*holder_, Context::ARRAY_FUNCTION_INDEX) ||
+ IsTypedArrayFunctionInAnyContext(isolate_, *holder_)) {
isolate_->CountUsage(
v8::Isolate::UseCounterFeature::kArraySpeciesModified);
isolate_->InvalidateArraySpeciesProtector();
@@ -479,6 +503,7 @@ void LookupIterator::ApplyTransitionToDataProperty(Handle<JSObject> receiver) {
DCHECK(receiver.is_identical_to(GetStoreTarget()));
holder_ = receiver;
if (receiver->IsJSGlobalObject()) {
+ JSObject::InvalidatePrototypeChains(receiver->map());
state_ = DATA;
return;
}
@@ -495,6 +520,9 @@ void LookupIterator::ApplyTransitionToDataProperty(Handle<JSObject> receiver) {
Handle<NameDictionary> dictionary(receiver->property_dictionary(),
isolate_);
int entry;
+ if (receiver->map()->is_prototype_map()) {
+ JSObject::InvalidatePrototypeChains(receiver->map());
+ }
dictionary = NameDictionary::Add(dictionary, name(),
isolate_->factory()->uninitialized_value(),
property_details_, &entry);
@@ -521,8 +549,8 @@ void LookupIterator::Delete() {
bool is_prototype_map = holder->map()->is_prototype_map();
RuntimeCallTimerScope stats_scope(
isolate_, is_prototype_map
- ? &RuntimeCallStats::PrototypeObject_DeleteProperty
- : &RuntimeCallStats::Object_DeleteProperty);
+ ? RuntimeCallCounterId::kPrototypeObject_DeleteProperty
+ : RuntimeCallCounterId::kObject_DeleteProperty);
PropertyNormalizationMode mode =
is_prototype_map ? KEEP_INOBJECT_PROPERTIES : CLEAR_INOBJECT_PROPERTIES;
@@ -638,9 +666,12 @@ void LookupIterator::TransitionToAccessorPair(Handle<Object> pair,
ReloadPropertyInformation<true>();
} else {
- PropertyNormalizationMode mode = receiver->map()->is_prototype_map()
- ? KEEP_INOBJECT_PROPERTIES
- : CLEAR_INOBJECT_PROPERTIES;
+ PropertyNormalizationMode mode = CLEAR_INOBJECT_PROPERTIES;
+ if (receiver->map()->is_prototype_map()) {
+ JSObject::InvalidatePrototypeChains(receiver->map());
+ mode = KEEP_INOBJECT_PROPERTIES;
+ }
+
// Normalize object to make this operation simple.
JSObject::NormalizeProperties(receiver, mode, 0,
"TransitionToAccessorPair");
diff --git a/deps/v8/src/machine-type.h b/deps/v8/src/machine-type.h
index 4502b2fdc2..63e3c7a462 100644
--- a/deps/v8/src/machine-type.h
+++ b/deps/v8/src/machine-type.h
@@ -39,7 +39,7 @@ static_assert(static_cast<int>(MachineRepresentation::kLastRepresentation) <
kIntSize * kBitsPerByte,
"Bit masks of MachineRepresentation should fit in an int");
-const char* MachineReprToString(MachineRepresentation);
+V8_EXPORT_PRIVATE const char* MachineReprToString(MachineRepresentation);
enum class MachineSemantic : uint8_t {
kNone,
diff --git a/deps/v8/src/messages.cc b/deps/v8/src/messages.cc
index e9d2be1843..5876e5f5e4 100644
--- a/deps/v8/src/messages.cc
+++ b/deps/v8/src/messages.cc
@@ -12,7 +12,7 @@
#include "src/keys.h"
#include "src/objects/frame-array-inl.h"
#include "src/string-builder.h"
-#include "src/wasm/wasm-heap.h"
+#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-objects.h"
namespace v8 {
@@ -189,11 +189,10 @@ std::unique_ptr<char[]> MessageHandler::GetLocalizedMessage(
namespace {
Object* EvalFromFunctionName(Isolate* isolate, Handle<Script> script) {
- if (script->eval_from_shared()->IsUndefined(isolate))
+ if (!script->has_eval_from_shared())
return isolate->heap()->undefined_value();
- Handle<SharedFunctionInfo> shared(
- SharedFunctionInfo::cast(script->eval_from_shared()));
+ Handle<SharedFunctionInfo> shared(script->eval_from_shared());
// Find the name of the function calling eval.
if (shared->name()->BooleanValue()) {
return shared->name();
@@ -203,11 +202,10 @@ Object* EvalFromFunctionName(Isolate* isolate, Handle<Script> script) {
}
Object* EvalFromScript(Isolate* isolate, Handle<Script> script) {
- if (script->eval_from_shared()->IsUndefined(isolate))
+ if (!script->has_eval_from_shared())
return isolate->heap()->undefined_value();
- Handle<SharedFunctionInfo> eval_from_shared(
- SharedFunctionInfo::cast(script->eval_from_shared()));
+ Handle<SharedFunctionInfo> eval_from_shared(script->eval_from_shared());
return eval_from_shared->script()->IsScript()
? eval_from_shared->script()
: isolate->heap()->undefined_value();
@@ -674,10 +672,10 @@ Handle<Object> WasmStackFrame::GetFunction() const {
Handle<Object> WasmStackFrame::GetFunctionName() {
Handle<Object> name;
- Handle<WasmCompiledModule> compiled_module(wasm_instance_->compiled_module(),
- isolate_);
- if (!WasmCompiledModule::GetFunctionNameOrNull(isolate_, compiled_module,
- wasm_func_index_)
+ Handle<WasmSharedModuleData> shared(
+ wasm_instance_->compiled_module()->shared(), isolate_);
+ if (!WasmSharedModuleData::GetFunctionNameOrNull(isolate_, shared,
+ wasm_func_index_)
.ToHandle(&name)) {
name = isolate_->factory()->null_value();
}
@@ -687,12 +685,13 @@ Handle<Object> WasmStackFrame::GetFunctionName() {
MaybeHandle<String> WasmStackFrame::ToString() {
IncrementalStringBuilder builder(isolate_);
- Handle<WasmCompiledModule> compiled_module(wasm_instance_->compiled_module(),
- isolate_);
+ Handle<WasmSharedModuleData> shared(
+ wasm_instance_->compiled_module()->shared(), isolate_);
MaybeHandle<String> module_name =
- WasmCompiledModule::GetModuleNameOrNull(isolate_, compiled_module);
- MaybeHandle<String> function_name = WasmCompiledModule::GetFunctionNameOrNull(
- isolate_, compiled_module, wasm_func_index_);
+ WasmSharedModuleData::GetModuleNameOrNull(isolate_, shared);
+ MaybeHandle<String> function_name =
+ WasmSharedModuleData::GetFunctionNameOrNull(isolate_, shared,
+ wasm_func_index_);
bool has_name = !module_name.is_null() || !function_name.is_null();
if (has_name) {
if (module_name.is_null()) {
@@ -738,7 +737,8 @@ Handle<Object> WasmStackFrame::Null() const {
bool WasmStackFrame::HasScript() const { return true; }
Handle<Script> WasmStackFrame::GetScript() const {
- return handle(wasm_instance_->compiled_module()->script(), isolate_);
+ return handle(wasm_instance_->compiled_module()->shared()->script(),
+ isolate_);
}
AsmJsWasmStackFrame::AsmJsWasmStackFrame() {}
@@ -762,13 +762,15 @@ Handle<Object> AsmJsWasmStackFrame::GetFunction() const {
}
Handle<Object> AsmJsWasmStackFrame::GetFileName() {
- Handle<Script> script(wasm_instance_->compiled_module()->script(), isolate_);
+ Handle<Script> script(wasm_instance_->compiled_module()->shared()->script(),
+ isolate_);
DCHECK(script->IsUserJavaScript());
return handle(script->name(), isolate_);
}
Handle<Object> AsmJsWasmStackFrame::GetScriptNameOrSourceUrl() {
- Handle<Script> script(wasm_instance_->compiled_module()->script(), isolate_);
+ Handle<Script> script(wasm_instance_->compiled_module()->shared()->script(),
+ isolate_);
DCHECK_EQ(Script::TYPE_NORMAL, script->type());
return ScriptNameOrSourceUrl(script, isolate_);
}
@@ -780,24 +782,26 @@ int AsmJsWasmStackFrame::GetPosition() const {
? Handle<AbstractCode>::cast(code_.GetCode())->SourcePosition(offset_)
: FrameSummary::WasmCompiledFrameSummary::GetWasmSourcePosition(
code_.GetWasmCode(), offset_);
- Handle<WasmCompiledModule> compiled_module(wasm_instance_->compiled_module(),
- isolate_);
+ Handle<WasmSharedModuleData> shared(
+ wasm_instance_->compiled_module()->shared(), isolate_);
DCHECK_LE(0, byte_offset);
- return WasmCompiledModule::GetSourcePosition(
- compiled_module, wasm_func_index_, static_cast<uint32_t>(byte_offset),
+ return WasmSharedModuleData::GetSourcePosition(
+ shared, wasm_func_index_, static_cast<uint32_t>(byte_offset),
is_at_number_conversion_);
}
int AsmJsWasmStackFrame::GetLineNumber() {
DCHECK_LE(0, GetPosition());
- Handle<Script> script(wasm_instance_->compiled_module()->script(), isolate_);
+ Handle<Script> script(wasm_instance_->compiled_module()->shared()->script(),
+ isolate_);
DCHECK(script->IsUserJavaScript());
return Script::GetLineNumber(script, GetPosition()) + 1;
}
int AsmJsWasmStackFrame::GetColumnNumber() {
DCHECK_LE(0, GetPosition());
- Handle<Script> script(wasm_instance_->compiled_module()->script(), isolate_);
+ Handle<Script> script(wasm_instance_->compiled_module()->shared()->script(),
+ isolate_);
DCHECK(script->IsUserJavaScript());
return Script::GetColumnNumber(script, GetPosition()) + 1;
}
diff --git a/deps/v8/src/messages.h b/deps/v8/src/messages.h
index bf0c8db355..923535517a 100644
--- a/deps/v8/src/messages.h
+++ b/deps/v8/src/messages.h
@@ -267,6 +267,8 @@ class ErrorUtils : public AllStatic {
T(ApplyNonFunction, \
"Function.prototype.apply was called on %, which is a % and not a " \
"function") \
+ T(ArgumentsDisallowedInInitializer, \
+ "'arguments' is not allowed in class field initializer") \
T(ArrayBufferTooShort, \
"Derived ArrayBuffer constructor created a buffer which was too small") \
T(ArrayBufferSpeciesThis, \
@@ -308,7 +310,7 @@ class ErrorUtils : public AllStatic {
T(ConstructorNotFunction, "Constructor % requires 'new'") \
T(ConstructorNotReceiver, "The .constructor property is not an object") \
T(CurrencyCode, "Currency code is required with currency style.") \
- T(CyclicModuleDependency, "Detected cycle while resolving name '%'") \
+ T(CyclicModuleDependency, "Detected cycle while resolving name '%' in '%'") \
T(DataViewNotArrayBuffer, \
"First argument to DataView constructor must be an ArrayBuffer") \
T(DateType, "this is not a Date object.") \
@@ -520,6 +522,7 @@ class ErrorUtils : public AllStatic {
T(UnsupportedSuper, "Unsupported reference to 'super'") \
/* RangeError */ \
T(BigIntDivZero, "Division by zero") \
+ T(BigIntNegativeExponent, "Exponent must be positive") \
T(BigIntTooBig, "Maximum BigInt size exceeded") \
T(DateRange, "Provided date is not in valid range.") \
T(ExpectedTimezoneID, \
@@ -563,7 +566,7 @@ class ErrorUtils : public AllStatic {
T(ValueOutOfRange, "Value % out of range for % options property %") \
/* SyntaxError */ \
T(AmbiguousExport, \
- "The requested module contains conflicting star exports for name '%'") \
+ "The requested module '%' contains conflicting star exports for name '%'") \
T(BadGetterArity, "Getter must not have any formal parameters.") \
T(BadSetterArity, "Setter must have exactly one formal parameter.") \
T(BigIntInvalidString, "Invalid BigInt string") \
@@ -698,7 +701,7 @@ class ErrorUtils : public AllStatic {
"Lexical declaration cannot appear in a single-statement context") \
T(UnknownLabel, "Undefined label '%'") \
T(UnresolvableExport, \
- "The requested module does not provide an export named '%'") \
+ "The requested module '%' does not provide an export named '%'") \
T(UnterminatedArgList, "missing ) after argument list") \
T(UnterminatedRegExp, "Invalid regular expression: missing /") \
T(UnterminatedTemplate, "Unterminated template literal") \
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index e42210ea0e..803c16b829 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -77,15 +77,13 @@ void RelocInfo::apply(intptr_t delta) {
Address RelocInfo::target_address() {
- DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- return Assembler::target_address_at(pc_, host_);
+ DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
+ return Assembler::target_address_at(pc_, constant_pool_);
}
Address RelocInfo::target_address_address() {
- DCHECK(IsCodeTarget(rmode_) ||
- IsRuntimeEntry(rmode_) ||
- rmode_ == EMBEDDED_OBJECT ||
- rmode_ == EXTERNAL_REFERENCE);
+ DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_) ||
+ rmode_ == EMBEDDED_OBJECT || rmode_ == EXTERNAL_REFERENCE);
// Read the address of the word containing the target_address in an
// instruction stream.
// The only architecture-independent user of this function is the serializer.
@@ -123,18 +121,6 @@ int RelocInfo::target_address_size() {
return Assembler::kSpecialTargetSize;
}
-Address Assembler::target_address_at(Address pc, Code* code) {
- Address constant_pool = code ? code->constant_pool() : nullptr;
- return target_address_at(pc, constant_pool);
-}
-
-void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
- Address target,
- ICacheFlushMode icache_flush_mode) {
- Address constant_pool = code ? code->constant_pool() : nullptr;
- set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
-}
-
Address Assembler::target_address_from_return_address(Address pc) {
return pc - kCallTargetAddressOffset;
}
@@ -146,12 +132,12 @@ void Assembler::deserialization_set_special_target_at(
set_target_address_at(
isolate,
instruction_payload - (kInstructionsFor32BitConstant - 1) * kInstrSize,
- code, target);
+ code ? code->constant_pool() : nullptr, target);
} else {
set_target_address_at(
isolate,
- instruction_payload - kInstructionsFor32BitConstant * kInstrSize, code,
- target);
+ instruction_payload - kInstructionsFor32BitConstant * kInstrSize,
+ code ? code->constant_pool() : nullptr, target);
}
}
@@ -200,21 +186,21 @@ void Assembler::deserialization_set_target_internal_reference_at(
HeapObject* RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return HeapObject::cast(
- reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_)));
+ return HeapObject::cast(reinterpret_cast<Object*>(
+ Assembler::target_address_at(pc_, constant_pool_)));
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Handle<HeapObject>(
- reinterpret_cast<HeapObject**>(Assembler::target_address_at(pc_, host_)));
+ return Handle<HeapObject>(reinterpret_cast<HeapObject**>(
+ Assembler::target_address_at(pc_, constant_pool_)));
}
void RelocInfo::set_target_object(HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(target->GetIsolate(), pc_, host_,
+ Assembler::set_target_address_at(target->GetIsolate(), pc_, constant_pool_,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
@@ -227,7 +213,7 @@ void RelocInfo::set_target_object(HeapObject* target,
Address RelocInfo::target_external_reference() {
DCHECK(rmode_ == EXTERNAL_REFERENCE);
- return Assembler::target_address_at(pc_, host_);
+ return Assembler::target_address_at(pc_, constant_pool_);
}
@@ -281,7 +267,7 @@ void RelocInfo::WipeOut(Isolate* isolate) {
} else if (IsInternalReferenceEncoded(rmode_)) {
Assembler::set_target_internal_reference_encoded_at(pc_, nullptr);
} else {
- Assembler::set_target_address_at(isolate, pc_, host_, nullptr);
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_, nullptr);
}
}
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index b5719a3add..bd540346c0 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -192,21 +192,23 @@ bool RelocInfo::IsInConstantPool() {
}
Address RelocInfo::embedded_address() const {
- return Assembler::target_address_at(pc_, host_);
+ return Assembler::target_address_at(pc_, constant_pool_);
}
uint32_t RelocInfo::embedded_size() const {
- return reinterpret_cast<uint32_t>(Assembler::target_address_at(pc_, host_));
+ return reinterpret_cast<uint32_t>(
+ Assembler::target_address_at(pc_, constant_pool_));
}
void RelocInfo::set_embedded_address(Isolate* isolate, Address address,
ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate, pc_, host_, address, flush_mode);
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_, address,
+ flush_mode);
}
void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate, pc_, host_,
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_,
reinterpret_cast<Address>(size), flush_mode);
}
@@ -308,7 +310,7 @@ const Instr kSwRegFpNegOffsetPattern =
SW | (fp.code() << kRsShift) | (kNegOffset & kImm16Mask); // NOLINT
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
-const Instr kLwSwInstrTypeMask = 0xffe00000;
+const Instr kLwSwInstrTypeMask = 0xFFE00000;
const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;
@@ -788,7 +790,7 @@ uint32_t Assembler::CreateTargetAddress(Instr instr_lui, Instr instr_jic) {
// Use just lui and jic instructions. Insert the lower part of the target
// address into the jic offset part. Since jic sign-extends the offset and
// then adds it to the register, before that addition the difference between
// the upper part of the target address and the
-// upper part of the sign-extended offset (0xffff or 0x0000), will be inserted
+// upper part of the sign-extended offset (0xFFFF or 0x0000), will be inserted
// into the jic register with a lui instruction.
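// A hedged worked example of that fix-up: for target 0x12348000 the low half
// 0x8000 sign-extends to 0xFFFF8000, so lui must load 0x1235 (the upper half
// plus one), because 0x12350000 + 0xFFFF8000 == 0x12348000.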
void Assembler::UnpackTargetAddress(uint32_t address, int16_t& lui_offset,
int16_t& jic_offset) {
@@ -2001,7 +2003,7 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
// about -64KB to about +64KB, allowing further addition of 4 when accessing
// 64-bit variables with two 32-bit accesses.
constexpr int32_t kMinOffsetForSimpleAdjustment =
- 0x7ff8; // Max int16_t that's a multiple of 8.
+ 0x7FF8; // Max int16_t that's a multiple of 8.
constexpr int32_t kMaxOffsetForSimpleAdjustment =
2 * kMinOffsetForSimpleAdjustment;
if (0 <= src.offset() && src.offset() <= kMaxOffsetForSimpleAdjustment) {
@@ -2237,7 +2239,7 @@ void Assembler::aluipc(Register rs, int16_t imm16) {
// Break / Trap instructions.
void Assembler::break_(uint32_t code, bool break_as_stop) {
- DCHECK_EQ(code & ~0xfffff, 0);
+ DCHECK_EQ(code & ~0xFFFFF, 0);
// We need to invalidate breaks that could be stops as well because the
// simulator expects a char pointer after the stop instruction.
// See constants-mips.h for explanation.
@@ -2494,7 +2496,7 @@ void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
uint64_t i;
memcpy(&i, &d, 8);
- *lo = i & 0xffffffff;
+ *lo = i & 0xFFFFFFFF;
*hi = i >> 32;
}
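// Hedged worked example: for d == 1.0 the IEEE-754 bit pattern is
// 0x3FF0000000000000, so *lo == 0x00000000 and *hi == 0x3FF00000.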
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index 76f3245e2c..4c68e730b3 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -238,6 +238,7 @@ int ToNumber(Register reg);
Register ToRegister(int num);
+constexpr bool kPadArguments = false;
constexpr bool kSimpleFPAliasing = true;
constexpr bool kSimdMaskRegisters = false;
@@ -581,10 +582,6 @@ class Assembler : public AssemblerBase {
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
set_target_address_at(isolate, pc, target, icache_flush_mode);
}
- INLINE(static Address target_address_at(Address pc, Code* code));
- INLINE(static void set_target_address_at(
- Isolate* isolate, Address pc, Code* code, Address target,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
static void set_target_value_at(
Isolate* isolate, Address pc, uint32_t target,
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index 3485e146ea..7ae3451f34 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -238,7 +238,8 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// (happens only when input is MIN_INT).
__ Branch(&bail_out, gt, zero_reg, Operand(scratch));
__ bind(&positive_exponent);
- __ Assert(ge, kUnexpectedNegativeValue, scratch, Operand(zero_reg));
+ __ Assert(ge, AbortReason::kUnexpectedNegativeValue, scratch,
+ Operand(zero_reg));
Label while_true, no_carry, loop_end;
__ bind(&while_true);
@@ -527,7 +528,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// caller fp |
// function slot | entry frame
// context slot |
- // bad fp (0xff...f) |
+ // bad fp (0xFF...F) |
// callee saved registers + ra
// 4 args slots
// args
@@ -589,13 +590,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// callee saved registers + ra
// 4 args slots
// args
-
- if (type() == StackFrame::CONSTRUCT_ENTRY) {
- __ Call(BUILTIN_CODE(isolate, JSConstructEntryTrampoline),
- RelocInfo::CODE_TARGET);
- } else {
- __ Call(BUILTIN_CODE(isolate, JSEntryTrampoline), RelocInfo::CODE_TARGET);
- }
+ __ Call(EntryTrampoline(), RelocInfo::CODE_TARGET);
// Unlink this frame from the handler chain.
__ PopStackHandler();
@@ -646,8 +641,8 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) {
// filled with kZapValue by the GC.
// Dereference the address and check for this.
__ lw(t0, MemOperand(t9));
- __ Assert(ne, kReceivedInvalidReturnAddress, t0,
- Operand(reinterpret_cast<uint32_t>(kZapValue)));
+ __ Assert(ne, AbortReason::kReceivedInvalidReturnAddress, t0,
+ Operand(reinterpret_cast<uint32_t>(kZapValue)));
}
__ Jump(t9);
}
@@ -761,7 +756,7 @@ static void CreateArrayDispatch(MacroAssembler* masm,
}
// If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
} else {
UNREACHABLE();
}
@@ -803,7 +798,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
if (FLAG_debug_code) {
__ lw(t1, FieldMemOperand(a2, 0));
__ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
- __ Assert(eq, kExpectedAllocationSite, t1, Operand(at));
+ __ Assert(eq, AbortReason::kExpectedAllocationSite, t1, Operand(at));
}
// Save the resulting elements kind in type info. We can't just store a3
@@ -826,7 +821,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
}
// If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
} else {
UNREACHABLE();
}
@@ -901,11 +896,11 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ lw(t0, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ SmiTst(t0, at);
- __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
- at, Operand(zero_reg));
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, at,
+ Operand(zero_reg));
__ GetObjectType(t0, t0, t1);
- __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
- t1, Operand(MAP_TYPE));
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction, t1,
+ Operand(MAP_TYPE));
// We should either have undefined in a2 or a valid AllocationSite
__ AssertUndefinedOrAllocationSite(a2, t0);
@@ -983,11 +978,11 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
__ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ SmiTst(a3, at);
- __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
- at, Operand(zero_reg));
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, at,
+ Operand(zero_reg));
__ GetObjectType(a3, a3, t0);
- __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
- t0, Operand(MAP_TYPE));
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction, t0,
+ Operand(MAP_TYPE));
}
// Figure out the right elements kind.
@@ -1002,8 +997,10 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
if (FLAG_debug_code) {
Label done;
__ Branch(&done, eq, a3, Operand(PACKED_ELEMENTS));
- __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray, a3,
- Operand(HOLEY_ELEMENTS));
+ __ Assert(
+ eq,
+ AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray,
+ a3, Operand(HOLEY_ELEMENTS));
__ bind(&done);
}
@@ -1103,7 +1100,8 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ sw(s0, MemOperand(s3, kNextOffset));
if (__ emit_debug_code()) {
__ lw(a1, MemOperand(s3, kLevelOffset));
- __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2));
+ __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, a1,
+ Operand(s2));
}
__ Subu(s2, s2, Operand(1));
__ sw(s2, MemOperand(s3, kLevelOffset));
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
index 6205bcd202..c07422ff5f 100644
--- a/deps/v8/src/mips/codegen-mips.cc
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -24,8 +24,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
return stub;
#else
size_t allocated = 0;
- byte* buffer =
- AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
+ byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@@ -97,7 +96,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
// copied and a3 to the dst pointer after all the 64 byte chunks have been
// copied. We will loop, incrementing a0 and a1 until a0 equals a3.
__ bind(&aligned);
- __ andi(t8, a2, 0x3f);
+ __ andi(t8, a2, 0x3F);
__ beq(a2, t8, &chkw); // Less than 64?
__ subu(a3, a2, t8); // In delay slot.
__ addu(a3, a0, a3); // Now a3 is the final dst after loop.
@@ -180,7 +179,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
// down to chk1w to handle the tail end of the copy.
__ bind(&chkw);
__ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
- __ andi(t8, a2, 0x1f);
+ __ andi(t8, a2, 0x1F);
__ beq(a2, t8, &chk1w); // Less than 32?
__ nop(); // In delay slot.
__ lw(t0, MemOperand(a1));
@@ -264,7 +263,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
// the dst pointer after all the 64 byte chunks have been copied. We will
// loop, incrementing a0 and a1 until a0 equals a3.
__ bind(&ua_chk16w);
- __ andi(t8, a2, 0x3f);
+ __ andi(t8, a2, 0x3F);
__ beq(a2, t8, &ua_chkw);
__ subu(a3, a2, t8); // In delay slot.
__ addu(a3, a0, a3);
@@ -436,7 +435,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
// ua_chk1w to handle the tail end of the copy.
__ bind(&ua_chkw);
__ Pref(pref_hint_load, MemOperand(a1));
- __ andi(t8, a2, 0x1f);
+ __ andi(t8, a2, 0x1F);
__ beq(a2, t8, &ua_chk1w);
__ nop(); // In delay slot.
@@ -545,8 +544,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, allocated);
- CHECK(base::OS::SetPermissions(buffer, allocated,
- base::OS::MemoryPermission::kReadExecute));
+ CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
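The function follows the usual W^X code-generation flow; a hedged outline using only the names visible in this hunk:

// 1. AllocatePage(...)                      -> writable page for generated code
// 2. MacroAssembler emits the stub into the buffer
// 3. Assembler::FlushICache(...)            -> make the stores visible to ifetch
// 4. SetPermissions(..., kReadExecute)      -> flip the page to read+execute
// 5. FUNCTION_CAST<MemCopyUint8Function>()  -> call the buffer as a C function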
@@ -557,8 +555,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
return nullptr;
#else
size_t allocated = 0;
- byte* buffer =
- AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
+ byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@@ -574,8 +571,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, allocated);
- CHECK(base::OS::SetPermissions(buffer, allocated,
- base::OS::MemoryPermission::kReadExecute));
+ CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}
diff --git a/deps/v8/src/mips/disasm-mips.cc b/deps/v8/src/mips/disasm-mips.cc
index 420453aad0..f27bdc9b68 100644
--- a/deps/v8/src/mips/disasm-mips.cc
+++ b/deps/v8/src/mips/disasm-mips.cc
@@ -421,7 +421,7 @@ void Decoder::PrintPCImm21(Instruction* instr, int delta_pc, int n_bits) {
void Decoder::PrintXImm26(Instruction* instr) {
uint32_t target = static_cast<uint32_t>(instr->Imm26Value())
<< kImmFieldShift;
- target = (reinterpret_cast<uint32_t>(instr) & ~0xfffffff) | target;
+ target = (reinterpret_cast<uint32_t>(instr) & ~0xFFFFFFF) | target;
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", target);
}
@@ -456,7 +456,7 @@ void Decoder::PrintPCImm26(Instruction* instr, int delta_pc, int n_bits) {
// PC[GPRLEN-1 .. 28] || instr_index26 || 00
void Decoder::PrintPCImm26(Instruction* instr) {
int32_t imm26 = instr->Imm26Value();
- uint32_t pc_mask = ~0xfffffff;
+ uint32_t pc_mask = ~0xFFFFFFF;
uint32_t pc = ((uint32_t)(instr + 1) & pc_mask) | (imm26 << 2);
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "%s",
diff --git a/deps/v8/src/mips/interface-descriptors-mips.cc b/deps/v8/src/mips/interface-descriptors-mips.cc
index 4d8b9966fa..795fdc4af8 100644
--- a/deps/v8/src/mips/interface-descriptors-mips.cc
+++ b/deps/v8/src/mips/interface-descriptors-mips.cc
@@ -43,8 +43,6 @@ const Register LoadDescriptor::SlotRegister() { return a0; }
const Register LoadWithVectorDescriptor::VectorRegister() { return a3; }
-const Register LoadICProtoArrayDescriptor::HandlerRegister() { return t0; }
-
const Register StoreDescriptor::ReceiverRegister() { return a1; }
const Register StoreDescriptor::NameRegister() { return a2; }
const Register StoreDescriptor::ValueRegister() { return a0; }
@@ -202,6 +200,11 @@ void TransitionElementsKindDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
+void AbortJSDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index de5de02f09..5c89467cd8 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -296,7 +296,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
lw(scratch, MemOperand(address));
- Assert(eq, kWrongAddressOrValuePassedToRecordWrite, scratch,
+ Assert(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite, scratch,
Operand(value));
}
@@ -825,7 +825,7 @@ void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
rotrv(rd, rs, rt.rm());
} else {
- rotr(rd, rs, rt.immediate() & 0x1f);
+ rotr(rd, rs, rt.immediate() & 0x1F);
}
} else {
if (rt.is_reg()) {
@@ -841,8 +841,8 @@ void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) {
} else {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- srl(scratch, rs, rt.immediate() & 0x1f);
- sll(rd, rs, (0x20 - (rt.immediate() & 0x1f)) & 0x1f);
+ srl(scratch, rs, rt.immediate() & 0x1F);
+ sll(rd, rs, (0x20 - (rt.immediate() & 0x1F)) & 0x1F);
or_(rd, rd, scratch);
}
}
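For targets without the rotate instruction, the srl/sll/or fallback above synthesizes the rotate; a hedged sketch of the value it computes, in plain C++:

#include <cstdint>

// What the three-instruction fallback computes for an immediate rotate.
uint32_t RotateRight32(uint32_t value, uint32_t amount) {
  amount &= 0x1F;  // rotate amount is taken mod 32, as in the emitted code
  return (value >> amount) | (value << ((32 - amount) & 0x1F));
}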
@@ -3763,9 +3763,11 @@ void MacroAssembler::MaybeDropFrames() {
void MacroAssembler::PushStackHandler() {
// Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+ Push(Smi::kZero); // Padding.
+
// Link the current handler as the next handler.
li(t2,
Operand(ExternalReference(IsolateAddressId::kHandlerAddress, isolate())));
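With the padding push, the handler now occupies two slots; a hedged picture of the stack once the link is pushed by the code that follows (lowest address first, kNextOffset == 0):

// sp + 0: next handler address (pushed after this hunk's excerpt ends)
// sp + 4: Smi::kZero padding   (brings kSize up to 2 * kPointerSize)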
@@ -3898,7 +3900,8 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
}
if (FLAG_debug_code) {
- Check(lo, kStackAccessBelowStackPointer, src_reg, Operand(dst_reg));
+ Check(lo, AbortReason::kStackAccessBelowStackPointer, src_reg,
+ Operand(dst_reg));
}
// Restore caller's frame pointer and return address now as they will be
@@ -4491,13 +4494,13 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
// -----------------------------------------------------------------------------
// Debugging.
-void TurboAssembler::Assert(Condition cc, BailoutReason reason, Register rs,
+void TurboAssembler::Assert(Condition cc, AbortReason reason, Register rs,
Operand rt) {
if (emit_debug_code())
Check(cc, reason, rs, rt);
}
-void TurboAssembler::Check(Condition cc, BailoutReason reason, Register rs,
+void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs,
Operand rt) {
Label L;
Branch(&L, cc, rs, rt);
@@ -4506,11 +4509,11 @@ void TurboAssembler::Check(Condition cc, BailoutReason reason, Register rs,
bind(&L);
}
-void TurboAssembler::Abort(BailoutReason reason) {
+void TurboAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
#ifdef DEBUG
- const char* msg = GetBailoutReason(reason);
+ const char* msg = GetAbortReason(reason);
if (msg != nullptr) {
RecordComment("Abort message: ");
RecordComment(msg);
@@ -4823,7 +4826,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
andi(scratch, object, kSmiTagMask);
- Check(ne, kOperandIsASmi, scratch, Operand(zero_reg));
+ Check(ne, AbortReason::kOperandIsASmi, scratch, Operand(zero_reg));
}
}
@@ -4834,7 +4837,7 @@ void MacroAssembler::AssertSmi(Register object) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
andi(scratch, object, kSmiTagMask);
- Check(eq, kOperandIsASmi, scratch, Operand(zero_reg));
+ Check(eq, AbortReason::kOperandIsASmi, scratch, Operand(zero_reg));
}
}
@@ -4842,9 +4845,11 @@ void MacroAssembler::AssertFixedArray(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
- Check(ne, kOperandIsASmiAndNotAFixedArray, t8, Operand(zero_reg));
+ Check(ne, AbortReason::kOperandIsASmiAndNotAFixedArray, t8,
+ Operand(zero_reg));
GetObjectType(object, t8, t8);
- Check(eq, kOperandIsNotAFixedArray, t8, Operand(FIXED_ARRAY_TYPE));
+ Check(eq, AbortReason::kOperandIsNotAFixedArray, t8,
+ Operand(FIXED_ARRAY_TYPE));
}
}
@@ -4852,9 +4857,11 @@ void MacroAssembler::AssertFunction(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
- Check(ne, kOperandIsASmiAndNotAFunction, t8, Operand(zero_reg));
+ Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, t8,
+ Operand(zero_reg));
GetObjectType(object, t8, t8);
- Check(eq, kOperandIsNotAFunction, t8, Operand(JS_FUNCTION_TYPE));
+ Check(eq, AbortReason::kOperandIsNotAFunction, t8,
+ Operand(JS_FUNCTION_TYPE));
}
}
@@ -4863,9 +4870,11 @@ void MacroAssembler::AssertBoundFunction(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
- Check(ne, kOperandIsASmiAndNotABoundFunction, t8, Operand(zero_reg));
+ Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction, t8,
+ Operand(zero_reg));
GetObjectType(object, t8, t8);
- Check(eq, kOperandIsNotABoundFunction, t8, Operand(JS_BOUND_FUNCTION_TYPE));
+ Check(eq, AbortReason::kOperandIsNotABoundFunction, t8,
+ Operand(JS_BOUND_FUNCTION_TYPE));
}
}
@@ -4873,7 +4882,8 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
if (!emit_debug_code()) return;
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
- Check(ne, kOperandIsASmiAndNotAGeneratorObject, t8, Operand(zero_reg));
+ Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, t8,
+ Operand(zero_reg));
GetObjectType(object, t8, t8);
@@ -4885,7 +4895,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
// Check if JSAsyncGeneratorObject
Branch(&done, eq, t8, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE));
- Abort(kOperandIsNotAGeneratorObject);
+ Abort(AbortReason::kOperandIsNotAGeneratorObject);
bind(&done);
}
@@ -4899,7 +4909,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Branch(&done_checking, eq, object, Operand(scratch));
lw(t8, FieldMemOperand(object, HeapObject::kMapOffset));
LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
- Assert(eq, kExpectedUndefinedOrCell, t8, Operand(scratch));
+ Assert(eq, AbortReason::kExpectedUndefinedOrCell, t8, Operand(scratch));
bind(&done_checking);
}
}
@@ -5127,20 +5137,11 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
void TurboAssembler::CallCFunction(ExternalReference function,
int num_reg_arguments,
int num_double_arguments) {
- if (IsMipsArchVariant(kMips32r6)) {
- uint32_t lui_offset, jialc_offset;
- UnpackTargetAddressUnsigned(Operand(function).immediate(), lui_offset,
- jialc_offset);
- if (MustUseReg(Operand(function).rmode())) {
- RecordRelocInfo(Operand(function).rmode(), Operand(function).immediate());
- }
- lui(t9, lui_offset);
- CallCFunctionHelper(t9, jialc_offset, num_reg_arguments,
- num_double_arguments);
- } else {
- li(t9, Operand(function));
- CallCFunctionHelper(t9, 0, num_reg_arguments, num_double_arguments);
- }
+  // The Linux/MIPS calling convention demands that register t9 contain the
+  // address of the function being called, to support position-independent
+  // code.
+ li(t9, Operand(function));
+ CallCFunctionHelper(t9, 0, num_reg_arguments, num_double_arguments);
}
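The comment refers to the o32 PIC ABI: position-independent callees recompute gp from t9, so the call target must arrive in t9. A hedged sketch of the callee prologue the convention assumes (the standard .cpload expansion, not part of this patch):

// Typical PIC prologue on MIPS (illustrative):
//   lui   gp, %hi(_gp_disp)
//   addiu gp, gp, %lo(_gp_disp)
//   addu  gp, gp, t9   // gp is derived from t9, the address of this function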
void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
@@ -5197,6 +5198,11 @@ void TurboAssembler::CallCFunctionHelper(Register function_base,
function_base = t9;
}
+ if (function_offset != 0) {
+ addiu(t9, t9, function_offset);
+ function_offset = 0;
+ }
+
Call(function_base, function_offset);
int stack_passed_arguments = CalculateStackPassedWords(
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 52525ad9bc..8c70eb54a3 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -166,13 +166,13 @@ class TurboAssembler : public Assembler {
// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
- void Assert(Condition cc, BailoutReason reason, Register rs, Operand rt);
+ void Assert(Condition cc, AbortReason reason, Register rs, Operand rt);
// Like Assert(), but always enabled.
- void Check(Condition cc, BailoutReason reason, Register rs, Operand rt);
+ void Check(Condition cc, AbortReason reason, Register rs, Operand rt);
// Print a message to stdout and abort execution.
- void Abort(BailoutReason msg);
+ void Abort(AbortReason msg);
inline bool AllowThisStubCall(CodeStub* stub);
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index 342f27666d..4994418ef5 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -60,8 +60,8 @@ class MipsDebugger {
void PrintAllRegsIncludingFPU();
private:
- // We set the breakpoint code to 0xfffff to easily recognize it.
- static const Instr kBreakpointInstr = SPECIAL | BREAK | 0xfffff << 6;
+ // We set the breakpoint code to 0xFFFFF to easily recognize it.
+ static const Instr kBreakpointInstr = SPECIAL | BREAK | 0xFFFFF << 6;
static const Instr kNopInstr = 0x0;
Simulator* sim_;
@@ -808,6 +808,10 @@ void Simulator::set_last_debugger_input(char* input) {
last_debugger_input_ = input;
}
+void Simulator::SetRedirectInstruction(Instruction* instruction) {
+ instruction->SetInstructionBits(rtCallRedirInstr);
+}
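The shared SimulatorBase now owns the redirection bookkeeping and calls back into this hook to stamp the trap; a minimal hedged illustration of its effect (buffer_address and slot are hypothetical):

// After this call the slot holds rtCallRedirInstr, so the simulator enters
// SoftwareInterrupt() when execution reaches it (see the check further down).
Instruction* slot = reinterpret_cast<Instruction*>(buffer_address);
Simulator::SetRedirectInstruction(slot);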
+
void Simulator::FlushICache(base::CustomMatcherHashMap* i_cache,
void* start_addr, size_t size) {
intptr_t start = reinterpret_cast<intptr_t>(start_addr);
@@ -878,21 +882,12 @@ void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
}
-void Simulator::Initialize(Isolate* isolate) {
- if (isolate->simulator_initialized()) return;
- isolate->set_simulator_initialized(true);
- ::v8::internal::ExternalReference::set_redirector(isolate,
- &RedirectExternalReference);
-}
-
-
Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
i_cache_ = isolate_->simulator_i_cache();
if (i_cache_ == nullptr) {
i_cache_ = new base::CustomMatcherHashMap(&ICacheMatch);
isolate_->set_simulator_i_cache(i_cache_);
}
- Initialize(isolate);
// Set up simulator support first. Some of this information is needed to
// setup the architecture state.
stack_ = reinterpret_cast<char*>(malloc(stack_size_));
@@ -934,101 +929,6 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
Simulator::~Simulator() { free(stack_); }
-// When the generated code calls an external reference we need to catch that in
-// the simulator. The external reference will be a function compiled for the
-// host architecture. We need to call that function instead of trying to
-// execute it with the simulator. We do that by redirecting the external
-// reference to a swi (software-interrupt) instruction that is handled by
-// the simulator. We write the original destination of the jump just at a known
-// offset from the swi instruction so the simulator knows what to call.
-class Redirection {
- public:
- Redirection(Isolate* isolate, void* external_function,
- ExternalReference::Type type)
- : external_function_(external_function),
- swi_instruction_(rtCallRedirInstr),
- type_(type),
- next_(nullptr) {
- next_ = isolate->simulator_redirection();
- Simulator::current(isolate)->
- FlushICache(isolate->simulator_i_cache(),
- reinterpret_cast<void*>(&swi_instruction_),
- Instruction::kInstrSize);
- isolate->set_simulator_redirection(this);
- }
-
- void* address_of_swi_instruction() {
- return reinterpret_cast<void*>(&swi_instruction_);
- }
-
- void* external_function() { return external_function_; }
- ExternalReference::Type type() { return type_; }
-
- static Redirection* Get(Isolate* isolate, void* external_function,
- ExternalReference::Type type) {
- Redirection* current = isolate->simulator_redirection();
- for (; current != nullptr; current = current->next_) {
- if (current->external_function_ == external_function &&
- current->type_ == type) {
- return current;
- }
- }
- return new Redirection(isolate, external_function, type);
- }
-
- static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
- char* addr_of_swi = reinterpret_cast<char*>(swi_instruction);
- char* addr_of_redirection =
- addr_of_swi - offsetof(Redirection, swi_instruction_);
- return reinterpret_cast<Redirection*>(addr_of_redirection);
- }
-
- static void* ReverseRedirection(int32_t reg) {
- Redirection* redirection = FromSwiInstruction(
- reinterpret_cast<Instruction*>(reinterpret_cast<void*>(reg)));
- return redirection->external_function();
- }
-
- static void DeleteChain(Redirection* redirection) {
- while (redirection != nullptr) {
- Redirection* next = redirection->next_;
- delete redirection;
- redirection = next;
- }
- }
-
- private:
- void* external_function_;
- uint32_t swi_instruction_;
- ExternalReference::Type type_;
- Redirection* next_;
-};
-
-
-// static
-void Simulator::TearDown(base::CustomMatcherHashMap* i_cache,
- Redirection* first) {
- Redirection::DeleteChain(first);
- if (i_cache != nullptr) {
- for (base::CustomMatcherHashMap::Entry* entry = i_cache->Start();
- entry != nullptr; entry = i_cache->Next(entry)) {
- delete static_cast<CachePage*>(entry->value);
- }
- delete i_cache;
- }
-}
-
-
-void* Simulator::RedirectExternalReference(Isolate* isolate,
- void* external_function,
- ExternalReference::Type type) {
- base::LockGuard<base::Mutex> lock_guard(
- isolate->simulator_redirection_mutex());
- Redirection* redirection = Redirection::Get(isolate, external_function, type);
- return redirection->address_of_swi_instruction();
-}
-
-
// Get the active Simulator for the current thread.
Simulator* Simulator::current(Isolate* isolate) {
v8::internal::Isolate::PerIsolateThreadData* isolate_data =
@@ -1105,7 +1005,7 @@ void Simulator::set_fpu_register_double(int fpureg, double value) {
} else {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
int64_t i64 = bit_cast<int64_t>(value);
- set_fpu_register_word(fpureg, i64 & 0xffffffff);
+ set_fpu_register_word(fpureg, i64 & 0xFFFFFFFF);
set_fpu_register_word(fpureg + 1, i64 >> 32);
}
}
@@ -1152,19 +1052,19 @@ int64_t Simulator::get_fpu_register(int fpureg) const {
int32_t Simulator::get_fpu_register_word(int fpureg) const {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
- return static_cast<int32_t>(FPUregisters_[fpureg * 2] & 0xffffffff);
+ return static_cast<int32_t>(FPUregisters_[fpureg * 2] & 0xFFFFFFFF);
}
int32_t Simulator::get_fpu_register_signed_word(int fpureg) const {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
- return static_cast<int32_t>(FPUregisters_[fpureg * 2] & 0xffffffff);
+ return static_cast<int32_t>(FPUregisters_[fpureg * 2] & 0xFFFFFFFF);
}
int32_t Simulator::get_fpu_register_hi_word(int fpureg) const {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
- return static_cast<int32_t>((FPUregisters_[fpureg * 2] >> 32) & 0xffffffff);
+ return static_cast<int32_t>((FPUregisters_[fpureg * 2] >> 32) & 0xFFFFFFFF);
}
@@ -2204,7 +2104,7 @@ void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) {
uint32_t Simulator::ReadBU(int32_t addr) {
uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
TraceMemRd(addr, static_cast<int32_t>(*ptr));
- return *ptr & 0xff;
+ return *ptr & 0xFF;
}
@@ -2318,7 +2218,7 @@ void Simulator::SoftwareInterrupt() {
// We first check if we met a call_rt_redirected.
if (instr_.InstructionBits() == rtCallRedirInstr) {
- Redirection* redirection = Redirection::FromSwiInstruction(instr_.instr());
+ Redirection* redirection = Redirection::FromInstruction(instr_.instr());
int32_t arg0 = get_register(a0);
int32_t arg1 = get_register(a1);
int32_t arg2 = get_register(a2);
@@ -2603,7 +2503,7 @@ void Simulator::DisableStop(uint32_t code) {
void Simulator::IncreaseStopCounter(uint32_t code) {
DCHECK_LE(code, kMaxStopCode);
- if ((watched_stops_[code].count & ~(1 << 31)) == 0x7fffffff) {
+ if ((watched_stops_[code].count & ~(1 << 31)) == 0x7FFFFFFF) {
PrintF("Stop counter for code %i has overflowed.\n"
"Enabling this code and reseting the counter to 0.\n", code);
watched_stops_[code].count = 0;
@@ -3102,8 +3002,8 @@ void Simulator::DecodeTypeRegisterDRsType() {
// Extracting sign, exponent and mantissa from the input double
uint32_t sign = (classed >> 63) & 1;
- uint32_t exponent = (classed >> 52) & 0x00000000000007ff;
- uint64_t mantissa = classed & 0x000fffffffffffff;
+ uint32_t exponent = (classed >> 52) & 0x00000000000007FF;
+ uint64_t mantissa = classed & 0x000FFFFFFFFFFFFF;
uint64_t result;
double dResult;
@@ -3124,7 +3024,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
// Setting flags if double is NaN
signalingNan = false;
quietNan = false;
- if (!negInf && !posInf && exponent == 0x7ff) {
+ if (!negInf && !posInf && exponent == 0x7FF) {
quietNan = ((mantissa & 0x0008000000000000) != 0) &&
((mantissa & (0x0008000000000000 - 1)) == 0);
signalingNan = !quietNan;
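// Hedged worked example of the branch above: 0x7FF8000000000000 has exponent
// 0x7FF and mantissa 0x0008000000000000 (top mantissa bit only), so quietNan
// is set; 0x7FF0000000000001 instead leaves quietNan false, signalingNan true.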
@@ -3417,8 +3317,8 @@ void Simulator::DecodeTypeRegisterSRsType() {
// Extracting sign, exponent and mantissa from the input float
uint32_t sign = (classed >> 31) & 1;
- uint32_t exponent = (classed >> 23) & 0x000000ff;
- uint32_t mantissa = classed & 0x007fffff;
+ uint32_t exponent = (classed >> 23) & 0x000000FF;
+ uint32_t mantissa = classed & 0x007FFFFF;
uint32_t result;
float fResult;
@@ -3439,7 +3339,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
// Setting flags if float is NaN
signalingNan = false;
quietNan = false;
- if (!negInf && !posInf && (exponent == 0xff)) {
+ if (!negInf && !posInf && (exponent == 0xFF)) {
quietNan = ((mantissa & 0x00200000) == 0) &&
((mantissa & (0x00200000 - 1)) == 0);
signalingNan = !quietNan;
@@ -3994,12 +3894,12 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
case MULT:
i64hilo = static_cast<int64_t>(rs()) * static_cast<int64_t>(rt());
if (!IsMipsArchVariant(kMips32r6)) {
- set_register(LO, static_cast<int32_t>(i64hilo & 0xffffffff));
+ set_register(LO, static_cast<int32_t>(i64hilo & 0xFFFFFFFF));
set_register(HI, static_cast<int32_t>(i64hilo >> 32));
} else {
switch (sa()) {
case MUL_OP:
- SetResult(rd_reg(), static_cast<int32_t>(i64hilo & 0xffffffff));
+ SetResult(rd_reg(), static_cast<int32_t>(i64hilo & 0xFFFFFFFF));
break;
case MUH_OP:
SetResult(rd_reg(), static_cast<int32_t>(i64hilo >> 32));
@@ -4013,12 +3913,12 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
case MULTU:
u64hilo = static_cast<uint64_t>(rs_u()) * static_cast<uint64_t>(rt_u());
if (!IsMipsArchVariant(kMips32r6)) {
- set_register(LO, static_cast<int32_t>(u64hilo & 0xffffffff));
+ set_register(LO, static_cast<int32_t>(u64hilo & 0xFFFFFFFF));
set_register(HI, static_cast<int32_t>(u64hilo >> 32));
} else {
switch (sa()) {
case MUL_OP:
- SetResult(rd_reg(), static_cast<int32_t>(u64hilo & 0xffffffff));
+ SetResult(rd_reg(), static_cast<int32_t>(u64hilo & 0xFFFFFFFF));
break;
case MUH_OP:
SetResult(rd_reg(), static_cast<int32_t>(u64hilo >> 32));
@@ -4265,7 +4165,7 @@ void Simulator::DecodeTypeRegisterSPECIAL3() {
// Reverse the bit in byte for each individual byte
for (int i = 0; i < 4; i++) {
output = output >> 8;
- i_byte = input & 0xff;
+ i_byte = input & 0xFF;
// Fast way to reverse bits in byte
// Devised by Sean Anderson, July 13, 2001
@@ -5258,8 +5158,8 @@ void Msa3RInstrHelper_shuffle(const uint32_t opcode, T_reg ws, T_reg wt,
wd_p[2 * i + 1] = ws_p[2 * i + 1];
break;
case VSHF: {
- const int mask_not_valid = 0xc0;
- const int mask_6_bits = 0x3f;
+ const int mask_not_valid = 0xC0;
+ const int mask_6_bits = 0x3F;
if ((wd_p[i] & mask_not_valid)) {
wd_p[i] = 0;
} else {
@@ -5658,7 +5558,7 @@ void Simulator::DecodeTypeMsa3RF() {
break; \
} \
/* Infinity */ \
- dst = PACK_FLOAT16(aSign, 0x1f, 0); \
+ dst = PACK_FLOAT16(aSign, 0x1F, 0); \
break; \
} else if (aExp == 0 && aFrac == 0) { \
dst = PACK_FLOAT16(aSign, 0, 0); \
@@ -5672,13 +5572,13 @@ void Simulator::DecodeTypeMsa3RF() {
aExp -= 0x71; \
if (aExp < 1) { \
/* Will be denormal in halfprec */ \
- mask = 0x00ffffff; \
+ mask = 0x00FFFFFF; \
if (aExp >= -11) { \
mask >>= 11 + aExp; \
} \
} else { \
/* Normal number in halfprec */ \
- mask = 0x00001fff; \
+ mask = 0x00001FFF; \
} \
switch (MSACSR_ & 3) { \
case kRoundToNearest: \
@@ -5699,7 +5599,7 @@ void Simulator::DecodeTypeMsa3RF() {
} \
rounding_bumps_exp = (aFrac + increment >= 0x01000000); \
if (aExp > maxexp || (aExp == maxexp && rounding_bumps_exp)) { \
- dst = PACK_FLOAT16(aSign, 0x1f, 0); \
+ dst = PACK_FLOAT16(aSign, 0x1F, 0); \
break; \
} \
aFrac += increment; \
@@ -6213,8 +6113,8 @@ template <typename T_int, typename T_fp, typename T_reg>
T_int Msa2RFInstrHelper2(uint32_t opcode, T_reg ws, int i) {
switch (opcode) {
#define EXTRACT_FLOAT16_SIGN(fp16) (fp16 >> 15)
-#define EXTRACT_FLOAT16_EXP(fp16) (fp16 >> 10 & 0x1f)
-#define EXTRACT_FLOAT16_FRAC(fp16) (fp16 & 0x3ff)
+#define EXTRACT_FLOAT16_EXP(fp16) (fp16 >> 10 & 0x1F)
+#define EXTRACT_FLOAT16_FRAC(fp16) (fp16 & 0x3FF)
#define PACK_FLOAT32(sign, exp, frac) \
static_cast<uint32_t>(((sign) << 31) + ((exp) << 23) + (frac))
#define FEXUP_DF(src_index) \
@@ -6224,9 +6124,9 @@ T_int Msa2RFInstrHelper2(uint32_t opcode, T_reg ws, int i) {
aSign = EXTRACT_FLOAT16_SIGN(element); \
aExp = EXTRACT_FLOAT16_EXP(element); \
aFrac = EXTRACT_FLOAT16_FRAC(element); \
- if (V8_LIKELY(aExp && aExp != 0x1f)) { \
+ if (V8_LIKELY(aExp && aExp != 0x1F)) { \
return PACK_FLOAT32(aSign, aExp + 0x70, aFrac << 13); \
- } else if (aExp == 0x1f) { \
+ } else if (aExp == 0x1F) { \
if (aFrac) { \
return bit_cast<int32_t>(std::numeric_limits<float>::quiet_NaN()); \
} else { \
@@ -6389,7 +6289,7 @@ void Simulator::DecodeTypeImmediate() {
int32_t ft_reg = instr_.FtValue(); // Destination register.
// Zero extended immediate.
- uint32_t oe_imm16 = 0xffff & imm16;
+ uint32_t oe_imm16 = 0xFFFF & imm16;
// Sign extended immediate.
int32_t se_imm16 = imm16;
@@ -6438,11 +6338,11 @@ void Simulator::DecodeTypeImmediate() {
const int32_t bitsIn16Int = sizeof(int16_t) * kBitsPerByte;
if (do_branch) {
if (FLAG_debug_code) {
- int16_t bits = imm16 & 0xfc;
+ int16_t bits = imm16 & 0xFC;
if (imm16 >= 0) {
CHECK_EQ(bits, 0);
} else {
- CHECK_EQ(bits ^ 0xfc, 0);
+ CHECK_EQ(bits ^ 0xFC, 0);
}
}
// jump range :[pc + kInstrSize - 512 * kInstrSize,
@@ -6899,7 +6799,7 @@ void Simulator::DecodeTypeImmediate() {
break;
}
case ADDIUPC: {
- int32_t se_imm19 = imm19 | ((imm19 & 0x40000) ? 0xfff80000 : 0);
+ int32_t se_imm19 = imm19 | ((imm19 & 0x40000) ? 0xFFF80000 : 0);
alu_out = current_pc + (se_imm19 << 2);
break;
}
@@ -6987,7 +6887,7 @@ void Simulator::DecodeTypeJump() {
// Get current pc.
int32_t current_pc = get_pc();
// Get unchanged bits of pc.
- int32_t pc_high_bits = current_pc & 0xf0000000;
+ int32_t pc_high_bits = current_pc & 0xF0000000;
// Next pc.
int32_t next_pc = pc_high_bits | (simInstr.Imm26Value() << 2);
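// Hedged worked example: with current_pc == 0x40001000 and Imm26Value() ==
// 0x123456, pc_high_bits == 0x40000000 and next_pc == 0x4048D158.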
@@ -7150,18 +7050,16 @@ void Simulator::CallInternal(byte* entry) {
set_register(fp, fp_val);
}
-
-int32_t Simulator::Call(byte* entry, int argument_count, ...) {
- va_list parameters;
- va_start(parameters, argument_count);
+intptr_t Simulator::CallImpl(byte* entry, int argument_count,
+ const intptr_t* arguments) {
// Set up arguments.
// First four arguments passed in registers.
- DCHECK_GE(argument_count, 4);
- set_register(a0, va_arg(parameters, int32_t));
- set_register(a1, va_arg(parameters, int32_t));
- set_register(a2, va_arg(parameters, int32_t));
- set_register(a3, va_arg(parameters, int32_t));
+ int reg_arg_count = std::min(4, argument_count);
+ if (reg_arg_count > 0) set_register(a0, arguments[0]);
+ if (reg_arg_count > 1) set_register(a1, arguments[1]);
+ if (reg_arg_count > 2) set_register(a2, arguments[2]);
+ if (reg_arg_count > 3) set_register(a3, arguments[3]);
// Remaining arguments passed on stack.
int original_stack = get_register(sp);
@@ -7173,10 +7071,8 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) {
}
// Store remaining arguments on stack, from low to high memory.
intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
- for (int i = 4; i < argument_count; i++) {
- stack_argument[i - 4 + kCArgSlotCount] = va_arg(parameters, int32_t);
- }
- va_end(parameters);
+ memcpy(stack_argument + kCArgSlotCount, arguments + reg_arg_count,
+ (argument_count - reg_arg_count) * sizeof(*arguments));
set_register(sp, entry_stack);
CallInternal(entry);
@@ -7185,8 +7081,7 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) {
CHECK_EQ(entry_stack, get_register(sp));
set_register(sp, original_stack);
- int32_t result = get_register(v0);
- return result;
+ return get_register(v0);
}
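The split above follows the MIPS o32 convention: at most four integer arguments travel in a0-a3, and the rest are copied past the four reserved argument slots on the stack. A hedged sketch of reaching this path through the variadic wrapper declared in simulator-mips.h below (entry, isolate, and p0..p4 are hypothetical):

// The first four values land in a0-a3 inside CallImpl; the fifth is
// memcpy'd to the stack after kCArgSlotCount reserved slots.
Simulator* sim = Simulator::current(isolate);
intptr_t result = sim->Call<intptr_t>(entry, p0, p1, p2, p3, p4);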
diff --git a/deps/v8/src/mips/simulator-mips.h b/deps/v8/src/mips/simulator-mips.h
index fbc4ad19fb..0c417becd5 100644
--- a/deps/v8/src/mips/simulator-mips.h
+++ b/deps/v8/src/mips/simulator-mips.h
@@ -2,11 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
// Declares a Simulator for MIPS instructions if we are not generating a native
// MIPS binary. This Simulator allows us to run and debug MIPS code generation
// on regular desktop machines.
-// V8 calls into generated code by "calling" the CALL_GENERATED_CODE macro,
+// V8 calls into generated code via the GeneratedCode wrapper,
// which will start execution in the Simulator or forward to the real entry
// on a MIPS HW platform.
@@ -16,63 +15,12 @@
#include "src/allocation.h"
#include "src/mips/constants-mips.h"
-#if !defined(USE_SIMULATOR)
-// Running without a simulator on a native mips platform.
-
-namespace v8 {
-namespace internal {
-
-// When running without a simulator we call the entry directly.
-#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
- entry(p0, p1, p2, p3, p4)
-
-typedef int (*mips_regexp_matcher)(String*, int, const byte*, const byte*, int*,
- int, Address, int, Isolate*);
-
-// Call the generated regexp code directly. The code at the entry address
-// should act as a function matching the type arm_regexp_matcher.
-#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
- p7, p8) \
- (FUNCTION_CAST<mips_regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, \
- p8))
-
-// The stack limit beyond which we will throw stack overflow errors in
-// generated code. Because generated code on mips uses the C stack, we
-// just use the C stack limit.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
- uintptr_t c_limit) {
- return c_limit;
- }
-
- static inline uintptr_t RegisterCTryCatch(Isolate* isolate,
- uintptr_t try_catch_address) {
- USE(isolate);
- return try_catch_address;
- }
-
- static inline void UnregisterCTryCatch(Isolate* isolate) { USE(isolate); }
-};
-
-} // namespace internal
-} // namespace v8
-
-// Calculated the stack limit beyond which we will throw stack overflow errors.
-// This macro must be called from a C++ method. It relies on being able to take
-// the address of "this" to get a value on the current execution stack and then
-// calculates the stack limit based on that value.
-// NOTE: The check for overflow is not safe as there is no guarantee that the
-// running thread has its stack in all memory up to address 0x00000000.
-#define GENERATED_CODE_STACK_LIMIT(limit) \
- (reinterpret_cast<uintptr_t>(this) >= limit ? \
- reinterpret_cast<uintptr_t>(this) - limit : 0)
-
-#else // !defined(USE_SIMULATOR)
+#if defined(USE_SIMULATOR)
// Running with a simulator.
#include "src/assembler.h"
#include "src/base/hashmap.h"
+#include "src/simulator-base.h"
namespace v8 {
namespace internal {
@@ -143,7 +91,7 @@ class SimInstruction : public InstructionGetters<SimInstructionBase> {
}
};
-class Simulator {
+class Simulator : public SimulatorBase {
public:
friend class MipsDebugger;
@@ -223,7 +171,7 @@ class Simulator {
// The currently executing Simulator instance. Potentially there can be one
// for each native thread.
- static Simulator* current(v8::internal::Isolate* isolate);
+ V8_EXPORT_PRIVATE static Simulator* current(v8::internal::Isolate* isolate);
// Accessors for register state. Reading the pc value adheres to the MIPS
// architecture specification and is off by 8 from the currently executing
@@ -288,15 +236,11 @@ class Simulator {
// Executes MIPS instructions until the PC reaches end_sim_pc.
void Execute();
- // Call on program start.
- static void Initialize(Isolate* isolate);
-
- static void TearDown(base::CustomMatcherHashMap* i_cache, Redirection* first);
+ template <typename Return, typename... Args>
+ Return Call(byte* entry, Args... args) {
+ return VariadicCall<Return>(this, &Simulator::CallImpl, entry, args...);
+ }
- // V8 generally calls into generated JS code with 5 parameters and into
- // generated RegExp code with 7 parameters. This is a convenience function,
- // which sets up the simulator state and grabs the result on return.
- int32_t Call(byte* entry, int argument_count, ...);
// Alternative: call a 2-argument double function.
double CallFP(byte* entry, double d0, double d1);
@@ -310,6 +254,9 @@ class Simulator {
void set_last_debugger_input(char* input);
char* last_debugger_input() { return last_debugger_input_; }
+ // Redirection support.
+ static void SetRedirectInstruction(Instruction* instruction);
+
// ICache checking.
static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
size_t size);
@@ -332,6 +279,9 @@ class Simulator {
Unpredictable = 0xbadbeaf
};
+ V8_EXPORT_PRIVATE intptr_t CallImpl(byte* entry, int argument_count,
+ const intptr_t* arguments);
+
// Unsupported instructions use Format to print an error and stop execution.
void Format(Instruction* instr, const char* format);
@@ -557,11 +507,6 @@ class Simulator {
// Exceptions.
void SignalException(Exception e);
- // Runtime call support. Uses the isolate in a thread-safe way.
- static void* RedirectExternalReference(Isolate* isolate,
- void* external_function,
- ExternalReference::Type type);
-
// Handle arguments and return value for runtime FP functions.
void GetFpArgs(double* x, double* y, int32_t* z);
void SetFpResult(const double& result);
@@ -616,42 +561,8 @@ class Simulator {
StopCountAndDesc watched_stops_[kMaxStopCode + 1];
};
-
-// When running with the simulator transition into simulated execution at this
-// point.
-#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
- reinterpret_cast<Object*>(Simulator::current(isolate)->Call( \
- FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
-
-#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
- p7, p8) \
- Simulator::current(isolate)->Call(entry, 9, p0, p1, p2, p3, p4, p5, p6, p7, \
- p8)
-
-// The simulator has its own stack. Thus it has a different stack limit from
-// the C-based native code. The JS-based limit normally points near the end of
-// the simulator stack. When the C-based limit is exhausted we reflect that by
-// lowering the JS-based limit as well, to make stack checks trigger.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
- uintptr_t c_limit) {
- return Simulator::current(isolate)->StackLimit(c_limit);
- }
-
- static inline uintptr_t RegisterCTryCatch(Isolate* isolate,
- uintptr_t try_catch_address) {
- Simulator* sim = Simulator::current(isolate);
- return sim->PushAddress(try_catch_address);
- }
-
- static inline void UnregisterCTryCatch(Isolate* isolate) {
- Simulator::current(isolate)->PopAddress();
- }
-};
-
} // namespace internal
} // namespace v8
-#endif // !defined(USE_SIMULATOR)
+#endif // defined(USE_SIMULATOR)
#endif // V8_MIPS_SIMULATOR_MIPS_H_
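The header now routes every typed Call<Return>(entry, args...) through VariadicCall into the single non-template CallImpl. The sketch below is a hedged reconstruction of how such a forwarder can work, not V8's implementation (which presumably lives behind the new src/simulator-base.h include); Sim and its CallImpl body are stand-ins so the example is self-contained C++17:

    #include <array>
    #include <cstdint>
    #include <type_traits>

    using byte = unsigned char;

    // Stand-in simulator: a real one would marshal the packed arguments into
    // registers and run the code at `entry`; here we just fold them together
    // so the sketch compiles and runs.
    struct Sim {
      intptr_t CallImpl(byte* entry, int argc, const intptr_t* args) {
        intptr_t acc = reinterpret_cast<intptr_t>(entry);
        for (int i = 0; i < argc; i++) acc += args[i];
        return acc;
      }
    };

    // Widen one typed argument into an intptr_t slot.
    template <typename T>
    intptr_t ConvertArg(T arg) {
      if constexpr (std::is_pointer_v<T>) {
        return reinterpret_cast<intptr_t>(arg);
      } else {
        return static_cast<intptr_t>(arg);
      }
    }

    // Pack the arguments into a flat array and dispatch to the one
    // non-template entry point, mirroring the Call -> CallImpl split above.
    template <typename Return, typename Target, typename... Args>
    Return VariadicCall(Target* target,
                        intptr_t (Target::*impl)(byte*, int, const intptr_t*),
                        byte* entry, Args... args) {
      std::array<intptr_t, sizeof...(Args)> packed{ConvertArg(args)...};
      intptr_t ret = (target->*impl)(entry, static_cast<int>(sizeof...(Args)),
                                     packed.data());
      if constexpr (std::is_pointer_v<Return>) {
        return reinterpret_cast<Return>(ret);
      } else {
        return static_cast<Return>(ret);
      }
    }

    int main() {
      Sim sim;
      byte dummy[4] = {};
      // As if calling generated code with an int and a pointer argument.
      int result = VariadicCall<int>(&sim, &Sim::CallImpl, dummy, 41, dummy);
      (void)result;
    }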
diff --git a/deps/v8/src/mips64/assembler-mips64-inl.h b/deps/v8/src/mips64/assembler-mips64-inl.h
index 2cb3374f8e..ded3da224c 100644
--- a/deps/v8/src/mips64/assembler-mips64-inl.h
+++ b/deps/v8/src/mips64/assembler-mips64-inl.h
@@ -77,8 +77,8 @@ void RelocInfo::apply(intptr_t delta) {
Address RelocInfo::target_address() {
- DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- return Assembler::target_address_at(pc_, host_);
+ DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
+ return Assembler::target_address_at(pc_, constant_pool_);
}
Address RelocInfo::target_address_address() {
@@ -116,18 +116,6 @@ int RelocInfo::target_address_size() {
return Assembler::kSpecialTargetSize;
}
-Address Assembler::target_address_at(Address pc, Code* code) {
- Address constant_pool = code ? code->constant_pool() : nullptr;
- return target_address_at(pc, constant_pool);
-}
-
-void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
- Address target,
- ICacheFlushMode icache_flush_mode) {
- Address constant_pool = code ? code->constant_pool() : nullptr;
- set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
-}
-
Address Assembler::target_address_from_return_address(Address pc) {
return pc - kCallTargetAddressOffset;
}
@@ -136,7 +124,7 @@ void Assembler::deserialization_set_special_target_at(
Isolate* isolate, Address instruction_payload, Code* code, Address target) {
set_target_address_at(
isolate, instruction_payload - kInstructionsFor64BitConstant * kInstrSize,
- code, target);
+ code ? code->constant_pool() : nullptr, target);
}
void Assembler::set_target_internal_reference_encoded_at(Address pc,
@@ -170,21 +158,21 @@ void Assembler::deserialization_set_target_internal_reference_at(
HeapObject* RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return HeapObject::cast(
- reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_)));
+ return HeapObject::cast(reinterpret_cast<Object*>(
+ Assembler::target_address_at(pc_, constant_pool_)));
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Handle<HeapObject>(
- reinterpret_cast<HeapObject**>(Assembler::target_address_at(pc_, host_)));
+ return Handle<HeapObject>(reinterpret_cast<HeapObject**>(
+ Assembler::target_address_at(pc_, constant_pool_)));
}
void RelocInfo::set_target_object(HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(target->GetIsolate(), pc_, host_,
+ Assembler::set_target_address_at(target->GetIsolate(), pc_, constant_pool_,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr &&
@@ -198,7 +186,7 @@ void RelocInfo::set_target_object(HeapObject* target,
Address RelocInfo::target_external_reference() {
DCHECK(rmode_ == EXTERNAL_REFERENCE);
- return Assembler::target_address_at(pc_, host_);
+ return Assembler::target_address_at(pc_, constant_pool_);
}
@@ -246,7 +234,7 @@ void RelocInfo::WipeOut(Isolate* isolate) {
} else if (IsInternalReferenceEncoded(rmode_)) {
Assembler::set_target_internal_reference_encoded_at(pc_, nullptr);
} else {
- Assembler::set_target_address_at(isolate, pc_, host_, nullptr);
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_, nullptr);
}
}
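With the Code*-taking overloads of target_address_at / set_target_address_at deleted, every caller resolves the (possibly absent) constant pool itself, as deserialization_set_special_target_at now does with code ? code->constant_pool() : nullptr. A self-contained sketch of that call-site pattern, with Address, Code and the patching primitive reduced to stand-ins:

    #include <cstdint>
    #include <cstdio>

    using Address = uint8_t*;

    // Minimal analogue of a Code object exposing its constant pool.
    struct Code {
      Address pool;
      Address constant_pool() const { return pool; }
    };

    // The one low-level primitive that remains once the overloads are gone.
    void set_target_address_at(Address pc, Address constant_pool, Address target) {
      std::printf("patch %p (pool %p) -> %p\n", static_cast<void*>(pc),
                  static_cast<void*>(constant_pool), static_cast<void*>(target));
    }

    int main() {
      uint8_t buffer[16] = {};
      Code* code = nullptr;  // e.g. during deserialization no Code may exist yet
      // The pattern this hunk installs at each call site:
      set_target_address_at(buffer, code ? code->constant_pool() : nullptr,
                            buffer + 8);
    }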
diff --git a/deps/v8/src/mips64/assembler-mips64.cc b/deps/v8/src/mips64/assembler-mips64.cc
index 5099ec1db9..a056f66849 100644
--- a/deps/v8/src/mips64/assembler-mips64.cc
+++ b/deps/v8/src/mips64/assembler-mips64.cc
@@ -170,22 +170,23 @@ bool RelocInfo::IsInConstantPool() {
}
Address RelocInfo::embedded_address() const {
- return Assembler::target_address_at(pc_, host_);
+ return Assembler::target_address_at(pc_, constant_pool_);
}
uint32_t RelocInfo::embedded_size() const {
- return static_cast<uint32_t>(
- reinterpret_cast<intptr_t>((Assembler::target_address_at(pc_, host_))));
+ return static_cast<uint32_t>(reinterpret_cast<intptr_t>(
+ (Assembler::target_address_at(pc_, constant_pool_))));
}
void RelocInfo::set_embedded_address(Isolate* isolate, Address address,
ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate, pc_, host_, address, flush_mode);
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_, address,
+ flush_mode);
}
void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate, pc_, host_,
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_,
reinterpret_cast<Address>(size), flush_mode);
}
@@ -287,7 +288,7 @@ const Instr kSwRegFpNegOffsetPattern =
SW | (fp.code() << kRsShift) | (kNegOffset & kImm16Mask); // NOLINT
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
-const Instr kLwSwInstrTypeMask = 0xffe00000;
+const Instr kLwSwInstrTypeMask = 0xFFE00000;
const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;
@@ -2159,7 +2160,7 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
// about -64KB to about +64KB, allowing further addition of 4 when accessing
// 64-bit variables with two 32-bit accesses.
constexpr int32_t kMinOffsetForSimpleAdjustment =
- 0x7ff8; // Max int16_t that's a multiple of 8.
+ 0x7FF8; // Max int16_t that's a multiple of 8.
constexpr int32_t kMaxOffsetForSimpleAdjustment =
2 * kMinOffsetForSimpleAdjustment;
@@ -2486,7 +2487,7 @@ void Assembler::aluipc(Register rs, int16_t imm16) {
// Break / Trap instructions.
void Assembler::break_(uint32_t code, bool break_as_stop) {
- DCHECK_EQ(code & ~0xfffff, 0);
+ DCHECK_EQ(code & ~0xFFFFF, 0);
// We need to invalidate breaks that could be stops as well because the
// simulator expects a char pointer after the stop instruction.
// See constants-mips.h for explanation.
@@ -2896,7 +2897,7 @@ void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
uint64_t i;
memcpy(&i, &d, 8);
- *lo = i & 0xffffffff;
+ *lo = i & 0xFFFFFFFF;
*hi = i >> 32;
}
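DoubleAsTwoUInt32 in the hunk above splits an IEEE-754 double into its two 32-bit halves by copying the bits through memcpy, the well-defined way to type-pun in C++ (a reinterpret_cast load would be undefined behavior). The same routine, runnable in isolation:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Same technique as the hunk above: copy the double's bits into a
    // uint64_t, then mask off the two 32-bit halves.
    void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
      uint64_t i;
      std::memcpy(&i, &d, sizeof(i));
      *lo = static_cast<uint32_t>(i & 0xFFFFFFFF);
      *hi = static_cast<uint32_t>(i >> 32);
    }

    int main() {
      uint32_t lo, hi;
      DoubleAsTwoUInt32(1.0, &lo, &hi);
      std::printf("lo=0x%08x hi=0x%08x\n", lo, hi);  // lo=0x00000000 hi=0x3ff00000
    }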
diff --git a/deps/v8/src/mips64/assembler-mips64.h b/deps/v8/src/mips64/assembler-mips64.h
index cdb8be46cd..3530c7e7b2 100644
--- a/deps/v8/src/mips64/assembler-mips64.h
+++ b/deps/v8/src/mips64/assembler-mips64.h
@@ -238,6 +238,7 @@ int ToNumber(Register reg);
Register ToRegister(int num);
+constexpr bool kPadArguments = false;
constexpr bool kSimpleFPAliasing = true;
constexpr bool kSimdMaskRegisters = false;
@@ -590,10 +591,6 @@ class Assembler : public AssemblerBase {
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
set_target_address_at(isolate, pc, target, icache_flush_mode);
}
- INLINE(static Address target_address_at(Address pc, Code* code));
- INLINE(static void set_target_address_at(
- Isolate* isolate, Address pc, Code* code, Address target,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
static void set_target_value_at(
Isolate* isolate, Address pc, uint64_t target,
diff --git a/deps/v8/src/mips64/code-stubs-mips64.cc b/deps/v8/src/mips64/code-stubs-mips64.cc
index 5d8cee7787..f8075885a9 100644
--- a/deps/v8/src/mips64/code-stubs-mips64.cc
+++ b/deps/v8/src/mips64/code-stubs-mips64.cc
@@ -237,7 +237,8 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// (happens only when input is MIN_INT).
__ Branch(&bail_out, gt, zero_reg, Operand(scratch));
__ bind(&positive_exponent);
- __ Assert(ge, kUnexpectedNegativeValue, scratch, Operand(zero_reg));
+ __ Assert(ge, AbortReason::kUnexpectedNegativeValue, scratch,
+ Operand(zero_reg));
Label while_true, no_carry, loop_end;
__ bind(&while_true);
@@ -525,7 +526,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// caller fp |
// function slot | entry frame
// context slot |
- // bad fp (0xff...f) |
+ // bad fp (0xFF...F) |
// callee saved registers + ra
// [ O32: 4 args slots]
// args
@@ -587,13 +588,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// callee saved registers + ra
// [ O32: 4 args slots]
// args
-
- if (type() == StackFrame::CONSTRUCT_ENTRY) {
- __ Call(BUILTIN_CODE(isolate, JSConstructEntryTrampoline),
- RelocInfo::CODE_TARGET);
- } else {
- __ Call(BUILTIN_CODE(isolate, JSEntryTrampoline), RelocInfo::CODE_TARGET);
- }
+ __ Call(EntryTrampoline(), RelocInfo::CODE_TARGET);
// Unlink this frame from the handler chain.
__ PopStackHandler();
@@ -644,8 +639,8 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) {
// filled with kZapValue by the GC.
// Dereference the address and check for this.
__ Uld(a4, MemOperand(t9));
- __ Assert(ne, kReceivedInvalidReturnAddress, a4,
- Operand(reinterpret_cast<uint64_t>(kZapValue)));
+ __ Assert(ne, AbortReason::kReceivedInvalidReturnAddress, a4,
+ Operand(reinterpret_cast<uint64_t>(kZapValue)));
}
__ Jump(t9);
}
@@ -760,7 +755,7 @@ static void CreateArrayDispatch(MacroAssembler* masm,
}
// If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
} else {
UNREACHABLE();
}
@@ -802,7 +797,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
if (FLAG_debug_code) {
__ Ld(a5, FieldMemOperand(a2, 0));
__ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
- __ Assert(eq, kExpectedAllocationSite, a5, Operand(at));
+ __ Assert(eq, AbortReason::kExpectedAllocationSite, a5, Operand(at));
}
// Save the resulting elements kind in type info. We can't just store a3
@@ -825,7 +820,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
}
// If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
} else {
UNREACHABLE();
}
@@ -900,11 +895,11 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ Ld(a4, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ SmiTst(a4, at);
- __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
- at, Operand(zero_reg));
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, at,
+ Operand(zero_reg));
__ GetObjectType(a4, a4, a5);
- __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
- a5, Operand(MAP_TYPE));
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction, a5,
+ Operand(MAP_TYPE));
// We should either have undefined in a2 or a valid AllocationSite
__ AssertUndefinedOrAllocationSite(a2, a4);
@@ -982,11 +977,11 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
__ Ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ SmiTst(a3, at);
- __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
- at, Operand(zero_reg));
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, at,
+ Operand(zero_reg));
__ GetObjectType(a3, a3, a4);
- __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
- a4, Operand(MAP_TYPE));
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction, a4,
+ Operand(MAP_TYPE));
}
// Figure out the right elements kind.
@@ -1001,8 +996,10 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
if (FLAG_debug_code) {
Label done;
__ Branch(&done, eq, a3, Operand(PACKED_ELEMENTS));
- __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray, a3,
- Operand(HOLEY_ELEMENTS));
+ __ Assert(
+ eq,
+ AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray,
+ a3, Operand(HOLEY_ELEMENTS));
__ bind(&done);
}
@@ -1104,7 +1101,8 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ Sd(s0, MemOperand(s3, kNextOffset));
if (__ emit_debug_code()) {
__ Lw(a1, MemOperand(s3, kLevelOffset));
- __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2));
+ __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, a1,
+ Operand(s2));
}
__ Subu(s2, s2, Operand(1));
__ Sw(s2, MemOperand(s3, kLevelOffset));
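The substantive change in this file is the migration from unscoped BailoutReason constants to the scoped enum AbortReason, which forces qualification at every Assert/Check/Abort call site. A minimal analogue of why each call site had to change; this GetAbortReason is a stand-in for V8's, which the macro-assembler hunk later in this patch calls:

    #include <cstdio>

    // Scoped-enum analogue of AbortReason; both constants appear in the
    // hunks above.
    enum class AbortReason {
      kUnexpectedNegativeValue,
      kExpectedAllocationSite,
    };

    const char* GetAbortReason(AbortReason reason) {
      switch (reason) {
        case AbortReason::kUnexpectedNegativeValue:
          return "unexpected negative value";
        case AbortReason::kExpectedAllocationSite:
          return "expected allocation site";
      }
      return "unknown";
    }

    int main() {
      // A bare kUnexpectedNegativeValue no longer compiles; scoped enums
      // require qualification, which is exactly the churn in this file.
      std::puts(GetAbortReason(AbortReason::kUnexpectedNegativeValue));
    }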
diff --git a/deps/v8/src/mips64/codegen-mips64.cc b/deps/v8/src/mips64/codegen-mips64.cc
index 970e0efe56..3be5e504bb 100644
--- a/deps/v8/src/mips64/codegen-mips64.cc
+++ b/deps/v8/src/mips64/codegen-mips64.cc
@@ -24,8 +24,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
#else
size_t allocated = 0;
- byte* buffer =
- AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
+ byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return stub;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@@ -97,7 +96,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
// copied and a3 to the dst pointer after all the 64 byte chunks have been
// copied. We will loop, incrementing a0 and a1 until a0 equals a3.
__ bind(&aligned);
- __ andi(t8, a2, 0x3f);
+ __ andi(t8, a2, 0x3F);
__ beq(a2, t8, &chkw); // Less than 64?
__ subu(a3, a2, t8); // In delay slot.
__ addu(a3, a0, a3); // Now a3 is the final dst after loop.
@@ -180,7 +179,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
// down to chk1w to handle the tail end of the copy.
__ bind(&chkw);
__ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
- __ andi(t8, a2, 0x1f);
+ __ andi(t8, a2, 0x1F);
__ beq(a2, t8, &chk1w); // Less than 32?
__ nop(); // In delay slot.
__ Lw(a4, MemOperand(a1));
@@ -264,7 +263,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
// the dst pointer after all the 64 byte chunks have been copied. We will
// loop, incrementing a0 and a1 until a0 equals a3.
__ bind(&ua_chk16w);
- __ andi(t8, a2, 0x3f);
+ __ andi(t8, a2, 0x3F);
__ beq(a2, t8, &ua_chkw);
__ subu(a3, a2, t8); // In delay slot.
__ addu(a3, a0, a3);
@@ -437,7 +436,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
// ua_chk1w to handle the tail end of the copy.
__ bind(&ua_chkw);
__ Pref(pref_hint_load, MemOperand(a1));
- __ andi(t8, a2, 0x1f);
+ __ andi(t8, a2, 0x1F);
__ beq(a2, t8, &ua_chk1w);
__ nop(); // In delay slot.
@@ -546,8 +545,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, allocated);
- CHECK(base::OS::SetPermissions(buffer, allocated,
- base::OS::MemoryPermission::kReadExecute));
+ CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
@@ -558,8 +556,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
return nullptr;
#else
size_t allocated = 0;
- byte* buffer =
- AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
+ byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@@ -575,8 +572,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, allocated);
- CHECK(base::OS::SetPermissions(buffer, allocated,
- base::OS::MemoryPermission::kReadExecute));
+ CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}
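Both stubs in this file follow the same page lifecycle after the switch to AllocatePage/SetPermissions: map a writable page, assemble into it, flush the instruction cache, then remap the page read+execute so it is never writable and executable at once. A POSIX sketch of that sequence; mmap, mprotect, __builtin___clear_cache and the 4 KB page size are illustrative stand-ins for V8's wrappers, not the code above:

    #include <sys/mman.h>

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Allocate writable, emit, flush, then drop write permission.
    void* EmitStub(const uint8_t* code, std::size_t size) {
      const std::size_t kPage = 4096;  // assumed page size for the sketch
      void* buf = mmap(nullptr, kPage, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (buf == MAP_FAILED) return nullptr;
      std::memcpy(buf, code, size);  // "assemble" into the writable page
      __builtin___clear_cache(static_cast<char*>(buf),
                              static_cast<char*>(buf) + size);  // FlushICache analogue
      if (mprotect(buf, kPage, PROT_READ | PROT_EXEC) != 0) {  // SetPermissions analogue
        munmap(buf, kPage);
        return nullptr;
      }
      return buf;
    }

    int main() {
      const uint8_t nop[4] = {0, 0, 0, 0};  // a MIPS nop; mapped but never run
      return EmitStub(nop, sizeof(nop)) != nullptr ? 0 : 1;
    }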
diff --git a/deps/v8/src/mips64/disasm-mips64.cc b/deps/v8/src/mips64/disasm-mips64.cc
index 523e268532..d53b47d0c6 100644
--- a/deps/v8/src/mips64/disasm-mips64.cc
+++ b/deps/v8/src/mips64/disasm-mips64.cc
@@ -449,7 +449,7 @@ void Decoder::PrintPCImm21(Instruction* instr, int delta_pc, int n_bits) {
void Decoder::PrintXImm26(Instruction* instr) {
uint64_t target = static_cast<uint64_t>(instr->Imm26Value())
<< kImmFieldShift;
- target = (reinterpret_cast<uint64_t>(instr) & ~0xfffffff) | target;
+ target = (reinterpret_cast<uint64_t>(instr) & ~0xFFFFFFF) | target;
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "0x%" PRIx64, target);
}
@@ -485,7 +485,7 @@ void Decoder::PrintPCImm26(Instruction* instr, int delta_pc, int n_bits) {
// PC[GPRLEN-1 .. 28] || instr_index26 || 00
void Decoder::PrintPCImm26(Instruction* instr) {
int32_t imm26 = instr->Imm26Value();
- uint64_t pc_mask = ~0xfffffff;
+ uint64_t pc_mask = ~0xFFFFFFF;
uint64_t pc = ((uint64_t)(instr + 1) & pc_mask) | (imm26 << 2);
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "%s",
@@ -2225,6 +2225,12 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
case SWR:
Format(instr, "swr 'rt, 'imm16s('rs)");
break;
+ case SDR:
+ Format(instr, "sdr 'rt, 'imm16s('rs)");
+ break;
+ case SDL:
+ Format(instr, "sdl 'rt, 'imm16s('rs)");
+ break;
case LL:
if (kArchVariant == kMips64r6) {
Unknown(instr);
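PrintXImm26 and PrintPCImm26 above decode MIPS J-type targets: the upper PC bits select the current 256 MB region (for PrintPCImm26 it is the PC of the delay-slot instruction, instr + 1) and imm26 << 2 supplies the low 28 bits. The arithmetic in isolation:

    #include <cstdint>
    #include <cstdio>

    // A MIPS J-type jump keeps PC[63..28] (the 256 MB region) and replaces
    // the low 28 bits with instr_index26 || 00.
    uint64_t JumpTarget(uint64_t pc, uint32_t imm26) {
      uint64_t region = pc & ~uint64_t{0xFFFFFFF};
      return region | (uint64_t{imm26} << 2);
    }

    int main() {
      // 0x12345678 sits in region 0x10000000; the jump lands inside it.
      std::printf("0x%llx\n", static_cast<unsigned long long>(
                                  JumpTarget(0x12345678, 0x3FFFFFF)));
    }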
diff --git a/deps/v8/src/mips64/interface-descriptors-mips64.cc b/deps/v8/src/mips64/interface-descriptors-mips64.cc
index e55a0c57ed..8bc04a0401 100644
--- a/deps/v8/src/mips64/interface-descriptors-mips64.cc
+++ b/deps/v8/src/mips64/interface-descriptors-mips64.cc
@@ -43,8 +43,6 @@ const Register LoadDescriptor::SlotRegister() { return a0; }
const Register LoadWithVectorDescriptor::VectorRegister() { return a3; }
-const Register LoadICProtoArrayDescriptor::HandlerRegister() { return a4; }
-
const Register StoreDescriptor::ReceiverRegister() { return a1; }
const Register StoreDescriptor::NameRegister() { return a2; }
const Register StoreDescriptor::ValueRegister() { return a0; }
@@ -202,6 +200,11 @@ void TransitionElementsKindDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
+void AbortJSDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
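The new AbortJSDescriptor follows the registration idiom used throughout this file: list the platform's parameter registers and pass them to shared code via arraysize. A stripped-down analogue in which Register, the recording logic, and the encoding of a0 (MIPS GPR $4) are stand-ins:

    #include <cstddef>

    struct Register { int code; };
    constexpr Register a0{4};  // MIPS GPR $4

    struct CallInterfaceDescriptorData {
      void InitializePlatformSpecific(std::size_t count, const Register* regs) {
        // Record the parameter registers for this call interface (elided).
        (void)count;
        (void)regs;
      }
    };

    template <typename T, std::size_t N>
    constexpr std::size_t arraysize(T (&)[N]) { return N; }

    void AbortJSInitializePlatformSpecific(CallInterfaceDescriptorData* data) {
      Register registers[] = {a0};  // the single argument rides in a0, per the hunk
      data->InitializePlatformSpecific(arraysize(registers), registers);
    }

    int main() {
      CallInterfaceDescriptorData data;
      AbortJSInitializePlatformSpecific(&data);
    }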
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.cc b/deps/v8/src/mips64/macro-assembler-mips64.cc
index 25bc8baf80..841f4665cf 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/mips64/macro-assembler-mips64.cc
@@ -296,7 +296,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
Ld(scratch, MemOperand(address));
- Assert(eq, kWrongAddressOrValuePassedToRecordWrite, scratch,
+ Assert(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite, scratch,
Operand(value));
}
@@ -1537,14 +1537,14 @@ int TurboAssembler::InstrCountForLi64Bit(int64_t value) {
kArchVariant == kMips64r6) {
return 2;
} else if ((value & kImm16Mask) == 0 &&
- ((value >> 31) & 0x1ffff) == ((0x20000 - bit31) & 0x1ffff) &&
+ ((value >> 31) & 0x1FFFF) == ((0x20000 - bit31) & 0x1FFFF) &&
kArchVariant == kMips64r6) {
return 2;
} else if (is_int16(static_cast<int32_t>(value)) &&
is_int16((value >> 32) + bit31) && kArchVariant == kMips64r6) {
return 2;
} else if (is_int16(static_cast<int32_t>(value)) &&
- ((value >> 31) & 0x1ffff) == ((0x20000 - bit31) & 0x1ffff) &&
+ ((value >> 31) & 0x1FFFF) == ((0x20000 - bit31) & 0x1FFFF) &&
kArchVariant == kMips64r6) {
return 2;
} else if (base::bits::IsPowerOfTwo(value + 1) ||
@@ -1649,8 +1649,8 @@ void TurboAssembler::li_optimized(Register rd, Operand j, LiFlags mode) {
lui(rd, j.immediate() >> kLuiShift & kImm16Mask);
dahi(rd, ((j.immediate() >> 32) + bit31) & kImm16Mask);
} else if ((j.immediate() & kImm16Mask) == 0 &&
- ((j.immediate() >> 31) & 0x1ffff) ==
- ((0x20000 - bit31) & 0x1ffff) &&
+ ((j.immediate() >> 31) & 0x1FFFF) ==
+ ((0x20000 - bit31) & 0x1FFFF) &&
kArchVariant == kMips64r6) {
// 16 LSBs all set to zero.
// 48 MSBs hold a signed value which can't be represented by signed
@@ -1665,8 +1665,8 @@ void TurboAssembler::li_optimized(Register rd, Operand j, LiFlags mode) {
daddiu(rd, zero_reg, j.immediate() & kImm16Mask);
dahi(rd, ((j.immediate() >> 32) + bit31) & kImm16Mask);
} else if (is_int16(static_cast<int32_t>(j.immediate())) &&
- ((j.immediate() >> 31) & 0x1ffff) ==
- ((0x20000 - bit31) & 0x1ffff) &&
+ ((j.immediate() >> 31) & 0x1FFFF) ==
+ ((0x20000 - bit31) & 0x1FFFF) &&
kArchVariant == kMips64r6) {
// 48 LSBs contain an unsigned 16-bit number.
// 16 MSBs contain a signed 16-bit number.
@@ -2163,7 +2163,7 @@ void MacroAssembler::Trunc_l_ud(FPURegister fd,
{
UseScratchRegisterScope temps(this);
Register scratch1 = temps.Acquire();
- li(scratch1, 0x7fffffffffffffff);
+ li(scratch1, 0x7FFFFFFFFFFFFFFF);
and_(t8, t8, scratch1);
}
dmtc1(t8, fs);
@@ -2297,7 +2297,7 @@ void TurboAssembler::Trunc_ul_d(FPURegister fd, Register rs,
}
// Load 2^63 into scratch as its double representation.
- li(at, 0x43e0000000000000);
+ li(at, 0x43E0000000000000);
dmtc1(at, scratch);
// Test if scratch > fd.
@@ -2351,7 +2351,7 @@ void TurboAssembler::Trunc_ul_s(FPURegister fd, Register rs,
// Load 2^63 into scratch as its float representation.
UseScratchRegisterScope temps(this);
Register scratch1 = temps.Acquire();
- li(scratch1, 0x5f000000);
+ li(scratch1, 0x5F000000);
mtc1(scratch1, scratch);
}
@@ -4037,9 +4037,11 @@ void MacroAssembler::MaybeDropFrames() {
void MacroAssembler::PushStackHandler() {
// Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+ Push(Smi::kZero); // Padding.
+
// Link the current handler as the next handler.
li(a6,
Operand(ExternalReference(IsolateAddressId::kHandlerAddress, isolate())));
@@ -4174,7 +4176,8 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
}
if (FLAG_debug_code) {
- Check(lo, kStackAccessBelowStackPointer, src_reg, Operand(dst_reg));
+ Check(lo, AbortReason::kStackAccessBelowStackPointer, src_reg,
+ Operand(dst_reg));
}
// Restore caller's frame pointer and return address now as they will be
@@ -4747,13 +4750,13 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
// -----------------------------------------------------------------------------
// Debugging.
-void TurboAssembler::Assert(Condition cc, BailoutReason reason, Register rs,
+void TurboAssembler::Assert(Condition cc, AbortReason reason, Register rs,
Operand rt) {
if (emit_debug_code())
Check(cc, reason, rs, rt);
}
-void TurboAssembler::Check(Condition cc, BailoutReason reason, Register rs,
+void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs,
Operand rt) {
Label L;
Branch(&L, cc, rs, rt);
@@ -4762,11 +4765,11 @@ void TurboAssembler::Check(Condition cc, BailoutReason reason, Register rs,
bind(&L);
}
-void TurboAssembler::Abort(BailoutReason reason) {
+void TurboAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
#ifdef DEBUG
- const char* msg = GetBailoutReason(reason);
+ const char* msg = GetAbortReason(reason);
if (msg != nullptr) {
RecordComment("Abort message: ");
RecordComment(msg);
@@ -5095,7 +5098,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
andi(scratch, object, kSmiTagMask);
- Check(ne, kOperandIsASmi, scratch, Operand(zero_reg));
+ Check(ne, AbortReason::kOperandIsASmi, scratch, Operand(zero_reg));
}
}
@@ -5106,7 +5109,7 @@ void MacroAssembler::AssertSmi(Register object) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
andi(scratch, object, kSmiTagMask);
- Check(eq, kOperandIsASmi, scratch, Operand(zero_reg));
+ Check(eq, AbortReason::kOperandIsASmi, scratch, Operand(zero_reg));
}
}
@@ -5114,9 +5117,11 @@ void MacroAssembler::AssertFixedArray(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
- Check(ne, kOperandIsASmiAndNotAFixedArray, t8, Operand(zero_reg));
+ Check(ne, AbortReason::kOperandIsASmiAndNotAFixedArray, t8,
+ Operand(zero_reg));
GetObjectType(object, t8, t8);
- Check(eq, kOperandIsNotAFixedArray, t8, Operand(FIXED_ARRAY_TYPE));
+ Check(eq, AbortReason::kOperandIsNotAFixedArray, t8,
+ Operand(FIXED_ARRAY_TYPE));
}
}
@@ -5124,9 +5129,11 @@ void MacroAssembler::AssertFunction(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
- Check(ne, kOperandIsASmiAndNotAFunction, t8, Operand(zero_reg));
+ Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, t8,
+ Operand(zero_reg));
GetObjectType(object, t8, t8);
- Check(eq, kOperandIsNotAFunction, t8, Operand(JS_FUNCTION_TYPE));
+ Check(eq, AbortReason::kOperandIsNotAFunction, t8,
+ Operand(JS_FUNCTION_TYPE));
}
}
@@ -5135,9 +5142,11 @@ void MacroAssembler::AssertBoundFunction(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
- Check(ne, kOperandIsASmiAndNotABoundFunction, t8, Operand(zero_reg));
+ Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction, t8,
+ Operand(zero_reg));
GetObjectType(object, t8, t8);
- Check(eq, kOperandIsNotABoundFunction, t8, Operand(JS_BOUND_FUNCTION_TYPE));
+ Check(eq, AbortReason::kOperandIsNotABoundFunction, t8,
+ Operand(JS_BOUND_FUNCTION_TYPE));
}
}
@@ -5145,7 +5154,8 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
if (!emit_debug_code()) return;
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
- Check(ne, kOperandIsASmiAndNotAGeneratorObject, t8, Operand(zero_reg));
+ Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, t8,
+ Operand(zero_reg));
GetObjectType(object, t8, t8);
@@ -5157,7 +5167,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
// Check if JSAsyncGeneratorObject
Branch(&done, eq, t8, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE));
- Abort(kOperandIsNotAGeneratorObject);
+ Abort(AbortReason::kOperandIsNotAGeneratorObject);
bind(&done);
}
@@ -5171,7 +5181,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Branch(&done_checking, eq, object, Operand(scratch));
Ld(t8, FieldMemOperand(object, HeapObject::kMapOffset));
LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
- Assert(eq, kExpectedUndefinedOrCell, t8, Operand(scratch));
+ Assert(eq, AbortReason::kExpectedUndefinedOrCell, t8, Operand(scratch));
bind(&done_checking);
}
}
@@ -5402,8 +5412,8 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
void TurboAssembler::CallCFunction(ExternalReference function,
int num_reg_arguments,
int num_double_arguments) {
- li(t8, Operand(function));
- CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
+ li(t9, Operand(function));
+ CallCFunctionHelper(t9, num_reg_arguments, num_double_arguments);
}
void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.h b/deps/v8/src/mips64/macro-assembler-mips64.h
index a29c79635c..f89682d34c 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/mips64/macro-assembler-mips64.h
@@ -195,13 +195,13 @@ class TurboAssembler : public Assembler {
// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
- void Assert(Condition cc, BailoutReason reason, Register rs, Operand rt);
+ void Assert(Condition cc, AbortReason reason, Register rs, Operand rt);
// Like Assert(), but always enabled.
- void Check(Condition cc, BailoutReason reason, Register rs, Operand rt);
+ void Check(Condition cc, AbortReason reason, Register rs, Operand rt);
// Print a message to stdout and abort execution.
- void Abort(BailoutReason msg);
+ void Abort(AbortReason msg);
inline bool AllowThisStubCall(CodeStub* stub);
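These declarations keep the long-standing contract while swapping the reason type: Check always verifies and aborts on failure, Assert does so only when debug code is emitted (the .cc hunk above shows the Branch-over-Abort encoding). The same logic as straight-line C++, with the flag and Abort as stand-ins:

    #include <cstdio>
    #include <cstdlib>

    enum class AbortReason { kStackAccessBelowStackPointer };

    bool emit_debug_code = false;  // stand-in for FLAG_debug_code

    [[noreturn]] void Abort(AbortReason) {
      std::fputs("abort\n", stderr);
      std::abort();
    }

    // Check always verifies: branch over the abort when the condition holds.
    void Check(bool condition, AbortReason reason) {
      if (!condition) Abort(reason);
    }

    // Assert verifies nothing unless debug code is enabled.
    void Assert(bool condition, AbortReason reason) {
      if (emit_debug_code) Check(condition, reason);
    }

    int main() {
      Assert(false, AbortReason::kStackAccessBelowStackPointer);  // no-op here
      Check(2 + 2 == 4, AbortReason::kStackAccessBelowStackPointer);
    }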
diff --git a/deps/v8/src/mips64/simulator-mips64.cc b/deps/v8/src/mips64/simulator-mips64.cc
index e992efebf5..ebb8a76ad7 100644
--- a/deps/v8/src/mips64/simulator-mips64.cc
+++ b/deps/v8/src/mips64/simulator-mips64.cc
@@ -41,14 +41,14 @@ static int64_t MultiplyHighSigned(int64_t u, int64_t v) {
uint64_t u0, v0, w0;
int64_t u1, v1, w1, w2, t;
- u0 = u & 0xffffffffL;
+ u0 = u & 0xFFFFFFFFL;
u1 = u >> 32;
- v0 = v & 0xffffffffL;
+ v0 = v & 0xFFFFFFFFL;
v1 = v >> 32;
w0 = u0 * v0;
t = u1 * v0 + (w0 >> 32);
- w1 = t & 0xffffffffL;
+ w1 = t & 0xFFFFFFFFL;
w2 = t >> 32;
w1 = u0 * v1 + w1;
@@ -75,8 +75,8 @@ class MipsDebugger {
void PrintAllRegsIncludingFPU();
private:
- // We set the breakpoint code to 0xfffff to easily recognize it.
- static const Instr kBreakpointInstr = SPECIAL | BREAK | 0xfffff << 6;
+ // We set the breakpoint code to 0xFFFFF to easily recognize it.
+ static const Instr kBreakpointInstr = SPECIAL | BREAK | 0xFFFFF << 6;
static const Instr kNopInstr = 0x0;
Simulator* sim_;
@@ -401,7 +401,7 @@ void MipsDebugger::Debug() {
if (fpuregnum != kInvalidFPURegister) {
value = GetFPURegisterValue(fpuregnum);
- value &= 0xffffffffUL;
+ value &= 0xFFFFFFFFUL;
fvalue = GetFPURegisterValueFloat(fpuregnum);
PrintF("%s: 0x%08" PRIx64 " %11.4e\n", arg1, value, fvalue);
} else {
@@ -740,6 +740,10 @@ void Simulator::set_last_debugger_input(char* input) {
last_debugger_input_ = input;
}
+void Simulator::SetRedirectInstruction(Instruction* instruction) {
+ instruction->SetInstructionBits(rtCallRedirInstr);
+}
+
void Simulator::FlushICache(base::CustomMatcherHashMap* i_cache,
void* start_addr, size_t size) {
int64_t start = reinterpret_cast<int64_t>(start_addr);
@@ -809,21 +813,12 @@ void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
}
-void Simulator::Initialize(Isolate* isolate) {
- if (isolate->simulator_initialized()) return;
- isolate->set_simulator_initialized(true);
- ::v8::internal::ExternalReference::set_redirector(isolate,
- &RedirectExternalReference);
-}
-
-
Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
i_cache_ = isolate_->simulator_i_cache();
if (i_cache_ == nullptr) {
i_cache_ = new base::CustomMatcherHashMap(&ICacheMatch);
isolate_->set_simulator_i_cache(i_cache_);
}
- Initialize(isolate);
// Set up simulator support first. Some of this information is needed to
// setup the architecture state.
stack_size_ = FLAG_sim_stack_size * KB;
@@ -867,101 +862,6 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
Simulator::~Simulator() { free(stack_); }
-// When the generated code calls an external reference we need to catch that in
-// the simulator. The external reference will be a function compiled for the
-// host architecture. We need to call that function instead of trying to
-// execute it with the simulator. We do that by redirecting the external
-// reference to a swi (software-interrupt) instruction that is handled by
-// the simulator. We write the original destination of the jump just at a known
-// offset from the swi instruction so the simulator knows what to call.
-class Redirection {
- public:
- Redirection(Isolate* isolate, void* external_function,
- ExternalReference::Type type)
- : external_function_(external_function),
- swi_instruction_(rtCallRedirInstr),
- type_(type),
- next_(nullptr) {
- next_ = isolate->simulator_redirection();
- Simulator::current(isolate)->
- FlushICache(isolate->simulator_i_cache(),
- reinterpret_cast<void*>(&swi_instruction_),
- Instruction::kInstrSize);
- isolate->set_simulator_redirection(this);
- }
-
- void* address_of_swi_instruction() {
- return reinterpret_cast<void*>(&swi_instruction_);
- }
-
- void* external_function() { return external_function_; }
- ExternalReference::Type type() { return type_; }
-
- static Redirection* Get(Isolate* isolate, void* external_function,
- ExternalReference::Type type) {
- Redirection* current = isolate->simulator_redirection();
- for (; current != nullptr; current = current->next_) {
- if (current->external_function_ == external_function &&
- current->type_ == type) {
- return current;
- }
- }
- return new Redirection(isolate, external_function, type);
- }
-
- static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
- char* addr_of_swi = reinterpret_cast<char*>(swi_instruction);
- char* addr_of_redirection =
- addr_of_swi - offsetof(Redirection, swi_instruction_);
- return reinterpret_cast<Redirection*>(addr_of_redirection);
- }
-
- static void* ReverseRedirection(int64_t reg) {
- Redirection* redirection = FromSwiInstruction(
- reinterpret_cast<Instruction*>(reinterpret_cast<void*>(reg)));
- return redirection->external_function();
- }
-
- static void DeleteChain(Redirection* redirection) {
- while (redirection != nullptr) {
- Redirection* next = redirection->next_;
- delete redirection;
- redirection = next;
- }
- }
-
- private:
- void* external_function_;
- uint32_t swi_instruction_;
- ExternalReference::Type type_;
- Redirection* next_;
-};
-
-
-// static
-void Simulator::TearDown(base::CustomMatcherHashMap* i_cache,
- Redirection* first) {
- Redirection::DeleteChain(first);
- if (i_cache != nullptr) {
- for (base::HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
- entry = i_cache->Next(entry)) {
- delete static_cast<CachePage*>(entry->value);
- }
- delete i_cache;
- }
-}
-
-
-void* Simulator::RedirectExternalReference(Isolate* isolate,
- void* external_function,
- ExternalReference::Type type) {
- base::LockGuard<base::Mutex> lock_guard(
- isolate->simulator_redirection_mutex());
- Redirection* redirection = Redirection::Get(isolate, external_function, type);
- return redirection->address_of_swi_instruction();
-}
-
-
// Get the active Simulator for the current thread.
Simulator* Simulator::current(Isolate* isolate) {
v8::internal::Isolate::PerIsolateThreadData* isolate_data =
@@ -1077,19 +977,19 @@ int64_t Simulator::get_fpu_register(int fpureg) const {
int32_t Simulator::get_fpu_register_word(int fpureg) const {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
- return static_cast<int32_t>(FPUregisters_[fpureg * 2] & 0xffffffff);
+ return static_cast<int32_t>(FPUregisters_[fpureg * 2] & 0xFFFFFFFF);
}
int32_t Simulator::get_fpu_register_signed_word(int fpureg) const {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
- return static_cast<int32_t>(FPUregisters_[fpureg * 2] & 0xffffffff);
+ return static_cast<int32_t>(FPUregisters_[fpureg * 2] & 0xFFFFFFFF);
}
int32_t Simulator::get_fpu_register_hi_word(int fpureg) const {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
- return static_cast<int32_t>((FPUregisters_[fpureg * 2] >> 32) & 0xffffffff);
+ return static_cast<int32_t>((FPUregisters_[fpureg * 2] >> 32) & 0xFFFFFFFF);
}
@@ -1686,7 +1586,7 @@ int64_t Simulator::get_pc() const {
// TODO(plind): refactor this messy debug code when we do unaligned access.
void Simulator::DieOrDebug() {
- if (1) { // Flag for this was removed.
+ if ((1)) { // Flag for this was removed.
MipsDebugger dbg(this);
dbg.Debug();
} else {
@@ -2157,7 +2057,7 @@ void Simulator::WriteH(int64_t addr, int16_t value, Instruction* instr) {
uint32_t Simulator::ReadBU(int64_t addr) {
uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
TraceMemRd(addr, static_cast<int64_t>(*ptr));
- return *ptr & 0xff;
+ return *ptr & 0xFF;
}
@@ -2272,7 +2172,7 @@ void Simulator::SoftwareInterrupt() {
uint32_t code = (func == BREAK) ? instr_.Bits(25, 6) : -1;
// We first check if we met a call_rt_redirected.
if (instr_.InstructionBits() == rtCallRedirInstr) {
- Redirection* redirection = Redirection::FromSwiInstruction(instr_.instr());
+ Redirection* redirection = Redirection::FromInstruction(instr_.instr());
int64_t* stack_pointer = reinterpret_cast<int64_t*>(get_register(sp));
@@ -2546,7 +2446,7 @@ void Simulator::DisableStop(uint64_t code) {
void Simulator::IncreaseStopCounter(uint64_t code) {
DCHECK_LE(code, kMaxStopCode);
- if ((watched_stops_[code].count & ~(1 << 31)) == 0x7fffffff) {
+ if ((watched_stops_[code].count & ~(1 << 31)) == 0x7FFFFFFF) {
PrintF("Stop counter for code %" PRId64
" has overflowed.\n"
"Enabling this code and reseting the counter to 0.\n",
@@ -2865,8 +2765,8 @@ void Simulator::DecodeTypeRegisterSRsType() {
// Extracting sign, exponent and mantissa from the input float
uint32_t sign = (classed >> 31) & 1;
- uint32_t exponent = (classed >> 23) & 0x000000ff;
- uint32_t mantissa = classed & 0x007fffff;
+ uint32_t exponent = (classed >> 23) & 0x000000FF;
+ uint32_t mantissa = classed & 0x007FFFFF;
uint32_t result;
float fResult;
@@ -2887,7 +2787,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
// Setting flags if float is NaN
signalingNan = false;
quietNan = false;
- if (!negInf && !posInf && (exponent == 0xff)) {
+ if (!negInf && !posInf && (exponent == 0xFF)) {
quietNan = ((mantissa & 0x00200000) == 0) &&
((mantissa & (0x00200000 - 1)) == 0);
signalingNan = !quietNan;
@@ -3396,8 +3296,8 @@ void Simulator::DecodeTypeRegisterDRsType() {
// Extracting sign, exponent and mantissa from the input double
uint32_t sign = (classed >> 63) & 1;
- uint32_t exponent = (classed >> 52) & 0x00000000000007ff;
- uint64_t mantissa = classed & 0x000fffffffffffff;
+ uint32_t exponent = (classed >> 52) & 0x00000000000007FF;
+ uint64_t mantissa = classed & 0x000FFFFFFFFFFFFF;
uint64_t result;
double dResult;
@@ -3418,7 +3318,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
// Setting flags if double is NaN
signalingNan = false;
quietNan = false;
- if (!negInf && !posInf && exponent == 0x7ff) {
+ if (!negInf && !posInf && exponent == 0x7FF) {
quietNan = ((mantissa & 0x0008000000000000) != 0) &&
((mantissa & (0x0008000000000000 - 1)) == 0);
signalingNan = !quietNan;
@@ -3951,12 +3851,12 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
int32_t rt_lo = static_cast<int32_t>(rt());
i64hilo = static_cast<int64_t>(rs_lo) * static_cast<int64_t>(rt_lo);
if (kArchVariant != kMips64r6) {
- set_register(LO, static_cast<int32_t>(i64hilo & 0xffffffff));
+ set_register(LO, static_cast<int32_t>(i64hilo & 0xFFFFFFFF));
set_register(HI, static_cast<int32_t>(i64hilo >> 32));
} else {
switch (sa()) {
case MUL_OP:
- SetResult(rd_reg(), static_cast<int32_t>(i64hilo & 0xffffffff));
+ SetResult(rd_reg(), static_cast<int32_t>(i64hilo & 0xFFFFFFFF));
break;
case MUH_OP:
SetResult(rd_reg(), static_cast<int32_t>(i64hilo >> 32));
@@ -3969,15 +3869,15 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
break;
}
case MULTU:
- u64hilo = static_cast<uint64_t>(rs_u() & 0xffffffff) *
- static_cast<uint64_t>(rt_u() & 0xffffffff);
+ u64hilo = static_cast<uint64_t>(rs_u() & 0xFFFFFFFF) *
+ static_cast<uint64_t>(rt_u() & 0xFFFFFFFF);
if (kArchVariant != kMips64r6) {
- set_register(LO, static_cast<int32_t>(u64hilo & 0xffffffff));
+ set_register(LO, static_cast<int32_t>(u64hilo & 0xFFFFFFFF));
set_register(HI, static_cast<int32_t>(u64hilo >> 32));
} else {
switch (sa()) {
case MUL_OP:
- SetResult(rd_reg(), static_cast<int32_t>(u64hilo & 0xffffffff));
+ SetResult(rd_reg(), static_cast<int32_t>(u64hilo & 0xFFFFFFFF));
break;
case MUH_OP:
SetResult(rd_reg(), static_cast<int32_t>(u64hilo >> 32));
@@ -4370,7 +4270,7 @@ void Simulator::DecodeTypeRegisterSPECIAL3() {
// Reverse the bit in byte for each individual byte
for (int i = 0; i < 4; i++) {
output = output >> 8;
- i_byte = input & 0xff;
+ i_byte = input & 0xFF;
// Fast way to reverse bits in byte
// Devised by Sean Anderson, July 13, 2001
@@ -4475,7 +4375,7 @@ void Simulator::DecodeTypeRegisterSPECIAL3() {
// Reverse the bit in byte for each individual byte
for (int i = 0; i < 8; i++) {
output = output >> 8;
- i_byte = input & 0xff;
+ i_byte = input & 0xFF;
// Fast way to reverse bits in byte
// Devised by Sean Anderson, July 13, 2001
@@ -5482,8 +5382,8 @@ void Msa3RInstrHelper_shuffle(const uint32_t opcode, T_reg ws, T_reg wt,
wd_p[2 * i + 1] = ws_p[2 * i + 1];
break;
case VSHF: {
- const int mask_not_valid = 0xc0;
- const int mask_6_bits = 0x3f;
+ const int mask_not_valid = 0xC0;
+ const int mask_6_bits = 0x3F;
if ((wd_p[i] & mask_not_valid)) {
wd_p[i] = 0;
} else {
@@ -5882,7 +5782,7 @@ void Simulator::DecodeTypeMsa3RF() {
break; \
} \
/* Infinity */ \
- dst = PACK_FLOAT16(aSign, 0x1f, 0); \
+ dst = PACK_FLOAT16(aSign, 0x1F, 0); \
break; \
} else if (aExp == 0 && aFrac == 0) { \
dst = PACK_FLOAT16(aSign, 0, 0); \
@@ -5896,13 +5796,13 @@ void Simulator::DecodeTypeMsa3RF() {
aExp -= 0x71; \
if (aExp < 1) { \
/* Will be denormal in halfprec */ \
- mask = 0x00ffffff; \
+ mask = 0x00FFFFFF; \
if (aExp >= -11) { \
mask >>= 11 + aExp; \
} \
} else { \
/* Normal number in halfprec */ \
- mask = 0x00001fff; \
+ mask = 0x00001FFF; \
} \
switch (MSACSR_ & 3) { \
case kRoundToNearest: \
@@ -5923,7 +5823,7 @@ void Simulator::DecodeTypeMsa3RF() {
} \
rounding_bumps_exp = (aFrac + increment >= 0x01000000); \
if (aExp > maxexp || (aExp == maxexp && rounding_bumps_exp)) { \
- dst = PACK_FLOAT16(aSign, 0x1f, 0); \
+ dst = PACK_FLOAT16(aSign, 0x1F, 0); \
break; \
} \
aFrac += increment; \
@@ -6444,8 +6344,8 @@ template <typename T_int, typename T_fp, typename T_reg>
T_int Msa2RFInstrHelper2(uint32_t opcode, T_reg ws, int i) {
switch (opcode) {
#define EXTRACT_FLOAT16_SIGN(fp16) (fp16 >> 15)
-#define EXTRACT_FLOAT16_EXP(fp16) (fp16 >> 10 & 0x1f)
-#define EXTRACT_FLOAT16_FRAC(fp16) (fp16 & 0x3ff)
+#define EXTRACT_FLOAT16_EXP(fp16) (fp16 >> 10 & 0x1F)
+#define EXTRACT_FLOAT16_FRAC(fp16) (fp16 & 0x3FF)
#define PACK_FLOAT32(sign, exp, frac) \
static_cast<uint32_t>(((sign) << 31) + ((exp) << 23) + (frac))
#define FEXUP_DF(src_index) \
@@ -6455,9 +6355,9 @@ T_int Msa2RFInstrHelper2(uint32_t opcode, T_reg ws, int i) {
aSign = EXTRACT_FLOAT16_SIGN(element); \
aExp = EXTRACT_FLOAT16_EXP(element); \
aFrac = EXTRACT_FLOAT16_FRAC(element); \
- if (V8_LIKELY(aExp && aExp != 0x1f)) { \
+ if (V8_LIKELY(aExp && aExp != 0x1F)) { \
return PACK_FLOAT32(aSign, aExp + 0x70, aFrac << 13); \
- } else if (aExp == 0x1f) { \
+ } else if (aExp == 0x1F) { \
if (aFrac) { \
return bit_cast<int32_t>(std::numeric_limits<float>::quiet_NaN()); \
} else { \
@@ -6624,10 +6524,10 @@ void Simulator::DecodeTypeImmediate() {
int32_t ft_reg = instr_.FtValue(); // Destination register.
// Zero extended immediate.
- uint64_t oe_imm16 = 0xffff & imm16;
+ uint64_t oe_imm16 = 0xFFFF & imm16;
// Sign extended immediate.
int64_t se_imm16 = imm16;
- int64_t se_imm18 = imm18 | ((imm18 & 0x20000) ? 0xfffffffffffc0000 : 0);
+ int64_t se_imm18 = imm18 | ((imm18 & 0x20000) ? 0xFFFFFFFFFFFC0000 : 0);
// Next pc.
int64_t next_pc = bad_ra;
@@ -6678,11 +6578,11 @@ void Simulator::DecodeTypeImmediate() {
const int32_t bitsIn16Int = sizeof(int16_t) * kBitsPerByte;
if (do_branch) {
if (FLAG_debug_code) {
- int16_t bits = imm16 & 0xfc;
+ int16_t bits = imm16 & 0xFC;
if (imm16 >= 0) {
CHECK_EQ(bits, 0);
} else {
- CHECK_EQ(bits ^ 0xfc, 0);
+ CHECK_EQ(bits ^ 0xFC, 0);
}
}
// jump range :[pc + kInstrSize - 512 * kInstrSize,
@@ -6973,7 +6873,6 @@ void Simulator::DecodeTypeImmediate() {
break;
// ------------- Arithmetic instructions.
case ADDIU: {
- DCHECK(is_int32(rs));
int32_t alu32_out = static_cast<int32_t>(rs + se_imm16);
// Sign-extend result of 32bit operation into 64bit register.
SetResult(rt_reg, static_cast<int64_t>(alu32_out));
@@ -7122,7 +7021,7 @@ void Simulator::DecodeTypeImmediate() {
uint64_t mask = byte_shift ? (~0UL << (al_offset + 1) * 8) : 0;
addr = rs + se_imm16 - al_offset;
uint64_t mem_value = Read2W(addr, instr_.instr()) & mask;
- mem_value |= rt >> byte_shift * 8;
+ mem_value |= static_cast<uint64_t>(rt) >> byte_shift * 8;
Write2W(addr, mem_value, instr_.instr());
break;
}
@@ -7227,7 +7126,7 @@ void Simulator::DecodeTypeImmediate() {
}
case ADDIUPC: {
int64_t se_imm19 =
- imm19 | ((imm19 & 0x40000) ? 0xfffffffffff80000 : 0);
+ imm19 | ((imm19 & 0x40000) ? 0xFFFFFFFFFFF80000 : 0);
alu_out = current_pc + (se_imm19 << 2);
break;
}
@@ -7337,7 +7236,7 @@ void Simulator::DecodeTypeJump() {
// Get current pc.
int64_t current_pc = get_pc();
// Get unchanged bits of pc.
- int64_t pc_high_bits = current_pc & 0xfffffffff0000000;
+ int64_t pc_high_bits = current_pc & 0xFFFFFFFFF0000000;
// Next pc.
int64_t next_pc = pc_high_bits | (simInstr.Imm26Value() << 2);
@@ -7504,33 +7403,30 @@ void Simulator::CallInternal(byte* entry) {
set_register(fp, fp_val);
}
-
-int64_t Simulator::Call(byte* entry, int argument_count, ...) {
- const int kRegisterPassedArguments = 8;
- va_list parameters;
- va_start(parameters, argument_count);
+intptr_t Simulator::CallImpl(byte* entry, int argument_count,
+ const intptr_t* arguments) {
+ constexpr int kRegisterPassedArguments = 8;
// Set up arguments.
// First four arguments passed in registers in both ABI's.
- DCHECK_GE(argument_count, 4);
- set_register(a0, va_arg(parameters, int64_t));
- set_register(a1, va_arg(parameters, int64_t));
- set_register(a2, va_arg(parameters, int64_t));
- set_register(a3, va_arg(parameters, int64_t));
+ int reg_arg_count = std::min(kRegisterPassedArguments, argument_count);
+ if (reg_arg_count > 0) set_register(a0, arguments[0]);
+ if (reg_arg_count > 1) set_register(a1, arguments[1]);
+ if (reg_arg_count > 2) set_register(a2, arguments[2]);
+ if (reg_arg_count > 3) set_register(a3, arguments[3]);
// Up to eight arguments passed in registers in N64 ABI.
// TODO(plind): N64 ABI calls these regs a4 - a7. Clarify this.
- if (argument_count >= 5) set_register(a4, va_arg(parameters, int64_t));
- if (argument_count >= 6) set_register(a5, va_arg(parameters, int64_t));
- if (argument_count >= 7) set_register(a6, va_arg(parameters, int64_t));
- if (argument_count >= 8) set_register(a7, va_arg(parameters, int64_t));
+ if (reg_arg_count > 4) set_register(a4, arguments[4]);
+ if (reg_arg_count > 5) set_register(a5, arguments[5]);
+ if (reg_arg_count > 6) set_register(a6, arguments[6]);
+ if (reg_arg_count > 7) set_register(a7, arguments[7]);
// Remaining arguments passed on stack.
int64_t original_stack = get_register(sp);
// Compute position of stack on entry to generated code.
- int stack_args_count = (argument_count > kRegisterPassedArguments) ?
- (argument_count - kRegisterPassedArguments) : 0;
- int stack_args_size = stack_args_count * sizeof(int64_t) + kCArgsSlotsSize;
+ int stack_args_count = argument_count - reg_arg_count;
+ int stack_args_size = stack_args_count * sizeof(*arguments) + kCArgsSlotsSize;
int64_t entry_stack = original_stack - stack_args_size;
if (base::OS::ActivationFrameAlignment() != 0) {
@@ -7538,11 +7434,8 @@ int64_t Simulator::Call(byte* entry, int argument_count, ...) {
}
// Store remaining arguments on stack, from low to high memory.
intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
- for (int i = kRegisterPassedArguments; i < argument_count; i++) {
- int stack_index = i - kRegisterPassedArguments + kCArgSlotCount;
- stack_argument[stack_index] = va_arg(parameters, int64_t);
- }
- va_end(parameters);
+ memcpy(stack_argument + kCArgSlotCount, arguments + reg_arg_count,
+ stack_args_count * sizeof(*arguments));
set_register(sp, entry_stack);
CallInternal(entry);
@@ -7551,8 +7444,7 @@ int64_t Simulator::Call(byte* entry, int argument_count, ...) {
CHECK_EQ(entry_stack, get_register(sp));
set_register(sp, original_stack);
- int64_t result = get_register(v0);
- return result;
+ return get_register(v0);
}
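The new CallImpl spells out the MIPS N64 argument convention: up to eight integer arguments travel in a0..a7 and the remainder are memcpy'd onto the simulated stack above kCArgSlotCount reserved slots (needed by O32, zero under N64; treat that constant as an assumption here). The register/stack split in isolation:

    #include <algorithm>
    #include <cstdint>
    #include <cstring>

    constexpr int kRegisterPassedArguments = 8;  // a0..a7 under N64
    constexpr int kCArgSlotCount = 0;  // assumed: N64 reserves no arg slots (O32: 4)

    struct MachineState {
      intptr_t regs[kRegisterPassedArguments];  // a0..a7
      intptr_t stack[64];                       // simulated stack slots
    };

    // The register/stack split CallImpl performs above, in isolation.
    void MarshalArgs(MachineState* m, const intptr_t* args, int argc) {
      int reg_args = std::min(kRegisterPassedArguments, argc);
      for (int i = 0; i < reg_args; i++) m->regs[i] = args[i];
      // Remaining arguments spill to the stack, from low to high memory.
      std::memcpy(m->stack + kCArgSlotCount, args + reg_args,
                  (argc - reg_args) * sizeof(*args));
    }

    int main() {
      MachineState m = {};
      intptr_t args[10] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
      MarshalArgs(&m, args, 10);  // a0..a7 get 0..7; 8 and 9 go to the stack
    }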
diff --git a/deps/v8/src/mips64/simulator-mips64.h b/deps/v8/src/mips64/simulator-mips64.h
index 4ef22cbcfe..c4292236b0 100644
--- a/deps/v8/src/mips64/simulator-mips64.h
+++ b/deps/v8/src/mips64/simulator-mips64.h
@@ -2,11 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
// Declares a Simulator for MIPS instructions if we are not generating a native
// MIPS binary. This Simulator allows us to run and debug MIPS code generation
// on regular desktop machines.
-// V8 calls into generated code by "calling" the CALL_GENERATED_CODE macro,
+// V8 calls into generated code via the GeneratedCode wrapper,
// which will start execution in the Simulator or forward to the real entry
// on a MIPS HW platform.
@@ -16,71 +15,12 @@
#include "src/allocation.h"
#include "src/mips64/constants-mips64.h"
-#if !defined(USE_SIMULATOR)
-// Running without a simulator on a native mips platform.
-
-namespace v8 {
-namespace internal {
-
-// When running without a simulator we call the entry directly.
-#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
- entry(p0, p1, p2, p3, p4)
-
-
-// Call the generated regexp code directly. The code at the entry address
-// should act as a function matching the type arm_regexp_matcher.
-typedef int (*mips_regexp_matcher)(String* input,
- int64_t start_offset,
- const byte* input_start,
- const byte* input_end,
- int* output,
- int64_t output_size,
- Address stack_base,
- int64_t direct_call,
- Isolate* isolate);
-
-#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
- p7, p8) \
- (FUNCTION_CAST<mips_regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, \
- p8))
-
-// The stack limit beyond which we will throw stack overflow errors in
-// generated code. Because generated code on mips uses the C stack, we
-// just use the C stack limit.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
- uintptr_t c_limit) {
- return c_limit;
- }
-
- static inline uintptr_t RegisterCTryCatch(Isolate* isolate,
- uintptr_t try_catch_address) {
- USE(isolate);
- return try_catch_address;
- }
-
- static inline void UnregisterCTryCatch(Isolate* isolate) { USE(isolate); }
-};
-
-} // namespace internal
-} // namespace v8
-
-// Calculated the stack limit beyond which we will throw stack overflow errors.
-// This macro must be called from a C++ method. It relies on being able to take
-// the address of "this" to get a value on the current execution stack and then
-// calculates the stack limit based on that value.
-// NOTE: The check for overflow is not safe as there is no guarantee that the
-// running thread has its stack in all memory up to address 0x00000000.
-#define GENERATED_CODE_STACK_LIMIT(limit) \
- (reinterpret_cast<uintptr_t>(this) >= limit ? \
- reinterpret_cast<uintptr_t>(this) - limit : 0)
-
-#else // !defined(USE_SIMULATOR)
+#if defined(USE_SIMULATOR)
// Running with a simulator.
#include "src/assembler.h"
#include "src/base/hashmap.h"
+#include "src/simulator-base.h"
namespace v8 {
namespace internal {
@@ -151,7 +91,7 @@ class SimInstruction : public InstructionGetters<SimInstructionBase> {
}
};
-class Simulator {
+class Simulator : public SimulatorBase {
public:
friend class MipsDebugger;
@@ -231,7 +171,7 @@ class Simulator {
// The currently executing Simulator instance. Potentially there can be one
// for each native thread.
- static Simulator* current(v8::internal::Isolate* isolate);
+ V8_EXPORT_PRIVATE static Simulator* current(v8::internal::Isolate* isolate);
// Accessors for register state. Reading the pc value adheres to the MIPS
// architecture specification and is off by 8 from the currently executing
@@ -298,15 +238,11 @@ class Simulator {
// Executes MIPS instructions until the PC reaches end_sim_pc.
void Execute();
- // Call on program start.
- static void Initialize(Isolate* isolate);
-
- static void TearDown(base::CustomMatcherHashMap* i_cache, Redirection* first);
+ template <typename Return, typename... Args>
+ Return Call(byte* entry, Args... args) {
+ return VariadicCall<Return>(this, &Simulator::CallImpl, entry, args...);
+ }
- // V8 generally calls into generated JS code with 5 parameters and into
- // generated RegExp code with 7 parameters. This is a convenience function,
- // which sets up the simulator state and grabs the result on return.
- int64_t Call(byte* entry, int argument_count, ...);
// Alternative: call a 2-argument double function.
double CallFP(byte* entry, double d0, double d1);
@@ -320,6 +256,9 @@ class Simulator {
void set_last_debugger_input(char* input);
char* last_debugger_input() { return last_debugger_input_; }
+ // Redirection support.
+ static void SetRedirectInstruction(Instruction* instruction);
+
// ICache checking.
static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
size_t size);
@@ -342,6 +281,9 @@ class Simulator {
Unpredictable = 0xbadbeaf
};
+ V8_EXPORT_PRIVATE intptr_t CallImpl(byte* entry, int argument_count,
+ const intptr_t* arguments);
+
// Unsupported instructions use Format to print an error and stop execution.
void Format(Instruction* instr, const char* format);
@@ -587,11 +529,6 @@ class Simulator {
// Exceptions.
void SignalException(Exception e);
- // Runtime call support. Uses the isolate in a thread-safe way.
- static void* RedirectExternalReference(Isolate* isolate,
- void* external_function,
- ExternalReference::Type type);
-
// Handle arguments and return value for runtime FP functions.
void GetFpArgs(double* x, double* y, int32_t* z);
void SetFpResult(const double& result);
@@ -645,45 +582,8 @@ class Simulator {
StopCountAndDesc watched_stops_[kMaxStopCode + 1];
};
-
-// When running with the simulator transition into simulated execution at this
-// point.
-#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
- reinterpret_cast<Object*>(Simulator::current(isolate)->Call( \
- FUNCTION_ADDR(entry), 5, reinterpret_cast<int64_t*>(p0), \
- reinterpret_cast<int64_t*>(p1), reinterpret_cast<int64_t*>(p2), \
- reinterpret_cast<int64_t*>(p3), reinterpret_cast<int64_t*>(p4)))
-
-#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
- p7, p8) \
- static_cast<int>(Simulator::current(isolate)->Call( \
- entry, 9, p0, p1, p2, p3, p4, reinterpret_cast<int64_t*>(p5), p6, p7, \
- p8))
-
-// The simulator has its own stack. Thus it has a different stack limit from
-// the C-based native code. The JS-based limit normally points near the end of
-// the simulator stack. When the C-based limit is exhausted we reflect that by
-// lowering the JS-based limit as well, to make stack checks trigger.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
- uintptr_t c_limit) {
- return Simulator::current(isolate)->StackLimit(c_limit);
- }
-
- static inline uintptr_t RegisterCTryCatch(Isolate* isolate,
- uintptr_t try_catch_address) {
- Simulator* sim = Simulator::current(isolate);
- return sim->PushAddress(try_catch_address);
- }
-
- static inline void UnregisterCTryCatch(Isolate* isolate) {
- Simulator::current(isolate)->PopAddress();
- }
-};
-
} // namespace internal
} // namespace v8
-#endif // !defined(USE_SIMULATOR)
+#endif // defined(USE_SIMULATOR)
#endif // V8_MIPS_SIMULATOR_MIPS_H_
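What replaced the Redirection machinery deleted above: the simulator now only provides SetRedirectInstruction (writing rtCallRedirInstr) and the shared SimulatorBase owns the redirection chain. The underlying trick survives: each external C function gets a one-word trap stub, and the simulator recovers the owning record from the trapped address with offsetof arithmetic. A self-contained form of that trick, where kTrapInstr is a placeholder and not the real rtCallRedirInstr encoding:

    #include <cstddef>
    #include <cstdint>

    constexpr uint32_t kTrapInstr = 0x0000000D;  // placeholder break encoding

    struct Redirection {
      void* external_function = nullptr;
      uint32_t swi_instruction = kTrapInstr;  // generated code "calls" this word

      void* address_of_swi_instruction() { return &swi_instruction; }

      // Invert address_of_swi_instruction(): step back from the trapped
      // instruction word to the enclosing record.
      static Redirection* FromSwiInstruction(void* swi) {
        char* p =
            static_cast<char*>(swi) - offsetof(Redirection, swi_instruction);
        return reinterpret_cast<Redirection*>(p);
      }
    };

    int main() {
      Redirection r;
      void* stub = r.address_of_swi_instruction();
      // On trapping `stub`, the simulator finds `r` and calls the real function.
      return Redirection::FromSwiInstruction(stub) == &r ? 0 : 1;
    }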
diff --git a/deps/v8/src/objects-body-descriptors-inl.h b/deps/v8/src/objects-body-descriptors-inl.h
index ec6c39e288..bd391d272b 100644
--- a/deps/v8/src/objects-body-descriptors-inl.h
+++ b/deps/v8/src/objects-body-descriptors-inl.h
@@ -564,6 +564,9 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3) {
} else {
return Op::template apply<StructBodyDescriptor>(p1, p2, p3);
}
+ case LOAD_HANDLER_TYPE:
+ case STORE_HANDLER_TYPE:
+ return Op::template apply<StructBodyDescriptor>(p1, p2, p3);
default:
PrintF("Unknown type: %d\n", type);
UNREACHABLE();
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index f1f49d5c45..142dbf6611 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -14,6 +14,7 @@
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
#include "src/objects/bigint.h"
+#include "src/objects/data-handler-inl.h"
#include "src/objects/debug-objects-inl.h"
#include "src/objects/literal-objects.h"
#include "src/objects/module.h"
@@ -44,6 +45,13 @@ void Object::VerifyPointer(Object* p) {
}
}
+namespace {
+void VerifyForeignPointer(HeapObject* host, Object* foreign) {
+ host->VerifyPointer(foreign);
+ CHECK(foreign->IsUndefined(host->GetIsolate()) ||
+ Foreign::IsNormalized(foreign));
+}
+} // namespace
void Smi::SmiVerify() {
CHECK(IsSmi());
@@ -252,6 +260,14 @@ void HeapObject::HeapObjectVerify() {
STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
+ case LOAD_HANDLER_TYPE:
+ LoadHandler::cast(this)->LoadHandlerVerify();
+ break;
+
+ case STORE_HANDLER_TYPE:
+ StoreHandler::cast(this)->StoreHandlerVerify();
+ break;
+
default:
UNREACHABLE();
break;
@@ -432,6 +448,10 @@ void Map::MapVerify() {
CHECK_IMPLIES(IsJSObjectMap() && !CanHaveFastTransitionableElementsKind(),
IsDictionaryElementsKind(elements_kind()) ||
IsTerminalElementsKind(elements_kind()));
+ if (is_prototype_map()) {
+ DCHECK(prototype_info() == Smi::kZero ||
+ prototype_info()->IsPrototypeInfo());
+ }
}
@@ -475,11 +495,11 @@ void FixedDoubleArray::FixedDoubleArrayVerify() {
uint64_t value = get_representation(i);
uint64_t unexpected =
bit_cast<uint64_t>(std::numeric_limits<double>::quiet_NaN()) &
- V8_UINT64_C(0x7FF8000000000000);
+ uint64_t{0x7FF8000000000000};
// Create implementation specific sNaN by inverting relevant bit.
- unexpected ^= V8_UINT64_C(0x0008000000000000);
- CHECK((value & V8_UINT64_C(0x7FF8000000000000)) != unexpected ||
- (value & V8_UINT64_C(0x0007FFFFFFFFFFFF)) == V8_UINT64_C(0));
+ unexpected ^= uint64_t{0x0008000000000000};
+ CHECK((value & uint64_t{0x7FF8000000000000}) != unexpected ||
+ (value & uint64_t{0x0007FFFFFFFFFFFF}) == uint64_t{0});
}
}
}
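
The rewritten constants make the IEEE-754 intent easier to see: 0x7FF8... (exponent all ones plus the top mantissa bit) is the canonical quiet-NaN prefix, and XOR-ing with 0x0008... clears the quiet bit, yielding a signaling-NaN prefix the verifier rejects — except when the low 51 mantissa bits are all zero, since that bit pattern is an infinity rather than a NaN. The same arithmetic, checked standalone (assuming the usual quiet-NaN representation on the platforms V8 targets):

#include <cassert>
#include <cstdint>
#include <cstring>
#include <limits>

int main() {
  double qnan = std::numeric_limits<double>::quiet_NaN();
  uint64_t bits;
  std::memcpy(&bits, &qnan, sizeof(bits));  // portable bit_cast

  // Quiet-NaN prefix: exponent all ones + top mantissa bit set.
  assert((bits & uint64_t{0x7FF8000000000000}) ==
         uint64_t{0x7FF8000000000000});

  // Flipping the quiet bit gives the signaling-NaN prefix; with a nonzero
  // low payload it is an sNaN, with a zero payload it is an infinity.
  uint64_t snan_prefix =
      (bits & uint64_t{0x7FF8000000000000}) ^ uint64_t{0x0008000000000000};
  assert(snan_prefix == uint64_t{0x7FF0000000000000});
  return 0;
}
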
@@ -930,7 +950,7 @@ void JSArray::JSArrayVerify() {
CHECK(HasDictionaryElements());
uint32_t array_length;
CHECK(length()->ToArrayLength(&array_length));
- if (array_length == 0xffffffff) {
+ if (array_length == 0xFFFFFFFF) {
CHECK(length()->ToArrayLength(&array_length));
}
if (array_length != 0) {
@@ -1137,8 +1157,10 @@ void JSProxy::JSProxyVerify() {
VerifyPointer(target());
VerifyPointer(handler());
Isolate* isolate = GetIsolate();
- CHECK_EQ(target()->IsCallable(), map()->is_callable());
- CHECK_EQ(target()->IsConstructor(), map()->is_constructor());
+ if (!IsRevoked()) {
+ CHECK_EQ(target()->IsCallable(), map()->is_callable());
+ CHECK_EQ(target()->IsConstructor(), map()->is_constructor());
+ }
CHECK(map()->prototype()->IsNull(isolate));
// There should be no properties on a Proxy.
CHECK_EQ(0, map()->NumberOfOwnDescriptors());
@@ -1303,7 +1325,7 @@ void PrototypeInfo::PrototypeInfoVerify() {
} else {
CHECK(prototype_users()->IsSmi());
}
- CHECK(validity_cell()->IsCell() || validity_cell()->IsSmi());
+ CHECK(validity_cell()->IsSmi() || validity_cell()->IsCell());
}
void Tuple2::Tuple2Verify() {
@@ -1325,6 +1347,33 @@ void Tuple3::Tuple3Verify() {
VerifyObjectField(kValue3Offset);
}
+void DataHandler::DataHandlerVerify() {
+ CHECK(IsDataHandler());
+ CHECK_IMPLIES(!smi_handler()->IsSmi(),
+ smi_handler()->IsCode() && IsStoreHandler());
+ CHECK(validity_cell()->IsSmi() || validity_cell()->IsCell());
+ int data_count = data_field_count();
+ if (data_count >= 1) {
+ VerifyObjectField(kData1Offset);
+ }
+ if (data_count >= 2) {
+ VerifyObjectField(kData2Offset);
+ }
+ if (data_count >= 3) {
+ VerifyObjectField(kData3Offset);
+ }
+}
+
+void LoadHandler::LoadHandlerVerify() {
+ DataHandler::DataHandlerVerify();
+ // TODO(ishell): check handler integrity
+}
+
+void StoreHandler::StoreHandlerVerify() {
+ DataHandler::DataHandlerVerify();
+ // TODO(ishell): check handler integrity
+}
+
void ContextExtension::ContextExtensionVerify() {
CHECK(IsContextExtension());
VerifyObjectField(kScopeInfoOffset);
@@ -1335,9 +1384,9 @@ void AccessorInfo::AccessorInfoVerify() {
CHECK(IsAccessorInfo());
VerifyPointer(name());
VerifyPointer(expected_receiver_type());
- VerifyPointer(getter());
- VerifyPointer(setter());
- VerifyPointer(js_getter());
+ VerifyForeignPointer(this, getter());
+ VerifyForeignPointer(this, setter());
+ VerifyForeignPointer(this, js_getter());
VerifyPointer(data());
}
@@ -1360,11 +1409,11 @@ void AccessCheckInfo::AccessCheckInfoVerify() {
void InterceptorInfo::InterceptorInfoVerify() {
CHECK(IsInterceptorInfo());
- VerifyPointer(getter());
- VerifyPointer(setter());
- VerifyPointer(query());
- VerifyPointer(deleter());
- VerifyPointer(enumerator());
+ VerifyForeignPointer(this, getter());
+ VerifyForeignPointer(this, setter());
+ VerifyForeignPointer(this, query());
+ VerifyForeignPointer(this, deleter());
+ VerifyForeignPointer(this, enumerator());
VerifyPointer(data());
VerifySmiField(kFlagsOffset);
}
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 1cbb299057..c3841aa63e 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -34,9 +34,12 @@
#include "src/objects.h"
#include "src/objects/arguments-inl.h"
#include "src/objects/bigint.h"
+#include "src/objects/data-handler-inl.h"
+#include "src/objects/fixed-array-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/hash-table.h"
#include "src/objects/js-array-inl.h"
+#include "src/objects/js-collection-inl.h"
#include "src/objects/js-regexp-inl.h"
#include "src/objects/literal-objects.h"
#include "src/objects/module-inl.h"
@@ -77,15 +80,12 @@ int PropertyDetails::field_width_in_words() const {
TYPE_CHECKER(BigInt, BIGINT_TYPE)
TYPE_CHECKER(BreakPoint, TUPLE2_TYPE)
TYPE_CHECKER(BreakPointInfo, TUPLE2_TYPE)
-TYPE_CHECKER(ByteArray, BYTE_ARRAY_TYPE)
TYPE_CHECKER(CallHandlerInfo, TUPLE3_TYPE)
TYPE_CHECKER(Cell, CELL_TYPE)
TYPE_CHECKER(ConstantElementsPair, TUPLE2_TYPE)
TYPE_CHECKER(CoverageInfo, FIXED_ARRAY_TYPE)
TYPE_CHECKER(DescriptorArray, DESCRIPTOR_ARRAY_TYPE)
TYPE_CHECKER(FeedbackVector, FEEDBACK_VECTOR_TYPE)
-TYPE_CHECKER(FixedArrayExact, FIXED_ARRAY_TYPE)
-TYPE_CHECKER(FixedDoubleArray, FIXED_DOUBLE_ARRAY_TYPE)
TYPE_CHECKER(Foreign, FOREIGN_TYPE)
TYPE_CHECKER(FreeSpace, FREE_SPACE_TYPE)
TYPE_CHECKER(HashTable, HASH_TABLE_TYPE)
@@ -99,15 +99,10 @@ TYPE_CHECKER(JSDate, JS_DATE_TYPE)
TYPE_CHECKER(JSError, JS_ERROR_TYPE)
TYPE_CHECKER(JSFunction, JS_FUNCTION_TYPE)
TYPE_CHECKER(JSGlobalObject, JS_GLOBAL_OBJECT_TYPE)
-TYPE_CHECKER(JSMap, JS_MAP_TYPE)
TYPE_CHECKER(JSMessageObject, JS_MESSAGE_OBJECT_TYPE)
TYPE_CHECKER(JSPromise, JS_PROMISE_TYPE)
-TYPE_CHECKER(JSSet, JS_SET_TYPE)
TYPE_CHECKER(JSStringIterator, JS_STRING_ITERATOR_TYPE)
TYPE_CHECKER(JSValue, JS_VALUE_TYPE)
-TYPE_CHECKER(JSWeakMap, JS_WEAK_MAP_TYPE)
-TYPE_CHECKER(JSWeakSet, JS_WEAK_SET_TYPE)
-TYPE_CHECKER(Map, MAP_TYPE)
TYPE_CHECKER(MutableHeapNumber, MUTABLE_HEAP_NUMBER_TYPE)
TYPE_CHECKER(Oddball, ODDBALL_TYPE)
TYPE_CHECKER(PreParsedScopeData, TUPLE2_TYPE)
@@ -120,13 +115,11 @@ TYPE_CHECKER(SourcePositionTableWithFrameCache, TUPLE2_TYPE)
TYPE_CHECKER(TemplateMap, HASH_TABLE_TYPE)
TYPE_CHECKER(TemplateObjectDescription, TUPLE3_TYPE)
TYPE_CHECKER(TransitionArray, TRANSITION_ARRAY_TYPE)
-TYPE_CHECKER(TypeFeedbackInfo, TUPLE3_TYPE)
TYPE_CHECKER(WasmInstanceObject, WASM_INSTANCE_TYPE)
TYPE_CHECKER(WasmMemoryObject, WASM_MEMORY_TYPE)
TYPE_CHECKER(WasmModuleObject, WASM_MODULE_TYPE)
TYPE_CHECKER(WasmTableObject, WASM_TABLE_TYPE)
TYPE_CHECKER(WeakCell, WEAK_CELL_TYPE)
-TYPE_CHECKER(WeakFixedArray, FIXED_ARRAY_TYPE)
#define TYPED_ARRAY_TYPE_CHECKER(Type, type, TYPE, ctype, size) \
TYPE_CHECKER(Fixed##Type##Array, FIXED_##TYPE##_ARRAY_TYPE)
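
The deleted checkers lose no functionality; this patch relocates them to the new per-object headers (objects/fixed-array-inl.h, objects/js-collection-inl.h, and friends, per the includes added above). The macro itself just compares the map's instance type — approximately, as a simplified sketch rather than the verbatim macro:

// Simplified model of V8's TYPE_CHECKER pattern: each checker compares the
// object's map instance type against one enum value.
enum InstanceType { ODDBALL_TYPE, CELL_TYPE, FOREIGN_TYPE };

struct Map { InstanceType instance_type; };

struct HeapObject {
  const Map* map;
#define TYPE_CHECKER(Type, TYPE) \
  bool Is##Type() const { return map->instance_type == TYPE; }
  TYPE_CHECKER(Oddball, ODDBALL_TYPE)
  TYPE_CHECKER(Cell, CELL_TYPE)
  TYPE_CHECKER(Foreign, FOREIGN_TYPE)
#undef TYPE_CHECKER
};
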
@@ -304,6 +297,9 @@ bool HeapObject::IsJSProxy() const { return map()->IsJSProxyMap(); }
bool HeapObject::IsJSMapIterator() const {
InstanceType instance_type = map()->instance_type();
+ STATIC_ASSERT(JS_MAP_KEY_ITERATOR_TYPE + 1 == JS_MAP_KEY_VALUE_ITERATOR_TYPE);
+ STATIC_ASSERT(JS_MAP_KEY_VALUE_ITERATOR_TYPE + 1 ==
+ JS_MAP_VALUE_ITERATOR_TYPE);
return (instance_type >= JS_MAP_KEY_ITERATOR_TYPE &&
instance_type <= JS_MAP_VALUE_ITERATOR_TYPE);
}
@@ -332,7 +328,10 @@ bool HeapObject::IsEnumCache() const { return IsTuple2(); }
bool HeapObject::IsFrameArray() const { return IsFixedArrayExact(); }
-bool HeapObject::IsArrayList() const { return IsFixedArrayExact(); }
+bool HeapObject::IsArrayList() const {
+ return map() == GetHeap()->array_list_map() ||
+ this == GetHeap()->empty_fixed_array();
+}
bool HeapObject::IsRegExpMatchInfo() const { return IsFixedArrayExact(); }
@@ -548,21 +547,15 @@ CAST_ACCESSOR(AccessorInfo)
CAST_ACCESSOR(AccessorPair)
CAST_ACCESSOR(AllocationMemento)
CAST_ACCESSOR(AllocationSite)
-CAST_ACCESSOR(ArrayList)
CAST_ACCESSOR(AsyncGeneratorRequest)
CAST_ACCESSOR(BigInt)
CAST_ACCESSOR(BoilerplateDescription)
-CAST_ACCESSOR(ByteArray)
CAST_ACCESSOR(CallHandlerInfo)
CAST_ACCESSOR(Cell)
CAST_ACCESSOR(ConstantElementsPair)
CAST_ACCESSOR(ContextExtension)
CAST_ACCESSOR(DescriptorArray)
CAST_ACCESSOR(EnumCache)
-CAST_ACCESSOR(FixedArray)
-CAST_ACCESSOR(FixedArrayBase)
-CAST_ACCESSOR(FixedDoubleArray)
-CAST_ACCESSOR(FixedTypedArrayBase)
CAST_ACCESSOR(Foreign)
CAST_ACCESSOR(FunctionTemplateInfo)
CAST_ACCESSOR(GlobalDictionary)
@@ -577,20 +570,13 @@ CAST_ACCESSOR(JSFunction)
CAST_ACCESSOR(JSGeneratorObject)
CAST_ACCESSOR(JSGlobalObject)
CAST_ACCESSOR(JSGlobalProxy)
-CAST_ACCESSOR(JSMap)
-CAST_ACCESSOR(JSMapIterator)
CAST_ACCESSOR(JSMessageObject)
CAST_ACCESSOR(JSObject)
CAST_ACCESSOR(JSPromise)
CAST_ACCESSOR(JSProxy)
CAST_ACCESSOR(JSReceiver)
-CAST_ACCESSOR(JSSet)
-CAST_ACCESSOR(JSSetIterator)
CAST_ACCESSOR(JSStringIterator)
CAST_ACCESSOR(JSValue)
-CAST_ACCESSOR(JSWeakCollection)
-CAST_ACCESSOR(JSWeakMap)
-CAST_ACCESSOR(JSWeakSet)
CAST_ACCESSOR(LayoutDescriptor)
CAST_ACCESSOR(NameDictionary)
CAST_ACCESSOR(NormalizedMapCache)
@@ -619,14 +605,11 @@ CAST_ACCESSOR(StringSet)
CAST_ACCESSOR(StringTable)
CAST_ACCESSOR(Struct)
CAST_ACCESSOR(TemplateInfo)
-CAST_ACCESSOR(TemplateList)
CAST_ACCESSOR(TemplateMap)
CAST_ACCESSOR(TemplateObjectDescription)
CAST_ACCESSOR(Tuple2)
CAST_ACCESSOR(Tuple3)
-CAST_ACCESSOR(TypeFeedbackInfo)
CAST_ACCESSOR(WeakCell)
-CAST_ACCESSOR(WeakFixedArray)
CAST_ACCESSOR(WeakHashTable)
bool Object::HasValidElements() {
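
Same relocation story for the CAST_ACCESSOR block: the macro emits a debug-checked downcast — assert the Is-predicate, then cast — and those instantiations now live beside each class in its own -inl.h header. The idiom in miniature (a plain virtual predicate stands in for the map-based one):

#include <cassert>

struct Object {
  virtual ~Object() = default;
  virtual bool IsCell() const { return false; }
};

struct Cell : Object {
  bool IsCell() const override { return true; }
  // What CAST_ACCESSOR(Cell) generates, in spirit: check, then cast.
  static Cell* cast(Object* object) {
    assert(object->IsCell());  // SLOW_DCHECK in the real macro
    return static_cast<Cell*>(object);
  }
};
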
@@ -1087,29 +1070,8 @@ int HeapNumber::get_sign() {
return READ_INT_FIELD(this, kExponentOffset) & kSignMask;
}
-inline Object* OrderedHashMap::ValueAt(int entry) {
- DCHECK_LT(entry, this->UsedCapacity());
- return get(EntryToIndex(entry) + kValueOffset);
-}
-
ACCESSORS(JSReceiver, raw_properties_or_hash, Object, kPropertiesOrHashOffset)
-Object** FixedArray::GetFirstElementAddress() {
- return reinterpret_cast<Object**>(FIELD_ADDR(this, OffsetOfElementAt(0)));
-}
-
-
-bool FixedArray::ContainsOnlySmisOrHoles() {
- Object* the_hole = GetHeap()->the_hole_value();
- Object** current = GetFirstElementAddress();
- for (int i = 0; i < length(); ++i) {
- Object* candidate = *current++;
- if (!candidate->IsSmi() && candidate != the_hole) return false;
- }
- return true;
-}
-
-
FixedArrayBase* JSObject::elements() const {
Object* array = READ_FIELD(this, kElementsOffset);
return static_cast<FixedArrayBase*>(array);
@@ -1467,6 +1429,15 @@ inline bool IsSpecialReceiverInstanceType(InstanceType instance_type) {
return instance_type <= LAST_SPECIAL_RECEIVER_TYPE;
}
+// This should be in objects/map-inl.h, but can't, because of a cyclic
+// dependency.
+bool Map::IsSpecialReceiverMap() const {
+ bool result = IsSpecialReceiverInstanceType(instance_type());
+ DCHECK_IMPLIES(!result,
+ !has_named_interceptor() && !is_access_check_needed());
+ return result;
+}
+
// static
int JSObject::GetEmbedderFieldCount(const Map* map) {
int instance_size = map->instance_size();
@@ -1517,13 +1488,6 @@ bool JSObject::IsUnboxedDoubleField(FieldIndex index) {
return map()->IsUnboxedDoubleField(index);
}
-bool Map::IsUnboxedDoubleField(FieldIndex index) const {
- if (!FLAG_unbox_double_fields) return false;
- if (index.is_hidden_field() || !index.is_inobject()) return false;
- return !layout_descriptor()->IsTagged(index.property_index());
-}
-
-
// Access fast-case object properties at index. The use of these routines
// is needed to correctly distinguish between properties stored in-object and
// properties stored in the properties array.
@@ -1657,16 +1621,6 @@ void JSObject::InitializeBody(Map* map, int start_offset,
}
}
-bool Map::TooManyFastProperties(StoreFromKeyed store_mode) const {
- if (UnusedPropertyFields() != 0) return false;
- if (is_prototype_map()) return false;
- int minimum = store_mode == CERTAINLY_NOT_STORE_FROM_KEYED ? 128 : 12;
- int limit = Max(minimum, GetInObjectProperties());
- int external = NumberOfFields() - GetInObjectProperties();
- return external > limit;
-}
-
-
void Struct::InitializeBody(int object_size) {
Object* value = GetHeap()->undefined_value();
for (int offset = kHeaderSize; offset < object_size; offset += kPointerSize) {
@@ -1696,57 +1650,12 @@ void Object::VerifyApiCallResultType() {
#endif // DEBUG
}
-
-Object* FixedArray::get(int index) const {
- SLOW_DCHECK(index >= 0 && index < this->length());
- return RELAXED_READ_FIELD(this, kHeaderSize + index * kPointerSize);
-}
-
Object* PropertyArray::get(int index) const {
DCHECK_GE(index, 0);
DCHECK_LE(index, this->length());
return RELAXED_READ_FIELD(this, kHeaderSize + index * kPointerSize);
}
-Handle<Object> FixedArray::get(FixedArray* array, int index, Isolate* isolate) {
- return handle(array->get(index), isolate);
-}
-
-template <class T>
-MaybeHandle<T> FixedArray::GetValue(Isolate* isolate, int index) const {
- Object* obj = get(index);
- if (obj->IsUndefined(isolate)) return MaybeHandle<T>();
- return Handle<T>(T::cast(obj), isolate);
-}
-
-template <class T>
-Handle<T> FixedArray::GetValueChecked(Isolate* isolate, int index) const {
- Object* obj = get(index);
- CHECK(!obj->IsUndefined(isolate));
- return Handle<T>(T::cast(obj), isolate);
-}
-bool FixedArray::is_the_hole(Isolate* isolate, int index) {
- return get(index)->IsTheHole(isolate);
-}
-
-void FixedArray::set(int index, Smi* value) {
- DCHECK_NE(map(), GetHeap()->fixed_cow_array_map());
- DCHECK_LT(index, this->length());
- DCHECK(reinterpret_cast<Object*>(value)->IsSmi());
- int offset = kHeaderSize + index * kPointerSize;
- RELAXED_WRITE_FIELD(this, offset, value);
-}
-
-void FixedArray::set(int index, Object* value) {
- DCHECK_NE(GetHeap()->fixed_cow_array_map(), map());
- DCHECK(IsFixedArray());
- DCHECK_GE(index, 0);
- DCHECK_LT(index, this->length());
- int offset = kHeaderSize + index * kPointerSize;
- RELAXED_WRITE_FIELD(this, offset, value);
- WRITE_BARRIER(GetHeap(), this, offset, value);
-}
-
void PropertyArray::set(int index, Object* value) {
DCHECK(IsPropertyArray());
DCHECK_GE(index, 0);
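
The FixedArray::set overloads moving out of this header show the store protocol for tagged fields: a relaxed write (racing readers observe either the old or the new pointer, never a torn one) followed by a generational write barrier, while the Smi overload skips the barrier because Smis are not heap pointers. A toy model of the barrier half — ToyHeap and its set-based remembered set are stand-ins for the real machinery:

#include <atomic>
#include <unordered_set>

// Toy model of a generational write barrier: after storing a pointer into an
// old-space object, remember the slot so a minor GC can find old-to-new edges.
struct ToyHeap {
  std::unordered_set<void*> new_space_objects;
  std::unordered_set<void**> remembered_slots;
  bool InNewSpace(void* p) const { return new_space_objects.count(p) != 0; }
};

inline void WriteWithBarrier(ToyHeap* heap, void** slot, void* value) {
  // Relaxed store: what RELAXED_WRITE_FIELD provides.
  reinterpret_cast<std::atomic<void*>*>(slot)->store(
      value, std::memory_order_relaxed);
  // The barrier proper (WRITE_BARRIER): record old->new pointers.
  if (heap->InNewSpace(value)) heap->remembered_slots.insert(slot);
}
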
@@ -1756,154 +1665,6 @@ void PropertyArray::set(int index, Object* value) {
WRITE_BARRIER(GetHeap(), this, offset, value);
}
-double FixedDoubleArray::get_scalar(int index) {
- DCHECK(map() != GetHeap()->fixed_cow_array_map() &&
- map() != GetHeap()->fixed_array_map());
- DCHECK(index >= 0 && index < this->length());
- DCHECK(!is_the_hole(index));
- return READ_DOUBLE_FIELD(this, kHeaderSize + index * kDoubleSize);
-}
-
-
-uint64_t FixedDoubleArray::get_representation(int index) {
- DCHECK(map() != GetHeap()->fixed_cow_array_map() &&
- map() != GetHeap()->fixed_array_map());
- DCHECK(index >= 0 && index < this->length());
- int offset = kHeaderSize + index * kDoubleSize;
- return READ_UINT64_FIELD(this, offset);
-}
-
-Handle<Object> FixedDoubleArray::get(FixedDoubleArray* array, int index,
- Isolate* isolate) {
- if (array->is_the_hole(index)) {
- return isolate->factory()->the_hole_value();
- } else {
- return isolate->factory()->NewNumber(array->get_scalar(index));
- }
-}
-
-
-void FixedDoubleArray::set(int index, double value) {
- DCHECK(map() != GetHeap()->fixed_cow_array_map() &&
- map() != GetHeap()->fixed_array_map());
- int offset = kHeaderSize + index * kDoubleSize;
- if (std::isnan(value)) {
- WRITE_DOUBLE_FIELD(this, offset, std::numeric_limits<double>::quiet_NaN());
- } else {
- WRITE_DOUBLE_FIELD(this, offset, value);
- }
- DCHECK(!is_the_hole(index));
-}
-
-void FixedDoubleArray::set_the_hole(Isolate* isolate, int index) {
- set_the_hole(index);
-}
-
-void FixedDoubleArray::set_the_hole(int index) {
- DCHECK(map() != GetHeap()->fixed_cow_array_map() &&
- map() != GetHeap()->fixed_array_map());
- int offset = kHeaderSize + index * kDoubleSize;
- WRITE_UINT64_FIELD(this, offset, kHoleNanInt64);
-}
-
-bool FixedDoubleArray::is_the_hole(Isolate* isolate, int index) {
- return is_the_hole(index);
-}
-
-bool FixedDoubleArray::is_the_hole(int index) {
- return get_representation(index) == kHoleNanInt64;
-}
-
-
-double* FixedDoubleArray::data_start() {
- return reinterpret_cast<double*>(FIELD_ADDR(this, kHeaderSize));
-}
-
-
-void FixedDoubleArray::FillWithHoles(int from, int to) {
- for (int i = from; i < to; i++) {
- set_the_hole(i);
- }
-}
-
-Object* WeakFixedArray::Get(int index) const {
- Object* raw = FixedArray::cast(this)->get(index + kFirstIndex);
- if (raw->IsSmi()) return raw;
- DCHECK(raw->IsWeakCell());
- return WeakCell::cast(raw)->value();
-}
-
-
-bool WeakFixedArray::IsEmptySlot(int index) const {
- DCHECK(index < Length());
- return Get(index)->IsSmi();
-}
-
-
-void WeakFixedArray::Clear(int index) {
- FixedArray::cast(this)->set(index + kFirstIndex, Smi::kZero);
-}
-
-
-int WeakFixedArray::Length() const {
- return FixedArray::cast(this)->length() - kFirstIndex;
-}
-
-
-int WeakFixedArray::last_used_index() const {
- return Smi::ToInt(FixedArray::cast(this)->get(kLastUsedIndexIndex));
-}
-
-
-void WeakFixedArray::set_last_used_index(int index) {
- FixedArray::cast(this)->set(kLastUsedIndexIndex, Smi::FromInt(index));
-}
-
-
-template <class T>
-T* WeakFixedArray::Iterator::Next() {
- if (list_ != nullptr) {
- // Assert that list did not change during iteration.
- DCHECK_EQ(last_used_index_, list_->last_used_index());
- while (index_ < list_->Length()) {
- Object* item = list_->Get(index_++);
- if (item != Empty()) return T::cast(item);
- }
- list_ = nullptr;
- }
- return nullptr;
-}
-
-int ArrayList::Length() const {
- if (FixedArray::cast(this)->length() == 0) return 0;
- return Smi::ToInt(FixedArray::cast(this)->get(kLengthIndex));
-}
-
-
-void ArrayList::SetLength(int length) {
- return FixedArray::cast(this)->set(kLengthIndex, Smi::FromInt(length));
-}
-
-Object* ArrayList::Get(int index) const {
- return FixedArray::cast(this)->get(kFirstIndex + index);
-}
-
-
-Object** ArrayList::Slot(int index) {
- return data_start() + kFirstIndex + index;
-}
-
-void ArrayList::Set(int index, Object* obj, WriteBarrierMode mode) {
- FixedArray::cast(this)->set(kFirstIndex + index, obj, mode);
-}
-
-
-void ArrayList::Clear(int index, Object* undefined) {
- DCHECK(undefined->IsUndefined(GetIsolate()));
- FixedArray::cast(this)
- ->set(kFirstIndex + index, undefined, SKIP_WRITE_BARRIER);
-}
-
int RegExpMatchInfo::NumberOfCaptureRegisters() {
DCHECK_GE(length(), kLastMatchOverhead);
Object* obj = get(kNumberOfCapturesIndex);
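
The FixedDoubleArray code in this hunk explains the verifier change earlier in the patch: set() canonicalizes every incoming NaN to the quiet-NaN pattern before storing, which is what keeps the reserved hole pattern (kHoleNanInt64) unforgeable and lets is_the_hole() be a plain 64-bit compare. Modeled standalone — the hole payload below is a placeholder, not V8's actual constant:

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>
#include <limits>
#include <vector>

// Toy double array using one reserved NaN payload as the "hole" marker,
// mirroring FixedDoubleArray::set / set_the_hole / is_the_hole.
constexpr uint64_t kHoleNanInt64 = uint64_t{0x7FF7FFFFFFFFFFFF};  // placeholder

struct ToyDoubleArray {
  std::vector<uint64_t> bits;

  void set(int i, double v) {
    double stored =
        std::isnan(v) ? std::numeric_limits<double>::quiet_NaN() : v;
    std::memcpy(&bits[i], &stored, sizeof(stored));  // canonicalizes all NaNs
  }
  void set_the_hole(int i) { bits[i] = kHoleNanInt64; }
  bool is_the_hole(int i) const { return bits[i] == kHoleNanInt64; }
};

int main() {
  ToyDoubleArray a{std::vector<uint64_t>(2)};
  a.set(0, std::nan(""));  // an arithmetic NaN...
  a.set_the_hole(1);
  assert(!a.is_the_hole(0));  // ...can never collide with the hole pattern
  assert(a.is_the_hole(1));
  return 0;
}
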
@@ -1987,17 +1748,6 @@ bool HeapObject::NeedsRehashing() const {
}
}
-void FixedArray::set(int index,
- Object* value,
- WriteBarrierMode mode) {
- DCHECK_NE(map(), GetHeap()->fixed_cow_array_map());
- DCHECK_GE(index, 0);
- DCHECK_LT(index, this->length());
- int offset = kHeaderSize + index * kPointerSize;
- RELAXED_WRITE_FIELD(this, offset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
-}
-
void PropertyArray::set(int index, Object* value, WriteBarrierMode mode) {
DCHECK_GE(index, 0);
DCHECK_LT(index, this->length());
@@ -2006,57 +1756,10 @@ void PropertyArray::set(int index, Object* value, WriteBarrierMode mode) {
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
}
-void FixedArray::NoWriteBarrierSet(FixedArray* array,
- int index,
- Object* value) {
- DCHECK_NE(array->map(), array->GetHeap()->fixed_cow_array_map());
- DCHECK_GE(index, 0);
- DCHECK_LT(index, array->length());
- DCHECK(!array->GetHeap()->InNewSpace(value));
- RELAXED_WRITE_FIELD(array, kHeaderSize + index * kPointerSize, value);
-}
-
-void FixedArray::set_undefined(int index) {
- set_undefined(GetIsolate(), index);
-}
-
-void FixedArray::set_undefined(Isolate* isolate, int index) {
- FixedArray::NoWriteBarrierSet(this, index,
- isolate->heap()->undefined_value());
-}
-
-void FixedArray::set_null(int index) { set_null(GetIsolate(), index); }
-
-void FixedArray::set_null(Isolate* isolate, int index) {
- FixedArray::NoWriteBarrierSet(this, index, isolate->heap()->null_value());
-}
-
-void FixedArray::set_the_hole(int index) { set_the_hole(GetIsolate(), index); }
-
-void FixedArray::set_the_hole(Isolate* isolate, int index) {
- FixedArray::NoWriteBarrierSet(this, index, isolate->heap()->the_hole_value());
-}
-
-void FixedArray::FillWithHoles(int from, int to) {
- Isolate* isolate = GetIsolate();
- for (int i = from; i < to; i++) {
- set_the_hole(isolate, i);
- }
-}
-
-
-Object** FixedArray::data_start() {
- return HeapObject::RawField(this, kHeaderSize);
-}
-
Object** PropertyArray::data_start() {
return HeapObject::RawField(this, kHeaderSize);
}
-Object** FixedArray::RawFieldOfElementAt(int index) {
- return HeapObject::RawField(this, OffsetOfElementAt(index));
-}
-
ACCESSORS(EnumCache, keys, FixedArray, kKeysOffset)
ACCESSORS(EnumCache, indices, FixedArray, kIndicesOffset)
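
One detail worth noting in the removed setters: set_undefined, set_null and set_the_hole all route through NoWriteBarrierSet, which is safe because those root values are immortal and immovable — the DCHECK(!array->GetHeap()->InNewSpace(value)) states exactly that precondition. As a companion to the barrier sketch above:

#include <atomic>
#include <cassert>

// Stores of immortal, immovable values (Smis, undefined, null, the hole)
// may legitimately skip the remembered set.
inline void WriteNoBarrier(std::atomic<void*>* slot, void* value,
                           bool value_in_new_space) {
  assert(!value_in_new_space);  // mirrors NoWriteBarrierSet's DCHECK
  slot->store(value, std::memory_order_relaxed);
}
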
@@ -2215,54 +1918,6 @@ int DescriptorArray::SearchWithCache(Isolate* isolate, Name* name, Map* map) {
return number;
}
-PropertyDetails Map::GetLastDescriptorDetails() const {
- return instance_descriptors()->GetDetails(LastAdded());
-}
-
-int Map::LastAdded() const {
- int number_of_own_descriptors = NumberOfOwnDescriptors();
- DCHECK_GT(number_of_own_descriptors, 0);
- return number_of_own_descriptors - 1;
-}
-
-int Map::NumberOfOwnDescriptors() const {
- return NumberOfOwnDescriptorsBits::decode(bit_field3());
-}
-
-
-void Map::SetNumberOfOwnDescriptors(int number) {
- CHECK_LE(static_cast<unsigned>(number),
- static_cast<unsigned>(kMaxNumberOfDescriptors));
- set_bit_field3(NumberOfOwnDescriptorsBits::update(bit_field3(), number));
-}
-
-int Map::EnumLength() const { return EnumLengthBits::decode(bit_field3()); }
-
-void Map::SetEnumLength(int length) {
- if (length != kInvalidEnumCacheSentinel) {
- DCHECK_LE(length, NumberOfOwnDescriptors());
- CHECK_LE(static_cast<unsigned>(length),
- static_cast<unsigned>(kMaxNumberOfDescriptors));
- }
- set_bit_field3(EnumLengthBits::update(bit_field3(), length));
-}
-
-FixedArrayBase* Map::GetInitialElements() const {
- FixedArrayBase* result = nullptr;
- if (has_fast_elements() || has_fast_string_wrapper_elements()) {
- result = GetHeap()->empty_fixed_array();
- } else if (has_fast_sloppy_arguments_elements()) {
- result = GetHeap()->empty_sloppy_arguments_elements();
- } else if (has_fixed_typed_array_elements()) {
- result = GetHeap()->EmptyFixedTypedArrayForMap(this);
- } else if (has_dictionary_elements()) {
- result = GetHeap()->empty_slow_element_dictionary();
- } else {
- UNREACHABLE();
- }
- DCHECK(!GetHeap()->InNewSpace(result));
- return result;
-}
Object** DescriptorArray::GetKeySlot(int descriptor_number) {
DCHECK(descriptor_number < number_of_descriptors());
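
NumberOfOwnDescriptorsBits, EnumLengthBits and the rest are V8 BitField instantiations: several small unsigned fields packed into the single 32-bit bit_field3 word, each exposing decode/update. The core mechanism fits in a few lines — the 10-bit layout below is illustrative, not Map's real field layout:

#include <cassert>
#include <cstdint>

// Minimal re-creation of the BitField decode/update used by bit_field3.
template <typename T, int kShift, int kSize>
struct BitField {
  static constexpr uint32_t kMask = ((uint32_t{1} << kSize) - 1) << kShift;
  static T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> kShift);
  }
  static uint32_t update(uint32_t word, T value) {
    return (word & ~kMask) | (static_cast<uint32_t>(value) << kShift);
  }
};

// e.g. a 10-bit descriptor counter at bits 0..9 (layout hypothetical):
using NumberOfOwnDescriptorsBits = BitField<int, 0, 10>;

int main() {
  uint32_t bit_field3 = 0;
  bit_field3 = NumberOfOwnDescriptorsBits::update(bit_field3, 7);
  assert(NumberOfOwnDescriptorsBits::decode(bit_field3) == 7);
  return 0;
}
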
@@ -2389,101 +2044,6 @@ void DescriptorArray::SwapSortedKeys(int first, int second) {
SetSortedKey(second, first_key);
}
-int HashTableBase::NumberOfElements() const {
- return Smi::ToInt(get(kNumberOfElementsIndex));
-}
-
-int HashTableBase::NumberOfDeletedElements() const {
- return Smi::ToInt(get(kNumberOfDeletedElementsIndex));
-}
-
-int HashTableBase::Capacity() const { return Smi::ToInt(get(kCapacityIndex)); }
-
-void HashTableBase::ElementAdded() {
- SetNumberOfElements(NumberOfElements() + 1);
-}
-
-
-void HashTableBase::ElementRemoved() {
- SetNumberOfElements(NumberOfElements() - 1);
- SetNumberOfDeletedElements(NumberOfDeletedElements() + 1);
-}
-
-
-void HashTableBase::ElementsRemoved(int n) {
- SetNumberOfElements(NumberOfElements() - n);
- SetNumberOfDeletedElements(NumberOfDeletedElements() + n);
-}
-
-
-// static
-int HashTableBase::ComputeCapacity(int at_least_space_for) {
- // Add 50% slack to make slot collisions sufficiently unlikely.
- // See matching computation in HashTable::HasSufficientCapacityToAdd().
- // Must be kept in sync with CodeStubAssembler::HashTableComputeCapacity().
- int raw_cap = at_least_space_for + (at_least_space_for >> 1);
- int capacity = base::bits::RoundUpToPowerOfTwo32(raw_cap);
- return Max(capacity, kMinCapacity);
-}
-
-void HashTableBase::SetNumberOfElements(int nof) {
- set(kNumberOfElementsIndex, Smi::FromInt(nof));
-}
-
-
-void HashTableBase::SetNumberOfDeletedElements(int nod) {
- set(kNumberOfDeletedElementsIndex, Smi::FromInt(nod));
-}
-
-template <typename Key>
-int BaseShape<Key>::GetMapRootIndex() {
- return Heap::kHashTableMapRootIndex;
-}
-
-template <typename Derived, typename Shape>
-int HashTable<Derived, Shape>::FindEntry(Key key) {
- return FindEntry(GetIsolate(), key);
-}
-
-template <typename Derived, typename Shape>
-int HashTable<Derived, Shape>::FindEntry(Isolate* isolate, Key key) {
- return FindEntry(isolate, key, Shape::Hash(isolate, key));
-}
-
-// Find entry for key otherwise return kNotFound.
-template <typename Derived, typename Shape>
-int HashTable<Derived, Shape>::FindEntry(Isolate* isolate, Key key,
- int32_t hash) {
- uint32_t capacity = Capacity();
- uint32_t entry = FirstProbe(hash, capacity);
- uint32_t count = 1;
- // EnsureCapacity will guarantee the hash table is never full.
- Object* undefined = isolate->heap()->undefined_value();
- Object* the_hole = isolate->heap()->the_hole_value();
- USE(the_hole);
- while (true) {
- Object* element = KeyAt(entry);
- // Empty entry. Uses raw unchecked accessors because it is called by the
- // string table during bootstrapping.
- if (element == undefined) break;
- if (!(Shape::kNeedsHoleCheck && the_hole == element)) {
- if (Shape::IsMatch(key, element)) return entry;
- }
- entry = NextProbe(entry, count++, capacity);
- }
- return kNotFound;
-}
-
-bool ObjectHashSet::Has(Isolate* isolate, Handle<Object> key, int32_t hash) {
- return FindEntry(isolate, key, hash) != kNotFound;
-}
-
-bool ObjectHashSet::Has(Isolate* isolate, Handle<Object> key) {
- Object* hash = key->GetHash();
- if (!hash->IsSmi()) return false;
- return FindEntry(isolate, key, Smi::ToInt(hash)) != kNotFound;
-}
-
bool StringSetShape::IsMatch(String* key, Object* value) {
DCHECK(value->IsString());
return key->Equals(String::cast(value));
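
Two invariants in the removed hash-table code deserve a callout: ComputeCapacity adds 50% slack and rounds up to a power of two, keeping the table well under full so the probe loop in FindEntry — which walks entries until it hits an undefined slot — always terminates, and the comment notes it must stay in sync with CodeStubAssembler::HashTableComputeCapacity. The capacity rule, checked standalone (kMinCapacity here is a placeholder, not necessarily V8's floor):

#include <cassert>

constexpr int kMinCapacity = 4;  // placeholder floor

int RoundUpToPowerOfTwo32(int v) {
  int p = 1;
  while (p < v) p <<= 1;
  return p;
}

// Mirrors HashTableBase::ComputeCapacity: 50% slack, then round up.
int ComputeCapacity(int at_least_space_for) {
  int raw_cap = at_least_space_for + (at_least_space_for >> 1);
  int capacity = RoundUpToPowerOfTwo32(raw_cap);
  return capacity > kMinCapacity ? capacity : kMinCapacity;
}

int main() {
  assert(ComputeCapacity(0) == kMinCapacity);
  assert(ComputeCapacity(6) == 16);   // 6 + 3 = 9  -> 16
  assert(ComputeCapacity(10) == 16);  // 10 + 5 = 15 -> 16
  return 0;
}
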
@@ -2536,50 +2096,6 @@ void NumberDictionary::set_requires_slow_elements() {
set(kMaxNumberKeyIndex, Smi::FromInt(kRequiresSlowElementsMask));
}
-
-template <class T>
-PodArray<T>* PodArray<T>::cast(Object* object) {
- SLOW_DCHECK(object->IsByteArray());
- return reinterpret_cast<PodArray<T>*>(object);
-}
-template <class T>
-const PodArray<T>* PodArray<T>::cast(const Object* object) {
- SLOW_DCHECK(object->IsByteArray());
- return reinterpret_cast<const PodArray<T>*>(object);
-}
-
-// static
-template <class T>
-Handle<PodArray<T>> PodArray<T>::New(Isolate* isolate, int length,
- PretenureFlag pretenure) {
- return Handle<PodArray<T>>::cast(
- isolate->factory()->NewByteArray(length * sizeof(T), pretenure));
-}
-
-// static
-template <class Traits>
-STATIC_CONST_MEMBER_DEFINITION const InstanceType
- FixedTypedArray<Traits>::kInstanceType;
-
-
-template <class Traits>
-FixedTypedArray<Traits>* FixedTypedArray<Traits>::cast(Object* object) {
- SLOW_DCHECK(object->IsHeapObject() &&
- HeapObject::cast(object)->map()->instance_type() ==
- Traits::kInstanceType);
- return reinterpret_cast<FixedTypedArray<Traits>*>(object);
-}
-
-
-template <class Traits>
-const FixedTypedArray<Traits>*
-FixedTypedArray<Traits>::cast(const Object* object) {
- SLOW_DCHECK(object->IsHeapObject() &&
- HeapObject::cast(object)->map()->instance_type() ==
- Traits::kInstanceType);
- return reinterpret_cast<FixedTypedArray<Traits>*>(object);
-}
-
DEFINE_DEOPT_ELEMENT_ACCESSORS(TranslationByteArray, ByteArray)
DEFINE_DEOPT_ELEMENT_ACCESSORS(InlinedFunctionCount, Smi)
DEFINE_DEOPT_ELEMENT_ACCESSORS(LiteralArray, FixedArray)
@@ -2593,23 +2109,6 @@ DEFINE_DEOPT_ENTRY_ACCESSORS(BytecodeOffsetRaw, Smi)
DEFINE_DEOPT_ENTRY_ACCESSORS(TranslationIndex, Smi)
DEFINE_DEOPT_ENTRY_ACCESSORS(Pc, Smi)
-template <typename Derived, typename Shape>
-HashTable<Derived, Shape>* HashTable<Derived, Shape>::cast(Object* obj) {
- SLOW_DCHECK(obj->IsHashTable());
- return reinterpret_cast<HashTable*>(obj);
-}
-
-template <typename Derived, typename Shape>
-const HashTable<Derived, Shape>* HashTable<Derived, Shape>::cast(
- const Object* obj) {
- SLOW_DCHECK(obj->IsHashTable());
- return reinterpret_cast<const HashTable*>(obj);
-}
-
-
-SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
-SYNCHRONIZED_SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
-
int PropertyArray::length() const {
Object* value_obj = READ_FIELD(this, kLengthAndHashOffset);
int value = Smi::ToInt(value_obj);
@@ -2672,394 +2171,6 @@ FreeSpace* FreeSpace::cast(HeapObject* o) {
return reinterpret_cast<FreeSpace*>(o);
}
-int ByteArray::Size() { return RoundUp(length() + kHeaderSize, kPointerSize); }
-
-byte ByteArray::get(int index) const {
- DCHECK(index >= 0 && index < this->length());
- return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
-}
-
-void ByteArray::set(int index, byte value) {
- DCHECK(index >= 0 && index < this->length());
- WRITE_BYTE_FIELD(this, kHeaderSize + index * kCharSize, value);
-}
-
-void ByteArray::copy_in(int index, const byte* buffer, int length) {
- DCHECK(index >= 0 && length >= 0 && length <= kMaxInt - index &&
- index + length <= this->length());
- byte* dst_addr = FIELD_ADDR(this, kHeaderSize + index * kCharSize);
- memcpy(dst_addr, buffer, length);
-}
-
-void ByteArray::copy_out(int index, byte* buffer, int length) {
- DCHECK(index >= 0 && length >= 0 && length <= kMaxInt - index &&
- index + length <= this->length());
- const byte* src_addr = FIELD_ADDR(this, kHeaderSize + index * kCharSize);
- memcpy(buffer, src_addr, length);
-}
-
-int ByteArray::get_int(int index) const {
- DCHECK(index >= 0 && index < this->length() / kIntSize);
- return READ_INT_FIELD(this, kHeaderSize + index * kIntSize);
-}
-
-void ByteArray::set_int(int index, int value) {
- DCHECK(index >= 0 && index < this->length() / kIntSize);
- WRITE_INT_FIELD(this, kHeaderSize + index * kIntSize, value);
-}
-
-uint32_t ByteArray::get_uint32(int index) const {
- DCHECK(index >= 0 && index < this->length() / kUInt32Size);
- return READ_UINT32_FIELD(this, kHeaderSize + index * kUInt32Size);
-}
-
-void ByteArray::set_uint32(int index, uint32_t value) {
- DCHECK(index >= 0 && index < this->length() / kUInt32Size);
- WRITE_UINT32_FIELD(this, kHeaderSize + index * kUInt32Size, value);
-}
-
-void ByteArray::clear_padding() {
- int data_size = length() + kHeaderSize;
- memset(address() + data_size, 0, Size() - data_size);
-}
-
-ByteArray* ByteArray::FromDataStartAddress(Address address) {
- DCHECK_TAG_ALIGNED(address);
- return reinterpret_cast<ByteArray*>(address - kHeaderSize + kHeapObjectTag);
-}
-
-int ByteArray::DataSize() const { return RoundUp(length(), kPointerSize); }
-
-int ByteArray::ByteArraySize() { return SizeFor(this->length()); }
-
-Address ByteArray::GetDataStartAddress() {
- return reinterpret_cast<Address>(this) - kHeapObjectTag + kHeaderSize;
-}
-
-ACCESSORS(FixedTypedArrayBase, base_pointer, Object, kBasePointerOffset)
-
-
-void* FixedTypedArrayBase::external_pointer() const {
- intptr_t ptr = READ_INTPTR_FIELD(this, kExternalPointerOffset);
- return reinterpret_cast<void*>(ptr);
-}
-
-
-void FixedTypedArrayBase::set_external_pointer(void* value,
- WriteBarrierMode mode) {
- intptr_t ptr = reinterpret_cast<intptr_t>(value);
- WRITE_INTPTR_FIELD(this, kExternalPointerOffset, ptr);
-}
-
-
-void* FixedTypedArrayBase::DataPtr() {
- return reinterpret_cast<void*>(
- reinterpret_cast<intptr_t>(base_pointer()) +
- reinterpret_cast<intptr_t>(external_pointer()));
-}
-
-
-int FixedTypedArrayBase::ElementSize(InstanceType type) {
- int element_size;
- switch (type) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case FIXED_##TYPE##_ARRAY_TYPE: \
- element_size = size; \
- break;
-
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
- default:
- UNREACHABLE();
- }
- return element_size;
-}
-
-int FixedTypedArrayBase::DataSize(InstanceType type) const {
- if (base_pointer() == Smi::kZero) return 0;
- return length() * ElementSize(type);
-}
-
-int FixedTypedArrayBase::DataSize() const {
- return DataSize(map()->instance_type());
-}
-
-size_t FixedTypedArrayBase::ByteLength() const {
- return static_cast<size_t>(length()) *
- static_cast<size_t>(ElementSize(map()->instance_type()));
-}
-
-int FixedTypedArrayBase::size() const {
- return OBJECT_POINTER_ALIGN(kDataOffset + DataSize());
-}
-
-int FixedTypedArrayBase::TypedArraySize(InstanceType type) const {
- return OBJECT_POINTER_ALIGN(kDataOffset + DataSize(type));
-}
-
-// static
-int FixedTypedArrayBase::TypedArraySize(InstanceType type, int length) {
- return OBJECT_POINTER_ALIGN(kDataOffset + length * ElementSize(type));
-}
-
-
-uint8_t Uint8ArrayTraits::defaultValue() { return 0; }
-
-
-uint8_t Uint8ClampedArrayTraits::defaultValue() { return 0; }
-
-
-int8_t Int8ArrayTraits::defaultValue() { return 0; }
-
-
-uint16_t Uint16ArrayTraits::defaultValue() { return 0; }
-
-
-int16_t Int16ArrayTraits::defaultValue() { return 0; }
-
-
-uint32_t Uint32ArrayTraits::defaultValue() { return 0; }
-
-
-int32_t Int32ArrayTraits::defaultValue() { return 0; }
-
-
-float Float32ArrayTraits::defaultValue() {
- return std::numeric_limits<float>::quiet_NaN();
-}
-
-
-double Float64ArrayTraits::defaultValue() {
- return std::numeric_limits<double>::quiet_NaN();
-}
-
-template <class Traits>
-typename Traits::ElementType FixedTypedArray<Traits>::get_scalar(int index) {
- DCHECK((index >= 0) && (index < this->length()));
- // The JavaScript memory model allows for racy reads and writes to a
- // SharedArrayBuffer's backing store, which will always be a FixedTypedArray.
- // ThreadSanitizer will catch these racy accesses and warn about them, so we
- // disable TSAN for these reads and writes using annotations.
- //
- // We don't use relaxed atomics here, as it is not a requirement of the
- // JavaScript memory model to have tear-free reads of overlapping accesses,
- // and using relaxed atomics may introduce overhead.
- auto* ptr = reinterpret_cast<ElementType*>(DataPtr());
- TSAN_ANNOTATE_IGNORE_READS_BEGIN;
- auto result = ptr[index];
- TSAN_ANNOTATE_IGNORE_READS_END;
- return result;
-}
-
-
-template <class Traits>
-void FixedTypedArray<Traits>::set(int index, ElementType value) {
- CHECK((index >= 0) && (index < this->length()));
- // See the comment in FixedTypedArray<Traits>::get_scalar.
- auto* ptr = reinterpret_cast<ElementType*>(DataPtr());
- TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
- ptr[index] = value;
- TSAN_ANNOTATE_IGNORE_WRITES_END;
-}
-
-template <class Traits>
-typename Traits::ElementType FixedTypedArray<Traits>::from(int value) {
- return static_cast<ElementType>(value);
-}
-
-template <>
-inline uint8_t FixedTypedArray<Uint8ClampedArrayTraits>::from(int value) {
- if (value < 0) return 0;
- if (value > 0xFF) return 0xFF;
- return static_cast<uint8_t>(value);
-}
-
-template <class Traits>
-typename Traits::ElementType FixedTypedArray<Traits>::from(uint32_t value) {
- return static_cast<ElementType>(value);
-}
-
-template <>
-inline uint8_t FixedTypedArray<Uint8ClampedArrayTraits>::from(uint32_t value) {
- // We need this special case for Uint32 -> Uint8Clamped, because the highest
- // Uint32 values will be negative as an int, clamping to 0, rather than 255.
- if (value > 0xFF) return 0xFF;
- return static_cast<uint8_t>(value);
-}
-
-template <class Traits>
-typename Traits::ElementType FixedTypedArray<Traits>::from(double value) {
- return static_cast<ElementType>(DoubleToInt32(value));
-}
-
-template <>
-inline uint8_t FixedTypedArray<Uint8ClampedArrayTraits>::from(double value) {
- // Handle NaNs and less than zero values which clamp to zero.
- if (!(value > 0)) return 0;
- if (value > 0xFF) return 0xFF;
- return static_cast<uint8_t>(lrint(value));
-}
-
-template <>
-inline float FixedTypedArray<Float32ArrayTraits>::from(double value) {
- return static_cast<float>(value);
-}
-
-template <>
-inline double FixedTypedArray<Float64ArrayTraits>::from(double value) {
- return value;
-}
-
-template <class Traits>
-Handle<Object> FixedTypedArray<Traits>::get(FixedTypedArray<Traits>* array,
- int index) {
- return Traits::ToHandle(array->GetIsolate(), array->get_scalar(index));
-}
-
-
-template <class Traits>
-void FixedTypedArray<Traits>::SetValue(uint32_t index, Object* value) {
- ElementType cast_value = Traits::defaultValue();
- if (value->IsSmi()) {
- int int_value = Smi::ToInt(value);
- cast_value = from(int_value);
- } else if (value->IsHeapNumber()) {
- double double_value = HeapNumber::cast(value)->value();
- cast_value = from(double_value);
- } else {
- // Clamp undefined to the default value. All other types have been
- // converted to a number type further up in the call chain.
- DCHECK(value->IsUndefined(GetIsolate()));
- }
- set(index, cast_value);
-}
-
-
-Handle<Object> Uint8ArrayTraits::ToHandle(Isolate* isolate, uint8_t scalar) {
- return handle(Smi::FromInt(scalar), isolate);
-}
-
-
-Handle<Object> Uint8ClampedArrayTraits::ToHandle(Isolate* isolate,
- uint8_t scalar) {
- return handle(Smi::FromInt(scalar), isolate);
-}
-
-
-Handle<Object> Int8ArrayTraits::ToHandle(Isolate* isolate, int8_t scalar) {
- return handle(Smi::FromInt(scalar), isolate);
-}
-
-
-Handle<Object> Uint16ArrayTraits::ToHandle(Isolate* isolate, uint16_t scalar) {
- return handle(Smi::FromInt(scalar), isolate);
-}
-
-
-Handle<Object> Int16ArrayTraits::ToHandle(Isolate* isolate, int16_t scalar) {
- return handle(Smi::FromInt(scalar), isolate);
-}
-
-
-Handle<Object> Uint32ArrayTraits::ToHandle(Isolate* isolate, uint32_t scalar) {
- return isolate->factory()->NewNumberFromUint(scalar);
-}
-
-
-Handle<Object> Int32ArrayTraits::ToHandle(Isolate* isolate, int32_t scalar) {
- return isolate->factory()->NewNumberFromInt(scalar);
-}
-
-
-Handle<Object> Float32ArrayTraits::ToHandle(Isolate* isolate, float scalar) {
- return isolate->factory()->NewNumber(scalar);
-}
-
-
-Handle<Object> Float64ArrayTraits::ToHandle(Isolate* isolate, double scalar) {
- return isolate->factory()->NewNumber(scalar);
-}
-
-VisitorId Map::visitor_id() const {
- return static_cast<VisitorId>(READ_BYTE_FIELD(this, kVisitorIdOffset));
-}
-
-void Map::set_visitor_id(VisitorId id) {
- DCHECK_LE(0, id);
- DCHECK_LT(id, 256);
- WRITE_BYTE_FIELD(this, kVisitorIdOffset, static_cast<byte>(id));
-}
-
-int Map::instance_size_in_words() const {
- return RELAXED_READ_BYTE_FIELD(this, kInstanceSizeInWordsOffset);
-}
-
-void Map::set_instance_size_in_words(int value) {
- RELAXED_WRITE_BYTE_FIELD(this, kInstanceSizeInWordsOffset,
- static_cast<byte>(value));
-}
-
-int Map::instance_size() const {
- return instance_size_in_words() << kPointerSizeLog2;
-}
-
-void Map::set_instance_size(int value) {
- CHECK_EQ(0, value & (kPointerSize - 1));
- value >>= kPointerSizeLog2;
- CHECK_LT(static_cast<unsigned>(value), 256);
- set_instance_size_in_words(value);
-}
-
-int Map::inobject_properties_start_or_constructor_function_index() const {
- return RELAXED_READ_BYTE_FIELD(
- this, kInObjectPropertiesStartOrConstructorFunctionIndexOffset);
-}
-
-void Map::set_inobject_properties_start_or_constructor_function_index(
- int value) {
- CHECK_LT(static_cast<unsigned>(value), 256);
- RELAXED_WRITE_BYTE_FIELD(
- this, kInObjectPropertiesStartOrConstructorFunctionIndexOffset,
- static_cast<byte>(value));
-}
-
-int Map::GetInObjectPropertiesStartInWords() const {
- DCHECK(IsJSObjectMap());
- return inobject_properties_start_or_constructor_function_index();
-}
-
-void Map::SetInObjectPropertiesStartInWords(int value) {
- CHECK(IsJSObjectMap());
- set_inobject_properties_start_or_constructor_function_index(value);
-}
-
-int Map::GetInObjectProperties() const {
- DCHECK(IsJSObjectMap());
- return instance_size_in_words() - GetInObjectPropertiesStartInWords();
-}
-
-int Map::GetConstructorFunctionIndex() const {
- DCHECK(IsPrimitiveMap());
- return inobject_properties_start_or_constructor_function_index();
-}
-
-
-void Map::SetConstructorFunctionIndex(int value) {
- CHECK(IsPrimitiveMap());
- set_inobject_properties_start_or_constructor_function_index(value);
-}
-
-int Map::GetInObjectPropertyOffset(int index) const {
- return (GetInObjectPropertiesStartInWords() + index) * kPointerSize;
-}
-
-
-Handle<Map> Map::AddMissingTransitionsForTesting(
- Handle<Map> split_map, Handle<DescriptorArray> descriptors,
- Handle<LayoutDescriptor> full_layout_descriptor) {
- return AddMissingTransitions(split_map, descriptors, full_layout_descriptor);
-}
-
int HeapObject::SizeFromMap(Map* map) const {
int instance_size = map->instance_size();
if (instance_size != kVariableSizeSentinel) return instance_size;
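
The Uint8Clamped specializations in the hunk above implement the spec's clamping conversion: NaN and negatives map to 0, values above 255 saturate, and fractional doubles round half-to-even via lrint under the default rounding mode. The uint32_t overload exists for the pitfall its comment names — a large uint32 reinterpreted as int would look negative and clamp to 0 instead of 255. A standalone version plus checks:

#include <cassert>
#include <cmath>
#include <cstdint>

uint8_t ClampedFromDouble(double value) {
  if (!(value > 0)) return 0;  // also catches NaN
  if (value > 0xFF) return 0xFF;
  return static_cast<uint8_t>(std::lrint(value));  // round half to even
}

uint8_t ClampedFromUint32(uint32_t value) {
  // Going through int here would make 0xFFFFFFFF look negative and clamp
  // to 0 -- the exact bug the specialization guards against.
  return value > 0xFF ? 0xFF : static_cast<uint8_t>(value);
}

int main() {
  assert(ClampedFromDouble(-3.5) == 0);
  assert(ClampedFromDouble(std::nan("")) == 0);
  assert(ClampedFromDouble(2.5) == 2);  // ties go to even
  assert(ClampedFromDouble(300.0) == 0xFF);
  assert(ClampedFromUint32(0xFFFFFFFFu) == 0xFF);
  return 0;
}
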
@@ -3125,633 +2236,6 @@ int HeapObject::SizeFromMap(Map* map) const {
return reinterpret_cast<const Code*>(this)->CodeSize();
}
-InstanceType Map::instance_type() const {
- return static_cast<InstanceType>(
- READ_UINT16_FIELD(this, kInstanceTypeOffset));
-}
-
-
-void Map::set_instance_type(InstanceType value) {
- WRITE_UINT16_FIELD(this, kInstanceTypeOffset, value);
-}
-
-int Map::UnusedPropertyFields() const {
- int value = used_or_unused_instance_size_in_words();
- DCHECK_IMPLIES(!IsJSObjectMap(), value == 0);
- int unused;
- if (value >= JSObject::kFieldsAdded) {
- unused = instance_size_in_words() - value;
- } else {
- // For out of object properties "used_or_unused_instance_size_in_words"
- // byte encodes the slack in the property array.
- unused = value;
- }
- return unused;
-}
-
-int Map::used_or_unused_instance_size_in_words() const {
- return READ_BYTE_FIELD(this, kUsedOrUnusedInstanceSizeInWordsOffset);
-}
-
-void Map::set_used_or_unused_instance_size_in_words(int value) {
- CHECK_LE(static_cast<unsigned>(value), 255);
- WRITE_BYTE_FIELD(this, kUsedOrUnusedInstanceSizeInWordsOffset,
- static_cast<byte>(value));
-}
-
-int Map::UsedInstanceSize() const {
- int words = used_or_unused_instance_size_in_words();
- if (words < JSObject::kFieldsAdded) {
- // All in-object properties are used and the words is tracking the slack
- // in the property array.
- return instance_size();
- }
- return words * kPointerSize;
-}
-
-void Map::SetInObjectUnusedPropertyFields(int value) {
- STATIC_ASSERT(JSObject::kFieldsAdded == JSObject::kHeaderSize / kPointerSize);
- if (!IsJSObjectMap()) {
- CHECK_EQ(0, value);
- set_used_or_unused_instance_size_in_words(0);
- DCHECK_EQ(0, UnusedPropertyFields());
- return;
- }
- CHECK_LE(0, value);
- DCHECK_LE(value, GetInObjectProperties());
- int used_inobject_properties = GetInObjectProperties() - value;
- set_used_or_unused_instance_size_in_words(
- GetInObjectPropertyOffset(used_inobject_properties) / kPointerSize);
- DCHECK_EQ(value, UnusedPropertyFields());
-}
-
-void Map::SetOutOfObjectUnusedPropertyFields(int value) {
- STATIC_ASSERT(JSObject::kFieldsAdded == JSObject::kHeaderSize / kPointerSize);
- CHECK_LT(static_cast<unsigned>(value), JSObject::kFieldsAdded);
- // For out of object properties "used_instance_size_in_words" byte encodes
- // the slack in the property array.
- set_used_or_unused_instance_size_in_words(value);
- DCHECK_EQ(value, UnusedPropertyFields());
-}
-
-void Map::CopyUnusedPropertyFields(Map* map) {
- set_used_or_unused_instance_size_in_words(
- map->used_or_unused_instance_size_in_words());
- DCHECK_EQ(UnusedPropertyFields(), map->UnusedPropertyFields());
-}
-
-void Map::AccountAddedPropertyField() {
- // Update used instance size and unused property fields number.
- STATIC_ASSERT(JSObject::kFieldsAdded == JSObject::kHeaderSize / kPointerSize);
-#ifdef DEBUG
- int new_unused = UnusedPropertyFields() - 1;
- if (new_unused < 0) new_unused += JSObject::kFieldsAdded;
-#endif
- int value = used_or_unused_instance_size_in_words();
- if (value >= JSObject::kFieldsAdded) {
- if (value == instance_size_in_words()) {
- AccountAddedOutOfObjectPropertyField(0);
- } else {
- // The property is added in-object, so simply increment the counter.
- set_used_or_unused_instance_size_in_words(value + 1);
- }
- } else {
- AccountAddedOutOfObjectPropertyField(value);
- }
- DCHECK_EQ(new_unused, UnusedPropertyFields());
-}
-
-void Map::AccountAddedOutOfObjectPropertyField(int unused_in_property_array) {
- unused_in_property_array--;
- if (unused_in_property_array < 0) {
- unused_in_property_array += JSObject::kFieldsAdded;
- }
- CHECK_LT(static_cast<unsigned>(unused_in_property_array),
- JSObject::kFieldsAdded);
- set_used_or_unused_instance_size_in_words(unused_in_property_array);
- DCHECK_EQ(unused_in_property_array, UnusedPropertyFields());
-}
-
-byte Map::bit_field() const { return READ_BYTE_FIELD(this, kBitFieldOffset); }
-
-
-void Map::set_bit_field(byte value) {
- WRITE_BYTE_FIELD(this, kBitFieldOffset, value);
-}
-
-
-byte Map::bit_field2() const { return READ_BYTE_FIELD(this, kBitField2Offset); }
-
-
-void Map::set_bit_field2(byte value) {
- WRITE_BYTE_FIELD(this, kBitField2Offset, value);
-}
-
-
-void Map::set_non_instance_prototype(bool value) {
- if (value) {
- set_bit_field(bit_field() | (1 << kHasNonInstancePrototype));
- } else {
- set_bit_field(bit_field() & ~(1 << kHasNonInstancePrototype));
- }
-}
-
-bool Map::has_non_instance_prototype() const {
- if (!has_prototype_slot()) return false;
- return ((1 << kHasNonInstancePrototype) & bit_field()) != 0;
-}
-
-
-void Map::set_is_constructor(bool value) {
- if (value) {
- set_bit_field(bit_field() | (1 << kIsConstructor));
- } else {
- set_bit_field(bit_field() & ~(1 << kIsConstructor));
- }
-}
-
-
-bool Map::is_constructor() const {
- return ((1 << kIsConstructor) & bit_field()) != 0;
-}
-
-BOOL_ACCESSORS(Map, bit_field, has_prototype_slot, kHasPrototypeSlot)
-
-void Map::set_has_hidden_prototype(bool value) {
- set_bit_field3(HasHiddenPrototype::update(bit_field3(), value));
-}
-
-bool Map::has_hidden_prototype() const {
- return HasHiddenPrototype::decode(bit_field3());
-}
-
-
-void Map::set_has_indexed_interceptor() {
- set_bit_field(bit_field() | (1 << kHasIndexedInterceptor));
-}
-
-bool Map::has_indexed_interceptor() const {
- return ((1 << kHasIndexedInterceptor) & bit_field()) != 0;
-}
-
-
-void Map::set_is_undetectable() {
- set_bit_field(bit_field() | (1 << kIsUndetectable));
-}
-
-bool Map::is_undetectable() const {
- return ((1 << kIsUndetectable) & bit_field()) != 0;
-}
-
-
-void Map::set_has_named_interceptor() {
- set_bit_field(bit_field() | (1 << kHasNamedInterceptor));
-}
-
-bool Map::has_named_interceptor() const {
- return ((1 << kHasNamedInterceptor) & bit_field()) != 0;
-}
-
-
-void Map::set_is_access_check_needed(bool access_check_needed) {
- if (access_check_needed) {
- set_bit_field(bit_field() | (1 << kIsAccessCheckNeeded));
- } else {
- set_bit_field(bit_field() & ~(1 << kIsAccessCheckNeeded));
- }
-}
-
-bool Map::is_access_check_needed() const {
- return ((1 << kIsAccessCheckNeeded) & bit_field()) != 0;
-}
-
-
-void Map::set_is_extensible(bool value) {
- if (value) {
- set_bit_field2(bit_field2() | (1 << kIsExtensible));
- } else {
- set_bit_field2(bit_field2() & ~(1 << kIsExtensible));
- }
-}
-
-bool Map::is_extensible() const {
- return ((1 << kIsExtensible) & bit_field2()) != 0;
-}
-
-
-void Map::set_is_prototype_map(bool value) {
- set_bit_field2(IsPrototypeMapBits::update(bit_field2(), value));
-}
-
-bool Map::is_prototype_map() const {
- return IsPrototypeMapBits::decode(bit_field2());
-}
-
-bool Map::is_abandoned_prototype_map() const {
- return is_prototype_map() && !owns_descriptors();
-}
-
-bool Map::should_be_fast_prototype_map() const {
- if (!prototype_info()->IsPrototypeInfo()) return false;
- return PrototypeInfo::cast(prototype_info())->should_be_fast_map();
-}
-
-void Map::set_elements_kind(ElementsKind elements_kind) {
- CHECK_LT(static_cast<int>(elements_kind), kElementsKindCount);
- DCHECK_LE(kElementsKindCount, 1 << Map::ElementsKindBits::kSize);
- set_bit_field2(Map::ElementsKindBits::update(bit_field2(), elements_kind));
- DCHECK(this->elements_kind() == elements_kind);
-}
-
-ElementsKind Map::elements_kind() const {
- return Map::ElementsKindBits::decode(bit_field2());
-}
-
-bool Map::has_fast_smi_elements() const {
- return IsSmiElementsKind(elements_kind());
-}
-
-bool Map::has_fast_object_elements() const {
- return IsObjectElementsKind(elements_kind());
-}
-
-bool Map::has_fast_smi_or_object_elements() const {
- return IsSmiOrObjectElementsKind(elements_kind());
-}
-
-bool Map::has_fast_double_elements() const {
- return IsDoubleElementsKind(elements_kind());
-}
-
-bool Map::has_fast_elements() const {
- return IsFastElementsKind(elements_kind());
-}
-
-bool Map::has_sloppy_arguments_elements() const {
- return IsSloppyArgumentsElementsKind(elements_kind());
-}
-
-bool Map::has_fast_sloppy_arguments_elements() const {
- return elements_kind() == FAST_SLOPPY_ARGUMENTS_ELEMENTS;
-}
-
-bool Map::has_fast_string_wrapper_elements() const {
- return elements_kind() == FAST_STRING_WRAPPER_ELEMENTS;
-}
-
-bool Map::has_fixed_typed_array_elements() const {
- return IsFixedTypedArrayElementsKind(elements_kind());
-}
-
-bool Map::has_dictionary_elements() const {
- return IsDictionaryElementsKind(elements_kind());
-}
-
-
-void Map::set_dictionary_map(bool value) {
- uint32_t new_bit_field3 = DictionaryMap::update(bit_field3(), value);
- new_bit_field3 = IsUnstable::update(new_bit_field3, value);
- set_bit_field3(new_bit_field3);
-}
-
-bool Map::is_dictionary_map() const {
- return DictionaryMap::decode(bit_field3());
-}
-
-void Map::set_owns_descriptors(bool owns_descriptors) {
- set_bit_field3(OwnsDescriptors::update(bit_field3(), owns_descriptors));
-}
-
-bool Map::owns_descriptors() const {
- return OwnsDescriptors::decode(bit_field3());
-}
-
-
-void Map::set_is_callable() { set_bit_field(bit_field() | (1 << kIsCallable)); }
-
-
-bool Map::is_callable() const {
- return ((1 << kIsCallable) & bit_field()) != 0;
-}
-
-
-void Map::deprecate() {
- set_bit_field3(Deprecated::update(bit_field3(), true));
- if (FLAG_trace_maps) {
- LOG(GetIsolate(), MapEvent("Deprecate", this, nullptr));
- }
-}
-
-bool Map::is_deprecated() const { return Deprecated::decode(bit_field3()); }
-
-void Map::set_migration_target(bool value) {
- set_bit_field3(IsMigrationTarget::update(bit_field3(), value));
-}
-
-bool Map::is_migration_target() const {
- return IsMigrationTarget::decode(bit_field3());
-}
-
-void Map::set_immutable_proto(bool value) {
- set_bit_field3(ImmutablePrototype::update(bit_field3(), value));
-}
-
-bool Map::is_immutable_proto() const {
- return ImmutablePrototype::decode(bit_field3());
-}
-
-void Map::set_new_target_is_base(bool value) {
- set_bit_field3(NewTargetIsBase::update(bit_field3(), value));
-}
-
-bool Map::new_target_is_base() const {
- return NewTargetIsBase::decode(bit_field3());
-}
-
-void Map::set_may_have_interesting_symbols(bool value) {
- set_bit_field3(MayHaveInterestingSymbols::update(bit_field3(), value));
-}
-
-bool Map::may_have_interesting_symbols() const {
- return MayHaveInterestingSymbols::decode(bit_field3());
-}
-
-void Map::set_construction_counter(int value) {
- set_bit_field3(ConstructionCounter::update(bit_field3(), value));
-}
-
-int Map::construction_counter() const {
- return ConstructionCounter::decode(bit_field3());
-}
-
-
-void Map::mark_unstable() {
- set_bit_field3(IsUnstable::update(bit_field3(), true));
-}
-
-bool Map::is_stable() const { return !IsUnstable::decode(bit_field3()); }
-
-bool Map::CanBeDeprecated() const {
- int descriptor = LastAdded();
- for (int i = 0; i <= descriptor; i++) {
- PropertyDetails details = instance_descriptors()->GetDetails(i);
- if (details.representation().IsNone()) return true;
- if (details.representation().IsSmi()) return true;
- if (details.representation().IsDouble()) return true;
- if (details.representation().IsHeapObject()) return true;
- if (details.kind() == kData && details.location() == kDescriptor) {
- return true;
- }
- }
- return false;
-}
-
-
-void Map::NotifyLeafMapLayoutChange() {
- if (is_stable()) {
- mark_unstable();
- dependent_code()->DeoptimizeDependentCodeGroup(
- GetIsolate(),
- DependentCode::kPrototypeCheckGroup);
- }
-}
-
-bool Map::CanTransition() const {
- // Only JSObject and subtypes have map transitions and back pointers.
- STATIC_ASSERT(LAST_TYPE == LAST_JS_OBJECT_TYPE);
- return instance_type() >= FIRST_JS_OBJECT_TYPE;
-}
-
-bool Map::IsBooleanMap() const { return this == GetHeap()->boolean_map(); }
-bool Map::IsPrimitiveMap() const {
- STATIC_ASSERT(FIRST_PRIMITIVE_TYPE == FIRST_TYPE);
- return instance_type() <= LAST_PRIMITIVE_TYPE;
-}
-bool Map::IsJSReceiverMap() const {
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- return instance_type() >= FIRST_JS_RECEIVER_TYPE;
-}
-bool Map::IsJSObjectMap() const {
- STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
- return instance_type() >= FIRST_JS_OBJECT_TYPE;
-}
-bool Map::IsJSArrayMap() const { return instance_type() == JS_ARRAY_TYPE; }
-bool Map::IsJSFunctionMap() const {
- return instance_type() == JS_FUNCTION_TYPE;
-}
-bool Map::IsStringMap() const { return instance_type() < FIRST_NONSTRING_TYPE; }
-bool Map::IsJSProxyMap() const { return instance_type() == JS_PROXY_TYPE; }
-bool Map::IsJSGlobalProxyMap() const {
- return instance_type() == JS_GLOBAL_PROXY_TYPE;
-}
-bool Map::IsJSGlobalObjectMap() const {
- return instance_type() == JS_GLOBAL_OBJECT_TYPE;
-}
-bool Map::IsJSTypedArrayMap() const {
- return instance_type() == JS_TYPED_ARRAY_TYPE;
-}
-bool Map::IsJSDataViewMap() const {
- return instance_type() == JS_DATA_VIEW_TYPE;
-}
-
-bool Map::IsSpecialReceiverMap() const {
- bool result = IsSpecialReceiverInstanceType(instance_type());
- DCHECK_IMPLIES(!result,
- !has_named_interceptor() && !is_access_check_needed());
- return result;
-}
-
-Object* Map::prototype() const {
- return READ_FIELD(this, kPrototypeOffset);
-}
-
-
-void Map::set_prototype(Object* value, WriteBarrierMode mode) {
- DCHECK(value->IsNull(GetIsolate()) || value->IsJSReceiver());
- WRITE_FIELD(this, kPrototypeOffset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kPrototypeOffset, value, mode);
-}
-
-LayoutDescriptor* Map::layout_descriptor_gc_safe() const {
- DCHECK(FLAG_unbox_double_fields);
- Object* layout_desc = RELAXED_READ_FIELD(this, kLayoutDescriptorOffset);
- return LayoutDescriptor::cast_gc_safe(layout_desc);
-}
-
-
-bool Map::HasFastPointerLayout() const {
- DCHECK(FLAG_unbox_double_fields);
- Object* layout_desc = RELAXED_READ_FIELD(this, kLayoutDescriptorOffset);
- return LayoutDescriptor::IsFastPointerLayout(layout_desc);
-}
-
-
-void Map::UpdateDescriptors(DescriptorArray* descriptors,
- LayoutDescriptor* layout_desc) {
- set_instance_descriptors(descriptors);
- if (FLAG_unbox_double_fields) {
- if (layout_descriptor()->IsSlowLayout()) {
- set_layout_descriptor(layout_desc);
- }
-#ifdef VERIFY_HEAP
- // TODO(ishell): remove these checks from VERIFY_HEAP mode.
- if (FLAG_verify_heap) {
- CHECK(layout_descriptor()->IsConsistentWithMap(this));
- CHECK_EQ(Map::GetVisitorId(this), visitor_id());
- }
-#else
- SLOW_DCHECK(layout_descriptor()->IsConsistentWithMap(this));
- DCHECK(visitor_id() == Map::GetVisitorId(this));
-#endif
- }
-}
-
-
-void Map::InitializeDescriptors(DescriptorArray* descriptors,
- LayoutDescriptor* layout_desc) {
- int len = descriptors->number_of_descriptors();
- set_instance_descriptors(descriptors);
- SetNumberOfOwnDescriptors(len);
-
- if (FLAG_unbox_double_fields) {
- set_layout_descriptor(layout_desc);
-#ifdef VERIFY_HEAP
- // TODO(ishell): remove these checks from VERIFY_HEAP mode.
- if (FLAG_verify_heap) {
- CHECK(layout_descriptor()->IsConsistentWithMap(this));
- }
-#else
- SLOW_DCHECK(layout_descriptor()->IsConsistentWithMap(this));
-#endif
- set_visitor_id(Map::GetVisitorId(this));
- }
-}
-
-
-ACCESSORS(Map, instance_descriptors, DescriptorArray, kDescriptorsOffset)
-ACCESSORS_CHECKED(Map, layout_descriptor, LayoutDescriptor,
- kLayoutDescriptorOffset, FLAG_unbox_double_fields)
-
-void Map::set_bit_field3(uint32_t bits) {
- if (kInt32Size != kPointerSize) {
- WRITE_UINT32_FIELD(this, kBitField3Offset + kInt32Size, 0);
- }
- WRITE_UINT32_FIELD(this, kBitField3Offset, bits);
-}
-
-
-uint32_t Map::bit_field3() const {
- return READ_UINT32_FIELD(this, kBitField3Offset);
-}
-
-LayoutDescriptor* Map::GetLayoutDescriptor() const {
- return FLAG_unbox_double_fields ? layout_descriptor()
- : LayoutDescriptor::FastPointerLayout();
-}
-
-
-void Map::AppendDescriptor(Descriptor* desc) {
- DescriptorArray* descriptors = instance_descriptors();
- int number_of_own_descriptors = NumberOfOwnDescriptors();
- DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
- descriptors->Append(desc);
- SetNumberOfOwnDescriptors(number_of_own_descriptors + 1);
-
- // Properly mark the map if the {desc} is an "interesting symbol".
- if (desc->GetKey()->IsInterestingSymbol()) {
- set_may_have_interesting_symbols(true);
- }
- PropertyDetails details = desc->GetDetails();
- if (details.location() == kField) {
- DCHECK_GT(UnusedPropertyFields(), 0);
- AccountAddedPropertyField();
- }
-
-// This function does not support appending double field descriptors and
-// it should never try to (otherwise, layout descriptor must be updated too).
-#ifdef DEBUG
- DCHECK(details.location() != kField || !details.representation().IsDouble());
-#endif
-}
-
-Object* Map::GetBackPointer() const {
- Object* object = constructor_or_backpointer();
- if (object->IsMap()) {
- return object;
- }
- return GetIsolate()->heap()->undefined_value();
-}
-
-Map* Map::ElementsTransitionMap() {
- DisallowHeapAllocation no_gc;
- return TransitionsAccessor(this, &no_gc)
- .SearchSpecial(GetHeap()->elements_transition_symbol());
-}
-
-
-ACCESSORS(Map, raw_transitions, Object, kTransitionsOrPrototypeInfoOffset)
-
-
-Object* Map::prototype_info() const {
- DCHECK(is_prototype_map());
- return READ_FIELD(this, Map::kTransitionsOrPrototypeInfoOffset);
-}
-
-
-void Map::set_prototype_info(Object* value, WriteBarrierMode mode) {
- CHECK(is_prototype_map());
- WRITE_FIELD(this, Map::kTransitionsOrPrototypeInfoOffset, value);
- CONDITIONAL_WRITE_BARRIER(
- GetHeap(), this, Map::kTransitionsOrPrototypeInfoOffset, value, mode);
-}
-
-
-void Map::SetBackPointer(Object* value, WriteBarrierMode mode) {
- CHECK_GE(instance_type(), FIRST_JS_RECEIVER_TYPE);
- CHECK(value->IsMap());
- CHECK(GetBackPointer()->IsUndefined(GetIsolate()));
- CHECK_IMPLIES(value->IsMap(), Map::cast(value)->GetConstructor() ==
- constructor_or_backpointer());
- set_constructor_or_backpointer(value, mode);
-}
-
-ACCESSORS(Map, dependent_code, DependentCode, kDependentCodeOffset)
-ACCESSORS(Map, weak_cell_cache, Object, kWeakCellCacheOffset)
-ACCESSORS(Map, constructor_or_backpointer, Object,
- kConstructorOrBackPointerOffset)
-
-Object* Map::GetConstructor() const {
- Object* maybe_constructor = constructor_or_backpointer();
- // Follow any back pointers.
- while (maybe_constructor->IsMap()) {
- maybe_constructor =
- Map::cast(maybe_constructor)->constructor_or_backpointer();
- }
- return maybe_constructor;
-}
-
-FunctionTemplateInfo* Map::GetFunctionTemplateInfo() const {
- Object* constructor = GetConstructor();
- if (constructor->IsJSFunction()) {
- DCHECK(JSFunction::cast(constructor)->shared()->IsApiFunction());
- return JSFunction::cast(constructor)->shared()->get_api_func_data();
- }
- DCHECK(constructor->IsFunctionTemplateInfo());
- return FunctionTemplateInfo::cast(constructor);
-}
-
-void Map::SetConstructor(Object* constructor, WriteBarrierMode mode) {
- // Never overwrite a back pointer with a constructor.
- CHECK(!constructor_or_backpointer()->IsMap());
- set_constructor_or_backpointer(constructor, mode);
-}
-
-
-Handle<Map> Map::CopyInitialMap(Handle<Map> map) {
- return CopyInitialMap(map, map->instance_size(), map->GetInObjectProperties(),
- map->UnusedPropertyFields());
-}
-
Object* JSBoundFunction::raw_bound_target_function() const {
return READ_FIELD(this, kBoundTargetFunctionOffset);
}
@@ -3774,11 +2258,20 @@ SMI_ACCESSORS(AccessorInfo, flags, kFlagsOffset)
ACCESSORS(AccessorInfo, expected_receiver_type, Object,
kExpectedReceiverTypeOffset)
-ACCESSORS(AccessorInfo, getter, Object, kGetterOffset)
-ACCESSORS(AccessorInfo, setter, Object, kSetterOffset)
+ACCESSORS_CHECKED2(AccessorInfo, getter, Object, kGetterOffset, true,
+ Foreign::IsNormalized(value))
+ACCESSORS_CHECKED2(AccessorInfo, setter, Object, kSetterOffset, true,
+ Foreign::IsNormalized(value))
ACCESSORS(AccessorInfo, js_getter, Object, kJsGetterOffset)
ACCESSORS(AccessorInfo, data, Object, kDataOffset)
+bool AccessorInfo::has_getter() {
+ bool result = getter() != Smi::kZero;
+ DCHECK_EQ(result, getter() != Smi::kZero &&
+ Foreign::cast(getter())->foreign_address() != nullptr);
+ return result;
+}
+
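The getter and setter slots are now kept normalized: each holds either the zero Smi or a Foreign whose foreign_address() is non-null, never a Foreign wrapping nullptr. That invariant, enforced by the ACCESSORS_CHECKED2 guards here and by Foreign::IsNormalized further down, is what allows has_getter() to reduce to a single Smi comparison; the DCHECK_EQ records its equivalence with the old unwrap-and-test-the-address check.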
ACCESSORS(PromiseResolveThenableJobInfo, thenable, JSReceiver, kThenableOffset)
ACCESSORS(PromiseResolveThenableJobInfo, then, JSReceiver, kThenOffset)
ACCESSORS(PromiseResolveThenableJobInfo, resolve, JSFunction, kResolveOffset)
@@ -3890,6 +2383,7 @@ BOOL_ACCESSORS(InterceptorInfo, flags, can_intercept_symbols,
kCanInterceptSymbolsBit)
BOOL_ACCESSORS(InterceptorInfo, flags, all_can_read, kAllCanReadBit)
BOOL_ACCESSORS(InterceptorInfo, flags, non_masking, kNonMasking)
+BOOL_ACCESSORS(InterceptorInfo, flags, is_named, kNamed)
ACCESSORS(CallHandlerInfo, callback, Object, kCallbackOffset)
ACCESSORS(CallHandlerInfo, js_callback, Object, kJsCallbackOffset)
@@ -3952,18 +2446,6 @@ void ObjectTemplateInfo::set_immutable_proto(bool immutable) {
IsImmutablePrototype::update(Smi::ToInt(data()), immutable)));
}
-int TemplateList::length() const {
- return Smi::ToInt(FixedArray::cast(this)->get(kLengthIndex));
-}
-
-Object* TemplateList::get(int index) const {
- return FixedArray::cast(this)->get(kFirstElementIndex + index);
-}
-
-void TemplateList::set(int index, Object* value) {
- FixedArray::cast(this)->set(kFirstElementIndex + index, value);
-}
-
ACCESSORS(AllocationSite, transition_info_or_boilerplate, Object,
kTransitionInfoOrBoilerplateOffset)
@@ -4056,8 +2538,7 @@ bool JSFunction::HasOptimizationMarker() {
void JSFunction::ClearOptimizationMarker() {
DCHECK(has_feedback_vector());
- DCHECK(!feedback_vector()->has_optimized_code());
- feedback_vector()->SetOptimizationMarker(OptimizationMarker::kNone);
+ feedback_vector()->ClearOptimizationMarker();
}
bool JSFunction::IsInterpreted() {
@@ -4094,22 +2575,6 @@ void JSFunction::CompleteInobjectSlackTrackingIfActive() {
}
}
-bool Map::IsInobjectSlackTrackingInProgress() const {
- return construction_counter() != Map::kNoSlackTracking;
-}
-
-
-void Map::InobjectSlackTrackingStep() {
- // Slack tracking should only be performed on an initial map.
- DCHECK(GetBackPointer()->IsUndefined(GetIsolate()));
- if (!IsInobjectSlackTrackingInProgress()) return;
- int counter = construction_counter();
- set_construction_counter(counter - 1);
- if (counter == kSlackTrackingCounterEnd) {
- CompleteInobjectSlackTracking();
- }
-}
-
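The two Map helpers removed here implement in-object slack tracking: a fresh initial map starts with a small construction counter, every construction ticks it down, and reaching the end value completes tracking and shrinks instances to their used size. A minimal standalone sketch of the counter protocol, using plain ints in place of Map (the constant values are assumptions mirroring kNoSlackTracking, kSlackTrackingCounterStart and kSlackTrackingCounterEnd in this tree):

  #include <cassert>

  constexpr int kNoSlackTracking = 0;
  constexpr int kSlackTrackingCounterEnd = 1;
  constexpr int kSlackTrackingCounterStart = 7;

  struct MapModel {
    int construction_counter = kSlackTrackingCounterStart;
    bool completed = false;  // stands in for CompleteInobjectSlackTracking()

    bool IsInobjectSlackTrackingInProgress() const {
      return construction_counter != kNoSlackTracking;
    }
    void InobjectSlackTrackingStep() {
      if (!IsInobjectSlackTrackingInProgress()) return;
      int counter = construction_counter--;
      if (counter == kSlackTrackingCounterEnd) completed = true;
    }
  };

  int main() {
    MapModel m;
    for (int i = 0; i < kSlackTrackingCounterStart; i++) {
      m.InobjectSlackTrackingStep();
    }
    assert(m.completed && !m.IsInobjectSlackTrackingInProgress());
    return 0;
  }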
AbstractCode* JSFunction::abstract_code() {
if (IsInterpreted()) {
return AbstractCode::cast(shared()->bytecode_array());
@@ -4253,24 +2718,21 @@ bool JSFunction::is_compiled() {
return code() != builtins->builtin(Builtins::kCompileLazy);
}
-ACCESSORS(JSProxy, target, JSReceiver, kTargetOffset)
+ACCESSORS(JSProxy, target, Object, kTargetOffset)
ACCESSORS(JSProxy, handler, Object, kHandlerOffset)
bool JSProxy::IsRevoked() const { return !handler()->IsJSReceiver(); }
-ACCESSORS(JSCollection, table, Object, kTableOffset)
-ACCESSORS(JSCollectionIterator, table, Object, kTableOffset)
-ACCESSORS(JSCollectionIterator, index, Object, kIndexOffset)
-
-ACCESSORS(JSWeakCollection, table, Object, kTableOffset)
-ACCESSORS(JSWeakCollection, next, Object, kNextOffset)
-
+// static
+bool Foreign::IsNormalized(Object* value) {
+ if (value == Smi::kZero) return true;
+ return Foreign::cast(value)->foreign_address() != nullptr;
+}
Address Foreign::foreign_address() {
return AddressFrom<Address>(READ_INTPTR_FIELD(this, kForeignAddressOffset));
}
-
void Foreign::set_foreign_address(Address value) {
WRITE_INTPTR_FIELD(this, kForeignAddressOffset, OffsetFrom(value));
}
@@ -4966,110 +3428,6 @@ int WeakHashTableShape::GetMapRootIndex() {
return Heap::kWeakHashTableMapRootIndex;
}
-int Map::SlackForArraySize(int old_size, int size_limit) {
- const int max_slack = size_limit - old_size;
- CHECK_LE(0, max_slack);
- if (old_size < 4) {
- DCHECK_LE(1, max_slack);
- return 1;
- }
- return Min(max_slack, old_size / 4);
-}
-
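The removed Map::SlackForArraySize computes how much extra capacity to add when growing a backing store: fewer than four entries grow by exactly one slot, anything larger by a quarter of its old size, capped so the result never exceeds the size limit. A standalone sketch of the arithmetic, using plain ints in place of V8's types (illustrative only):

  #include <algorithm>
  #include <cassert>

  int SlackForArraySize(int old_size, int size_limit) {
    const int max_slack = size_limit - old_size;
    assert(max_slack >= 0);
    if (old_size < 4) {
      assert(max_slack >= 1);
      return 1;  // tiny arrays grow by a single slot
    }
    return std::min(max_slack, old_size / 4);  // otherwise grow by ~25%
  }

  int main() {
    assert(SlackForArraySize(2, 10) == 1);   // below four entries: always 1
    assert(SlackForArraySize(8, 100) == 2);  // 8 / 4 == 2
    assert(SlackForArraySize(8, 9) == 1);    // capped by the size limit
    return 0;
  }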
-int TypeFeedbackInfo::ic_total_count() {
- int current = Smi::ToInt(READ_FIELD(this, kStorage1Offset));
- return ICTotalCountField::decode(current);
-}
-
-
-void TypeFeedbackInfo::set_ic_total_count(int count) {
- int value = Smi::ToInt(READ_FIELD(this, kStorage1Offset));
- value = ICTotalCountField::update(value,
- ICTotalCountField::decode(count));
- WRITE_FIELD(this, kStorage1Offset, Smi::FromInt(value));
-}
-
-
-int TypeFeedbackInfo::ic_with_type_info_count() {
- int current = Smi::ToInt(READ_FIELD(this, kStorage2Offset));
- return ICsWithTypeInfoCountField::decode(current);
-}
-
-
-void TypeFeedbackInfo::change_ic_with_type_info_count(int delta) {
- if (delta == 0) return;
- int value = Smi::ToInt(READ_FIELD(this, kStorage2Offset));
- int new_count = ICsWithTypeInfoCountField::decode(value) + delta;
- // We can get negative count here when the type-feedback info is
- // shared between two code objects. This can only happen when
- // the debugger has made a shallow copy of a code object (see
- // Heap::CopyCode). Since we do not optimize when the debugger is
- // active, we can skip this counter update.
- if (new_count >= 0) {
- new_count &= ICsWithTypeInfoCountField::kMask;
- value = ICsWithTypeInfoCountField::update(value, new_count);
- WRITE_FIELD(this, kStorage2Offset, Smi::FromInt(value));
- }
-}
-
-
-int TypeFeedbackInfo::ic_generic_count() {
- return Smi::ToInt(READ_FIELD(this, kStorage3Offset));
-}
-
-
-void TypeFeedbackInfo::change_ic_generic_count(int delta) {
- if (delta == 0) return;
- int new_count = ic_generic_count() + delta;
- if (new_count >= 0) {
- new_count &= ~Smi::kMinValue;
- WRITE_FIELD(this, kStorage3Offset, Smi::FromInt(new_count));
- }
-}
-
-
-void TypeFeedbackInfo::initialize_storage() {
- WRITE_FIELD(this, kStorage1Offset, Smi::kZero);
- WRITE_FIELD(this, kStorage2Offset, Smi::kZero);
- WRITE_FIELD(this, kStorage3Offset, Smi::kZero);
-}
-
-
-void TypeFeedbackInfo::change_own_type_change_checksum() {
- int value = Smi::ToInt(READ_FIELD(this, kStorage1Offset));
- int checksum = OwnTypeChangeChecksum::decode(value);
- checksum = (checksum + 1) % (1 << kTypeChangeChecksumBits);
- value = OwnTypeChangeChecksum::update(value, checksum);
- // Ensure packed bit field is in Smi range.
- if (value > Smi::kMaxValue) value |= Smi::kMinValue;
- if (value < Smi::kMinValue) value &= ~Smi::kMinValue;
- WRITE_FIELD(this, kStorage1Offset, Smi::FromInt(value));
-}
-
-
-void TypeFeedbackInfo::set_inlined_type_change_checksum(int checksum) {
- int value = Smi::ToInt(READ_FIELD(this, kStorage2Offset));
- int mask = (1 << kTypeChangeChecksumBits) - 1;
- value = InlinedTypeChangeChecksum::update(value, checksum & mask);
- // Ensure packed bit field is in Smi range.
- if (value > Smi::kMaxValue) value |= Smi::kMinValue;
- if (value < Smi::kMinValue) value &= ~Smi::kMinValue;
- WRITE_FIELD(this, kStorage2Offset, Smi::FromInt(value));
-}
-
-
-int TypeFeedbackInfo::own_type_change_checksum() {
- int value = Smi::ToInt(READ_FIELD(this, kStorage1Offset));
- return OwnTypeChangeChecksum::decode(value);
-}
-
-
-bool TypeFeedbackInfo::matches_inlined_type_change_checksum(int checksum) {
- int value = Smi::ToInt(READ_FIELD(this, kStorage2Offset));
- int mask = (1 << kTypeChangeChecksumBits) - 1;
- return InlinedTypeChangeChecksum::decode(value) == (checksum & mask);
-}
-
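The removed TypeFeedbackInfo helpers pack two IC counters and two small checksums into three Smi-tagged storage slots; the "value |= Smi::kMinValue" and "value &= ~Smi::kMinValue" fixups keep the packed word representable as a Smi after a bit-field update. The checksum itself simply counts modulo 2^kTypeChangeChecksumBits. A minimal sketch (the width 7 is an assumption matching this tree's constant):

  #include <cassert>

  constexpr int kTypeChangeChecksumBits = 7;  // assumed field width

  int BumpChecksum(int checksum) {
    return (checksum + 1) % (1 << kTypeChangeChecksumBits);
  }

  int main() {
    assert(BumpChecksum(0) == 1);
    assert(BumpChecksum((1 << kTypeChangeChecksumBits) - 1) == 0);  // wraps
    return 0;
  }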
Relocatable::Relocatable(Isolate* isolate) {
isolate_ = isolate;
prev_ = isolate->relocatable_top();
@@ -5092,15 +3450,6 @@ Object* OrderedHashTableIterator<Derived, TableType>::CurrentKey() {
return key;
}
-
-Object* JSMapIterator::CurrentValue() {
- OrderedHashMap* table(OrderedHashMap::cast(this->table()));
- int index = Smi::ToInt(this->index());
- Object* value = table->ValueAt(index);
- DCHECK(!value->IsTheHole(table->GetIsolate()));
- return value;
-}
-
// Predictably converts HeapObject* or Address to uint32 by calculating
// the offset of the address in the respective MemoryChunk.
static inline uint32_t ObjectAddressForHashing(void* object) {
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index 2ac24f823d..f13c222632 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -16,6 +16,8 @@
#include "src/ostreams.h"
#include "src/regexp/jsregexp.h"
#include "src/transitions-inl.h"
+#include "src/wasm/wasm-code-manager.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-objects-inl.h"
namespace v8 {
@@ -239,6 +241,14 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
+ case LOAD_HANDLER_TYPE:
+ LoadHandler::cast(this)->LoadHandlerPrint(os);
+ break;
+
+ case STORE_HANDLER_TYPE:
+ StoreHandler::cast(this)->StoreHandlerPrint(os);
+ break;
+
default:
os << "UNKNOWN TYPE " << map()->instance_type();
UNREACHABLE();
@@ -747,24 +757,31 @@ void FeedbackVector::FeedbackVectorPrint(std::ostream& os) { // NOLINT
return;
}
- os << "\n SharedFunctionInfo: " << Brief(shared_function_info());
- os << "\n Optimized Code: " << Brief(optimized_code_cell());
- os << "\n Invocation Count: " << invocation_count();
- os << "\n Profiler Ticks: " << profiler_ticks();
+ os << "\n - shared function info: " << Brief(shared_function_info());
+ os << "\n - optimized code/marker: ";
+ if (has_optimized_code()) {
+ os << Brief(optimized_code());
+ } else {
+ os << optimization_marker();
+ }
+ os << "\n - invocation count: " << invocation_count();
+ os << "\n - profiler ticks: " << profiler_ticks();
FeedbackMetadataIterator iter(metadata());
while (iter.HasNext()) {
FeedbackSlot slot = iter.Next();
FeedbackSlotKind kind = iter.kind();
- os << "\n Slot " << slot << " " << kind << " ";
+ os << "\n - slot " << slot << " " << kind << " ";
FeedbackSlotPrint(os, slot, kind);
int entry_size = iter.entry_size();
+ if (entry_size > 0) os << " {";
for (int i = 0; i < entry_size; i++) {
int index = GetIndex(slot) + i;
- os << "\n [" << index << "]: " << Brief(get(index));
+ os << "\n [" << index << "]: " << Brief(get(index));
}
+ if (entry_size > 0) os << "\n }";
}
os << "\n";
}
@@ -1430,6 +1447,42 @@ void Tuple3::Tuple3Print(std::ostream& os) { // NOLINT
os << "\n";
}
+void LoadHandler::LoadHandlerPrint(std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "LoadHandler");
+ // TODO(ishell): implement printing based on handler kind
+ os << "\n - handler: " << Brief(smi_handler());
+ os << "\n - validity_cell: " << Brief(validity_cell());
+ int data_count = data_field_count();
+ if (data_count >= 1) {
+ os << "\n - data1: " << Brief(data1());
+ }
+ if (data_count >= 2) {
+ os << "\n - data2: " << Brief(data2());
+ }
+ if (data_count >= 3) {
+ os << "\n - data3: " << Brief(data3());
+ }
+ os << "\n";
+}
+
+void StoreHandler::StoreHandlerPrint(std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "StoreHandler");
+ // TODO(ishell): implement printing based on handler kind
+ os << "\n - handler: " << Brief(smi_handler());
+ os << "\n - validity_cell: " << Brief(validity_cell());
+ int data_count = data_field_count();
+ if (data_count >= 1) {
+ os << "\n - data1: " << Brief(data1());
+ }
+ if (data_count >= 2) {
+ os << "\n - data2: " << Brief(data2());
+ }
+ if (data_count >= 3) {
+ os << "\n - data3: " << Brief(data3());
+ }
+ os << "\n";
+}
+
void ContextExtension::ContextExtensionPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "ContextExtension");
os << "\n - scope_info: " << Brief(scope_info());
@@ -1552,7 +1605,12 @@ void Script::ScriptPrint(std::ostream& os) { // NOLINT
os << "\n - wrapper: " << Brief(wrapper());
os << "\n - compilation type: " << compilation_type();
os << "\n - line ends: " << Brief(line_ends());
- os << "\n - eval from shared: " << Brief(eval_from_shared());
+ if (has_eval_from_shared()) {
+ os << "\n - eval from shared: " << Brief(eval_from_shared());
+ }
+ if (is_wrapped()) {
+ os << "\n - wrapped arguments: " << Brief(wrapped_arguments());
+ }
os << "\n - eval from position: " << eval_from_position();
os << "\n - shared function infos: " << Brief(shared_function_infos());
os << "\n";
@@ -1888,8 +1946,36 @@ extern void _v8_internal_Print_Object(void* object) {
}
extern void _v8_internal_Print_Code(void* object) {
+ i::Address address = reinterpret_cast<i::Address>(object);
i::Isolate* isolate = i::Isolate::Current();
- isolate->FindCodeObject(reinterpret_cast<i::Address>(object))->Print();
+
+ i::wasm::WasmCode* wasm_code =
+ isolate->wasm_engine()->code_manager()->LookupCode(address);
+ if (wasm_code) {
+ wasm_code->Print(isolate);
+ return;
+ }
+
+ if (!isolate->heap()->InSpaceSlow(address, i::CODE_SPACE) &&
+ !isolate->heap()->InSpaceSlow(address, i::LO_SPACE)) {
+ i::PrintF(
+ "%p is not within the current isolate's large object or code spaces\n",
+ static_cast<void*>(address));
+ return;
+ }
+
+ i::Code* code = isolate->FindCodeObject(address);
+ if (!code->IsCode()) {
+ i::PrintF("No code object found containing %p\n",
+ static_cast<void*>(address));
+ return;
+ }
+#ifdef ENABLE_DISASSEMBLER
+ i::OFStream os(stdout);
+ code->Disassemble(nullptr, os, address);
+#else // ENABLE_DISASSEMBLER
+ code->Print();
+#endif // ENABLE_DISASSEMBLER
}
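With this change the debugging helper accepts an arbitrary instruction address: it first asks the wasm code manager, then rejects addresses outside the code and large-object spaces, and only then falls back to FindCodeObject, disassembling around the given pc when the build defines ENABLE_DISASSEMBLER. These helpers are normally invoked from a debugger rather than from C++, e.g. in gdb: call _v8_internal_Print_Code((void*)$pc), which is what V8's tools/gdbinit macros wrap ($pc is gdb's convenience variable, not a V8 name).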
extern void _v8_internal_Print_FeedbackMetadata(void* object) {
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index bd876e67d7..f8c55e57a6 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -73,6 +73,7 @@
#include "src/trap-handler/trap-handler.h"
#include "src/unicode-cache-inl.h"
#include "src/utils-inl.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-objects.h"
#include "src/zone/zone.h"
@@ -1163,7 +1164,7 @@ MaybeHandle<Object> JSProxy::GetProperty(Isolate* isolate,
Object);
}
// 5. Let target be the value of the [[ProxyTarget]] internal slot of O.
- Handle<JSReceiver> target(proxy->target(), isolate);
+ Handle<JSReceiver> target(JSReceiver::cast(proxy->target()), isolate);
// 6. Let trap be ? GetMethod(handler, "get").
Handle<Object> trap;
ASSIGN_RETURN_ON_EXCEPTION(
@@ -1536,7 +1537,7 @@ MaybeHandle<Object> JSProxy::GetPrototype(Handle<JSProxy> proxy) {
NewTypeError(MessageTemplate::kProxyRevoked, trap_name),
Object);
}
- Handle<JSReceiver> target(proxy->target(), isolate);
+ Handle<JSReceiver> target(JSReceiver::cast(proxy->target()), isolate);
Handle<JSReceiver> handler(JSReceiver::cast(proxy->handler()), isolate);
// 5. Let trap be ? GetMethod(handler, "getPrototypeOf").
@@ -1605,9 +1606,7 @@ MaybeHandle<Object> Object::GetPropertyWithAccessor(LookupIterator* it) {
Object);
}
- v8::AccessorNameGetterCallback call_fun =
- v8::ToCData<v8::AccessorNameGetterCallback>(info->getter());
- if (call_fun == nullptr) return isolate->factory()->undefined_value();
+ if (!info->has_getter()) return isolate->factory()->undefined_value();
if (info->is_sloppy() && !receiver->IsJSReceiver()) {
ASSIGN_RETURN_ON_EXCEPTION(isolate, receiver,
@@ -1617,14 +1616,15 @@ MaybeHandle<Object> Object::GetPropertyWithAccessor(LookupIterator* it) {
PropertyCallbackArguments args(isolate, info->data(), *receiver, *holder,
kDontThrow);
- Handle<Object> result = args.Call(call_fun, name);
+ Handle<Object> result = args.CallAccessorGetter(info, name);
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
if (result.is_null()) return isolate->factory()->undefined_value();
Handle<Object> reboxed_result = handle(*result, isolate);
if (info->replace_on_access() && receiver->IsJSReceiver()) {
- args.Call(reinterpret_cast<GenericNamedPropertySetterCallback>(
- &Accessors::ReconfigureToDataProperty),
- name, result);
+ args.CallNamedSetterCallback(
+ reinterpret_cast<GenericNamedPropertySetterCallback>(
+ &Accessors::ReconfigureToDataProperty),
+ name, result);
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
}
return reboxed_result;
@@ -1733,7 +1733,7 @@ Maybe<bool> Object::SetPropertyWithAccessor(LookupIterator* it,
PropertyCallbackArguments args(isolate, info->data(), *receiver, *holder,
should_throw);
- Handle<Object> result = args.Call(call_fun, name, value);
+ Handle<Object> result = args.CallNamedSetterCallback(call_fun, name, value);
// In the case of AccessorNameSetterCallback, we know that the result value
// cannot have been set, so the result of Call will be null. In the case of
// AccessorNameBooleanSetterCallback, the result will either be null
@@ -1853,20 +1853,9 @@ MaybeHandle<Object> GetPropertyWithInterceptorInternal(
*holder, kDontThrow);
if (it->IsElement()) {
- uint32_t index = it->index();
- v8::IndexedPropertyGetterCallback getter =
- v8::ToCData<v8::IndexedPropertyGetterCallback>(interceptor->getter());
- result = args.Call(getter, index);
+ result = args.CallIndexedGetter(interceptor, it->index());
} else {
- Handle<Name> name = it->name();
- DCHECK(!name->IsPrivate());
-
- DCHECK_IMPLIES(name->IsSymbol(), interceptor->can_intercept_symbols());
-
- v8::GenericNamedPropertyGetterCallback getter =
- v8::ToCData<v8::GenericNamedPropertyGetterCallback>(
- interceptor->getter());
- result = args.Call(getter, name);
+ result = args.CallNamedGetter(interceptor, it->name());
}
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
@@ -1898,17 +1887,9 @@ Maybe<PropertyAttributes> GetPropertyAttributesWithInterceptorInternal(
if (!interceptor->query()->IsUndefined(isolate)) {
Handle<Object> result;
if (it->IsElement()) {
- uint32_t index = it->index();
- v8::IndexedPropertyQueryCallback query =
- v8::ToCData<v8::IndexedPropertyQueryCallback>(interceptor->query());
- result = args.Call(query, index);
+ result = args.CallIndexedQuery(interceptor, it->index());
} else {
- Handle<Name> name = it->name();
- DCHECK(!name->IsPrivate());
- v8::GenericNamedPropertyQueryCallback query =
- v8::ToCData<v8::GenericNamedPropertyQueryCallback>(
- interceptor->query());
- result = args.Call(query, name);
+ result = args.CallNamedQuery(interceptor, it->name());
}
if (!result.is_null()) {
int32_t value;
@@ -1919,17 +1900,9 @@ Maybe<PropertyAttributes> GetPropertyAttributesWithInterceptorInternal(
// TODO(verwaest): Use GetPropertyWithInterceptor?
Handle<Object> result;
if (it->IsElement()) {
- uint32_t index = it->index();
- v8::IndexedPropertyGetterCallback getter =
- v8::ToCData<v8::IndexedPropertyGetterCallback>(interceptor->getter());
- result = args.Call(getter, index);
+ result = args.CallIndexedGetter(interceptor, it->index());
} else {
- Handle<Name> name = it->name();
- DCHECK(!name->IsPrivate());
- v8::GenericNamedPropertyGetterCallback getter =
- v8::ToCData<v8::GenericNamedPropertyGetterCallback>(
- interceptor->getter());
- result = args.Call(getter, name);
+ result = args.CallNamedGetter(interceptor, it->name());
}
if (!result.is_null()) return Just(DONT_ENUM);
}
@@ -1960,22 +1933,11 @@ Maybe<bool> SetPropertyWithInterceptorInternal(
*holder, should_throw);
if (it->IsElement()) {
- uint32_t index = it->index();
- v8::IndexedPropertySetterCallback setter =
- v8::ToCData<v8::IndexedPropertySetterCallback>(interceptor->setter());
// TODO(neis): In the future, we may want to actually return the
// interceptor's result, which then should be a boolean.
- result = !args.Call(setter, index, value).is_null();
+ result = !args.CallIndexedSetter(interceptor, it->index(), value).is_null();
} else {
- Handle<Name> name = it->name();
- DCHECK(!name->IsPrivate());
-
- DCHECK_IMPLIES(name->IsSymbol(), interceptor->can_intercept_symbols());
-
- v8::GenericNamedPropertySetterCallback setter =
- v8::ToCData<v8::GenericNamedPropertySetterCallback>(
- interceptor->setter());
- result = !args.Call(setter, name, value).is_null();
+ result = !args.CallNamedSetter(interceptor, it->name(), value).is_null();
}
RETURN_VALUE_IF_SCHEDULED_EXCEPTION(it->isolate(), Nothing<bool>());
@@ -2025,20 +1987,11 @@ Maybe<bool> DefinePropertyWithInterceptorInternal(
}
if (it->IsElement()) {
- uint32_t index = it->index();
- v8::IndexedPropertyDefinerCallback definer =
- v8::ToCData<v8::IndexedPropertyDefinerCallback>(interceptor->definer());
- result = !args.Call(definer, index, *descriptor).is_null();
+ result = !args.CallIndexedDefiner(interceptor, it->index(), *descriptor)
+ .is_null();
} else {
- Handle<Name> name = it->name();
- DCHECK(!name->IsPrivate());
-
- DCHECK_IMPLIES(name->IsSymbol(), interceptor->can_intercept_symbols());
-
- v8::GenericNamedPropertyDefinerCallback definer =
- v8::ToCData<v8::GenericNamedPropertyDefinerCallback>(
- interceptor->definer());
- result = !args.Call(definer, name, *descriptor).is_null();
+ result =
+ !args.CallNamedDefiner(interceptor, it->name(), *descriptor).is_null();
}
RETURN_VALUE_IF_SCHEDULED_EXCEPTION(it->isolate(), Nothing<bool>());
@@ -2163,11 +2116,13 @@ void JSObject::SetNormalizedProperty(Handle<JSObject> object,
uint32_t hash = name->Hash();
if (object->IsJSGlobalObject()) {
- Handle<JSGlobalObject> global_obj(JSGlobalObject::cast(*object));
+ Handle<JSGlobalObject> global_obj = Handle<JSGlobalObject>::cast(object);
Handle<GlobalDictionary> dictionary(global_obj->global_dictionary());
int entry = dictionary->FindEntry(isolate, name, hash);
if (entry == GlobalDictionary::kNotFound) {
+ DCHECK_IMPLIES(global_obj->map()->is_prototype_map(),
+ Map::IsPrototypeChainInvalidated(global_obj->map()));
auto cell = isolate->factory()->NewPropertyCell(name);
cell->set_value(*value);
auto cell_type = value->IsUndefined(isolate)
@@ -2187,6 +2142,8 @@ void JSObject::SetNormalizedProperty(Handle<JSObject> object,
int entry = dictionary->FindEntry(name);
if (entry == NameDictionary::kNotFound) {
+ DCHECK_IMPLIES(object->map()->is_prototype_map(),
+ Map::IsPrototypeChainInvalidated(object->map()));
dictionary = NameDictionary::Add(dictionary, name, value, details);
object->SetProperties(*dictionary);
} else {
@@ -2613,7 +2570,7 @@ bool Object::IterationHasObservableEffects() {
JSArray* array = JSArray::cast(this);
Isolate* isolate = array->GetIsolate();
-#if defined(DEBUG) || defined(ENABLE_SLOWFAST_SWITCH)
+#ifdef V8_ENABLE_FORCE_SLOW_PATH
if (isolate->force_slow_path()) return true;
#endif
@@ -3280,7 +3237,10 @@ VisitorId Map::GetVisitorId(Map* map) {
if (instance_type == ALLOCATION_SITE_TYPE) {
return kVisitAllocationSite;
}
+ return kVisitStruct;
+ case LOAD_HANDLER_TYPE:
+ case STORE_HANDLER_TYPE:
return kVisitStruct;
default:
@@ -4213,6 +4173,9 @@ void MigrateFastToSlow(Handle<JSObject> object, Handle<Map> new_map,
// JSGlobalProxy must never be normalized
DCHECK(!object->IsJSGlobalProxy());
+ DCHECK_IMPLIES(new_map->is_prototype_map(),
+ Map::IsPrototypeChainInvalidated(*new_map));
+
Isolate* isolate = object->GetIsolate();
HandleScope scope(isolate);
Handle<Map> map(object->map());
@@ -4474,7 +4437,10 @@ void Map::DeprecateTransitionTree() {
transitions.GetTarget(i)->DeprecateTransitionTree();
}
DCHECK(!constructor_or_backpointer()->IsFunctionTemplateInfo());
- deprecate();
+ set_is_deprecated(true);
+ if (FLAG_trace_maps) {
+ LOG(GetIsolate(), MapEvent("Deprecate", this, nullptr));
+ }
dependent_code()->DeoptimizeDependentCodeGroup(
GetIsolate(), DependentCode::kTransitionGroup);
NotifyLeafMapLayoutChange();
@@ -5547,7 +5513,13 @@ Handle<Map> JSObject::GetElementsTransitionMap(Handle<JSObject> object,
void JSProxy::Revoke(Handle<JSProxy> proxy) {
Isolate* isolate = proxy->GetIsolate();
- if (!proxy->IsRevoked()) proxy->set_handler(isolate->heap()->null_value());
+ // ES#sec-proxy-revocation-functions
+ if (!proxy->IsRevoked()) {
+ // 5. Set p.[[ProxyTarget]] to null.
+ proxy->set_target(isolate->heap()->null_value());
+ // 6. Set p.[[ProxyHandler]] to null.
+ proxy->set_handler(isolate->heap()->null_value());
+ }
DCHECK(proxy->IsRevoked());
}
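Revocation now follows the spec's proxy revocation function steps and nulls out both internal slots, which is why JSProxy::target was retyped to a plain Object earlier in this diff and why the proxy->target() call sites throughout this file gain an explicit JSReceiver::cast: the cast only executes on non-revoked paths, where the target is still guaranteed to be a receiver. At the JavaScript level this is the Proxy.revocable(target, handler) pair; once revoke() has run, both slots read null and every further trap on the proxy throws a TypeError.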
@@ -5563,7 +5535,7 @@ Maybe<bool> JSProxy::IsArray(Handle<JSProxy> proxy) {
isolate->factory()->NewStringFromAsciiChecked("IsArray")));
return Nothing<bool>();
}
- object = handle(proxy->target(), isolate);
+ object = handle(JSReceiver::cast(proxy->target()), isolate);
if (object->IsJSArray()) return Just(true);
if (!object->IsJSProxy()) return Just(false);
}
@@ -5588,7 +5560,7 @@ Maybe<bool> JSProxy::HasProperty(Isolate* isolate, Handle<JSProxy> proxy,
return Nothing<bool>();
}
// 5. Let target be the value of the [[ProxyTarget]] internal slot of O.
- Handle<JSReceiver> target(proxy->target(), isolate);
+ Handle<JSReceiver> target(JSReceiver::cast(proxy->target()), isolate);
// 6. Let trap be ? GetMethod(handler, "has").
Handle<Object> trap;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
@@ -5661,7 +5633,7 @@ Maybe<bool> JSProxy::SetProperty(Handle<JSProxy> proxy, Handle<Name> name,
*factory->NewTypeError(MessageTemplate::kProxyRevoked, trap_name));
return Nothing<bool>();
}
- Handle<JSReceiver> target(proxy->target(), isolate);
+ Handle<JSReceiver> target(JSReceiver::cast(proxy->target()), isolate);
Handle<JSReceiver> handler(JSReceiver::cast(proxy->handler()), isolate);
Handle<Object> trap;
@@ -5712,7 +5684,7 @@ Maybe<bool> JSProxy::DeletePropertyOrElement(Handle<JSProxy> proxy,
*factory->NewTypeError(MessageTemplate::kProxyRevoked, trap_name));
return Nothing<bool>();
}
- Handle<JSReceiver> target(proxy->target(), isolate);
+ Handle<JSReceiver> target(JSReceiver::cast(proxy->target()), isolate);
Handle<JSReceiver> handler(JSReceiver::cast(proxy->handler()), isolate);
Handle<Object> trap;
@@ -5986,7 +5958,7 @@ void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) {
void JSObject::MigrateInstance(Handle<JSObject> object) {
Handle<Map> original_map(object->map());
Handle<Map> map = Map::Update(original_map);
- map->set_migration_target(true);
+ map->set_is_migration_target(true);
MigrateToMap(object, map);
if (FLAG_trace_migration) {
object->PrintInstanceMigration(stdout, *original_map, *map);
@@ -6300,7 +6272,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
Handle<Map> new_map = Map::CopyDropDescriptors(old_map);
new_map->set_may_have_interesting_symbols(new_map->has_named_interceptor() ||
new_map->is_access_check_needed());
- new_map->set_dictionary_map(false);
+ new_map->set_is_dictionary_map(false);
NotifyMapChange(old_map, new_map, isolate);
@@ -6626,19 +6598,9 @@ Maybe<bool> JSObject::DeletePropertyWithInterceptor(LookupIterator* it,
*holder, should_throw);
Handle<Object> result;
if (it->IsElement()) {
- uint32_t index = it->index();
- v8::IndexedPropertyDeleterCallback deleter =
- v8::ToCData<v8::IndexedPropertyDeleterCallback>(interceptor->deleter());
- result = args.Call(deleter, index);
+ result = args.CallIndexedDeleter(interceptor, it->index());
} else {
- DCHECK_IMPLIES(it->name()->IsSymbol(),
- interceptor->can_intercept_symbols());
- Handle<Name> name = it->name();
- DCHECK(!name->IsPrivate());
- v8::GenericNamedPropertyDeleterCallback deleter =
- v8::ToCData<v8::GenericNamedPropertyDeleterCallback>(
- interceptor->deleter());
- result = args.Call(deleter, name);
+ result = args.CallNamedDeleter(interceptor, it->name());
}
RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
@@ -7526,7 +7488,7 @@ Maybe<bool> JSProxy::DefineOwnProperty(Isolate* isolate, Handle<JSProxy> proxy,
return Nothing<bool>();
}
// 5. Let target be the value of the [[ProxyTarget]] internal slot of O.
- Handle<JSReceiver> target(proxy->target(), isolate);
+ Handle<JSReceiver> target(JSReceiver::cast(proxy->target()), isolate);
// 6. Let trap be ? GetMethod(handler, "defineProperty").
Handle<Object> trap;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
@@ -7690,19 +7652,9 @@ Maybe<bool> GetPropertyDescriptorWithInterceptor(LookupIterator* it,
PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
*holder, kDontThrow);
if (it->IsElement()) {
- uint32_t index = it->index();
- v8::IndexedPropertyDescriptorCallback descriptorCallback =
- v8::ToCData<v8::IndexedPropertyDescriptorCallback>(
- interceptor->descriptor());
-
- result = args.Call(descriptorCallback, index);
+ result = args.CallIndexedDescriptor(interceptor, it->index());
} else {
- Handle<Name> name = it->name();
- DCHECK(!name->IsPrivate());
- v8::GenericNamedPropertyDescriptorCallback descriptorCallback =
- v8::ToCData<v8::GenericNamedPropertyDescriptorCallback>(
- interceptor->descriptor());
- result = args.Call(descriptorCallback, name);
+ result = args.CallNamedDescriptor(interceptor, it->name());
}
if (!result.is_null()) {
// Request successfully intercepted, try to set the property
@@ -7809,7 +7761,7 @@ Maybe<bool> JSProxy::GetOwnPropertyDescriptor(Isolate* isolate,
return Nothing<bool>();
}
// 5. Let target be the value of the [[ProxyTarget]] internal slot of O.
- Handle<JSReceiver> target(proxy->target(), isolate);
+ Handle<JSReceiver> target(JSReceiver::cast(proxy->target()), isolate);
// 6. Let trap be ? GetMethod(handler, "getOwnPropertyDescriptor").
Handle<Object> trap;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
@@ -8261,7 +8213,7 @@ Maybe<bool> JSProxy::PreventExtensions(Handle<JSProxy> proxy,
*factory->NewTypeError(MessageTemplate::kProxyRevoked, trap_name));
return Nothing<bool>();
}
- Handle<JSReceiver> target(proxy->target(), isolate);
+ Handle<JSReceiver> target(JSReceiver::cast(proxy->target()), isolate);
Handle<JSReceiver> handler(JSReceiver::cast(proxy->handler()), isolate);
Handle<Object> trap;
@@ -8369,7 +8321,7 @@ Maybe<bool> JSProxy::IsExtensible(Handle<JSProxy> proxy) {
*factory->NewTypeError(MessageTemplate::kProxyRevoked, trap_name));
return Nothing<bool>();
}
- Handle<JSReceiver> target(proxy->target(), isolate);
+ Handle<JSReceiver> target(JSReceiver::cast(proxy->target()), isolate);
Handle<JSReceiver> handler(JSReceiver::cast(proxy->handler()), isolate);
Handle<Object> trap;
@@ -8839,9 +8791,10 @@ MUST_USE_RESULT Maybe<bool> FastGetOwnValuesOrEntries(
MaybeHandle<FixedArray> GetOwnValuesOrEntries(Isolate* isolate,
Handle<JSReceiver> object,
PropertyFilter filter,
+ bool try_fast_path,
bool get_entries) {
Handle<FixedArray> values_or_entries;
- if (filter == ENUMERABLE_STRINGS) {
+ if (try_fast_path && filter == ENUMERABLE_STRINGS) {
Maybe<bool> fast_values_or_entries = FastGetOwnValuesOrEntries(
isolate, object, get_entries, &values_or_entries);
if (fast_values_or_entries.IsNothing()) return MaybeHandle<FixedArray>();
@@ -8894,13 +8847,17 @@ MaybeHandle<FixedArray> GetOwnValuesOrEntries(Isolate* isolate,
}
MaybeHandle<FixedArray> JSReceiver::GetOwnValues(Handle<JSReceiver> object,
- PropertyFilter filter) {
- return GetOwnValuesOrEntries(object->GetIsolate(), object, filter, false);
+ PropertyFilter filter,
+ bool try_fast_path) {
+ return GetOwnValuesOrEntries(object->GetIsolate(), object, filter,
+ try_fast_path, false);
}
MaybeHandle<FixedArray> JSReceiver::GetOwnEntries(Handle<JSReceiver> object,
- PropertyFilter filter) {
- return GetOwnValuesOrEntries(object->GetIsolate(), object, filter, true);
+ PropertyFilter filter,
+ bool try_fast_path) {
+ return GetOwnValuesOrEntries(object->GetIsolate(), object, filter,
+ try_fast_path, true);
}
bool Map::DictionaryElementsInPrototypeChainOnly() {
@@ -9077,13 +9034,13 @@ Handle<Map> Map::RawCopy(Handle<Map> map, int instance_size,
result->set_bit_field(map->bit_field());
result->set_bit_field2(map->bit_field2());
int new_bit_field3 = map->bit_field3();
- new_bit_field3 = OwnsDescriptors::update(new_bit_field3, true);
+ new_bit_field3 = OwnsDescriptorsBit::update(new_bit_field3, true);
new_bit_field3 = NumberOfOwnDescriptorsBits::update(new_bit_field3, 0);
new_bit_field3 = EnumLengthBits::update(new_bit_field3,
kInvalidEnumCacheSentinel);
- new_bit_field3 = Deprecated::update(new_bit_field3, false);
+ new_bit_field3 = IsDeprecatedBit::update(new_bit_field3, false);
if (!map->is_dictionary_map()) {
- new_bit_field3 = IsUnstable::update(new_bit_field3, false);
+ new_bit_field3 = IsUnstableBit::update(new_bit_field3, false);
}
result->set_bit_field3(new_bit_field3);
return result;
@@ -9164,8 +9121,8 @@ Handle<Map> Map::CopyNormalized(Handle<Map> map,
// Clear the unused_property_fields explicitly as this field should not
// be accessed for normalized maps.
result->SetInObjectUnusedPropertyFields(0);
- result->set_dictionary_map(true);
- result->set_migration_target(false);
+ result->set_is_dictionary_map(true);
+ result->set_is_migration_target(false);
result->set_may_have_interesting_symbols(true);
result->set_construction_counter(kNoSlackTracking);
@@ -9184,7 +9141,7 @@ Handle<Map> Map::CopyNormalized(Handle<Map> map,
// static
Handle<Map> Map::TransitionToImmutableProto(Handle<Map> map) {
Handle<Map> new_map = Map::Copy(map, "ImmutablePrototype");
- new_map->set_immutable_proto(true);
+ new_map->set_is_immutable_proto(true);
return new_map;
}
@@ -9723,8 +9680,8 @@ Handle<Map> Map::TransitionToDataProperty(Handle<Map> map, Handle<Name> name,
bool* created_new_map) {
RuntimeCallTimerScope stats_scope(
*map, map->is_prototype_map()
- ? &RuntimeCallStats::PrototypeMap_TransitionToDataProperty
- : &RuntimeCallStats::Map_TransitionToDataProperty);
+ ? RuntimeCallCounterId::kPrototypeMap_TransitionToDataProperty
+ : RuntimeCallCounterId::kMap_TransitionToDataProperty);
DCHECK(name->IsUniqueName());
DCHECK(!map->is_dictionary_map());
@@ -9839,8 +9796,8 @@ Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
RuntimeCallTimerScope stats_scope(
isolate,
map->is_prototype_map()
- ? &RuntimeCallStats::PrototypeMap_TransitionToAccessorProperty
- : &RuntimeCallStats::Map_TransitionToAccessorProperty);
+ ? RuntimeCallCounterId::kPrototypeMap_TransitionToAccessorProperty
+ : RuntimeCallCounterId::kMap_TransitionToAccessorProperty);
// At least one of the accessors needs to be a new value.
DCHECK(!getter->IsNull(isolate) || !setter->IsNull(isolate));
@@ -10291,8 +10248,10 @@ Handle<ArrayList> ArrayList::Add(Handle<ArrayList> array, Handle<Object> obj1,
// static
Handle<ArrayList> ArrayList::New(Isolate* isolate, int size) {
- Handle<ArrayList> result = Handle<ArrayList>::cast(
- isolate->factory()->NewFixedArray(size + kFirstIndex));
+ Handle<FixedArray> fixed_array =
+ isolate->factory()->NewFixedArray(size + kFirstIndex);
+ fixed_array->set_map_no_write_barrier(isolate->heap()->array_list_map());
+ Handle<ArrayList> result = Handle<ArrayList>::cast(fixed_array);
result->SetLength(0);
return result;
}
@@ -10331,10 +10290,13 @@ Handle<FixedArray> EnsureSpaceInFixedArray(Handle<FixedArray> array,
// static
Handle<ArrayList> ArrayList::EnsureSpace(Handle<ArrayList> array, int length) {
const bool empty = (array->length() == 0);
- auto ret = Handle<ArrayList>::cast(
- EnsureSpaceInFixedArray(array, kFirstIndex + length));
- if (empty) ret->SetLength(0);
- return ret;
+ auto ret = EnsureSpaceInFixedArray(array, kFirstIndex + length);
+ if (empty) {
+ ret->set_map_no_write_barrier(array->GetHeap()->array_list_map());
+
+ Handle<ArrayList>::cast(ret)->SetLength(0);
+ }
+ return Handle<ArrayList>::cast(ret);
}
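The map stamping in ArrayList::New and ArrayList::EnsureSpace guards against the backing store coming back with the plain FixedArray map: a freshly allocated array carries it, and an ArrayList that starts from the canonical empty fixed array grows into one. A non-empty list, by contrast, is grown by copying and keeps its array_list_map, which appears to be why EnsureSpace only restamps in the empty case.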
Handle<RegExpMatchInfo> RegExpMatchInfo::ReserveCaptures(
@@ -10710,7 +10672,7 @@ Handle<Object> String::ToNumber(Handle<String> subject) {
// whitespace, a sign ('+' or '-'), the decimal point, a decimal digit
// or the 'I' character ('Infinity'). All of those have character codes
// not greater than '9', except 'I' and the non-breaking space (0xA0).
- if (data[start_pos] != 'I' && data[start_pos] != 0xa0) {
+ if (data[start_pos] != 'I' && data[start_pos] != 0xA0) {
return isolate->factory()->nan_value();
}
} else if (len - start_pos < 10 && AreDigits(data, start_pos, len)) {
@@ -12551,15 +12513,19 @@ bool JSObject::UnregisterPrototypeUser(Handle<Map> user, Isolate* isolate) {
return true;
}
+namespace {
-static void InvalidatePrototypeChainsInternal(Map* map) {
+// This function must be kept in sync with
+// AccessorAssembler::InvalidateValidityCellIfPrototype() which does pre-checks
+// before jumping here.
+PrototypeInfo* InvalidateOnePrototypeValidityCellInternal(Map* map) {
DCHECK(map->is_prototype_map());
if (FLAG_trace_prototype_users) {
PrintF("Invalidating prototype map %p 's cell\n",
reinterpret_cast<void*>(map));
}
Object* maybe_proto_info = map->prototype_info();
- if (!maybe_proto_info->IsPrototypeInfo()) return;
+ if (!maybe_proto_info->IsPrototypeInfo()) return nullptr;
PrototypeInfo* proto_info = PrototypeInfo::cast(maybe_proto_info);
Object* maybe_cell = proto_info->validity_cell();
if (maybe_cell->IsCell()) {
@@ -12567,6 +12533,12 @@ static void InvalidatePrototypeChainsInternal(Map* map) {
Cell* cell = Cell::cast(maybe_cell);
cell->set_value(Smi::FromInt(Map::kPrototypeChainInvalid));
}
+ return proto_info;
+}
+
+void InvalidatePrototypeChainsInternal(Map* map) {
+ PrototypeInfo* proto_info = InvalidateOnePrototypeValidityCellInternal(map);
+ if (proto_info == nullptr) return;
WeakFixedArray::Iterator iterator(proto_info->prototype_users());
// For now, only maps register themselves as users.
@@ -12577,13 +12549,27 @@ static void InvalidatePrototypeChainsInternal(Map* map) {
}
}
+} // namespace
// static
-void JSObject::InvalidatePrototypeChains(Map* map) {
+Map* JSObject::InvalidatePrototypeChains(Map* map) {
DisallowHeapAllocation no_gc;
InvalidatePrototypeChainsInternal(map);
+ return map;
}
+// We also invalidate the global object's validity cell when a new lexical
+// environment variable is added. This is necessary to ensure that
+// Load/StoreGlobalIC handlers that load/store from the global object's
+// prototype get properly invalidated.
+// Note that the normal Load/StoreICs that load/store through the global
+// object in the prototype chain are not affected by the appearance of a new
+// lexical variable, so we don't propagate the invalidation down.
+// static
+void JSObject::InvalidatePrototypeValidityCell(JSGlobalObject* global) {
+ DisallowHeapAllocation no_gc;
+ InvalidateOnePrototypeValidityCellInternal(global->map());
+}
// static
Handle<PrototypeInfo> Map::GetOrCreatePrototypeInfo(Handle<JSObject> prototype,
@@ -12662,6 +12648,21 @@ Handle<Cell> Map::GetOrCreatePrototypeChainValidityCell(Handle<Map> map,
}
// static
+bool Map::IsPrototypeChainInvalidated(Map* map) {
+ DCHECK(map->is_prototype_map());
+ Object* maybe_proto_info = map->prototype_info();
+ if (maybe_proto_info->IsPrototypeInfo()) {
+ PrototypeInfo* proto_info = PrototypeInfo::cast(maybe_proto_info);
+ Object* maybe_cell = proto_info->validity_cell();
+ if (maybe_cell->IsCell()) {
+ Cell* cell = Cell::cast(maybe_cell);
+ return cell->value() == Smi::FromInt(Map::kPrototypeChainInvalid);
+ }
+ }
+ return true;
+}
+
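IsPrototypeChainInvalidated reads as a tri-state check: no PrototypeInfo or no Cell means nothing is recording validity, and the map is conservatively reported as invalidated; otherwise the cell's value is compared against the invalid sentinel. A minimal standalone model (the sentinel values 0 and 1 are assumptions mirroring Map::kPrototypeChainValid and Map::kPrototypeChainInvalid):

  #include <optional>

  enum Validity { kPrototypeChainValid = 0, kPrototypeChainInvalid = 1 };

  // std::optional stands in for "PrototypeInfo exists and holds a Cell".
  bool IsPrototypeChainInvalidated(std::optional<Validity> cell) {
    if (cell.has_value()) return *cell == kPrototypeChainInvalid;
    return true;  // no cell recorded yet: conservatively report invalidated
  }

  int main() {
    return IsPrototypeChainInvalidated(std::nullopt) &&
                   !IsPrototypeChainInvalidated(kPrototypeChainValid)
               ? 0
               : 1;
  }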
+// static
Handle<WeakCell> Map::GetOrCreatePrototypeWeakCell(Handle<JSReceiver> prototype,
Isolate* isolate) {
DCHECK(!prototype.is_null());
@@ -12688,7 +12689,8 @@ Handle<WeakCell> Map::GetOrCreatePrototypeWeakCell(Handle<JSReceiver> prototype,
// static
void Map::SetPrototype(Handle<Map> map, Handle<Object> prototype,
bool enable_prototype_setup_mode) {
- RuntimeCallTimerScope stats_scope(*map, &RuntimeCallStats::Map_SetPrototype);
+ RuntimeCallTimerScope stats_scope(*map,
+ RuntimeCallCounterId::kMap_SetPrototype);
bool is_hidden = false;
if (prototype->IsJSObject()) {
@@ -12815,7 +12817,7 @@ void JSFunction::SetPrototype(Handle<JSFunction> function,
JSObject::MigrateToMap(function, new_map);
new_map->SetConstructor(*value);
- new_map->set_non_instance_prototype(true);
+ new_map->set_has_non_instance_prototype(true);
FunctionKind kind = function->shared()->kind();
Handle<Context> native_context(function->context()->native_context());
@@ -12829,7 +12831,7 @@ void JSFunction::SetPrototype(Handle<JSFunction> function,
isolate);
} else {
construct_prototype = Handle<JSReceiver>::cast(value);
- function->map()->set_non_instance_prototype(false);
+ function->map()->set_has_non_instance_prototype(false);
}
SetInstancePrototype(isolate, function, construct_prototype);
@@ -12976,6 +12978,56 @@ void JSFunction::EnsureHasInitialMap(Handle<JSFunction> function) {
map->StartInobjectSlackTracking();
}
+namespace {
+bool FastInitializeDerivedMap(Isolate* isolate, Handle<JSFunction> new_target,
+ Handle<JSFunction> constructor,
+ Handle<Map> constructor_initial_map) {
+ // Check that |function|'s initial map is still in sync with the
+ // |constructor|; otherwise we must create a new initial map for |function|.
+ if (new_target->has_initial_map() &&
+ new_target->initial_map()->GetConstructor() == *constructor) {
+ DCHECK(new_target->instance_prototype()->IsJSReceiver());
+ return true;
+ }
+ InstanceType instance_type = constructor_initial_map->instance_type();
+ DCHECK(CanSubclassHaveInobjectProperties(instance_type));
+ // Create a new map with the size and number of in-object properties
+ // suggested by |function|.
+
+ // Link initial map and constructor function if the new.target is actually a
+ // subclass constructor.
+ if (!IsDerivedConstructor(new_target->shared()->kind())) return false;
+
+ int instance_size;
+ int in_object_properties;
+ int embedder_fields =
+ JSObject::GetEmbedderFieldCount(*constructor_initial_map);
+ bool success = JSFunction::CalculateInstanceSizeForDerivedClass(
+ new_target, instance_type, embedder_fields, &instance_size,
+ &in_object_properties);
+
+ Handle<Map> map;
+ if (success) {
+ int pre_allocated = constructor_initial_map->GetInObjectProperties() -
+ constructor_initial_map->UnusedPropertyFields();
+ CHECK_LE(constructor_initial_map->UsedInstanceSize(), instance_size);
+ int unused_property_fields = in_object_properties - pre_allocated;
+ map = Map::CopyInitialMap(constructor_initial_map, instance_size,
+ in_object_properties, unused_property_fields);
+ } else {
+ map = Map::CopyInitialMap(constructor_initial_map);
+ }
+ map->set_new_target_is_base(false);
+ Handle<Object> prototype(new_target->instance_prototype(), isolate);
+ JSFunction::SetInitialMap(new_target, map, prototype);
+ DCHECK(new_target->instance_prototype()->IsJSReceiver());
+ map->SetConstructor(*constructor);
+ map->set_construction_counter(Map::kNoSlackTracking);
+ map->StartInobjectSlackTracking();
+ return true;
+}
+
+} // namespace
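FastInitializeDerivedMap folds the previously inline logic into one predicate: reuse new.target's initial map when that map still names this constructor, report failure for non-derived constructors so the caller takes the slow path, and otherwise copy the constructor's initial map resized for the subclass. The pre_allocated arithmetic works out like this: if the base map has 4 in-object properties of which 1 is still unused, pre_allocated is 3; a subclass estimated at 6 in-object properties then gets unused_property_fields of 6 - 3 = 3 in the copied map.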
// static
MaybeHandle<Map> JSFunction::GetDerivedMap(Isolate* isolate,
@@ -12986,55 +13038,16 @@ MaybeHandle<Map> JSFunction::GetDerivedMap(Isolate* isolate,
Handle<Map> constructor_initial_map(constructor->initial_map(), isolate);
if (*new_target == *constructor) return constructor_initial_map;
+ Handle<Map> result_map;
// Fast case, new.target is a subclass of constructor. The map is cacheable
// (and may already have been cached). new.target.prototype is guaranteed to
// be a JSReceiver.
if (new_target->IsJSFunction()) {
Handle<JSFunction> function = Handle<JSFunction>::cast(new_target);
-
- // Check that |function|'s initial map is still in sync with the
- // |constructor|; otherwise we must create a new initial map for |function|.
- if (function->has_initial_map() &&
- function->initial_map()->GetConstructor() == *constructor) {
+ if (FastInitializeDerivedMap(isolate, function, constructor,
+ constructor_initial_map)) {
return handle(function->initial_map(), isolate);
}
-
- // Create a new map with the size and number of in-object properties
- // suggested by |function|.
-
- // Link initial map and constructor function if the new.target is actually a
- // subclass constructor.
- if (IsDerivedConstructor(function->shared()->kind())) {
- Handle<Object> prototype(function->instance_prototype(), isolate);
- InstanceType instance_type = constructor_initial_map->instance_type();
- DCHECK(CanSubclassHaveInobjectProperties(instance_type));
- int embedder_fields =
- JSObject::GetEmbedderFieldCount(*constructor_initial_map);
- int pre_allocated = constructor_initial_map->GetInObjectProperties() -
- constructor_initial_map->UnusedPropertyFields();
- int instance_size;
- int in_object_properties;
- bool success = CalculateInstanceSizeForDerivedClass(
- function, instance_type, embedder_fields, &instance_size,
- &in_object_properties);
-
- int unused_property_fields = in_object_properties - pre_allocated;
-
- Handle<Map> map;
- if (success) {
- map = Map::CopyInitialMap(constructor_initial_map, instance_size,
- in_object_properties, unused_property_fields);
- } else {
- map = Map::CopyInitialMap(constructor_initial_map);
- }
- map->set_new_target_is_base(false);
-
- JSFunction::SetInitialMap(function, map, prototype);
- map->SetConstructor(*constructor);
- map->set_construction_counter(Map::kNoSlackTracking);
- map->StartInobjectSlackTracking();
- return map;
- }
}
// Slow path, new.target is either a proxy or can't cache the map.
@@ -13076,7 +13089,7 @@ MaybeHandle<Map> JSFunction::GetDerivedMap(Isolate* isolate,
Handle<Map> map = Map::CopyInitialMap(constructor_initial_map);
map->set_new_target_is_base(false);
- DCHECK(prototype->IsJSReceiver());
+ CHECK(prototype->IsJSReceiver());
if (map->prototype() != *prototype) Map::SetPrototype(map, prototype);
map->SetConstructor(*constructor);
return map;
@@ -13183,7 +13196,8 @@ Handle<String> JSFunction::ToString(Handle<JSFunction> function) {
}
if (FLAG_harmony_function_tostring) {
- return Handle<String>::cast(shared_info->GetSourceCodeHarmony());
+ return Handle<String>::cast(
+ SharedFunctionInfo::GetSourceCodeHarmony(shared_info));
}
IncrementalStringBuilder builder(isolate);
@@ -13214,7 +13228,22 @@ Handle<String> JSFunction::ToString(Handle<JSFunction> function) {
builder.AppendString(handle(shared_info->name(), isolate));
}
}
- builder.AppendString(Handle<String>::cast(shared_info->GetSourceCode()));
+ if (shared_info->is_wrapped()) {
+ builder.AppendCharacter('(');
+ Handle<FixedArray> args(
+ Script::cast(shared_info->script())->wrapped_arguments());
+ int argc = args->length();
+ for (int i = 0; i < argc; i++) {
+ if (i > 0) builder.AppendCString(", ");
+ builder.AppendString(Handle<String>(String::cast(args->get(i))));
+ }
+ builder.AppendCString(") {\n");
+ }
+ builder.AppendString(
+ Handle<String>::cast(SharedFunctionInfo::GetSourceCode(shared_info)));
+ if (shared_info->is_wrapped()) {
+ builder.AppendCString("\n}");
+ }
return builder.Finish().ToHandleChecked();
}
@@ -13245,10 +13274,10 @@ int Script::GetEvalPosition() {
// Due to laziness, the position may not have been translated from code
// offset yet, which would be encoded as a negative integer. In that case,
// translate and set the position.
- if (eval_from_shared()->IsUndefined(GetIsolate())) {
+ if (!has_eval_from_shared()) {
position = 0;
} else {
- SharedFunctionInfo* shared = SharedFunctionInfo::cast(eval_from_shared());
+ SharedFunctionInfo* shared = eval_from_shared();
position = shared->abstract_code()->SourcePosition(-position);
}
DCHECK_GE(position, 0);
@@ -13286,6 +13315,15 @@ bool Script::GetPositionInfo(Handle<Script> script, int position,
bool Script::IsUserJavaScript() { return type() == Script::TYPE_NORMAL; }
+bool Script::ContainsAsmModule() {
+ DisallowHeapAllocation no_gc;
+ SharedFunctionInfo::ScriptIterator iter(Handle<Script>(this));
+ while (SharedFunctionInfo* info = iter.Next()) {
+ if (info->HasAsmWasmData()) return true;
+ }
+ return false;
+}
+
namespace {
bool GetPositionInfoSlow(const Script* script, int position,
Script::PositionInfo* info) {
@@ -13324,8 +13362,8 @@ bool Script::GetPositionInfo(int position, PositionInfo* info,
Handle<WasmCompiledModule> compiled_module(
WasmCompiledModule::cast(wasm_compiled_module()));
DCHECK_LE(0, position);
- return compiled_module->GetPositionInfo(static_cast<uint32_t>(position),
- info);
+ return compiled_module->shared()->GetPositionInfo(
+ static_cast<uint32_t>(position), info);
}
if (line_ends()->IsUndefined(GetIsolate())) {
@@ -13456,8 +13494,12 @@ Handle<JSObject> Script::GetWrapper(Handle<Script> script) {
MaybeHandle<SharedFunctionInfo> Script::FindSharedFunctionInfo(
Isolate* isolate, const FunctionLiteral* fun) {
- DCHECK_NE(fun->function_literal_id(), FunctionLiteral::kIdTypeInvalid);
- DCHECK_LT(fun->function_literal_id(), shared_function_infos()->length());
+ CHECK_NE(fun->function_literal_id(), FunctionLiteral::kIdTypeInvalid);
+ // If this check fails, the problem is most probably the function id
+ // renumbering done by AstFunctionLiteralIdReindexer; in particular,
+ // AstTraversalVisitor may not be recursing properly into the construct
+ // that triggers the mismatch.
+ CHECK_LT(fun->function_literal_id(), shared_function_infos()->length());
Object* shared = shared_function_infos()->get(fun->function_literal_id());
if (shared->IsUndefined(isolate) || WeakCell::cast(shared)->cleared()) {
return MaybeHandle<SharedFunctionInfo>();
@@ -13628,14 +13670,14 @@ String* SharedFunctionInfo::DebugName() {
return name();
}
-bool SharedFunctionInfo::HasNoSideEffect() {
- if (!computed_has_no_side_effect()) {
- DisallowHeapAllocation not_handlified;
- Handle<SharedFunctionInfo> info(this);
- set_has_no_side_effect(DebugEvaluate::FunctionHasNoSideEffect(info));
- set_computed_has_no_side_effect(true);
+// static
+bool SharedFunctionInfo::HasNoSideEffect(Handle<SharedFunctionInfo> info) {
+ if (!info->computed_has_no_side_effect()) {
+ bool has_no_side_effect = DebugEvaluate::FunctionHasNoSideEffect(info);
+ info->set_has_no_side_effect(has_no_side_effect);
+ info->set_computed_has_no_side_effect(true);
}
- return has_no_side_effect();
+ return info->has_no_side_effect();
}
// The filter is a pattern that matches function names in this way:
@@ -13680,22 +13722,44 @@ bool SharedFunctionInfo::HasSourceCode() const {
!reinterpret_cast<Script*>(script())->source()->IsUndefined(isolate);
}
-
-Handle<Object> SharedFunctionInfo::GetSourceCode() {
- if (!HasSourceCode()) return GetIsolate()->factory()->undefined_value();
- Handle<String> source(String::cast(Script::cast(script())->source()));
- return GetIsolate()->factory()->NewSubString(
- source, start_position(), end_position());
+// static
+Handle<Object> SharedFunctionInfo::GetSourceCode(
+ Handle<SharedFunctionInfo> shared) {
+ Isolate* isolate = shared->GetIsolate();
+ if (!shared->HasSourceCode()) return isolate->factory()->undefined_value();
+ Handle<String> source(String::cast(Script::cast(shared->script())->source()));
+ return isolate->factory()->NewSubString(source, shared->start_position(),
+ shared->end_position());
}
-Handle<Object> SharedFunctionInfo::GetSourceCodeHarmony() {
- Isolate* isolate = GetIsolate();
- if (!HasSourceCode()) return isolate->factory()->undefined_value();
- Handle<String> script_source(String::cast(Script::cast(script())->source()));
- int start_pos = function_token_position();
- if (start_pos == kNoSourcePosition) start_pos = start_position();
- return isolate->factory()->NewSubString(script_source, start_pos,
- end_position());
+// static
+Handle<Object> SharedFunctionInfo::GetSourceCodeHarmony(
+ Handle<SharedFunctionInfo> shared) {
+ Isolate* isolate = shared->GetIsolate();
+ if (!shared->HasSourceCode()) return isolate->factory()->undefined_value();
+ Handle<String> script_source(
+ String::cast(Script::cast(shared->script())->source()));
+ int start_pos = shared->function_token_position();
+ if (start_pos == kNoSourcePosition) start_pos = shared->start_position();
+ Handle<String> source = isolate->factory()->NewSubString(
+ script_source, start_pos, shared->end_position());
+ if (!shared->is_wrapped()) return source;
+
+ DCHECK(!shared->name_should_print_as_anonymous());
+ IncrementalStringBuilder builder(isolate);
+ builder.AppendCString("function ");
+ builder.AppendString(Handle<String>(shared->name(), isolate));
+ builder.AppendCString("(");
+ Handle<FixedArray> args(Script::cast(shared->script())->wrapped_arguments());
+ int argc = args->length();
+ for (int i = 0; i < argc; i++) {
+ if (i > 0) builder.AppendCString(", ");
+ builder.AppendString(Handle<String>(String::cast(args->get(i))));
+ }
+ builder.AppendCString(") {\n");
+ builder.AppendString(source);
+ builder.AppendCString("\n}");
+ return builder.Finish().ToHandleChecked();
}
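For functions compiled through the wrapped-script path, GetSourceCodeHarmony now rebuilds a complete function expression from the script's wrapped_arguments rather than returning the bare body. For a shared info named foo wrapped over arguments a and b, the reconstructed source reads function foo(a, b) { <body> }, with the body on its own lines, matching what JSFunction::ToString assembles piecewise in the hunk further up.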
bool SharedFunctionInfo::IsInlineable() {
@@ -13720,15 +13784,17 @@ void JSFunction::CalculateInstanceSizeHelper(InstanceType instance_type,
int* instance_size,
int* in_object_properties) {
int header_size = JSObject::GetHeaderSize(instance_type, has_prototype_slot);
- DCHECK_LE(requested_embedder_fields,
- (JSObject::kMaxInstanceSize - header_size) >> kPointerSizeLog2);
+ int max_nof_fields =
+ (JSObject::kMaxInstanceSize - header_size) >> kPointerSizeLog2;
+ CHECK_LE(max_nof_fields, JSObject::kMaxInObjectProperties);
+ *in_object_properties = Min(requested_in_object_properties, max_nof_fields);
+ CHECK_LE(requested_embedder_fields, max_nof_fields - *in_object_properties);
*instance_size =
- Min(header_size +
- ((requested_embedder_fields + requested_in_object_properties)
- << kPointerSizeLog2),
- JSObject::kMaxInstanceSize);
- *in_object_properties = ((*instance_size - header_size) >> kPointerSizeLog2) -
- requested_embedder_fields;
+ header_size +
+ ((requested_embedder_fields + *in_object_properties) << kPointerSizeLog2);
+ CHECK_EQ(*in_object_properties,
+ ((*instance_size - header_size) >> kPointerSizeLog2) -
+ requested_embedder_fields);
}
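The helper now clamps the in-object property count to what fits under the maximum instance size first, derives the instance size from the clamped value, and turns the old debug-only assumption about embedder fields into hard CHECKs. A standalone sketch of the arithmetic, with illustrative constants in place of V8's (the header size and kMaxInstanceSize here are assumptions):

  #include <algorithm>
  #include <cassert>

  constexpr int kPointerSizeLog2 = 3;        // 64-bit pointers
  constexpr int kMaxInstanceSize = 255 * 8;  // illustrative limit

  void CalculateInstanceSize(int header_size, int requested_embedder_fields,
                             int requested_in_object_properties,
                             int* instance_size, int* in_object_properties) {
    int max_nof_fields = (kMaxInstanceSize - header_size) >> kPointerSizeLog2;
    *in_object_properties =
        std::min(requested_in_object_properties, max_nof_fields);
    // Embedder fields are never clamped; they must fit in what remains.
    assert(requested_embedder_fields <=
           max_nof_fields - *in_object_properties);
    *instance_size =
        header_size + ((requested_embedder_fields + *in_object_properties)
                       << kPointerSizeLog2);
  }

  int main() {
    int size = 0, props = 0;
    // 24-byte header, 2 embedder fields, 10 requested properties:
    CalculateInstanceSize(24, 2, 10, &size, &props);
    assert(props == 10 && size == 24 + (2 + 10) * 8);
    return 0;
  }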
// static
@@ -13738,7 +13804,6 @@ bool JSFunction::CalculateInstanceSizeForDerivedClass(
int* in_object_properties) {
Isolate* isolate = function->GetIsolate();
int expected_nof_properties = 0;
- bool result = true;
for (PrototypeIterator iter(isolate, function, kStartAtReceiver);
!iter.IsAtEnd(); iter.Advance()) {
Handle<JSReceiver> current =
@@ -13751,21 +13816,24 @@ bool JSFunction::CalculateInstanceSizeForDerivedClass(
if (shared->is_compiled() ||
Compiler::Compile(func, Compiler::CLEAR_EXCEPTION)) {
DCHECK(shared->is_compiled());
- expected_nof_properties += shared->expected_nof_properties();
+ int count = shared->expected_nof_properties();
+ // Check that the estimate is sane.
+ if (expected_nof_properties <= JSObject::kMaxInObjectProperties - count) {
+ expected_nof_properties += count;
+ } else {
+ expected_nof_properties = JSObject::kMaxInObjectProperties;
+ }
} else if (!shared->is_compiled()) {
// In case there was a compilation error for the constructor we will
// throw an error during instantiation. Hence we directly return false.
- result = false;
- break;
- }
- if (!IsDerivedConstructor(shared->kind())) {
- break;
+ return false;
}
+ if (!IsDerivedConstructor(shared->kind())) break;
}
CalculateInstanceSizeHelper(instance_type, true, requested_embedder_fields,
expected_nof_properties, instance_size,
in_object_properties);
- return result;
+ return true;
}
@@ -13804,7 +13872,7 @@ std::ostream& operator<<(std::ostream& os, const SourceCodeOf& v) {
void SharedFunctionInfo::DisableOptimization(BailoutReason reason) {
- DCHECK_NE(reason, kNoReason);
+ DCHECK_NE(reason, BailoutReason::kNoReason);
set_compiler_hints(
DisabledOptimizationReasonBits::update(compiler_hints(), reason));
@@ -13833,6 +13901,7 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
shared_info->set_inferred_name(*lit->inferred_name());
shared_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
shared_info->set_language_mode(lit->language_mode());
+ shared_info->set_is_wrapped(lit->is_wrapped());
// shared_info->set_kind(lit->kind());
// FunctionKind must have already been set.
DCHECK(lit->kind() == shared_info->kind());
@@ -13941,7 +14010,7 @@ void Code::InvalidateEmbeddedObjects() {
void Code::Relocate(intptr_t delta) {
- if (trap_handler::UseTrapHandler() && is_wasm_code()) {
+ if (trap_handler::IsTrapHandlerEnabled() && is_wasm_code()) {
const int index = trap_handler_index()->value();
if (index >= 0) {
trap_handler::UpdateHandlerDataCodePointer(index, instruction_start());
@@ -14101,11 +14170,11 @@ void JSFunction::ClearTypeFeedbackInfo() {
}
}
-void Code::PrintDeoptLocation(FILE* out, Address pc) {
+void Code::PrintDeoptLocation(FILE* out, const char* str, Address pc) {
Deoptimizer::DeoptInfo info = Deoptimizer::GetDeoptInfo(this, pc);
class SourcePosition pos = info.position;
- if (info.deopt_reason != DeoptimizeReason::kNoReason || pos.IsKnown()) {
- PrintF(out, " ;;; deoptimize at ");
+ if (info.deopt_reason != DeoptimizeReason::kUnknown || pos.IsKnown()) {
+ PrintF(out, "%s", str);
OFStream outstr(out);
pos.Print(outstr, this);
PrintF(out, ", %s\n", DeoptimizeReasonToString(info.deopt_reason));
@@ -14291,9 +14360,11 @@ void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT
DCHECK(Translation::BEGIN == opcode);
int frame_count = iterator.Next();
int jsframe_count = iterator.Next();
+ int update_feedback_count = iterator.Next();
os << " " << Translation::StringFor(opcode)
<< " {frame count=" << frame_count
- << ", js frame count=" << jsframe_count << "}\n";
+ << ", js frame count=" << jsframe_count
+ << ", update_feedback_count=" << update_feedback_count << "}\n";
while (iterator.HasNext() &&
Translation::BEGIN !=
@@ -14450,6 +14521,14 @@ void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT
os << "{length=" << args_length << "}";
break;
}
+
+ case Translation::UPDATE_FEEDBACK: {
+ int literal_index = iterator.Next();
+ FeedbackSlot slot(iterator.Next());
+ os << "{feedback={vector_index=" << literal_index << ", slot=" << slot
+ << "}}";
+ break;
+ }
}
os << "\n";
}
@@ -14485,8 +14564,7 @@ void HandlerTable::HandlerTableReturnPrint(std::ostream& os) {
}
}
-
-void Code::Disassemble(const char* name, std::ostream& os) { // NOLINT
+void Code::Disassemble(const char* name, std::ostream& os, void* current_pc) {
os << "kind = " << Kind2String(kind()) << "\n";
if (is_stub()) {
const char* n = CodeStub::MajorName(CodeStub::GetMajorKey(this));
@@ -14514,21 +14592,22 @@ void Code::Disassemble(const char* name, std::ostream& os) { // NOLINT
os << "compiler = " << (is_turbofanned() ? "turbofan" : "unknown") << "\n";
os << "address = " << static_cast<const void*>(this) << "\n";
- os << "Instructions (size = " << instruction_size() << ")\n";
+ os << "Body (size = " << instruction_size() << ")\n";
{
Isolate* isolate = GetIsolate();
int size = instruction_size();
int safepoint_offset =
- is_turbofanned() ? static_cast<int>(safepoint_table_offset()) : size;
+ has_safepoint_info() ? safepoint_table_offset() : size;
int constant_pool_offset = FLAG_enable_embedded_constant_pool
? this->constant_pool_offset()
: size;
    // Stop before reaching any embedded tables.
int code_size = Min(safepoint_offset, constant_pool_offset);
+ os << "Instructions (size = " << code_size << ")\n";
byte* begin = instruction_start();
byte* end = begin + code_size;
- Disassembler::Decode(isolate, &os, begin, end, this);
+ Disassembler::Decode(isolate, &os, begin, end, this, current_pc);
if (constant_pool_offset < size) {
int constant_pool_size = safepoint_offset - constant_pool_offset;
@@ -14562,7 +14641,7 @@ void Code::Disassemble(const char* name, std::ostream& os) { // NOLINT
}
os << "\n";
- if (is_turbofanned()) {
+ if (has_safepoint_info()) {
SafepointTable table(this);
os << "Safepoints (size = " << table.size() << ")\n";
for (unsigned i = 0; i < table.length(); i++) {
@@ -15057,7 +15136,7 @@ Maybe<bool> JSProxy::SetPrototype(Handle<JSProxy> proxy, Handle<Object> value,
return Nothing<bool>();
}
// 5. Let target be the value of the [[ProxyTarget]] internal slot.
- Handle<JSReceiver> target(proxy->target(), isolate);
+ Handle<JSReceiver> target(JSReceiver::cast(proxy->target()), isolate);
// 6. Let trap be ? GetMethod(handler, "getPrototypeOf").
Handle<Object> trap;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
@@ -15267,13 +15346,10 @@ static bool ShouldConvertToSlowElements(JSObject* object, uint32_t capacity,
bool JSObject::WouldConvertToSlowElements(uint32_t index) {
- if (HasFastElements()) {
- Handle<FixedArrayBase> backing_store(FixedArrayBase::cast(elements()));
- uint32_t capacity = static_cast<uint32_t>(backing_store->length());
- uint32_t new_capacity;
- return ShouldConvertToSlowElements(this, capacity, index, &new_capacity);
- }
- return false;
+ if (!HasFastElements()) return false;
+ uint32_t capacity = static_cast<uint32_t>(elements()->length());
+ uint32_t new_capacity;
+ return ShouldConvertToSlowElements(this, capacity, index, &new_capacity);
}
@@ -16655,26 +16731,18 @@ MaybeHandle<JSTypedArray> JSTypedArray::SpeciesCreate(
// 2. Let defaultConstructor be the intrinsic object listed in column one of
// Table 51 for exemplar.[[TypedArrayName]].
- Handle<JSFunction> default_ctor = isolate->uint8_array_fun();
- switch (exemplar->type()) {
-#define TYPED_ARRAY_CTOR(Type, type, TYPE, ctype, size) \
- case kExternal##Type##Array: { \
- default_ctor = isolate->type##_array_fun(); \
- break; \
- }
-
- TYPED_ARRAYS(TYPED_ARRAY_CTOR)
-#undef TYPED_ARRAY_CTOR
- default:
- UNREACHABLE();
- }
+ Handle<JSFunction> default_ctor =
+ JSTypedArray::DefaultConstructor(isolate, exemplar);
// 3. Let constructor be ? SpeciesConstructor(exemplar, defaultConstructor).
- Handle<Object> ctor;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, ctor,
- Object::SpeciesConstructor(isolate, exemplar, default_ctor),
- JSTypedArray);
+ Handle<Object> ctor = default_ctor;
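+  // Fast path: when the exemplar still has the default typed-array prototype
+  // and the species protector is intact, skip the SpeciesConstructor lookup.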
+ if (!exemplar->HasJSTypedArrayPrototype(isolate) ||
+ !isolate->IsArraySpeciesLookupChainIntact()) {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, ctor,
+ Object::SpeciesConstructor(isolate, exemplar, default_ctor),
+ JSTypedArray);
+ }
// 4. Return ? TypedArrayCreate(constructor, argumentList).
return Create(isolate, ctor, argc, argv, method_name);
@@ -16682,6 +16750,10 @@ MaybeHandle<JSTypedArray> JSTypedArray::SpeciesCreate(
void JSGlobalObject::InvalidatePropertyCell(Handle<JSGlobalObject> global,
Handle<Name> name) {
+  // Regardless of whether the property is there or not, invalidate
+  // Load/StoreGlobalICs that load/store through the global object's prototype.
+ JSObject::InvalidatePrototypeValidityCell(*global);
+
DCHECK(!global->HasFastProperties());
auto dictionary = handle(global->global_dictionary());
int entry = dictionary->FindEntry(name);
@@ -17703,6 +17775,8 @@ Handle<FixedArray> BaseNameDictionary<Derived, Shape>::IterationIndices(
array->set(array_size++, Smi::FromInt(i));
}
+ DCHECK_EQ(array_size, length);
+
EnumIndexComparator<Derived> cmp(raw_dictionary);
// Use AtomicElement wrapper to ensure that std::sort uses atomic load and
// store operations that are safe for concurrent marking.
@@ -18924,6 +18998,13 @@ void JSArrayBuffer::FreeBackingStore() {
// static
void JSArrayBuffer::FreeBackingStore(Isolate* isolate, Allocation allocation) {
+ if (allocation.mode == ArrayBuffer::Allocator::AllocationMode::kReservation) {
+ // TODO(eholk): check with WasmAllocationTracker to make sure this is
+ // actually a buffer we are tracking.
+ isolate->wasm_engine()->allocation_tracker()->ReleaseAddressSpace(
+ allocation.length);
+ }
+
isolate->array_buffer_allocator()->Free(allocation.allocation_base,
allocation.length, allocation.mode);
}
@@ -18959,7 +19040,7 @@ void JSArrayBuffer::Setup(Handle<JSArrayBuffer> array_buffer, Isolate* isolate,
// already been promoted.
array_buffer->set_backing_store(data);
- array_buffer->set_allocation_base(data);
+ array_buffer->set_allocation_base(allocation_base);
array_buffer->set_allocation_length(allocation_length);
if (data && !is_external) {
@@ -19179,6 +19260,14 @@ Handle<PropertyCell> PropertyCell::PrepareForValue(
details = details.set_cell_type(new_type);
cell->set_property_details(details);
+ if (new_type == PropertyCellType::kConstant ||
+ new_type == PropertyCellType::kConstantType) {
+ // Store the value now to ensure that the cell contains the constant or
+    // type information. Otherwise a subsequent store operation will turn
+    // the cell mutable.
+ cell->set_value(*value);
+ }
+
// Deopt when transitioning from a constant type.
if (!invalidate && (old_type != new_type ||
original_details.IsReadOnly() != details.IsReadOnly())) {
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 521c0e6554..93f4a4eb95 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -349,40 +349,44 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(FIXED_DOUBLE_ARRAY_TYPE) \
V(FILLER_TYPE) \
\
+ V(ACCESS_CHECK_INFO_TYPE) \
V(ACCESSOR_INFO_TYPE) \
V(ACCESSOR_PAIR_TYPE) \
- V(ACCESS_CHECK_INFO_TYPE) \
- V(INTERCEPTOR_INFO_TYPE) \
+ V(ALIASED_ARGUMENTS_ENTRY_TYPE) \
+ V(ALLOCATION_MEMENTO_TYPE) \
+ V(ALLOCATION_SITE_TYPE) \
+ V(ASYNC_GENERATOR_REQUEST_TYPE) \
+ V(CONTEXT_EXTENSION_TYPE) \
+ V(DEBUG_INFO_TYPE) \
V(FUNCTION_TEMPLATE_INFO_TYPE) \
+ V(INTERCEPTOR_INFO_TYPE) \
+ V(MODULE_INFO_ENTRY_TYPE) \
+ V(MODULE_TYPE) \
V(OBJECT_TEMPLATE_INFO_TYPE) \
- V(ALLOCATION_SITE_TYPE) \
- V(ALLOCATION_MEMENTO_TYPE) \
- V(SCRIPT_TYPE) \
- V(ALIASED_ARGUMENTS_ENTRY_TYPE) \
- V(PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE) \
V(PROMISE_REACTION_JOB_INFO_TYPE) \
- V(DEBUG_INFO_TYPE) \
- V(STACK_FRAME_INFO_TYPE) \
+ V(PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE) \
V(PROTOTYPE_INFO_TYPE) \
+ V(SCRIPT_TYPE) \
+ V(STACK_FRAME_INFO_TYPE) \
V(TUPLE2_TYPE) \
V(TUPLE3_TYPE) \
- V(CONTEXT_EXTENSION_TYPE) \
- V(MODULE_TYPE) \
- V(MODULE_INFO_ENTRY_TYPE) \
- V(ASYNC_GENERATOR_REQUEST_TYPE) \
+ \
V(FIXED_ARRAY_TYPE) \
- V(HASH_TABLE_TYPE) \
V(DESCRIPTOR_ARRAY_TYPE) \
+ V(HASH_TABLE_TYPE) \
V(TRANSITION_ARRAY_TYPE) \
+ \
+ V(CELL_TYPE) \
+ V(CODE_DATA_CONTAINER_TYPE) \
V(FEEDBACK_VECTOR_TYPE) \
+ V(LOAD_HANDLER_TYPE) \
V(PROPERTY_ARRAY_TYPE) \
- V(SHARED_FUNCTION_INFO_TYPE) \
- V(CELL_TYPE) \
- V(WEAK_CELL_TYPE) \
V(PROPERTY_CELL_TYPE) \
+ V(SHARED_FUNCTION_INFO_TYPE) \
V(SMALL_ORDERED_HASH_MAP_TYPE) \
V(SMALL_ORDERED_HASH_SET_TYPE) \
- V(CODE_DATA_CONTAINER_TYPE) \
+ V(STORE_HANDLER_TYPE) \
+ V(WEAK_CELL_TYPE) \
\
V(JS_PROXY_TYPE) \
V(JS_GLOBAL_OBJECT_TYPE) \
@@ -390,32 +394,34 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(JS_MODULE_NAMESPACE_TYPE) \
V(JS_SPECIAL_API_OBJECT_TYPE) \
V(JS_VALUE_TYPE) \
- V(JS_MESSAGE_OBJECT_TYPE) \
- V(JS_DATE_TYPE) \
V(JS_API_OBJECT_TYPE) \
V(JS_OBJECT_TYPE) \
+ \
V(JS_ARGUMENTS_TYPE) \
+ V(JS_ARRAY_BUFFER_TYPE) \
+ V(JS_ARRAY_TYPE) \
+ V(JS_ASYNC_FROM_SYNC_ITERATOR_TYPE) \
+ V(JS_ASYNC_GENERATOR_OBJECT_TYPE) \
V(JS_CONTEXT_EXTENSION_OBJECT_TYPE) \
+ V(JS_DATE_TYPE) \
+ V(JS_ERROR_TYPE) \
V(JS_GENERATOR_OBJECT_TYPE) \
- V(JS_ASYNC_GENERATOR_OBJECT_TYPE) \
- V(JS_ARRAY_TYPE) \
- V(JS_ARRAY_BUFFER_TYPE) \
- V(JS_TYPED_ARRAY_TYPE) \
- V(JS_DATA_VIEW_TYPE) \
- V(JS_SET_TYPE) \
V(JS_MAP_TYPE) \
- V(JS_SET_KEY_VALUE_ITERATOR_TYPE) \
- V(JS_SET_VALUE_ITERATOR_TYPE) \
V(JS_MAP_KEY_ITERATOR_TYPE) \
V(JS_MAP_KEY_VALUE_ITERATOR_TYPE) \
V(JS_MAP_VALUE_ITERATOR_TYPE) \
- V(JS_WEAK_MAP_TYPE) \
- V(JS_WEAK_SET_TYPE) \
+ V(JS_MESSAGE_OBJECT_TYPE) \
V(JS_PROMISE_TYPE) \
V(JS_REGEXP_TYPE) \
- V(JS_ERROR_TYPE) \
- V(JS_ASYNC_FROM_SYNC_ITERATOR_TYPE) \
+ V(JS_SET_TYPE) \
+ V(JS_SET_KEY_VALUE_ITERATOR_TYPE) \
+ V(JS_SET_VALUE_ITERATOR_TYPE) \
V(JS_STRING_ITERATOR_TYPE) \
+ V(JS_WEAK_MAP_TYPE) \
+ V(JS_WEAK_SET_TYPE) \
+ \
+ V(JS_TYPED_ARRAY_TYPE) \
+ V(JS_DATA_VIEW_TYPE) \
\
ARRAY_ITERATOR_TYPE_LIST(V) \
\
@@ -531,29 +537,38 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
// type tags, elements in this list have to be added to the INSTANCE_TYPE_LIST
// manually.
#define STRUCT_LIST(V) \
+ V(ACCESS_CHECK_INFO, AccessCheckInfo, access_check_info) \
V(ACCESSOR_INFO, AccessorInfo, accessor_info) \
V(ACCESSOR_PAIR, AccessorPair, accessor_pair) \
- V(ACCESS_CHECK_INFO, AccessCheckInfo, access_check_info) \
- V(INTERCEPTOR_INFO, InterceptorInfo, interceptor_info) \
+ V(ALIASED_ARGUMENTS_ENTRY, AliasedArgumentsEntry, aliased_arguments_entry) \
+ V(ALLOCATION_MEMENTO, AllocationMemento, allocation_memento) \
+ V(ALLOCATION_SITE, AllocationSite, allocation_site) \
+ V(ASYNC_GENERATOR_REQUEST, AsyncGeneratorRequest, async_generator_request) \
+ V(CONTEXT_EXTENSION, ContextExtension, context_extension) \
+ V(DEBUG_INFO, DebugInfo, debug_info) \
V(FUNCTION_TEMPLATE_INFO, FunctionTemplateInfo, function_template_info) \
+ V(INTERCEPTOR_INFO, InterceptorInfo, interceptor_info) \
+ V(MODULE_INFO_ENTRY, ModuleInfoEntry, module_info_entry) \
+ V(MODULE, Module, module) \
V(OBJECT_TEMPLATE_INFO, ObjectTemplateInfo, object_template_info) \
- V(ALLOCATION_SITE, AllocationSite, allocation_site) \
- V(ALLOCATION_MEMENTO, AllocationMemento, allocation_memento) \
- V(SCRIPT, Script, script) \
- V(ALIASED_ARGUMENTS_ENTRY, AliasedArgumentsEntry, aliased_arguments_entry) \
- V(PROMISE_RESOLVE_THENABLE_JOB_INFO, PromiseResolveThenableJobInfo, \
- promise_resolve_thenable_job_info) \
V(PROMISE_REACTION_JOB_INFO, PromiseReactionJobInfo, \
promise_reaction_job_info) \
- V(DEBUG_INFO, DebugInfo, debug_info) \
- V(STACK_FRAME_INFO, StackFrameInfo, stack_frame_info) \
+ V(PROMISE_RESOLVE_THENABLE_JOB_INFO, PromiseResolveThenableJobInfo, \
+ promise_resolve_thenable_job_info) \
V(PROTOTYPE_INFO, PrototypeInfo, prototype_info) \
+ V(SCRIPT, Script, script) \
+ V(STACK_FRAME_INFO, StackFrameInfo, stack_frame_info) \
V(TUPLE2, Tuple2, tuple2) \
- V(TUPLE3, Tuple3, tuple3) \
- V(CONTEXT_EXTENSION, ContextExtension, context_extension) \
- V(MODULE, Module, module) \
- V(MODULE_INFO_ENTRY, ModuleInfoEntry, module_info_entry) \
- V(ASYNC_GENERATOR_REQUEST, AsyncGeneratorRequest, async_generator_request)
+ V(TUPLE3, Tuple3, tuple3)
+
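+// Judging by the entries below, V has parameters
+// (TYPE, Class, number of data fields, name).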
+#define DATA_HANDLER_LIST(V) \
+ V(LOAD_HANDLER, LoadHandler, 1, load_handler1) \
+ V(LOAD_HANDLER, LoadHandler, 2, load_handler2) \
+ V(LOAD_HANDLER, LoadHandler, 3, load_handler3) \
+ V(STORE_HANDLER, StoreHandler, 0, store_handler0) \
+ V(STORE_HANDLER, StoreHandler, 1, store_handler1) \
+ V(STORE_HANDLER, StoreHandler, 2, store_handler2) \
+ V(STORE_HANDLER, StoreHandler, 3, store_handler3)
// We use the full 16 bits of the instance_type field to encode heap object
// instance types. All the high-order bits (bit 7-15) are cleared if the object
@@ -704,80 +719,90 @@ enum InstanceType : uint16_t {
FILLER_TYPE, // LAST_DATA_TYPE
// Structs.
+ ACCESS_CHECK_INFO_TYPE,
ACCESSOR_INFO_TYPE,
ACCESSOR_PAIR_TYPE,
- ACCESS_CHECK_INFO_TYPE,
- INTERCEPTOR_INFO_TYPE,
+ ALIASED_ARGUMENTS_ENTRY_TYPE,
+ ALLOCATION_MEMENTO_TYPE,
+ ALLOCATION_SITE_TYPE,
+ ASYNC_GENERATOR_REQUEST_TYPE,
+ CONTEXT_EXTENSION_TYPE,
+ DEBUG_INFO_TYPE,
FUNCTION_TEMPLATE_INFO_TYPE,
+ INTERCEPTOR_INFO_TYPE,
+ MODULE_INFO_ENTRY_TYPE,
+ MODULE_TYPE,
OBJECT_TEMPLATE_INFO_TYPE,
- ALLOCATION_SITE_TYPE,
- ALLOCATION_MEMENTO_TYPE,
- SCRIPT_TYPE,
- ALIASED_ARGUMENTS_ENTRY_TYPE,
- PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE,
PROMISE_REACTION_JOB_INFO_TYPE,
- DEBUG_INFO_TYPE,
- STACK_FRAME_INFO_TYPE,
+ PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE,
PROTOTYPE_INFO_TYPE,
+ SCRIPT_TYPE,
+ STACK_FRAME_INFO_TYPE,
TUPLE2_TYPE,
TUPLE3_TYPE,
- CONTEXT_EXTENSION_TYPE,
- MODULE_TYPE,
- MODULE_INFO_ENTRY_TYPE,
- ASYNC_GENERATOR_REQUEST_TYPE,
+
+ // FixedArrays.
FIXED_ARRAY_TYPE, // FIRST_FIXED_ARRAY_TYPE
- HASH_TABLE_TYPE,
DESCRIPTOR_ARRAY_TYPE,
+ HASH_TABLE_TYPE,
TRANSITION_ARRAY_TYPE, // LAST_FIXED_ARRAY_TYPE
+
+ // Misc.
+ CELL_TYPE,
+ CODE_DATA_CONTAINER_TYPE,
FEEDBACK_VECTOR_TYPE,
+ LOAD_HANDLER_TYPE,
PROPERTY_ARRAY_TYPE,
- SHARED_FUNCTION_INFO_TYPE,
- CELL_TYPE,
- WEAK_CELL_TYPE,
PROPERTY_CELL_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
SMALL_ORDERED_HASH_MAP_TYPE,
SMALL_ORDERED_HASH_SET_TYPE,
- CODE_DATA_CONTAINER_TYPE,
+ STORE_HANDLER_TYPE,
+ WEAK_CELL_TYPE,
// All the following types are subtypes of JSReceiver, which corresponds to
// objects in the JS sense. The first and the last type in this range are
// the two forms of function. This organization enables using the same
// compares for checking the JS_RECEIVER and the NONCALLABLE_JS_OBJECT range.
- JS_PROXY_TYPE, // FIRST_JS_RECEIVER_TYPE
- JS_GLOBAL_OBJECT_TYPE, // FIRST_JS_OBJECT_TYPE
+  // Some of the following instance types are exposed in v8.h, so as not to
+ // unnecessarily change the ABI when we introduce new instance types in the
+ // future, we leave some space between instance types.
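+  // E.g. the values between JS_VALUE_TYPE (0x0411) and JS_API_OBJECT_TYPE
+  // (0x0420) are left unassigned for future receiver types.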
+ JS_PROXY_TYPE = 0x0400, // FIRST_JS_RECEIVER_TYPE
+ JS_GLOBAL_OBJECT_TYPE, // FIRST_JS_OBJECT_TYPE
JS_GLOBAL_PROXY_TYPE,
JS_MODULE_NAMESPACE_TYPE,
// Like JS_API_OBJECT_TYPE, but requires access checks and/or has
// interceptors.
- JS_SPECIAL_API_OBJECT_TYPE, // LAST_SPECIAL_RECEIVER_TYPE
- JS_VALUE_TYPE, // LAST_CUSTOM_ELEMENTS_RECEIVER
- JS_MESSAGE_OBJECT_TYPE,
- JS_DATE_TYPE,
+ JS_SPECIAL_API_OBJECT_TYPE = 0x0410, // LAST_SPECIAL_RECEIVER_TYPE
+ JS_VALUE_TYPE, // LAST_CUSTOM_ELEMENTS_RECEIVER
// Like JS_OBJECT_TYPE, but created from API function.
- JS_API_OBJECT_TYPE,
+ JS_API_OBJECT_TYPE = 0x0420,
JS_OBJECT_TYPE,
JS_ARGUMENTS_TYPE,
+ JS_ARRAY_BUFFER_TYPE,
+ JS_ARRAY_TYPE,
+ JS_ASYNC_FROM_SYNC_ITERATOR_TYPE,
+ JS_ASYNC_GENERATOR_OBJECT_TYPE,
JS_CONTEXT_EXTENSION_OBJECT_TYPE,
+ JS_DATE_TYPE,
+ JS_ERROR_TYPE,
JS_GENERATOR_OBJECT_TYPE,
- JS_ASYNC_GENERATOR_OBJECT_TYPE,
- JS_ARRAY_TYPE,
- JS_ARRAY_BUFFER_TYPE,
- JS_TYPED_ARRAY_TYPE,
- JS_DATA_VIEW_TYPE,
- JS_SET_TYPE,
JS_MAP_TYPE,
- JS_SET_KEY_VALUE_ITERATOR_TYPE,
- JS_SET_VALUE_ITERATOR_TYPE,
JS_MAP_KEY_ITERATOR_TYPE,
JS_MAP_KEY_VALUE_ITERATOR_TYPE,
JS_MAP_VALUE_ITERATOR_TYPE,
- JS_WEAK_MAP_TYPE,
- JS_WEAK_SET_TYPE,
+ JS_MESSAGE_OBJECT_TYPE,
JS_PROMISE_TYPE,
JS_REGEXP_TYPE,
- JS_ERROR_TYPE,
- JS_ASYNC_FROM_SYNC_ITERATOR_TYPE,
+ JS_SET_TYPE,
+ JS_SET_KEY_VALUE_ITERATOR_TYPE,
+ JS_SET_VALUE_ITERATOR_TYPE,
JS_STRING_ITERATOR_TYPE,
+ JS_WEAK_MAP_TYPE,
+ JS_WEAK_SET_TYPE,
+
+ JS_TYPED_ARRAY_TYPE,
+ JS_DATA_VIEW_TYPE,
#define ARRAY_ITERATOR_TYPE(type) type,
ARRAY_ITERATOR_TYPE_LIST(ARRAY_ITERATOR_TYPE)
@@ -857,55 +882,6 @@ STATIC_ASSERT(FOREIGN_TYPE == Internals::kForeignType);
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
InstanceType instance_type);
-#define FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(V) \
- V(BYTECODE_ARRAY_CONSTANT_POOL_SUB_TYPE) \
- V(BYTECODE_ARRAY_HANDLER_TABLE_SUB_TYPE) \
- V(CODE_STUBS_TABLE_SUB_TYPE) \
- V(COMPILATION_CACHE_TABLE_SUB_TYPE) \
- V(CONTEXT_SUB_TYPE) \
- V(COPY_ON_WRITE_SUB_TYPE) \
- V(DEOPTIMIZATION_DATA_SUB_TYPE) \
- V(DESCRIPTOR_ARRAY_SUB_TYPE) \
- V(EMBEDDED_OBJECT_SUB_TYPE) \
- V(ENUM_CACHE_SUB_TYPE) \
- V(ENUM_INDICES_CACHE_SUB_TYPE) \
- V(DEPENDENT_CODE_SUB_TYPE) \
- V(DICTIONARY_ELEMENTS_SUB_TYPE) \
- V(DICTIONARY_PROPERTIES_SUB_TYPE) \
- V(EMPTY_PROPERTIES_DICTIONARY_SUB_TYPE) \
- V(PACKED_ELEMENTS_SUB_TYPE) \
- V(FAST_PROPERTIES_SUB_TYPE) \
- V(FAST_TEMPLATE_INSTANTIATIONS_CACHE_SUB_TYPE) \
- V(HANDLER_TABLE_SUB_TYPE) \
- V(JS_COLLECTION_SUB_TYPE) \
- V(JS_WEAK_COLLECTION_SUB_TYPE) \
- V(NOSCRIPT_SHARED_FUNCTION_INFOS_SUB_TYPE) \
- V(NUMBER_STRING_CACHE_SUB_TYPE) \
- V(OBJECT_TO_CODE_SUB_TYPE) \
- V(OPTIMIZED_CODE_LITERALS_SUB_TYPE) \
- V(OPTIMIZED_CODE_MAP_SUB_TYPE) \
- V(PROTOTYPE_USERS_SUB_TYPE) \
- V(REGEXP_MULTIPLE_CACHE_SUB_TYPE) \
- V(RETAINED_MAPS_SUB_TYPE) \
- V(SCOPE_INFO_SUB_TYPE) \
- V(SCRIPT_LIST_SUB_TYPE) \
- V(SERIALIZED_TEMPLATES_SUB_TYPE) \
- V(SHARED_FUNCTION_INFOS_SUB_TYPE) \
- V(SINGLE_CHARACTER_STRING_CACHE_SUB_TYPE) \
- V(SLOW_TEMPLATE_INSTANTIATIONS_CACHE_SUB_TYPE) \
- V(STRING_SPLIT_CACHE_SUB_TYPE) \
- V(STRING_TABLE_SUB_TYPE) \
- V(TEMPLATE_INFO_SUB_TYPE) \
- V(FEEDBACK_METADATA_SUB_TYPE) \
- V(WEAK_NEW_SPACE_OBJECT_TO_CODE_SUB_TYPE)
-
-enum FixedArraySubInstanceType {
-#define DEFINE_FIXED_ARRAY_SUB_INSTANCE_TYPE(name) name,
- FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(DEFINE_FIXED_ARRAY_SUB_INSTANCE_TYPE)
-#undef DEFINE_FIXED_ARRAY_SUB_INSTANCE_TYPE
- LAST_FIXED_ARRAY_SUB_TYPE = WEAK_NEW_SPACE_OBJECT_TO_CODE_SUB_TYPE
-};
-
// Result of an abstract relational comparison of x and y, implemented according
// to ES6 section 7.2.11 Abstract Relational Comparison.
enum class ComparisonResult {
@@ -921,6 +897,7 @@ bool ComparisonResultToBool(Operation op, ComparisonResult result);
class AbstractCode;
class AccessorPair;
class AllocationSite;
+class ByteArray;
class Cell;
class ConsString;
class DependentCode;
@@ -944,7 +921,6 @@ class RootVisitor;
class SafepointEntry;
class SharedFunctionInfo;
class StringStream;
-class TypeFeedbackInfo;
class FeedbackMetadata;
class FeedbackVector;
class WeakCell;
@@ -994,6 +970,7 @@ template <class C> inline bool Is(Object* obj);
V(Constructor) \
V(Context) \
V(CoverageInfo) \
+ V(DataHandler) \
V(DeoptimizationData) \
V(DependentCode) \
V(DescriptorArray) \
@@ -1062,6 +1039,7 @@ template <class C> inline bool Is(Object* obj);
V(JSWeakCollection) \
V(JSWeakMap) \
V(JSWeakSet) \
+ V(LoadHandler) \
V(Map) \
V(MapCache) \
V(ModuleInfo) \
@@ -1093,6 +1071,7 @@ template <class C> inline bool Is(Object* obj);
V(SmallOrderedHashMap) \
V(SmallOrderedHashSet) \
V(SourcePositionTableWithFrameCache) \
+ V(StoreHandler) \
V(String) \
V(StringSet) \
V(StringTable) \
@@ -1105,7 +1084,6 @@ template <class C> inline bool Is(Object* obj);
V(TemplateObjectDescription) \
V(ThinString) \
V(TransitionArray) \
- V(TypeFeedbackInfo) \
V(Undetectable) \
V(UniqueName) \
V(WasmInstanceObject) \
@@ -2204,10 +2182,12 @@ class JSReceiver: public HeapObject {
Handle<JSReceiver> object);
MUST_USE_RESULT static MaybeHandle<FixedArray> GetOwnValues(
- Handle<JSReceiver> object, PropertyFilter filter);
+ Handle<JSReceiver> object, PropertyFilter filter,
+ bool try_fast_path = true);
MUST_USE_RESULT static MaybeHandle<FixedArray> GetOwnEntries(
- Handle<JSReceiver> object, PropertyFilter filter);
+ Handle<JSReceiver> object, PropertyFilter filter,
+ bool try_fast_path = true);
static const int kHashMask = PropertyArray::HashField::kMask;
@@ -2391,7 +2371,8 @@ class JSObject: public JSReceiver {
Handle<Map> new_map,
Isolate* isolate);
static bool UnregisterPrototypeUser(Handle<Map> user, Isolate* isolate);
- static void InvalidatePrototypeChains(Map* map);
+ static Map* InvalidatePrototypeChains(Map* map);
+ static void InvalidatePrototypeValidityCell(JSGlobalObject* global);
// Updates prototype chain tracking information when an object changes its
// map from |old_map| to |new_map|.
@@ -2691,6 +2672,7 @@ class JSObject: public JSReceiver {
STATIC_ASSERT(kHeaderSize == Internals::kJSObjectHeaderSize);
static const int kMaxInObjectProperties =
(kMaxInstanceSize - kHeaderSize) >> kPointerSizeLog2;
+ STATIC_ASSERT(kMaxInObjectProperties <= kMaxNumberOfDescriptors);
class BodyDescriptor;
// No weak fields.
@@ -2801,414 +2783,6 @@ class JSIteratorResult: public JSObject {
DISALLOW_IMPLICIT_CONSTRUCTORS(JSIteratorResult);
};
-
-// Common superclass for FixedArrays that allow implementations to share
-// common accessors and some code paths.
-class FixedArrayBase: public HeapObject {
- public:
- // [length]: length of the array.
- inline int length() const;
- inline void set_length(int value);
-
- // Get and set the length using acquire loads and release stores.
- inline int synchronized_length() const;
- inline void synchronized_set_length(int value);
-
- DECL_CAST(FixedArrayBase)
-
- static int GetMaxLengthForNewSpaceAllocation(ElementsKind kind);
-
- bool IsCowArray() const;
-
- // Layout description.
- // Length is smi tagged when it is stored.
- static const int kLengthOffset = HeapObject::kHeaderSize;
- static const int kHeaderSize = kLengthOffset + kPointerSize;
-};
-
-
-class FixedDoubleArray;
-class IncrementalMarking;
-
-
-// FixedArray describes fixed-sized arrays with element type Object*.
-class FixedArray: public FixedArrayBase {
- public:
- // Setter and getter for elements.
- inline Object* get(int index) const;
- static inline Handle<Object> get(FixedArray* array, int index,
- Isolate* isolate);
- template <class T>
- MaybeHandle<T> GetValue(Isolate* isolate, int index) const;
-
- template <class T>
- Handle<T> GetValueChecked(Isolate* isolate, int index) const;
-
- // Return a grown copy if the index is bigger than the array's length.
- static Handle<FixedArray> SetAndGrow(Handle<FixedArray> array, int index,
- Handle<Object> value);
-
- // Setter that uses write barrier.
- inline void set(int index, Object* value);
- inline bool is_the_hole(Isolate* isolate, int index);
-
- // Setter that doesn't need write barrier.
- inline void set(int index, Smi* value);
- // Setter with explicit barrier mode.
- inline void set(int index, Object* value, WriteBarrierMode mode);
-
- // Setters for frequently used oddballs located in old space.
- inline void set_undefined(int index);
- inline void set_undefined(Isolate* isolate, int index);
- inline void set_null(int index);
- inline void set_null(Isolate* isolate, int index);
- inline void set_the_hole(int index);
- inline void set_the_hole(Isolate* isolate, int index);
-
- inline Object** GetFirstElementAddress();
- inline bool ContainsOnlySmisOrHoles();
-
- // Gives access to raw memory which stores the array's data.
- inline Object** data_start();
-
- inline void FillWithHoles(int from, int to);
-
- // Shrink length and insert filler objects.
- void Shrink(int length);
-
- // Copy a sub array from the receiver to dest.
- void CopyTo(int pos, FixedArray* dest, int dest_pos, int len) const;
-
- // Garbage collection support.
- static constexpr int SizeFor(int length) {
- return kHeaderSize + length * kPointerSize;
- }
-
- // Code Generation support.
- static constexpr int OffsetOfElementAt(int index) { return SizeFor(index); }
-
- // Garbage collection support.
- inline Object** RawFieldOfElementAt(int index);
-
- DECL_CAST(FixedArray)
-
- // Maximal allowed size, in bytes, of a single FixedArray.
- // Prevents overflowing size computations, as well as extreme memory
- // consumption.
- static const int kMaxSize = 128 * MB * kPointerSize;
- // Maximally allowed length of a FixedArray.
- static const int kMaxLength = (kMaxSize - kHeaderSize) / kPointerSize;
- // Maximally allowed length for regular (non large object space) object.
- STATIC_ASSERT(kMaxRegularHeapObjectSize < kMaxSize);
- static const int kMaxRegularLength =
- (kMaxRegularHeapObjectSize - kHeaderSize) / kPointerSize;
-
- // Dispatched behavior.
- DECL_PRINTER(FixedArray)
- DECL_VERIFIER(FixedArray)
-#ifdef DEBUG
- // Checks if two FixedArrays have identical contents.
- bool IsEqualTo(FixedArray* other);
-#endif
-
- typedef FlexibleBodyDescriptor<kHeaderSize> BodyDescriptor;
- // No weak fields.
- typedef BodyDescriptor BodyDescriptorWeak;
-
- protected:
- // Set operation on FixedArray without using write barriers. Can
- // only be used for storing old space objects or smis.
- static inline void NoWriteBarrierSet(FixedArray* array,
- int index,
- Object* value);
-
- private:
- STATIC_ASSERT(kHeaderSize == Internals::kFixedArrayHeaderSize);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(FixedArray);
-};
-
-// FixedArray alias added only because of IsFixedArrayExact() predicate, which
-// checks for the exact instance type FIXED_ARRAY_TYPE instead of a range
-// check: [FIRST_FIXED_ARRAY_TYPE, LAST_FIXED_ARRAY_TYPE].
-class FixedArrayExact final : public FixedArray {
- public:
- DECL_CAST(FixedArrayExact)
-};
-
-// FixedDoubleArray describes fixed-sized arrays with element type double.
-class FixedDoubleArray: public FixedArrayBase {
- public:
- // Setter and getter for elements.
- inline double get_scalar(int index);
- inline uint64_t get_representation(int index);
- static inline Handle<Object> get(FixedDoubleArray* array, int index,
- Isolate* isolate);
- inline void set(int index, double value);
- inline void set_the_hole(Isolate* isolate, int index);
- inline void set_the_hole(int index);
-
- // Checking for the hole.
- inline bool is_the_hole(Isolate* isolate, int index);
- inline bool is_the_hole(int index);
-
- // Garbage collection support.
- inline static int SizeFor(int length) {
- return kHeaderSize + length * kDoubleSize;
- }
-
- // Gives access to raw memory which stores the array's data.
- inline double* data_start();
-
- inline void FillWithHoles(int from, int to);
-
- // Code Generation support.
- static int OffsetOfElementAt(int index) { return SizeFor(index); }
-
- DECL_CAST(FixedDoubleArray)
-
- // Maximal allowed size, in bytes, of a single FixedDoubleArray.
- // Prevents overflowing size computations, as well as extreme memory
- // consumption.
- static const int kMaxSize = 512 * MB;
- // Maximally allowed length of a FixedArray.
- static const int kMaxLength = (kMaxSize - kHeaderSize) / kDoubleSize;
-
- // Dispatched behavior.
- DECL_PRINTER(FixedDoubleArray)
- DECL_VERIFIER(FixedDoubleArray)
-
- class BodyDescriptor;
- // No weak fields.
- typedef BodyDescriptor BodyDescriptorWeak;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(FixedDoubleArray);
-};
-
-class WeakFixedArray : public FixedArray {
- public:
- // If |maybe_array| is not a WeakFixedArray, a fresh one will be allocated.
- // This function does not check if the value exists already, callers must
- // ensure this themselves if necessary.
- static Handle<WeakFixedArray> Add(Handle<Object> maybe_array,
- Handle<HeapObject> value,
- int* assigned_index = nullptr);
-
- // Returns true if an entry was found and removed.
- bool Remove(Handle<HeapObject> value);
-
- class NullCallback {
- public:
- static void Callback(Object* value, int old_index, int new_index) {}
- };
-
- template <class CompactionCallback>
- void Compact();
-
- inline Object* Get(int index) const;
- inline void Clear(int index);
- inline int Length() const;
-
- inline bool IsEmptySlot(int index) const;
- static Object* Empty() { return Smi::kZero; }
-
- class Iterator {
- public:
- explicit Iterator(Object* maybe_array) : list_(nullptr) {
- Reset(maybe_array);
- }
- void Reset(Object* maybe_array);
-
- template <class T>
- inline T* Next();
-
- private:
- int index_;
- WeakFixedArray* list_;
-#ifdef DEBUG
- int last_used_index_;
- DisallowHeapAllocation no_gc_;
-#endif // DEBUG
- DISALLOW_COPY_AND_ASSIGN(Iterator);
- };
-
- DECL_CAST(WeakFixedArray)
-
- private:
- static const int kLastUsedIndexIndex = 0;
- static const int kFirstIndex = 1;
-
- static Handle<WeakFixedArray> Allocate(
- Isolate* isolate, int size, Handle<WeakFixedArray> initialize_from);
-
- static void Set(Handle<WeakFixedArray> array, int index,
- Handle<HeapObject> value);
- inline void clear(int index);
-
- inline int last_used_index() const;
- inline void set_last_used_index(int index);
-
- // Disallow inherited setters.
- void set(int index, Smi* value);
- void set(int index, Object* value);
- void set(int index, Object* value, WriteBarrierMode mode);
- DISALLOW_IMPLICIT_CONSTRUCTORS(WeakFixedArray);
-};
-
-// Generic array grows dynamically with O(1) amortized insertion.
-//
-// ArrayList is a FixedArray with static convenience methods for adding more
-// elements. The Length() method returns the number of elements in the list, not
-// the allocated size. The number of elements is stored at kLengthIndex and is
-// updated with every insertion. The elements of the ArrayList are stored in the
-// underlying FixedArray starting at kFirstIndex.
-class ArrayList : public FixedArray {
- public:
- enum AddMode {
- kNone,
- // Use this if GC can delete elements from the array.
- kReloadLengthAfterAllocation,
- };
- static Handle<ArrayList> Add(Handle<ArrayList> array, Handle<Object> obj,
- AddMode mode = kNone);
- static Handle<ArrayList> Add(Handle<ArrayList> array, Handle<Object> obj1,
- Handle<Object> obj2, AddMode = kNone);
- static Handle<ArrayList> New(Isolate* isolate, int size);
-
- // Returns the number of elements in the list, not the allocated size, which
- // is length(). Lower and upper case length() return different results!
- inline int Length() const;
-
- // Sets the Length() as used by Elements(). Does not change the underlying
- // storage capacity, i.e., length().
- inline void SetLength(int length);
- inline Object* Get(int index) const;
- inline Object** Slot(int index);
-
- // Set the element at index to obj. The underlying array must be large enough.
- // If you need to grow the ArrayList, use the static Add() methods instead.
- inline void Set(int index, Object* obj,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
-
- // Set the element at index to undefined. This does not change the Length().
- inline void Clear(int index, Object* undefined);
-
- // Return a copy of the list of size Length() without the first entry. The
- // number returned by Length() is stored in the first entry.
- static Handle<FixedArray> Elements(Handle<ArrayList> array);
- bool IsFull();
- DECL_CAST(ArrayList)
-
- private:
- static Handle<ArrayList> EnsureSpace(Handle<ArrayList> array, int length);
- static const int kLengthIndex = 0;
- static const int kFirstIndex = 1;
- DISALLOW_IMPLICIT_CONSTRUCTORS(ArrayList);
-};
-
-enum SearchMode { ALL_ENTRIES, VALID_ENTRIES };
-
-template <SearchMode search_mode, typename T>
-inline int Search(T* array, Name* name, int valid_entries = 0,
- int* out_insertion_index = nullptr);
-
-// ByteArray represents fixed sized byte arrays. Used for the relocation info
-// that is attached to code objects.
-class ByteArray: public FixedArrayBase {
- public:
- inline int Size();
-
- // Setter and getter.
- inline byte get(int index) const;
- inline void set(int index, byte value);
-
- // Copy in / copy out whole byte slices.
- inline void copy_out(int index, byte* buffer, int length);
- inline void copy_in(int index, const byte* buffer, int length);
-
- // Treat contents as an int array.
- inline int get_int(int index) const;
- inline void set_int(int index, int value);
-
- inline uint32_t get_uint32(int index) const;
- inline void set_uint32(int index, uint32_t value);
-
- // Clear uninitialized padding space. This ensures that the snapshot content
- // is deterministic.
- inline void clear_padding();
-
- static int SizeFor(int length) {
- return OBJECT_POINTER_ALIGN(kHeaderSize + length);
- }
- // We use byte arrays for free blocks in the heap. Given a desired size in
- // bytes that is a multiple of the word size and big enough to hold a byte
- // array, this function returns the number of elements a byte array should
- // have.
- static int LengthFor(int size_in_bytes) {
- DCHECK(IsAligned(size_in_bytes, kPointerSize));
- DCHECK_GE(size_in_bytes, kHeaderSize);
- return size_in_bytes - kHeaderSize;
- }
-
- // Returns data start address.
- inline Address GetDataStartAddress();
-
- inline int DataSize() const;
-
- // Returns a pointer to the ByteArray object for a given data start address.
- static inline ByteArray* FromDataStartAddress(Address address);
-
- DECL_CAST(ByteArray)
-
- // Dispatched behavior.
- inline int ByteArraySize();
- DECL_PRINTER(ByteArray)
- DECL_VERIFIER(ByteArray)
-
- // Layout description.
- static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
-
- // Maximal memory consumption for a single ByteArray.
- static const int kMaxSize = 512 * MB;
- // Maximal length of a single ByteArray.
- static const int kMaxLength = kMaxSize - kHeaderSize;
-
- class BodyDescriptor;
- // No weak fields.
- typedef BodyDescriptor BodyDescriptorWeak;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ByteArray);
-};
-
-// Wrapper class for ByteArray which can store arbitrary C++ classes, as long
-// as they can be copied with memcpy.
-template <class T>
-class PodArray : public ByteArray {
- public:
- static Handle<PodArray<T>> New(Isolate* isolate, int length,
- PretenureFlag pretenure = NOT_TENURED);
- void copy_out(int index, T* result) {
- ByteArray::copy_out(index * sizeof(T), reinterpret_cast<byte*>(result),
- sizeof(T));
- }
- T get(int index) {
- T result;
- copy_out(index, &result);
- return result;
- }
- void set(int index, const T& value) {
- copy_in(index * sizeof(T), reinterpret_cast<const byte*>(&value),
- sizeof(T));
- }
- int length() { return ByteArray::length() / sizeof(T); }
- DECL_CAST(PodArray<T>)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(PodArray<T>);
-};
-
// FreeSpace are fixed-size free memory blocks used by the heap and GC.
// They look like heap objects (are heap object tagged and have a map) so that
// the heap remains iterable. They have a size and a next pointer.
@@ -3245,136 +2819,6 @@ class FreeSpace: public HeapObject {
DISALLOW_IMPLICIT_CONSTRUCTORS(FreeSpace);
};
-
-// V has parameters (Type, type, TYPE, C type, element_size)
-#define TYPED_ARRAYS(V) \
- V(Uint8, uint8, UINT8, uint8_t, 1) \
- V(Int8, int8, INT8, int8_t, 1) \
- V(Uint16, uint16, UINT16, uint16_t, 2) \
- V(Int16, int16, INT16, int16_t, 2) \
- V(Uint32, uint32, UINT32, uint32_t, 4) \
- V(Int32, int32, INT32, int32_t, 4) \
- V(Float32, float32, FLOAT32, float, 4) \
- V(Float64, float64, FLOAT64, double, 8) \
- V(Uint8Clamped, uint8_clamped, UINT8_CLAMPED, uint8_t, 1)
-
-
-class FixedTypedArrayBase: public FixedArrayBase {
- public:
- // [base_pointer]: Either points to the FixedTypedArrayBase itself or nullptr.
- DECL_ACCESSORS(base_pointer, Object)
-
- // [external_pointer]: Contains the offset between base_pointer and the start
- // of the data. If the base_pointer is a nullptr, the external_pointer
- // therefore points to the actual backing store.
- DECL_ACCESSORS(external_pointer, void)
-
- // Dispatched behavior.
- DECL_CAST(FixedTypedArrayBase)
-
- static const int kBasePointerOffset = FixedArrayBase::kHeaderSize;
- static const int kExternalPointerOffset = kBasePointerOffset + kPointerSize;
- static const int kHeaderSize =
- DOUBLE_POINTER_ALIGN(kExternalPointerOffset + kPointerSize);
-
- static const int kDataOffset = kHeaderSize;
-
- static const int kMaxElementSize = 8;
-
-#ifdef V8_HOST_ARCH_32_BIT
- static const size_t kMaxByteLength = std::numeric_limits<size_t>::max();
-#else
- static const size_t kMaxByteLength =
- static_cast<size_t>(Smi::kMaxValue) * kMaxElementSize;
-#endif // V8_HOST_ARCH_32_BIT
-
- static const size_t kMaxLength = Smi::kMaxValue;
-
- class BodyDescriptor;
- // No weak fields.
- typedef BodyDescriptor BodyDescriptorWeak;
-
- inline int size() const;
-
- static inline int TypedArraySize(InstanceType type, int length);
- inline int TypedArraySize(InstanceType type) const;
-
- // Use with care: returns raw pointer into heap.
- inline void* DataPtr();
-
- inline int DataSize() const;
-
- inline size_t ByteLength() const;
-
- private:
- static inline int ElementSize(InstanceType type);
-
- inline int DataSize(InstanceType type) const;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(FixedTypedArrayBase);
-};
-
-
-template <class Traits>
-class FixedTypedArray: public FixedTypedArrayBase {
- public:
- typedef typename Traits::ElementType ElementType;
- static const InstanceType kInstanceType = Traits::kInstanceType;
-
- DECL_CAST(FixedTypedArray<Traits>)
-
- inline ElementType get_scalar(int index);
- static inline Handle<Object> get(FixedTypedArray* array, int index);
- inline void set(int index, ElementType value);
-
- static inline ElementType from(int value);
- static inline ElementType from(uint32_t value);
- static inline ElementType from(double value);
-
- // This accessor applies the correct conversion from Smi, HeapNumber
- // and undefined.
- inline void SetValue(uint32_t index, Object* value);
-
- DECL_PRINTER(FixedTypedArray)
- DECL_VERIFIER(FixedTypedArray)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(FixedTypedArray);
-};
-
-#define FIXED_TYPED_ARRAY_TRAITS(Type, type, TYPE, elementType, size) \
- STATIC_ASSERT(size <= FixedTypedArrayBase::kMaxElementSize); \
- class Type##ArrayTraits { \
- public: /* NOLINT */ \
- typedef elementType ElementType; \
- static const InstanceType kInstanceType = FIXED_##TYPE##_ARRAY_TYPE; \
- static const char* Designator() { return #type " array"; } \
- static inline Handle<Object> ToHandle(Isolate* isolate, \
- elementType scalar); \
- static inline elementType defaultValue(); \
- }; \
- \
- typedef FixedTypedArray<Type##ArrayTraits> Fixed##Type##Array;
-
-TYPED_ARRAYS(FIXED_TYPED_ARRAY_TRAITS)
-
-#undef FIXED_TYPED_ARRAY_TRAITS
-
-class TemplateList : public FixedArray {
- public:
- static Handle<TemplateList> New(Isolate* isolate, int size);
- inline int length() const;
- inline Object* get(int index) const;
- inline void set(int index, Object* value);
- static Handle<TemplateList> Add(Isolate* isolate, Handle<TemplateList> list,
- Handle<Object> value);
- DECL_CAST(TemplateList)
- private:
- static const int kLengthIndex = 0;
- static const int kFirstElementIndex = kLengthIndex + 1;
- DISALLOW_IMPLICIT_CONSTRUCTORS(TemplateList);
-};
-
class PrototypeInfo;
// An abstract superclass, a marker class really, for simple structure classes.
@@ -3779,6 +3223,7 @@ enum BuiltinFunctionId {
kArrayKeys,
kArrayValues,
kArrayIteratorNext,
+ kBigIntConstructor,
kMapSize,
kSetSize,
kMapIteratorNext,
@@ -3795,6 +3240,8 @@ enum BuiltinFunctionId {
kGlobalUnescape,
kGlobalIsFinite,
kGlobalIsNaN,
+ kNumberConstructor,
+ kSymbolConstructor,
kTypedArrayByteLength,
kTypedArrayByteOffset,
kTypedArrayEntries,
@@ -3803,6 +3250,7 @@ enum BuiltinFunctionId {
kTypedArrayToStringTag,
kTypedArrayValues,
kSharedArrayBufferByteLength,
+ kStringConstructor,
kStringIterator,
kStringIteratorNext,
kStringToLowerCaseIntl,
@@ -4470,48 +3918,6 @@ class JSPromise : public JSObject {
STATIC_ASSERT(v8::Promise::kRejected == 2);
};
-class TypeFeedbackInfo : public Tuple3 {
- public:
- inline int ic_total_count();
- inline void set_ic_total_count(int count);
-
- inline int ic_with_type_info_count();
- inline void change_ic_with_type_info_count(int delta);
-
- inline int ic_generic_count();
- inline void change_ic_generic_count(int delta);
-
- inline void initialize_storage();
-
- inline void change_own_type_change_checksum();
- inline int own_type_change_checksum();
-
- inline void set_inlined_type_change_checksum(int checksum);
- inline bool matches_inlined_type_change_checksum(int checksum);
-
- DECL_CAST(TypeFeedbackInfo)
-
- static const int kStorage1Offset = kValue1Offset;
- static const int kStorage2Offset = kValue2Offset;
- static const int kStorage3Offset = kValue3Offset;
-
- private:
- static const int kTypeChangeChecksumBits = 7;
-
- class ICTotalCountField: public BitField<int, 0,
- kSmiValueSize - kTypeChangeChecksumBits> {}; // NOLINT
- class OwnTypeChangeChecksum: public BitField<int,
- kSmiValueSize - kTypeChangeChecksumBits,
- kTypeChangeChecksumBits> {}; // NOLINT
- class ICsWithTypeInfoCountField: public BitField<int, 0,
- kSmiValueSize - kTypeChangeChecksumBits> {}; // NOLINT
- class InlinedTypeChangeChecksum: public BitField<int,
- kSmiValueSize - kTypeChangeChecksumBits,
- kTypeChangeChecksumBits> {}; // NOLINT
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(TypeFeedbackInfo);
-};
-
class AllocationSite: public Struct {
public:
static const uint32_t kMaximumArrayBytesToPretransition = 8 * 1024;
@@ -4904,7 +4310,7 @@ class JSProxy: public JSReceiver {
// [handler]: The handler property.
DECL_ACCESSORS(handler, Object)
// [target]: The target property.
- DECL_ACCESSORS(target, JSReceiver)
+ DECL_ACCESSORS(target, Object)
static MaybeHandle<Context> GetFunctionRealm(Handle<JSProxy> proxy);
@@ -5015,58 +4421,23 @@ class JSProxy: public JSReceiver {
DISALLOW_IMPLICIT_CONSTRUCTORS(JSProxy);
};
-
-class JSCollection : public JSObject {
- public:
- // [table]: the backing hash table
- DECL_ACCESSORS(table, Object)
-
- static const int kTableOffset = JSObject::kHeaderSize;
- static const int kSize = kTableOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSCollection);
-};
-
-
-// The JSSet describes EcmaScript Harmony sets
-// TODO(marja): When moving JSSet out of objects.h, move JSSetIterator (from
-// objects/hash-table.h) into the same file.
-class JSSet : public JSCollection {
- public:
- DECL_CAST(JSSet)
-
- static void Initialize(Handle<JSSet> set, Isolate* isolate);
- static void Clear(Handle<JSSet> set);
-
- // Dispatched behavior.
- DECL_PRINTER(JSSet)
- DECL_VERIFIER(JSSet)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSSet);
-};
-
-
-// The JSMap describes EcmaScript Harmony maps
-// TODO(marja): When moving JSMap out of objects.h, move JSMapIterator (from
-// objects/hash-table.h) into the same file.
-class JSMap : public JSCollection {
+// JSProxyRevocableResult is just a JSObject with a specific initial map.
+// This initial map adds in-object properties for "proxy" and "revoke".
+// See https://tc39.github.io/ecma262/#sec-proxy.revocable
+class JSProxyRevocableResult : public JSObject {
public:
- DECL_CAST(JSMap)
-
- static void Initialize(Handle<JSMap> map, Isolate* isolate);
- static void Clear(Handle<JSMap> map);
-
- // Dispatched behavior.
- DECL_PRINTER(JSMap)
- DECL_VERIFIER(JSMap)
+ // Offsets of object fields.
+ static const int kProxyOffset = JSObject::kHeaderSize;
+ static const int kRevokeOffset = kProxyOffset + kPointerSize;
+ static const int kSize = kRevokeOffset + kPointerSize;
+ // Indices of in-object properties.
+ static const int kProxyIndex = 0;
+ static const int kRevokeIndex = 1;
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSMap);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSProxyRevocableResult);
};
-
// The [Async-from-Sync Iterator] object
// (proposal-async-iteration/#sec-async-from-sync-iterator-objects)
// An object which wraps an ordinary Iterator and converts it to behave
@@ -5116,82 +4487,13 @@ class JSStringIterator : public JSObject {
DISALLOW_IMPLICIT_CONSTRUCTORS(JSStringIterator);
};
-// Base class for both JSWeakMap and JSWeakSet
-class JSWeakCollection: public JSObject {
- public:
- DECL_CAST(JSWeakCollection)
-
- // [table]: the backing hash table mapping keys to values.
- DECL_ACCESSORS(table, Object)
-
- // [next]: linked list of encountered weak maps during GC.
- DECL_ACCESSORS(next, Object)
-
- static void Initialize(Handle<JSWeakCollection> collection, Isolate* isolate);
- static void Set(Handle<JSWeakCollection> collection, Handle<Object> key,
- Handle<Object> value, int32_t hash);
- static bool Delete(Handle<JSWeakCollection> collection, Handle<Object> key,
- int32_t hash);
- static Handle<JSArray> GetEntries(Handle<JSWeakCollection> holder,
- int max_entries);
-
- static const int kTableOffset = JSObject::kHeaderSize;
- static const int kNextOffset = kTableOffset + kPointerSize;
- static const int kSize = kNextOffset + kPointerSize;
-
- // Visiting policy defines whether the table and next collection fields
- // should be visited or not.
- enum BodyVisitingPolicy { kIgnoreWeakness, kRespectWeakness };
-
- // Iterates the function object according to the visiting policy.
- template <BodyVisitingPolicy>
- class BodyDescriptorImpl;
-
- // Visit the whole object.
- typedef BodyDescriptorImpl<kIgnoreWeakness> BodyDescriptor;
-
- // Don't visit table and next collection fields.
- typedef BodyDescriptorImpl<kRespectWeakness> BodyDescriptorWeak;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSWeakCollection);
-};
-
-
-// The JSWeakMap describes EcmaScript Harmony weak maps
-class JSWeakMap: public JSWeakCollection {
- public:
- DECL_CAST(JSWeakMap)
-
- // Dispatched behavior.
- DECL_PRINTER(JSWeakMap)
- DECL_VERIFIER(JSWeakMap)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSWeakMap);
-};
-
-
-// The JSWeakSet describes EcmaScript Harmony weak sets
-class JSWeakSet: public JSWeakCollection {
- public:
- DECL_CAST(JSWeakSet)
-
- // Dispatched behavior.
- DECL_PRINTER(JSWeakSet)
- DECL_VERIFIER(JSWeakSet)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSWeakSet);
-};
-
-
// Foreign describes objects pointing from JavaScript to C structures.
class Foreign: public HeapObject {
public:
// [address]: field containing the address.
inline Address foreign_address();
- inline void set_foreign_address(Address value);
+
+ static inline bool IsNormalized(Object* object);
DECL_CAST(Foreign)
@@ -5211,6 +4513,12 @@ class Foreign: public HeapObject {
typedef BodyDescriptor BodyDescriptorWeak;
private:
+ friend class Heap;
+ friend class SerializerDeserializer;
+ friend class StartupSerializer;
+
+ inline void set_foreign_address(Address value);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(Foreign);
};
@@ -5230,6 +4538,7 @@ class AccessorInfo: public Struct {
DECL_ACCESSORS(expected_receiver_type, Object)
// This directly points at a foreign C function to be used from the runtime.
DECL_ACCESSORS(getter, Object)
+ inline bool has_getter();
DECL_ACCESSORS(setter, Object)
// This either points at the same as above, or a trampoline in case we are
// running with the simulator. Use these entries from generated code.
@@ -5394,6 +4703,7 @@ class InterceptorInfo: public Struct {
DECL_BOOLEAN_ACCESSORS(can_intercept_symbols)
DECL_BOOLEAN_ACCESSORS(all_can_read)
DECL_BOOLEAN_ACCESSORS(non_masking)
+ DECL_BOOLEAN_ACCESSORS(is_named)
inline int flags() const;
inline void set_flags(int flags);
@@ -5418,6 +4728,7 @@ class InterceptorInfo: public Struct {
static const int kCanInterceptSymbolsBit = 0;
static const int kAllCanReadBit = 1;
static const int kNonMasking = 2;
+ static const int kNamed = 3;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(InterceptorInfo);
diff --git a/deps/v8/src/objects/bigint.cc b/deps/v8/src/objects/bigint.cc
index 85424600c0..df5f854395 100644
--- a/deps/v8/src/objects/bigint.cc
+++ b/deps/v8/src/objects/bigint.cc
@@ -185,7 +185,7 @@ MaybeHandle<MutableBigInt> MutableBigInt::New(Isolate* isolate, int length) {
result->set_length(length);
result->set_sign(false);
#if DEBUG
- result->InitializeDigits(length, 0xbf);
+ result->InitializeDigits(length, 0xBF);
#endif
return result;
}
@@ -304,7 +304,71 @@ MaybeHandle<BigInt> BigInt::BitwiseNot(Handle<BigInt> x) {
MaybeHandle<BigInt> BigInt::Exponentiate(Handle<BigInt> base,
Handle<BigInt> exponent) {
- UNIMPLEMENTED(); // TODO(jkummerow): Implement.
+ Isolate* isolate = base->GetIsolate();
+ // 1. If exponent is < 0, throw a RangeError exception.
+ if (exponent->sign()) {
+ THROW_NEW_ERROR(isolate,
+ NewRangeError(MessageTemplate::kBigIntNegativeExponent),
+ BigInt);
+ }
+ // 2. If base is 0n and exponent is 0n, return 1n.
+ if (exponent->is_zero()) {
+ return MutableBigInt::NewFromInt(isolate, 1);
+ }
+ // 3. Return a BigInt representing the mathematical value of base raised
+ // to the power exponent.
+ if (base->is_zero()) return base;
+ if (base->length() == 1 && base->digit(0) == 1) return base;
+ // For all bases >= 2, very large exponents would lead to unrepresentable
+ // results.
+ STATIC_ASSERT(kMaxLengthBits < std::numeric_limits<digit_t>::max());
+ if (exponent->length() > 1) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kBigIntTooBig),
+ BigInt);
+ }
+ digit_t exp_value = exponent->digit(0);
+ if (exp_value == 1) return base;
+ if (exp_value >= kMaxLengthBits) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kBigIntTooBig),
+ BigInt);
+ }
+ STATIC_ASSERT(kMaxLengthBits <= kMaxInt);
+ int n = static_cast<int>(exp_value);
+ if (base->length() == 1 && base->digit(0) == 2) {
+ // Fast path for 2^n.
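+    // E.g. with 64-bit digits and n == 70: needed_digits == 2 and bit
+    // 70 % 64 == 6 is set in the most significant digit.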
+ int needed_digits = 1 + (n / kDigitBits);
+ Handle<MutableBigInt> result =
+ MutableBigInt::New(isolate, needed_digits).ToHandleChecked();
+ result->InitializeDigits(needed_digits);
+ // All bits are zero. Now set the n-th bit.
+ digit_t msd = static_cast<digit_t>(1) << (n % kDigitBits);
+ result->set_digit(needed_digits - 1, msd);
+ // Result is negative for odd powers of -2n.
+ if (base->sign()) result->set_sign((n & 1) != 0);
+ return MutableBigInt::MakeImmutable(result);
+ }
+ Handle<BigInt> result;
+ Handle<BigInt> running_square = base;
+ // This implicitly sets the result's sign correctly.
+ if (n & 1) result = base;
+ n >>= 1;
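+  // Square-and-multiply: e.g. for n == 6 (binary 110), running_square takes
+  // the values base^2 and base^4, and the result is base^2 * base^4.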
+ for (; n != 0; n >>= 1) {
+ if (!Multiply(running_square, running_square).ToHandle(&running_square)) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kBigIntTooBig),
+ BigInt);
+ }
+ if (n & 1) {
+ if (result.is_null()) {
+ result = running_square;
+ } else {
+ if (!Multiply(result, running_square).ToHandle(&result)) {
+ THROW_NEW_ERROR(
+ isolate, NewRangeError(MessageTemplate::kBigIntTooBig), BigInt);
+ }
+ }
+ }
+ }
+ return result;
}
MaybeHandle<BigInt> BigInt::Multiply(Handle<BigInt> x, Handle<BigInt> y) {
@@ -1617,8 +1681,8 @@ Handle<BigInt> MutableBigInt::RightShiftByMaximum(Isolate* isolate, bool sign) {
Maybe<BigInt::digit_t> MutableBigInt::ToShiftAmount(Handle<BigIntBase> x) {
if (x->length() > 1) return Nothing<digit_t>();
digit_t value = x->digit(0);
- STATIC_ASSERT(kMaxLength * kDigitBits < std::numeric_limits<digit_t>::max());
- if (value > kMaxLength * kDigitBits) return Nothing<digit_t>();
+ STATIC_ASSERT(kMaxLengthBits < std::numeric_limits<digit_t>::max());
+ if (value > kMaxLengthBits) return Nothing<digit_t>();
return Just(value);
}
@@ -1864,12 +1928,13 @@ Handle<BigInt> BigInt::AsIntN(uint64_t n, Handle<BigInt> x) {
if (x->is_zero()) return x;
if (n == 0) return MutableBigInt::Zero(x->GetIsolate());
uint64_t needed_length = (n + kDigitBits - 1) / kDigitBits;
+ uint64_t x_length = static_cast<uint64_t>(x->length());
// If {x} has less than {n} bits, return it directly.
- if (static_cast<uint64_t>(x->length()) < needed_length) return x;
+ if (x_length < needed_length) return x;
DCHECK_LE(needed_length, kMaxInt);
digit_t top_digit = x->digit(static_cast<int>(needed_length) - 1);
digit_t compare_digit = static_cast<digit_t>(1) << ((n - 1) % kDigitBits);
- if (top_digit < compare_digit) return x;
+ if (x_length == needed_length && top_digit < compare_digit) return x;
// Otherwise we have to truncate (which is a no-op in the special case
// of x == -2^(n-1)), and determine the right sign. We also might have
// to subtract from 2^n to simulate having two's complement representation.
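+  // E.g. AsIntN(3, 4n) returns -4n: bit 2 is the sign bit when n == 3, so
+  // the value is interpreted as 4 - 2^3.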
@@ -1946,8 +2011,11 @@ Handle<BigInt> MutableBigInt::TruncateToNBits(int n, Handle<BigInt> x) {
// The MSD might contain extra bits that we don't want.
digit_t msd = x->digit(last);
- int drop = kDigitBits - (n % kDigitBits);
- result->set_digit(last, (msd << drop) >> drop);
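+  // When n is a multiple of kDigitBits the MSD is already exact; masking is
+  // skipped since shifting by kDigitBits would be undefined behavior.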
+ if (n % kDigitBits != 0) {
+ int drop = kDigitBits - (n % kDigitBits);
+ msd = (msd << drop) >> drop;
+ }
+ result->set_digit(last, msd);
result->set_sign(x->sign());
return MakeImmutable(result);
}
diff --git a/deps/v8/src/objects/bigint.h b/deps/v8/src/objects/bigint.h
index de0daf495e..9e29a69b3b 100644
--- a/deps/v8/src/objects/bigint.h
+++ b/deps/v8/src/objects/bigint.h
@@ -24,13 +24,19 @@ class BigIntBase : public HeapObject {
return LengthBits::decode(static_cast<uint32_t>(bitfield));
}
- // The maximum length that the current implementation supports would be
- // kMaxInt / kDigitBits. However, we use a lower limit for now, because
- // raising it later is easier than lowering it.
- // Support up to 1 million bits.
- static const int kMaxLengthBits = 1024 * 1024;
+ // Increasing kMaxLength will require code changes.
+ static const int kMaxLengthBits = kMaxInt - kPointerSize * kBitsPerByte - 1;
static const int kMaxLength = kMaxLengthBits / (kPointerSize * kBitsPerByte);
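+  // On 64-bit targets this is roughly 2^31 bits, i.e. about 2^25 digits.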
+ static const int kLengthFieldBits = 30;
+ STATIC_ASSERT(kMaxLength <= ((1 << kLengthFieldBits) - 1));
+ class LengthBits : public BitField<int, 0, kLengthFieldBits> {};
+ class SignBits : public BitField<bool, LengthBits::kNext, 1> {};
+
+ static const int kBitfieldOffset = HeapObject::kHeaderSize;
+ static const int kDigitsOffset = kBitfieldOffset + kPointerSize;
+ static const int kHeaderSize = kDigitsOffset;
+
private:
friend class BigInt;
friend class MutableBigInt;
@@ -44,15 +50,6 @@ class BigIntBase : public HeapObject {
static const int kHalfDigitBits = kDigitBits / 2;
static const digit_t kHalfDigitMask = (1ull << kHalfDigitBits) - 1;
- static const int kBitfieldOffset = HeapObject::kHeaderSize;
- static const int kDigitsOffset = kBitfieldOffset + kPointerSize;
- static const int kHeaderSize = kDigitsOffset;
-
- static const int kLengthFieldBits = 20;
- STATIC_ASSERT(kMaxLength <= ((1 << kLengthFieldBits) - 1));
- class LengthBits : public BitField<int, 0, kLengthFieldBits> {};
- class SignBits : public BitField<bool, LengthBits::kNext, 1> {};
-
// sign() == true means negative.
inline bool sign() const {
intptr_t bitfield = READ_INTPTR_FIELD(this, kBitfieldOffset);
diff --git a/deps/v8/src/objects/code-inl.h b/deps/v8/src/objects/code-inl.h
index 17cfa4f67b..4c3e7f0d97 100644
--- a/deps/v8/src/objects/code-inl.h
+++ b/deps/v8/src/objects/code-inl.h
@@ -290,14 +290,14 @@ Code::Kind Code::kind() const {
void Code::initialize_flags(Kind kind, bool has_unwinding_info,
bool is_turbofanned, int stack_slots) {
- CHECK_LE(stack_slots, StackSlotsField::kMax);
- DCHECK_IMPLIES(stack_slots != 0, is_turbofanned);
+ CHECK(0 <= stack_slots && stack_slots < StackSlotsField::kMax);
static_assert(Code::NUMBER_OF_KINDS <= KindField::kMax + 1, "field overflow");
uint32_t flags = HasUnwindingInfoField::encode(has_unwinding_info) |
KindField::encode(kind) |
IsTurbofannedField::encode(is_turbofanned) |
StackSlotsField::encode(stack_slots);
WRITE_UINT32_FIELD(this, kFlagsOffset, flags);
+ DCHECK_IMPLIES(stack_slots != 0, has_safepoint_info());
}
inline bool Code::is_interpreter_trampoline_builtin() const {
@@ -411,21 +411,25 @@ void Code::set_builtin_index(int index) {
bool Code::is_builtin() const { return builtin_index() != -1; }
-unsigned Code::stack_slots() const {
- DCHECK(is_turbofanned());
+bool Code::has_safepoint_info() const {
+ return is_turbofanned() || is_wasm_code();
+}
+
+int Code::stack_slots() const {
+ DCHECK(has_safepoint_info());
return StackSlotsField::decode(READ_UINT32_FIELD(this, kFlagsOffset));
}
-unsigned Code::safepoint_table_offset() const {
- DCHECK(is_turbofanned());
- return READ_UINT32_FIELD(this, kSafepointTableOffsetOffset);
+int Code::safepoint_table_offset() const {
+ DCHECK(has_safepoint_info());
+ return READ_INT32_FIELD(this, kSafepointTableOffsetOffset);
}
-void Code::set_safepoint_table_offset(unsigned offset) {
- CHECK(offset <= std::numeric_limits<uint32_t>::max());
- DCHECK(is_turbofanned() || offset == 0); // Allow zero initialization.
+void Code::set_safepoint_table_offset(int offset) {
+ CHECK_LE(0, offset);
+ DCHECK(has_safepoint_info() || offset == 0); // Allow zero initialization.
DCHECK(IsAligned(offset, static_cast<unsigned>(kIntSize)));
- WRITE_UINT32_FIELD(this, kSafepointTableOffsetOffset, offset);
+ WRITE_INT32_FIELD(this, kSafepointTableOffsetOffset, offset);
}
bool Code::marked_for_deoptimization() const {
@@ -635,6 +639,14 @@ ByteArray* BytecodeArray::SourcePositionTable() {
->source_position_table();
}
+void BytecodeArray::ClearFrameCacheFromSourcePositionTable() {
+ Object* maybe_table = source_position_table();
+ if (maybe_table->IsByteArray()) return;
+ DCHECK(maybe_table->IsSourcePositionTableWithFrameCache());
+ set_source_position_table(SourcePositionTableWithFrameCache::cast(maybe_table)
+ ->source_position_table());
+}
+
int BytecodeArray::BytecodeArraySize() { return SizeFor(this->length()); }
int BytecodeArray::SizeIncludingMetadata() {
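
The initialize_flags() hunk above packs four fields into a single 32-bit
word and tightens the stack_slots bound check. A sketch of that packing
with invented field widths (V8's real layout lives in the *Field bitfield
classes referenced in the code):

    #include <cstdint>

    constexpr uint32_t EncodeFlags(bool has_unwinding_info,
                                   bool is_turbofanned, uint32_t kind,
                                   uint32_t stack_slots) {
      return (has_unwinding_info ? 1u : 0u)  // bit 0
             | (is_turbofanned ? 2u : 0u)    // bit 1
             | (kind << 2)                   // bits 2..6 (width assumed)
             | (stack_slots << 7);           // remaining bits (assumed)
    }
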
diff --git a/deps/v8/src/objects/code.h b/deps/v8/src/objects/code.h
index bb447ce2dd..c43e07c1f9 100644
--- a/deps/v8/src/objects/code.h
+++ b/deps/v8/src/objects/code.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_CODE_H_
#include "src/objects.h"
+#include "src/objects/fixed-array.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -138,7 +139,8 @@ class Code : public HeapObject {
#endif // defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
#ifdef ENABLE_DISASSEMBLER
- void Disassemble(const char* name, std::ostream& os); // NOLINT
+ void Disassemble(const char* name, std::ostream& os,
+ void* current_pc = nullptr); // NOLINT
#endif
// [instruction_size]: Size of the native instructions
@@ -232,14 +234,16 @@ class Code : public HeapObject {
inline void set_builtin_index(int id);
inline bool is_builtin() const;
- // [stack_slots]: For kind OPTIMIZED_FUNCTION, the number of stack slots
+ inline bool has_safepoint_info() const;
+
+ // [stack_slots]: If {has_safepoint_info()}, the number of stack slots
// reserved in the code prologue.
- inline unsigned stack_slots() const;
+ inline int stack_slots() const;
- // [safepoint_table_start]: For kind OPTIMIZED_FUNCTION, the offset in
- // the instruction stream where the safepoint table starts.
- inline unsigned safepoint_table_offset() const;
- inline void set_safepoint_table_offset(unsigned offset);
+ // [safepoint_table_offset]: If {has_safepoint_info()}, the offset in the
+ // instruction stream where the safepoint table starts.
+ inline int safepoint_table_offset() const;
+ inline void set_safepoint_table_offset(int offset);
// [marked_for_deoptimization]: For kind OPTIMIZED_FUNCTION tells whether
// the code is going to be deoptimized because of dead embedded maps.
@@ -386,7 +390,7 @@ class Code : public HeapObject {
DECL_PRINTER(Code)
DECL_VERIFIER(Code)
- void PrintDeoptLocation(FILE* out, Address pc);
+ void PrintDeoptLocation(FILE* out, const char* str, Address pc);
bool CanDeoptAt(Address pc);
inline HandlerTable::CatchPrediction GetBuiltinCatchPrediction();
@@ -790,6 +794,7 @@ class BytecodeArray : public FixedArrayBase {
DECL_ACCESSORS(source_position_table, Object)
inline ByteArray* SourcePositionTable();
+ inline void ClearFrameCacheFromSourcePositionTable();
DECL_CAST(BytecodeArray)
diff --git a/deps/v8/src/objects/data-handler-inl.h b/deps/v8/src/objects/data-handler-inl.h
new file mode 100644
index 0000000000..40c3658e60
--- /dev/null
+++ b/deps/v8/src/objects/data-handler-inl.h
@@ -0,0 +1,41 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_DATA_HANDLER_INL_H_
+#define V8_DATA_HANDLER_INL_H_
+
+#include "src/objects/data-handler.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+bool HeapObject::IsDataHandler() const {
+ return IsLoadHandler() || IsStoreHandler();
+}
+
+CAST_ACCESSOR(DataHandler)
+
+ACCESSORS(DataHandler, smi_handler, Object, kSmiHandlerOffset)
+ACCESSORS(DataHandler, validity_cell, Object, kValidityCellOffset)
+
+int DataHandler::data_field_count() const {
+ return (map()->instance_size() - kSizeWithData0) / kPointerSize;
+}
+
+ACCESSORS_CHECKED(DataHandler, data1, Object, kData1Offset,
+ map()->instance_size() >= kSizeWithData1)
+ACCESSORS_CHECKED(DataHandler, data2, Object, kData2Offset,
+ map()->instance_size() >= kSizeWithData2)
+ACCESSORS_CHECKED(DataHandler, data3, Object, kData3Offset,
+ map()->instance_size() >= kSizeWithData3)
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_DATA_HANDLER_INL_H_
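
data_field_count() above derives the number of optional fields purely from
the map's instance size. The same arithmetic, standalone, with hypothetical
numbers (8-byte pointers, a 24-byte kSizeWithData0):

    constexpr int kPointerSize = 8;
    constexpr int kSizeWithData0 = 24;  // hypothetical base size

    constexpr int DataFieldCount(int instance_size) {
      return (instance_size - kSizeWithData0) / kPointerSize;
    }
    static_assert(DataFieldCount(24) == 0, "no optional data fields");
    static_assert(DataFieldCount(48) == 3, "data1..data3 all present");
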
diff --git a/deps/v8/src/objects/data-handler.h b/deps/v8/src/objects/data-handler.h
new file mode 100644
index 0000000000..f11d00fa38
--- /dev/null
+++ b/deps/v8/src/objects/data-handler.h
@@ -0,0 +1,63 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_DATA_HANDLER_H_
+#define V8_DATA_HANDLER_H_
+
+#include "src/objects.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+// DataHandler is a base class for load and store handlers that can't be
+// encoded in one Smi. The handler kind can be deduced from the instance type.
+class DataHandler : public Struct {
+ public:
+ // [smi_handler]: A Smi which encodes a handler or Code object (we still
+ // use code handlers for accessing lexical environment variables, but soon
+ // only smi handlers will remain). See LoadHandler and StoreHandler for
+ // details about encoding.
+ DECL_ACCESSORS(smi_handler, Object)
+
+ // [validity_cell]: A validity Cell that guards prototype chain modifications.
+ DECL_ACCESSORS(validity_cell, Object)
+
+ // Returns the number of optional data fields available in the object.
+ inline int data_field_count() const;
+
+ // [data1-3]: These are optional general-purpose fields whose content and
+ // presence depend on the handler kind.
+ DECL_ACCESSORS(data1, Object)
+ DECL_ACCESSORS(data2, Object)
+ DECL_ACCESSORS(data3, Object)
+
+// Layout description.
+#define DATA_HANDLER_FIELDS(V) \
+ V(kSmiHandlerOffset, kPointerSize) \
+ V(kValidityCellOffset, kPointerSize) \
+ V(kSizeWithData0, 0) \
+ V(kData1Offset, kPointerSize) \
+ V(kSizeWithData1, 0) \
+ V(kData2Offset, kPointerSize) \
+ V(kSizeWithData2, 0) \
+ V(kData3Offset, kPointerSize) \
+ V(kSizeWithData3, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, DATA_HANDLER_FIELDS)
+#undef DATA_HANDLER_FIELDS
+
+ DECL_CAST(DataHandler)
+
+ DECL_VERIFIER(DataHandler)
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_DATA_HANDLER_H_
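
The DATA_HANDLER_FIELDS list above interleaves real fields with zero-size
kSizeWithDataN markers; each marker records the instance size of a handler
that stops at that point. Expanded by hand with hypothetical sizes (8-byte
pointers, an 8-byte header), the generated constants look like:

    enum DataHandlerLayout {
      kHeaderSize = 8,                              // hypothetical
      kSmiHandlerOffset = kHeaderSize,              //  8
      kValidityCellOffset = kSmiHandlerOffset + 8,  // 16
      kSizeWithData0 = kValidityCellOffset + 8,     // 24
      kData1Offset = kSizeWithData0,                // 24
      kSizeWithData1 = kData1Offset + 8,            // 32
      kData2Offset = kSizeWithData1,                // 32
      kSizeWithData2 = kData2Offset + 8,            // 40
    };
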
diff --git a/deps/v8/src/objects/debug-objects.h b/deps/v8/src/objects/debug-objects.h
index 9ee2765897..0ce134b0b3 100644
--- a/deps/v8/src/objects/debug-objects.h
+++ b/deps/v8/src/objects/debug-objects.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_DEBUG_OBJECTS_H_
#include "src/objects.h"
+#include "src/objects/fixed-array.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/descriptor-array.h b/deps/v8/src/objects/descriptor-array.h
index f0b985337b..a89a31fcd5 100644
--- a/deps/v8/src/objects/descriptor-array.h
+++ b/deps/v8/src/objects/descriptor-array.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_DESCRIPTOR_ARRAY_H_
#include "src/objects.h"
+#include "src/objects/fixed-array.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/fixed-array-inl.h b/deps/v8/src/objects/fixed-array-inl.h
new file mode 100644
index 0000000000..edca36c92e
--- /dev/null
+++ b/deps/v8/src/objects/fixed-array-inl.h
@@ -0,0 +1,634 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_FIXED_ARRAY_INL_H_
+#define V8_OBJECTS_FIXED_ARRAY_INL_H_
+
+#include "src/objects.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+TYPE_CHECKER(ByteArray, BYTE_ARRAY_TYPE)
+TYPE_CHECKER(FixedArrayExact, FIXED_ARRAY_TYPE)
+TYPE_CHECKER(FixedDoubleArray, FIXED_DOUBLE_ARRAY_TYPE)
+TYPE_CHECKER(WeakFixedArray, FIXED_ARRAY_TYPE)
+
+CAST_ACCESSOR(ArrayList)
+CAST_ACCESSOR(ByteArray)
+CAST_ACCESSOR(FixedArray)
+CAST_ACCESSOR(FixedArrayBase)
+CAST_ACCESSOR(FixedDoubleArray)
+CAST_ACCESSOR(FixedTypedArrayBase)
+CAST_ACCESSOR(TemplateList)
+CAST_ACCESSOR(WeakFixedArray)
+
+SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
+SYNCHRONIZED_SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
+
+Object* FixedArrayBase::unchecked_synchronized_length() const {
+ return ACQUIRE_READ_FIELD(this, kLengthOffset);
+}
+
+ACCESSORS(FixedTypedArrayBase, base_pointer, Object, kBasePointerOffset)
+
+Object** FixedArray::GetFirstElementAddress() {
+ return reinterpret_cast<Object**>(FIELD_ADDR(this, OffsetOfElementAt(0)));
+}
+
+bool FixedArray::ContainsOnlySmisOrHoles() {
+ Object* the_hole = GetHeap()->the_hole_value();
+ Object** current = GetFirstElementAddress();
+ for (int i = 0; i < length(); ++i) {
+ Object* candidate = *current++;
+ if (!candidate->IsSmi() && candidate != the_hole) return false;
+ }
+ return true;
+}
+
+Object* FixedArray::get(int index) const {
+ SLOW_DCHECK(index >= 0 && index < this->length());
+ return RELAXED_READ_FIELD(this, kHeaderSize + index * kPointerSize);
+}
+
+Handle<Object> FixedArray::get(FixedArray* array, int index, Isolate* isolate) {
+ return handle(array->get(index), isolate);
+}
+
+template <class T>
+MaybeHandle<T> FixedArray::GetValue(Isolate* isolate, int index) const {
+ Object* obj = get(index);
+ if (obj->IsUndefined(isolate)) return MaybeHandle<T>();
+ return Handle<T>(T::cast(obj), isolate);
+}
+
+template <class T>
+Handle<T> FixedArray::GetValueChecked(Isolate* isolate, int index) const {
+ Object* obj = get(index);
+ CHECK(!obj->IsUndefined(isolate));
+ return Handle<T>(T::cast(obj), isolate);
+}
+
+bool FixedArray::is_the_hole(Isolate* isolate, int index) {
+ return get(index)->IsTheHole(isolate);
+}
+
+void FixedArray::set(int index, Smi* value) {
+ DCHECK_NE(map(), GetHeap()->fixed_cow_array_map());
+ DCHECK_LT(index, this->length());
+ DCHECK(reinterpret_cast<Object*>(value)->IsSmi());
+ int offset = kHeaderSize + index * kPointerSize;
+ RELAXED_WRITE_FIELD(this, offset, value);
+}
+
+void FixedArray::set(int index, Object* value) {
+ DCHECK_NE(GetHeap()->fixed_cow_array_map(), map());
+ DCHECK(IsFixedArray());
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, this->length());
+ int offset = kHeaderSize + index * kPointerSize;
+ RELAXED_WRITE_FIELD(this, offset, value);
+ WRITE_BARRIER(GetHeap(), this, offset, value);
+}
+
+void FixedArray::set(int index, Object* value, WriteBarrierMode mode) {
+ DCHECK_NE(map(), GetHeap()->fixed_cow_array_map());
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, this->length());
+ int offset = kHeaderSize + index * kPointerSize;
+ RELAXED_WRITE_FIELD(this, offset, value);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
+}
+
+void FixedArray::NoWriteBarrierSet(FixedArray* array, int index,
+ Object* value) {
+ DCHECK_NE(array->map(), array->GetHeap()->fixed_cow_array_map());
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, array->length());
+ DCHECK(!array->GetHeap()->InNewSpace(value));
+ RELAXED_WRITE_FIELD(array, kHeaderSize + index * kPointerSize, value);
+}
+
+void FixedArray::set_undefined(int index) {
+ set_undefined(GetIsolate(), index);
+}
+
+void FixedArray::set_undefined(Isolate* isolate, int index) {
+ FixedArray::NoWriteBarrierSet(this, index,
+ isolate->heap()->undefined_value());
+}
+
+void FixedArray::set_null(int index) { set_null(GetIsolate(), index); }
+
+void FixedArray::set_null(Isolate* isolate, int index) {
+ FixedArray::NoWriteBarrierSet(this, index, isolate->heap()->null_value());
+}
+
+void FixedArray::set_the_hole(int index) { set_the_hole(GetIsolate(), index); }
+
+void FixedArray::set_the_hole(Isolate* isolate, int index) {
+ FixedArray::NoWriteBarrierSet(this, index, isolate->heap()->the_hole_value());
+}
+
+void FixedArray::FillWithHoles(int from, int to) {
+ Isolate* isolate = GetIsolate();
+ for (int i = from; i < to; i++) {
+ set_the_hole(isolate, i);
+ }
+}
+
+Object** FixedArray::data_start() {
+ return HeapObject::RawField(this, kHeaderSize);
+}
+
+Object** FixedArray::RawFieldOfElementAt(int index) {
+ return HeapObject::RawField(this, OffsetOfElementAt(index));
+}
+
+double FixedDoubleArray::get_scalar(int index) {
+ DCHECK(map() != GetHeap()->fixed_cow_array_map() &&
+ map() != GetHeap()->fixed_array_map());
+ DCHECK(index >= 0 && index < this->length());
+ DCHECK(!is_the_hole(index));
+ return READ_DOUBLE_FIELD(this, kHeaderSize + index * kDoubleSize);
+}
+
+uint64_t FixedDoubleArray::get_representation(int index) {
+ DCHECK(map() != GetHeap()->fixed_cow_array_map() &&
+ map() != GetHeap()->fixed_array_map());
+ DCHECK(index >= 0 && index < this->length());
+ int offset = kHeaderSize + index * kDoubleSize;
+ return READ_UINT64_FIELD(this, offset);
+}
+
+Handle<Object> FixedDoubleArray::get(FixedDoubleArray* array, int index,
+ Isolate* isolate) {
+ if (array->is_the_hole(index)) {
+ return isolate->factory()->the_hole_value();
+ } else {
+ return isolate->factory()->NewNumber(array->get_scalar(index));
+ }
+}
+
+void FixedDoubleArray::set(int index, double value) {
+ DCHECK(map() != GetHeap()->fixed_cow_array_map() &&
+ map() != GetHeap()->fixed_array_map());
+ int offset = kHeaderSize + index * kDoubleSize;
+ if (std::isnan(value)) {
+ WRITE_DOUBLE_FIELD(this, offset, std::numeric_limits<double>::quiet_NaN());
+ } else {
+ WRITE_DOUBLE_FIELD(this, offset, value);
+ }
+ DCHECK(!is_the_hole(index));
+}
+
+void FixedDoubleArray::set_the_hole(Isolate* isolate, int index) {
+ set_the_hole(index);
+}
+
+void FixedDoubleArray::set_the_hole(int index) {
+ DCHECK(map() != GetHeap()->fixed_cow_array_map() &&
+ map() != GetHeap()->fixed_array_map());
+ int offset = kHeaderSize + index * kDoubleSize;
+ WRITE_UINT64_FIELD(this, offset, kHoleNanInt64);
+}
+
+bool FixedDoubleArray::is_the_hole(Isolate* isolate, int index) {
+ return is_the_hole(index);
+}
+
+bool FixedDoubleArray::is_the_hole(int index) {
+ return get_representation(index) == kHoleNanInt64;
+}
+
+double* FixedDoubleArray::data_start() {
+ return reinterpret_cast<double*>(FIELD_ADDR(this, kHeaderSize));
+}
+
+void FixedDoubleArray::FillWithHoles(int from, int to) {
+ for (int i = from; i < to; i++) {
+ set_the_hole(i);
+ }
+}
+
+Object* WeakFixedArray::Get(int index) const {
+ Object* raw = FixedArray::cast(this)->get(index + kFirstIndex);
+ if (raw->IsSmi()) return raw;
+ DCHECK(raw->IsWeakCell());
+ return WeakCell::cast(raw)->value();
+}
+
+bool WeakFixedArray::IsEmptySlot(int index) const {
+ DCHECK(index < Length());
+ return Get(index)->IsSmi();
+}
+
+void WeakFixedArray::Clear(int index) {
+ FixedArray::cast(this)->set(index + kFirstIndex, Smi::kZero);
+}
+
+int WeakFixedArray::Length() const {
+ return FixedArray::cast(this)->length() - kFirstIndex;
+}
+
+int WeakFixedArray::last_used_index() const {
+ return Smi::ToInt(FixedArray::cast(this)->get(kLastUsedIndexIndex));
+}
+
+void WeakFixedArray::set_last_used_index(int index) {
+ FixedArray::cast(this)->set(kLastUsedIndexIndex, Smi::FromInt(index));
+}
+
+template <class T>
+T* WeakFixedArray::Iterator::Next() {
+ if (list_ != nullptr) {
+ // Assert that list did not change during iteration.
+ DCHECK_EQ(last_used_index_, list_->last_used_index());
+ while (index_ < list_->Length()) {
+ Object* item = list_->Get(index_++);
+ if (item != Empty()) return T::cast(item);
+ }
+ list_ = nullptr;
+ }
+ return nullptr;
+}
+
+int ArrayList::Length() const {
+ if (FixedArray::cast(this)->length() == 0) return 0;
+ return Smi::ToInt(FixedArray::cast(this)->get(kLengthIndex));
+}
+
+void ArrayList::SetLength(int length) {
+ return FixedArray::cast(this)->set(kLengthIndex, Smi::FromInt(length));
+}
+
+Object* ArrayList::Get(int index) const {
+ return FixedArray::cast(this)->get(kFirstIndex + index);
+}
+
+Object** ArrayList::Slot(int index) {
+ return data_start() + kFirstIndex + index;
+}
+
+void ArrayList::Set(int index, Object* obj, WriteBarrierMode mode) {
+ FixedArray::cast(this)->set(kFirstIndex + index, obj, mode);
+}
+
+void ArrayList::Clear(int index, Object* undefined) {
+ DCHECK(undefined->IsUndefined(GetIsolate()));
+ FixedArray::cast(this)->set(kFirstIndex + index, undefined,
+ SKIP_WRITE_BARRIER);
+}
+
+int ByteArray::Size() { return RoundUp(length() + kHeaderSize, kPointerSize); }
+
+byte ByteArray::get(int index) const {
+ DCHECK(index >= 0 && index < this->length());
+ return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
+}
+
+void ByteArray::set(int index, byte value) {
+ DCHECK(index >= 0 && index < this->length());
+ WRITE_BYTE_FIELD(this, kHeaderSize + index * kCharSize, value);
+}
+
+void ByteArray::copy_in(int index, const byte* buffer, int length) {
+ DCHECK(index >= 0 && length >= 0 && length <= kMaxInt - index &&
+ index + length <= this->length());
+ byte* dst_addr = FIELD_ADDR(this, kHeaderSize + index * kCharSize);
+ memcpy(dst_addr, buffer, length);
+}
+
+void ByteArray::copy_out(int index, byte* buffer, int length) {
+ DCHECK(index >= 0 && length >= 0 && length <= kMaxInt - index &&
+ index + length <= this->length());
+ const byte* src_addr = FIELD_ADDR(this, kHeaderSize + index * kCharSize);
+ memcpy(buffer, src_addr, length);
+}
+
+int ByteArray::get_int(int index) const {
+ DCHECK(index >= 0 && index < this->length() / kIntSize);
+ return READ_INT_FIELD(this, kHeaderSize + index * kIntSize);
+}
+
+void ByteArray::set_int(int index, int value) {
+ DCHECK(index >= 0 && index < this->length() / kIntSize);
+ WRITE_INT_FIELD(this, kHeaderSize + index * kIntSize, value);
+}
+
+uint32_t ByteArray::get_uint32(int index) const {
+ DCHECK(index >= 0 && index < this->length() / kUInt32Size);
+ return READ_UINT32_FIELD(this, kHeaderSize + index * kUInt32Size);
+}
+
+void ByteArray::set_uint32(int index, uint32_t value) {
+ DCHECK(index >= 0 && index < this->length() / kUInt32Size);
+ WRITE_UINT32_FIELD(this, kHeaderSize + index * kUInt32Size, value);
+}
+
+void ByteArray::clear_padding() {
+ int data_size = length() + kHeaderSize;
+ memset(address() + data_size, 0, Size() - data_size);
+}
+
+ByteArray* ByteArray::FromDataStartAddress(Address address) {
+ DCHECK_TAG_ALIGNED(address);
+ return reinterpret_cast<ByteArray*>(address - kHeaderSize + kHeapObjectTag);
+}
+
+int ByteArray::DataSize() const { return RoundUp(length(), kPointerSize); }
+
+int ByteArray::ByteArraySize() { return SizeFor(this->length()); }
+
+Address ByteArray::GetDataStartAddress() {
+ return reinterpret_cast<Address>(this) - kHeapObjectTag + kHeaderSize;
+}
+
+template <class T>
+PodArray<T>* PodArray<T>::cast(Object* object) {
+ SLOW_DCHECK(object->IsByteArray());
+ return reinterpret_cast<PodArray<T>*>(object);
+}
+template <class T>
+const PodArray<T>* PodArray<T>::cast(const Object* object) {
+ SLOW_DCHECK(object->IsByteArray());
+ return reinterpret_cast<const PodArray<T>*>(object);
+}
+
+// static
+template <class T>
+Handle<PodArray<T>> PodArray<T>::New(Isolate* isolate, int length,
+ PretenureFlag pretenure) {
+ return Handle<PodArray<T>>::cast(
+ isolate->factory()->NewByteArray(length * sizeof(T), pretenure));
+}
+
+void* FixedTypedArrayBase::external_pointer() const {
+ intptr_t ptr = READ_INTPTR_FIELD(this, kExternalPointerOffset);
+ return reinterpret_cast<void*>(ptr);
+}
+
+void FixedTypedArrayBase::set_external_pointer(void* value,
+ WriteBarrierMode mode) {
+ intptr_t ptr = reinterpret_cast<intptr_t>(value);
+ WRITE_INTPTR_FIELD(this, kExternalPointerOffset, ptr);
+}
+
+void* FixedTypedArrayBase::DataPtr() {
+ return reinterpret_cast<void*>(
+ reinterpret_cast<intptr_t>(base_pointer()) +
+ reinterpret_cast<intptr_t>(external_pointer()));
+}
+
+int FixedTypedArrayBase::ElementSize(InstanceType type) {
+ int element_size;
+ switch (type) {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case FIXED_##TYPE##_ARRAY_TYPE: \
+ element_size = size; \
+ break;
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ default:
+ UNREACHABLE();
+ }
+ return element_size;
+}
+
+int FixedTypedArrayBase::DataSize(InstanceType type) const {
+ if (base_pointer() == Smi::kZero) return 0;
+ return length() * ElementSize(type);
+}
+
+int FixedTypedArrayBase::DataSize() const {
+ return DataSize(map()->instance_type());
+}
+
+size_t FixedTypedArrayBase::ByteLength() const {
+ return static_cast<size_t>(length()) *
+ static_cast<size_t>(ElementSize(map()->instance_type()));
+}
+
+int FixedTypedArrayBase::size() const {
+ return OBJECT_POINTER_ALIGN(kDataOffset + DataSize());
+}
+
+int FixedTypedArrayBase::TypedArraySize(InstanceType type) const {
+ return OBJECT_POINTER_ALIGN(kDataOffset + DataSize(type));
+}
+
+// static
+int FixedTypedArrayBase::TypedArraySize(InstanceType type, int length) {
+ return OBJECT_POINTER_ALIGN(kDataOffset + length * ElementSize(type));
+}
+
+uint8_t Uint8ArrayTraits::defaultValue() { return 0; }
+
+uint8_t Uint8ClampedArrayTraits::defaultValue() { return 0; }
+
+int8_t Int8ArrayTraits::defaultValue() { return 0; }
+
+uint16_t Uint16ArrayTraits::defaultValue() { return 0; }
+
+int16_t Int16ArrayTraits::defaultValue() { return 0; }
+
+uint32_t Uint32ArrayTraits::defaultValue() { return 0; }
+
+int32_t Int32ArrayTraits::defaultValue() { return 0; }
+
+float Float32ArrayTraits::defaultValue() {
+ return std::numeric_limits<float>::quiet_NaN();
+}
+
+double Float64ArrayTraits::defaultValue() {
+ return std::numeric_limits<double>::quiet_NaN();
+}
+
+template <class Traits>
+typename Traits::ElementType FixedTypedArray<Traits>::get_scalar(int index) {
+ DCHECK((index >= 0) && (index < this->length()));
+ return FixedTypedArray<Traits>::get_scalar_from_data_ptr(DataPtr(), index);
+}
+
+// static
+template <class Traits>
+typename Traits::ElementType FixedTypedArray<Traits>::get_scalar_from_data_ptr(
+ void* data_ptr, int index) {
+ typename Traits::ElementType* ptr = reinterpret_cast<ElementType*>(data_ptr);
+ // The JavaScript memory model allows for racy reads and writes to a
+ // SharedArrayBuffer's backing store, which will always be a FixedTypedArray.
+ // ThreadSanitizer will catch these racy accesses and warn about them, so we
+ // disable TSAN for these reads and writes using annotations.
+ //
+ // We don't use relaxed atomics here, as it is not a requirement of the
+ // JavaScript memory model to have tear-free reads of overlapping accesses,
+ // and using relaxed atomics may introduce overhead.
+ TSAN_ANNOTATE_IGNORE_READS_BEGIN;
+ auto result = ptr[index];
+ TSAN_ANNOTATE_IGNORE_READS_END;
+ return result;
+}
+
+template <class Traits>
+void FixedTypedArray<Traits>::set(int index, ElementType value) {
+ CHECK((index >= 0) && (index < this->length()));
+ // See the comment in FixedTypedArray<Traits>::get_scalar.
+ auto* ptr = reinterpret_cast<ElementType*>(DataPtr());
+ TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
+ ptr[index] = value;
+ TSAN_ANNOTATE_IGNORE_WRITES_END;
+}
+
+template <class Traits>
+typename Traits::ElementType FixedTypedArray<Traits>::from(int value) {
+ return static_cast<ElementType>(value);
+}
+
+template <>
+inline uint8_t FixedTypedArray<Uint8ClampedArrayTraits>::from(int value) {
+ if (value < 0) return 0;
+ if (value > 0xFF) return 0xFF;
+ return static_cast<uint8_t>(value);
+}
+
+template <class Traits>
+typename Traits::ElementType FixedTypedArray<Traits>::from(uint32_t value) {
+ return static_cast<ElementType>(value);
+}
+
+template <>
+inline uint8_t FixedTypedArray<Uint8ClampedArrayTraits>::from(uint32_t value) {
+ // We need this special case for Uint32 -> Uint8Clamped because the highest
+ // Uint32 values would be negative as an int and would clamp to 0 rather
+ // than 255.
+ if (value > 0xFF) return 0xFF;
+ return static_cast<uint8_t>(value);
+}
+
+template <class Traits>
+typename Traits::ElementType FixedTypedArray<Traits>::from(double value) {
+ return static_cast<ElementType>(DoubleToInt32(value));
+}
+
+template <>
+inline uint8_t FixedTypedArray<Uint8ClampedArrayTraits>::from(double value) {
+ // Handle NaN and values less than zero, which clamp to zero.
+ if (!(value > 0)) return 0;
+ if (value > 0xFF) return 0xFF;
+ return static_cast<uint8_t>(lrint(value));
+}
+
+template <>
+inline float FixedTypedArray<Float32ArrayTraits>::from(double value) {
+ return static_cast<float>(value);
+}
+
+template <>
+inline double FixedTypedArray<Float64ArrayTraits>::from(double value) {
+ return value;
+}
+
+template <class Traits>
+Handle<Object> FixedTypedArray<Traits>::get(FixedTypedArray<Traits>* array,
+ int index) {
+ return Traits::ToHandle(array->GetIsolate(), array->get_scalar(index));
+}
+
+template <class Traits>
+void FixedTypedArray<Traits>::SetValue(uint32_t index, Object* value) {
+ ElementType cast_value = Traits::defaultValue();
+ if (value->IsSmi()) {
+ int int_value = Smi::ToInt(value);
+ cast_value = from(int_value);
+ } else if (value->IsHeapNumber()) {
+ double double_value = HeapNumber::cast(value)->value();
+ cast_value = from(double_value);
+ } else {
+ // Clamp undefined to the default value. All other types have been
+ // converted to a number type further up in the call chain.
+ DCHECK(value->IsUndefined(GetIsolate()));
+ }
+ set(index, cast_value);
+}
+
+Handle<Object> Uint8ArrayTraits::ToHandle(Isolate* isolate, uint8_t scalar) {
+ return handle(Smi::FromInt(scalar), isolate);
+}
+
+Handle<Object> Uint8ClampedArrayTraits::ToHandle(Isolate* isolate,
+ uint8_t scalar) {
+ return handle(Smi::FromInt(scalar), isolate);
+}
+
+Handle<Object> Int8ArrayTraits::ToHandle(Isolate* isolate, int8_t scalar) {
+ return handle(Smi::FromInt(scalar), isolate);
+}
+
+Handle<Object> Uint16ArrayTraits::ToHandle(Isolate* isolate, uint16_t scalar) {
+ return handle(Smi::FromInt(scalar), isolate);
+}
+
+Handle<Object> Int16ArrayTraits::ToHandle(Isolate* isolate, int16_t scalar) {
+ return handle(Smi::FromInt(scalar), isolate);
+}
+
+Handle<Object> Uint32ArrayTraits::ToHandle(Isolate* isolate, uint32_t scalar) {
+ return isolate->factory()->NewNumberFromUint(scalar);
+}
+
+Handle<Object> Int32ArrayTraits::ToHandle(Isolate* isolate, int32_t scalar) {
+ return isolate->factory()->NewNumberFromInt(scalar);
+}
+
+Handle<Object> Float32ArrayTraits::ToHandle(Isolate* isolate, float scalar) {
+ return isolate->factory()->NewNumber(scalar);
+}
+
+Handle<Object> Float64ArrayTraits::ToHandle(Isolate* isolate, double scalar) {
+ return isolate->factory()->NewNumber(scalar);
+}
+
+// static
+template <class Traits>
+STATIC_CONST_MEMBER_DEFINITION const InstanceType
+ FixedTypedArray<Traits>::kInstanceType;
+
+template <class Traits>
+FixedTypedArray<Traits>* FixedTypedArray<Traits>::cast(Object* object) {
+ SLOW_DCHECK(object->IsHeapObject() &&
+ HeapObject::cast(object)->map()->instance_type() ==
+ Traits::kInstanceType);
+ return reinterpret_cast<FixedTypedArray<Traits>*>(object);
+}
+
+template <class Traits>
+const FixedTypedArray<Traits>* FixedTypedArray<Traits>::cast(
+ const Object* object) {
+ SLOW_DCHECK(object->IsHeapObject() &&
+ HeapObject::cast(object)->map()->instance_type() ==
+ Traits::kInstanceType);
+ return reinterpret_cast<FixedTypedArray<Traits>*>(object);
+}
+
+int TemplateList::length() const {
+ return Smi::ToInt(FixedArray::cast(this)->get(kLengthIndex));
+}
+
+Object* TemplateList::get(int index) const {
+ return FixedArray::cast(this)->get(kFirstElementIndex + index);
+}
+
+void TemplateList::set(int index, Object* value) {
+ FixedArray::cast(this)->set(kFirstElementIndex + index, value);
+}
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_FIXED_ARRAY_INL_H_
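
The three Uint8Clamped specializations above implement one clamping rule
for three input types. Restated standalone for the double case:

    #include <cmath>
    #include <cstdint>

    uint8_t ClampToUint8(double value) {
      if (!(value > 0)) return 0;    // NaN and non-positive clamp to 0
      if (value > 0xFF) return 0xFF;
      // Round to nearest; ties go to even in the default rounding mode.
      return static_cast<uint8_t>(std::lrint(value));
    }
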
diff --git a/deps/v8/src/objects/fixed-array.h b/deps/v8/src/objects/fixed-array.h
new file mode 100644
index 0000000000..5d78af8799
--- /dev/null
+++ b/deps/v8/src/objects/fixed-array.h
@@ -0,0 +1,601 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_FIXED_ARRAY_H_
+#define V8_OBJECTS_FIXED_ARRAY_H_
+
+#include "src/objects.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+#define FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(V) \
+ V(BYTECODE_ARRAY_CONSTANT_POOL_SUB_TYPE) \
+ V(BYTECODE_ARRAY_HANDLER_TABLE_SUB_TYPE) \
+ V(CODE_STUBS_TABLE_SUB_TYPE) \
+ V(COMPILATION_CACHE_TABLE_SUB_TYPE) \
+ V(CONTEXT_SUB_TYPE) \
+ V(COPY_ON_WRITE_SUB_TYPE) \
+ V(DEOPTIMIZATION_DATA_SUB_TYPE) \
+ V(DESCRIPTOR_ARRAY_SUB_TYPE) \
+ V(EMBEDDED_OBJECT_SUB_TYPE) \
+ V(ENUM_CACHE_SUB_TYPE) \
+ V(ENUM_INDICES_CACHE_SUB_TYPE) \
+ V(DEPENDENT_CODE_SUB_TYPE) \
+ V(DICTIONARY_ELEMENTS_SUB_TYPE) \
+ V(DICTIONARY_PROPERTIES_SUB_TYPE) \
+ V(EMPTY_PROPERTIES_DICTIONARY_SUB_TYPE) \
+ V(PACKED_ELEMENTS_SUB_TYPE) \
+ V(FAST_PROPERTIES_SUB_TYPE) \
+ V(FAST_TEMPLATE_INSTANTIATIONS_CACHE_SUB_TYPE) \
+ V(HANDLER_TABLE_SUB_TYPE) \
+ V(JS_COLLECTION_SUB_TYPE) \
+ V(JS_WEAK_COLLECTION_SUB_TYPE) \
+ V(NOSCRIPT_SHARED_FUNCTION_INFOS_SUB_TYPE) \
+ V(NUMBER_STRING_CACHE_SUB_TYPE) \
+ V(OBJECT_TO_CODE_SUB_TYPE) \
+ V(OPTIMIZED_CODE_LITERALS_SUB_TYPE) \
+ V(OPTIMIZED_CODE_MAP_SUB_TYPE) \
+ V(PROTOTYPE_USERS_SUB_TYPE) \
+ V(REGEXP_MULTIPLE_CACHE_SUB_TYPE) \
+ V(RETAINED_MAPS_SUB_TYPE) \
+ V(SCOPE_INFO_SUB_TYPE) \
+ V(SCRIPT_LIST_SUB_TYPE) \
+ V(SERIALIZED_OBJECTS_SUB_TYPE) \
+ V(SHARED_FUNCTION_INFOS_SUB_TYPE) \
+ V(SINGLE_CHARACTER_STRING_CACHE_SUB_TYPE) \
+ V(SLOW_TEMPLATE_INSTANTIATIONS_CACHE_SUB_TYPE) \
+ V(STRING_SPLIT_CACHE_SUB_TYPE) \
+ V(STRING_TABLE_SUB_TYPE) \
+ V(TEMPLATE_INFO_SUB_TYPE) \
+ V(FEEDBACK_METADATA_SUB_TYPE) \
+ V(WEAK_NEW_SPACE_OBJECT_TO_CODE_SUB_TYPE)
+
+enum FixedArraySubInstanceType {
+#define DEFINE_FIXED_ARRAY_SUB_INSTANCE_TYPE(name) name,
+ FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(DEFINE_FIXED_ARRAY_SUB_INSTANCE_TYPE)
+#undef DEFINE_FIXED_ARRAY_SUB_INSTANCE_TYPE
+ LAST_FIXED_ARRAY_SUB_TYPE = WEAK_NEW_SPACE_OBJECT_TO_CODE_SUB_TYPE
+};
+
+// Common superclass for FixedArrays, allowing implementations to share
+// common accessors and some code paths.
+class FixedArrayBase : public HeapObject {
+ public:
+ // [length]: length of the array.
+ inline int length() const;
+ inline void set_length(int value);
+
+ // Get and set the length using acquire loads and release stores.
+ inline int synchronized_length() const;
+ inline void synchronized_set_length(int value);
+
+ inline Object* unchecked_synchronized_length() const;
+
+ DECL_CAST(FixedArrayBase)
+
+ static int GetMaxLengthForNewSpaceAllocation(ElementsKind kind);
+
+ bool IsCowArray() const;
+
+ // Layout description.
+ // Length is smi tagged when it is stored.
+ static const int kLengthOffset = HeapObject::kHeaderSize;
+ static const int kHeaderSize = kLengthOffset + kPointerSize;
+};
+
+// FixedArray describes fixed-sized arrays with element type Object*.
+class FixedArray : public FixedArrayBase {
+ public:
+ // Setter and getter for elements.
+ inline Object* get(int index) const;
+ static inline Handle<Object> get(FixedArray* array, int index,
+ Isolate* isolate);
+ template <class T>
+ MaybeHandle<T> GetValue(Isolate* isolate, int index) const;
+
+ template <class T>
+ Handle<T> GetValueChecked(Isolate* isolate, int index) const;
+
+ // Return a grown copy if the index is bigger than the array's length.
+ static Handle<FixedArray> SetAndGrow(Handle<FixedArray> array, int index,
+ Handle<Object> value);
+
+ // Setter that uses write barrier.
+ inline void set(int index, Object* value);
+ inline bool is_the_hole(Isolate* isolate, int index);
+
+ // Setter that doesn't need write barrier.
+ inline void set(int index, Smi* value);
+ // Setter with explicit barrier mode.
+ inline void set(int index, Object* value, WriteBarrierMode mode);
+
+ // Setters for frequently used oddballs located in old space.
+ inline void set_undefined(int index);
+ inline void set_undefined(Isolate* isolate, int index);
+ inline void set_null(int index);
+ inline void set_null(Isolate* isolate, int index);
+ inline void set_the_hole(int index);
+ inline void set_the_hole(Isolate* isolate, int index);
+
+ inline Object** GetFirstElementAddress();
+ inline bool ContainsOnlySmisOrHoles();
+
+ // Gives access to raw memory which stores the array's data.
+ inline Object** data_start();
+
+ inline void FillWithHoles(int from, int to);
+
+ // Shrink length and insert filler objects.
+ void Shrink(int length);
+
+ // Copy a sub array from the receiver to dest.
+ void CopyTo(int pos, FixedArray* dest, int dest_pos, int len) const;
+
+ // Garbage collection support.
+ static constexpr int SizeFor(int length) {
+ return kHeaderSize + length * kPointerSize;
+ }
+
+ // Code Generation support.
+ static constexpr int OffsetOfElementAt(int index) { return SizeFor(index); }
+
+ // Garbage collection support.
+ inline Object** RawFieldOfElementAt(int index);
+
+ DECL_CAST(FixedArray)
+
+ // Maximal allowed size, in bytes, of a single FixedArray.
+ // Prevents overflowing size computations, as well as extreme memory
+ // consumption.
+ static const int kMaxSize = 128 * MB * kPointerSize;
+ // Maximum allowed length of a FixedArray.
+ static const int kMaxLength = (kMaxSize - kHeaderSize) / kPointerSize;
+ // Maximum allowed length for a regular (non-large-object-space) object.
+ STATIC_ASSERT(kMaxRegularHeapObjectSize < kMaxSize);
+ static const int kMaxRegularLength =
+ (kMaxRegularHeapObjectSize - kHeaderSize) / kPointerSize;
+
+ // Dispatched behavior.
+ DECL_PRINTER(FixedArray)
+ DECL_VERIFIER(FixedArray)
+#ifdef DEBUG
+ // Checks if two FixedArrays have identical contents.
+ bool IsEqualTo(FixedArray* other);
+#endif
+
+ typedef FlexibleBodyDescriptor<kHeaderSize> BodyDescriptor;
+ // No weak fields.
+ typedef BodyDescriptor BodyDescriptorWeak;
+
+ protected:
+ // Set operation on FixedArray without using write barriers. Can
+ // only be used for storing old space objects or smis.
+ static inline void NoWriteBarrierSet(FixedArray* array, int index,
+ Object* value);
+
+ private:
+ STATIC_ASSERT(kHeaderSize == Internals::kFixedArrayHeaderSize);
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(FixedArray);
+};
+
+// A FixedArray alias, added only because of the IsFixedArrayExact()
+// predicate, which checks for the exact instance type FIXED_ARRAY_TYPE
+// instead of a range check: [FIRST_FIXED_ARRAY_TYPE, LAST_FIXED_ARRAY_TYPE].
+class FixedArrayExact final : public FixedArray {
+ public:
+ DECL_CAST(FixedArrayExact)
+};
+
+// FixedDoubleArray describes fixed-sized arrays with element type double.
+class FixedDoubleArray : public FixedArrayBase {
+ public:
+ // Setter and getter for elements.
+ inline double get_scalar(int index);
+ inline uint64_t get_representation(int index);
+ static inline Handle<Object> get(FixedDoubleArray* array, int index,
+ Isolate* isolate);
+ inline void set(int index, double value);
+ inline void set_the_hole(Isolate* isolate, int index);
+ inline void set_the_hole(int index);
+
+ // Checking for the hole.
+ inline bool is_the_hole(Isolate* isolate, int index);
+ inline bool is_the_hole(int index);
+
+ // Garbage collection support.
+ inline static int SizeFor(int length) {
+ return kHeaderSize + length * kDoubleSize;
+ }
+
+ // Gives access to raw memory which stores the array's data.
+ inline double* data_start();
+
+ inline void FillWithHoles(int from, int to);
+
+ // Code Generation support.
+ static int OffsetOfElementAt(int index) { return SizeFor(index); }
+
+ DECL_CAST(FixedDoubleArray)
+
+ // Maximal allowed size, in bytes, of a single FixedDoubleArray.
+ // Prevents overflowing size computations, as well as extreme memory
+ // consumption.
+ static const int kMaxSize = 512 * MB;
+ // Maximum allowed length of a FixedDoubleArray.
+ static const int kMaxLength = (kMaxSize - kHeaderSize) / kDoubleSize;
+
+ // Dispatched behavior.
+ DECL_PRINTER(FixedDoubleArray)
+ DECL_VERIFIER(FixedDoubleArray)
+
+ class BodyDescriptor;
+ // No weak fields.
+ typedef BodyDescriptor BodyDescriptorWeak;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(FixedDoubleArray);
+};
+
+class WeakFixedArray : public FixedArray {
+ public:
+ // If |maybe_array| is not a WeakFixedArray, a fresh one will be allocated.
+ // This function does not check if the value exists already; callers must
+ // ensure this themselves if necessary.
+ static Handle<WeakFixedArray> Add(Handle<Object> maybe_array,
+ Handle<HeapObject> value,
+ int* assigned_index = nullptr);
+
+ // Returns true if an entry was found and removed.
+ bool Remove(Handle<HeapObject> value);
+
+ class NullCallback {
+ public:
+ static void Callback(Object* value, int old_index, int new_index) {}
+ };
+
+ template <class CompactionCallback>
+ void Compact();
+
+ inline Object* Get(int index) const;
+ inline void Clear(int index);
+ inline int Length() const;
+
+ inline bool IsEmptySlot(int index) const;
+ static Object* Empty() { return Smi::kZero; }
+
+ class Iterator {
+ public:
+ explicit Iterator(Object* maybe_array) : list_(nullptr) {
+ Reset(maybe_array);
+ }
+ void Reset(Object* maybe_array);
+
+ template <class T>
+ inline T* Next();
+
+ private:
+ int index_;
+ WeakFixedArray* list_;
+#ifdef DEBUG
+ int last_used_index_;
+ DisallowHeapAllocation no_gc_;
+#endif // DEBUG
+ DISALLOW_COPY_AND_ASSIGN(Iterator);
+ };
+
+ DECL_CAST(WeakFixedArray)
+
+ private:
+ static const int kLastUsedIndexIndex = 0;
+ static const int kFirstIndex = 1;
+
+ static Handle<WeakFixedArray> Allocate(
+ Isolate* isolate, int size, Handle<WeakFixedArray> initialize_from);
+
+ static void Set(Handle<WeakFixedArray> array, int index,
+ Handle<HeapObject> value);
+ inline void clear(int index);
+
+ inline int last_used_index() const;
+ inline void set_last_used_index(int index);
+
+ // Disallow inherited setters.
+ void set(int index, Smi* value);
+ void set(int index, Object* value);
+ void set(int index, Object* value, WriteBarrierMode mode);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(WeakFixedArray);
+};
+
+// A generic array that grows dynamically with O(1) amortized insertion.
+//
+// ArrayList is a FixedArray with static convenience methods for adding more
+// elements. The Length() method returns the number of elements in the list, not
+// the allocated size. The number of elements is stored at kLengthIndex and is
+// updated with every insertion. The elements of the ArrayList are stored in the
+// underlying FixedArray starting at kFirstIndex.
+class ArrayList : public FixedArray {
+ public:
+ enum AddMode {
+ kNone,
+ // Use this if GC can delete elements from the array.
+ kReloadLengthAfterAllocation,
+ };
+ static Handle<ArrayList> Add(Handle<ArrayList> array, Handle<Object> obj,
+ AddMode mode = kNone);
+ static Handle<ArrayList> Add(Handle<ArrayList> array, Handle<Object> obj1,
+ Handle<Object> obj2, AddMode = kNone);
+ static Handle<ArrayList> New(Isolate* isolate, int size);
+
+ // Returns the number of elements in the list, not the allocated size
+ // (which is length()). Upper-case Length() and lower-case length() return
+ // different results!
+ inline int Length() const;
+
+ // Sets the Length() as used by Elements(). Does not change the underlying
+ // storage capacity, i.e., length().
+ inline void SetLength(int length);
+ inline Object* Get(int index) const;
+ inline Object** Slot(int index);
+
+ // Set the element at index to obj. The underlying array must be large enough.
+ // If you need to grow the ArrayList, use the static Add() methods instead.
+ inline void Set(int index, Object* obj,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+
+ // Set the element at index to undefined. This does not change the Length().
+ inline void Clear(int index, Object* undefined);
+
+ // Return a copy of the list of size Length(), without the first entry (the
+ // first entry of the underlying storage holds the number returned by
+ // Length()).
+ static Handle<FixedArray> Elements(Handle<ArrayList> array);
+ bool IsFull();
+ DECL_CAST(ArrayList)
+
+ private:
+ static Handle<ArrayList> EnsureSpace(Handle<ArrayList> array, int length);
+ static const int kLengthIndex = 0;
+ static const int kFirstIndex = 1;
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ArrayList);
+};
+
+enum SearchMode { ALL_ENTRIES, VALID_ENTRIES };
+
+template <SearchMode search_mode, typename T>
+inline int Search(T* array, Name* name, int valid_entries = 0,
+ int* out_insertion_index = nullptr);
+
+// ByteArray represents fixed-sized byte arrays, used for the relocation info
+// that is attached to code objects.
+class ByteArray : public FixedArrayBase {
+ public:
+ inline int Size();
+
+ // Setter and getter.
+ inline byte get(int index) const;
+ inline void set(int index, byte value);
+
+ // Copy in / copy out whole byte slices.
+ inline void copy_out(int index, byte* buffer, int length);
+ inline void copy_in(int index, const byte* buffer, int length);
+
+ // Treat contents as an int array.
+ inline int get_int(int index) const;
+ inline void set_int(int index, int value);
+
+ inline uint32_t get_uint32(int index) const;
+ inline void set_uint32(int index, uint32_t value);
+
+ // Clear uninitialized padding space. This ensures that the snapshot content
+ // is deterministic.
+ inline void clear_padding();
+
+ static int SizeFor(int length) {
+ return OBJECT_POINTER_ALIGN(kHeaderSize + length);
+ }
+ // We use byte arrays for free blocks in the heap. Given a desired size in
+ // bytes that is a multiple of the word size and big enough to hold a byte
+ // array, this function returns the number of elements a byte array should
+ // have.
+ static int LengthFor(int size_in_bytes) {
+ DCHECK(IsAligned(size_in_bytes, kPointerSize));
+ DCHECK_GE(size_in_bytes, kHeaderSize);
+ return size_in_bytes - kHeaderSize;
+ }
+
+ // Returns data start address.
+ inline Address GetDataStartAddress();
+
+ inline int DataSize() const;
+
+ // Returns a pointer to the ByteArray object for a given data start address.
+ static inline ByteArray* FromDataStartAddress(Address address);
+
+ DECL_CAST(ByteArray)
+
+ // Dispatched behavior.
+ inline int ByteArraySize();
+ DECL_PRINTER(ByteArray)
+ DECL_VERIFIER(ByteArray)
+
+ // Layout description.
+ static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
+
+ // Maximal memory consumption for a single ByteArray.
+ static const int kMaxSize = 512 * MB;
+ // Maximal length of a single ByteArray.
+ static const int kMaxLength = kMaxSize - kHeaderSize;
+
+ class BodyDescriptor;
+ // No weak fields.
+ typedef BodyDescriptor BodyDescriptorWeak;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ByteArray);
+};
+
+// A wrapper class around ByteArray that can store arbitrary C++ classes, as
+// long as they can be copied with memcpy.
+template <class T>
+class PodArray : public ByteArray {
+ public:
+ static Handle<PodArray<T>> New(Isolate* isolate, int length,
+ PretenureFlag pretenure = NOT_TENURED);
+ void copy_out(int index, T* result) {
+ ByteArray::copy_out(index * sizeof(T), reinterpret_cast<byte*>(result),
+ sizeof(T));
+ }
+ T get(int index) {
+ T result;
+ copy_out(index, &result);
+ return result;
+ }
+ void set(int index, const T& value) {
+ copy_in(index * sizeof(T), reinterpret_cast<const byte*>(&value),
+ sizeof(T));
+ }
+ int length() { return ByteArray::length() / sizeof(T); }
+ DECL_CAST(PodArray<T>)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(PodArray<T>);
+};
+
+// V has parameters (Type, type, TYPE, C type, element_size)
+#define TYPED_ARRAYS(V) \
+ V(Uint8, uint8, UINT8, uint8_t, 1) \
+ V(Int8, int8, INT8, int8_t, 1) \
+ V(Uint16, uint16, UINT16, uint16_t, 2) \
+ V(Int16, int16, INT16, int16_t, 2) \
+ V(Uint32, uint32, UINT32, uint32_t, 4) \
+ V(Int32, int32, INT32, int32_t, 4) \
+ V(Float32, float32, FLOAT32, float, 4) \
+ V(Float64, float64, FLOAT64, double, 8) \
+ V(Uint8Clamped, uint8_clamped, UINT8_CLAMPED, uint8_t, 1)
+
+class FixedTypedArrayBase : public FixedArrayBase {
+ public:
+ // [base_pointer]: Points to the FixedTypedArrayBase itself, or is nullptr.
+ DECL_ACCESSORS(base_pointer, Object)
+
+ // [external_pointer]: Contains the offset between base_pointer and the start
+ // of the data. If the base_pointer is nullptr, the external_pointer
+ // therefore points to the actual backing store.
+ DECL_ACCESSORS(external_pointer, void)
+
+ // Dispatched behavior.
+ DECL_CAST(FixedTypedArrayBase)
+
+ static const int kBasePointerOffset = FixedArrayBase::kHeaderSize;
+ static const int kExternalPointerOffset = kBasePointerOffset + kPointerSize;
+ static const int kHeaderSize =
+ DOUBLE_POINTER_ALIGN(kExternalPointerOffset + kPointerSize);
+
+ static const int kDataOffset = kHeaderSize;
+
+ static const int kMaxElementSize = 8;
+
+#ifdef V8_HOST_ARCH_32_BIT
+ static const size_t kMaxByteLength = std::numeric_limits<size_t>::max();
+#else
+ static const size_t kMaxByteLength =
+ static_cast<size_t>(Smi::kMaxValue) * kMaxElementSize;
+#endif // V8_HOST_ARCH_32_BIT
+
+ static const size_t kMaxLength = Smi::kMaxValue;
+
+ class BodyDescriptor;
+ // No weak fields.
+ typedef BodyDescriptor BodyDescriptorWeak;
+
+ inline int size() const;
+
+ static inline int TypedArraySize(InstanceType type, int length);
+ inline int TypedArraySize(InstanceType type) const;
+
+ // Use with care: returns raw pointer into heap.
+ inline void* DataPtr();
+
+ inline int DataSize() const;
+
+ inline size_t ByteLength() const;
+
+ private:
+ static inline int ElementSize(InstanceType type);
+
+ inline int DataSize(InstanceType type) const;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(FixedTypedArrayBase);
+};
+
+template <class Traits>
+class FixedTypedArray : public FixedTypedArrayBase {
+ public:
+ typedef typename Traits::ElementType ElementType;
+ static const InstanceType kInstanceType = Traits::kInstanceType;
+
+ DECL_CAST(FixedTypedArray<Traits>)
+
+ static inline ElementType get_scalar_from_data_ptr(void* data_ptr, int index);
+ inline ElementType get_scalar(int index);
+ static inline Handle<Object> get(FixedTypedArray* array, int index);
+ inline void set(int index, ElementType value);
+
+ static inline ElementType from(int value);
+ static inline ElementType from(uint32_t value);
+ static inline ElementType from(double value);
+
+ // This accessor applies the correct conversion from Smi, HeapNumber
+ // and undefined.
+ inline void SetValue(uint32_t index, Object* value);
+
+ DECL_PRINTER(FixedTypedArray)
+ DECL_VERIFIER(FixedTypedArray)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(FixedTypedArray);
+};
+
+#define FIXED_TYPED_ARRAY_TRAITS(Type, type, TYPE, elementType, size) \
+ STATIC_ASSERT(size <= FixedTypedArrayBase::kMaxElementSize); \
+ class Type##ArrayTraits { \
+ public: /* NOLINT */ \
+ typedef elementType ElementType; \
+ static const InstanceType kInstanceType = FIXED_##TYPE##_ARRAY_TYPE; \
+ static const char* Designator() { return #type " array"; } \
+ static inline Handle<Object> ToHandle(Isolate* isolate, \
+ elementType scalar); \
+ static inline elementType defaultValue(); \
+ }; \
+ \
+ typedef FixedTypedArray<Type##ArrayTraits> Fixed##Type##Array;
+
+TYPED_ARRAYS(FIXED_TYPED_ARRAY_TRAITS)
+
+#undef FIXED_TYPED_ARRAY_TRAITS
+
+class TemplateList : public FixedArray {
+ public:
+ static Handle<TemplateList> New(Isolate* isolate, int size);
+ inline int length() const;
+ inline Object* get(int index) const;
+ inline void set(int index, Object* value);
+ static Handle<TemplateList> Add(Isolate* isolate, Handle<TemplateList> list,
+ Handle<Object> value);
+ DECL_CAST(TemplateList)
+ private:
+ static const int kLengthIndex = 0;
+ static const int kFirstElementIndex = kLengthIndex + 1;
+ DISALLOW_IMPLICIT_CONSTRUCTORS(TemplateList);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_FIXED_ARRAY_H_
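
The kMaxSize/kMaxLength constants above bound both memory use and the size
computation in SizeFor(). Restated as standalone arithmetic, assuming a
64-bit build and a hypothetical 16-byte header:

    constexpr int MB = 1 << 20;
    constexpr int kPointerSize = 8;
    constexpr int kHeaderSize = 16;  // hypothetical
    constexpr int kMaxSize = 128 * MB * kPointerSize;  // 1 GiB
    constexpr int kMaxLength = (kMaxSize - kHeaderSize) / kPointerSize;
    static_assert(kMaxLength == (1 << 27) - 2, "about 134 million slots");
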
diff --git a/deps/v8/src/objects/hash-table-inl.h b/deps/v8/src/objects/hash-table-inl.h
index baff7c03b4..a764684a00 100644
--- a/deps/v8/src/objects/hash-table-inl.h
+++ b/deps/v8/src/objects/hash-table-inl.h
@@ -11,12 +11,116 @@
namespace v8 {
namespace internal {
+int HashTableBase::NumberOfElements() const {
+ return Smi::ToInt(get(kNumberOfElementsIndex));
+}
+
+int HashTableBase::NumberOfDeletedElements() const {
+ return Smi::ToInt(get(kNumberOfDeletedElementsIndex));
+}
+
+int HashTableBase::Capacity() const { return Smi::ToInt(get(kCapacityIndex)); }
+
+void HashTableBase::ElementAdded() {
+ SetNumberOfElements(NumberOfElements() + 1);
+}
+
+void HashTableBase::ElementRemoved() {
+ SetNumberOfElements(NumberOfElements() - 1);
+ SetNumberOfDeletedElements(NumberOfDeletedElements() + 1);
+}
+
+void HashTableBase::ElementsRemoved(int n) {
+ SetNumberOfElements(NumberOfElements() - n);
+ SetNumberOfDeletedElements(NumberOfDeletedElements() + n);
+}
+
+// static
+int HashTableBase::ComputeCapacity(int at_least_space_for) {
+ // Add 50% slack to make slot collisions sufficiently unlikely.
+ // See matching computation in HashTable::HasSufficientCapacityToAdd().
+ // Must be kept in sync with CodeStubAssembler::HashTableComputeCapacity().
+ int raw_cap = at_least_space_for + (at_least_space_for >> 1);
+ int capacity = base::bits::RoundUpToPowerOfTwo32(raw_cap);
+ return Max(capacity, kMinCapacity);
+}
+
+void HashTableBase::SetNumberOfElements(int nof) {
+ set(kNumberOfElementsIndex, Smi::FromInt(nof));
+}
+
+void HashTableBase::SetNumberOfDeletedElements(int nod) {
+ set(kNumberOfDeletedElementsIndex, Smi::FromInt(nod));
+}
+
+template <typename Key>
+int BaseShape<Key>::GetMapRootIndex() {
+ return Heap::kHashTableMapRootIndex;
+}
+
+template <typename Derived, typename Shape>
+int HashTable<Derived, Shape>::FindEntry(Key key) {
+ return FindEntry(GetIsolate(), key);
+}
+
+template <typename Derived, typename Shape>
+int HashTable<Derived, Shape>::FindEntry(Isolate* isolate, Key key) {
+ return FindEntry(isolate, key, Shape::Hash(isolate, key));
+}
+
+// Find the entry for the given key; otherwise return kNotFound.
+template <typename Derived, typename Shape>
+int HashTable<Derived, Shape>::FindEntry(Isolate* isolate, Key key,
+ int32_t hash) {
+ uint32_t capacity = Capacity();
+ uint32_t entry = FirstProbe(hash, capacity);
+ uint32_t count = 1;
+ // EnsureCapacity will guarantee the hash table is never full.
+ Object* undefined = isolate->heap()->undefined_value();
+ Object* the_hole = isolate->heap()->the_hole_value();
+ USE(the_hole);
+ while (true) {
+ Object* element = KeyAt(entry);
+ // Empty entry. Uses raw unchecked accessors because it is called by the
+ // string table during bootstrapping.
+ if (element == undefined) break;
+ if (!(Shape::kNeedsHoleCheck && the_hole == element)) {
+ if (Shape::IsMatch(key, element)) return entry;
+ }
+ entry = NextProbe(entry, count++, capacity);
+ }
+ return kNotFound;
+}
+
template <typename KeyT>
bool BaseShape<KeyT>::IsLive(Isolate* isolate, Object* k) {
Heap* heap = isolate->heap();
return k != heap->the_hole_value() && k != heap->undefined_value();
}
+template <typename Derived, typename Shape>
+HashTable<Derived, Shape>* HashTable<Derived, Shape>::cast(Object* obj) {
+ SLOW_DCHECK(obj->IsHashTable());
+ return reinterpret_cast<HashTable*>(obj);
+}
+
+template <typename Derived, typename Shape>
+const HashTable<Derived, Shape>* HashTable<Derived, Shape>::cast(
+ const Object* obj) {
+ SLOW_DCHECK(obj->IsHashTable());
+ return reinterpret_cast<const HashTable*>(obj);
+}
+
+bool ObjectHashSet::Has(Isolate* isolate, Handle<Object> key, int32_t hash) {
+ return FindEntry(isolate, key, hash) != kNotFound;
+}
+
+bool ObjectHashSet::Has(Isolate* isolate, Handle<Object> key) {
+ Object* hash = key->GetHash();
+ if (!hash->IsSmi()) return false;
+ return FindEntry(isolate, key, Smi::ToInt(hash)) != kNotFound;
+}
+
int OrderedHashSet::GetMapRootIndex() {
return Heap::kOrderedHashSetMapRootIndex;
}
@@ -25,6 +129,11 @@ int OrderedHashMap::GetMapRootIndex() {
return Heap::kOrderedHashMapMapRootIndex;
}
+inline Object* OrderedHashMap::ValueAt(int entry) {
+ DCHECK_LT(entry, this->UsedCapacity());
+ return get(EntryToIndex(entry) + kValueOffset);
+}
+
} // namespace internal
} // namespace v8
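
FindEntry above walks an open-addressed probe sequence; ComputeCapacity
rounds capacities up to a power of two, which lets the probe functions use
'& (capacity - 1)' instead of a modulo. A sketch of that probe scheme
(step sizes grow by one each round, i.e. triangular-number probing):

    #include <cstdint>

    uint32_t FirstProbe(uint32_t hash, uint32_t capacity) {
      return hash & (capacity - 1);
    }
    uint32_t NextProbe(uint32_t last, uint32_t count, uint32_t capacity) {
      return (last + count) & (capacity - 1);
    }
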
diff --git a/deps/v8/src/objects/hash-table.h b/deps/v8/src/objects/hash-table.h
index 9b7ac5deb3..a058b7df39 100644
--- a/deps/v8/src/objects/hash-table.h
+++ b/deps/v8/src/objects/hash-table.h
@@ -5,10 +5,9 @@
#ifndef V8_OBJECTS_HASH_TABLE_H_
#define V8_OBJECTS_HASH_TABLE_H_
-#include "src/objects.h"
-
#include "src/base/compiler-specific.h"
#include "src/globals.h"
+#include "src/objects/fixed-array.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -896,37 +895,6 @@ class OrderedHashTableIterator : public JSCollectionIterator {
DISALLOW_IMPLICIT_CONSTRUCTORS(OrderedHashTableIterator);
};
-
-class JSSetIterator
- : public OrderedHashTableIterator<JSSetIterator, OrderedHashSet> {
- public:
- // Dispatched behavior.
- DECL_PRINTER(JSSetIterator)
- DECL_VERIFIER(JSSetIterator)
-
- DECL_CAST(JSSetIterator)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSSetIterator);
-};
-
-class JSMapIterator
- : public OrderedHashTableIterator<JSMapIterator, OrderedHashMap> {
- public:
- // Dispatched behavior.
- DECL_PRINTER(JSMapIterator)
- DECL_VERIFIER(JSMapIterator)
-
- DECL_CAST(JSMapIterator)
-
- // Returns the current value of the iterator. This should only be called when
- // |HasMore| returns true.
- inline Object* CurrentValue();
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSMapIterator);
-};
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/js-array-inl.h b/deps/v8/src/objects/js-array-inl.h
index 6bba2f0054..1128e190b2 100644
--- a/deps/v8/src/objects/js-array-inl.h
+++ b/deps/v8/src/objects/js-array-inl.h
@@ -204,6 +204,15 @@ void JSTypedArray::set_length(Object* value, WriteBarrierMode mode) {
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kLengthOffset, value, mode);
}
+bool JSTypedArray::HasJSTypedArrayPrototype(Isolate* isolate) {
+ DisallowHeapAllocation no_gc;
+ Object* proto = map()->prototype();
+ if (!proto->IsJSObject()) return false;
+
+ JSObject* proto_obj = JSObject::cast(proto);
+ return proto_obj->map()->prototype() == *isolate->typed_array_prototype();
+}
+
// static
MaybeHandle<JSTypedArray> JSTypedArray::Validate(Isolate* isolate,
Handle<Object> receiver,
@@ -227,6 +236,26 @@ MaybeHandle<JSTypedArray> JSTypedArray::Validate(Isolate* isolate,
return array;
}
+// static
+Handle<JSFunction> JSTypedArray::DefaultConstructor(
+ Isolate* isolate, Handle<JSTypedArray> exemplar) {
+ Handle<JSFunction> default_ctor = isolate->uint8_array_fun();
+ switch (exemplar->type()) {
+#define TYPED_ARRAY_CTOR(Type, type, TYPE, ctype, size) \
+ case kExternal##Type##Array: { \
+ default_ctor = isolate->type##_array_fun(); \
+ break; \
+ }
+
+ TYPED_ARRAYS(TYPED_ARRAY_CTOR)
+#undef TYPED_ARRAY_CTOR
+ default:
+ UNREACHABLE();
+ }
+
+ return default_ctor;
+}
+
#ifdef VERIFY_HEAP
ACCESSORS(JSTypedArray, raw_length, Object, kLengthOffset)
#endif
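// [editor's sketch] DefaultConstructor() above relies on the TYPED_ARRAYS
// X-macro to stamp out one switch case per element kind, mapping the
// exemplar's type to the matching constructor. A minimal standalone version
// of that expansion pattern (the type list and names here are illustrative,
// not V8's):
#include <cstdio>

#define EXAMPLE_TYPES(V) \
  V(Uint8)               \
  V(Int32)               \
  V(Float64)

enum class Kind {
#define DECLARE_KIND(Name) k##Name,
  EXAMPLE_TYPES(DECLARE_KIND)
#undef DECLARE_KIND
};

const char* CtorName(Kind kind) {
  switch (kind) {
#define KIND_CASE(Name) \
  case Kind::k##Name:   \
    return #Name "Array";
    EXAMPLE_TYPES(KIND_CASE)
#undef KIND_CASE
  }
  return "unknown";  // mirrors the UNREACHABLE() default above
}

int main() { std::puts(CtorName(Kind::kInt32)); }  // prints "Int32Array"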
diff --git a/deps/v8/src/objects/js-array.h b/deps/v8/src/objects/js-array.h
index a2d13a766d..806c275c8f 100644
--- a/deps/v8/src/objects/js-array.h
+++ b/deps/v8/src/objects/js-array.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_JS_ARRAY_H_
#include "src/objects.h"
+#include "src/objects/fixed-array.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -298,9 +299,12 @@ class JSTypedArray : public JSArrayBufferView {
Handle<JSArrayBuffer> GetBuffer();
+ inline bool HasJSTypedArrayPrototype(Isolate* isolate);
static inline MaybeHandle<JSTypedArray> Validate(Isolate* isolate,
Handle<Object> receiver,
const char* method_name);
+ static inline Handle<JSFunction> DefaultConstructor(
+ Isolate* isolate, Handle<JSTypedArray> exemplar);
// ES7 section 22.2.4.6 Create ( constructor, argumentList )
static MaybeHandle<JSTypedArray> Create(Isolate* isolate,
Handle<Object> default_ctor, int argc,
diff --git a/deps/v8/src/objects/js-collection-inl.h b/deps/v8/src/objects/js-collection-inl.h
new file mode 100644
index 0000000000..7ad24bcf12
--- /dev/null
+++ b/deps/v8/src/objects/js-collection-inl.h
@@ -0,0 +1,49 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_JS_COLLECTION_INL_H_
+#define V8_OBJECTS_JS_COLLECTION_INL_H_
+
+#include "src/objects/js-collection.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+ACCESSORS(JSCollection, table, Object, kTableOffset)
+ACCESSORS(JSCollectionIterator, table, Object, kTableOffset)
+ACCESSORS(JSCollectionIterator, index, Object, kIndexOffset)
+
+ACCESSORS(JSWeakCollection, table, Object, kTableOffset)
+ACCESSORS(JSWeakCollection, next, Object, kNextOffset)
+
+TYPE_CHECKER(JSMap, JS_MAP_TYPE)
+TYPE_CHECKER(JSSet, JS_SET_TYPE)
+TYPE_CHECKER(JSWeakMap, JS_WEAK_MAP_TYPE)
+TYPE_CHECKER(JSWeakSet, JS_WEAK_SET_TYPE)
+
+CAST_ACCESSOR(JSSet)
+CAST_ACCESSOR(JSSetIterator)
+CAST_ACCESSOR(JSMap)
+CAST_ACCESSOR(JSMapIterator)
+CAST_ACCESSOR(JSWeakCollection)
+CAST_ACCESSOR(JSWeakMap)
+CAST_ACCESSOR(JSWeakSet)
+
+Object* JSMapIterator::CurrentValue() {
+ OrderedHashMap* table(OrderedHashMap::cast(this->table()));
+ int index = Smi::ToInt(this->index());
+ Object* value = table->ValueAt(index);
+ DCHECK(!value->IsTheHole(table->GetIsolate()));
+ return value;
+}
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_COLLECTION_INL_H_
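// [editor's sketch] JSMapIterator::CurrentValue() above reads through the
// (table, index) pair the iterator carries and asserts the slot is not the
// "hole" sentinel that marks deleted entries. A standalone analogue, with
// illustrative names and an int sentinel standing in for V8's hole object:
#include <cassert>
#include <vector>

constexpr int kTheHole = -1;  // stand-in for the deleted-entry sentinel

struct MapIteratorSketch {
  const std::vector<int>* table;  // backing table of values
  int index = 0;                  // current entry, advanced by iteration

  int CurrentValue() const {
    int value = (*table)[index];
    assert(value != kTheHole);  // callers must check HasMore() first
    return value;
  }
};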
diff --git a/deps/v8/src/objects/js-collection.h b/deps/v8/src/objects/js-collection.h
new file mode 100644
index 0000000000..0777ccf1bd
--- /dev/null
+++ b/deps/v8/src/objects/js-collection.h
@@ -0,0 +1,162 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_JS_COLLECTION_H_
+#define V8_OBJECTS_JS_COLLECTION_H_
+
+#include "src/objects.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+class JSCollection : public JSObject {
+ public:
+ // [table]: the backing hash table
+ DECL_ACCESSORS(table, Object)
+
+ static const int kTableOffset = JSObject::kHeaderSize;
+ static const int kSize = kTableOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSCollection);
+};
+
+// The JSSet describes ECMAScript Harmony sets
+class JSSet : public JSCollection {
+ public:
+ DECL_CAST(JSSet)
+
+ static void Initialize(Handle<JSSet> set, Isolate* isolate);
+ static void Clear(Handle<JSSet> set);
+
+ // Dispatched behavior.
+ DECL_PRINTER(JSSet)
+ DECL_VERIFIER(JSSet)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSSet);
+};
+
+class JSSetIterator
+ : public OrderedHashTableIterator<JSSetIterator, OrderedHashSet> {
+ public:
+ // Dispatched behavior.
+ DECL_PRINTER(JSSetIterator)
+ DECL_VERIFIER(JSSetIterator)
+
+ DECL_CAST(JSSetIterator)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSSetIterator);
+};
+
+// The JSMap describes ECMAScript Harmony maps
+class JSMap : public JSCollection {
+ public:
+ DECL_CAST(JSMap)
+
+ static void Initialize(Handle<JSMap> map, Isolate* isolate);
+ static void Clear(Handle<JSMap> map);
+
+ // Dispatched behavior.
+ DECL_PRINTER(JSMap)
+ DECL_VERIFIER(JSMap)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSMap);
+};
+
+class JSMapIterator
+ : public OrderedHashTableIterator<JSMapIterator, OrderedHashMap> {
+ public:
+ // Dispatched behavior.
+ DECL_PRINTER(JSMapIterator)
+ DECL_VERIFIER(JSMapIterator)
+
+ DECL_CAST(JSMapIterator)
+
+ // Returns the current value of the iterator. This should only be called when
+ // |HasMore| returns true.
+ inline Object* CurrentValue();
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSMapIterator);
+};
+
+// Base class for both JSWeakMap and JSWeakSet
+class JSWeakCollection : public JSObject {
+ public:
+ DECL_CAST(JSWeakCollection)
+
+ // [table]: the backing hash table mapping keys to values.
+ DECL_ACCESSORS(table, Object)
+
+  // [next]: linked list of weak collections encountered during GC.
+ DECL_ACCESSORS(next, Object)
+
+ static void Initialize(Handle<JSWeakCollection> collection, Isolate* isolate);
+ static void Set(Handle<JSWeakCollection> collection, Handle<Object> key,
+ Handle<Object> value, int32_t hash);
+ static bool Delete(Handle<JSWeakCollection> collection, Handle<Object> key,
+ int32_t hash);
+ static Handle<JSArray> GetEntries(Handle<JSWeakCollection> holder,
+ int max_entries);
+
+ static const int kTableOffset = JSObject::kHeaderSize;
+ static const int kNextOffset = kTableOffset + kPointerSize;
+ static const int kSize = kNextOffset + kPointerSize;
+
+ // Visiting policy defines whether the table and next collection fields
+ // should be visited or not.
+ enum BodyVisitingPolicy { kIgnoreWeakness, kRespectWeakness };
+
+  // Iterates the collection object according to the visiting policy.
+ template <BodyVisitingPolicy>
+ class BodyDescriptorImpl;
+
+ // Visit the whole object.
+ typedef BodyDescriptorImpl<kIgnoreWeakness> BodyDescriptor;
+
+ // Don't visit table and next collection fields.
+ typedef BodyDescriptorImpl<kRespectWeakness> BodyDescriptorWeak;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSWeakCollection);
+};
+
+// The JSWeakMap describes ECMAScript Harmony weak maps
+class JSWeakMap : public JSWeakCollection {
+ public:
+ DECL_CAST(JSWeakMap)
+
+ // Dispatched behavior.
+ DECL_PRINTER(JSWeakMap)
+ DECL_VERIFIER(JSWeakMap)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSWeakMap);
+};
+
+// The JSWeakSet describes ECMAScript Harmony weak sets
+class JSWeakSet : public JSWeakCollection {
+ public:
+ DECL_CAST(JSWeakSet)
+
+ // Dispatched behavior.
+ DECL_PRINTER(JSWeakSet)
+ DECL_VERIFIER(JSWeakSet)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSWeakSet);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_COLLECTION_H_
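// [editor's sketch] The classes above lay out their instance fields by hand,
// chaining byte offsets from the parent's size (kTableOffset starts at
// JSObject::kHeaderSize, kNextOffset follows it, and kSize caps the object).
// A standalone analogue of that accumulation, with illustrative constants:
#include <cstddef>

constexpr int kPointerSize = sizeof(void*);
constexpr int kHeaderSize = 2 * kPointerSize;  // stand-in for the parent size

struct WeakCollectionLayout {
  static constexpr int kTableOffset = kHeaderSize;
  static constexpr int kNextOffset = kTableOffset + kPointerSize;
  static constexpr int kSize = kNextOffset + kPointerSize;
};

static_assert(WeakCollectionLayout::kSize == kHeaderSize + 2 * kPointerSize,
              "two pointer-sized fields follow the header");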
diff --git a/deps/v8/src/objects/js-regexp.h b/deps/v8/src/objects/js-regexp.h
index 32c07e879e..69cd5c3104 100644
--- a/deps/v8/src/objects/js-regexp.h
+++ b/deps/v8/src/objects/js-regexp.h
@@ -144,13 +144,20 @@ DEFINE_OPERATORS_FOR_FLAGS(JSRegExp::Flags)
// After creation the result must be treated as a JSArray in all regards.
class JSRegExpResult : public JSArray {
public:
- // Offsets of object fields.
- static const int kIndexOffset = JSArray::kSize;
- static const int kInputOffset = kIndexOffset + kPointerSize;
- static const int kSize = kInputOffset + kPointerSize;
+#define REG_EXP_RESULT_FIELDS(V) \
+ V(kIndexOffset, kPointerSize) \
+ V(kInputOffset, kPointerSize) \
+ V(kGroupsOffset, kPointerSize) \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSArray::kSize, REG_EXP_RESULT_FIELDS)
+#undef REG_EXP_RESULT_FIELDS
+
// Indices of in-object properties.
static const int kIndexIndex = 0;
static const int kInputIndex = 1;
+ static const int kGroupsIndex = 2;
+ static const int kInObjectPropertyCount = 3;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSRegExpResult);
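// [editor's sketch] DEFINE_FIELD_OFFSET_CONSTANTS above turns the
// (name, size) list into cumulative offsets starting at JSArray::kSize, so
// inserting kGroupsOffset shifts kSize automatically instead of requiring
// the manual arithmetic it replaces. Rough hand expansion, with an
// illustrative base value and 8-byte pointers assumed:
enum JSRegExpResultOffsetsSketch {
  kBaseSketch = 64,  // stand-in for JSArray::kSize
  kIndexOffsetSketch = kBaseSketch,              // V(kIndexOffset, 8)
  kInputOffsetSketch = kIndexOffsetSketch + 8,   // V(kInputOffset, 8)
  kGroupsOffsetSketch = kInputOffsetSketch + 8,  // V(kGroupsOffset, 8)
  kSizeSketch = kGroupsOffsetSketch + 8          // V(kSize, 0)
};
static_assert(kSizeSketch - kBaseSketch == 3 * 8,
              "three pointer-sized fields were appended");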
diff --git a/deps/v8/src/objects/literal-objects.h b/deps/v8/src/objects/literal-objects.h
index 6fe34ffa8a..7fb0c712f2 100644
--- a/deps/v8/src/objects/literal-objects.h
+++ b/deps/v8/src/objects/literal-objects.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_LITERAL_OBJECTS_H_
#include "src/objects.h"
+#include "src/objects/fixed-array.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/map-inl.h b/deps/v8/src/objects/map-inl.h
index a5421a32ca..c78f947b3a 100644
--- a/deps/v8/src/objects/map-inl.h
+++ b/deps/v8/src/objects/map-inl.h
@@ -5,9 +5,19 @@
#ifndef V8_OBJECTS_MAP_INL_H_
#define V8_OBJECTS_MAP_INL_H_
-#include "src/field-type.h"
#include "src/objects/map.h"
+#include "src/field-type.h"
+#include "src/objects-inl.h"
+#include "src/objects/descriptor-array.h"
+#include "src/objects/shared-function-info.h"
+#include "src/property.h"
+#include "src/transitions.h"
+
+// For pulling in heap/incremental-marking.h which is needed by
+// ACCESSORS_CHECKED.
+#include "src/heap/heap-inl.h"
+
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -16,6 +26,48 @@ namespace internal {
CAST_ACCESSOR(Map)
+ACCESSORS(Map, instance_descriptors, DescriptorArray, kDescriptorsOffset)
+ACCESSORS_CHECKED(Map, layout_descriptor, LayoutDescriptor,
+ kLayoutDescriptorOffset, FLAG_unbox_double_fields)
+ACCESSORS(Map, raw_transitions, Object, kTransitionsOrPrototypeInfoOffset)
+
+// |bit_field| fields.
+BIT_FIELD_ACCESSORS(Map, bit_field, has_non_instance_prototype,
+ Map::HasNonInstancePrototypeBit)
+BIT_FIELD_ACCESSORS(Map, bit_field, is_callable, Map::IsCallableBit)
+BIT_FIELD_ACCESSORS(Map, bit_field, has_named_interceptor,
+ Map::HasNamedInterceptorBit)
+BIT_FIELD_ACCESSORS(Map, bit_field, has_indexed_interceptor,
+ Map::HasIndexedInterceptorBit)
+BIT_FIELD_ACCESSORS(Map, bit_field, is_undetectable, Map::IsUndetectableBit)
+BIT_FIELD_ACCESSORS(Map, bit_field, is_access_check_needed,
+ Map::IsAccessCheckNeededBit)
+BIT_FIELD_ACCESSORS(Map, bit_field, is_constructor, Map::IsConstructorBit)
+BIT_FIELD_ACCESSORS(Map, bit_field, has_prototype_slot,
+ Map::HasPrototypeSlotBit)
+
+// |bit_field2| fields.
+BIT_FIELD_ACCESSORS(Map, bit_field2, is_extensible, Map::IsExtensibleBit)
+BIT_FIELD_ACCESSORS(Map, bit_field2, is_prototype_map, Map::IsPrototypeMapBit)
+
+// |bit_field3| fields.
+BIT_FIELD_ACCESSORS(Map, bit_field3, owns_descriptors, Map::OwnsDescriptorsBit)
+BIT_FIELD_ACCESSORS(Map, bit_field3, has_hidden_prototype,
+ Map::HasHiddenPrototypeBit)
+BIT_FIELD_ACCESSORS(Map, bit_field3, is_deprecated, Map::IsDeprecatedBit)
+BIT_FIELD_ACCESSORS(Map, bit_field3, is_migration_target,
+ Map::IsMigrationTargetBit)
+BIT_FIELD_ACCESSORS(Map, bit_field3, is_immutable_proto,
+ Map::IsImmutablePrototypeBit)
+BIT_FIELD_ACCESSORS(Map, bit_field3, new_target_is_base,
+ Map::NewTargetIsBaseBit)
+BIT_FIELD_ACCESSORS(Map, bit_field3, may_have_interesting_symbols,
+ Map::MayHaveInterestingSymbolsBit)
+BIT_FIELD_ACCESSORS(Map, bit_field3, construction_counter,
+ Map::ConstructionCounterBits)
+
+TYPE_CHECKER(Map, MAP_TYPE)
+
InterceptorInfo* Map::GetNamedInterceptor() {
DCHECK(has_named_interceptor());
FunctionTemplateInfo* info = GetFunctionTemplateInfo();
@@ -75,6 +127,597 @@ void Map::GeneralizeIfCanHaveTransitionableFastElementsKind(
}
}
+bool Map::IsUnboxedDoubleField(FieldIndex index) const {
+ if (!FLAG_unbox_double_fields) return false;
+ if (index.is_hidden_field() || !index.is_inobject()) return false;
+ return !layout_descriptor()->IsTagged(index.property_index());
+}
+
+bool Map::TooManyFastProperties(StoreFromKeyed store_mode) const {
+ if (UnusedPropertyFields() != 0) return false;
+ if (is_prototype_map()) return false;
+ int minimum = store_mode == CERTAINLY_NOT_STORE_FROM_KEYED ? 128 : 12;
+ int limit = Max(minimum, GetInObjectProperties());
+ int external = NumberOfFields() - GetInObjectProperties();
+ return external > limit;
+}
+
+PropertyDetails Map::GetLastDescriptorDetails() const {
+ return instance_descriptors()->GetDetails(LastAdded());
+}
+
+int Map::LastAdded() const {
+ int number_of_own_descriptors = NumberOfOwnDescriptors();
+ DCHECK_GT(number_of_own_descriptors, 0);
+ return number_of_own_descriptors - 1;
+}
+
+int Map::NumberOfOwnDescriptors() const {
+ return NumberOfOwnDescriptorsBits::decode(bit_field3());
+}
+
+void Map::SetNumberOfOwnDescriptors(int number) {
+ DCHECK_LE(number, instance_descriptors()->number_of_descriptors());
+ CHECK_LE(static_cast<unsigned>(number),
+ static_cast<unsigned>(kMaxNumberOfDescriptors));
+ set_bit_field3(NumberOfOwnDescriptorsBits::update(bit_field3(), number));
+}
+
+int Map::EnumLength() const { return EnumLengthBits::decode(bit_field3()); }
+
+void Map::SetEnumLength(int length) {
+ if (length != kInvalidEnumCacheSentinel) {
+ DCHECK_LE(length, NumberOfOwnDescriptors());
+ CHECK_LE(static_cast<unsigned>(length),
+ static_cast<unsigned>(kMaxNumberOfDescriptors));
+ }
+ set_bit_field3(EnumLengthBits::update(bit_field3(), length));
+}
+
+FixedArrayBase* Map::GetInitialElements() const {
+ FixedArrayBase* result = nullptr;
+ if (has_fast_elements() || has_fast_string_wrapper_elements()) {
+ result = GetHeap()->empty_fixed_array();
+ } else if (has_fast_sloppy_arguments_elements()) {
+ result = GetHeap()->empty_sloppy_arguments_elements();
+ } else if (has_fixed_typed_array_elements()) {
+ result = GetHeap()->EmptyFixedTypedArrayForMap(this);
+ } else if (has_dictionary_elements()) {
+ result = GetHeap()->empty_slow_element_dictionary();
+ } else {
+ UNREACHABLE();
+ }
+ DCHECK(!GetHeap()->InNewSpace(result));
+ return result;
+}
+
+VisitorId Map::visitor_id() const {
+ return static_cast<VisitorId>(
+ RELAXED_READ_BYTE_FIELD(this, kVisitorIdOffset));
+}
+
+void Map::set_visitor_id(VisitorId id) {
+ CHECK_LT(static_cast<unsigned>(id), 256);
+ RELAXED_WRITE_BYTE_FIELD(this, kVisitorIdOffset, static_cast<byte>(id));
+}
+
+int Map::instance_size_in_words() const {
+ return RELAXED_READ_BYTE_FIELD(this, kInstanceSizeInWordsOffset);
+}
+
+void Map::set_instance_size_in_words(int value) {
+ RELAXED_WRITE_BYTE_FIELD(this, kInstanceSizeInWordsOffset,
+ static_cast<byte>(value));
+}
+
+int Map::instance_size() const {
+ return instance_size_in_words() << kPointerSizeLog2;
+}
+
+void Map::set_instance_size(int value) {
+ CHECK_EQ(0, value & (kPointerSize - 1));
+ value >>= kPointerSizeLog2;
+ CHECK_LT(static_cast<unsigned>(value), 256);
+ set_instance_size_in_words(value);
+}
+
+int Map::inobject_properties_start_or_constructor_function_index() const {
+ return RELAXED_READ_BYTE_FIELD(
+ this, kInObjectPropertiesStartOrConstructorFunctionIndexOffset);
+}
+
+void Map::set_inobject_properties_start_or_constructor_function_index(
+ int value) {
+ CHECK_LT(static_cast<unsigned>(value), 256);
+ RELAXED_WRITE_BYTE_FIELD(
+ this, kInObjectPropertiesStartOrConstructorFunctionIndexOffset,
+ static_cast<byte>(value));
+}
+
+int Map::GetInObjectPropertiesStartInWords() const {
+ DCHECK(IsJSObjectMap());
+ return inobject_properties_start_or_constructor_function_index();
+}
+
+void Map::SetInObjectPropertiesStartInWords(int value) {
+ CHECK(IsJSObjectMap());
+ set_inobject_properties_start_or_constructor_function_index(value);
+}
+
+int Map::GetInObjectProperties() const {
+ DCHECK(IsJSObjectMap());
+ return instance_size_in_words() - GetInObjectPropertiesStartInWords();
+}
+
+int Map::GetConstructorFunctionIndex() const {
+ DCHECK(IsPrimitiveMap());
+ return inobject_properties_start_or_constructor_function_index();
+}
+
+void Map::SetConstructorFunctionIndex(int value) {
+ CHECK(IsPrimitiveMap());
+ set_inobject_properties_start_or_constructor_function_index(value);
+}
+
+int Map::GetInObjectPropertyOffset(int index) const {
+ return (GetInObjectPropertiesStartInWords() + index) * kPointerSize;
+}
+
+Handle<Map> Map::AddMissingTransitionsForTesting(
+ Handle<Map> split_map, Handle<DescriptorArray> descriptors,
+ Handle<LayoutDescriptor> full_layout_descriptor) {
+ return AddMissingTransitions(split_map, descriptors, full_layout_descriptor);
+}
+
+InstanceType Map::instance_type() const {
+ return static_cast<InstanceType>(
+ READ_UINT16_FIELD(this, kInstanceTypeOffset));
+}
+
+void Map::set_instance_type(InstanceType value) {
+ WRITE_UINT16_FIELD(this, kInstanceTypeOffset, value);
+}
+
+int Map::UnusedPropertyFields() const {
+ int value = used_or_unused_instance_size_in_words();
+ DCHECK_IMPLIES(!IsJSObjectMap(), value == 0);
+ int unused;
+ if (value >= JSObject::kFieldsAdded) {
+ unused = instance_size_in_words() - value;
+ } else {
+    // For out-of-object properties the "used_or_unused_instance_size_in_words"
+    // byte encodes the slack in the property array.
+ unused = value;
+ }
+ return unused;
+}
+
+int Map::used_or_unused_instance_size_in_words() const {
+ return RELAXED_READ_BYTE_FIELD(this, kUsedOrUnusedInstanceSizeInWordsOffset);
+}
+
+void Map::set_used_or_unused_instance_size_in_words(int value) {
+ CHECK_LE(static_cast<unsigned>(value), 255);
+ RELAXED_WRITE_BYTE_FIELD(this, kUsedOrUnusedInstanceSizeInWordsOffset,
+ static_cast<byte>(value));
+}
+
+int Map::UsedInstanceSize() const {
+ int words = used_or_unused_instance_size_in_words();
+ if (words < JSObject::kFieldsAdded) {
+    // All in-object properties are used and this value tracks the slack
+    // in the property array.
+ return instance_size();
+ }
+ return words * kPointerSize;
+}
+
+void Map::SetInObjectUnusedPropertyFields(int value) {
+ STATIC_ASSERT(JSObject::kFieldsAdded == JSObject::kHeaderSize / kPointerSize);
+ if (!IsJSObjectMap()) {
+ CHECK_EQ(0, value);
+ set_used_or_unused_instance_size_in_words(0);
+ DCHECK_EQ(0, UnusedPropertyFields());
+ return;
+ }
+ CHECK_LE(0, value);
+ DCHECK_LE(value, GetInObjectProperties());
+ int used_inobject_properties = GetInObjectProperties() - value;
+ set_used_or_unused_instance_size_in_words(
+ GetInObjectPropertyOffset(used_inobject_properties) / kPointerSize);
+ DCHECK_EQ(value, UnusedPropertyFields());
+}
+
+void Map::SetOutOfObjectUnusedPropertyFields(int value) {
+ STATIC_ASSERT(JSObject::kFieldsAdded == JSObject::kHeaderSize / kPointerSize);
+ CHECK_LT(static_cast<unsigned>(value), JSObject::kFieldsAdded);
+  // For out-of-object properties the "used_or_unused_instance_size_in_words"
+  // byte encodes the slack in the property array.
+ set_used_or_unused_instance_size_in_words(value);
+ DCHECK_EQ(value, UnusedPropertyFields());
+}
+
+void Map::CopyUnusedPropertyFields(Map* map) {
+ set_used_or_unused_instance_size_in_words(
+ map->used_or_unused_instance_size_in_words());
+ DCHECK_EQ(UnusedPropertyFields(), map->UnusedPropertyFields());
+}
+
+void Map::AccountAddedPropertyField() {
+ // Update used instance size and unused property fields number.
+ STATIC_ASSERT(JSObject::kFieldsAdded == JSObject::kHeaderSize / kPointerSize);
+#ifdef DEBUG
+ int new_unused = UnusedPropertyFields() - 1;
+ if (new_unused < 0) new_unused += JSObject::kFieldsAdded;
+#endif
+ int value = used_or_unused_instance_size_in_words();
+ if (value >= JSObject::kFieldsAdded) {
+ if (value == instance_size_in_words()) {
+ AccountAddedOutOfObjectPropertyField(0);
+ } else {
+ // The property is added in-object, so simply increment the counter.
+ set_used_or_unused_instance_size_in_words(value + 1);
+ }
+ } else {
+ AccountAddedOutOfObjectPropertyField(value);
+ }
+ DCHECK_EQ(new_unused, UnusedPropertyFields());
+}
+
+void Map::AccountAddedOutOfObjectPropertyField(int unused_in_property_array) {
+ unused_in_property_array--;
+ if (unused_in_property_array < 0) {
+ unused_in_property_array += JSObject::kFieldsAdded;
+ }
+ CHECK_LT(static_cast<unsigned>(unused_in_property_array),
+ JSObject::kFieldsAdded);
+ set_used_or_unused_instance_size_in_words(unused_in_property_array);
+ DCHECK_EQ(unused_in_property_array, UnusedPropertyFields());
+}
+
+byte Map::bit_field() const { return READ_BYTE_FIELD(this, kBitFieldOffset); }
+
+void Map::set_bit_field(byte value) {
+ WRITE_BYTE_FIELD(this, kBitFieldOffset, value);
+}
+
+byte Map::bit_field2() const { return READ_BYTE_FIELD(this, kBitField2Offset); }
+
+void Map::set_bit_field2(byte value) {
+ WRITE_BYTE_FIELD(this, kBitField2Offset, value);
+}
+
+bool Map::is_abandoned_prototype_map() const {
+ return is_prototype_map() && !owns_descriptors();
+}
+
+bool Map::should_be_fast_prototype_map() const {
+ if (!prototype_info()->IsPrototypeInfo()) return false;
+ return PrototypeInfo::cast(prototype_info())->should_be_fast_map();
+}
+
+void Map::set_elements_kind(ElementsKind elements_kind) {
+ CHECK_LT(static_cast<int>(elements_kind), kElementsKindCount);
+ set_bit_field2(Map::ElementsKindBits::update(bit_field2(), elements_kind));
+}
+
+ElementsKind Map::elements_kind() const {
+ return Map::ElementsKindBits::decode(bit_field2());
+}
+
+bool Map::has_fast_smi_elements() const {
+ return IsSmiElementsKind(elements_kind());
+}
+
+bool Map::has_fast_object_elements() const {
+ return IsObjectElementsKind(elements_kind());
+}
+
+bool Map::has_fast_smi_or_object_elements() const {
+ return IsSmiOrObjectElementsKind(elements_kind());
+}
+
+bool Map::has_fast_double_elements() const {
+ return IsDoubleElementsKind(elements_kind());
+}
+
+bool Map::has_fast_elements() const {
+ return IsFastElementsKind(elements_kind());
+}
+
+bool Map::has_sloppy_arguments_elements() const {
+ return IsSloppyArgumentsElementsKind(elements_kind());
+}
+
+bool Map::has_fast_sloppy_arguments_elements() const {
+ return elements_kind() == FAST_SLOPPY_ARGUMENTS_ELEMENTS;
+}
+
+bool Map::has_fast_string_wrapper_elements() const {
+ return elements_kind() == FAST_STRING_WRAPPER_ELEMENTS;
+}
+
+bool Map::has_fixed_typed_array_elements() const {
+ return IsFixedTypedArrayElementsKind(elements_kind());
+}
+
+bool Map::has_dictionary_elements() const {
+ return IsDictionaryElementsKind(elements_kind());
+}
+
+void Map::set_is_dictionary_map(bool value) {
+ uint32_t new_bit_field3 = IsDictionaryMapBit::update(bit_field3(), value);
+ new_bit_field3 = IsUnstableBit::update(new_bit_field3, value);
+ set_bit_field3(new_bit_field3);
+}
+
+bool Map::is_dictionary_map() const {
+ return IsDictionaryMapBit::decode(bit_field3());
+}
+
+void Map::mark_unstable() {
+ set_bit_field3(IsUnstableBit::update(bit_field3(), true));
+}
+
+bool Map::is_stable() const { return !IsUnstableBit::decode(bit_field3()); }
+
+bool Map::CanBeDeprecated() const {
+ int descriptor = LastAdded();
+ for (int i = 0; i <= descriptor; i++) {
+ PropertyDetails details = instance_descriptors()->GetDetails(i);
+ if (details.representation().IsNone()) return true;
+ if (details.representation().IsSmi()) return true;
+ if (details.representation().IsDouble()) return true;
+ if (details.representation().IsHeapObject()) return true;
+ if (details.kind() == kData && details.location() == kDescriptor) {
+ return true;
+ }
+ }
+ return false;
+}
+
+void Map::NotifyLeafMapLayoutChange() {
+ if (is_stable()) {
+ mark_unstable();
+ dependent_code()->DeoptimizeDependentCodeGroup(
+ GetIsolate(), DependentCode::kPrototypeCheckGroup);
+ }
+}
+
+bool Map::CanTransition() const {
+ // Only JSObject and subtypes have map transitions and back pointers.
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_OBJECT_TYPE);
+ return instance_type() >= FIRST_JS_OBJECT_TYPE;
+}
+
+bool Map::IsBooleanMap() const { return this == GetHeap()->boolean_map(); }
+bool Map::IsPrimitiveMap() const {
+ STATIC_ASSERT(FIRST_PRIMITIVE_TYPE == FIRST_TYPE);
+ return instance_type() <= LAST_PRIMITIVE_TYPE;
+}
+bool Map::IsJSReceiverMap() const {
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ return instance_type() >= FIRST_JS_RECEIVER_TYPE;
+}
+bool Map::IsJSObjectMap() const {
+ STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
+ return instance_type() >= FIRST_JS_OBJECT_TYPE;
+}
+bool Map::IsJSArrayMap() const { return instance_type() == JS_ARRAY_TYPE; }
+bool Map::IsJSFunctionMap() const {
+ return instance_type() == JS_FUNCTION_TYPE;
+}
+bool Map::IsStringMap() const { return instance_type() < FIRST_NONSTRING_TYPE; }
+bool Map::IsJSProxyMap() const { return instance_type() == JS_PROXY_TYPE; }
+bool Map::IsJSGlobalProxyMap() const {
+ return instance_type() == JS_GLOBAL_PROXY_TYPE;
+}
+bool Map::IsJSGlobalObjectMap() const {
+ return instance_type() == JS_GLOBAL_OBJECT_TYPE;
+}
+bool Map::IsJSTypedArrayMap() const {
+ return instance_type() == JS_TYPED_ARRAY_TYPE;
+}
+bool Map::IsJSDataViewMap() const {
+ return instance_type() == JS_DATA_VIEW_TYPE;
+}
+
+Object* Map::prototype() const { return READ_FIELD(this, kPrototypeOffset); }
+
+void Map::set_prototype(Object* value, WriteBarrierMode mode) {
+ DCHECK(value->IsNull(GetIsolate()) || value->IsJSReceiver());
+ WRITE_FIELD(this, kPrototypeOffset, value);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kPrototypeOffset, value, mode);
+}
+
+LayoutDescriptor* Map::layout_descriptor_gc_safe() const {
+ DCHECK(FLAG_unbox_double_fields);
+ Object* layout_desc = RELAXED_READ_FIELD(this, kLayoutDescriptorOffset);
+ return LayoutDescriptor::cast_gc_safe(layout_desc);
+}
+
+bool Map::HasFastPointerLayout() const {
+ DCHECK(FLAG_unbox_double_fields);
+ Object* layout_desc = RELAXED_READ_FIELD(this, kLayoutDescriptorOffset);
+ return LayoutDescriptor::IsFastPointerLayout(layout_desc);
+}
+
+void Map::UpdateDescriptors(DescriptorArray* descriptors,
+ LayoutDescriptor* layout_desc) {
+ set_instance_descriptors(descriptors);
+ if (FLAG_unbox_double_fields) {
+ if (layout_descriptor()->IsSlowLayout()) {
+ set_layout_descriptor(layout_desc);
+ }
+#ifdef VERIFY_HEAP
+ // TODO(ishell): remove these checks from VERIFY_HEAP mode.
+ if (FLAG_verify_heap) {
+ CHECK(layout_descriptor()->IsConsistentWithMap(this));
+ CHECK_EQ(Map::GetVisitorId(this), visitor_id());
+ }
+#else
+ SLOW_DCHECK(layout_descriptor()->IsConsistentWithMap(this));
+ DCHECK(visitor_id() == Map::GetVisitorId(this));
+#endif
+ }
+}
+
+void Map::InitializeDescriptors(DescriptorArray* descriptors,
+ LayoutDescriptor* layout_desc) {
+ int len = descriptors->number_of_descriptors();
+ set_instance_descriptors(descriptors);
+ SetNumberOfOwnDescriptors(len);
+
+ if (FLAG_unbox_double_fields) {
+ set_layout_descriptor(layout_desc);
+#ifdef VERIFY_HEAP
+ // TODO(ishell): remove these checks from VERIFY_HEAP mode.
+ if (FLAG_verify_heap) {
+ CHECK(layout_descriptor()->IsConsistentWithMap(this));
+ }
+#else
+ SLOW_DCHECK(layout_descriptor()->IsConsistentWithMap(this));
+#endif
+ set_visitor_id(Map::GetVisitorId(this));
+ }
+}
+
+void Map::set_bit_field3(uint32_t bits) {
+ if (kInt32Size != kPointerSize) {
+ WRITE_UINT32_FIELD(this, kBitField3Offset + kInt32Size, 0);
+ }
+ WRITE_UINT32_FIELD(this, kBitField3Offset, bits);
+}
+
+uint32_t Map::bit_field3() const {
+ return READ_UINT32_FIELD(this, kBitField3Offset);
+}
+
+LayoutDescriptor* Map::GetLayoutDescriptor() const {
+ return FLAG_unbox_double_fields ? layout_descriptor()
+ : LayoutDescriptor::FastPointerLayout();
+}
+
+void Map::AppendDescriptor(Descriptor* desc) {
+ DescriptorArray* descriptors = instance_descriptors();
+ int number_of_own_descriptors = NumberOfOwnDescriptors();
+ DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
+ descriptors->Append(desc);
+ SetNumberOfOwnDescriptors(number_of_own_descriptors + 1);
+
+  // Properly mark the map if {desc} is an "interesting symbol".
+ if (desc->GetKey()->IsInterestingSymbol()) {
+ set_may_have_interesting_symbols(true);
+ }
+ PropertyDetails details = desc->GetDetails();
+ if (details.location() == kField) {
+ DCHECK_GT(UnusedPropertyFields(), 0);
+ AccountAddedPropertyField();
+ }
+
+// This function does not support appending double field descriptors and
+// should never try to (otherwise the layout descriptor must be updated too).
+#ifdef DEBUG
+ DCHECK(details.location() != kField || !details.representation().IsDouble());
+#endif
+}
+
+Object* Map::GetBackPointer() const {
+ Object* object = constructor_or_backpointer();
+ if (object->IsMap()) {
+ return object;
+ }
+ return GetIsolate()->heap()->undefined_value();
+}
+
+Map* Map::ElementsTransitionMap() {
+ DisallowHeapAllocation no_gc;
+ return TransitionsAccessor(this, &no_gc)
+ .SearchSpecial(GetHeap()->elements_transition_symbol());
+}
+
+Object* Map::prototype_info() const {
+ DCHECK(is_prototype_map());
+ return READ_FIELD(this, Map::kTransitionsOrPrototypeInfoOffset);
+}
+
+void Map::set_prototype_info(Object* value, WriteBarrierMode mode) {
+ CHECK(is_prototype_map());
+ WRITE_FIELD(this, Map::kTransitionsOrPrototypeInfoOffset, value);
+ CONDITIONAL_WRITE_BARRIER(
+ GetHeap(), this, Map::kTransitionsOrPrototypeInfoOffset, value, mode);
+}
+
+void Map::SetBackPointer(Object* value, WriteBarrierMode mode) {
+ CHECK_GE(instance_type(), FIRST_JS_RECEIVER_TYPE);
+ CHECK(value->IsMap());
+ CHECK(GetBackPointer()->IsUndefined(GetIsolate()));
+ CHECK_IMPLIES(value->IsMap(), Map::cast(value)->GetConstructor() ==
+ constructor_or_backpointer());
+ set_constructor_or_backpointer(value, mode);
+}
+
+ACCESSORS(Map, dependent_code, DependentCode, kDependentCodeOffset)
+ACCESSORS(Map, weak_cell_cache, Object, kWeakCellCacheOffset)
+ACCESSORS(Map, constructor_or_backpointer, Object,
+ kConstructorOrBackPointerOffset)
+
+Object* Map::GetConstructor() const {
+ Object* maybe_constructor = constructor_or_backpointer();
+ // Follow any back pointers.
+ while (maybe_constructor->IsMap()) {
+ maybe_constructor =
+ Map::cast(maybe_constructor)->constructor_or_backpointer();
+ }
+ return maybe_constructor;
+}
+
+FunctionTemplateInfo* Map::GetFunctionTemplateInfo() const {
+ Object* constructor = GetConstructor();
+ if (constructor->IsJSFunction()) {
+ DCHECK(JSFunction::cast(constructor)->shared()->IsApiFunction());
+ return JSFunction::cast(constructor)->shared()->get_api_func_data();
+ }
+ DCHECK(constructor->IsFunctionTemplateInfo());
+ return FunctionTemplateInfo::cast(constructor);
+}
+
+void Map::SetConstructor(Object* constructor, WriteBarrierMode mode) {
+ // Never overwrite a back pointer with a constructor.
+ CHECK(!constructor_or_backpointer()->IsMap());
+ set_constructor_or_backpointer(constructor, mode);
+}
+
+Handle<Map> Map::CopyInitialMap(Handle<Map> map) {
+ return CopyInitialMap(map, map->instance_size(), map->GetInObjectProperties(),
+ map->UnusedPropertyFields());
+}
+
+bool Map::IsInobjectSlackTrackingInProgress() const {
+ return construction_counter() != Map::kNoSlackTracking;
+}
+
+void Map::InobjectSlackTrackingStep() {
+ // Slack tracking should only be performed on an initial map.
+ DCHECK(GetBackPointer()->IsUndefined(GetIsolate()));
+ if (!IsInobjectSlackTrackingInProgress()) return;
+ int counter = construction_counter();
+ set_construction_counter(counter - 1);
+ if (counter == kSlackTrackingCounterEnd) {
+ CompleteInobjectSlackTracking();
+ }
+}
+
+int Map::SlackForArraySize(int old_size, int size_limit) {
+ const int max_slack = size_limit - old_size;
+ CHECK_LE(0, max_slack);
+ if (old_size < 4) {
+ DCHECK_LE(1, max_slack);
+ return 1;
+ }
+ return Min(max_slack, old_size / 4);
+}
+
int NormalizedMapCache::GetIndex(Handle<Map> map) {
return map->Hash() % NormalizedMapCache::kEntries;
}
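// [editor's sketch] The accessors above overload one byte: values >=
// JSObject::kFieldsAdded record the used instance size in words, while
// smaller values record the slack left in the out-of-object property array
// once every in-object slot is taken. A standalone decoder for that scheme
// (the kFieldsAdded value is illustrative):
#include <cassert>

constexpr int kFieldsAdded = 3;  // stand-in for JSObject::kFieldsAdded

int UnusedPropertyFieldsSketch(int used_or_unused,
                               int instance_size_in_words) {
  if (used_or_unused >= kFieldsAdded) {
    // The byte holds the used in-object size; the slack is what remains.
    return instance_size_in_words - used_or_unused;
  }
  // The byte holds the property-array slack directly.
  return used_or_unused;
}

int main() {
  // A 10-word instance with 8 words used leaves 2 unused in-object fields.
  assert(UnusedPropertyFieldsSketch(8, 10) == 2);
  // A raw value of 2 (< kFieldsAdded) means 2 free property-array slots.
  assert(UnusedPropertyFieldsSketch(2, 10) == 2);
}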
diff --git a/deps/v8/src/objects/map.h b/deps/v8/src/objects/map.h
index d9a0a73158..bf0d843884 100644
--- a/deps/v8/src/objects/map.h
+++ b/deps/v8/src/objects/map.h
@@ -194,8 +194,7 @@ class Map : public HeapObject {
inline InterceptorInfo* GetIndexedInterceptor();
// Instance type.
- inline InstanceType instance_type() const;
- inline void set_instance_type(InstanceType value);
+ DECL_PRIMITIVE_ACCESSORS(instance_type, InstanceType)
// Returns the size of the used in-object area including object header
// (only used for JSObject in fast mode, for the other kinds of objects it
@@ -214,50 +213,69 @@ class Map : public HeapObject {
inline void AccountAddedOutOfObjectPropertyField(
int unused_in_property_array);
+ //
// Bit field.
- inline byte bit_field() const;
- inline void set_bit_field(byte value);
+ //
+ DECL_PRIMITIVE_ACCESSORS(bit_field, byte)
+
+// Bit positions for |bit_field|.
+#define MAP_BIT_FIELD_FIELDS(V, _) \
+ V(HasNonInstancePrototypeBit, bool, 1, _) \
+ V(IsCallableBit, bool, 1, _) \
+ V(HasNamedInterceptorBit, bool, 1, _) \
+ V(HasIndexedInterceptorBit, bool, 1, _) \
+ V(IsUndetectableBit, bool, 1, _) \
+ V(IsAccessCheckNeededBit, bool, 1, _) \
+ V(IsConstructorBit, bool, 1, _) \
+ V(HasPrototypeSlotBit, bool, 1, _)
+
+ DEFINE_BIT_FIELDS(MAP_BIT_FIELD_FIELDS)
+#undef MAP_BIT_FIELD_FIELDS
+ //
// Bit field 2.
- inline byte bit_field2() const;
- inline void set_bit_field2(byte value);
+ //
+ DECL_PRIMITIVE_ACCESSORS(bit_field2, byte)
+// Bit positions for |bit_field2|.
+#define MAP_BIT_FIELD2_FIELDS(V, _) \
+ /* One bit is still free here. */ \
+ V(IsExtensibleBit, bool, 1, _) \
+ V(IsPrototypeMapBit, bool, 1, _) \
+ V(ElementsKindBits, ElementsKind, 5, _)
+
+ DEFINE_BIT_FIELDS(MAP_BIT_FIELD2_FIELDS)
+#undef MAP_BIT_FIELD2_FIELDS
+
+ //
// Bit field 3.
- inline uint32_t bit_field3() const;
- inline void set_bit_field3(uint32_t bits);
-
- class EnumLengthBits : public BitField<int, 0, kDescriptorIndexBitCount> {
- }; // NOLINT
- class NumberOfOwnDescriptorsBits
- : public BitField<int, kDescriptorIndexBitCount,
- kDescriptorIndexBitCount> {}; // NOLINT
- STATIC_ASSERT(kDescriptorIndexBitCount + kDescriptorIndexBitCount == 20);
- class DictionaryMap : public BitField<bool, 20, 1> {};
- class OwnsDescriptors : public BitField<bool, 21, 1> {};
- class HasHiddenPrototype : public BitField<bool, 22, 1> {};
- class Deprecated : public BitField<bool, 23, 1> {};
- class IsUnstable : public BitField<bool, 24, 1> {};
- class IsMigrationTarget : public BitField<bool, 25, 1> {};
- class ImmutablePrototype : public BitField<bool, 26, 1> {};
- class NewTargetIsBase : public BitField<bool, 27, 1> {};
- class MayHaveInterestingSymbols : public BitField<bool, 28, 1> {};
+ //
+ DECL_PRIMITIVE_ACCESSORS(bit_field3, uint32_t)
+
+// Bit positions for |bit_field3|.
+#define MAP_BIT_FIELD3_FIELDS(V, _) \
+ V(EnumLengthBits, int, kDescriptorIndexBitCount, _) \
+ V(NumberOfOwnDescriptorsBits, int, kDescriptorIndexBitCount, _) \
+ V(IsDictionaryMapBit, bool, 1, _) \
+ V(OwnsDescriptorsBit, bool, 1, _) \
+ V(HasHiddenPrototypeBit, bool, 1, _) \
+ V(IsDeprecatedBit, bool, 1, _) \
+ V(IsUnstableBit, bool, 1, _) \
+ V(IsMigrationTargetBit, bool, 1, _) \
+ V(IsImmutablePrototypeBit, bool, 1, _) \
+ V(NewTargetIsBaseBit, bool, 1, _) \
+ V(MayHaveInterestingSymbolsBit, bool, 1, _) \
+ V(ConstructionCounterBits, int, 3, _)
+
+ DEFINE_BIT_FIELDS(MAP_BIT_FIELD3_FIELDS)
+#undef MAP_BIT_FIELD3_FIELDS
STATIC_ASSERT(NumberOfOwnDescriptorsBits::kMax >= kMaxNumberOfDescriptors);
- // Keep this bit field at the very end for better code in
- // Builtins::kJSConstructStubGeneric stub.
- // This counter is used for in-object slack tracking.
- // The in-object slack tracking is considered enabled when the counter is
- // non zero. The counter only has a valid count for initial maps. For
- // transitioned maps only kNoSlackTracking has a meaning, namely that inobject
- // slack tracking already finished for the transition tree. Any other value
- // indicates that either inobject slack tracking is still in progress, or that
- // the map isn't part of the transition tree anymore.
- class ConstructionCounter : public BitField<int, 29, 3> {};
static const int kSlackTrackingCounterStart = 7;
static const int kSlackTrackingCounterEnd = 1;
static const int kNoSlackTracking = 0;
- STATIC_ASSERT(kSlackTrackingCounterStart <= ConstructionCounter::kMax);
+ STATIC_ASSERT(kSlackTrackingCounterStart <= ConstructionCounterBits::kMax);
// Inobject slack tracking is the way to reclaim unused inobject space.
//
@@ -310,8 +328,7 @@ class Map : public HeapObject {
// property is set to a value that is not a JSObject, the prototype
// property will not be used to create instances of the function.
// See ECMA-262, 13.2.2.
- inline void set_non_instance_prototype(bool value);
- inline bool has_non_instance_prototype() const;
+ DECL_BOOLEAN_ACCESSORS(has_non_instance_prototype)
// Tells whether the instance has a [[Construct]] internal method.
// This property is implemented according to ES6, section 7.2.4.
@@ -329,12 +346,10 @@ class Map : public HeapObject {
DECL_BOOLEAN_ACCESSORS(has_hidden_prototype)
// Records and queries whether the instance has a named interceptor.
- inline void set_has_named_interceptor();
- inline bool has_named_interceptor() const;
+ DECL_BOOLEAN_ACCESSORS(has_named_interceptor)
// Records and queries whether the instance has an indexed interceptor.
- inline void set_has_indexed_interceptor();
- inline bool has_indexed_interceptor() const;
+ DECL_BOOLEAN_ACCESSORS(has_indexed_interceptor)
// Tells whether the instance is undetectable.
// An undetectable object is a special class of JSObject: 'typeof' operator
@@ -342,21 +357,18 @@ class Map : public HeapObject {
// a normal JS object. It is useful for implementing undetectable
// document.all in Firefox & Safari.
// See https://bugzilla.mozilla.org/show_bug.cgi?id=248549.
- inline void set_is_undetectable();
- inline bool is_undetectable() const;
+ DECL_BOOLEAN_ACCESSORS(is_undetectable)
// Tells whether the instance has a [[Call]] internal method.
// This property is implemented according to ES6, section 7.2.3.
- inline void set_is_callable();
- inline bool is_callable() const;
+ DECL_BOOLEAN_ACCESSORS(is_callable)
DECL_BOOLEAN_ACCESSORS(new_target_is_base)
DECL_BOOLEAN_ACCESSORS(is_extensible)
DECL_BOOLEAN_ACCESSORS(is_prototype_map)
inline bool is_abandoned_prototype_map() const;
- inline void set_elements_kind(ElementsKind elements_kind);
- inline ElementsKind elements_kind() const;
+ DECL_PRIMITIVE_ACCESSORS(elements_kind, ElementsKind)
// Tells whether the instance has fast elements that are only Smis.
inline bool has_fast_smi_elements() const;
@@ -409,6 +421,8 @@ class Map : public HeapObject {
static const int kPrototypeChainValid = 0;
static const int kPrototypeChainInvalid = 1;
+ static bool IsPrototypeChainInvalidated(Map* map);
+
// Return the map of the root of object's prototype chain.
Map* GetPrototypeChainRootMap(Isolate* isolate) const;
@@ -489,13 +503,11 @@ class Map : public HeapObject {
// normalized objects, ie objects for which HasFastProperties returns false).
// A map can never be used for both dictionary mode and fast mode JSObjects.
// False by default and for HeapObjects that are not JSObjects.
- inline void set_dictionary_map(bool value);
- inline bool is_dictionary_map() const;
+ DECL_BOOLEAN_ACCESSORS(is_dictionary_map)
// Tells whether the instance needs security checks when accessing its
// properties.
- inline void set_is_access_check_needed(bool access_check_needed);
- inline bool is_access_check_needed() const;
+ DECL_BOOLEAN_ACCESSORS(is_access_check_needed)
// [prototype]: implicit prototype object.
DECL_ACCESSORS(prototype, Object)
@@ -563,15 +575,24 @@ class Map : public HeapObject {
inline void SetEnumLength(int length);
DECL_BOOLEAN_ACCESSORS(owns_descriptors)
+
inline void mark_unstable();
inline bool is_stable() const;
- inline void set_migration_target(bool value);
- inline bool is_migration_target() const;
- inline void set_immutable_proto(bool value);
- inline bool is_immutable_proto() const;
+
+ DECL_BOOLEAN_ACCESSORS(is_migration_target)
+
+ DECL_BOOLEAN_ACCESSORS(is_immutable_proto)
+
+ // This counter is used for in-object slack tracking.
+  // In-object slack tracking is considered enabled when the counter is
+  // non-zero. The counter only has a valid count for initial maps. For
+  // transitioned maps only kNoSlackTracking has a meaning, namely that
+  // in-object slack tracking has already finished for the transition tree.
+  // Any other value indicates that either in-object slack tracking is still
+  // in progress, or that the map isn't part of the transition tree anymore.
DECL_INT_ACCESSORS(construction_counter)
- inline void deprecate();
- inline bool is_deprecated() const;
+
+ DECL_BOOLEAN_ACCESSORS(is_deprecated)
inline bool CanBeDeprecated() const;
// Returns a non-deprecated version of the input. If the input was not
// deprecated, it is directly returned. Otherwise, the non-deprecated version
@@ -759,22 +780,6 @@ class Map : public HeapObject {
STATIC_ASSERT(kInstanceTypeOffset == Internals::kMapInstanceTypeOffset);
- // Bit positions for bit field.
- static const int kHasNonInstancePrototype = 0;
- static const int kIsCallable = 1;
- static const int kHasNamedInterceptor = 2;
- static const int kHasIndexedInterceptor = 3;
- static const int kIsUndetectable = 4;
- static const int kIsAccessCheckNeeded = 5;
- static const int kIsConstructor = 6;
- static const int kHasPrototypeSlot = 7;
-
- // Bit positions for bit field 2
- static const int kIsExtensible = 0;
- // Bit 1 is free.
- class IsPrototypeMapBits : public BitField<bool, 2, 1> {};
- class ElementsKindBits : public BitField<ElementsKind, 3, 5> {};
-
typedef FixedBodyDescriptor<kPointerFieldsBeginOffset,
kPointerFieldsEndOffset, kSize>
BodyDescriptor;
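// [editor's sketch] The MAP_BIT_FIELD*_FIELDS lists above pack flags and
// small integers into bit_field/bit_field2/bit_field3 through BitField-style
// shift-and-mask accessors. A minimal standalone BitField to show the
// mechanics (kNext chains the next field's shift, as the macros do; names
// are illustrative):
#include <cstdint>

template <typename T, int kShift, int kBits>
struct BitFieldSketch {
  static constexpr uint32_t kMask = ((1u << kBits) - 1) << kShift;
  static constexpr int kNext = kShift + kBits;
  static constexpr T kMax = static_cast<T>((1u << kBits) - 1);

  static uint32_t update(uint32_t previous, T value) {
    return (previous & ~kMask) | (static_cast<uint32_t>(value) << kShift);
  }
  static T decode(uint32_t value) {
    return static_cast<T>((value & kMask) >> kShift);
  }
};

// Two one-bit flags followed by a 3-bit counter, echoing bit_field3 above.
using IsDeprecatedBitSketch = BitFieldSketch<bool, 0, 1>;
using IsUnstableBitSketch =
    BitFieldSketch<bool, IsDeprecatedBitSketch::kNext, 1>;
using CounterBitsSketch = BitFieldSketch<int, IsUnstableBitSketch::kNext, 3>;

static_assert(CounterBitsSketch::kMax == 7, "3 bits hold counters 0..7");
// Round trip: CounterBitsSketch::decode(CounterBitsSketch::update(0, 5)) == 5.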
diff --git a/deps/v8/src/objects/module.cc b/deps/v8/src/objects/module.cc
index 4040d05bca..b9d7697fb5 100644
--- a/deps/v8/src/objects/module.cc
+++ b/deps/v8/src/objects/module.cc
@@ -199,29 +199,69 @@ void Module::SetStatus(Status new_status) {
set_status(new_status);
}
+void Module::ResetGraph(Handle<Module> module) {
+ DCHECK_NE(module->status(), kInstantiating);
+ DCHECK_NE(module->status(), kEvaluating);
+ if (module->status() != kPreInstantiating) return;
+ Isolate* isolate = module->GetIsolate();
+ Handle<FixedArray> requested_modules(module->requested_modules(), isolate);
+ Reset(module);
+ for (int i = 0; i < requested_modules->length(); ++i) {
+ Handle<Object> descendant(requested_modules->get(i), isolate);
+ if (descendant->IsModule()) {
+ ResetGraph(Handle<Module>::cast(descendant));
+ } else {
+ DCHECK(descendant->IsUndefined(isolate));
+ }
+ }
+}
+
+void Module::Reset(Handle<Module> module) {
+ Isolate* isolate = module->GetIsolate();
+ Factory* factory = isolate->factory();
+
+ DCHECK(module->status() == kPreInstantiating ||
+ module->status() == kInstantiating);
+ DCHECK(module->exception()->IsTheHole(isolate));
+ DCHECK(module->import_meta()->IsTheHole(isolate));
+  // The namespace object cannot exist yet, because it is created by
+  // RunInitializationCode, which is called only after this module's SCC
+  // has been successfully instantiated.
+ DCHECK(!module->module_namespace()->IsJSModuleNamespace());
+
+ Handle<ObjectHashTable> exports =
+ ObjectHashTable::New(isolate, module->info()->RegularExportCount());
+ Handle<FixedArray> regular_exports =
+ factory->NewFixedArray(module->regular_exports()->length());
+ Handle<FixedArray> regular_imports =
+ factory->NewFixedArray(module->regular_imports()->length());
+ Handle<FixedArray> requested_modules =
+ factory->NewFixedArray(module->requested_modules()->length());
+
+ if (module->status() == kInstantiating) {
+ module->set_code(JSFunction::cast(module->code())->shared());
+ }
+#ifdef DEBUG
+ module->PrintStatusTransition(kUninstantiated);
+#endif // DEBUG
+ module->set_status(kUninstantiated);
+ module->set_exports(*exports);
+ module->set_regular_exports(*regular_exports);
+ module->set_regular_imports(*regular_imports);
+ module->set_requested_modules(*requested_modules);
+ module->set_dfs_index(-1);
+ module->set_dfs_ancestor_index(-1);
+}
+
void Module::RecordError() {
DisallowHeapAllocation no_alloc;
-
Isolate* isolate = GetIsolate();
+
+ DCHECK(exception()->IsTheHole(isolate));
Object* the_exception = isolate->pending_exception();
DCHECK(!the_exception->IsTheHole(isolate));
- switch (status()) {
- case Module::kUninstantiated:
- case Module::kPreInstantiating:
- case Module::kInstantiating:
- case Module::kEvaluating:
- break;
- case Module::kErrored:
- DCHECK_EQ(exception(), the_exception);
- return;
- default:
- UNREACHABLE();
- }
-
set_code(info());
-
- DCHECK(exception()->IsTheHole(isolate));
#ifdef DEBUG
PrintStatusTransition(Module::kErrored);
#endif // DEBUG
@@ -232,9 +272,8 @@ void Module::RecordError() {
Object* Module::GetException() {
DisallowHeapAllocation no_alloc;
DCHECK_EQ(status(), Module::kErrored);
- Object* the_exception = exception();
- DCHECK(!the_exception->IsTheHole(GetIsolate()));
- return the_exception;
+ DCHECK(!exception()->IsTheHole(GetIsolate()));
+ return exception();
}
MaybeHandle<Cell> Module::ResolveImport(Handle<Module> module,
@@ -244,29 +283,25 @@ MaybeHandle<Cell> Module::ResolveImport(Handle<Module> module,
Isolate* isolate = module->GetIsolate();
Handle<Module> requested_module(
Module::cast(module->requested_modules()->get(module_request)), isolate);
- MaybeHandle<Cell> result = Module::ResolveExport(requested_module, name, loc,
- must_resolve, resolve_set);
- if (isolate->has_pending_exception()) {
- DCHECK(result.is_null());
- if (must_resolve) module->RecordError();
- // If {must_resolve} is false and there's an exception, then either that
- // exception was already recorded where it happened, or it's the
- // kAmbiguousExport exception (see ResolveExportUsingStarExports) and the
- // culprit module is still to be determined.
- }
+ Handle<String> specifier(
+ String::cast(module->info()->module_requests()->get(module_request)),
+ isolate);
+ MaybeHandle<Cell> result = Module::ResolveExport(
+ requested_module, specifier, name, loc, must_resolve, resolve_set);
+ DCHECK_IMPLIES(isolate->has_pending_exception(), result.is_null());
return result;
}
MaybeHandle<Cell> Module::ResolveExport(Handle<Module> module,
- Handle<String> name,
+ Handle<String> module_specifier,
+ Handle<String> export_name,
MessageLocation loc, bool must_resolve,
Module::ResolveSet* resolve_set) {
- DCHECK_NE(module->status(), kErrored);
- DCHECK_NE(module->status(), kEvaluating);
DCHECK_GE(module->status(), kPreInstantiating);
+ DCHECK_NE(module->status(), kEvaluating);
Isolate* isolate = module->GetIsolate();
- Handle<Object> object(module->exports()->Lookup(name), isolate);
+ Handle<Object> object(module->exports()->Lookup(export_name), isolate);
if (object->IsCell()) {
// Already resolved (e.g. because it's a local export).
return Handle<Cell>::cast(object);
@@ -282,17 +317,18 @@ MaybeHandle<Cell> Module::ResolveExport(Handle<Module> module,
Zone* zone = resolve_set->zone();
name_set =
new (zone->New(sizeof(UnorderedStringSet))) UnorderedStringSet(zone);
- } else if (name_set->count(name)) {
+ } else if (name_set->count(export_name)) {
// Cycle detected.
if (must_resolve) {
return isolate->Throw<Cell>(
isolate->factory()->NewSyntaxError(
- MessageTemplate::kCyclicModuleDependency, name),
+ MessageTemplate::kCyclicModuleDependency, export_name,
+ module_specifier),
&loc);
}
return MaybeHandle<Cell>();
}
- name_set->insert(name);
+ name_set->insert(export_name);
}
if (object->IsModuleInfoEntry()) {
@@ -313,23 +349,24 @@ MaybeHandle<Cell> Module::ResolveExport(Handle<Module> module,
// The export table may have changed but the entry in question should be
// unchanged.
Handle<ObjectHashTable> exports(module->exports(), isolate);
- DCHECK(exports->Lookup(name)->IsModuleInfoEntry());
+ DCHECK(exports->Lookup(export_name)->IsModuleInfoEntry());
- exports = ObjectHashTable::Put(exports, name, cell);
+ exports = ObjectHashTable::Put(exports, export_name, cell);
module->set_exports(*exports);
return cell;
}
DCHECK(object->IsTheHole(isolate));
- return Module::ResolveExportUsingStarExports(module, name, loc, must_resolve,
- resolve_set);
+ return Module::ResolveExportUsingStarExports(
+ module, module_specifier, export_name, loc, must_resolve, resolve_set);
}
MaybeHandle<Cell> Module::ResolveExportUsingStarExports(
- Handle<Module> module, Handle<String> name, MessageLocation loc,
- bool must_resolve, Module::ResolveSet* resolve_set) {
+ Handle<Module> module, Handle<String> module_specifier,
+ Handle<String> export_name, MessageLocation loc, bool must_resolve,
+ Module::ResolveSet* resolve_set) {
Isolate* isolate = module->GetIsolate();
- if (!name->Equals(isolate->heap()->default_string())) {
+ if (!export_name->Equals(isolate->heap()->default_string())) {
// Go through all star exports looking for the given name. If multiple star
// exports provide the name, make sure they all map it to the same cell.
Handle<Cell> unique_cell;
@@ -346,15 +383,15 @@ MaybeHandle<Cell> Module::ResolveExportUsingStarExports(
MessageLocation new_loc(script, entry->beg_pos(), entry->end_pos());
Handle<Cell> cell;
- if (ResolveImport(module, name, entry->module_request(), new_loc, false,
- resolve_set)
+ if (ResolveImport(module, export_name, entry->module_request(), new_loc,
+ false, resolve_set)
.ToHandle(&cell)) {
if (unique_cell.is_null()) unique_cell = cell;
if (*unique_cell != *cell) {
- return isolate->Throw<Cell>(
- isolate->factory()->NewSyntaxError(
- MessageTemplate::kAmbiguousExport, name),
- &loc);
+ return isolate->Throw<Cell>(isolate->factory()->NewSyntaxError(
+ MessageTemplate::kAmbiguousExport,
+ module_specifier, export_name),
+ &loc);
}
} else if (isolate->has_pending_exception()) {
return MaybeHandle<Cell>();
@@ -364,8 +401,8 @@ MaybeHandle<Cell> Module::ResolveExportUsingStarExports(
if (!unique_cell.is_null()) {
// Found a unique star export for this name.
Handle<ObjectHashTable> exports(module->exports(), isolate);
- DCHECK(exports->Lookup(name)->IsTheHole(isolate));
- exports = ObjectHashTable::Put(exports, name, unique_cell);
+ DCHECK(exports->Lookup(export_name)->IsTheHole(isolate));
+ exports = ObjectHashTable::Put(exports, export_name, unique_cell);
module->set_exports(*exports);
return unique_cell;
}
@@ -373,9 +410,10 @@ MaybeHandle<Cell> Module::ResolveExportUsingStarExports(
// Unresolvable.
if (must_resolve) {
- return isolate->Throw<Cell>(isolate->factory()->NewSyntaxError(
- MessageTemplate::kUnresolvableExport, name),
- &loc);
+ return isolate->Throw<Cell>(
+ isolate->factory()->NewSyntaxError(MessageTemplate::kUnresolvableExport,
+ module_specifier, export_name),
+ &loc);
}
return MaybeHandle<Cell>();
}
@@ -393,27 +431,24 @@ bool Module::Instantiate(Handle<Module> module, v8::Local<v8::Context> context,
}
#endif // DEBUG
- Isolate* isolate = module->GetIsolate();
- if (module->status() == kErrored) {
- isolate->Throw(module->GetException());
- return false;
- }
-
if (!PrepareInstantiate(module, context, callback)) {
+ ResetGraph(module);
return false;
}
+ Isolate* isolate = module->GetIsolate();
Zone zone(isolate->allocator(), ZONE_NAME);
ZoneForwardList<Handle<Module>> stack(&zone);
unsigned dfs_index = 0;
if (!FinishInstantiate(module, &stack, &dfs_index, &zone)) {
for (auto& descendant : stack) {
- descendant->RecordError();
+ Reset(descendant);
}
- DCHECK_EQ(module->GetException(), isolate->pending_exception());
+ DCHECK_EQ(module->status(), kUninstantiated);
return false;
}
- DCHECK(module->status() == kInstantiated || module->status() == kEvaluated);
+ DCHECK(module->status() == kInstantiated || module->status() == kEvaluated ||
+ module->status() == kErrored);
DCHECK(stack.empty());
return true;
}
@@ -421,7 +456,6 @@ bool Module::Instantiate(Handle<Module> module, v8::Local<v8::Context> context,
bool Module::PrepareInstantiate(Handle<Module> module,
v8::Local<v8::Context> context,
v8::Module::ResolveCallback callback) {
- DCHECK_NE(module->status(), kErrored);
DCHECK_NE(module->status(), kEvaluating);
DCHECK_NE(module->status(), kInstantiating);
if (module->status() >= kPreInstantiating) return true;
@@ -439,17 +473,9 @@ bool Module::PrepareInstantiate(Handle<Module> module,
v8::Utils::ToLocal(module))
.ToLocal(&api_requested_module)) {
isolate->PromoteScheduledException();
- module->RecordError();
return false;
}
Handle<Module> requested_module = Utils::OpenHandle(*api_requested_module);
- if (requested_module->status() == kErrored) {
- // TODO(neis): Move this into callback?
- isolate->Throw(requested_module->GetException());
- module->RecordError();
- DCHECK_EQ(module->GetException(), requested_module->GetException());
- return false;
- }
requested_modules->set(i, *requested_module);
}
@@ -458,8 +484,6 @@ bool Module::PrepareInstantiate(Handle<Module> module,
Handle<Module> requested_module(Module::cast(requested_modules->get(i)),
isolate);
if (!PrepareInstantiate(requested_module, context, callback)) {
- module->RecordError();
- DCHECK_EQ(module->GetException(), requested_module->GetException());
return false;
}
}
@@ -531,7 +555,6 @@ void Module::MaybeTransitionComponent(Handle<Module> module,
bool Module::FinishInstantiate(Handle<Module> module,
ZoneForwardList<Handle<Module>>* stack,
unsigned* dfs_index, Zone* zone) {
- DCHECK_NE(module->status(), kErrored);
DCHECK_NE(module->status(), kEvaluating);
if (module->status() >= kInstantiating) return true;
DCHECK_EQ(module->status(), kPreInstantiating);
@@ -560,7 +583,6 @@ bool Module::FinishInstantiate(Handle<Module> module,
return false;
}
- DCHECK_NE(requested_module->status(), kErrored);
DCHECK_NE(requested_module->status(), kEvaluating);
DCHECK_GE(requested_module->status(), kInstantiating);
SLOW_DCHECK(
@@ -606,8 +628,8 @@ bool Module::FinishInstantiate(Handle<Module> module,
if (name->IsUndefined(isolate)) continue; // Star export.
MessageLocation loc(script, entry->beg_pos(), entry->end_pos());
ResolveSet resolve_set(zone);
- if (ResolveExport(module, Handle<String>::cast(name), loc, true,
- &resolve_set)
+ if (ResolveExport(module, Handle<String>(), Handle<String>::cast(name), loc,
+ true, &resolve_set)
.is_null()) {
return false;
}
@@ -722,7 +744,6 @@ namespace {
void FetchStarExports(Handle<Module> module, Zone* zone,
UnorderedModuleSet* visited) {
- DCHECK_NE(module->status(), Module::kErrored);
DCHECK_GE(module->status(), Module::kInstantiating);
if (module->module_namespace()->IsJSModuleNamespace()) return; // Shortcut.
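// [editor's sketch] ResetGraph() above walks the requested-modules graph and
// rolls every module still in kPreInstantiating back to kUninstantiated, so
// a failed instantiation can be retried from scratch. A standalone analogue
// of that traversal (the types and status set are illustrative; the early
// return is what terminates the recursion on shared or cyclic edges):
#include <vector>

enum class Status { kUninstantiated, kPreInstantiating, kInstantiating };

struct ModuleSketch {
  Status status = Status::kUninstantiated;
  std::vector<ModuleSketch*> requested;  // null for unresolved requests
};

void ResetGraphSketch(ModuleSketch* module) {
  if (module->status != Status::kPreInstantiating) return;
  module->status = Status::kUninstantiated;  // reset before recursing
  for (ModuleSketch* descendant : module->requested) {
    if (descendant != nullptr) ResetGraphSketch(descendant);
  }
}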
diff --git a/deps/v8/src/objects/module.h b/deps/v8/src/objects/module.h
index 7680f55313..fe374d3fc6 100644
--- a/deps/v8/src/objects/module.h
+++ b/deps/v8/src/objects/module.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_MODULE_H_
#include "src/objects.h"
+#include "src/objects/fixed-array.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -153,15 +154,17 @@ class Module : public Struct {
// exception (so check manually!).
class ResolveSet;
static MUST_USE_RESULT MaybeHandle<Cell> ResolveExport(
- Handle<Module> module, Handle<String> name, MessageLocation loc,
- bool must_resolve, ResolveSet* resolve_set);
+ Handle<Module> module, Handle<String> module_specifier,
+ Handle<String> export_name, MessageLocation loc, bool must_resolve,
+ ResolveSet* resolve_set);
static MUST_USE_RESULT MaybeHandle<Cell> ResolveImport(
Handle<Module> module, Handle<String> name, int module_request,
MessageLocation loc, bool must_resolve, ResolveSet* resolve_set);
static MUST_USE_RESULT MaybeHandle<Cell> ResolveExportUsingStarExports(
- Handle<Module> module, Handle<String> name, MessageLocation loc,
- bool must_resolve, ResolveSet* resolve_set);
+ Handle<Module> module, Handle<String> module_specifier,
+ Handle<String> export_name, MessageLocation loc, bool must_resolve,
+ ResolveSet* resolve_set);
static MUST_USE_RESULT bool PrepareInstantiate(
Handle<Module> module, v8::Local<v8::Context> context,
@@ -179,6 +182,11 @@ class Module : public Struct {
ZoneForwardList<Handle<Module>>* stack,
Status new_status);
+  // Set the module's status back to kUninstantiated and reset other internal
+  // state. This is used when instantiation fails.
+ static void Reset(Handle<Module> module);
+ static void ResetGraph(Handle<Module> module);
+
// To set status to kErrored, RecordError should be used.
void SetStatus(Status status);
void RecordError();
diff --git a/deps/v8/src/objects/object-macros.h b/deps/v8/src/objects/object-macros.h
index 5d367d351f..604942a272 100644
--- a/deps/v8/src/objects/object-macros.h
+++ b/deps/v8/src/objects/object-macros.h
@@ -54,8 +54,9 @@
#define ACCESSORS_CHECKED2(holder, name, type, offset, get_condition, \
set_condition) \
type* holder::name() const { \
+ type* value = type::cast(READ_FIELD(this, offset)); \
DCHECK(get_condition); \
- return type::cast(READ_FIELD(this, offset)); \
+ return value; \
} \
void holder::set_##name(type* value, WriteBarrierMode mode) { \
DCHECK(set_condition); \
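// [editor's sketch] The hunk above reorders the checked getter so the field
// is read into |value| before DCHECK(get_condition) runs. One plausible
// reading (an assumption; the patch does not state its motivation) is that
// the condition may now refer to the freshly loaded value. A standalone
// accessor macro in the same shape, with illustrative names:
#include <cassert>

#define CHECKED_GETTER(holder, name, type, member, get_condition) \
  type holder::name() const {                                     \
    type value = member;                                          \
    assert(get_condition); /* the check can inspect |value| */    \
    return value;                                                 \
  }

struct CounterSketch {
  int stored = 1;
  int count() const;
};

CHECKED_GETTER(CounterSketch, count, int, stored, value >= 0)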
diff --git a/deps/v8/src/objects/scope-info.h b/deps/v8/src/objects/scope-info.h
index b03b1e831e..3a8459a204 100644
--- a/deps/v8/src/objects/scope-info.h
+++ b/deps/v8/src/objects/scope-info.h
@@ -7,6 +7,7 @@
#include "src/globals.h"
#include "src/objects.h"
+#include "src/objects/fixed-array.h"
#include "src/utils.h"
// Has to be the last include (doesn't have include guards):
@@ -306,12 +307,14 @@ class ScopeInfo : public FixedArray {
class HasSimpleParametersField
: public BitField<bool, AsmModuleField::kNext, 1> {};
class FunctionKindField
- : public BitField<FunctionKind, HasSimpleParametersField::kNext, 10> {};
+ : public BitField<FunctionKind, HasSimpleParametersField::kNext, 11> {};
class HasOuterScopeInfoField
: public BitField<bool, FunctionKindField::kNext, 1> {};
class IsDebugEvaluateScopeField
: public BitField<bool, HasOuterScopeInfoField::kNext, 1> {};
+ STATIC_ASSERT(kLastFunctionKind <= FunctionKindField::kMax);
+
// Properties of variables.
class VariableModeField : public BitField<VariableMode, 0, 3> {};
class InitFlagField : public BitField<InitializationFlag, 3, 1> {};
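
Widening FunctionKindField from 10 to 11 bits is only safe because the new STATIC_ASSERT pins the largest FunctionKind inside the field. A self-contained re-creation of the BitField idiom (the real template lives in V8's utils; the enum value here is made up):

#include <cstdint>

template <typename T, int shift, int size>
struct BitField {
  static constexpr uint32_t kMax = (1u << size) - 1;
  static constexpr int kNext = shift + size;
  static constexpr uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static constexpr T decode(uint32_t bits) {
    return static_cast<T>((bits >> shift) & kMax);
  }
};

// kLastFunctionKind = 1500 is a made-up value; the point is that 10 bits
// (max 1023) would no longer fit it, while 11 bits (max 2047) do.
enum FunctionKind : uint32_t { kNormalFunction = 0, kLastFunctionKind = 1500 };

using HasSimpleParametersField = BitField<bool, 0, 1>;
using FunctionKindField =
    BitField<FunctionKind, HasSimpleParametersField::kNext, 11>;

// Fails to compile if the enum ever outgrows the field.
static_assert(kLastFunctionKind <= FunctionKindField::kMax,
              "FunctionKind no longer fits its bit field");

int main() {
  uint32_t bits = FunctionKindField::encode(kLastFunctionKind);
  return FunctionKindField::decode(bits) == kLastFunctionKind ? 0 : 1;
}
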
diff --git a/deps/v8/src/objects/script-inl.h b/deps/v8/src/objects/script-inl.h
index 2544b4e20e..c5bd407628 100644
--- a/deps/v8/src/objects/script-inl.h
+++ b/deps/v8/src/objects/script-inl.h
@@ -26,7 +26,8 @@ ACCESSORS(Script, context_data, Object, kContextOffset)
ACCESSORS(Script, wrapper, HeapObject, kWrapperOffset)
SMI_ACCESSORS(Script, type, kTypeOffset)
ACCESSORS(Script, line_ends, Object, kLineEndsOffset)
-ACCESSORS_CHECKED(Script, eval_from_shared, Object, kEvalFromSharedOffset,
+ACCESSORS_CHECKED(Script, eval_from_shared_or_wrapped_arguments, Object,
+ kEvalFromSharedOrWrappedArgumentsOffset,
this->type() != TYPE_WASM)
SMI_ACCESSORS_CHECKED(Script, eval_from_position, kEvalFromPositionOffset,
this->type() != TYPE_WASM)
@@ -35,9 +36,39 @@ SMI_ACCESSORS(Script, flags, kFlagsOffset)
ACCESSORS(Script, source_url, Object, kSourceUrlOffset)
ACCESSORS(Script, source_mapping_url, Object, kSourceMappingUrlOffset)
ACCESSORS(Script, host_defined_options, FixedArray, kHostDefinedOptionsOffset)
-ACCESSORS_CHECKED(Script, wasm_compiled_module, Object, kEvalFromSharedOffset,
+ACCESSORS_CHECKED(Script, wasm_compiled_module, Object,
+ kEvalFromSharedOrWrappedArgumentsOffset,
this->type() == TYPE_WASM)
+bool Script::is_wrapped() const {
+ return eval_from_shared_or_wrapped_arguments()->IsFixedArray();
+}
+
+bool Script::has_eval_from_shared() const {
+ return eval_from_shared_or_wrapped_arguments()->IsSharedFunctionInfo();
+}
+
+void Script::set_eval_from_shared(SharedFunctionInfo* shared,
+ WriteBarrierMode mode) {
+ DCHECK(!is_wrapped());
+ set_eval_from_shared_or_wrapped_arguments(shared, mode);
+}
+
+SharedFunctionInfo* Script::eval_from_shared() const {
+ DCHECK(has_eval_from_shared());
+ return SharedFunctionInfo::cast(eval_from_shared_or_wrapped_arguments());
+}
+
+void Script::set_wrapped_arguments(FixedArray* value, WriteBarrierMode mode) {
+ DCHECK(!has_eval_from_shared());
+ set_eval_from_shared_or_wrapped_arguments(value, mode);
+}
+
+FixedArray* Script::wrapped_arguments() const {
+ DCHECK(is_wrapped());
+ return FixedArray::cast(eval_from_shared_or_wrapped_arguments());
+}
+
Script::CompilationType Script::compilation_type() {
return BooleanBit::get(flags(), kCompilationTypeBit) ? COMPILATION_TYPE_EVAL
: COMPILATION_TYPE_HOST;
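
The accessors above overload one pointer-sized slot: it holds either the eval site's SharedFunctionInfo or, for a wrapped script, the FixedArray of arguments, discriminated purely by the dynamic type stored there. A sketch of the same discipline using std::variant in place of tagged heap pointers (names invented):

#include <cassert>
#include <variant>

struct SharedFunctionInfo { /* eval-site info */ };
struct FixedArray { /* wrapped-arguments list */ };

struct Script {
  // The single underlying slot, like eval_from_shared_or_wrapped_arguments.
  std::variant<std::monostate, SharedFunctionInfo*, FixedArray*> slot;

  bool is_wrapped() const { return std::holds_alternative<FixedArray*>(slot); }
  bool has_eval_from_shared() const {
    return std::holds_alternative<SharedFunctionInfo*>(slot);
  }

  SharedFunctionInfo* eval_from_shared() const {
    assert(has_eval_from_shared());  // mirrors the DCHECK in the real getter
    return std::get<SharedFunctionInfo*>(slot);
  }
  FixedArray* wrapped_arguments() const {
    assert(is_wrapped());
    return std::get<FixedArray*>(slot);
  }
};

int main() {
  Script s;
  FixedArray args;
  s.slot = &args;
  return s.is_wrapped() && !s.has_eval_from_shared() ? 0 : 1;
}
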
diff --git a/deps/v8/src/objects/script.h b/deps/v8/src/objects/script.h
index ae4a87914d..4d84be2262 100644
--- a/deps/v8/src/objects/script.h
+++ b/deps/v8/src/objects/script.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_SCRIPT_H_
#include "src/objects.h"
+#include "src/objects/fixed-array.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -62,9 +63,21 @@ class Script : public Struct {
// [line_ends]: FixedArray of line ends positions.
DECL_ACCESSORS(line_ends, Object)
+ DECL_ACCESSORS(eval_from_shared_or_wrapped_arguments, Object)
+
// [eval_from_shared]: for eval scripts the shared function info for the
// function from which eval was called.
- DECL_ACCESSORS(eval_from_shared, Object)
+ DECL_ACCESSORS(eval_from_shared, SharedFunctionInfo)
+
+  // [wrapped_arguments]: for wrapped scripts, the list of arguments.
+ DECL_ACCESSORS(wrapped_arguments, FixedArray)
+
+ // Whether the script is implicitly wrapped in a function.
+ inline bool is_wrapped() const;
+
+ // Whether the eval_from_shared field is set with a shared function info
+ // for the eval site.
+ inline bool has_eval_from_shared() const;
// [eval_from_position]: the source position in the code for the function
// from which eval was called, as positive integer. Or the code offset in the
@@ -118,6 +131,9 @@ class Script : public Struct {
// Retrieve source position from where eval was called.
int GetEvalPosition();
+ // Check if the script contains any Asm modules.
+ bool ContainsAsmModule();
+
// Init line_ends array with source code positions of line ends.
static void InitLineEnds(Handle<Script> script);
@@ -186,9 +202,10 @@ class Script : public Struct {
static const int kTypeOffset = kWrapperOffset + kPointerSize;
static const int kLineEndsOffset = kTypeOffset + kPointerSize;
static const int kIdOffset = kLineEndsOffset + kPointerSize;
- static const int kEvalFromSharedOffset = kIdOffset + kPointerSize;
+ static const int kEvalFromSharedOrWrappedArgumentsOffset =
+ kIdOffset + kPointerSize;
static const int kEvalFromPositionOffset =
- kEvalFromSharedOffset + kPointerSize;
+ kEvalFromSharedOrWrappedArgumentsOffset + kPointerSize;
static const int kSharedFunctionInfosOffset =
kEvalFromPositionOffset + kPointerSize;
static const int kFlagsOffset = kSharedFunctionInfosOffset + kPointerSize;
diff --git a/deps/v8/src/objects/shared-function-info-inl.h b/deps/v8/src/objects/shared-function-info-inl.h
index 0c35933950..57a72754b5 100644
--- a/deps/v8/src/objects/shared-function-info-inl.h
+++ b/deps/v8/src/objects/shared-function-info-inl.h
@@ -80,6 +80,8 @@ AbstractCode* SharedFunctionInfo::abstract_code() {
}
}
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, compiler_hints, is_wrapped,
+ SharedFunctionInfo::IsWrappedBit)
BIT_FIELD_ACCESSORS(SharedFunctionInfo, compiler_hints, allows_lazy_compilation,
SharedFunctionInfo::AllowLazyCompilationBit)
BIT_FIELD_ACCESSORS(SharedFunctionInfo, compiler_hints,
diff --git a/deps/v8/src/objects/shared-function-info.h b/deps/v8/src/objects/shared-function-info.h
index a43c2a12b7..8e996042c0 100644
--- a/deps/v8/src/objects/shared-function-info.h
+++ b/deps/v8/src/objects/shared-function-info.h
@@ -254,7 +254,7 @@ class SharedFunctionInfo : public HeapObject {
String* DebugName();
// The function cannot cause any side effects.
- bool HasNoSideEffect();
+ static bool HasNoSideEffect(Handle<SharedFunctionInfo> info);
// Used for flags such as --turbo-filter.
bool PassesFilter(const char* raw_filter);
@@ -288,6 +288,9 @@ class SharedFunctionInfo : public HeapObject {
inline LanguageMode language_mode();
inline void set_language_mode(LanguageMode language_mode);
+ // Indicates whether the source is implicitly wrapped in a function.
+ DECL_BOOLEAN_ACCESSORS(is_wrapped)
+
// True if the function has any duplicated parameter names.
DECL_BOOLEAN_ACCESSORS(has_duplicate_parameters)
@@ -336,8 +339,8 @@ class SharedFunctionInfo : public HeapObject {
// [source code]: Source code for the function.
bool HasSourceCode() const;
- Handle<Object> GetSourceCode();
- Handle<Object> GetSourceCodeHarmony();
+ static Handle<Object> GetSourceCode(Handle<SharedFunctionInfo> shared);
+ static Handle<Object> GetSourceCodeHarmony(Handle<SharedFunctionInfo> shared);
// Tells whether this function should be subject to debugging.
inline bool IsSubjectToDebugging();
@@ -465,22 +468,25 @@ class SharedFunctionInfo : public HeapObject {
#define COMPILER_HINTS_BIT_FIELDS(V, _) \
V(IsNativeBit, bool, 1, _) \
V(IsStrictBit, bool, 1, _) \
- V(FunctionKindBits, FunctionKind, 10, _) \
+ V(IsWrappedBit, bool, 1, _) \
+ V(FunctionKindBits, FunctionKind, 11, _) \
V(HasDuplicateParametersBit, bool, 1, _) \
V(AllowLazyCompilationBit, bool, 1, _) \
V(NeedsHomeObjectBit, bool, 1, _) \
V(IsDeclarationBit, bool, 1, _) \
V(IsAsmWasmBrokenBit, bool, 1, _) \
V(FunctionMapIndexBits, int, 5, _) \
- V(DisabledOptimizationReasonBits, BailoutReason, 7, _) \
+ V(DisabledOptimizationReasonBits, BailoutReason, 4, _) \
V(RequiresInstanceFieldsInitializer, bool, 1, _)
DEFINE_BIT_FIELDS(COMPILER_HINTS_BIT_FIELDS)
#undef COMPILER_HINTS_BIT_FIELDS
// Bailout reasons must fit in the DisabledOptimizationReason bitfield.
- STATIC_ASSERT(kLastErrorMessage <= DisabledOptimizationReasonBits::kMax);
+ STATIC_ASSERT(BailoutReason::kLastErrorMessage <=
+ DisabledOptimizationReasonBits::kMax);
+ STATIC_ASSERT(kLastFunctionKind <= FunctionKindBits::kMax);
// Masks for checking if certain FunctionKind bits are set without fully
// decoding of the FunctionKind bit field.
static const int kClassConstructorMask = FunctionKind::kClassConstructor
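
HasNoSideEffect and GetSourceCode switch from instance methods to statics taking a Handle<SharedFunctionInfo>, presumably because their bodies can allocate: a GC triggered mid-call may move the receiver, leaving a raw `this` dangling, while a handle is updated by the collector. A toy model of that indirection (a vector stands in for V8's handle table):

#include <cassert>
#include <cstddef>
#include <vector>

struct Object { int value; };

// Toy handle table: a handle stores an index, so a moving GC can relocate
// the object and patch the table without invalidating outstanding handles.
std::vector<Object*> handle_table;

struct Handle {
  std::size_t index;
  Object* operator->() const { return handle_table[index]; }
};

Handle MakeHandle(Object* object) {
  handle_table.push_back(object);
  return Handle{handle_table.size() - 1};
}

void MoveDuringGC(std::size_t index) {  // simulate a moving collection
  Object* moved = new Object(*handle_table[index]);
  delete handle_table[index];
  handle_table[index] = moved;
}

// Like the new static GetSourceCode(Handle<SharedFunctionInfo>): correct
// even if an allocation inside the body triggers a GC, because every access
// re-resolves through the table.
int ReadValue(Handle handle) {
  MoveDuringGC(handle.index);  // stand-in for an allocating call
  return handle->value;
}

int main() {
  Handle h = MakeHandle(new Object{7});
  assert(ReadValue(h) == 7);
  delete handle_table[h.index];
  return 0;
}
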
diff --git a/deps/v8/src/objects/string-inl.h b/deps/v8/src/objects/string-inl.h
index dd75210a54..9b64444de2 100644
--- a/deps/v8/src/objects/string-inl.h
+++ b/deps/v8/src/objects/string-inl.h
@@ -525,11 +525,42 @@ void ConsString::set_second(String* value, WriteBarrierMode mode) {
ACCESSORS(ThinString, actual, String, kActualOffset);
+HeapObject* ThinString::unchecked_actual() const {
+ return reinterpret_cast<HeapObject*>(READ_FIELD(this, kActualOffset));
+}
+
bool ExternalString::is_short() {
InstanceType type = map()->instance_type();
return (type & kShortExternalStringMask) == kShortExternalStringTag;
}
+Address ExternalString::resource_as_address() {
+ return *reinterpret_cast<Address*>(FIELD_ADDR(this, kResourceOffset));
+}
+
+void ExternalString::set_address_as_resource(Address address) {
+ DCHECK(IsAligned(reinterpret_cast<intptr_t>(address), kPointerSize));
+ *reinterpret_cast<Address*>(FIELD_ADDR(this, kResourceOffset)) = address;
+ if (IsExternalOneByteString()) {
+ ExternalOneByteString::cast(this)->update_data_cache();
+ } else {
+ ExternalTwoByteString::cast(this)->update_data_cache();
+ }
+}
+
+uint32_t ExternalString::resource_as_uint32() {
+ return static_cast<uint32_t>(
+ *reinterpret_cast<uintptr_t*>(FIELD_ADDR(this, kResourceOffset)));
+}
+
+void ExternalString::set_uint32_as_resource(uint32_t value) {
+ *reinterpret_cast<uintptr_t*>(FIELD_ADDR(this, kResourceOffset)) = value;
+ if (is_short()) return;
+ const char** data_field =
+ reinterpret_cast<const char**>(FIELD_ADDR(this, kResourceDataOffset));
+ *data_field = nullptr;
+}
+
const ExternalOneByteString::Resource* ExternalOneByteString::resource() {
return *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset));
}
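
The new resource_as_address/set_uint32_as_resource helpers let the serializer temporarily store a raw address or a 32-bit reference index in the resource pointer slot. The sketch below shows the underlying slot-reuse idea with memcpy for portable type punning (V8 itself reinterprets the field address directly):

#include <cstdint>
#include <cstring>

struct Slot {
  void* storage;  // pointer-sized slot, like the field at kResourceOffset

  uintptr_t as_address() const {
    uintptr_t address;
    std::memcpy(&address, &storage, sizeof(address));
    return address;
  }
  void set_uint32(uint32_t value) {
    uintptr_t widened = value;  // zero-extend into the full slot
    std::memcpy(&storage, &widened, sizeof(storage));
  }
  uint32_t as_uint32() const { return static_cast<uint32_t>(as_address()); }
};

int main() {
  Slot s{};
  s.set_uint32(0xDEADBEEF);
  return s.as_uint32() == 0xDEADBEEF ? 0 : 1;
}
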
diff --git a/deps/v8/src/objects/string.h b/deps/v8/src/objects/string.h
index f21171d62f..066fc6d879 100644
--- a/deps/v8/src/objects/string.h
+++ b/deps/v8/src/objects/string.h
@@ -631,6 +631,7 @@ class ThinString : public String {
public:
// Actual string that this ThinString refers to.
inline String* actual() const;
+ inline HeapObject* unchecked_actual() const;
inline void set_actual(String* s,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
@@ -718,6 +719,12 @@ class ExternalString : public String {
// Return whether external string is short (data pointer is not cached).
inline bool is_short();
+ // Used in the serializer/deserializer.
+ inline Address resource_as_address();
+ inline void set_address_as_resource(Address address);
+ inline uint32_t resource_as_uint32();
+ inline void set_uint32_as_resource(uint32_t value);
+
STATIC_ASSERT(kResourceOffset == Internals::kStringResourceOffset);
private:
diff --git a/deps/v8/src/ostreams.cc b/deps/v8/src/ostreams.cc
index 5c7b1631a2..66b57020ad 100644
--- a/deps/v8/src/ostreams.cc
+++ b/deps/v8/src/ostreams.cc
@@ -50,14 +50,14 @@ OFStream::~OFStream() {}
namespace {
// Locale-independent predicates.
-bool IsPrint(uint16_t c) { return 0x20 <= c && c <= 0x7e; }
-bool IsSpace(uint16_t c) { return (0x9 <= c && c <= 0xd) || c == 0x20; }
+bool IsPrint(uint16_t c) { return 0x20 <= c && c <= 0x7E; }
+bool IsSpace(uint16_t c) { return (0x9 <= c && c <= 0xD) || c == 0x20; }
bool IsOK(uint16_t c) { return (IsPrint(c) || IsSpace(c)) && c != '\\'; }
std::ostream& PrintUC16(std::ostream& os, uint16_t c, bool (*pred)(uint16_t)) {
char buf[10];
- const char* format = pred(c) ? "%c" : (c <= 0xff) ? "\\x%02x" : "\\u%04x";
+ const char* format = pred(c) ? "%c" : (c <= 0xFF) ? "\\x%02x" : "\\u%04x";
snprintf(buf, sizeof(buf), format, c);
return os << buf;
}
@@ -124,7 +124,7 @@ std::ostream& operator<<(std::ostream& os, const AsHexBytes& hex) {
if (b) os << " ";
uint8_t printed_byte =
hex.byte_order == AsHexBytes::kLittleEndian ? b : bytes - b - 1;
- os << AsHex((hex.value >> (8 * printed_byte)) & 0xff, 2);
+ os << AsHex((hex.value >> (8 * printed_byte)) & 0xFF, 2);
}
return os;
}
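
For reference, the escaping rule in PrintUC16 above, reduced to a runnable demo (the predicate parameter is dropped; only the uppercase hex constants changed in this patch): printable ASCII is emitted as-is, other bytes as \xNN, and characters above 0xFF as \uNNNN.

#include <cstdint>
#include <cstdio>

bool IsPrint(uint16_t c) { return 0x20 <= c && c <= 0x7E; }

void PrintUC16(uint16_t c) {
  char buf[10];
  const char* format = IsPrint(c) ? "%c" : (c <= 0xFF) ? "\\x%02x" : "\\u%04x";
  std::snprintf(buf, sizeof(buf), format, c);
  std::fputs(buf, stdout);
}

int main() {
  PrintUC16('A');     // A
  PrintUC16(0x07);    // \x07
  PrintUC16(0x2603);  // \u2603
  std::putchar('\n');
  return 0;
}
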
diff --git a/deps/v8/src/parsing/background-parsing-task.cc b/deps/v8/src/parsing/background-parsing-task.cc
index 387cd3a1c6..cb811566df 100644
--- a/deps/v8/src/parsing/background-parsing-task.cc
+++ b/deps/v8/src/parsing/background-parsing-task.cc
@@ -31,7 +31,8 @@ BackgroundParsingTask::BackgroundParsingTask(
DCHECK(options == ScriptCompiler::kProduceParserCache ||
options == ScriptCompiler::kProduceCodeCache ||
options == ScriptCompiler::kProduceFullCodeCache ||
- options == ScriptCompiler::kNoCompileOptions);
+ options == ScriptCompiler::kNoCompileOptions ||
+ options == ScriptCompiler::kEagerCompile);
VMState<PARSER> state(isolate);
diff --git a/deps/v8/src/parsing/expression-classifier.h b/deps/v8/src/parsing/expression-classifier.h
index 6c6c813b3e..709d5736b5 100644
--- a/deps/v8/src/parsing/expression-classifier.h
+++ b/deps/v8/src/parsing/expression-classifier.h
@@ -97,14 +97,12 @@ class ExpressionClassifier {
: base_(base),
previous_(base->classifier_),
zone_(base->impl()->zone()),
- non_patterns_to_rewrite_(base->impl()->GetNonPatternList()),
reported_errors_(base->impl()->GetReportedErrorList()),
duplicate_finder_(duplicate_finder),
invalid_productions_(0),
function_properties_(0) {
base->classifier_ = this;
reported_errors_begin_ = reported_errors_end_ = reported_errors_->length();
- non_pattern_begin_ = non_patterns_to_rewrite_->length();
}
V8_INLINE ~ExpressionClassifier() {
@@ -291,19 +289,10 @@ class ExpressionClassifier {
Add(Error(loc, message, kLetPatternProduction, arg));
}
- void Accumulate(ExpressionClassifier* inner, unsigned productions,
- bool merge_non_patterns = true) {
+ void Accumulate(ExpressionClassifier* inner, unsigned productions) {
DCHECK_EQ(inner->reported_errors_, reported_errors_);
DCHECK_EQ(inner->reported_errors_begin_, reported_errors_end_);
DCHECK_EQ(inner->reported_errors_end_, reported_errors_->length());
- DCHECK_EQ(inner->non_patterns_to_rewrite_, non_patterns_to_rewrite_);
- DCHECK_LE(non_pattern_begin_, inner->non_pattern_begin_);
- DCHECK_LE(inner->non_pattern_begin_, non_patterns_to_rewrite_->length());
- // Merge non-patterns from the inner classifier, or discard them.
- if (merge_non_patterns)
- inner->non_pattern_begin_ = non_patterns_to_rewrite_->length();
- else
- non_patterns_to_rewrite_->Rewind(inner->non_pattern_begin_);
// Propagate errors from inner, but don't overwrite already recorded
// errors.
unsigned non_arrow_inner_invalid_productions =
@@ -368,16 +357,12 @@ class ExpressionClassifier {
reported_errors_end_;
}
- V8_INLINE int GetNonPatternBegin() const { return non_pattern_begin_; }
-
V8_INLINE void Discard() {
if (reported_errors_end_ == reported_errors_->length()) {
reported_errors_->Rewind(reported_errors_begin_);
reported_errors_end_ = reported_errors_begin_;
}
DCHECK_EQ(reported_errors_begin_, reported_errors_end_);
- DCHECK_LE(non_pattern_begin_, non_patterns_to_rewrite_->length());
- non_patterns_to_rewrite_->Rewind(non_pattern_begin_);
}
ExpressionClassifier* previous() const { return previous_; }
@@ -424,16 +409,8 @@ class ExpressionClassifier {
typename Types::Base* base_;
ExpressionClassifier* previous_;
Zone* zone_;
- ZoneList<typename Types::RewritableExpression>* non_patterns_to_rewrite_;
ZoneList<Error>* reported_errors_;
DuplicateFinder* duplicate_finder_;
- // The uint16_t for non_pattern_begin_ will not be enough in the case,
- // e.g., of an array literal containing more than 64K inner array
- // literals with spreads, as in:
- // var N=65536; eval("var x=[];" + "[" + "[...x],".repeat(N) + "].length");
- // An implementation limit error in ParserBase::AddNonPatternForRewriting
- // will be triggered in this case.
- uint16_t non_pattern_begin_;
unsigned invalid_productions_ : 14;
unsigned function_properties_ : 2;
// The uint16_t for reported_errors_begin_ and reported_errors_end_ will
diff --git a/deps/v8/src/parsing/expression-scope-reparenter.cc b/deps/v8/src/parsing/expression-scope-reparenter.cc
index 18c52add11..bdb0aeadd6 100644
--- a/deps/v8/src/parsing/expression-scope-reparenter.cc
+++ b/deps/v8/src/parsing/expression-scope-reparenter.cc
@@ -85,7 +85,7 @@ void Reparenter::VisitRewritableExpression(RewritableExpression* expr) {
}
void Reparenter::VisitBlock(Block* stmt) {
- if (stmt->scope() != nullptr)
+ if (stmt->scope())
stmt->scope()->ReplaceOuterScope(scope_);
else
VisitStatements(stmt->statements());
@@ -93,7 +93,11 @@ void Reparenter::VisitBlock(Block* stmt) {
void Reparenter::VisitTryCatchStatement(TryCatchStatement* stmt) {
Visit(stmt->try_block());
- stmt->scope()->ReplaceOuterScope(scope_);
+ if (stmt->scope()) {
+ stmt->scope()->ReplaceOuterScope(scope_);
+ } else {
+ Visit(stmt->catch_block());
+ }
}
void Reparenter::VisitWithStatement(WithStatement* stmt) {
diff --git a/deps/v8/src/parsing/parse-info.cc b/deps/v8/src/parsing/parse-info.cc
index 1c9d648a1e..b8f191dd5a 100644
--- a/deps/v8/src/parsing/parse-info.cc
+++ b/deps/v8/src/parsing/parse-info.cc
@@ -45,7 +45,13 @@ ParseInfo::ParseInfo(Handle<SharedFunctionInfo> shared)
Isolate* isolate = shared->GetIsolate();
InitFromIsolate(isolate);
+  // Do not support re-parsing the top-level function of a wrapped script.
+ // TODO(yangguo): consider whether we need a top-level function in a
+ // wrapped script at all.
+ DCHECK_IMPLIES(is_toplevel(), !Script::cast(shared->script())->is_wrapped());
+
set_toplevel(shared->is_toplevel());
+ set_wrapped_as_function(shared->is_wrapped());
set_allow_lazy_parsing(FLAG_lazy_inner_functions);
set_is_named_expression(shared->is_named_expression());
set_compiler_hints(shared->compiler_hints());
@@ -54,8 +60,6 @@ ParseInfo::ParseInfo(Handle<SharedFunctionInfo> shared)
function_literal_id_ = shared->function_literal_id();
set_language_mode(shared->language_mode());
set_asm_wasm_broken(shared->is_asm_wasm_broken());
- set_requires_instance_fields_initializer(
- shared->requires_instance_fields_initializer());
Handle<Script> script(Script::cast(shared->script()));
set_script(script);
@@ -90,6 +94,7 @@ ParseInfo::ParseInfo(Handle<Script> script)
set_allow_lazy_parsing();
set_toplevel();
set_script(script);
+ set_wrapped_as_function(script->is_wrapped());
set_native(script->type() == Script::TYPE_NATIVE);
set_eval(script->compilation_type() == Script::COMPILATION_TYPE_EVAL);
@@ -151,6 +156,11 @@ FunctionKind ParseInfo::function_kind() const {
return SharedFunctionInfo::FunctionKindBits::decode(compiler_hints_);
}
+bool ParseInfo::requires_instance_fields_initializer() const {
+ return SharedFunctionInfo::RequiresInstanceFieldsInitializer::decode(
+ compiler_hints_);
+}
+
void ParseInfo::InitFromIsolate(Isolate* isolate) {
DCHECK_NOT_NULL(isolate);
set_hash_seed(isolate->heap()->HashSeed());
diff --git a/deps/v8/src/parsing/parse-info.h b/deps/v8/src/parsing/parse-info.h
index 9deea1ecac..e93c7137ca 100644
--- a/deps/v8/src/parsing/parse-info.h
+++ b/deps/v8/src/parsing/parse-info.h
@@ -79,13 +79,12 @@ class V8_EXPORT_PRIVATE ParseInfo {
FLAG_ACCESSOR(kCollectTypeProfile, collect_type_profile,
set_collect_type_profile)
FLAG_ACCESSOR(kIsAsmWasmBroken, is_asm_wasm_broken, set_asm_wasm_broken)
- FLAG_ACCESSOR(kRequiresInstanceFieldsInitializer,
- requires_instance_fields_initializer,
- set_requires_instance_fields_initializer)
FLAG_ACCESSOR(kBlockCoverageEnabled, block_coverage_enabled,
set_block_coverage_enabled)
FLAG_ACCESSOR(kOnBackgroundThread, on_background_thread,
set_on_background_thread)
+ FLAG_ACCESSOR(kWrappedAsFunction, is_wrapped_as_function,
+ set_wrapped_as_function)
#undef FLAG_ACCESSOR
void set_parse_restriction(ParseRestriction restriction) {
@@ -208,6 +207,7 @@ class V8_EXPORT_PRIVATE ParseInfo {
// Getters for individual compiler hints.
bool is_declaration() const;
FunctionKind function_kind() const;
+ bool requires_instance_fields_initializer() const;
//--------------------------------------------------------------------------
// TODO(titzer): these should not be part of ParseInfo.
@@ -261,8 +261,8 @@ class V8_EXPORT_PRIVATE ParseInfo {
kCollectTypeProfile = 1 << 10,
kBlockCoverageEnabled = 1 << 11,
kIsAsmWasmBroken = 1 << 12,
- kRequiresInstanceFieldsInitializer = 1 << 13,
- kOnBackgroundThread = 1 << 14,
+ kOnBackgroundThread = 1 << 13,
+ kWrappedAsFunction = 1 << 14, // Implicitly wrapped as function.
};
//------------- Inputs to parsing and scope analysis -----------------------
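
kWrappedAsFunction takes bit 14 and kOnBackgroundThread moves down to bit 13; each flag is one bit in a single word behind FLAG_ACCESSOR. A minimal sketch of that accessor pattern (hand-written getters instead of the macro):

#include <cstdint>

class ParseInfo {
 public:
  enum Flag : uint32_t {
    kOnBackgroundThread = 1 << 13,  // moved down from bit 14
    kWrappedAsFunction = 1 << 14,   // new: source implicitly wrapped
  };

  bool is_wrapped_as_function() const { return GetFlag(kWrappedAsFunction); }
  void set_wrapped_as_function(bool value = true) {
    SetFlag(kWrappedAsFunction, value);
  }

 private:
  bool GetFlag(Flag flag) const { return (flags_ & flag) != 0; }
  void SetFlag(Flag flag, bool value) {
    if (value) {
      flags_ |= flag;
    } else {
      flags_ &= ~flag;
    }
  }
  uint32_t flags_ = 0;
};

int main() {
  ParseInfo info;
  info.set_wrapped_as_function();
  return info.is_wrapped_as_function() ? 0 : 1;
}
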
diff --git a/deps/v8/src/parsing/parser-base.h b/deps/v8/src/parsing/parser-base.h
index c393bc5ec2..faefe44011 100644
--- a/deps/v8/src/parsing/parser-base.h
+++ b/deps/v8/src/parsing/parser-base.h
@@ -279,9 +279,11 @@ class ParserBase {
allow_harmony_do_expressions_(false),
allow_harmony_function_sent_(false),
allow_harmony_public_fields_(false),
+ allow_harmony_static_fields_(false),
allow_harmony_dynamic_import_(false),
allow_harmony_import_meta_(false),
- allow_harmony_async_iteration_(false) {}
+ allow_harmony_optional_catch_binding_(false),
+ allow_harmony_private_fields_(false) {}
#define ALLOW_ACCESSORS(name) \
bool allow_##name() const { return allow_##name##_; } \
@@ -291,9 +293,10 @@ class ParserBase {
ALLOW_ACCESSORS(harmony_do_expressions);
ALLOW_ACCESSORS(harmony_function_sent);
ALLOW_ACCESSORS(harmony_public_fields);
+ ALLOW_ACCESSORS(harmony_static_fields);
ALLOW_ACCESSORS(harmony_dynamic_import);
ALLOW_ACCESSORS(harmony_import_meta);
- ALLOW_ACCESSORS(harmony_async_iteration);
+ ALLOW_ACCESSORS(harmony_optional_catch_binding);
#undef ALLOW_ACCESSORS
@@ -304,6 +307,13 @@ class ParserBase {
scanner()->set_allow_harmony_bigint(allow);
}
+ bool allow_harmony_private_fields() const {
+ return scanner()->allow_harmony_private_fields();
+ }
+ void set_allow_harmony_private_fields(bool allow) {
+ scanner()->set_allow_harmony_private_fields(allow);
+ }
+
uintptr_t stack_limit() const { return stack_limit_; }
void set_stack_limit(uintptr_t stack_limit) { stack_limit_ = stack_limit; }
@@ -383,18 +393,27 @@ class ParserBase {
void AddProperty() { expected_property_count_++; }
int expected_property_count() { return expected_property_count_; }
+ void DisableOptimization(BailoutReason reason) {
+ dont_optimize_reason_ = reason;
+ }
+ BailoutReason dont_optimize_reason() { return dont_optimize_reason_; }
+
FunctionKind kind() const { return scope()->function_kind(); }
- FunctionState* outer() const { return outer_function_state_; }
void RewindDestructuringAssignments(int pos) {
destructuring_assignments_to_rewrite_.Rewind(pos);
}
- void SetDestructuringAssignmentsScope(int pos, Scope* scope) {
- for (int i = pos; i < destructuring_assignments_to_rewrite_.length();
- ++i) {
- destructuring_assignments_to_rewrite_[i]->set_scope(scope);
+ void AdoptDestructuringAssignmentsFromParentState(int pos) {
+ const auto& outer_assignments =
+ outer_function_state_->destructuring_assignments_to_rewrite_;
+ DCHECK_GE(outer_assignments.length(), pos);
+ for (int i = pos; i < outer_assignments.length(); ++i) {
+ auto expr = outer_assignments[i];
+ expr->set_scope(scope_);
+ destructuring_assignments_to_rewrite_.Add(expr, scope_->zone());
}
+ outer_function_state_->RewindDestructuringAssignments(pos);
}
const ZoneList<RewritableExpressionT>&
@@ -451,14 +470,6 @@ class ParserBase {
destructuring_assignments_to_rewrite_.Add(expr, scope_->zone());
}
- void AddNonPatternForRewriting(RewritableExpressionT expr, bool* ok) {
- non_patterns_to_rewrite_.Add(expr, scope_->zone());
- if (non_patterns_to_rewrite_.length() >=
- std::numeric_limits<uint16_t>::max()) {
- *ok = false;
- }
- }
-
// Properties count estimation.
int expected_property_count_;
@@ -471,6 +482,9 @@ class ParserBase {
ZoneList<typename ExpressionClassifier::Error> reported_errors_;
+ // A reason, if any, why this function should not be optimized.
+ BailoutReason dont_optimize_reason_;
+
// Record whether the next (=== immediately following) function literal is
// preceded by a parenthesis / exclamation mark. Also record the previous
// state.
@@ -1078,10 +1092,8 @@ class ParserBase {
return ParsePrimaryExpression(&is_async, ok);
}
- // This method wraps the parsing of the expression inside a new expression
- // classifier and calls RewriteNonPattern if parsing is successful.
- // It should be used whenever we're parsing an expression that is known
- // to not be a pattern or part of a pattern.
+ // Use when parsing an expression that is known to not be a pattern or part
+ // of a pattern.
V8_INLINE ExpressionT ParseExpression(bool accept_IN, bool* ok);
// This method does not wrap the parsing of the expression inside a
@@ -1201,14 +1213,15 @@ class ParserBase {
// by value. The method is expected to add the parsed statements to the
// list. This works because in the case of the parser, StatementListT is
// a pointer whereas the preparser does not really modify the body.
- V8_INLINE void ParseStatementList(StatementListT body, int end_token,
+ V8_INLINE void ParseStatementList(StatementListT body, Token::Value end_token,
bool* ok) {
LazyParsingResult result = ParseStatementList(body, end_token, false, ok);
USE(result);
DCHECK_EQ(result, kLazyParsingComplete);
}
- LazyParsingResult ParseStatementList(StatementListT body, int end_token,
- bool may_abort, bool* ok);
+ LazyParsingResult ParseStatementList(StatementListT body,
+ Token::Value end_token, bool may_abort,
+ bool* ok);
StatementT ParseStatementListItem(bool* ok);
StatementT ParseStatement(ZoneList<const AstRawString*>* labels, bool* ok) {
return ParseStatement(labels, kDisallowLabelledFunctionStatement, ok);
@@ -1463,21 +1476,18 @@ class ParserBase {
// Accumulates the classifier that is on top of the stack (inner) to
// the one that is right below (outer) and pops the inner.
- V8_INLINE void Accumulate(unsigned productions,
- bool merge_non_patterns = true) {
+ V8_INLINE void Accumulate(unsigned productions) {
DCHECK_NOT_NULL(classifier_);
ExpressionClassifier* previous = classifier_->previous();
DCHECK_NOT_NULL(previous);
- previous->Accumulate(classifier_, productions, merge_non_patterns);
+ previous->Accumulate(classifier_, productions);
classifier_ = previous;
}
V8_INLINE void AccumulateNonBindingPatternErrors() {
- static const bool kMergeNonPatterns = true;
this->Accumulate(ExpressionClassifier::AllProductions &
- ~(ExpressionClassifier::BindingPatternProduction |
- ExpressionClassifier::LetPatternProduction),
- kMergeNonPatterns);
+ ~(ExpressionClassifier::BindingPatternProduction |
+ ExpressionClassifier::LetPatternProduction));
}
// Pops and discards the classifier that is on top of the stack
@@ -1534,9 +1544,11 @@ class ParserBase {
bool allow_harmony_do_expressions_;
bool allow_harmony_function_sent_;
bool allow_harmony_public_fields_;
+ bool allow_harmony_static_fields_;
bool allow_harmony_dynamic_import_;
bool allow_harmony_import_meta_;
- bool allow_harmony_async_iteration_;
+ bool allow_harmony_optional_catch_binding_;
+ bool allow_harmony_private_fields_;
friend class DiscardableZoneScope;
};
@@ -1553,6 +1565,7 @@ ParserBase<Impl>::FunctionState::FunctionState(
destructuring_assignments_to_rewrite_(16, scope->zone()),
non_patterns_to_rewrite_(0, scope->zone()),
reported_errors_(16, scope->zone()),
+ dont_optimize_reason_(BailoutReason::kNoReason),
next_function_is_likely_called_(false),
previous_function_was_likely_called_(false),
contains_function_or_eval_(false) {
@@ -1587,6 +1600,7 @@ void ParserBase<Impl>::GetUnexpectedTokenMessage(
case Token::STRING:
*message = MessageTemplate::kUnexpectedTokenString;
break;
+ case Token::PRIVATE_NAME:
case Token::IDENTIFIER:
*message = MessageTemplate::kUnexpectedTokenIdentifier;
break;
@@ -1664,6 +1678,13 @@ ParserBase<Impl>::ParseAndClassifyIdentifier(bool* ok) {
if (next == Token::IDENTIFIER || next == Token::ASYNC ||
(next == Token::AWAIT && !parsing_module_ && !is_async_function())) {
IdentifierT name = impl()->GetSymbol();
+
+ if (impl()->IsArguments(name) && scope()->ShouldBanArguments()) {
+ ReportMessage(MessageTemplate::kArgumentsDisallowedInInitializer);
+ *ok = false;
+ return impl()->NullIdentifier();
+ }
+
// When this function is used to read a formal parameter, we don't always
// know whether the function is going to be strict or sloppy. Indeed for
// arrow functions we don't always know that the identifier we are reading
@@ -1942,7 +1963,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseExpression(
bool accept_IN, bool* ok) {
ExpressionClassifier classifier(this);
ExpressionT result = ParseExpressionCoverGrammar(accept_IN, CHECK_OK);
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
return result;
}
@@ -2068,22 +2089,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseArrayLiteral(
}
Expect(Token::RBRACK, CHECK_OK);
- ExpressionT result =
- factory()->NewArrayLiteral(values, first_spread_index, pos);
- if (first_spread_index >= 0) {
- auto rewritable = factory()->NewRewritableExpression(result, scope());
- impl()->QueueNonPatternForRewriting(rewritable, ok);
- if (!*ok) {
- // If the non-pattern rewriting mechanism is used in the future for
- // rewriting other things than spreads, this error message will have
- // to change. Also, this error message will never appear while pre-
- // parsing (this is OK, as it is an implementation limitation).
- ReportMessage(MessageTemplate::kTooManySpreads);
- return impl()->NullExpression();
- }
- result = rewritable;
- }
- return result;
+ return factory()->NewArrayLiteral(values, first_spread_index, pos);
}
template <class Impl>
@@ -2108,6 +2114,9 @@ bool ParserBase<Impl>::SetPropertyKindFromToken(Token::Value token,
case Token::SEMICOLON:
*kind = PropertyKind::kClassField;
return true;
+ case Token::PRIVATE_NAME:
+ *kind = PropertyKind::kClassField;
+ return true;
default:
break;
}
@@ -2137,8 +2146,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePropertyName(
!scanner()->HasAnyLineTerminatorAfterNext()) {
Consume(Token::ASYNC);
token = peek();
- if (token == Token::MUL && allow_harmony_async_iteration() &&
- !scanner()->HasAnyLineTerminatorBeforeNext()) {
+ if (token == Token::MUL && !scanner()->HasAnyLineTerminatorBeforeNext()) {
Consume(Token::MUL);
token = peek();
*is_generator = true;
@@ -2198,7 +2206,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePropertyName(
Consume(Token::LBRACK);
ExpressionClassifier computed_name_classifier(this);
expression = ParseAssignmentExpression(true, CHECK_OK);
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
AccumulateFormalParameterContainmentErrors();
Expect(Token::RBRACK, CHECK_OK);
break;
@@ -2270,6 +2278,8 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
PropertyKind kind = PropertyKind::kNotSet;
Token::Value name_token = peek();
+ DCHECK_IMPLIES(name_token == Token::PRIVATE_NAME,
+ allow_harmony_private_fields());
int name_token_position = scanner()->peek_location().beg_pos;
IdentifierT name = impl()->NullIdentifier();
@@ -2285,12 +2295,22 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
peek() == Token::RBRACE) {
name = impl()->GetSymbol(); // TODO(bakkot) specialize on 'static'
name_expression = factory()->NewStringLiteral(name, position());
+ } else if (peek() == Token::PRIVATE_NAME) {
+ DCHECK(allow_harmony_private_fields());
+ // TODO(gsathya): Make a better error message for this.
+ ReportUnexpectedToken(Next());
+ *ok = false;
+ return impl()->NullLiteralProperty();
} else {
*is_static = true;
name_expression = ParsePropertyName(&name, &kind, &is_generator, &is_get,
&is_set, &is_async, is_computed_name,
CHECK_OK_CUSTOM(NullLiteralProperty));
}
+ } else if (name_token == Token::PRIVATE_NAME) {
+ Consume(Token::PRIVATE_NAME);
+ name = impl()->GetSymbol();
+ name_expression = factory()->NewStringLiteral(name, position());
} else {
name_expression = ParsePropertyName(&name, &kind, &is_generator, &is_get,
&is_set, &is_async, is_computed_name,
@@ -2312,9 +2332,14 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
// as an uninitialized field.
case PropertyKind::kShorthandProperty:
case PropertyKind::kValueProperty:
- if (allow_harmony_public_fields()) {
+ if (allow_harmony_public_fields() || allow_harmony_private_fields()) {
*property_kind = ClassLiteralProperty::FIELD;
- if (!*is_computed_name) {
+ if (*is_static && !allow_harmony_static_fields()) {
+ ReportUnexpectedToken(Next());
+ *ok = false;
+ return impl()->NullLiteralProperty();
+ }
+ if (!*is_computed_name && name_token != Token::PRIVATE_NAME) {
checker->CheckClassFieldName(*is_static,
CHECK_OK_CUSTOM(NullLiteralProperty));
}
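
Several hunks above special-case Token::PRIVATE_NAME, the token the scanner emits under --harmony-private-fields for '#' followed by an identifier; the parser accepts it as a class field name but rejects it for static members and computed names. A rough, invented sketch of such a scan (V8's real scanner is considerably more involved):

#include <cassert>
#include <cctype>
#include <cstddef>
#include <string>

enum Token { IDENTIFIER, PRIVATE_NAME, OTHER };

// Scan one name starting at |i|; '#' marks a private name. Hypothetical and
// heavily simplified.
Token ScanName(const std::string& src, std::size_t& i, std::string* out) {
  const bool is_private = i < src.size() && src[i] == '#';
  if (is_private) ++i;
  const std::size_t start = i;
  while (i < src.size() &&
         (std::isalnum(static_cast<unsigned char>(src[i])) || src[i] == '_' ||
          src[i] == '$')) {
    ++i;
  }
  if (i == start) return OTHER;  // '#' with no identifier, or no name at all
  *out = src.substr(start, i - start);
  return is_private ? PRIVATE_NAME : IDENTIFIER;
}

int main() {
  std::string name;
  std::size_t i = 0;
  assert(ScanName("#count", i, &name) == PRIVATE_NAME && name == "count");
  return 0;
}
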
@@ -2362,7 +2387,7 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
name, scanner()->location(), kSkipFunctionNameCheck, kind,
FLAG_harmony_function_tostring ? name_token_position
: kNoSourcePosition,
- FunctionLiteral::kAccessorOrMethod, language_mode(),
+ FunctionLiteral::kAccessorOrMethod, language_mode(), nullptr,
CHECK_OK_CUSTOM(NullLiteralProperty));
*property_kind = ClassLiteralProperty::METHOD;
@@ -2394,7 +2419,7 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
name, scanner()->location(), kSkipFunctionNameCheck, kind,
FLAG_harmony_function_tostring ? name_token_position
: kNoSourcePosition,
- FunctionLiteral::kAccessorOrMethod, language_mode(),
+ FunctionLiteral::kAccessorOrMethod, language_mode(), nullptr,
CHECK_OK_CUSTOM(NullLiteralProperty));
*property_kind =
@@ -2427,7 +2452,8 @@ ParserBase<Impl>::ParseClassFieldInitializer(ClassInfo* class_info,
: class_info->instance_fields_scope;
if (initializer_scope == nullptr) {
- initializer_scope = NewFunctionScope(FunctionKind::kConciseMethod);
+ initializer_scope =
+ NewFunctionScope(FunctionKind::kClassFieldsInitializerFunction);
// TODO(gsathya): Make scopes be non contiguous.
initializer_scope->set_start_position(scanner()->location().end_pos);
initializer_scope->SetLanguageMode(LanguageMode::kStrict);
@@ -2441,7 +2467,7 @@ ParserBase<Impl>::ParseClassFieldInitializer(ClassInfo* class_info,
initializer =
ParseAssignmentExpression(true, CHECK_OK_CUSTOM(NullExpression));
- impl()->RewriteNonPattern(CHECK_OK_CUSTOM(NullExpression));
+ ValidateExpression(CHECK_OK_CUSTOM(NullExpression));
} else {
initializer = factory()->NewUndefinedLiteral(kNoSourcePosition);
}
@@ -2560,7 +2586,7 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ObjectLiteralChecker* checker,
ExpressionClassifier rhs_classifier(this);
ExpressionT rhs = ParseAssignmentExpression(
true, CHECK_OK_CUSTOM(NullLiteralProperty));
- impl()->RewriteNonPattern(CHECK_OK_CUSTOM(NullLiteralProperty));
+ ValidateExpression(CHECK_OK_CUSTOM(NullLiteralProperty));
AccumulateFormalParameterContainmentErrors();
value = factory()->NewAssignment(Token::ASSIGN, lhs, rhs,
kNoSourcePosition);
@@ -2595,7 +2621,7 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ObjectLiteralChecker* checker,
ExpressionT value = impl()->ParseFunctionLiteral(
name, scanner()->location(), kSkipFunctionNameCheck, kind,
FLAG_harmony_function_tostring ? next_beg_pos : kNoSourcePosition,
- FunctionLiteral::kAccessorOrMethod, language_mode(),
+ FunctionLiteral::kAccessorOrMethod, language_mode(), nullptr,
CHECK_OK_CUSTOM(NullLiteralProperty));
ObjectLiteralPropertyT result = factory()->NewObjectLiteralProperty(
@@ -2627,7 +2653,7 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ObjectLiteralChecker* checker,
FunctionLiteralT value = impl()->ParseFunctionLiteral(
name, scanner()->location(), kSkipFunctionNameCheck, kind,
FLAG_harmony_function_tostring ? next_beg_pos : kNoSourcePosition,
- FunctionLiteral::kAccessorOrMethod, language_mode(),
+ FunctionLiteral::kAccessorOrMethod, language_mode(), nullptr,
CHECK_OK_CUSTOM(NullLiteralProperty));
ObjectLiteralPropertyT result = factory()->NewObjectLiteralProperty(
@@ -2711,8 +2737,8 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseObjectLiteral(
MessageTemplate::kTooManyArguments);
}
- return factory()->NewObjectLiteral(
- properties, number_of_boilerplate_properties, pos, has_rest_property);
+ return impl()->InitializeObjectLiteral(factory()->NewObjectLiteral(
+ properties, number_of_boilerplate_properties, pos, has_rest_property));
}
template <typename Impl>
@@ -2738,7 +2764,7 @@ typename ParserBase<Impl>::ExpressionListT ParserBase<Impl>::ParseArguments(
*is_simple_parameter_list = false;
}
if (!maybe_arrow) {
- impl()->RewriteNonPattern(CHECK_OK_CUSTOM(NullExpressionList));
+ ValidateExpression(CHECK_OK_CUSTOM(NullExpressionList));
}
if (is_spread) {
if (is_simple_parameter_list != nullptr) {
@@ -2784,7 +2810,7 @@ typename ParserBase<Impl>::ExpressionListT ParserBase<Impl>::ParseArguments(
if (!maybe_arrow || peek() != Token::ARROW) {
if (maybe_arrow) {
- impl()->RewriteNonPattern(CHECK_OK_CUSTOM(NullExpressionList));
+ ValidateExpression(CHECK_OK_CUSTOM(NullExpressionList));
}
}
@@ -2864,7 +2890,6 @@ ParserBase<Impl>::ParseAssignmentExpression(bool accept_IN, bool* ok) {
// Because the arrow's parameters were parsed in the outer scope,
// we need to fix up the scope chain appropriately.
scope_snapshot.Reparent(scope);
- function_state_->SetDestructuringAssignmentsScope(rewritable_length, scope);
FormalParametersT parameters(scope);
if (!classifier()->is_simple_parameter_list()) {
@@ -2914,15 +2939,8 @@ ParserBase<Impl>::ParseAssignmentExpression(bool accept_IN, bool* ok) {
productions &= ~ExpressionClassifier::ExpressionProduction;
}
- if (!Token::IsAssignmentOp(peek())) {
- // Parsed conditional expression only (no assignment).
- // Pending non-pattern expressions must be merged.
- Accumulate(productions);
- return expression;
- } else {
- // Pending non-pattern expressions must be discarded.
- Accumulate(productions, false);
- }
+ Accumulate(productions);
+ if (!Token::IsAssignmentOp(peek())) return expression;
if (is_destructuring_assignment) {
ValidateAssignmentPattern(CHECK_OK);
@@ -2945,7 +2963,7 @@ ParserBase<Impl>::ParseAssignmentExpression(bool accept_IN, bool* ok) {
ExpressionClassifier rhs_classifier(this);
ExpressionT right = ParseAssignmentExpression(accept_IN, CHECK_OK);
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
AccumulateFormalParameterContainmentErrors();
// We try to estimate the set of properties set by constructors. We define a
@@ -3019,7 +3037,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseYieldExpression(
// Delegating yields require an RHS; fall through.
default:
expression = ParseAssignmentExpression(accept_IN, CHECK_OK);
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
break;
}
}
@@ -3052,7 +3070,7 @@ ParserBase<Impl>::ParseConditionalExpression(bool accept_IN,
// We start using the binary expression parser for prec >= 4 only!
ExpressionT expression = ParseBinaryExpression(4, accept_IN, CHECK_OK);
if (peek() != Token::CONDITIONAL) return expression;
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
BindingPatternUnexpectedToken();
ArrowFormalParametersUnexpectedToken();
@@ -3067,7 +3085,7 @@ ParserBase<Impl>::ParseConditionalExpression(bool accept_IN,
left = ParseAssignmentExpression(true, CHECK_OK);
AccumulateNonBindingPatternErrors();
}
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
ExpressionT right;
{
SourceRangeScope range_scope(scanner(), &else_range);
@@ -3076,7 +3094,7 @@ ParserBase<Impl>::ParseConditionalExpression(bool accept_IN,
right = ParseAssignmentExpression(accept_IN, CHECK_OK);
AccumulateNonBindingPatternErrors();
}
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
ExpressionT expr = factory()->NewConditional(expression, left, right, pos);
impl()->RecordConditionalSourceRange(expr, then_range, else_range);
return expr;
@@ -3093,7 +3111,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseBinaryExpression(
for (int prec1 = Precedence(peek(), accept_IN); prec1 >= prec; prec1--) {
// prec1 >= 4
while (Precedence(peek(), accept_IN) == prec1) {
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
BindingPatternUnexpectedToken();
ArrowFormalParametersUnexpectedToken();
@@ -3105,7 +3123,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseBinaryExpression(
const int next_prec = is_right_associative ? prec1 : prec1 + 1;
ExpressionT y = ParseBinaryExpression(next_prec, accept_IN, CHECK_OK);
right_range_scope.Finalize();
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
if (impl()->ShortcutNumericLiteralBinaryExpression(&x, y, op, pos)) {
continue;
@@ -3171,7 +3189,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseUnaryExpression(
}
ExpressionT expression = ParseUnaryExpression(CHECK_OK);
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
if (op == Token::DELETE && is_strict(language_mode())) {
if (impl()->IsIdentifier(expression)) {
@@ -3200,7 +3218,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseUnaryExpression(
expression, beg_pos, scanner()->location().end_pos,
MessageTemplate::kInvalidLhsInPrefixOp, CHECK_OK);
impl()->MarkExpressionAsAssigned(expression);
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
return factory()->NewCountOperation(op,
true /* prefix */,
@@ -3211,12 +3229,15 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseUnaryExpression(
classifier()->RecordFormalParameterInitializerError(
scanner()->peek_location(),
MessageTemplate::kAwaitExpressionFormalParameter);
-
int await_pos = peek_position();
Consume(Token::AWAIT);
ExpressionT value = ParseUnaryExpression(CHECK_OK);
+ classifier()->RecordBindingPatternError(
+ Scanner::Location(await_pos, scanner()->location().end_pos),
+ MessageTemplate::kInvalidDestructuringTarget);
+
ExpressionT expr = factory()->NewAwait(value, await_pos);
impl()->RecordSuspendSourceRange(expr, PositionAfterSemicolon());
return expr;
@@ -3242,7 +3263,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePostfixExpression(
expression, lhs_beg_pos, scanner()->location().end_pos,
MessageTemplate::kInvalidLhsInPostfixOp, CHECK_OK);
impl()->MarkExpressionAsAssigned(expression);
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
Token::Value next = Next();
expression =
@@ -3267,13 +3288,13 @@ ParserBase<Impl>::ParseLeftHandSideExpression(bool* ok) {
while (true) {
switch (peek()) {
case Token::LBRACK: {
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
BindingPatternUnexpectedToken();
ArrowFormalParametersUnexpectedToken();
Consume(Token::LBRACK);
int pos = position();
ExpressionT index = ParseExpressionCoverGrammar(true, CHECK_OK);
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
result = factory()->NewProperty(result, index, pos);
Expect(Token::RBRACK, CHECK_OK);
break;
@@ -3281,7 +3302,7 @@ ParserBase<Impl>::ParseLeftHandSideExpression(bool* ok) {
case Token::LPAREN: {
int pos;
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
BindingPatternUnexpectedToken();
if (scanner()->current_token() == Token::IDENTIFIER ||
scanner()->current_token() == Token::SUPER ||
@@ -3373,7 +3394,7 @@ ParserBase<Impl>::ParseLeftHandSideExpression(bool* ok) {
}
case Token::PERIOD: {
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
BindingPatternUnexpectedToken();
ArrowFormalParametersUnexpectedToken();
Consume(Token::PERIOD);
@@ -3387,7 +3408,7 @@ ParserBase<Impl>::ParseLeftHandSideExpression(bool* ok) {
case Token::TEMPLATE_SPAN:
case Token::TEMPLATE_TAIL: {
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
BindingPatternUnexpectedToken();
ArrowFormalParametersUnexpectedToken();
result = ParseTemplateLiteral(result, position(), true, CHECK_OK);
@@ -3446,7 +3467,7 @@ ParserBase<Impl>::ParseMemberWithNewPrefixesExpression(bool* is_async,
} else {
result = ParseMemberWithNewPrefixesExpression(is_async, CHECK_OK);
}
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
if (peek() == Token::LPAREN) {
// NewExpression with arguments.
Scanner::Location spread_pos;
@@ -3537,7 +3558,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseMemberExpression(
is_strict_reserved_name ? kFunctionNameIsStrictReserved
: kFunctionNameValidityUnknown,
function_kind, function_token_position, function_type, language_mode(),
- CHECK_OK);
+ nullptr, CHECK_OK);
} else if (peek() == Token::SUPER) {
const bool is_new = false;
result = ParseSuperExpression(is_new, CHECK_OK);
@@ -3657,14 +3678,14 @@ ParserBase<Impl>::ParseMemberExpressionContinuation(ExpressionT expression,
switch (peek()) {
case Token::LBRACK: {
*is_async = false;
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
BindingPatternUnexpectedToken();
ArrowFormalParametersUnexpectedToken();
Consume(Token::LBRACK);
int pos = position();
ExpressionT index = ParseExpressionCoverGrammar(true, CHECK_OK);
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
expression = factory()->NewProperty(expression, index, pos);
impl()->PushPropertyName(index);
Expect(Token::RBRACK, CHECK_OK);
@@ -3672,13 +3693,19 @@ ParserBase<Impl>::ParseMemberExpressionContinuation(ExpressionT expression,
}
case Token::PERIOD: {
*is_async = false;
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
BindingPatternUnexpectedToken();
ArrowFormalParametersUnexpectedToken();
Consume(Token::PERIOD);
int pos = peek_position();
- IdentifierT name = ParseIdentifierName(CHECK_OK);
+ IdentifierT name;
+ if (allow_harmony_private_fields() && peek() == Token::PRIVATE_NAME) {
+ Consume(Token::PRIVATE_NAME);
+ name = impl()->GetSymbol();
+ } else {
+ name = ParseIdentifierName(CHECK_OK);
+ }
expression = factory()->NewProperty(
expression, factory()->NewStringLiteral(name, pos), pos);
impl()->PushLiteralName(name);
@@ -3687,7 +3714,7 @@ ParserBase<Impl>::ParseMemberExpressionContinuation(ExpressionT expression,
case Token::TEMPLATE_SPAN:
case Token::TEMPLATE_TAIL: {
*is_async = false;
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
BindingPatternUnexpectedToken();
ArrowFormalParametersUnexpectedToken();
int pos;
@@ -3743,7 +3770,7 @@ void ParserBase<Impl>::ParseFormalParameter(FormalParametersT* parameters,
}
ExpressionClassifier init_classifier(this);
initializer = ParseAssignmentExpression(true, CHECK_OK_CUSTOM(Void));
- impl()->RewriteNonPattern(CHECK_OK_CUSTOM(Void));
+ ValidateExpression(CHECK_OK_CUSTOM(Void));
ValidateFormalParameterInitializer(CHECK_OK_CUSTOM(Void));
parameters->is_simple = false;
DiscardExpressionClassifier();
@@ -3882,7 +3909,7 @@ typename ParserBase<Impl>::BlockT ParserBase<Impl>::ParseVariableDeclarations(
ExpressionClassifier classifier(this);
value = ParseAssignmentExpression(var_context != kForStatement,
CHECK_OK_CUSTOM(NullStatement));
- impl()->RewriteNonPattern(CHECK_OK_CUSTOM(NullStatement));
+ ValidateExpression(CHECK_OK_CUSTOM(NullStatement));
variable_loc.end_pos = scanner()->location().end_pos;
if (!parsing_result->first_initializer_loc.IsValid()) {
@@ -3997,7 +4024,7 @@ ParserBase<Impl>::ParseHoistableDeclaration(
const bool is_async = flags & ParseFunctionFlags::kIsAsync;
DCHECK(!is_generator || !is_async);
- if (allow_harmony_async_iteration() && is_async && Check(Token::MUL)) {
+ if (is_async && Check(Token::MUL)) {
// Async generator
is_generator = true;
}
@@ -4025,7 +4052,7 @@ ParserBase<Impl>::ParseHoistableDeclaration(
FunctionLiteralT function = impl()->ParseFunctionLiteral(
name, scanner()->location(), name_validity, kind, pos,
- FunctionLiteral::kDeclaration, language_mode(),
+ FunctionLiteral::kDeclaration, language_mode(), nullptr,
CHECK_OK_CUSTOM(NullStatement));
// In ES6, a function behaves as a lexical binding, except in
@@ -4096,6 +4123,8 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseClassDeclaration(
template <typename Impl>
typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseNativeDeclaration(
bool* ok) {
+ function_state_->DisableOptimization(BailoutReason::kNativeFunctionLiteral);
+
int pos = peek_position();
Expect(Token::FUNCTION, CHECK_OK_CUSTOM(NullStatement));
// Allow "eval" or "arguments" for backward compatibility.
@@ -4150,6 +4179,11 @@ void ParserBase<Impl>::ParseFunctionBody(
body = inner_block->statements();
}
+ // If we are parsing the source as if it is wrapped in a function, the source
+ // ends without a closing brace.
+ Token::Value closing_token =
+ function_type == FunctionLiteral::kWrapped ? Token::EOS : Token::RBRACE;
+
{
BlockState block_state(&scope_, inner_scope);
@@ -4162,7 +4196,7 @@ void ParserBase<Impl>::ParseFunctionBody(
} else if (IsAsyncFunction(kind)) {
ParseAsyncFunctionBody(inner_scope, body, CHECK_OK_VOID);
} else {
- ParseStatementList(body, Token::RBRACE, CHECK_OK_VOID);
+ ParseStatementList(body, closing_token, CHECK_OK_VOID);
}
if (IsDerivedConstructor(kind)) {
@@ -4172,7 +4206,7 @@ void ParserBase<Impl>::ParseFunctionBody(
}
}
- Expect(Token::RBRACE, CHECK_OK_VOID);
+ Expect(closing_token, CHECK_OK_VOID);
scope()->set_end_position(scanner()->location().end_pos);
if (!parameters.is_simple) {
@@ -4298,11 +4332,11 @@ typename ParserBase<Impl>::ExpressionT
ParserBase<Impl>::ParseArrowFunctionLiteral(
bool accept_IN, const FormalParametersT& formal_parameters,
int rewritable_length, bool* ok) {
- const RuntimeCallStats::CounterId counters[2][2] = {
- {&RuntimeCallStats::ParseBackgroundArrowFunctionLiteral,
- &RuntimeCallStats::ParseArrowFunctionLiteral},
- {&RuntimeCallStats::PreParseBackgroundArrowFunctionLiteral,
- &RuntimeCallStats::PreParseArrowFunctionLiteral}};
+ const RuntimeCallCounterId counters[2][2] = {
+ {RuntimeCallCounterId::kParseBackgroundArrowFunctionLiteral,
+ RuntimeCallCounterId::kParseArrowFunctionLiteral},
+ {RuntimeCallCounterId::kPreParseBackgroundArrowFunctionLiteral,
+ RuntimeCallCounterId::kPreParseArrowFunctionLiteral}};
RuntimeCallTimerScope runtime_timer(
runtime_call_stats_,
counters[Impl::IsPreParser()][parsing_on_main_thread_]);
@@ -4337,6 +4371,11 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
FunctionState function_state(&function_state_, &scope_,
formal_parameters.scope);
+ // Move any queued destructuring assignments which appeared
+ // in this function's parameter list into its own function_state.
+ function_state.AdoptDestructuringAssignmentsFromParentState(
+ rewritable_length);
+
Expect(Token::ARROW, CHECK_OK);
if (peek() == Token::LBRACE) {
@@ -4360,14 +4399,10 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
USE(result);
formal_parameters.scope->ResetAfterPreparsing(ast_value_factory_,
false);
-
// Discard any queued destructuring assignments which appeared
- // in this function's parameter list.
- FunctionState* parent_state = function_state.outer();
- DCHECK_NOT_NULL(parent_state);
- DCHECK_GE(parent_state->destructuring_assignments_to_rewrite().length(),
- rewritable_length);
- parent_state->RewindDestructuringAssignments(rewritable_length);
+ // in this function's parameter list, and which were adopted
+ // into this function state, above.
+ function_state.RewindDestructuringAssignments(0);
} else {
Consume(Token::LBRACE);
body = impl()->NewStatementList(8);
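
AdoptDestructuringAssignmentsFromParentState replaces the old scope-patching approach: entries the outer function state queued from rewritable_length onward are moved into the arrow function's own state, so the later discard becomes a local Rewind(0). The same move-a-tail-range step, with std::vector standing in for ZoneList:

#include <cassert>
#include <cstddef>
#include <vector>

struct FunctionState {
  std::vector<int> to_rewrite;  // stands in for the assignments list
  FunctionState* outer = nullptr;

  // Mirror of AdoptDestructuringAssignmentsFromParentState: take everything
  // the parent queued from |pos| onward, then rewind the parent to |pos|.
  void AdoptFromParentState(std::size_t pos) {
    std::vector<int>& parent = outer->to_rewrite;
    assert(parent.size() >= pos);
    to_rewrite.insert(to_rewrite.end(), parent.begin() + pos, parent.end());
    parent.resize(pos);
  }
};

int main() {
  FunctionState outer, inner;
  inner.outer = &outer;
  outer.to_rewrite = {1, 2, 3, 4};
  inner.AdoptFromParentState(2);  // inner takes {3, 4}
  assert(outer.to_rewrite.size() == 2);
  inner.to_rewrite.clear();       // the later Rewind(0) discard
  return 0;
}
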
@@ -4467,9 +4502,10 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseClassLiteral(
scope()->set_start_position(scanner()->location().end_pos);
if (Check(Token::EXTENDS)) {
+ FuncNameInferrer::State fni_state(fni_);
ExpressionClassifier extends_classifier(this);
class_info.extends = ParseLeftHandSideExpression(CHECK_OK);
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
AccumulateFormalParameterContainmentErrors();
}
@@ -4501,7 +4537,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseClassLiteral(
class_info.computed_field_count++;
}
is_constructor &= class_info.has_seen_constructor;
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
AccumulateFormalParameterContainmentErrors();
impl()->DeclareClassProperty(name, property, property_kind, is_static,
@@ -4526,7 +4562,7 @@ void ParserBase<Impl>::ParseSingleExpressionFunctionBody(StatementListT body,
ExpressionClassifier classifier(this);
ExpressionT expression = ParseAssignmentExpression(accept_IN, CHECK_OK_VOID);
- impl()->RewriteNonPattern(CHECK_OK_VOID);
+ ValidateExpression(CHECK_OK_VOID);
if (is_async) {
BlockT block = factory()->NewBlock(1, true);
@@ -4564,7 +4600,7 @@ ParserBase<Impl>::ParseAsyncFunctionLiteral(bool* ok) {
IdentifierT name = impl()->NullIdentifier();
FunctionLiteral::FunctionType type = FunctionLiteral::kAnonymousExpression;
- bool is_generator = allow_harmony_async_iteration() && Check(Token::MUL);
+ bool is_generator = Check(Token::MUL);
const bool kIsAsync = true;
const FunctionKind kind = FunctionKindFor(is_generator, kIsAsync);
@@ -4590,7 +4626,7 @@ ParserBase<Impl>::ParseAsyncFunctionLiteral(bool* ok) {
name, scanner()->location(),
is_strict_reserved ? kFunctionNameIsStrictReserved
: kFunctionNameValidityUnknown,
- kind, pos, type, language_mode(), CHECK_OK);
+ kind, pos, type, language_mode(), nullptr, CHECK_OK);
}
template <typename Impl>
@@ -4650,7 +4686,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseTemplateLiteral(
int expr_pos = peek_position();
ExpressionT expression = ParseExpressionCoverGrammar(true, CHECK_OK);
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
impl()->AddTemplateExpression(&ts, expression);
if (peek() != Token::RBRACE) {
@@ -4781,8 +4817,9 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseDoExpression(
template <typename Impl>
typename ParserBase<Impl>::LazyParsingResult
-ParserBase<Impl>::ParseStatementList(StatementListT body, int end_token,
- bool may_abort, bool* ok) {
+ParserBase<Impl>::ParseStatementList(StatementListT body,
+ Token::Value end_token, bool may_abort,
+ bool* ok) {
// StatementList ::
// (StatementListItem)* <end_token>
@@ -4953,8 +4990,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseStatement(
case Token::WHILE:
return ParseWhileStatement(labels, ok);
case Token::FOR:
- if (V8_UNLIKELY(allow_harmony_async_iteration() && is_async_function() &&
- PeekAhead() == Token::AWAIT)) {
+ if (V8_UNLIKELY(is_async_function() && PeekAhead() == Token::AWAIT)) {
return ParseForAwaitStatement(labels, ok);
}
return ParseForStatement(labels, ok);
@@ -5175,11 +5211,6 @@ ParserBase<Impl>::ParseExpressionOrLabelledStatement(
// Parsed expression statement, followed by semicolon.
ExpectSemicolon(CHECK_OK);
- if (labels != nullptr) {
- // TODO(adamk): Also measure in the PreParser by passing something
- // non-null as |labels|.
- impl()->CountUsage(v8::Isolate::kLabeledExpressionStatement);
- }
return factory()->NewExpressionStatement(expr, pos);
}
@@ -5204,8 +5235,9 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseIfStatement(
StatementT else_statement = impl()->NullStatement();
if (Check(Token::ELSE)) {
- SourceRangeScope range_scope(scanner(), &else_range);
+ else_range = SourceRange::ContinuationOf(then_range);
else_statement = ParseScopedStatement(labels, CHECK_OK);
+ else_range.end = scanner_->location().end_pos;
} else {
else_statement = factory()->NewEmptyStatement(kNoSourcePosition);
}
@@ -5547,50 +5579,60 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseTryStatement(
{
SourceRangeScope catch_range_scope(scanner(), &catch_range);
if (Check(Token::CATCH)) {
- Expect(Token::LPAREN, CHECK_OK);
- catch_info.scope = NewScope(CATCH_SCOPE);
- catch_info.scope->set_start_position(scanner()->location().beg_pos);
-
- {
- BlockState catch_block_state(&scope_, catch_info.scope);
+ bool has_binding;
+ if (allow_harmony_optional_catch_binding()) {
+ has_binding = Check(Token::LPAREN);
+ } else {
+ has_binding = true;
+ Expect(Token::LPAREN, CHECK_OK);
+ }
- catch_block = factory()->NewBlock(16, false);
+ if (has_binding) {
+ catch_info.scope = NewScope(CATCH_SCOPE);
+ catch_info.scope->set_start_position(scanner()->location().beg_pos);
- // Create a block scope to hold any lexical declarations created
- // as part of destructuring the catch parameter.
{
- BlockState catch_variable_block_state(zone(), &scope_);
- scope()->set_start_position(scanner()->location().beg_pos);
- typename Types::Target target(this, catch_block);
-
- // This does not simply call ParsePrimaryExpression to avoid
- // ExpressionFromIdentifier from being called in the first
- // branch, which would introduce an unresolved symbol and mess
- // with arrow function names.
- if (peek_any_identifier()) {
- catch_info.name =
- ParseIdentifier(kDontAllowRestrictedIdentifiers, CHECK_OK);
- } else {
- ExpressionClassifier pattern_classifier(this);
- catch_info.pattern = ParsePrimaryExpression(CHECK_OK);
- ValidateBindingPattern(CHECK_OK);
- }
+ BlockState catch_block_state(&scope_, catch_info.scope);
+
+ catch_block = factory()->NewBlock(16, false);
+
+ // Create a block scope to hold any lexical declarations created
+ // as part of destructuring the catch parameter.
+ {
+ BlockState catch_variable_block_state(zone(), &scope_);
+ scope()->set_start_position(scanner()->location().beg_pos);
+
+ // This does not simply call ParsePrimaryExpression, to avoid
+ // ExpressionFromIdentifier being called in the first branch,
+ // which would introduce an unresolved symbol and mess with
+ // arrow function names.
+ if (peek_any_identifier()) {
+ catch_info.name =
+ ParseIdentifier(kDontAllowRestrictedIdentifiers, CHECK_OK);
+ } else {
+ ExpressionClassifier pattern_classifier(this);
+ catch_info.pattern = ParsePrimaryExpression(CHECK_OK);
+ ValidateBindingPattern(CHECK_OK);
+ }
- Expect(Token::RPAREN, CHECK_OK);
- impl()->RewriteCatchPattern(&catch_info, CHECK_OK);
- if (!impl()->IsNull(catch_info.init_block)) {
- catch_block->statements()->Add(catch_info.init_block, zone());
- }
+ Expect(Token::RPAREN, CHECK_OK);
+ impl()->RewriteCatchPattern(&catch_info, CHECK_OK);
+ if (!impl()->IsNull(catch_info.init_block)) {
+ catch_block->statements()->Add(catch_info.init_block, zone());
+ }
- catch_info.inner_block = ParseBlock(nullptr, CHECK_OK);
- catch_block->statements()->Add(catch_info.inner_block, zone());
- impl()->ValidateCatchBlock(catch_info, CHECK_OK);
- scope()->set_end_position(scanner()->location().end_pos);
- catch_block->set_scope(scope()->FinalizeBlockScope());
+ catch_info.inner_block = ParseBlock(nullptr, CHECK_OK);
+ catch_block->statements()->Add(catch_info.inner_block, zone());
+ impl()->ValidateCatchBlock(catch_info, CHECK_OK);
+ scope()->set_end_position(scanner()->location().end_pos);
+ catch_block->set_scope(scope()->FinalizeBlockScope());
+ }
}
- }
- catch_info.scope->set_end_position(scanner()->location().end_pos);
+ catch_info.scope->set_end_position(scanner()->location().end_pos);
+ } else {
+ catch_block = ParseBlock(nullptr, CHECK_OK);
+ }
}
}
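
This hunk is the parser half of optional catch binding (a stage-3 TC39 proposal at the time), gated behind the new allow_harmony_optional_catch_binding() flag: `catch` may now omit the parenthesized parameter entirely, as in `try { risky(); } catch { recover(); }`, in which case no CATCH_SCOPE is created and the block is parsed directly. A self-contained toy of the branching, with hypothetical Token/TokenStream types; the real code threads scopes, patterns and CHECK_OK through ParserBase:

#include <cassert>
#include <cstdio>
#include <vector>

enum class Token { CATCH, LPAREN, RPAREN, IDENT, LBRACE, RBRACE, EOS };

struct TokenStream {
  std::vector<Token> tokens;
  int pos = 0;
  Token peek() const { return tokens[pos]; }
  bool Check(Token t) { if (peek() != t) return false; ++pos; return true; }
  bool Expect(Token t) { return Check(t); }  // the real parser reports an error
};

bool ParseCatch(TokenStream* ts, bool allow_optional_catch_binding) {
  if (!ts->Check(Token::CATCH)) return false;
  bool has_binding = allow_optional_catch_binding
                         ? ts->Check(Token::LPAREN)    // '(' is now optional
                         : ts->Expect(Token::LPAREN);  // '(' is mandatory
  if (!allow_optional_catch_binding && !has_binding) return false;
  if (has_binding) {
    if (!ts->Expect(Token::IDENT)) return false;  // binding name or pattern
    if (!ts->Expect(Token::RPAREN)) return false;
  }
  // With no binding there is no catch scope; the block is parsed directly.
  return ts->Expect(Token::LBRACE) && ts->Expect(Token::RBRACE);
}

int main() {
  TokenStream bare{{Token::CATCH, Token::LBRACE, Token::RBRACE, Token::EOS}};
  assert(ParseCatch(&bare, /*allow_optional_catch_binding=*/true));
  TokenStream again{{Token::CATCH, Token::LBRACE, Token::RBRACE, Token::EOS}};
  assert(!ParseCatch(&again, /*allow_optional_catch_binding=*/false));
  std::puts("ok");
}
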
@@ -5687,7 +5729,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForStatement(
if (is_destructuring) {
ValidateAssignmentPattern(CHECK_OK);
} else {
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
}
if (is_for_each) {
@@ -5750,7 +5792,7 @@ ParserBase<Impl>::ParseForEachStatementWithDeclarations(
if (for_info->mode == ForEachStatement::ITERATE) {
ExpressionClassifier classifier(this);
enumerable = ParseAssignmentExpression(true, CHECK_OK);
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
} else {
enumerable = ParseExpression(true, CHECK_OK);
}
@@ -5826,7 +5868,7 @@ ParserBase<Impl>::ParseForEachStatementWithoutDeclarations(
if (for_info->mode == ForEachStatement::ITERATE) {
ExpressionClassifier classifier(this);
enumerable = ParseAssignmentExpression(true, CHECK_OK);
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
} else {
enumerable = ParseExpression(true, CHECK_OK);
}
@@ -5945,7 +5987,6 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForAwaitStatement(
ZoneList<const AstRawString*>* labels, bool* ok) {
// for await '(' ForDeclaration of AssignmentExpression ')'
DCHECK(is_async_function());
- DCHECK(allow_harmony_async_iteration());
int stmt_pos = peek_position();
@@ -6014,7 +6055,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForAwaitStatement(
if (lhs->IsArrayLiteral() || lhs->IsObjectLiteral()) {
ValidateAssignmentPattern(CHECK_OK);
} else {
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
each_variable = CheckAndRewriteReferenceExpression(
lhs, lhs_beg_pos, lhs_end_pos, MessageTemplate::kInvalidLhsInFor,
kSyntaxError, CHECK_OK);
@@ -6030,7 +6071,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForAwaitStatement(
{
ExpressionClassifier classifier(this);
iterable = ParseAssignmentExpression(kAllowIn, CHECK_OK);
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
}
Expect(Token::RPAREN, CHECK_OK);
diff --git a/deps/v8/src/parsing/parser.cc b/deps/v8/src/parsing/parser.cc
index 4d291a741e..0497958c82 100644
--- a/deps/v8/src/parsing/parser.cc
+++ b/deps/v8/src/parsing/parser.cc
@@ -276,18 +276,18 @@ bool Parser::ShortcutNumericLiteralBinaryExpression(Expression** x,
return true;
}
case Token::SHL: {
- int value = DoubleToInt32(x_val) << (DoubleToInt32(y_val) & 0x1f);
+ int value = DoubleToInt32(x_val) << (DoubleToInt32(y_val) & 0x1F);
*x = factory()->NewNumberLiteral(value, pos);
return true;
}
case Token::SHR: {
- uint32_t shift = DoubleToInt32(y_val) & 0x1f;
+ uint32_t shift = DoubleToInt32(y_val) & 0x1F;
uint32_t value = DoubleToUint32(x_val) >> shift;
*x = factory()->NewNumberLiteral(value, pos);
return true;
}
case Token::SAR: {
- uint32_t shift = DoubleToInt32(y_val) & 0x1f;
+ uint32_t shift = DoubleToInt32(y_val) & 0x1F;
int value = ArithmeticShiftRight(DoubleToInt32(x_val), shift);
*x = factory()->NewNumberLiteral(value, pos);
return true;
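
Two things overlap in this hunk: the tree-wide style sweep to uppercase hex literals (0x1f becomes 0x1F; the same change recurs below for 0xc0de0de, 0xfeff and friends), and a view of the parser's compile-time folding of shifts, which masks the shift count to five bits as the ECMAScript ToInt32/ToUint32 semantics require. A standalone sketch of the folds, with V8's DoubleToInt32/DoubleToUint32 simplified to plain casts (valid only for values already in 32-bit range; the real helpers implement the full modulo-2^32 conversion):

#include <cstdint>
#include <cstdio>

// Assumption: signed >> is arithmetic here (guaranteed only since C++20,
// which is why V8 routes SAR through an ArithmeticShiftRight helper).
int32_t FoldShl(double x, double y) {
  return static_cast<int32_t>(x) << (static_cast<int32_t>(y) & 0x1F);
}
uint32_t FoldShr(double x, double y) {
  uint32_t shift = static_cast<int32_t>(y) & 0x1F;
  return static_cast<uint32_t>(x) >> shift;
}
int32_t FoldSar(double x, double y) {
  uint32_t shift = static_cast<int32_t>(y) & 0x1F;
  return static_cast<int32_t>(x) >> shift;
}

int main() {
  std::printf("%d\n", FoldShl(1, 33));            // 2: 33 & 0x1F == 1
  std::printf("%u\n", FoldShr(4294967295.0, 1));  // 2147483647
  std::printf("%d\n", FoldSar(-8, 1));            // -4
}
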
@@ -506,7 +506,7 @@ Parser::Parser(ParseInfo* info)
info->runtime_call_stats(), info->logger(),
info->script().is_null() ? -1 : info->script()->id(),
info->is_module(), true),
- scanner_(info->unicode_cache(), use_counts_),
+ scanner_(info->unicode_cache()),
reusable_preparser_(nullptr),
mode_(PARSE_EAGERLY), // Lazy mode must be set explicitly.
source_range_map_(info->source_range_map()),
@@ -543,10 +543,12 @@ Parser::Parser(ParseInfo* info)
set_allow_harmony_do_expressions(FLAG_harmony_do_expressions);
set_allow_harmony_function_sent(FLAG_harmony_function_sent);
set_allow_harmony_public_fields(FLAG_harmony_public_fields);
+ set_allow_harmony_static_fields(FLAG_harmony_static_fields);
set_allow_harmony_dynamic_import(FLAG_harmony_dynamic_import);
set_allow_harmony_import_meta(FLAG_harmony_import_meta);
- set_allow_harmony_async_iteration(FLAG_harmony_async_iteration);
set_allow_harmony_bigint(FLAG_harmony_bigint);
+ set_allow_harmony_optional_catch_binding(FLAG_harmony_optional_catch_binding);
+ set_allow_harmony_private_fields(FLAG_harmony_private_fields);
for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount;
++feature) {
use_counts_[feature] = 0;
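
The flag roster here tracks this V8's feature lineup: --harmony-async-iteration is gone because async iteration shipped (its PeekAhead guard above became unconditional), while static class fields, optional catch binding and private fields arrive as staged flags. For reference, a sketch of the macro-generated accessor pattern that set_allow_harmony_*() / allow_harmony_*() pairs like these come from (hypothetical FEATURE_LIST; V8 generates the real ones with an ALLOW_ACCESSORS-style macro in the parser base):

#include <cstdio>

#define FEATURE_LIST(V)             \
  V(harmony_static_fields)          \
  V(harmony_optional_catch_binding) \
  V(harmony_private_fields)

class ParserFeatures {
 public:
#define ALLOW_ACCESSORS(name)                                    \
  bool allow_##name() const { return allow_##name##_; }         \
  void set_allow_##name(bool allow) { allow_##name##_ = allow; }
  FEATURE_LIST(ALLOW_ACCESSORS)
#undef ALLOW_ACCESSORS

 private:
#define FIELD(name) bool allow_##name##_ = false;
  FEATURE_LIST(FIELD)
#undef FIELD
};

int main() {
  ParserFeatures f;
  f.set_allow_harmony_optional_catch_binding(true);
  std::printf("%d\n", f.allow_harmony_optional_catch_binding());  // 1
}
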
@@ -592,8 +594,9 @@ FunctionLiteral* Parser::ParseProgram(Isolate* isolate, ParseInfo* info) {
// called in the main thread.
DCHECK(parsing_on_main_thread_);
RuntimeCallTimerScope runtime_timer(
- runtime_call_stats_, info->is_eval() ? &RuntimeCallStats::ParseEval
- : &RuntimeCallStats::ParseProgram);
+ runtime_call_stats_, info->is_eval()
+ ? RuntimeCallCounterId::kParseEval
+ : RuntimeCallCounterId::kParseProgram);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.ParseProgram");
base::ElapsedTimer timer;
if (V8_UNLIKELY(FLAG_log_function_events)) timer.Start();
@@ -667,11 +670,9 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
}
DeclarationScope* scope = outer->AsDeclarationScope();
-
scope->set_start_position(0);
FunctionState function_state(&function_state_, &scope_, scope);
-
ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16, zone());
bool ok = true;
int beg_pos = scanner()->location().beg_pos;
@@ -689,7 +690,6 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
var->AllocateTo(VariableLocation::PARAMETER, 0);
PrepareGeneratorVariables();
- scope->ForceContextAllocation();
Expression* initial_yield =
BuildInitialYield(kNoSourcePosition, kGeneratorFunction);
body->Add(
@@ -699,6 +699,8 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
ParseModuleItemList(body, &ok);
ok = ok && module()->Validate(this->scope()->AsModuleScope(),
pending_error_handler(), zone());
+ } else if (info->is_wrapped_as_function()) {
+ ParseWrapped(info, body, scope, zone(), &ok);
} else {
// Don't count the mode in the use counters--give the program a chance
// to enable script-wide strict mode below.
@@ -751,13 +753,53 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
return result;
}
+ZoneList<const AstRawString*>* Parser::PrepareWrappedArguments(ParseInfo* info,
+ Zone* zone) {
+ DCHECK(parsing_on_main_thread_);
+ Handle<FixedArray> arguments(info->script()->wrapped_arguments());
+ int arguments_length = arguments->length();
+ ZoneList<const AstRawString*>* arguments_for_wrapped_function =
+ new (zone) ZoneList<const AstRawString*>(arguments_length, zone);
+ for (int i = 0; i < arguments_length; i++) {
+ const AstRawString* argument_string = ast_value_factory()->GetString(
+ Handle<String>(String::cast(arguments->get(i))));
+ arguments_for_wrapped_function->Add(argument_string, zone);
+ }
+ return arguments_for_wrapped_function;
+}
+
+void Parser::ParseWrapped(ParseInfo* info, ZoneList<Statement*>* body,
+ DeclarationScope* outer_scope, Zone* zone, bool* ok) {
+ DCHECK(info->is_wrapped_as_function());
+ ParsingModeScope parsing_mode(this, PARSE_EAGERLY);
+
+ // Set function and block state for the outer eval scope.
+ DCHECK(outer_scope->is_eval_scope());
+ FunctionState function_state(&function_state_, &scope_, outer_scope);
+
+ const AstRawString* function_name = nullptr;
+ Scanner::Location location(0, 0);
+
+ ZoneList<const AstRawString*>* arguments_for_wrapped_function =
+ PrepareWrappedArguments(info, zone);
+
+ FunctionLiteral* function_literal = ParseFunctionLiteral(
+ function_name, location, kSkipFunctionNameCheck, kNormalFunction,
+ kNoSourcePosition, FunctionLiteral::kWrapped, LanguageMode::kSloppy,
+ arguments_for_wrapped_function, CHECK_OK_VOID);
+
+ Statement* return_statement = factory()->NewReturnStatement(
+ function_literal, kNoSourcePosition, kNoSourcePosition);
+ body->Add(return_statement, zone);
+}
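
PrepareWrappedArguments and ParseWrapped are new: a script flagged is_wrapped_as_function() is parsed as the body of a synthesized function whose parameter names arrive out of band via script->wrapped_arguments(), and the enclosing script simply returns that function. This presumably backs embedder APIs in the vein of ScriptCompiler::CompileFunctionInContext, replacing textual wrappers such as Node's "(function (exports, require, module, __filename, __dirname) { ... })". Morally, the engine behaves as if it had compiled the concatenation below, minus the string splicing:

#include <iostream>
#include <string>
#include <vector>

// Illustrative only: what a "wrapped" parse is equivalent to if it were
// done by pasting strings together.
std::string EquivalentWrappedSource(const std::string& body,
                                    const std::vector<std::string>& args) {
  std::string src = "(function anonymous(";
  for (size_t i = 0; i < args.size(); ++i) {
    if (i > 0) src += ", ";
    src += args[i];
  }
  src += ") {\n" + body + "\n})";
  return src;
}

int main() {
  std::cout << EquivalentWrappedSource("return exports;",
                                       {"exports", "require", "module"})
            << "\n";
}

Declaring the parameter names directly (see the ParseFunction hunk further down) also means no synthetic parameter list is re-scanned, so the '(' / ')' injection checks stay confined to the dynamic-function path.
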
+
FunctionLiteral* Parser::ParseFunction(Isolate* isolate, ParseInfo* info,
Handle<SharedFunctionInfo> shared_info) {
// It's OK to use the Isolate & counters here, since this function is only
// called in the main thread.
DCHECK(parsing_on_main_thread_);
RuntimeCallTimerScope runtime_timer(runtime_call_stats_,
- &RuntimeCallStats::ParseFunction);
+ RuntimeCallCounterId::kParseFunction);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.ParseFunction");
base::ElapsedTimer timer;
if (V8_UNLIKELY(FLAG_log_function_events)) timer.Start();
@@ -794,7 +836,9 @@ FunctionLiteral* Parser::ParseFunction(Isolate* isolate, ParseInfo* info,
}
static FunctionLiteral::FunctionType ComputeFunctionType(ParseInfo* info) {
- if (info->is_declaration()) {
+ if (info->is_wrapped_as_function()) {
+ return FunctionLiteral::kWrapped;
+ } else if (info->is_declaration()) {
return FunctionLiteral::kDeclaration;
} else if (info->is_named_expression()) {
return FunctionLiteral::kNamedExpression;
@@ -927,9 +971,13 @@ FunctionLiteral* Parser::DoParseFunction(ParseInfo* info,
result = DefaultConstructor(raw_name, IsDerivedConstructor(kind),
info->start_position(), info->end_position());
} else {
+ ZoneList<const AstRawString*>* arguments_for_wrapped_function =
+ info->is_wrapped_as_function() ? PrepareWrappedArguments(info, zone())
+ : nullptr;
result = ParseFunctionLiteral(
raw_name, Scanner::Location::invalid(), kSkipFunctionNameCheck, kind,
- kNoSourcePosition, function_type, info->language_mode(), &ok);
+ kNoSourcePosition, function_type, info->language_mode(),
+ arguments_for_wrapped_function, &ok);
}
if (ok) {
@@ -1251,7 +1299,7 @@ Statement* Parser::ParseExportDefault(bool* ok) {
int pos = position();
ExpressionClassifier classifier(this);
Expression* value = ParseAssignmentExpression(true, CHECK_OK);
- RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
SetFunctionName(value, ast_value_factory()->default_string());
const AstRawString* local_name =
@@ -1714,7 +1762,6 @@ Statement* Parser::RewriteTryStatement(Block* try_block, Block* catch_block,
if (catch_block != nullptr && finally_block != nullptr) {
// If we have both, create an inner try/catch.
- DCHECK_NOT_NULL(catch_info.scope);
TryCatchStatement* statement;
statement = factory()->NewTryCatchStatement(try_block, catch_info.scope,
catch_block, kNoSourcePosition);
@@ -1727,7 +1774,6 @@ Statement* Parser::RewriteTryStatement(Block* try_block, Block* catch_block,
if (catch_block != nullptr) {
DCHECK_NULL(finally_block);
- DCHECK_NOT_NULL(catch_info.scope);
TryCatchStatement* stmt = factory()->NewTryCatchStatement(
try_block, catch_info.scope, catch_block, pos);
RecordTryCatchStatementSourceRange(stmt, catch_range);
@@ -1844,13 +1890,11 @@ void Parser::DeclareFunctionNameVar(const AstRawString* function_name,
// !%_IsJSReceiver(result = Await(iterator.next())) &&
// %ThrowIteratorResultNotAnObject(result)
// [endif]
-Expression* Parser::BuildIteratorNextResult(Expression* iterator,
+Expression* Parser::BuildIteratorNextResult(VariableProxy* iterator,
+ VariableProxy* next,
Variable* result, IteratorType type,
int pos) {
- Expression* next_literal = factory()->NewStringLiteral(
- ast_value_factory()->next_string(), kNoSourcePosition);
- Expression* next_property =
- factory()->NewProperty(iterator, next_literal, kNoSourcePosition);
+ Expression* next_property = factory()->NewResolvedProperty(iterator, next);
ZoneList<Expression*>* next_arguments =
new (zone()) ZoneList<Expression*>(0, zone());
Expression* next_call =
@@ -2053,6 +2097,7 @@ Statement* Parser::InitializeForOfStatement(
auto avfactory = ast_value_factory();
Variable* iterator = NewTemporary(avfactory->dot_iterator_string());
+ Variable* next = NewTemporary(avfactory->empty_string());
Variable* result = NewTemporary(avfactory->dot_result_string());
Variable* completion = NewTemporary(avfactory->empty_string());
@@ -2065,6 +2110,17 @@ Statement* Parser::InitializeForOfStatement(
iterable->position());
}
+ Expression* assign_next;
+ {
+ assign_next = factory()->NewAssignment(
+ Token::ASSIGN, factory()->NewVariableProxy(next),
+ factory()->NewProperty(factory()->NewVariableProxy(iterator),
+ factory()->NewStringLiteral(
+ avfactory->next_string(), kNoSourcePosition),
+ kNoSourcePosition),
+ kNoSourcePosition);
+ }
+
// [if (IteratorType == kNormal)]
// !%_IsJSReceiver(result = iterator.next()) &&
// %ThrowIteratorResultNotAnObject(result)
@@ -2074,9 +2130,10 @@ Statement* Parser::InitializeForOfStatement(
// [endif]
Expression* next_result;
{
- Expression* iterator_proxy = factory()->NewVariableProxy(iterator);
- next_result =
- BuildIteratorNextResult(iterator_proxy, result, type, next_result_pos);
+ VariableProxy* iterator_proxy = factory()->NewVariableProxy(iterator);
+ VariableProxy* next_proxy = factory()->NewVariableProxy(next);
+ next_result = BuildIteratorNextResult(iterator_proxy, next_proxy, result,
+ type, next_result_pos);
}
// result.done
@@ -2146,8 +2203,8 @@ Statement* Parser::InitializeForOfStatement(
body = block;
}
- for_of->Initialize(body, iterator, assign_iterator, next_result, result_done,
- assign_each);
+ for_of->Initialize(body, iterator, assign_iterator, assign_next, next_result,
+ result_done, assign_each);
return finalize ? FinalizeForOfStatement(for_of, completion, type, nopos)
: for_of;
}
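
The for-of desugaring now materializes a dedicated `next` temporary, assigned once from iterator.next (assign_next) and invoked each iteration through the new ResolvedProperty node (see NOT_A_PATTERN(ResolvedProperty) in pattern-rewriter.cc below). That matches the iterator-record semantics in the spec, where GetIterator caches [[NextMethod]] rather than re-reading the property on every step. A loose C++ analogue of the caching, for flavor only:

#include <cstdio>
#include <functional>
#include <utility>

struct Iterator {
  int i = 0;
  std::pair<int, bool> next() { return {i, ++i > 3}; }  // {value, done}
};

int main() {
  Iterator it;
  // Resolve the member once, then call it repeatedly; the rewritten
  // desugaring does the same with `let next = iterator.next;` followed by
  // %_Call(next, iterator) per step.
  auto next = std::mem_fn(&Iterator::next);
  for (;;) {
    auto [value, done] = next(it);
    if (done) break;
    std::printf("%d\n", value);  // prints 0 1 2
  }
}
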
@@ -2510,7 +2567,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
const AstRawString* function_name, Scanner::Location function_name_location,
FunctionNameValidity function_name_validity, FunctionKind kind,
int function_token_pos, FunctionLiteral::FunctionType function_type,
- LanguageMode language_mode, bool* ok) {
+ LanguageMode language_mode,
+ ZoneList<const AstRawString*>* arguments_for_wrapped_function, bool* ok) {
// Function ::
// '(' FormalParameterList? ')' '{' FunctionBody '}'
//
@@ -2520,8 +2578,12 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// Setter ::
// '(' PropertySetParameterList ')' '{' FunctionBody '}'
+ bool is_wrapped = function_type == FunctionLiteral::kWrapped;
+ DCHECK_EQ(is_wrapped, arguments_for_wrapped_function != nullptr);
+
int pos = function_token_pos == kNoSourcePosition ? peek_position()
: function_token_pos;
+ DCHECK_NE(kNoSourcePosition, pos);
// Anonymous functions were passed either the empty symbol or a null
// handle as the function name. Remember if we were passed a non-empty
@@ -2535,7 +2597,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
}
FunctionLiteral::EagerCompileHint eager_compile_hint =
- function_state_->next_function_is_likely_called()
+ function_state_->next_function_is_likely_called() || is_wrapped
? FunctionLiteral::kShouldEagerCompile
: default_eager_compile_hint();
@@ -2587,8 +2649,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
RuntimeCallTimerScope runtime_timer(
runtime_call_stats_,
parsing_on_main_thread_
- ? &RuntimeCallStats::ParseFunctionLiteral
- : &RuntimeCallStats::ParseBackgroundFunctionLiteral);
+ ? RuntimeCallCounterId::kParseFunctionLiteral
+ : RuntimeCallCounterId::kParseBackgroundFunctionLiteral);
base::ElapsedTimer timer;
if (V8_UNLIKELY(FLAG_log_function_events)) timer.Start();
@@ -2650,7 +2712,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
if (should_preparse) scope->set_needs_migration();
#endif
- Expect(Token::LPAREN, CHECK_OK);
+ if (!is_wrapped) Expect(Token::LPAREN, CHECK_OK);
scope->set_start_position(scanner()->location().beg_pos);
// Eager or lazy parse? If is_lazy_top_level_function, we'll parse
@@ -2661,6 +2723,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
if (should_preparse) {
DCHECK(parse_lazily());
DCHECK(is_lazy_top_level_function || is_lazy_inner_function);
+ DCHECK(!is_wrapped);
Scanner::BookmarkScope bookmark(scanner());
bookmark.Set();
LazyParsingResult result = SkipFunction(
@@ -2687,7 +2750,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
body = ParseFunction(function_name, pos, kind, function_type, scope,
&num_parameters, &function_length,
&has_duplicate_parameters, &expected_property_count,
- CHECK_OK);
+ arguments_for_wrapped_function, CHECK_OK);
}
DCHECK_EQ(should_preparse, temp_zoned_);
@@ -2705,18 +2768,20 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
}
if (V8_UNLIKELY(FLAG_runtime_stats)) {
if (should_preparse) {
- RuntimeCallStats::CounterId counter_id =
+ RuntimeCallCounterId counter_id =
parsing_on_main_thread_
- ? &RuntimeCallStats::PreParseWithVariableResolution
- : &RuntimeCallStats::PreParseBackgroundWithVariableResolution;
+ ? RuntimeCallCounterId::kPreParseWithVariableResolution
+ : RuntimeCallCounterId::
+ kPreParseBackgroundWithVariableResolution;
if (is_top_level) {
- counter_id =
- parsing_on_main_thread_
- ? &RuntimeCallStats::PreParseNoVariableResolution
- : &RuntimeCallStats::PreParseBackgroundNoVariableResolution;
+ counter_id = parsing_on_main_thread_
+ ? RuntimeCallCounterId::kPreParseNoVariableResolution
+ : RuntimeCallCounterId::
+ kPreParseBackgroundNoVariableResolution;
+ }
+ if (runtime_call_stats_) {
+ runtime_call_stats_->CorrectCurrentCounterId(counter_id);
}
- RuntimeCallStats::CorrectCurrentCounterId(runtime_call_stats_,
- counter_id);
}
}
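
Recurring throughout this patch: RuntimeCallStats counters are no longer addressed by pointer-to-member (&RuntimeCallStats::ParseEval) but by a plain RuntimeCallCounterId enum, and CorrectCurrentCounterId becomes an instance method guarded by a null check. A sketch of the new identifier's shape, with enumerator names assumed from the call sites in this diff:

#include <cstdint>
#include <cstdio>

enum class RuntimeCallCounterId : uint16_t {
  kParseProgram,
  kParseEval,
  kParseFunction,
  kNumberOfCounters  // assumed sentinel name
};

struct RuntimeCallStats {
  uint64_t counts[static_cast<int>(RuntimeCallCounterId::kNumberOfCounters)] =
      {};
  void Add(RuntimeCallCounterId id) { ++counts[static_cast<int>(id)]; }
};

int main() {
  RuntimeCallStats stats;
  bool is_eval = true;
  // Mirrors the ParseProgram call site: select the counter with a ternary.
  stats.Add(is_eval ? RuntimeCallCounterId::kParseEval
                    : RuntimeCallCounterId::kParseProgram);
  std::printf("%d\n", static_cast<int>(stats.counts[1]));  // 1
}

An enum value is trivially copyable and comparable, which is cheaper to pass around and store in tables than a pointer-to-member.
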
@@ -3118,11 +3183,14 @@ ZoneList<Statement*>* Parser::ParseFunction(
const AstRawString* function_name, int pos, FunctionKind kind,
FunctionLiteral::FunctionType function_type,
DeclarationScope* function_scope, int* num_parameters, int* function_length,
- bool* has_duplicate_parameters, int* expected_property_count, bool* ok) {
+ bool* has_duplicate_parameters, int* expected_property_count,
+ ZoneList<const AstRawString*>* arguments_for_wrapped_function, bool* ok) {
ParsingModeScope mode(this, allow_lazy_ ? PARSE_LAZILY : PARSE_EAGERLY);
FunctionState function_state(&function_state_, &scope_, function_scope);
+ bool is_wrapped = function_type == FunctionLiteral::kWrapped;
+
DuplicateFinder duplicate_finder;
ExpressionClassifier formals_classifier(this, &duplicate_finder);
@@ -3136,34 +3204,53 @@ ZoneList<Statement*>* Parser::ParseFunction(
}
ParserFormalParameters formals(function_scope);
- ParseFormalParameterList(&formals, CHECK_OK);
- if (expected_parameters_end_pos != kNoSourcePosition) {
- // Check for '(' or ')' shenanigans in the parameter string for dynamic
- // functions.
- int position = peek_position();
- if (position < expected_parameters_end_pos) {
- ReportMessageAt(Scanner::Location(position, position + 1),
- MessageTemplate::kArgStringTerminatesParametersEarly);
- *ok = false;
- return nullptr;
- } else if (position > expected_parameters_end_pos) {
- ReportMessageAt(Scanner::Location(expected_parameters_end_pos - 2,
- expected_parameters_end_pos),
- MessageTemplate::kUnexpectedEndOfArgString);
- *ok = false;
- return nullptr;
+
+ if (is_wrapped) {
+ // For a function implicitly wrapped in a function header and footer,
+ // the function arguments are provided separately from the source and
+ // are declared directly here.
+ int arguments_length = arguments_for_wrapped_function->length();
+ for (int i = 0; i < arguments_length; i++) {
+ const bool is_rest = false;
+ Expression* argument = ExpressionFromIdentifier(
+ arguments_for_wrapped_function->at(i), kNoSourcePosition);
+ AddFormalParameter(&formals, argument, NullExpression(),
+ kNoSourcePosition, is_rest);
+ }
+ DCHECK_EQ(arguments_length, formals.num_parameters());
+ DeclareFormalParameters(formals.scope, formals.params, formals.is_simple);
+ } else {
+ // For a regular function, the function arguments are parsed from source.
+ DCHECK_NULL(arguments_for_wrapped_function);
+ ParseFormalParameterList(&formals, CHECK_OK);
+ if (expected_parameters_end_pos != kNoSourcePosition) {
+ // Check for '(' or ')' shenanigans in the parameter string for dynamic
+ // functions.
+ int position = peek_position();
+ if (position < expected_parameters_end_pos) {
+ ReportMessageAt(Scanner::Location(position, position + 1),
+ MessageTemplate::kArgStringTerminatesParametersEarly);
+ *ok = false;
+ return nullptr;
+ } else if (position > expected_parameters_end_pos) {
+ ReportMessageAt(Scanner::Location(expected_parameters_end_pos - 2,
+ expected_parameters_end_pos),
+ MessageTemplate::kUnexpectedEndOfArgString);
+ *ok = false;
+ return nullptr;
+ }
}
+ Expect(Token::RPAREN, CHECK_OK);
+ int formals_end_position = scanner()->location().end_pos;
+
+ CheckArityRestrictions(formals.arity, kind, formals.has_rest,
+ function_scope->start_position(),
+ formals_end_position, CHECK_OK);
+ Expect(Token::LBRACE, CHECK_OK);
}
- Expect(Token::RPAREN, CHECK_OK);
- int formals_end_position = scanner()->location().end_pos;
*num_parameters = formals.num_parameters();
*function_length = formals.function_length;
- CheckArityRestrictions(formals.arity, kind, formals.has_rest,
- function_scope->start_position(), formals_end_position,
- CHECK_OK);
- Expect(Token::LBRACE, CHECK_OK);
-
ZoneList<Statement*>* body = new (zone()) ZoneList<Statement*>(8, zone());
ParseFunctionBody(body, function_name, pos, formals, kind, function_type, ok);
@@ -3240,9 +3327,10 @@ void Parser::DeclareClassProperty(const AstRawString* class_name,
return;
}
- DCHECK(allow_harmony_public_fields());
+ DCHECK(allow_harmony_public_fields() || allow_harmony_private_fields());
if (is_static) {
+ DCHECK(allow_harmony_static_fields());
class_info->static_fields->Add(property, zone());
} else {
class_info->instance_fields->Add(property, zone());
@@ -3262,6 +3350,8 @@ void Parser::DeclareClassProperty(const AstRawString* class_name,
FunctionLiteral* Parser::CreateInitializerFunction(
DeclarationScope* scope, ZoneList<ClassLiteral::Property*>* fields) {
+ DCHECK_EQ(scope->function_kind(),
+ FunctionKind::kClassFieldsInitializerFunction);
// function() { .. class fields initializer .. }
ZoneList<Statement*>* statements = NewStatementList(1);
InitializeClassFieldsStatement* static_fields =
@@ -3450,8 +3540,8 @@ void Parser::UpdateStatistics(Isolate* isolate, Handle<Script> script) {
}
void Parser::ParseOnBackground(ParseInfo* info) {
- RuntimeCallTimerScope runtimeTimer(runtime_call_stats_,
- &RuntimeCallStats::ParseBackgroundProgram);
+ RuntimeCallTimerScope runtimeTimer(
+ runtime_call_stats_, RuntimeCallCounterId::kParseBackgroundProgram);
parsing_on_main_thread_ = false;
if (!info->script().is_null()) {
set_script_id(info->script()->id());
@@ -3581,11 +3671,11 @@ namespace {
// http://burtleburtle.net/bob/hash/integer.html
uint32_t HalfAvalance(uint32_t a) {
- a = (a + 0x479ab41d) + (a << 8);
- a = (a ^ 0xe4aa10ce) ^ (a >> 5);
- a = (a + 0x9942f0a6) - (a << 14);
- a = (a ^ 0x5aedd67d) ^ (a >> 3);
- a = (a + 0x17bea992) + (a << 7);
+ a = (a + 0x479AB41D) + (a << 8);
+ a = (a ^ 0xE4AA10CE) ^ (a >> 5);
+ a = (a + 0x9942F0A6) - (a << 14);
+ a = (a ^ 0x5AEDD67D) ^ (a >> 3);
+ a = (a + 0x17BEA992) + (a << 7);
return a;
}
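
Only the hex casing changes here; the mixer is Bob Jenkins' half-avalanche integer hash from the URL cited above. Reproduced as a runnable harness (unsigned 32-bit wrap-around is well defined, so the function is portable as written):

#include <cstdint>
#include <cstdio>

uint32_t HalfAvalance(uint32_t a) {  // spelling as in the source
  a = (a + 0x479AB41D) + (a << 8);
  a = (a ^ 0xE4AA10CE) ^ (a >> 5);
  a = (a + 0x9942F0A6) - (a << 14);
  a = (a ^ 0x5AEDD67D) ^ (a >> 3);
  a = (a + 0x17BEA992) + (a << 7);
  return a;
}

int main() {
  for (uint32_t i = 0; i < 4; ++i) {
    std::printf("%u -> %08X\n", static_cast<unsigned>(i),
                static_cast<unsigned>(HalfAvalance(i)));
  }
}
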
@@ -3808,24 +3898,6 @@ void Parser::RewriteAsyncFunctionBody(ZoneList<Statement*>* body, Block* block,
body->Add(block, zone());
}
-void Parser::RewriteNonPattern(bool* ok) {
- ValidateExpression(CHECK_OK_VOID);
- auto non_patterns_to_rewrite = function_state_->non_patterns_to_rewrite();
- int begin = classifier()->GetNonPatternBegin();
- int end = non_patterns_to_rewrite->length();
- if (begin < end) {
- for (int i = begin; i < end; i++) {
- RewritableExpression* expr = non_patterns_to_rewrite->at(i);
- // TODO(adamk): Make this more typesafe.
- DCHECK(expr->expression()->IsArrayLiteral());
- ArrayLiteral* lit = expr->expression()->AsArrayLiteral();
- expr->Rewrite(RewriteSpreads(lit));
- }
- non_patterns_to_rewrite->Rewind(begin);
- }
-}
-
-
void Parser::RewriteDestructuringAssignments() {
const auto& assignments =
function_state_->destructuring_assignments_to_rewrite();
@@ -3845,102 +3917,11 @@ void Parser::RewriteDestructuringAssignments() {
}
}
-Expression* Parser::RewriteSpreads(ArrayLiteral* lit) {
- // Array literals containing spreads are rewritten using do expressions, e.g.
- // [1, 2, 3, ...x, 4, ...y, 5]
- // is roughly rewritten as:
- // do {
- // $R = [1, 2, 3];
- // for ($i of x) %AppendElement($R, $i);
- // %AppendElement($R, 4);
- // for ($j of y) %AppendElement($R, $j);
- // %AppendElement($R, 5);
- // $R
- // }
- // where $R, $i and $j are fresh temporary variables.
- ZoneList<Expression*>::iterator s = lit->FirstSpread();
- if (s == lit->EndValue()) return nullptr; // no spread, no rewriting...
- Variable* result = NewTemporary(ast_value_factory()->dot_result_string());
- // NOTE: The value assigned to R is the whole original array literal,
- // spreads included. This will be fixed before the rewritten AST is returned.
- // $R = lit
- Expression* init_result = factory()->NewAssignment(
- Token::INIT, factory()->NewVariableProxy(result), lit, kNoSourcePosition);
- Block* do_block = factory()->NewBlock(16, false);
- do_block->statements()->Add(
- factory()->NewExpressionStatement(init_result, kNoSourcePosition),
- zone());
- // Traverse the array literal starting from the first spread.
- while (s != lit->EndValue()) {
- Expression* value = *s++;
- Spread* spread = value->AsSpread();
- if (spread == nullptr) {
- // If the element is not a spread, we're adding a single:
- // %AppendElement($R, value)
- // or, in case of a hole,
- // ++($R.length)
- if (!value->IsTheHoleLiteral()) {
- ZoneList<Expression*>* append_element_args = NewExpressionList(2);
- append_element_args->Add(factory()->NewVariableProxy(result), zone());
- append_element_args->Add(value, zone());
- do_block->statements()->Add(
- factory()->NewExpressionStatement(
- factory()->NewCallRuntime(Runtime::kAppendElement,
- append_element_args,
- kNoSourcePosition),
- kNoSourcePosition),
- zone());
- } else {
- Property* length_property = factory()->NewProperty(
- factory()->NewVariableProxy(result),
- factory()->NewStringLiteral(ast_value_factory()->length_string(),
- kNoSourcePosition),
- kNoSourcePosition);
- CountOperation* count_op = factory()->NewCountOperation(
- Token::INC, true /* prefix */, length_property, kNoSourcePosition);
- do_block->statements()->Add(
- factory()->NewExpressionStatement(count_op, kNoSourcePosition),
- zone());
- }
- } else {
- // If it's a spread, we're adding a for/of loop iterating through it.
- Variable* each = NewTemporary(ast_value_factory()->dot_for_string());
- Expression* subject = spread->expression();
- // %AppendElement($R, each)
- Statement* append_body;
- {
- ZoneList<Expression*>* append_element_args = NewExpressionList(2);
- append_element_args->Add(factory()->NewVariableProxy(result), zone());
- append_element_args->Add(factory()->NewVariableProxy(each), zone());
- append_body = factory()->NewExpressionStatement(
- factory()->NewCallRuntime(Runtime::kAppendElement,
- append_element_args, kNoSourcePosition),
- kNoSourcePosition);
- }
- // for (each of spread) %AppendElement($R, each)
- ForOfStatement* loop =
- factory()->NewForOfStatement(nullptr, kNoSourcePosition);
- const bool finalize = false;
- InitializeForOfStatement(loop, factory()->NewVariableProxy(each), subject,
- append_body, finalize, IteratorType::kNormal);
- do_block->statements()->Add(loop, zone());
- }
- }
- // Now, rewind the original array literal to truncate everything from the
- // first spread (included) until the end. This fixes $R's initialization.
- lit->RewindSpreads();
- return factory()->NewDoExpression(do_block, result, lit->position());
-}
-
void Parser::QueueDestructuringAssignmentForRewriting(
RewritableExpression* expr) {
function_state_->AddDestructuringAssignment(expr);
}
-void Parser::QueueNonPatternForRewriting(RewritableExpression* expr, bool* ok) {
- function_state_->AddNonPatternForRewriting(expr, ok);
-}
-
void Parser::SetFunctionNameFromPropertyName(LiteralProperty* property,
const AstRawString* name,
const AstRawString* prefix) {
@@ -4315,9 +4296,8 @@ void Parser::BuildIteratorCloseForCompletion(ZoneList<Statement*>* statements,
zone());
Block* catch_block = factory()->NewBlock(0, false);
- Scope* catch_scope = NewHiddenCatchScope();
- try_call_return = factory()->NewTryCatchStatement(try_block, catch_scope,
- catch_block, nopos);
+ try_call_return =
+ factory()->NewTryCatchStatement(try_block, nullptr, catch_block, nopos);
}
// let output = %_Call(iteratorReturn, iterator);
diff --git a/deps/v8/src/parsing/parser.h b/deps/v8/src/parsing/parser.h
index aa800dafc5..f92eddcd9d 100644
--- a/deps/v8/src/parsing/parser.h
+++ b/deps/v8/src/parsing/parser.h
@@ -267,6 +267,15 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
// Called by ParseProgram after setting up the scanner.
FunctionLiteral* DoParseProgram(ParseInfo* info);
+ // Parses the script as if the source were implicitly wrapped in a function.
+ // We manually construct the AST and scopes for a top-level function and the
+ // function wrapper.
+ void ParseWrapped(ParseInfo* info, ZoneList<Statement*>* body,
+ DeclarationScope* scope, Zone* zone, bool* ok);
+
+ ZoneList<const AstRawString*>* PrepareWrappedArguments(ParseInfo* info,
+ Zone* zone);
+
void SetCachedData(ParseInfo* info);
void StitchAst(ParseInfo* top_level_parse_info, Isolate* isolate);
@@ -292,10 +301,12 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
SET_ALLOW(harmony_do_expressions);
SET_ALLOW(harmony_function_sent);
SET_ALLOW(harmony_public_fields);
+ SET_ALLOW(harmony_static_fields);
SET_ALLOW(harmony_dynamic_import);
SET_ALLOW(harmony_import_meta);
- SET_ALLOW(harmony_async_iteration);
SET_ALLOW(harmony_bigint);
+ SET_ALLOW(harmony_optional_catch_binding);
+ SET_ALLOW(harmony_private_fields);
#undef SET_ALLOW
}
return reusable_preparser_;
@@ -391,13 +402,14 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
Expression* RewriteDestructuringAssignment(Assignment* assignment);
// [if (IteratorType == kAsync)]
- // !%_IsJSReceiver(result = Await(iterator.next()) &&
+ // !%_IsJSReceiver(result = Await(next.[[Call]](iterator, « »))) &&
// %ThrowIteratorResultNotAnObject(result)
// [else]
- // !%_IsJSReceiver(result = iterator.next()) &&
+ // !%_IsJSReceiver(result = next.[[Call]](iterator, « »)) &&
// %ThrowIteratorResultNotAnObject(result)
// [endif]
- Expression* BuildIteratorNextResult(Expression* iterator, Variable* result,
+ Expression* BuildIteratorNextResult(VariableProxy* iterator,
+ VariableProxy* next, Variable* result,
IteratorType type, int pos);
// Initialize the components of a for-in / for-of statement.
@@ -425,7 +437,13 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
const AstRawString* name, Scanner::Location function_name_location,
FunctionNameValidity function_name_validity, FunctionKind kind,
int function_token_position, FunctionLiteral::FunctionType type,
- LanguageMode language_mode, bool* ok);
+ LanguageMode language_mode,
+ ZoneList<const AstRawString*>* arguments_for_wrapped_function, bool* ok);
+
+ ObjectLiteral* InitializeObjectLiteral(ObjectLiteral* object_literal) {
+ object_literal->CalculateEmitStore(main_zone());
+ return object_literal;
+ }
// Check if the scope has conflicting var/let declarations from different
// scopes. This covers for example
@@ -488,7 +506,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
FunctionLiteral::FunctionType function_type,
DeclarationScope* function_scope, int* num_parameters,
int* function_length, bool* has_duplicate_parameters,
- int* expected_property_count, bool* ok);
+ int* expected_property_count,
+ ZoneList<const AstRawString*>* arguments_for_wrapped_function, bool* ok);
void ThrowPendingError(Isolate* isolate, Handle<Script> script);
@@ -553,13 +572,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
Expression* RewriteSpreads(ArrayLiteral* lit);
- // Rewrite expressions that are not used as patterns
- V8_INLINE void RewriteNonPattern(bool* ok);
-
V8_INLINE void QueueDestructuringAssignmentForRewriting(
RewritableExpression* assignment);
- V8_INLINE void QueueNonPatternForRewriting(RewritableExpression* expr,
- bool* ok);
friend class InitializerRewriter;
void RewriteParameterInitializer(Expression* expr);
@@ -760,17 +774,11 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
bool CollapseNaryExpression(Expression** x, Expression* y, Token::Value op,
int pos, const SourceRange& range);
- // Rewrites the following types of unary expressions:
- // not <literal> -> true / false
- // + <numeric literal> -> <numeric literal>
- // - <numeric literal> -> <numeric literal with value negated>
+ // Returns a UnaryExpression or, in one of the following cases, a Literal.
// ! <literal> -> true / false
- // The following rewriting rules enable the collection of type feedback
- // without any special stub and the multiplication is removed later in
- // Crankshaft's canonicalization pass.
- // + foo -> foo * 1
- // - foo -> foo * (-1)
- // ~ foo -> foo ^(~0)
+ // + <Number literal> -> <Number literal>
+ // - <Number literal> -> <Number literal with value negated>
+ // ~ <Number literal> -> <Number literal with value bitwise negated>
Expression* BuildUnaryExpression(Expression* expression, Token::Value op,
int pos);
@@ -990,10 +998,6 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
return function_state_->GetReportedErrorList();
}
- V8_INLINE ZoneList<RewritableExpression*>* GetNonPatternList() const {
- return function_state_->non_patterns_to_rewrite();
- }
-
V8_INLINE void CountUsage(v8::Isolate::UseCounterFeature feature) {
++use_counts_[feature];
}
diff --git a/deps/v8/src/parsing/pattern-rewriter.cc b/deps/v8/src/parsing/pattern-rewriter.cc
index faecb5bb0c..daa126d443 100644
--- a/deps/v8/src/parsing/pattern-rewriter.cc
+++ b/deps/v8/src/parsing/pattern-rewriter.cc
@@ -445,6 +445,11 @@ void PatternRewriter::VisitArrayLiteral(ArrayLiteral* node,
auto iterator = CreateTempVar(factory()->NewGetIterator(
factory()->NewVariableProxy(temp), current_value_, IteratorType::kNormal,
current_value_->position()));
+ auto next = CreateTempVar(factory()->NewProperty(
+ factory()->NewVariableProxy(iterator),
+ factory()->NewStringLiteral(ast_value_factory()->next_string(),
+ kNoSourcePosition),
+ kNoSourcePosition));
auto done =
CreateTempVar(factory()->NewBooleanLiteral(false, kNoSourcePosition));
auto result = CreateTempVar();
@@ -525,7 +530,8 @@ void PatternRewriter::VisitArrayLiteral(ArrayLiteral* node,
next_block->statements()->Add(
factory()->NewExpressionStatement(
parser_->BuildIteratorNextResult(
- factory()->NewVariableProxy(iterator), result,
+ factory()->NewVariableProxy(iterator),
+ factory()->NewVariableProxy(next), result,
IteratorType::kNormal, kNoSourcePosition),
kNoSourcePosition),
zone());
@@ -599,6 +605,7 @@ void PatternRewriter::VisitArrayLiteral(ArrayLiteral* node,
// result = IteratorNext(iterator);
Statement* get_next = factory()->NewExpressionStatement(
parser_->BuildIteratorNextResult(factory()->NewVariableProxy(iterator),
+ factory()->NewVariableProxy(next),
result, IteratorType::kNormal, nopos),
nopos);
@@ -756,6 +763,7 @@ NOT_A_PATTERN(ImportCallExpression)
NOT_A_PATTERN(Literal)
NOT_A_PATTERN(NativeFunctionLiteral)
NOT_A_PATTERN(RegExpLiteral)
+NOT_A_PATTERN(ResolvedProperty)
NOT_A_PATTERN(ReturnStatement)
NOT_A_PATTERN(SloppyBlockFunctionStatement)
NOT_A_PATTERN(Spread)
diff --git a/deps/v8/src/parsing/preparsed-scope-data.cc b/deps/v8/src/parsing/preparsed-scope-data.cc
index 7191639cf8..786be3f0e5 100644
--- a/deps/v8/src/parsing/preparsed-scope-data.cc
+++ b/deps/v8/src/parsing/preparsed-scope-data.cc
@@ -24,7 +24,7 @@ class VariableMaybeAssignedField : public BitField8<bool, 0, 1> {};
class VariableContextAllocatedField
: public BitField8<bool, VariableMaybeAssignedField::kNext, 1> {};
-const int kMagicValue = 0xc0de0de;
+const int kMagicValue = 0xC0DE0DE;
#ifdef DEBUG
const size_t kUint32Size = 5;
@@ -571,8 +571,8 @@ void ConsumedPreParsedScopeData::RestoreData(Scope* scope) {
if (scope_data_->RemainingBytes() < kUint8Size) {
// Temporary debugging code for detecting inconsistent data. Write debug
// information on the stack, then crash.
- data_->GetIsolate()->PushStackTraceAndDie(0xc0defee, nullptr, nullptr,
- 0xc0defee);
+ data_->GetIsolate()->PushStackTraceAndDie(0xC0DEFEE, nullptr, nullptr,
+ 0xC0DEFEE);
}
// scope_type is stored only in debug mode.
diff --git a/deps/v8/src/parsing/preparser.cc b/deps/v8/src/parsing/preparser.cc
index 16879e518c..b28eab2e75 100644
--- a/deps/v8/src/parsing/preparser.cc
+++ b/deps/v8/src/parsing/preparser.cc
@@ -207,12 +207,12 @@ PreParser::PreParseResult PreParser::PreParseFunction(
if (!IsArrowFunction(kind) && track_unresolved_variables_ &&
result == kLazyParsingComplete) {
- DeclareFunctionNameVar(function_name, function_type, function_scope);
-
// Declare arguments after parsing the function since lexical 'arguments'
// masks the arguments object. Declare arguments before declaring the
// function var since the arguments object masks 'function arguments'.
function_scope->DeclareArguments(ast_value_factory());
+
+ DeclareFunctionNameVar(function_name, function_type, function_scope);
}
use_counts_ = nullptr;
@@ -266,14 +266,18 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
Identifier function_name, Scanner::Location function_name_location,
FunctionNameValidity function_name_validity, FunctionKind kind,
int function_token_pos, FunctionLiteral::FunctionType function_type,
- LanguageMode language_mode, bool* ok) {
+ LanguageMode language_mode,
+ ZoneList<const AstRawString*>* arguments_for_wrapped_function, bool* ok) {
+ // Wrapped functions are not parsed in the preparser.
+ DCHECK_NULL(arguments_for_wrapped_function);
+ DCHECK_NE(FunctionLiteral::kWrapped, function_type);
// Function ::
// '(' FormalParameterList? ')' '{' FunctionBody '}'
- const RuntimeCallStats::CounterId counters[2][2] = {
- {&RuntimeCallStats::PreParseBackgroundNoVariableResolution,
- &RuntimeCallStats::PreParseNoVariableResolution},
- {&RuntimeCallStats::PreParseBackgroundWithVariableResolution,
- &RuntimeCallStats::PreParseWithVariableResolution}};
+ const RuntimeCallCounterId counters[2][2] = {
+ {RuntimeCallCounterId::kPreParseBackgroundNoVariableResolution,
+ RuntimeCallCounterId::kPreParseNoVariableResolution},
+ {RuntimeCallCounterId::kPreParseBackgroundWithVariableResolution,
+ RuntimeCallCounterId::kPreParseWithVariableResolution}};
RuntimeCallTimerScope runtime_timer(
runtime_call_stats_,
counters[track_unresolved_variables_][parsing_on_main_thread_]);
diff --git a/deps/v8/src/parsing/preparser.h b/deps/v8/src/parsing/preparser.h
index 8c1d183fd6..705cd011ee 100644
--- a/deps/v8/src/parsing/preparser.h
+++ b/deps/v8/src/parsing/preparser.h
@@ -956,12 +956,18 @@ class PreParser : public ParserBase<PreParser> {
bool is_inner_function, bool may_abort, bool* ok) {
UNREACHABLE();
}
- Expression ParseFunctionLiteral(Identifier name,
- Scanner::Location function_name_location,
- FunctionNameValidity function_name_validity,
- FunctionKind kind, int function_token_pos,
- FunctionLiteral::FunctionType function_type,
- LanguageMode language_mode, bool* ok);
+
+ Expression ParseFunctionLiteral(
+ Identifier name, Scanner::Location function_name_location,
+ FunctionNameValidity function_name_validity, FunctionKind kind,
+ int function_token_pos, FunctionLiteral::FunctionType function_type,
+ LanguageMode language_mode,
+ ZoneList<const AstRawString*>* arguments_for_wrapped_function, bool* ok);
+
+ PreParserExpression InitializeObjectLiteral(PreParserExpression literal) {
+ return literal;
+ }
+
LazyParsingResult ParseStatementListAndLogFunction(
PreParserFormalParameters* formals, bool maybe_abort, bool* ok);
@@ -999,7 +1005,6 @@ class PreParser : public ParserBase<PreParser> {
V8_INLINE void RewriteAsyncFunctionBody(
PreParserStatementList body, PreParserStatement block,
const PreParserExpression& return_value, bool* ok) {}
- V8_INLINE void RewriteNonPattern(bool* ok) { ValidateExpression(ok); }
void DeclareAndInitializeVariables(
PreParserStatement block,
@@ -1186,8 +1191,6 @@ class PreParser : public ParserBase<PreParser> {
V8_INLINE void QueueDestructuringAssignmentForRewriting(
PreParserExpression assignment) {}
- V8_INLINE void QueueNonPatternForRewriting(const PreParserExpression& expr,
- bool* ok) {}
// Helper functions for recursive descent.
V8_INLINE bool IsEval(const PreParserIdentifier& identifier) const {
@@ -1665,10 +1668,6 @@ class PreParser : public ParserBase<PreParser> {
return function_state_->GetReportedErrorList();
}
- V8_INLINE ZoneList<PreParserExpression>* GetNonPatternList() const {
- return function_state_->non_patterns_to_rewrite();
- }
-
V8_INLINE void CountUsage(v8::Isolate::UseCounterFeature feature) {
if (use_counts_ != nullptr) ++use_counts_[feature];
}
diff --git a/deps/v8/src/parsing/rewriter.cc b/deps/v8/src/parsing/rewriter.cc
index c31d0ea21d..102efad292 100644
--- a/deps/v8/src/parsing/rewriter.cc
+++ b/deps/v8/src/parsing/rewriter.cc
@@ -367,8 +367,8 @@ bool Rewriter::Rewrite(ParseInfo* info) {
RuntimeCallTimerScope runtimeTimer(
info->runtime_call_stats(),
info->on_background_thread()
- ? &RuntimeCallStats::CompileBackgroundRewriteReturnResult
- : &RuntimeCallStats::CompileRewriteReturnResult);
+ ? RuntimeCallCounterId::kCompileBackgroundRewriteReturnResult
+ : RuntimeCallCounterId::kCompileRewriteReturnResult);
FunctionLiteral* function = info->literal();
DCHECK_NOT_NULL(function);
diff --git a/deps/v8/src/parsing/scanner-character-streams.cc b/deps/v8/src/parsing/scanner-character-streams.cc
index c5175c4de7..20aa5c9f8e 100644
--- a/deps/v8/src/parsing/scanner-character-streams.cc
+++ b/deps/v8/src/parsing/scanner-character-streams.cc
@@ -16,7 +16,7 @@ namespace v8 {
namespace internal {
namespace {
-const unibrow::uchar kUtf8Bom = 0xfeff;
+const unibrow::uchar kUtf8Bom = 0xFEFF;
} // namespace
// ----------------------------------------------------------------------------
@@ -203,7 +203,7 @@ class Utf8ExternalStreamingStream : public BufferedUtf16CharacterStream {
Utf8ExternalStreamingStream(
ScriptCompiler::ExternalSourceStream* source_stream,
RuntimeCallStats* stats)
- : current_({0, {0, 0, unibrow::Utf8::Utf8IncrementalBuffer(0)}}),
+ : current_({0, {0, 0, 0, unibrow::Utf8::State::kAccept}}),
source_stream_(source_stream),
stats_(stats) {}
~Utf8ExternalStreamingStream() override {
@@ -223,7 +223,8 @@ class Utf8ExternalStreamingStream : public BufferedUtf16CharacterStream {
struct StreamPosition {
size_t bytes;
size_t chars;
- unibrow::Utf8::Utf8IncrementalBuffer incomplete_char;
+ uint32_t incomplete_char;
+ unibrow::Utf8::State state;
};
// Position contains a StreamPosition and the index of the chunk the position
@@ -268,25 +269,25 @@ bool Utf8ExternalStreamingStream::SkipToPosition(size_t position) {
const Chunk& chunk = chunks_[current_.chunk_no];
DCHECK(current_.pos.bytes >= chunk.start.bytes);
- unibrow::Utf8::Utf8IncrementalBuffer incomplete_char =
- chunk.start.incomplete_char;
+ unibrow::Utf8::State state = chunk.start.state;
+ uint32_t incomplete_char = chunk.start.incomplete_char;
size_t it = current_.pos.bytes - chunk.start.bytes;
size_t chars = chunk.start.chars;
while (it < chunk.length && chars < position) {
- unibrow::uchar t =
- unibrow::Utf8::ValueOfIncremental(chunk.data[it], &incomplete_char);
+ unibrow::uchar t = unibrow::Utf8::ValueOfIncremental(
+ chunk.data[it], &it, &state, &incomplete_char);
if (t == kUtf8Bom && current_.pos.chars == 0) {
// BOM detected at beginning of the stream. Don't copy it.
} else if (t != unibrow::Utf8::kIncomplete) {
chars++;
if (t > unibrow::Utf16::kMaxNonSurrogateCharCode) chars++;
}
- it++;
}
current_.pos.bytes += it;
current_.pos.chars = chars;
current_.pos.incomplete_char = incomplete_char;
+ current_.pos.state = state;
current_.chunk_no += (it == chunk.length);
return current_.pos.chars == position;
@@ -304,31 +305,33 @@ void Utf8ExternalStreamingStream::FillBufferFromCurrentChunk() {
uint16_t* cursor = buffer_ + (buffer_end_ - buffer_start_);
DCHECK_EQ(cursor, buffer_end_);
+ unibrow::Utf8::State state = current_.pos.state;
+ uint32_t incomplete_char = current_.pos.incomplete_char;
+
// If the current chunk is the last (empty) chunk we'll have to process
// any left-over, partial characters.
if (chunk.length == 0) {
- unibrow::uchar t =
- unibrow::Utf8::ValueOfIncrementalFinish(&current_.pos.incomplete_char);
+ unibrow::uchar t = unibrow::Utf8::ValueOfIncrementalFinish(&state);
if (t != unibrow::Utf8::kBufferEmpty) {
- DCHECK_LT(t, unibrow::Utf16::kMaxNonSurrogateCharCode);
+ DCHECK_EQ(t, unibrow::Utf8::kBadChar);
*cursor = static_cast<uc16>(t);
buffer_end_++;
current_.pos.chars++;
+ current_.pos.incomplete_char = 0;
+ current_.pos.state = state;
}
return;
}
- unibrow::Utf8::Utf8IncrementalBuffer incomplete_char =
- current_.pos.incomplete_char;
- size_t it;
- for (it = current_.pos.bytes - chunk.start.bytes;
- it < chunk.length && cursor + 1 < buffer_start_ + kBufferSize; it++) {
- unibrow::uchar t =
- unibrow::Utf8::ValueOfIncremental(chunk.data[it], &incomplete_char);
- if (t == unibrow::Utf8::kIncomplete) continue;
+ size_t it = current_.pos.bytes - chunk.start.bytes;
+ while (it < chunk.length && cursor + 1 < buffer_start_ + kBufferSize) {
+ unibrow::uchar t = unibrow::Utf8::ValueOfIncremental(
+ chunk.data[it], &it, &state, &incomplete_char);
if (V8_LIKELY(t < kUtf8Bom)) {
*(cursor++) = static_cast<uc16>(t); // By far the most frequent case.
- } else if (t == kUtf8Bom && current_.pos.bytes + it == 2) {
+ } else if (t == unibrow::Utf8::kIncomplete) {
+ continue;
+ } else if (t == kUtf8Bom && current_.pos.bytes + it == 3) {
// BOM detected at beginning of the stream. Don't copy it.
} else if (t <= unibrow::Utf16::kMaxNonSurrogateCharCode) {
*(cursor++) = static_cast<uc16>(t);
@@ -341,13 +344,15 @@ void Utf8ExternalStreamingStream::FillBufferFromCurrentChunk() {
current_.pos.bytes = chunk.start.bytes + it;
current_.pos.chars += (cursor - buffer_end_);
current_.pos.incomplete_char = incomplete_char;
+ current_.pos.state = state;
current_.chunk_no += (it == chunk.length);
buffer_end_ = cursor;
}
bool Utf8ExternalStreamingStream::FetchChunk() {
- RuntimeCallTimerScope scope(stats_, &RuntimeCallStats::GetMoreDataCallback);
+ RuntimeCallTimerScope scope(stats_,
+ RuntimeCallCounterId::kGetMoreDataCallback);
DCHECK_EQ(current_.chunk_no, chunks_.size());
DCHECK(chunks_.empty() || chunks_.back().length != 0);
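
The streaming decoder drops the opaque Utf8::Utf8IncrementalBuffer for an explicit (state, incomplete_char) pair, and ValueOfIncremental now advances the byte cursor itself (note the removed it++). That is also why the BOM check moves from offset 2 to 3: when the third BOM byte completes U+FEFF, the cursor already sits past all three bytes. A minimal incremental decoder in the same shape, an illustrative sketch rather than unibrow's implementation (continuation-byte validation omitted):

#include <cstdint>
#include <cstdio>

enum class State { kAccept, kExpect1, kExpect2, kExpect3 };

// Feeds one byte; returns a completed code point, or -1 while incomplete.
int32_t Feed(uint8_t b, State* state, uint32_t* partial) {
  switch (*state) {
    case State::kAccept:
      if (b < 0x80) return b;
      if ((b & 0xE0) == 0xC0) { *partial = b & 0x1F; *state = State::kExpect1; return -1; }
      if ((b & 0xF0) == 0xE0) { *partial = b & 0x0F; *state = State::kExpect2; return -1; }
      if ((b & 0xF8) == 0xF0) { *partial = b & 0x07; *state = State::kExpect3; return -1; }
      return 0xFFFD;  // analogous to Utf8::kBadChar
    case State::kExpect3:
      *partial = (*partial << 6) | (b & 0x3F); *state = State::kExpect2; return -1;
    case State::kExpect2:
      *partial = (*partial << 6) | (b & 0x3F); *state = State::kExpect1; return -1;
    case State::kExpect1:
      *state = State::kAccept;
      return static_cast<int32_t>((*partial << 6) | (b & 0x3F));
  }
  return 0xFFFD;
}

int main() {
  const uint8_t bom[] = {0xEF, 0xBB, 0xBF};
  State state = State::kAccept;
  uint32_t partial = 0;
  for (uint8_t b : bom) {
    int32_t cp = Feed(b, &state, &partial);
    // Prints U+FEFF only after the third byte is consumed.
    if (cp >= 0) std::printf("U+%04X\n", static_cast<unsigned>(cp));
  }
}
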
@@ -395,16 +400,15 @@ void Utf8ExternalStreamingStream::SearchPosition(size_t position) {
// checking whether the # bytes in a chunk are equal to the # chars, and if
// so avoid the expensive SkipToPosition.)
bool ascii_only_chunk =
- chunks_[chunk_no].start.incomplete_char ==
- unibrow::Utf8::Utf8IncrementalBuffer(0) &&
+ chunks_[chunk_no].start.incomplete_char == 0 &&
(chunks_[chunk_no + 1].start.bytes - chunks_[chunk_no].start.bytes) ==
(chunks_[chunk_no + 1].start.chars - chunks_[chunk_no].start.chars);
if (ascii_only_chunk) {
size_t skip = position - chunks_[chunk_no].start.chars;
current_ = {chunk_no,
{chunks_[chunk_no].start.bytes + skip,
- chunks_[chunk_no].start.chars + skip,
- unibrow::Utf8::Utf8IncrementalBuffer(0)}};
+ chunks_[chunk_no].start.chars + skip, 0,
+ unibrow::Utf8::State::kAccept}};
} else {
current_ = {chunk_no, chunks_[chunk_no].start};
SkipToPosition(position);
@@ -491,7 +495,8 @@ size_t FindChunk(Chunks& chunks, ScriptCompiler::ExternalSourceStream* source,
// Get more data if needed. We usually won't enter the loop body.
bool out_of_data = !chunks.empty() && chunks.back().byte_length == 0;
{
- RuntimeCallTimerScope scope(stats, &RuntimeCallStats::GetMoreDataCallback);
+ RuntimeCallTimerScope scope(stats,
+ RuntimeCallCounterId::kGetMoreDataCallback);
while (!out_of_data && end_pos <= position + 1) {
const uint8_t* chunk = nullptr;
size_t len = source->GetMoreData(&chunk);
diff --git a/deps/v8/src/parsing/scanner.cc b/deps/v8/src/parsing/scanner.cc
index 8030b93889..3152ab184e 100644
--- a/deps/v8/src/parsing/scanner.cc
+++ b/deps/v8/src/parsing/scanner.cc
@@ -174,31 +174,15 @@ bool Scanner::BookmarkScope::HasBeenApplied() {
return bookmark_ == kBookmarkWasApplied;
}
-// LineTerminator: 'JS_Line_Terminator' in point.properties
-// ES#sec-line-terminators lists exactly 4 code points:
-// LF (U+000A), CR (U+000D), LS(U+2028), PS(U+2029)
-bool Scanner::IsLineTerminator(uc32 c) {
- if (c == 0x000A || c == 0x000D) {
- return true;
- }
- if (c == 0x2028 || c == 0x2029) {
- ++use_counts_[v8::Isolate::UseCounterFeature::
- kLineOrParagraphSeparatorAsLineTerminator];
- return true;
- }
- return false;
-}
-
// ----------------------------------------------------------------------------
// Scanner
-Scanner::Scanner(UnicodeCache* unicode_cache, int* use_counts)
+Scanner::Scanner(UnicodeCache* unicode_cache)
: unicode_cache_(unicode_cache),
octal_pos_(Location::invalid()),
octal_message_(MessageTemplate::kNone),
found_html_comment_(false),
- allow_harmony_bigint_(false),
- use_counts_(use_counts) {}
+ allow_harmony_bigint_(false) {}
void Scanner::Initialize(Utf16CharacterStream* source, bool is_module) {
DCHECK_NOT_NULL(source);
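
IsLineTerminator leaves the Scanner for a shared unibrow predicate; with it goes the kLineOrParagraphSeparatorAsLineTerminator use counter (presumably its data collection had served its purpose). The predicate itself is four code points, per ES#sec-line-terminators, consistent with the deleted method:

// constexpr so membership can be checked at compile time.
constexpr bool IsLineTerminator(char32_t c) {
  return c == 0x000A     // LF
         || c == 0x000D  // CR
         || c == 0x2028  // LS
         || c == 0x2029; // PS
}
static_assert(IsLineTerminator(0x2028), "LS is a line terminator");
static_assert(!IsLineTerminator(0x00A0), "NBSP is not");
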
@@ -257,7 +241,8 @@ uc32 Scanner::ScanUnlimitedLengthHexNumber(int max_value, int beg_pos) {
// Ensure that tokens can be stored in a byte.
STATIC_ASSERT(Token::NUM_TOKENS <= 0x100);
-// Table of one-character tokens, by character (0x00..0x7f only).
+// Table of one-character tokens, by character (0x00..0x7F only).
+// clang-format off
static const byte one_char_tokens[] = {
Token::ILLEGAL,
Token::ILLEGAL,
@@ -303,7 +288,7 @@ static const byte one_char_tokens[] = {
Token::RPAREN, // 0x29
Token::ILLEGAL,
Token::ILLEGAL,
- Token::COMMA, // 0x2c
+ Token::COMMA, // 0x2C
Token::ILLEGAL,
Token::ILLEGAL,
Token::ILLEGAL,
@@ -317,12 +302,12 @@ static const byte one_char_tokens[] = {
Token::ILLEGAL,
Token::ILLEGAL,
Token::ILLEGAL,
- Token::COLON, // 0x3a
- Token::SEMICOLON, // 0x3b
+ Token::COLON, // 0x3A
+ Token::SEMICOLON, // 0x3B
Token::ILLEGAL,
Token::ILLEGAL,
Token::ILLEGAL,
- Token::CONDITIONAL, // 0x3f
+ Token::CONDITIONAL, // 0x3F
Token::ILLEGAL,
Token::ILLEGAL,
Token::ILLEGAL,
@@ -350,9 +335,9 @@ static const byte one_char_tokens[] = {
Token::ILLEGAL,
Token::ILLEGAL,
Token::ILLEGAL,
- Token::LBRACK, // 0x5b
+ Token::LBRACK, // 0x5B
Token::ILLEGAL,
- Token::RBRACK, // 0x5d
+ Token::RBRACK, // 0x5D
Token::ILLEGAL,
Token::ILLEGAL,
Token::ILLEGAL,
@@ -382,13 +367,13 @@ static const byte one_char_tokens[] = {
Token::ILLEGAL,
Token::ILLEGAL,
Token::ILLEGAL,
- Token::LBRACE, // 0x7b
+ Token::LBRACE, // 0x7B
Token::ILLEGAL,
- Token::RBRACE, // 0x7d
- Token::BIT_NOT, // 0x7e
+ Token::RBRACE, // 0x7D
+ Token::BIT_NOT, // 0x7E
Token::ILLEGAL
};
-
+// clang-format on
Token::Value Scanner::Next() {
if (next_.token == Token::EOS) {
@@ -405,7 +390,7 @@ Token::Value Scanner::Next() {
}
has_line_terminator_before_next_ = false;
has_multiline_comment_before_next_ = false;
- if (static_cast<unsigned>(c0_) <= 0x7f) {
+ if (static_cast<unsigned>(c0_) <= 0x7F) {
Token::Value token = static_cast<Token::Value>(one_char_tokens[c0_]);
if (token != Token::ILLEGAL) {
int pos = source_pos();
@@ -457,7 +442,7 @@ Token::Value Scanner::SkipWhiteSpace() {
// Advance as long as character is a WhiteSpace or LineTerminator.
// Remember if the latter is the case.
- if (IsLineTerminator(c0_)) {
+ if (unibrow::IsLineTerminator(c0_)) {
has_line_terminator_before_next_ = true;
} else if (!unicode_cache_->IsWhiteSpace(c0_)) {
break;
@@ -514,7 +499,7 @@ Token::Value Scanner::SkipSingleLineComment() {
// separately by the lexical grammar and becomes part of the
// stream of input elements for the syntactic grammar (see
// ECMA-262, section 7.4).
- while (c0_ != kEndOfInput && !IsLineTerminator(c0_)) {
+ while (c0_ != kEndOfInput && !unibrow::IsLineTerminator(c0_)) {
Advance();
}
@@ -524,7 +509,7 @@ Token::Value Scanner::SkipSingleLineComment() {
Token::Value Scanner::SkipSourceURLComment() {
TryToParseSourceURLComment();
- while (c0_ != kEndOfInput && !IsLineTerminator(c0_)) {
+ while (c0_ != kEndOfInput && !unibrow::IsLineTerminator(c0_)) {
Advance();
}
@@ -560,7 +545,7 @@ void Scanner::TryToParseSourceURLComment() {
while (c0_ != kEndOfInput && unicode_cache_->IsWhiteSpace(c0_)) {
Advance();
}
- while (c0_ != kEndOfInput && !IsLineTerminator(c0_)) {
+ while (c0_ != kEndOfInput && !unibrow::IsLineTerminator(c0_)) {
// Disallowed characters.
if (c0_ == '"' || c0_ == '\'') {
value->Reset();
@@ -573,7 +558,7 @@ void Scanner::TryToParseSourceURLComment() {
Advance();
}
// Allow whitespace at the end.
- while (c0_ != kEndOfInput && !IsLineTerminator(c0_)) {
+ while (c0_ != kEndOfInput && !unibrow::IsLineTerminator(c0_)) {
if (!unicode_cache_->IsWhiteSpace(c0_)) {
value->Reset();
break;
@@ -590,7 +575,7 @@ Token::Value Scanner::SkipMultiLineComment() {
while (c0_ != kEndOfInput) {
uc32 ch = c0_;
Advance();
- if (c0_ != kEndOfInput && IsLineTerminator(ch)) {
+ if (c0_ != kEndOfInput && unibrow::IsLineTerminator(ch)) {
// Following ECMA-262, section 7.4, a comment containing
// a newline will make the comment count as a line-terminator.
has_multiline_comment_before_next_ = true;
@@ -875,6 +860,10 @@ void Scanner::Scan() {
token = ScanTemplateStart();
break;
+ case '#':
+ token = ScanPrivateName();
+ break;
+
default:
if (c0_ == kEndOfInput) {
token = Token::EOS;
@@ -940,6 +929,7 @@ void Scanner::SanityCheckTokenDesc(const TokenDesc& token) const {
case Token::REGEXP_LITERAL:
case Token::SMI:
case Token::STRING:
+ case Token::PRIVATE_NAME:
DCHECK_NOT_NULL(token.literal_chars);
DCHECK_NULL(token.raw_literal_chars);
DCHECK_EQ(token.invalid_template_escape_message, MessageTemplate::kNone);
@@ -987,7 +977,8 @@ bool Scanner::ScanEscape() {
Advance<capture_raw>();
// Skip escaped newlines.
- if (!in_template_literal && c0_ != kEndOfInput && IsLineTerminator(c)) {
+ if (!in_template_literal && c0_ != kEndOfInput &&
+ unibrow::IsLineTerminator(c)) {
// Allow escaped CR+LF newlines in multiline string literals.
if (IsCarriageReturn(c) && IsLineFeed(c0_)) Advance<capture_raw>();
return true;
@@ -1080,7 +1071,8 @@ Token::Value Scanner::ScanString() {
AddLiteralChar(c);
}
- while (c0_ != quote && c0_ != kEndOfInput && !IsLineTerminator(c0_)) {
+ while (c0_ != quote && c0_ != kEndOfInput &&
+ !unibrow::IsLineTerminator(c0_)) {
uc32 c = c0_;
Advance();
if (c == '\\') {
@@ -1098,6 +1090,26 @@ Token::Value Scanner::ScanString() {
return Token::STRING;
}
+Token::Value Scanner::ScanPrivateName() {
+ if (!allow_harmony_private_fields()) {
+ ReportScannerError(source_pos(),
+ MessageTemplate::kInvalidOrUnexpectedToken);
+ return Token::ILLEGAL;
+ }
+
+ LiteralScope literal(this);
+ DCHECK_EQ(c0_, '#');
+ AddLiteralCharAdvance();
+ if (c0_ == kEndOfInput || !unicode_cache_->IsIdentifierStart(c0_)) {
+ PushBack(c0_);
+ ReportScannerError(source_pos(),
+ MessageTemplate::kInvalidOrUnexpectedToken);
+ return Token::ILLEGAL;
+ }
+
+ Token::Value token = ScanIdentifierOrKeywordInner(&literal);
+ return token == Token::ILLEGAL ? Token::ILLEGAL : Token::PRIVATE_NAME;
+}
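
ScanPrivateName is the scanner half of private class fields (gated on allow_harmony_private_fields, wired up in parser.cc above): '#' must be followed immediately by an identifier start, the identifier machinery is reused via the new ScanIdentifierOrKeywordInner, and the result surfaces as the new Token::PRIVATE_NAME, accepting source like `class C { #x = 0; getX() { return this.#x; } }`. An ASCII-only toy of the same shape (the real scanner classifies identifier characters through UnicodeCache):

#include <cctype>
#include <cstdio>
#include <string>

bool ScanPrivateName(const std::string& src, size_t* pos, std::string* out) {
  size_t i = *pos;
  if (i >= src.size() || src[i] != '#') return false;
  size_t j = i + 1;
  if (j >= src.size() ||
      !(std::isalpha(static_cast<unsigned char>(src[j])) || src[j] == '_' ||
        src[j] == '$')) {
    return false;  // V8 reports kInvalidOrUnexpectedToken here
  }
  for (++j; j < src.size() &&
            (std::isalnum(static_cast<unsigned char>(src[j])) ||
             src[j] == '_' || src[j] == '$');
       ++j) {
  }
  *out = src.substr(i, j - i);
  *pos = j;
  return true;
}

int main() {
  std::string name;
  size_t pos = 0;
  if (ScanPrivateName("#counter = 0;", &pos, &name))
    std::printf("PRIVATE_NAME %s\n", name.c_str());  // "#counter"
}
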
Token::Value Scanner::ScanTemplateSpan() {
// When scanning a TemplateSpan, we are looking for the following construct:
@@ -1136,7 +1148,7 @@ Token::Value Scanner::ScanTemplateSpan() {
ReduceRawLiteralLength(2);
break;
} else if (c == '\\') {
- if (c0_ != kEndOfInput && IsLineTerminator(c0_)) {
+ if (c0_ != kEndOfInput && unibrow::IsLineTerminator(c0_)) {
// The TV of LineContinuation :: \ LineTerminatorSequence is the empty
// code unit sequence.
uc32 lastChar = c0_;
@@ -1397,7 +1409,7 @@ uc32 Scanner::ScanUnicodeEscape() {
if (c0_ == '{') {
int begin = source_pos() - 2;
Advance<capture_raw>();
- uc32 cp = ScanUnlimitedLengthHexNumber<capture_raw>(0x10ffff, begin);
+ uc32 cp = ScanUnlimitedLengthHexNumber<capture_raw>(0x10FFFF, begin);
if (cp < 0 || c0_ != '}') {
ReportScannerError(source_pos(),
MessageTemplate::kInvalidUnicodeEscapeSequence);
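
The 0x10FFFF bound passed to `ScanUnlimitedLengthHexNumber` is the Unicode code-point ceiling for `\u{...}` escapes: any number of hex digits is accepted, but the decoded value may not exceed it. A simplified sketch of such a bounded scan (illustrative only; the real helper also tracks source positions for error reporting):

```cpp
// Bounded brace-escape scan: read hex digits until '}', failing once the
// value exceeds 0x10FFFF; -1 signals a malformed escape.
#include <cctype>
#include <cstdint>
#include <string>

int32_t ScanBracedHex(const std::string& src, size_t* pos) {
  int32_t cp = 0;
  bool any_digits = false;
  while (*pos < src.size() &&
         std::isxdigit(static_cast<unsigned char>(src[*pos]))) {
    char c = src[(*pos)++];
    int digit = c <= '9' ? c - '0' : (c | 0x20) - 'a' + 10;
    cp = cp * 16 + digit;
    if (cp > 0x10FFFF) return -1;  // past the last Unicode code point
    any_digits = true;
  }
  if (!any_digits || *pos >= src.size() || src[*pos] != '}') return -1;
  ++*pos;  // consume '}'
  return cp;
}
```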
@@ -1541,10 +1553,13 @@ static Token::Value KeywordOrIdentifierToken(const uint8_t* input,
return Token::IDENTIFIER;
}
-
Token::Value Scanner::ScanIdentifierOrKeyword() {
- DCHECK(unicode_cache_->IsIdentifierStart(c0_));
LiteralScope literal(this);
+ return ScanIdentifierOrKeywordInner(&literal);
+}
+
+Token::Value Scanner::ScanIdentifierOrKeywordInner(LiteralScope* literal) {
+ DCHECK(unicode_cache_->IsIdentifierStart(c0_));
if (IsInRange(c0_, 'a', 'z') || c0_ == '_') {
do {
char first_char = static_cast<char>(c0_);
@@ -1564,7 +1579,7 @@ Token::Value Scanner::ScanIdentifierOrKeyword() {
AddLiteralChar(first_char);
}
if (c0_ <= kMaxAscii && c0_ != '\\') {
- literal.Complete();
+ literal->Complete();
return Token::IDENTIFIER;
}
} else if (c0_ <= kMaxAscii && c0_ != '\\') {
@@ -1575,7 +1590,7 @@ Token::Value Scanner::ScanIdentifierOrKeyword() {
if (token == Token::IDENTIFIER ||
token == Token::FUTURE_STRICT_RESERVED_WORD ||
Token::IsContextualKeyword(token))
- literal.Complete();
+ literal->Complete();
return token;
}
@@ -1588,7 +1603,7 @@ Token::Value Scanner::ScanIdentifierOrKeyword() {
} while (IsAsciiIdentifier(c0_));
if (c0_ <= kMaxAscii && c0_ != '\\') {
- literal.Complete();
+ literal->Complete();
return Token::IDENTIFIER;
}
@@ -1603,7 +1618,7 @@ Token::Value Scanner::ScanIdentifierOrKeyword() {
return Token::ILLEGAL;
}
AddLiteralChar(c);
- return ScanIdentifierSuffix(&literal, true);
+ return ScanIdentifierSuffix(literal, true);
} else {
uc32 first_char = c0_;
Advance();
@@ -1619,7 +1634,7 @@ Token::Value Scanner::ScanIdentifierOrKeyword() {
continue;
}
// Fallthrough if no longer able to complete keyword.
- return ScanIdentifierSuffix(&literal, false);
+ return ScanIdentifierSuffix(literal, false);
}
if (next_.literal_chars->is_one_byte()) {
@@ -1629,10 +1644,10 @@ Token::Value Scanner::ScanIdentifierOrKeyword() {
if (token == Token::IDENTIFIER ||
token == Token::FUTURE_STRICT_RESERVED_WORD ||
Token::IsContextualKeyword(token))
- literal.Complete();
+ literal->Complete();
return token;
}
- literal.Complete();
+ literal->Complete();
return Token::IDENTIFIER;
}
@@ -1697,12 +1712,12 @@ bool Scanner::ScanRegExpPattern() {
}
while (c0_ != '/' || in_character_class) {
- if (c0_ == kEndOfInput || IsLineTerminator(c0_)) {
+ if (c0_ == kEndOfInput || unibrow::IsLineTerminator(c0_)) {
return false;
}
if (c0_ == '\\') { // Escape sequence.
AddLiteralCharAdvance();
- if (c0_ == kEndOfInput || IsLineTerminator(c0_)) {
+ if (c0_ == kEndOfInput || unibrow::IsLineTerminator(c0_)) {
return false;
}
AddLiteralCharAdvance();
diff --git a/deps/v8/src/parsing/scanner.h b/deps/v8/src/parsing/scanner.h
index 08d77c686b..f5106990ff 100644
--- a/deps/v8/src/parsing/scanner.h
+++ b/deps/v8/src/parsing/scanner.h
@@ -207,7 +207,7 @@ class Scanner {
static const int kNoOctalLocation = -1;
static const uc32 kEndOfInput = Utf16CharacterStream::kEndOfInput;
- explicit Scanner(UnicodeCache* scanner_contants, int* use_counts_);
+ explicit Scanner(UnicodeCache* scanner_contants);
void Initialize(Utf16CharacterStream* source, bool is_module);
@@ -360,6 +360,12 @@ class Scanner {
bool allow_harmony_bigint() const { return allow_harmony_bigint_; }
void set_allow_harmony_bigint(bool allow) { allow_harmony_bigint_ = allow; }
+ bool allow_harmony_private_fields() const {
+ return allow_harmony_private_fields_;
+ }
+ void set_allow_harmony_private_fields(bool allow) {
+ allow_harmony_private_fields_ = allow;
+ }
private:
// Scoped helper for saving & restoring scanner error state.
@@ -717,9 +723,11 @@ class Scanner {
void ScanDecimalDigits();
Token::Value ScanNumber(bool seen_period);
Token::Value ScanIdentifierOrKeyword();
+ Token::Value ScanIdentifierOrKeywordInner(LiteralScope* literal);
Token::Value ScanIdentifierSuffix(LiteralScope* literal, bool escaped);
Token::Value ScanString();
+ Token::Value ScanPrivateName();
// Scans an escape-sequence which is part of a string and adds the
// decoded character to the current literal. Returns true if a pattern
@@ -736,8 +744,6 @@ class Scanner {
bool is_module_;
- bool IsLineTerminator(uc32 c);
-
Token::Value ScanTemplateSpan();
// Return the current source position.
@@ -802,10 +808,9 @@ class Scanner {
// Whether this scanner encountered an HTML comment.
bool found_html_comment_;
- // Whether to recognize BIGINT tokens.
+ // Harmony flags to allow ESNext features.
bool allow_harmony_bigint_;
-
- int* use_counts_;
+ bool allow_harmony_private_fields_;
MessageTemplate::Template scanner_error_;
Location scanner_error_location_;
diff --git a/deps/v8/src/parsing/token.h b/deps/v8/src/parsing/token.h
index e4a4a5e587..07974edf41 100644
--- a/deps/v8/src/parsing/token.h
+++ b/deps/v8/src/parsing/token.h
@@ -151,6 +151,7 @@ namespace internal {
\
/* Identifiers (not keywords or future reserved words). */ \
T(IDENTIFIER, nullptr, 0) \
+ T(PRIVATE_NAME, nullptr, 0) \
\
/* Future reserved words (ECMA-262, section 7.6.1.2). */ \
T(FUTURE_STRICT_RESERVED_WORD, nullptr, 0) \
diff --git a/deps/v8/src/perf-jit.cc b/deps/v8/src/perf-jit.cc
index c52bb5222a..7ccd02ef9b 100644
--- a/deps/v8/src/perf-jit.cc
+++ b/deps/v8/src/perf-jit.cc
@@ -87,7 +87,7 @@ struct PerfJitDebugEntry {
uint64_t address_;
int line_number_;
int column_;
- // Followed by null-terminated name or \0xff\0 if same as previous.
+ // Followed by null-terminated name or \0xFF\0 if same as previous.
};
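
One plausible reading of the marker in that comment: when an entry's name repeats the previous one, the jitdump writer emits the two bytes 0xFF 0x00 instead of the full NUL-terminated string. A hedged sketch under that assumption, with `WriteBytes` standing in for the logger's output routine:

```cpp
// Hypothetical emitter for the name field that follows a debug entry.
#include <cstring>

void WriteEntryName(const char* name, const char* previous,
                    void (*WriteBytes)(const void*, size_t)) {
  if (previous != nullptr && std::strcmp(name, previous) == 0) {
    static const unsigned char kSameAsPrevious[2] = {0xFF, 0x00};
    WriteBytes(kSameAsPrevious, sizeof(kSameAsPrevious));
  } else {
    WriteBytes(name, std::strlen(name) + 1);  // include the trailing NUL
  }
}
```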
struct PerfJitCodeDebugInfo : PerfJitBase {
@@ -396,7 +396,7 @@ void PerfJitLogger::LogWriteHeader() {
header.version_ = PerfJitHeader::kVersion;
header.size_ = sizeof(header);
header.elf_mach_target_ = GetElfMach();
- header.reserved_ = 0xdeadbeef;
+ header.reserved_ = 0xDEADBEEF;
header.process_id_ = base::OS::GetCurrentProcessId();
header.time_stamp_ =
static_cast<uint64_t>(V8::GetCurrentPlatform()->CurrentClockTimeMillis() *
diff --git a/deps/v8/src/ppc/assembler-ppc-inl.h b/deps/v8/src/ppc/assembler-ppc-inl.h
index d9b12ac8db..451a1afa46 100644
--- a/deps/v8/src/ppc/assembler-ppc-inl.h
+++ b/deps/v8/src/ppc/assembler-ppc-inl.h
@@ -60,9 +60,9 @@ void RelocInfo::apply(intptr_t delta) {
} else {
// mov sequence
DCHECK(IsInternalReferenceEncoded(rmode_));
- Address target = Assembler::target_address_at(pc_, host_);
- Assembler::set_target_address_at(nullptr, pc_, host_, target + delta,
- SKIP_ICACHE_FLUSH);
+ Address target = Assembler::target_address_at(pc_, constant_pool_);
+ Assembler::set_target_address_at(nullptr, pc_, constant_pool_,
+ target + delta, SKIP_ICACHE_FLUSH);
}
}
@@ -74,7 +74,7 @@ Address RelocInfo::target_internal_reference() {
} else {
// mov sequence
DCHECK(IsInternalReferenceEncoded(rmode_));
- return Assembler::target_address_at(pc_, host_);
+ return Assembler::target_address_at(pc_, constant_pool_);
}
}
@@ -87,7 +87,7 @@ Address RelocInfo::target_internal_reference_address() {
Address RelocInfo::target_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
- return Assembler::target_address_at(pc_, host_);
+ return Assembler::target_address_at(pc_, constant_pool_);
}
Address RelocInfo::target_address_address() {
@@ -117,12 +117,11 @@ Address RelocInfo::target_address_address() {
Address RelocInfo::constant_pool_entry_address() {
if (FLAG_enable_embedded_constant_pool) {
- Address constant_pool = host_->constant_pool();
- DCHECK(constant_pool);
+ DCHECK(constant_pool_);
ConstantPoolEntry::Access access;
if (Assembler::IsConstantPoolLoadStart(pc_, &access))
return Assembler::target_constant_pool_address_at(
- pc_, constant_pool, access, ConstantPoolEntry::INTPTR);
+ pc_, constant_pool_, access, ConstantPoolEntry::INTPTR);
}
UNREACHABLE();
}
@@ -130,18 +129,6 @@ Address RelocInfo::constant_pool_entry_address() {
int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
-Address Assembler::target_address_at(Address pc, Code* code) {
- Address constant_pool = code ? code->constant_pool() : nullptr;
- return target_address_at(pc, constant_pool);
-}
-
-void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
- Address target,
- ICacheFlushMode icache_flush_mode) {
- Address constant_pool = code ? code->constant_pool() : nullptr;
- set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
-}
-
Address Assembler::target_address_from_return_address(Address pc) {
// Returns the address of the call target from the return address that will
// be returned to after a call.
@@ -176,21 +163,21 @@ Address Assembler::return_address_from_call_start(Address pc) {
HeapObject* RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return HeapObject::cast(
- reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_)));
+ return HeapObject::cast(reinterpret_cast<Object*>(
+ Assembler::target_address_at(pc_, constant_pool_)));
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Handle<HeapObject>(
- reinterpret_cast<HeapObject**>(Assembler::target_address_at(pc_, host_)));
+ return Handle<HeapObject>(reinterpret_cast<HeapObject**>(
+ Assembler::target_address_at(pc_, constant_pool_)));
}
void RelocInfo::set_target_object(HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(target->GetIsolate(), pc_, host_,
+ Assembler::set_target_address_at(target->GetIsolate(), pc_, constant_pool_,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
@@ -203,7 +190,7 @@ void RelocInfo::set_target_object(HeapObject* target,
Address RelocInfo::target_external_reference() {
DCHECK(rmode_ == EXTERNAL_REFERENCE);
- return Assembler::target_address_at(pc_, host_);
+ return Assembler::target_address_at(pc_, constant_pool_);
}
@@ -230,10 +217,10 @@ void RelocInfo::WipeOut(Isolate* isolate) {
} else if (IsInternalReferenceEncoded(rmode_)) {
// mov sequence
// Currently used only by deserializer, no need to flush.
- Assembler::set_target_address_at(isolate, pc_, host_, nullptr,
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_, nullptr,
SKIP_ICACHE_FLUSH);
} else {
- Assembler::set_target_address_at(isolate, pc_, host_, nullptr);
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_, nullptr);
}
}
@@ -428,15 +415,15 @@ Address Assembler::target_constant_pool_address_at(
// There is a FIXED_SEQUENCE assumption here
void Assembler::deserialization_set_special_target_at(
Isolate* isolate, Address instruction_payload, Code* code, Address target) {
- set_target_address_at(isolate, instruction_payload, code, target);
+ set_target_address_at(isolate, instruction_payload,
+ code ? code->constant_pool() : nullptr, target);
}
void Assembler::deserialization_set_target_internal_reference_at(
Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
if (RelocInfo::IsInternalReferenceEncoded(mode)) {
- Code* code = nullptr;
- set_target_address_at(isolate, pc, code, target, SKIP_ICACHE_FLUSH);
+ set_target_address_at(isolate, pc, nullptr, target, SKIP_ICACHE_FLUSH);
} else {
Memory::Address_at(pc) = target;
}
diff --git a/deps/v8/src/ppc/assembler-ppc.cc b/deps/v8/src/ppc/assembler-ppc.cc
index 0c4a518772..90b18b02ba 100644
--- a/deps/v8/src/ppc/assembler-ppc.cc
+++ b/deps/v8/src/ppc/assembler-ppc.cc
@@ -155,30 +155,30 @@ bool RelocInfo::IsCodedSpecially() {
bool RelocInfo::IsInConstantPool() {
- if (FLAG_enable_embedded_constant_pool && host_ != nullptr) {
- Address constant_pool = host_->constant_pool();
- return (constant_pool && Assembler::IsConstantPoolLoadStart(pc_));
+ if (FLAG_enable_embedded_constant_pool && constant_pool_ != nullptr) {
+ return (constant_pool_ && Assembler::IsConstantPoolLoadStart(pc_));
}
return false;
}
Address RelocInfo::embedded_address() const {
- return Assembler::target_address_at(pc_, host_);
+ return Assembler::target_address_at(pc_, constant_pool_);
}
uint32_t RelocInfo::embedded_size() const {
- return static_cast<uint32_t>(
- reinterpret_cast<intptr_t>(Assembler::target_address_at(pc_, host_)));
+ return static_cast<uint32_t>(reinterpret_cast<intptr_t>(
+ Assembler::target_address_at(pc_, constant_pool_)));
}
void RelocInfo::set_embedded_address(Isolate* isolate, Address address,
ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate, pc_, host_, address, flush_mode);
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_, address,
+ flush_mode);
}
void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate, pc_, host_,
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_,
reinterpret_cast<Address>(size), flush_mode);
}
@@ -350,9 +350,9 @@ bool Assembler::Is64BitLoadIntoR12(Instr instr1, Instr instr2, Instr instr3,
// 798c07c6 rldicr r12, r12, 32, 31
// 658c00c3 oris r12, r12, 195
// 618ccd40 ori r12, r12, 52544
- return (((instr1 >> 16) == 0x3d80) && ((instr2 >> 16) == 0x618c) &&
- (instr3 == 0x798c07c6) && ((instr4 >> 16) == 0x658c) &&
- ((instr5 >> 16) == 0x618c));
+ return (((instr1 >> 16) == 0x3D80) && ((instr2 >> 16) == 0x618C) &&
+ (instr3 == 0x798C07C6) && ((instr4 >> 16) == 0x658C) &&
+ ((instr5 >> 16) == 0x618C));
}
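
The matcher works because the top 16 bits of a PPC D-form instruction pin down the 6-bit opcode and the two 5-bit register fields, leaving only the 16-bit immediate free. A sketch of the two field checks used above:

```cpp
// Field checks behind the matcher: comparing (instr >> 16) fixes opcode
// and registers while ignoring the immediate half of the word.
#include <cstdint>

bool LooksLikeLisR12(uint32_t instr) {
  // 0x3D80: opcode 15 (addis), RT = r12, RA = r0 -- i.e. lis r12, <imm>.
  return (instr >> 16) == 0x3D80;
}

bool LooksLikeOriR12(uint32_t instr) {
  // 0x618C: opcode 24 (ori), RS = RA = r12; the 16-bit immediate is free.
  return (instr >> 16) == 0x618C;
}
```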
#else
// This code assumes a FIXED_SEQUENCE for 32bit loads (lis/ori)
@@ -360,7 +360,7 @@ bool Assembler::Is32BitLoadIntoR12(Instr instr1, Instr instr2) {
// Check the instruction is indeed a two part load (into r12)
// 3d802553 lis r12, 9555
// 618c5000 ori r12, r12, 20480
- return (((instr1 >> 16) == 0x3d80) && ((instr2 >> 16) == 0x618c));
+ return (((instr1 >> 16) == 0x3D80) && ((instr2 >> 16) == 0x618C));
}
#endif
@@ -513,8 +513,8 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
case kUnboundAddLabelOffsetOpcode: {
// dst = base + position + immediate
Instr operands = instr_at(pos + kInstrSize);
- Register dst = Register::from_code((operands >> 21) & 0x1f);
- Register base = Register::from_code((operands >> 16) & 0x1f);
+ Register dst = Register::from_code((operands >> 21) & 0x1F);
+ Register base = Register::from_code((operands >> 16) & 0x1F);
int32_t offset = target_pos + SIGN_EXT_IMM16(operands & kImm16Mask);
PatchingAssembler patcher(isolate_data(),
reinterpret_cast<byte*>(buffer_ + pos), 2);
@@ -660,9 +660,9 @@ void Assembler::xo_form(Instr instr, Register rt, Register ra, Register rb,
void Assembler::md_form(Instr instr, Register ra, Register rs, int shift,
int maskbit, RCBit r) {
- int sh0_4 = shift & 0x1f;
+ int sh0_4 = shift & 0x1F;
int sh5 = (shift >> 5) & 0x1;
- int m0_4 = maskbit & 0x1f;
+ int m0_4 = maskbit & 0x1F;
int m5 = (maskbit >> 5) & 0x1;
emit(instr | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 | m0_4 * B6 |
@@ -672,7 +672,7 @@ void Assembler::md_form(Instr instr, Register ra, Register rs, int shift,
void Assembler::mds_form(Instr instr, Register ra, Register rs, Register rb,
int maskbit, RCBit r) {
- int m0_4 = maskbit & 0x1f;
+ int m0_4 = maskbit & 0x1F;
int m5 = (maskbit >> 5) & 0x1;
emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | m0_4 * B6 |
@@ -766,9 +766,9 @@ void Assembler::xoris(Register ra, Register rs, const Operand& imm) {
void Assembler::rlwinm(Register ra, Register rs, int sh, int mb, int me,
RCBit rc) {
- sh &= 0x1f;
- mb &= 0x1f;
- me &= 0x1f;
+ sh &= 0x1F;
+ mb &= 0x1F;
+ me &= 0x1F;
emit(RLWINMX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 |
me << 1 | rc);
}
@@ -776,8 +776,8 @@ void Assembler::rlwinm(Register ra, Register rs, int sh, int mb, int me,
void Assembler::rlwnm(Register ra, Register rs, Register rb, int mb, int me,
RCBit rc) {
- mb &= 0x1f;
- me &= 0x1f;
+ mb &= 0x1F;
+ me &= 0x1F;
emit(RLWNMX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | mb * B6 |
me << 1 | rc);
}
@@ -785,9 +785,9 @@ void Assembler::rlwnm(Register ra, Register rs, Register rb, int mb, int me,
void Assembler::rlwimi(Register ra, Register rs, int sh, int mb, int me,
RCBit rc) {
- sh &= 0x1f;
- mb &= 0x1f;
- me &= 0x1f;
+ sh &= 0x1F;
+ mb &= 0x1F;
+ me &= 0x1F;
emit(RLWIMIX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 |
me << 1 | rc);
}
@@ -1191,7 +1191,7 @@ void Assembler::rldimi(Register ra, Register rs, int sh, int mb, RCBit r) {
void Assembler::sradi(Register ra, Register rs, int sh, RCBit r) {
- int sh0_4 = sh & 0x1f;
+ int sh0_4 = sh & 0x1F;
int sh5 = (sh >> 5) & 0x1;
emit(EXT2 | SRADIX | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 |
@@ -1368,19 +1368,19 @@ void Assembler::mov(Register dst, const Operand& src) {
li(dst, Operand(value >> 32));
} else {
lis(dst, Operand(value >> 48));
- u16 = ((value >> 32) & 0xffff);
+ u16 = ((value >> 32) & 0xFFFF);
if (u16) {
ori(dst, dst, Operand(u16));
}
}
sldi(dst, dst, Operand(32));
- u16 = ((value >> 16) & 0xffff);
+ u16 = ((value >> 16) & 0xFFFF);
if (u16) {
oris(dst, dst, Operand(u16));
}
}
#endif
- u16 = (value & 0xffff);
+ u16 = (value & 0xFFFF);
if (u16) {
ori(dst, dst, Operand(u16));
}
@@ -1402,17 +1402,17 @@ void Assembler::bitwise_mov(Register dst, intptr_t value) {
int32_t hi_32 = static_cast<int32_t>(value >> 32);
int32_t lo_32 = static_cast<int32_t>(value);
int hi_word = static_cast<int>(hi_32 >> 16);
- int lo_word = static_cast<int>(hi_32 & 0xffff);
+ int lo_word = static_cast<int>(hi_32 & 0xFFFF);
lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
ori(dst, dst, Operand(lo_word));
sldi(dst, dst, Operand(32));
- hi_word = static_cast<int>(((lo_32 >> 16) & 0xffff));
- lo_word = static_cast<int>(lo_32 & 0xffff);
+ hi_word = static_cast<int>(((lo_32 >> 16) & 0xFFFF));
+ lo_word = static_cast<int>(lo_32 & 0xFFFF);
oris(dst, dst, Operand(hi_word));
ori(dst, dst, Operand(lo_word));
#else
int hi_word = static_cast<int>(value >> 16);
- int lo_word = static_cast<int>(value & 0xffff);
+ int lo_word = static_cast<int>(value & 0xFFFF);
lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
ori(dst, dst, Operand(lo_word));
#endif
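
`bitwise_mov` materializes a 64-bit constant from four 16-bit halfwords that lis/ori/sldi/oris/ori can carry. The chunking, restated as a plain helper:

```cpp
// The 16-bit split behind bitwise_mov; emission order then mirrors the
// assembler: lis(hi_hi); ori(hi_lo); sldi(32); oris(lo_hi); ori(lo_lo).
#include <cstdint>

struct Halfwords {
  uint16_t hi_hi, hi_lo, lo_hi, lo_lo;
};

Halfwords Split64(uint64_t value) {
  return {static_cast<uint16_t>(value >> 48),
          static_cast<uint16_t>(value >> 32),
          static_cast<uint16_t>(value >> 16),
          static_cast<uint16_t>(value)};
}
```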
@@ -1422,7 +1422,7 @@ void Assembler::bitwise_mov(Register dst, intptr_t value) {
void Assembler::bitwise_mov32(Register dst, int32_t value) {
BlockTrampolinePoolScope block_trampoline_pool(this);
int hi_word = static_cast<int>(value >> 16);
- int lo_word = static_cast<int>(value & 0xffff);
+ int lo_word = static_cast<int>(value & 0xFFFF);
lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
ori(dst, dst, Operand(lo_word));
}
@@ -1435,7 +1435,7 @@ void Assembler::bitwise_add32(Register dst, Register src, int32_t value) {
nop();
} else {
int hi_word = static_cast<int>(value >> 16);
- int lo_word = static_cast<int>(value & 0xffff);
+ int lo_word = static_cast<int>(value & 0xFFFF);
if (lo_word & 0x8000) hi_word++;
addis(dst, src, Operand(SIGN_EXT_IMM16(hi_word)));
addic(dst, dst, Operand(SIGN_EXT_IMM16(lo_word)));
@@ -1636,9 +1636,7 @@ void Assembler::stop(const char* msg, Condition cond, int32_t code,
}
}
-
-void Assembler::bkpt(uint32_t imm16) { emit(0x7d821008); }
-
+void Assembler::bkpt(uint32_t imm16) { emit(0x7D821008); }
void Assembler::dcbf(Register ra, Register rb) {
emit(EXT2 | DCBF | ra.code() * B16 | rb.code() * B11);
@@ -2085,8 +2083,7 @@ void Assembler::EmitRelocations() {
it != relocations_.end(); it++) {
RelocInfo::Mode rmode = it->rmode();
Address pc = buffer_ + it->position();
- Code* code = nullptr;
- RelocInfo rinfo(pc, rmode, it->data(), code);
+ RelocInfo rinfo(pc, rmode, it->data(), nullptr);
// Fix up internal references now that they are guaranteed to be bound.
if (RelocInfo::IsInternalReference(rmode)) {
@@ -2095,8 +2092,8 @@ void Assembler::EmitRelocations() {
Memory::Address_at(pc) = buffer_ + pos;
} else if (RelocInfo::IsInternalReferenceEncoded(rmode)) {
// mov sequence
- intptr_t pos = reinterpret_cast<intptr_t>(target_address_at(pc, code));
- set_target_address_at(nullptr, pc, code, buffer_ + pos,
+ intptr_t pos = reinterpret_cast<intptr_t>(target_address_at(pc, nullptr));
+ set_target_address_at(nullptr, pc, nullptr, buffer_ + pos,
SKIP_ICACHE_FLUSH);
}
diff --git a/deps/v8/src/ppc/assembler-ppc.h b/deps/v8/src/ppc/assembler-ppc.h
index 77c1422424..0204d65fa5 100644
--- a/deps/v8/src/ppc/assembler-ppc.h
+++ b/deps/v8/src/ppc/assembler-ppc.h
@@ -303,6 +303,7 @@ constexpr Register kConstantPoolRegister = r28; // Constant pool.
constexpr Register kRootRegister = r29; // Roots array pointer.
constexpr Register cp = r30; // JavaScript context pointer.
+constexpr bool kPadArguments = false;
constexpr bool kSimpleFPAliasing = true;
constexpr bool kSimdMaskRegisters = false;
@@ -582,10 +583,6 @@ class Assembler : public AssemblerBase {
INLINE(static void set_target_address_at(
Isolate* isolate, Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
- INLINE(static Address target_address_at(Address pc, Code* code));
- INLINE(static void set_target_address_at(
- Isolate* isolate, Address pc, Code* code, Address target,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
diff --git a/deps/v8/src/ppc/code-stubs-ppc.cc b/deps/v8/src/ppc/code-stubs-ppc.cc
index d5af6bfec0..5c3d38786f 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.cc
+++ b/deps/v8/src/ppc/code-stubs-ppc.cc
@@ -129,8 +129,8 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// scratch_high LSR 31 equals zero.
// New result = (result eor 0) + 0 = result.
// If the input was negative, we have to negate the result.
- // Input_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1.
- // New result = (result eor 0xffffffff) + 1 = 0 - result.
+ // Input_high ASR 31 equals 0xFFFFFFFF and scratch_high LSR 31 equals 1.
+ // New result = (result eor 0xFFFFFFFF) + 1 = 0 - result.
__ srawi(r0, scratch_high, 31);
#if V8_TARGET_ARCH_PPC64
__ srdi(r0, r0, Operand(32));
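
The identity in that comment is standard branch-free conditional negation: with an all-ones-or-zero mask derived from the sign bit, XOR plus the mask's low bit either passes the value through or two's-complement negates it. In C++ (arithmetic right shift assumed, as on the targets discussed):

```cpp
// mask is 0 for non-negative inputs and 0xFFFFFFFF for negative ones, so
// (result ^ mask) + (mask & 1) is result or 0 - result respectively.
#include <cstdint>

int32_t NegateIfInputNegative(int32_t result, int32_t input_high) {
  uint32_t mask = static_cast<uint32_t>(input_high >> 31);  // 0 or all ones
  return static_cast<int32_t>((static_cast<uint32_t>(result) ^ mask) +
                              (mask & 1));
}
```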
@@ -489,6 +489,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Set up the reserved register for 0.0.
__ LoadDoubleLiteral(kDoubleRegZero, Double(0.0), r0);
+ __ InitializeRootRegister();
+
// Push a frame with special values setup to mark it as an entry frame.
// r3: code entry
// r4: function
@@ -566,12 +568,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// r5: receiver
// r6: argc
// r7: argv
- if (type() == StackFrame::CONSTRUCT_ENTRY) {
- __ Call(BUILTIN_CODE(isolate(), JSConstructEntryTrampoline),
- RelocInfo::CODE_TARGET);
- } else {
- __ Call(BUILTIN_CODE(isolate(), JSEntryTrampoline), RelocInfo::CODE_TARGET);
- }
+ __ Call(EntryTrampoline(), RelocInfo::CODE_TARGET);
// Unlink this frame from the handler chain.
__ PopStackHandler();
@@ -763,7 +760,7 @@ static void CreateArrayDispatch(MacroAssembler* masm,
}
// If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
} else {
UNREACHABLE();
}
@@ -804,7 +801,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
if (FLAG_debug_code) {
__ LoadP(r8, FieldMemOperand(r5, 0));
__ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
- __ Assert(eq, kExpectedAllocationSite);
+ __ Assert(eq, AbortReason::kExpectedAllocationSite);
}
// Save the resulting elements kind in type info. We can't just store r6
@@ -831,7 +828,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
}
// If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
} else {
UNREACHABLE();
}
@@ -906,9 +903,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ LoadP(r7, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ TestIfSmi(r7, r0);
- __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, cr0);
__ CompareObjectType(r7, r7, r8, MAP_TYPE);
- __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
// We should either have undefined in r5 or a valid AllocationSite
__ AssertUndefinedOrAllocationSite(r5, r7);
@@ -987,9 +984,9 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
__ LoadP(r6, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ TestIfSmi(r6, r0);
- __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, cr0);
__ CompareObjectType(r6, r6, r7, MAP_TYPE);
- __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
}
// Figure out the right elements kind
@@ -1004,7 +1001,9 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
__ cmpi(r6, Operand(PACKED_ELEMENTS));
__ beq(&done);
__ cmpi(r6, Operand(HOLEY_ELEMENTS));
- __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray);
+ __ Assert(
+ eq,
+ AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray);
__ bind(&done);
}
@@ -1115,7 +1114,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
if (__ emit_debug_code()) {
__ lwz(r4, MemOperand(r17, kLevelOffset));
__ cmp(r4, r16);
- __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
+ __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall);
}
__ subi(r16, r16, Operand(1));
__ stw(r16, MemOperand(r17, kLevelOffset));
diff --git a/deps/v8/src/ppc/codegen-ppc.cc b/deps/v8/src/ppc/codegen-ppc.cc
index 13c9af7e22..4641dc260c 100644
--- a/deps/v8/src/ppc/codegen-ppc.cc
+++ b/deps/v8/src/ppc/codegen-ppc.cc
@@ -20,8 +20,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
return nullptr;
#else
size_t allocated = 0;
- byte* buffer =
- AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
+ byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@@ -41,8 +40,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, allocated);
- CHECK(base::OS::SetPermissions(buffer, allocated,
- base::OS::MemoryPermission::kReadExecute));
+ CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}
diff --git a/deps/v8/src/ppc/interface-descriptors-ppc.cc b/deps/v8/src/ppc/interface-descriptors-ppc.cc
index 9c4fe5fd6a..069fcb26ad 100644
--- a/deps/v8/src/ppc/interface-descriptors-ppc.cc
+++ b/deps/v8/src/ppc/interface-descriptors-ppc.cc
@@ -43,8 +43,6 @@ const Register LoadDescriptor::SlotRegister() { return r3; }
const Register LoadWithVectorDescriptor::VectorRegister() { return r6; }
-const Register LoadICProtoArrayDescriptor::HandlerRegister() { return r7; }
-
const Register StoreDescriptor::ReceiverRegister() { return r4; }
const Register StoreDescriptor::NameRegister() { return r5; }
const Register StoreDescriptor::ValueRegister() { return r3; }
@@ -202,6 +200,11 @@ void TransitionElementsKindDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void AbortJSDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r4};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.cc b/deps/v8/src/ppc/macro-assembler-ppc.cc
index 75e176c09c..8d7c3d05b4 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/ppc/macro-assembler-ppc.cc
@@ -445,7 +445,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
if (emit_debug_code()) {
LoadP(r0, MemOperand(address));
cmp(r0, value);
- Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+ Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
}
if (remembered_set_action == OMIT_REMEMBERED_SET &&
@@ -694,7 +694,7 @@ void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
cmpi(shift, Operand(32));
blt(&less_than_32);
// If shift >= 32
- andi(scratch, shift, Operand(0x1f));
+ andi(scratch, shift, Operand(0x1F));
slw(dst_high, src_low, scratch);
li(dst_low, Operand::Zero());
b(&done);
@@ -717,7 +717,7 @@ void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
Move(dst_high, src_low);
li(dst_low, Operand::Zero());
} else if (shift > 32) {
- shift &= 0x1f;
+ shift &= 0x1F;
slwi(dst_high, src_low, Operand(shift));
li(dst_low, Operand::Zero());
} else if (shift == 0) {
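
For a 64-bit value held as two 32-bit registers, a shift of exactly 32 moves the low word into the high word, and a shift above 32 shifts the low word by shift & 0x1F; the sub-32 case (elided in the hunk above) carries bits across the words in the usual way. A C++ rendering of the immediate-shift cases:

```cpp
// Immediate left shift of a 64-bit value stored as {high, low} words,
// mirroring the assembler logic above.
#include <cstdint>

void ShiftLeftPair(uint32_t* dst_hi, uint32_t* dst_lo,
                   uint32_t src_hi, uint32_t src_lo, int shift) {
  if (shift == 0) {
    *dst_hi = src_hi;
    *dst_lo = src_lo;
  } else if (shift == 32) {
    *dst_hi = src_lo;  // low word moves up wholesale
    *dst_lo = 0;
  } else if (shift > 32) {
    *dst_hi = src_lo << (shift & 0x1F);
    *dst_lo = 0;
  } else {  // 0 < shift < 32: bits carry from the low word into the high
    *dst_hi = (src_hi << shift) | (src_lo >> (32 - shift));
    *dst_lo = src_lo << shift;
  }
}
```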
@@ -741,7 +741,7 @@ void TurboAssembler::ShiftRightPair(Register dst_low, Register dst_high,
cmpi(shift, Operand(32));
blt(&less_than_32);
// If shift >= 32
- andi(scratch, shift, Operand(0x1f));
+ andi(scratch, shift, Operand(0x1F));
srw(dst_low, src_high, scratch);
li(dst_high, Operand::Zero());
b(&done);
@@ -764,7 +764,7 @@ void TurboAssembler::ShiftRightPair(Register dst_low, Register dst_high,
Move(dst_low, src_high);
li(dst_high, Operand::Zero());
} else if (shift > 32) {
- shift &= 0x1f;
+ shift &= 0x1F;
srwi(dst_low, src_high, Operand(shift));
li(dst_high, Operand::Zero());
} else if (shift == 0) {
@@ -787,7 +787,7 @@ void TurboAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
cmpi(shift, Operand(32));
blt(&less_than_32);
// If shift >= 32
- andi(scratch, shift, Operand(0x1f));
+ andi(scratch, shift, Operand(0x1F));
sraw(dst_low, src_high, scratch);
srawi(dst_high, src_high, 31);
b(&done);
@@ -810,7 +810,7 @@ void TurboAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
Move(dst_low, src_high);
srawi(dst_high, src_high, 31);
} else if (shift > 32) {
- shift &= 0x1f;
+ shift &= 0x1F;
srawi(dst_low, src_high, shift);
srawi(dst_high, src_high, 31);
} else if (shift == 0) {
@@ -1034,6 +1034,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
LoadP(cp, MemOperand(ip));
#ifdef DEBUG
+ mov(r6, Operand(Context::kInvalidContext));
mov(ip,
Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate())));
StoreP(r6, MemOperand(ip));
@@ -1091,7 +1092,7 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
if (FLAG_debug_code) {
cmpl(src_reg, dst_reg);
- Check(lt, kStackAccessBelowStackPointer);
+ Check(lt, AbortReason::kStackAccessBelowStackPointer);
}
// Restore caller's frame pointer and return address now as they will be
@@ -1327,9 +1328,11 @@ void MacroAssembler::MaybeDropFrames() {
void MacroAssembler::PushStackHandler() {
// Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+ Push(Smi::kZero); // Padding.
+
// Link the current handler as the next handler.
// Preserve r3-r7.
mov(r8,
@@ -1343,13 +1346,15 @@ void MacroAssembler::PushStackHandler() {
void MacroAssembler::PopStackHandler() {
- STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
pop(r4);
mov(ip,
Operand(ExternalReference(IsolateAddressId::kHandlerAddress, isolate())));
StoreP(r4, MemOperand(ip));
+
+ Drop(1); // Drop padding.
}
@@ -1365,7 +1370,7 @@ void MacroAssembler::CompareObjectType(Register object, Register map,
void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
InstanceType type) {
STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
- STATIC_ASSERT(LAST_TYPE <= 0xffff);
+ STATIC_ASSERT(LAST_TYPE <= 0xFFFF);
lhz(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
cmpi(type_reg, Operand(type));
}
@@ -1645,12 +1650,12 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
}
}
-void TurboAssembler::Assert(Condition cond, BailoutReason reason,
+void TurboAssembler::Assert(Condition cond, AbortReason reason,
CRegister cr) {
if (emit_debug_code()) Check(cond, reason, cr);
}
-void TurboAssembler::Check(Condition cond, BailoutReason reason, CRegister cr) {
+void TurboAssembler::Check(Condition cond, AbortReason reason, CRegister cr) {
Label L;
b(cond, &L, cr);
Abort(reason);
@@ -1658,7 +1663,7 @@ void TurboAssembler::Check(Condition cond, BailoutReason reason, CRegister cr) {
bind(&L);
}
-void TurboAssembler::Abort(BailoutReason reason) {
+void TurboAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
#ifdef DEBUG
@@ -1713,7 +1718,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object, r0);
- Check(ne, kOperandIsASmi, cr0);
+ Check(ne, AbortReason::kOperandIsASmi, cr0);
}
}
@@ -1722,7 +1727,7 @@ void MacroAssembler::AssertSmi(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object, r0);
- Check(eq, kOperandIsNotSmi, cr0);
+ Check(eq, AbortReason::kOperandIsNotASmi, cr0);
}
}
@@ -1730,11 +1735,11 @@ void MacroAssembler::AssertFixedArray(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object, r0);
- Check(ne, kOperandIsASmiAndNotAFixedArray, cr0);
+ Check(ne, AbortReason::kOperandIsASmiAndNotAFixedArray, cr0);
push(object);
CompareObjectType(object, object, object, FIXED_ARRAY_TYPE);
pop(object);
- Check(eq, kOperandIsNotAFixedArray);
+ Check(eq, AbortReason::kOperandIsNotAFixedArray);
}
}
@@ -1742,11 +1747,11 @@ void MacroAssembler::AssertFunction(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object, r0);
- Check(ne, kOperandIsASmiAndNotAFunction, cr0);
+ Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, cr0);
push(object);
CompareObjectType(object, object, object, JS_FUNCTION_TYPE);
pop(object);
- Check(eq, kOperandIsNotAFunction);
+ Check(eq, AbortReason::kOperandIsNotAFunction);
}
}
@@ -1755,18 +1760,18 @@ void MacroAssembler::AssertBoundFunction(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object, r0);
- Check(ne, kOperandIsASmiAndNotABoundFunction, cr0);
+ Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction, cr0);
push(object);
CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
pop(object);
- Check(eq, kOperandIsNotABoundFunction);
+ Check(eq, AbortReason::kOperandIsNotABoundFunction);
}
}
void MacroAssembler::AssertGeneratorObject(Register object) {
if (!emit_debug_code()) return;
TestIfSmi(object, r0);
- Check(ne, kOperandIsASmiAndNotAGeneratorObject, cr0);
+ Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, cr0);
// Load map
Register map = object;
@@ -1785,7 +1790,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
bind(&do_check);
// Restore generator object to register and perform assertion
pop(object);
- Check(eq, kOperandIsNotAGeneratorObject);
+ Check(eq, AbortReason::kOperandIsNotAGeneratorObject);
}
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
@@ -1797,7 +1802,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
beq(&done_checking);
LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
- Assert(eq, kExpectedUndefinedOrCell);
+ Assert(eq, AbortReason::kExpectedUndefinedOrCell);
bind(&done_checking);
}
}
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.h b/deps/v8/src/ppc/macro-assembler-ppc.h
index c508ae128a..c67ef4ab90 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/ppc/macro-assembler-ppc.h
@@ -404,13 +404,13 @@ class TurboAssembler : public Assembler {
// Calls Abort(msg) if the condition cond is not satisfied.
// Use --debug_code to enable.
- void Assert(Condition cond, BailoutReason reason, CRegister cr = cr7);
+ void Assert(Condition cond, AbortReason reason, CRegister cr = cr7);
// Like Assert(), but always enabled.
- void Check(Condition cond, BailoutReason reason, CRegister cr = cr7);
+ void Check(Condition cond, AbortReason reason, CRegister cr = cr7);
// Print a message to stdout and abort execution.
- void Abort(BailoutReason reason);
+ void Abort(AbortReason reason);
inline bool AllowThisStubCall(CodeStub* stub);
#if !V8_TARGET_ARCH_PPC64
diff --git a/deps/v8/src/ppc/simulator-ppc.cc b/deps/v8/src/ppc/simulator-ppc.cc
index ff62c4a56e..a92e5363ea 100644
--- a/deps/v8/src/ppc/simulator-ppc.cc
+++ b/deps/v8/src/ppc/simulator-ppc.cc
@@ -47,7 +47,7 @@ class PPCDebugger {
void Debug();
private:
- static const Instr kBreakpointInstr = (TWI | 0x1f * B21);
+ static const Instr kBreakpointInstr = (TWI | 0x1F * B21);
static const Instr kNopInstr = (ORI); // ori, 0,0,0
Simulator* sim_;
@@ -232,7 +232,7 @@ void PPCDebugger::Debug() {
// If at a breakpoint, proceed past it.
if ((reinterpret_cast<Instruction*>(sim_->get_pc()))
- ->InstructionBits() == 0x7d821008) {
+ ->InstructionBits() == 0x7D821008) {
sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize);
} else {
sim_->ExecuteInstruction(
@@ -256,7 +256,7 @@ void PPCDebugger::Debug() {
} else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
// If at a breakpoint, proceed past it.
if ((reinterpret_cast<Instruction*>(sim_->get_pc()))
- ->InstructionBits() == 0x7d821008) {
+ ->InstructionBits() == 0x7D821008) {
sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize);
} else {
// Execute the one instruction we broke at with breakpoints disabled.
@@ -314,7 +314,7 @@ void PPCDebugger::Debug() {
PrintF("%3s: %f 0x%08x %08x\n",
GetRegConfig()->GetDoubleRegisterName(i), dvalue,
static_cast<uint32_t>(as_words >> 32),
- static_cast<uint32_t>(as_words & 0xffffffff));
+ static_cast<uint32_t>(as_words & 0xFFFFFFFF));
}
} else if (arg1[0] == 'r' &&
(arg1[1] >= '0' && arg1[1] <= '9' &&
@@ -336,7 +336,7 @@ void PPCDebugger::Debug() {
uint64_t as_words = bit_cast<uint64_t>(dvalue);
PrintF("%s: %f 0x%08x %08x\n", arg1, dvalue,
static_cast<uint32_t>(as_words >> 32),
- static_cast<uint32_t>(as_words & 0xffffffff));
+ static_cast<uint32_t>(as_words & 0xFFFFFFFF));
} else {
PrintF("%s unrecognized\n", arg1);
}
@@ -664,6 +664,10 @@ void Simulator::set_last_debugger_input(char* input) {
last_debugger_input_ = input;
}
+void Simulator::SetRedirectInstruction(Instruction* instruction) {
+ instruction->SetInstructionBits(rtCallRedirInstr | kCallRtRedirected);
+}
+
void Simulator::FlushICache(base::CustomMatcherHashMap* i_cache,
void* start_addr, size_t size) {
intptr_t start = reinterpret_cast<intptr_t>(start_addr);
@@ -733,21 +737,12 @@ void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
}
-void Simulator::Initialize(Isolate* isolate) {
- if (isolate->simulator_initialized()) return;
- isolate->set_simulator_initialized(true);
- ::v8::internal::ExternalReference::set_redirector(isolate,
- &RedirectExternalReference);
-}
-
-
Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
i_cache_ = isolate_->simulator_i_cache();
if (i_cache_ == nullptr) {
i_cache_ = new base::CustomMatcherHashMap(&ICacheMatch);
isolate_->set_simulator_i_cache(i_cache_);
}
- Initialize(isolate);
// Set up simulator support first. Some of this information is needed to
// setup the architecture state.
#if V8_TARGET_ARCH_PPC64
@@ -792,116 +787,6 @@ Simulator::~Simulator() {
free(stack_);
}
-// When the generated code calls an external reference we need to catch that in
-// the simulator. The external reference will be a function compiled for the
-// host architecture. We need to call that function instead of trying to
-// execute it with the simulator. We do that by redirecting the external
-// reference to a svc (Supervisor Call) instruction that is handled by
-// the simulator. We write the original destination of the jump just at a known
-// offset from the svc instruction so the simulator knows what to call.
-class Redirection {
- public:
- Redirection(Isolate* isolate, void* external_function,
- ExternalReference::Type type)
- : external_function_(external_function),
- swi_instruction_(rtCallRedirInstr | kCallRtRedirected),
- type_(type),
- next_(nullptr) {
- next_ = isolate->simulator_redirection();
- Simulator::current(isolate)->FlushICache(
- isolate->simulator_i_cache(),
- reinterpret_cast<void*>(&swi_instruction_), Instruction::kInstrSize);
- isolate->set_simulator_redirection(this);
- if (ABI_USES_FUNCTION_DESCRIPTORS) {
- function_descriptor_[0] = reinterpret_cast<intptr_t>(&swi_instruction_);
- function_descriptor_[1] = 0;
- function_descriptor_[2] = 0;
- }
- }
-
- void* address() {
- if (ABI_USES_FUNCTION_DESCRIPTORS) {
- return reinterpret_cast<void*>(function_descriptor_);
- } else {
- return reinterpret_cast<void*>(&swi_instruction_);
- }
- }
-
- void* external_function() { return external_function_; }
- ExternalReference::Type type() { return type_; }
-
- static Redirection* Get(Isolate* isolate, void* external_function,
- ExternalReference::Type type) {
- Redirection* current = isolate->simulator_redirection();
- for (; current != nullptr; current = current->next_) {
- if (current->external_function_ == external_function &&
- current->type_ == type) {
- return current;
- }
- }
- return new Redirection(isolate, external_function, type);
- }
-
- static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
- char* addr_of_swi = reinterpret_cast<char*>(swi_instruction);
- char* addr_of_redirection =
- addr_of_swi - offsetof(Redirection, swi_instruction_);
- return reinterpret_cast<Redirection*>(addr_of_redirection);
- }
-
- static Redirection* FromAddress(void* address) {
- int delta = ABI_USES_FUNCTION_DESCRIPTORS
- ? offsetof(Redirection, function_descriptor_)
- : offsetof(Redirection, swi_instruction_);
- char* addr_of_redirection = reinterpret_cast<char*>(address) - delta;
- return reinterpret_cast<Redirection*>(addr_of_redirection);
- }
-
- static void* ReverseRedirection(intptr_t reg) {
- Redirection* redirection = FromAddress(reinterpret_cast<void*>(reg));
- return redirection->external_function();
- }
-
- static void DeleteChain(Redirection* redirection) {
- while (redirection != nullptr) {
- Redirection* next = redirection->next_;
- delete redirection;
- redirection = next;
- }
- }
-
- private:
- void* external_function_;
- uint32_t swi_instruction_;
- ExternalReference::Type type_;
- Redirection* next_;
- intptr_t function_descriptor_[3];
-};
-
-
-// static
-void Simulator::TearDown(base::CustomMatcherHashMap* i_cache,
- Redirection* first) {
- Redirection::DeleteChain(first);
- if (i_cache != nullptr) {
- for (base::HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
- entry = i_cache->Next(entry)) {
- delete static_cast<CachePage*>(entry->value);
- }
- delete i_cache;
- }
-}
-
-
-void* Simulator::RedirectExternalReference(Isolate* isolate,
- void* external_function,
- ExternalReference::Type type) {
- base::LockGuard<base::Mutex> lock_guard(
- isolate->simulator_redirection_mutex());
- Redirection* redirection = Redirection::Get(isolate, external_function, type);
- return redirection->address();
-}
-
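
The removed `Redirection` machinery, previously duplicated per architecture, moves into the shared `SimulatorBase`; each simulator now only supplies `SetRedirectInstruction` to plant the trap. A rough sketch of the underlying idea, with all names illustrative rather than V8's:

```cpp
// Host calls are replaced by a trap word; when the simulator executes the
// trap it looks up and calls the recorded host function natively.
// 0x7D821008 is a placeholder encoding here (the real code plants
// rtCallRedirInstr | kCallRtRedirected).
#include <cstdint>
#include <list>

class RedirectTable {
 public:
  // Returns the address generated code should "call" instead of host_fn.
  const uint32_t* Redirect(void* host_fn) {
    slots_.push_back({kTrapInstr, host_fn});
    return &slots_.back().trap;
  }
  // Invoked when the simulated pc lands on kTrapInstr.
  void* HostFunctionAt(const uint32_t* pc) const {
    for (const Slot& slot : slots_) {
      if (&slot.trap == pc) return slot.host;
    }
    return nullptr;
  }

 private:
  static constexpr uint32_t kTrapInstr = 0x7D821008;
  struct Slot {
    uint32_t trap;
    void* host;
  };
  std::list<Slot> slots_;  // list keeps element addresses stable
};
```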
// Get the active Simulator for the current thread.
Simulator* Simulator::current(Isolate* isolate) {
@@ -988,9 +873,9 @@ void Simulator::SetFpResult(const double& result) {
void Simulator::TrashCallerSaveRegisters() {
// We don't trash the registers with the return value.
#if 0 // A good idea to trash volatile registers, needs to be done
- registers_[2] = 0x50Bad4U;
- registers_[3] = 0x50Bad4U;
- registers_[12] = 0x50Bad4U;
+ registers_[2] = 0x50BAD4U;
+ registers_[3] = 0x50BAD4U;
+ registers_[12] = 0x50BAD4U;
#endif
}
@@ -1239,7 +1124,7 @@ void Simulator::Format(Instruction* instr, const char* format) {
bool Simulator::CarryFrom(int32_t left, int32_t right, int32_t carry) {
uint32_t uleft = static_cast<uint32_t>(left);
uint32_t uright = static_cast<uint32_t>(right);
- uint32_t urest = 0xffffffffU - uleft;
+ uint32_t urest = 0xFFFFFFFFU - uleft;
return (uright > urest) ||
(carry && (((uright + 1) > urest) || (uright > (urest - 1))));
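
`CarryFrom` tests unsigned carry-out without widening: left + right overflows 32 bits exactly when right exceeds 0xFFFFFFFF - left, and with a carry-in the boundary case right == rest overflows as well. An equivalent, slightly simplified restatement:

```cpp
// rest is the headroom before 32-bit wrap-around; a carry-in lowers the
// overflow threshold by one.
#include <cstdint>

bool CarryFrom32(uint32_t left, uint32_t right, bool carry_in) {
  uint32_t rest = 0xFFFFFFFFu - left;
  return right > rest || (carry_in && right >= rest);
}
// CarryFrom32(0xFFFFFFFFu, 1, false) == true
// CarryFrom32(1, 2, false)           == false
```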
@@ -1330,7 +1215,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
bool stack_aligned =
(get_register(sp) & (::v8::internal::FLAG_sim_stack_alignment - 1)) ==
0;
- Redirection* redirection = Redirection::FromSwiInstruction(instr);
+ Redirection* redirection = Redirection::FromInstruction(instr);
const int kArgCount = 9;
const int kRegisterArgCount = 8;
int arg0_regnum = 3;
@@ -1641,7 +1526,7 @@ void Simulator::DisableStop(uint32_t code) {
void Simulator::IncreaseStopCounter(uint32_t code) {
DCHECK_LE(code, kMaxStopCode);
DCHECK(isWatchedStop(code));
- if ((watched_stops_[code].count & ~(1 << 31)) == 0x7fffffff) {
+ if ((watched_stops_[code].count & ~(1 << 31)) == 0x7FFFFFFF) {
PrintF(
"Stop counter for code %i has overflowed.\n"
"Enabling this code and reseting the counter to 0.\n",
@@ -1958,10 +1843,10 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
bit >>= 1;
}
} else if (mb == me + 1) {
- mask = 0xffffffff;
+ mask = 0xFFFFFFFF;
} else { // mb > me+1
int bit = 0x80000000 >> (me + 1); // needs to be tested
- mask = 0xffffffff;
+ mask = 0xFFFFFFFF;
for (; me < mb; me++) {
mask ^= bit;
bit >>= 1;
@@ -1987,7 +1872,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
} else {
int rb = instr->RBValue();
uint32_t rb_val = get_register(rb);
- sh = (rb_val & 0x1f);
+ sh = (rb_val & 0x1F);
}
int mb = instr->Bits(10, 6);
int me = instr->Bits(5, 1);
@@ -2000,10 +1885,10 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
bit >>= 1;
}
} else if (mb == me + 1) {
- mask = 0xffffffff;
+ mask = 0xFFFFFFFF;
} else { // mb > me+1
int bit = 0x80000000 >> (me + 1); // needs to be tested
- mask = 0xffffffff;
+ mask = 0xFFFFFFFF;
for (; me < mb; me++) {
mask ^= bit;
bit >>= 1;
@@ -2078,7 +1963,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int ra = instr->RAValue();
int rb = instr->RBValue();
uint32_t rs_val = get_register(rs);
- uintptr_t rb_val = get_register(rb) & 0x3f;
+ uintptr_t rb_val = get_register(rb) & 0x3F;
intptr_t result = (rb_val > 31) ? 0 : rs_val >> rb_val;
set_register(ra, result);
if (instr->Bit(0)) { // RC bit set
@@ -2092,7 +1977,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int ra = instr->RAValue();
int rb = instr->RBValue();
uintptr_t rs_val = get_register(rs);
- uintptr_t rb_val = get_register(rb) & 0x7f;
+ uintptr_t rb_val = get_register(rb) & 0x7F;
intptr_t result = (rb_val > 63) ? 0 : rs_val >> rb_val;
set_register(ra, result);
if (instr->Bit(0)) { // RC bit set
@@ -2160,7 +2045,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int ra = instr->RAValue();
int rb = instr->RBValue();
int32_t rs_val = get_register(rs);
- intptr_t rb_val = get_register(rb) & 0x3f;
+ intptr_t rb_val = get_register(rb) & 0x3F;
intptr_t result = (rb_val > 31) ? rs_val >> 31 : rs_val >> rb_val;
set_register(ra, result);
if (instr->Bit(0)) { // RC bit set
@@ -2174,7 +2059,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int ra = instr->RAValue();
int rb = instr->RBValue();
intptr_t rs_val = get_register(rs);
- intptr_t rb_val = get_register(rb) & 0x7f;
+ intptr_t rb_val = get_register(rb) & 0x7F;
intptr_t result = (rb_val > 63) ? rs_val >> 63 : rs_val >> rb_val;
set_register(ra, result);
if (instr->Bit(0)) { // RC bit set
@@ -2244,10 +2129,10 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
float* fptr = reinterpret_cast<float*>(&val);
#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
// Conversion using double changes sNan to qNan on ia32/x64
- if ((val & 0x7f800000) == 0x7f800000) {
+ if ((val & 0x7F800000) == 0x7F800000) {
int64_t dval = static_cast<int64_t>(val);
- dval = ((dval & 0xc0000000) << 32) | ((dval & 0x40000000) << 31) |
- ((dval & 0x40000000) << 30) | ((dval & 0x7fffffff) << 29) | 0x0;
+ dval = ((dval & 0xC0000000) << 32) | ((dval & 0x40000000) << 31) |
+ ((dval & 0x40000000) << 30) | ((dval & 0x7FFFFFFF) << 29) | 0x0;
set_d_register(frt, dval);
} else {
set_d_register_from_double(frt, static_cast<double>(*fptr));
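
The special case exists because converting a float to double on ia32/x64 hardware quiets a signaling NaN (the quiet bit of the mantissa gets set), so the simulator detects the all-ones exponent pattern and rebuilds the double's bit pattern by hand. A sketch of the detection and of the behavior being worked around:

```cpp
// An all-ones exponent marks NaN or infinity; a plain float->double
// conversion on ia32/x64 turns a signaling NaN into a quiet one.
#include <cstdint>
#include <cstring>

bool IsNaNOrInfinityBits(uint32_t float_bits) {
  return (float_bits & 0x7F800000) == 0x7F800000;  // exponent all ones
}

double QuietingConvert(uint32_t float_bits) {
  float f;
  std::memcpy(&f, &float_bits, sizeof(f));
  return static_cast<double>(f);  // an sNaN comes out quieted here
}
```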
@@ -2289,9 +2174,9 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
// Conversion using double changes sNan to qNan on ia32/x64
int32_t sval = 0;
int64_t dval = get_d_register(frs);
- if ((dval & 0x7ff0000000000000) == 0x7ff0000000000000) {
- sval = ((dval & 0xc000000000000000) >> 32) |
- ((dval & 0x07ffffffe0000000) >> 29);
+ if ((dval & 0x7FF0000000000000) == 0x7FF0000000000000) {
+ sval = ((dval & 0xC000000000000000) >> 32) |
+ ((dval & 0x07FFFFFFE0000000) >> 29);
p = &sval;
} else {
p = reinterpret_cast<int32_t*>(&frs_val);
@@ -2625,7 +2510,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int ra = instr->RAValue();
int rb = instr->RBValue();
uint32_t rs_val = get_register(rs);
- uintptr_t rb_val = get_register(rb) & 0x3f;
+ uintptr_t rb_val = get_register(rb) & 0x3F;
uint32_t result = (rb_val > 31) ? 0 : rs_val << rb_val;
set_register(ra, result);
if (instr->Bit(0)) { // RC bit set
@@ -2639,7 +2524,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int ra = instr->RAValue();
int rb = instr->RBValue();
uintptr_t rs_val = get_register(rs);
- uintptr_t rb_val = get_register(rb) & 0x7f;
+ uintptr_t rb_val = get_register(rb) & 0x7F;
uintptr_t result = (rb_val > 63) ? 0 : rs_val << rb_val;
set_register(ra, result);
if (instr->Bit(0)) { // RC bit set
@@ -3249,7 +3134,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int rt = instr->RTValue();
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
- uintptr_t result = ReadHU(ra_val + offset, instr) & 0xffff;
+ uintptr_t result = ReadHU(ra_val + offset, instr) & 0xFFFF;
set_register(rt, result);
if (opcode == LHZU) {
set_register(ra, ra_val + offset);
@@ -3302,10 +3187,10 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
float* fptr = reinterpret_cast<float*>(&val);
#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
// Conversion using double changes sNan to qNan on ia32/x64
- if ((val & 0x7f800000) == 0x7f800000) {
+ if ((val & 0x7F800000) == 0x7F800000) {
int64_t dval = static_cast<int64_t>(val);
- dval = ((dval & 0xc0000000) << 32) | ((dval & 0x40000000) << 31) |
- ((dval & 0x40000000) << 30) | ((dval & 0x7fffffff) << 29) | 0x0;
+ dval = ((dval & 0xC0000000) << 32) | ((dval & 0x40000000) << 31) |
+ ((dval & 0x40000000) << 30) | ((dval & 0x7FFFFFFF) << 29) | 0x0;
set_d_register(frt, dval);
} else {
set_d_register_from_double(frt, static_cast<double>(*fptr));
@@ -3347,9 +3232,9 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
// Conversion using double changes sNan to qNan on ia32/x64
int32_t sval = 0;
int64_t dval = get_d_register(frs);
- if ((dval & 0x7ff0000000000000) == 0x7ff0000000000000) {
- sval = ((dval & 0xc000000000000000) >> 32) |
- ((dval & 0x07ffffffe0000000) >> 29);
+ if ((dval & 0x7FF0000000000000) == 0x7FF0000000000000) {
+ sval = ((dval & 0xC000000000000000) >> 32) |
+ ((dval & 0x07FFFFFFE0000000) >> 29);
p = &sval;
} else {
p = reinterpret_cast<int32_t*>(&frs_val);
@@ -3749,7 +3634,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
case MTFSF: {
int frb = instr->RBValue();
int64_t frb_dval = get_d_register(frb);
- int32_t frb_ival = static_cast<int32_t>((frb_dval)&0xffffffff);
+ int32_t frb_ival = static_cast<int32_t>((frb_dval)&0xFFFFFFFF);
int l = instr->Bits(25, 25);
if (l == 1) {
fp_condition_reg_ = frb_ival;
@@ -3774,8 +3659,8 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int bfa = instr->Bits(20, 18);
int cr_shift = (7 - bf) * CRWIDTH;
int fp_shift = (7 - bfa) * CRWIDTH;
- int field_val = (fp_condition_reg_ >> fp_shift) & 0xf;
- condition_reg_ &= ~(0x0f << cr_shift);
+ int field_val = (fp_condition_reg_ >> fp_shift) & 0xF;
+ condition_reg_ &= ~(0x0F << cr_shift);
condition_reg_ |= (field_val << cr_shift);
// Clear copied exception bits
switch (bfa) {
@@ -3826,7 +3711,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
DCHECK(sh >= 0 && sh <= 63);
DCHECK(mb >= 0 && mb <= 63);
uintptr_t result = base::bits::RotateLeft64(rs_val, sh);
- uintptr_t mask = 0xffffffffffffffff >> mb;
+ uintptr_t mask = 0xFFFFFFFFFFFFFFFF >> mb;
result &= mask;
set_register(ra, result);
if (instr->Bit(0)) { // RC bit set
@@ -3843,7 +3728,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
DCHECK(sh >= 0 && sh <= 63);
DCHECK(me >= 0 && me <= 63);
uintptr_t result = base::bits::RotateLeft64(rs_val, sh);
- uintptr_t mask = 0xffffffffffffffff << (63 - me);
+ uintptr_t mask = 0xFFFFFFFFFFFFFFFF << (63 - me);
result &= mask;
set_register(ra, result);
if (instr->Bit(0)) { // RC bit set
@@ -3860,7 +3745,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
DCHECK(sh >= 0 && sh <= 63);
DCHECK(mb >= 0 && mb <= 63);
uintptr_t result = base::bits::RotateLeft64(rs_val, sh);
- uintptr_t mask = (0xffffffffffffffff >> mb) & (0xffffffffffffffff << sh);
+ uintptr_t mask = (0xFFFFFFFFFFFFFFFF >> mb) & (0xFFFFFFFFFFFFFFFF << sh);
result &= mask;
set_register(ra, result);
if (instr->Bit(0)) { // RC bit set
@@ -3885,10 +3770,10 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
bit >>= 1;
}
} else if (mb == me + 1) {
- mask = 0xffffffffffffffff;
+ mask = 0xFFFFFFFFFFFFFFFF;
} else { // mb > me+1
uintptr_t bit = 0x8000000000000000 >> (me + 1); // needs to be tested
- mask = 0xffffffffffffffff;
+ mask = 0xFFFFFFFFFFFFFFFF;
for (; me < mb; me++) {
mask ^= bit;
bit >>= 1;
@@ -3909,12 +3794,12 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int rb = instr->RBValue();
uintptr_t rs_val = get_register(rs);
uintptr_t rb_val = get_register(rb);
- int sh = (rb_val & 0x3f);
+ int sh = (rb_val & 0x3F);
int mb = (instr->Bits(10, 6) | (instr->Bit(5) << 5));
DCHECK(sh >= 0 && sh <= 63);
DCHECK(mb >= 0 && mb <= 63);
uintptr_t result = base::bits::RotateLeft64(rs_val, sh);
- uintptr_t mask = 0xffffffffffffffff >> mb;
+ uintptr_t mask = 0xFFFFFFFFFFFFFFFF >> mb;
result &= mask;
set_register(ra, result);
if (instr->Bit(0)) { // RC bit set
@@ -4202,17 +4087,15 @@ void Simulator::CallInternal(byte* entry) {
set_register(fp, r31_val);
}
-
-intptr_t Simulator::Call(byte* entry, int argument_count, ...) {
- va_list parameters;
- va_start(parameters, argument_count);
+intptr_t Simulator::CallImpl(byte* entry, int argument_count,
+ const intptr_t* arguments) {
// Set up arguments
// First eight arguments passed in registers r3-r10.
- int reg_arg_count = (argument_count > 8) ? 8 : argument_count;
+ int reg_arg_count = std::min(8, argument_count);
int stack_arg_count = argument_count - reg_arg_count;
for (int i = 0; i < reg_arg_count; i++) {
- set_register(i + 3, va_arg(parameters, intptr_t));
+ set_register(i + 3, arguments[i]);
}
// Remaining arguments passed on stack.
@@ -4228,10 +4111,8 @@ intptr_t Simulator::Call(byte* entry, int argument_count, ...) {
// +2 is a hack for the LR slot + old SP on PPC
intptr_t* stack_argument =
reinterpret_cast<intptr_t*>(entry_stack) + kStackFrameExtraParamSlot;
- for (int i = 0; i < stack_arg_count; i++) {
- stack_argument[i] = va_arg(parameters, intptr_t);
- }
- va_end(parameters);
+ memcpy(stack_argument, arguments + reg_arg_count,
+ stack_arg_count * sizeof(*arguments));
set_register(sp, entry_stack);
CallInternal(entry);
@@ -4240,8 +4121,7 @@ intptr_t Simulator::Call(byte* entry, int argument_count, ...) {
CHECK_EQ(entry_stack, get_register(sp));
set_register(sp, original_stack);
- intptr_t result = get_register(r3);
- return result;
+ return get_register(r3);
}
diff --git a/deps/v8/src/ppc/simulator-ppc.h b/deps/v8/src/ppc/simulator-ppc.h
index aba6c3671b..544b9d463e 100644
--- a/deps/v8/src/ppc/simulator-ppc.h
+++ b/deps/v8/src/ppc/simulator-ppc.h
@@ -2,11 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
// Declares a Simulator for PPC instructions if we are not generating a native
// PPC binary. This Simulator allows us to run and debug PPC code generation on
// regular desktop machines.
-// V8 calls into generated code by "calling" the CALL_GENERATED_CODE macro,
+// V8 calls into generated code via the GeneratedCode wrapper,
// which will start execution in the Simulator or forward to the real entry
// on a PPC HW platform.
@@ -15,55 +14,13 @@
#include "src/allocation.h"
-#if !defined(USE_SIMULATOR)
-// Running without a simulator on a native ppc platform.
-
-namespace v8 {
-namespace internal {
-
-// When running without a simulator we call the entry directly.
-#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
- (entry(p0, p1, p2, p3, p4))
-
-typedef int (*ppc_regexp_matcher)(String*, int, const byte*, const byte*, int*,
- int, Address, int, Isolate*);
-
-// Call the generated regexp code directly. The code at the entry address
-// should act as a function matching the type ppc_regexp_matcher.
-#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
- p7, p8) \
- (FUNCTION_CAST<ppc_regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, p8))
-
-// The stack limit beyond which we will throw stack overflow errors in
-// generated code. Because generated code on ppc uses the C stack, we
-// just use the C stack limit.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
- uintptr_t c_limit) {
- USE(isolate);
- return c_limit;
- }
-
- static inline uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
- uintptr_t try_catch_address) {
- USE(isolate);
- return try_catch_address;
- }
-
- static inline void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
- USE(isolate);
- }
-};
-} // namespace internal
-} // namespace v8
-
-#else // !defined(USE_SIMULATOR)
+#if defined(USE_SIMULATOR)
// Running with a simulator.
#include "src/assembler.h"
#include "src/base/hashmap.h"
#include "src/ppc/constants-ppc.h"
+#include "src/simulator-base.h"
namespace v8 {
namespace internal {
@@ -94,8 +51,7 @@ class CachePage {
char validity_map_[kValidityMapSize]; // One byte per line.
};
-
-class Simulator {
+class Simulator : public SimulatorBase {
public:
friend class PPCDebugger;
enum Register {
@@ -210,15 +166,11 @@ class Simulator {
// Executes PPC instructions until the PC reaches end_sim_pc.
void Execute();
- // Call on program start.
- static void Initialize(Isolate* isolate);
-
- static void TearDown(base::CustomMatcherHashMap* i_cache, Redirection* first);
+ template <typename Return, typename... Args>
+ Return Call(byte* entry, Args... args) {
+ return VariadicCall<Return>(this, &Simulator::CallImpl, entry, args...);
+ }
- // V8 generally calls into generated JS code with 5 parameters and into
- // generated RegExp code with 7 parameters. This is a convenience function,
- // which sets up the simulator state and grabs the result on return.
- intptr_t Call(byte* entry, int argument_count, ...);
// Alternative: call a 2-argument double function.
void CallFP(byte* entry, double d0, double d1);
int32_t CallFPReturnsInt(byte* entry, double d0, double d1);
@@ -234,6 +186,9 @@ class Simulator {
void set_last_debugger_input(char* input);
char* last_debugger_input() { return last_debugger_input_; }
+ // Redirection support.
+ static void SetRedirectInstruction(Instruction* instruction);
+
// ICache checking.
static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
size_t size);
@@ -254,6 +209,8 @@ class Simulator {
end_sim_pc = -2
};
+ intptr_t CallImpl(byte* entry, int argument_count, const intptr_t* arguments);
+
enum BCType { BC_OFFSET, BC_LINK_REG, BC_CTR_REG };
// Unsupported instructions use Format to print an error and stop execution.
@@ -341,11 +298,6 @@ class Simulator {
static CachePage* GetCachePage(base::CustomMatcherHashMap* i_cache,
void* page);
- // Runtime call support. Uses the isolate in a thread-safe way.
- static void* RedirectExternalReference(
- Isolate* isolate, void* external_function,
- v8::internal::ExternalReference::Type type);
-
// Handle arguments and return value for runtime FP functions.
void GetFpArgs(double* x, double* y, intptr_t* z);
void SetFpResult(const double& result);
@@ -481,43 +433,8 @@ class Simulator {
static base::LazyInstance<GlobalMonitor>::type global_monitor_;
};
-
-// When running with the simulator transition into simulated execution at this
-// point.
-#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
- reinterpret_cast<Object*>(Simulator::current(isolate)->Call( \
- FUNCTION_ADDR(entry), 5, (intptr_t)p0, (intptr_t)p1, (intptr_t)p2, \
- (intptr_t)p3, (intptr_t)p4))
-
-#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
- p7, p8) \
- Simulator::current(isolate)->Call( \
- entry, 9, (intptr_t)p0, (intptr_t)p1, (intptr_t)p2, (intptr_t)p3, \
- (intptr_t)p4, (intptr_t)p5, (intptr_t)p6, (intptr_t)p7, (intptr_t)p8)
-
-// The simulator has its own stack. Thus it has a different stack limit from
-// the C-based native code. The JS-based limit normally points near the end of
-// the simulator stack. When the C-based limit is exhausted we reflect that by
-// lowering the JS-based limit as well, to make stack checks trigger.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
- uintptr_t c_limit) {
- return Simulator::current(isolate)->StackLimit(c_limit);
- }
-
- static inline uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
- uintptr_t try_catch_address) {
- Simulator* sim = Simulator::current(isolate);
- return sim->PushAddress(try_catch_address);
- }
-
- static inline void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
- Simulator::current(isolate)->PopAddress();
- }
-};
} // namespace internal
} // namespace v8
-#endif // !defined(USE_SIMULATOR)
+#endif // defined(USE_SIMULATOR)
#endif // V8_PPC_SIMULATOR_PPC_H_
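
With CALL_GENERATED_CODE / CALL_GENERATED_REGEXP_CODE and the native-build SimulatorStack shim removed, a single typed wrapper covers both configurations. An illustrative-only sketch of that shape for a native build; in simulator builds the body would instead forward to the templated Simulator::Call added above:

```cpp
#include <cstdint>

template <typename Signature>
class GeneratedCode;

template <typename Return, typename... Args>
class GeneratedCode<Return(Args...)> {
 public:
  explicit GeneratedCode(uint8_t* entry) : entry_(entry) {}

  Return Call(Args... args) {
    // Native execution: the code entry address is directly callable.
    return reinterpret_cast<Return (*)(Args...)>(entry_)(args...);
  }

 private:
  uint8_t* entry_;
};
```
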
diff --git a/deps/v8/src/profiler/cpu-profiler.cc b/deps/v8/src/profiler/cpu-profiler.cc
index 91617d7231..ac8f55a89b 100644
--- a/deps/v8/src/profiler/cpu-profiler.cc
+++ b/deps/v8/src/profiler/cpu-profiler.cc
@@ -322,8 +322,8 @@ void CpuProfiler::CreateEntriesForRuntimeCallStats() {
static_entries_.clear();
RuntimeCallStats* rcs = isolate_->counters()->runtime_call_stats();
CodeMap* code_map = generator_->code_map();
- for (int i = 0; i < RuntimeCallStats::counters_count; ++i) {
- RuntimeCallCounter* counter = &(rcs->*(RuntimeCallStats::counters[i]));
+ for (int i = 0; i < RuntimeCallStats::kNumberOfCounters; ++i) {
+ RuntimeCallCounter* counter = rcs->GetCounter(i);
DCHECK(counter->name());
std::unique_ptr<CodeEntry> entry(
new CodeEntry(CodeEventListener::FUNCTION_TAG, counter->name(),
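
The profiler loop above stops indexing an exposed array of pointers-to-member and asks RuntimeCallStats for each counter by index. The encapsulation pattern in miniature (class layout and size here are assumptions):

```cpp
struct RuntimeCallCounter {
  const char* name;
  long count;
};

class RuntimeCallStats {
 public:
  static const int kNumberOfCounters = 4;  // illustrative size

  // Callers iterate by index; the storage layout stays private.
  RuntimeCallCounter* GetCounter(int counter_id) {
    return &counters_[counter_id];
  }

 private:
  RuntimeCallCounter counters_[kNumberOfCounters];
};
```
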
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.cc b/deps/v8/src/profiler/heap-snapshot-generator.cc
index 6f3a952d1f..40779d9e5f 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.cc
+++ b/deps/v8/src/profiler/heap-snapshot-generator.cc
@@ -1905,6 +1905,9 @@ const char* V8HeapExplorer::GetStrongGcSubrootName(Object* object) {
#define STRUCT_MAP_NAME(NAME, Name, name) NAME_ENTRY(name##_map)
STRUCT_LIST(STRUCT_MAP_NAME)
#undef STRUCT_MAP_NAME
+#define DATA_HANDLER_MAP_NAME(NAME, Name, Size, name) NAME_ENTRY(name##_map)
+ DATA_HANDLER_LIST(DATA_HANDLER_MAP_NAME)
+#undef DATA_HANDLER_MAP_NAME
#define STRING_NAME(name, str) NAME_ENTRY(name)
INTERNALIZED_STRING_LIST(STRING_NAME)
#undef STRING_NAME
@@ -2732,10 +2735,10 @@ void HeapSnapshotJSONSerializer::SerializeSnapshot() {
static void WriteUChar(OutputStreamWriter* w, unibrow::uchar u) {
static const char hex_chars[] = "0123456789ABCDEF";
w->AddString("\\u");
- w->AddCharacter(hex_chars[(u >> 12) & 0xf]);
- w->AddCharacter(hex_chars[(u >> 8) & 0xf]);
- w->AddCharacter(hex_chars[(u >> 4) & 0xf]);
- w->AddCharacter(hex_chars[u & 0xf]);
+ w->AddCharacter(hex_chars[(u >> 12) & 0xF]);
+ w->AddCharacter(hex_chars[(u >> 8) & 0xF]);
+ w->AddCharacter(hex_chars[(u >> 4) & 0xF]);
+ w->AddCharacter(hex_chars[u & 0xF]);
}
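
Beyond the nibble-mask case change, the escaping logic above is simple enough to state standalone: each 4-bit nibble of a 16-bit code unit picks one upper-case hex digit. A self-contained sketch:

```cpp
#include <cstdint>
#include <string>

// Produce the JSON \uXXXX escape for one UTF-16 code unit.
std::string EscapeUChar(uint16_t u) {
  static const char kHexChars[] = "0123456789ABCDEF";
  std::string out = "\\u";
  out += kHexChars[(u >> 12) & 0xF];  // highest nibble first
  out += kHexChars[(u >> 8) & 0xF];
  out += kHexChars[(u >> 4) & 0xF];
  out += kHexChars[u & 0xF];
  return out;
}
```
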
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.h b/deps/v8/src/profiler/heap-snapshot-generator.h
index 070432225a..2dacd5a9fe 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.h
+++ b/deps/v8/src/profiler/heap-snapshot-generator.h
@@ -12,6 +12,7 @@
#include "include/v8-profiler.h"
#include "src/base/platform/time.h"
#include "src/objects.h"
+#include "src/objects/fixed-array.h"
#include "src/profiler/strings-storage.h"
#include "src/string-hasher.h"
#include "src/visitors.h"
@@ -26,6 +27,8 @@ class HeapIterator;
class HeapProfiler;
class HeapSnapshot;
class JSArrayBuffer;
+class JSCollection;
+class JSWeakCollection;
class SnapshotFiller;
class HeapGraphEdge BASE_EMBEDDED {
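
The NAME_ENTRY hunk above extends V8's X-macro idiom: a list macro (DATA_HANDLER_LIST) is instantiated with a short-lived client macro to stamp out one entry per item, then the client macro is undefined. The idiom in miniature, with a made-up list:

```cpp
#include <cstdio>

// An X-macro list: each entry supplies the fields a client macro consumes.
#define COLOR_LIST(V) \
  V(RED, red)         \
  V(GREEN, green)     \
  V(BLUE, blue)

void PrintColorNames() {
#define PRINT_COLOR(NAME, name) std::printf("%s\n", #name);
  COLOR_LIST(PRINT_COLOR)
#undef PRINT_COLOR
}
```
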
diff --git a/deps/v8/src/profiler/profile-generator.cc b/deps/v8/src/profiler/profile-generator.cc
index 9570c77dd2..bb6ede6d95 100644
--- a/deps/v8/src/profiler/profile-generator.cc
+++ b/deps/v8/src/profiler/profile-generator.cc
@@ -332,14 +332,6 @@ ProfileNode* ProfileTree::AddPathFromEnd(const std::vector<CodeEntry*>& path,
}
-struct NodesPair {
- NodesPair(ProfileNode* src, ProfileNode* dst)
- : src(src), dst(dst) { }
- ProfileNode* src;
- ProfileNode* dst;
-};
-
-
class Position {
public:
explicit Position(ProfileNode* node)
diff --git a/deps/v8/src/profiler/profiler-listener.cc b/deps/v8/src/profiler/profiler-listener.cc
index fecfdb66b0..bd2f158e60 100644
--- a/deps/v8/src/profiler/profiler-listener.cc
+++ b/deps/v8/src/profiler/profiler-listener.cc
@@ -259,7 +259,7 @@ void ProfilerListener::RecordDeoptInlinedFrames(CodeEntry* entry,
DCHECK(last_position.IsKnown());
std::vector<CpuProfileDeoptFrame> inlined_frames;
for (SourcePositionInfo& pos_info : last_position.InliningStack(code)) {
- DCHECK_NE(pos_info.position.ScriptOffset(), kNoSourcePosition);
+ if (pos_info.position.ScriptOffset() == kNoSourcePosition) continue;
if (!pos_info.function->script()->IsScript()) continue;
int script_id = Script::cast(pos_info.function->script())->id();
size_t offset = static_cast<size_t>(pos_info.position.ScriptOffset());
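
The listener change above downgrades a hard DCHECK to a skip: inlined frames with an unknown script offset are now filtered out instead of crashing debug builds. The general filter-instead-of-assert pattern, standalone (types are stand-ins):

```cpp
#include <vector>

const int kNoSourcePosition = -1;

struct FrameInfo {
  int script_offset;
};

// Keep only frames with a known source position rather than asserting.
std::vector<FrameInfo> FilterKnownFrames(const std::vector<FrameInfo>& in) {
  std::vector<FrameInfo> out;
  for (const FrameInfo& f : in) {
    if (f.script_offset == kNoSourcePosition) continue;  // tolerate, skip
    out.push_back(f);
  }
  return out;
}
```
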
diff --git a/deps/v8/src/profiler/sampling-heap-profiler.cc b/deps/v8/src/profiler/sampling-heap-profiler.cc
index 51fe8866fa..fef21550ec 100644
--- a/deps/v8/src/profiler/sampling-heap-profiler.cc
+++ b/deps/v8/src/profiler/sampling-heap-profiler.cc
@@ -66,24 +66,15 @@ SamplingHeapProfiler::SamplingHeapProfiler(
rate_(rate),
flags_(flags) {
CHECK_GT(rate_, 0u);
- heap->new_space()->AddAllocationObserver(new_space_observer_.get());
- AllSpaces spaces(heap);
- for (Space* space = spaces.next(); space != nullptr; space = spaces.next()) {
- if (space != heap->new_space()) {
- space->AddAllocationObserver(other_spaces_observer_.get());
- }
- }
+
+ heap_->AddAllocationObserversToAllSpaces(other_spaces_observer_.get(),
+ new_space_observer_.get());
}
SamplingHeapProfiler::~SamplingHeapProfiler() {
- heap_->new_space()->RemoveAllocationObserver(new_space_observer_.get());
- AllSpaces spaces(heap_);
- for (Space* space = spaces.next(); space != nullptr; space = spaces.next()) {
- if (space != heap_->new_space()) {
- space->RemoveAllocationObserver(other_spaces_observer_.get());
- }
- }
+ heap_->RemoveAllocationObserversFromAllSpaces(other_spaces_observer_.get(),
+ new_space_observer_.get());
for (auto sample : samples_) {
delete sample;
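
The constructor and destructor above now delegate to Heap helpers rather than iterating spaces inline. A hedged reconstruction of what such a helper does, reusing the iteration style of the removed code (the helper itself is inferred from the call sites, not shown in this patch):

```cpp
void Heap::AddAllocationObserversToAllSpaces(
    AllocationObserver* observer, AllocationObserver* new_space_observer) {
  AllSpaces spaces(this);
  for (Space* space = spaces.next(); space != nullptr;
       space = spaces.next()) {
    // New space gets its own observer; every other space shares one.
    if (space == new_space()) {
      space->AddAllocationObserver(new_space_observer);
    } else {
      space->AddAllocationObserver(observer);
    }
  }
}
```
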
diff --git a/deps/v8/src/profiler/tick-sample.cc b/deps/v8/src/profiler/tick-sample.cc
index f4ca28c19f..44bf9af3d1 100644
--- a/deps/v8/src/profiler/tick-sample.cc
+++ b/deps/v8/src/profiler/tick-sample.cc
@@ -34,23 +34,23 @@ bool IsNoFrameRegion(i::Address address) {
#if V8_HOST_ARCH_IA32
// push %ebp
// mov %esp,%ebp
- {3, {0x55, 0x89, 0xe5}, {0, 1, -1}},
+ {3, {0x55, 0x89, 0xE5}, {0, 1, -1}},
// pop %ebp
// ret N
- {2, {0x5d, 0xc2}, {0, 1, -1}},
+ {2, {0x5D, 0xC2}, {0, 1, -1}},
// pop %ebp
// ret
- {2, {0x5d, 0xc3}, {0, 1, -1}},
+ {2, {0x5D, 0xC3}, {0, 1, -1}},
#elif V8_HOST_ARCH_X64
// pushq %rbp
// movq %rsp,%rbp
- {4, {0x55, 0x48, 0x89, 0xe5}, {0, 1, -1}},
+ {4, {0x55, 0x48, 0x89, 0xE5}, {0, 1, -1}},
// popq %rbp
// ret N
- {2, {0x5d, 0xc2}, {0, 1, -1}},
+ {2, {0x5D, 0xC2}, {0, 1, -1}},
// popq %rbp
// ret
- {2, {0x5d, 0xc3}, {0, 1, -1}},
+ {2, {0x5D, 0xC3}, {0, 1, -1}},
#endif
{0, {}, {}}
};
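
The table above lists x86/x64 prologue and epilogue encodings (0x55 = push %ebp/%rbp, 0x5D = pop, 0xC2/0xC3 = ret) that mark frameless regions around a sampled PC. A minimal sketch of scanning such a table, with the per-pattern offset list of the real code omitted:

```cpp
#include <cstdint>
#include <cstring>

struct CodePattern {
  int length;        // number of significant bytes; 0 terminates the table
  uint8_t bytes[8];  // the machine-code sequence to match
};

// True if the bytes at pc exactly match any entry of a pattern table.
bool MatchesAnyPattern(const uint8_t* pc, const CodePattern* patterns) {
  for (const CodePattern* p = patterns; p->length != 0; ++p) {
    if (std::memcmp(pc, p->bytes, p->length) == 0) return true;
  }
  return false;
}
```
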
diff --git a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
index 2e6425568b..5f9d3905a3 100644
--- a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
@@ -86,8 +86,7 @@ namespace internal {
* bool direct_call = false,
* Isolate* isolate);
* The call is performed by NativeRegExpMacroAssembler::Execute()
- * (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
- * in arm/simulator-arm.h.
+ * (in regexp-macro-assembler.cc) via the GeneratedCode wrapper.
*/
#define __ ACCESS_MASM(masm_)
@@ -506,12 +505,12 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
Label success;
__ cmp(current_character(), Operand(' '));
__ b(eq, &success);
- // Check range 0x09..0x0d
+ // Check range 0x09..0x0D
__ sub(r0, current_character(), Operand('\t'));
__ cmp(r0, Operand('\r' - '\t'));
__ b(ls, &success);
// \u00a0 (NBSP).
- __ cmp(r0, Operand(0x00a0 - '\t'));
+ __ cmp(r0, Operand(0x00A0 - '\t'));
BranchOrBacktrack(ne, on_no_match);
__ bind(&success);
return true;
@@ -533,37 +532,37 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
BranchOrBacktrack(ls, on_no_match);
return true;
case '.': {
- // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+ // Match non-newlines (not 0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029)
__ eor(r0, current_character(), Operand(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ sub(r0, r0, Operand(0x0b));
- __ cmp(r0, Operand(0x0c - 0x0b));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C
+ __ sub(r0, r0, Operand(0x0B));
+ __ cmp(r0, Operand(0x0C - 0x0B));
BranchOrBacktrack(ls, on_no_match);
if (mode_ == UC16) {
// Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ sub(r0, r0, Operand(0x2028 - 0x0b));
+ // computed (current_char ^ 0x01 - 0x0B). I.e., check for
+ // 0x201D (0x2028 - 0x0B) or 0x201E.
+ __ sub(r0, r0, Operand(0x2028 - 0x0B));
__ cmp(r0, Operand(1));
BranchOrBacktrack(ls, on_no_match);
}
return true;
}
case 'n': {
- // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+ // Match newlines (0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029)
__ eor(r0, current_character(), Operand(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ sub(r0, r0, Operand(0x0b));
- __ cmp(r0, Operand(0x0c - 0x0b));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C
+ __ sub(r0, r0, Operand(0x0B));
+ __ cmp(r0, Operand(0x0C - 0x0B));
if (mode_ == LATIN1) {
BranchOrBacktrack(hi, on_no_match);
} else {
Label done;
__ b(ls, &done);
// Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ sub(r0, r0, Operand(0x2028 - 0x0b));
+ // computed (current_char ^ 0x01 - 0x0B). I.e., check for
+ // 0x201D (0x2028 - 0x0B) or 0x201E.
+ __ sub(r0, r0, Operand(0x2028 - 0x0B));
__ cmp(r0, Operand(1));
BranchOrBacktrack(hi, on_no_match);
__ bind(&done);
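
The comment updates above all describe the same newline trick: XOR-ing the character with 0x01 maps '\n' (0x0A) and '\r' (0x0D) to the adjacent pair 0x0B/0x0C, and U+2028/U+2029 to each other, so each pair needs only one subtract-and-compare. The same test in portable C++:

```cpp
#include <cstdint>

// Branch-light newline test: one XOR folds '\n'/'\r' (and U+2028/U+2029)
// into adjacent pairs, so each pair needs a single range check.
bool IsRegExpNewline(uint16_t c) {
  uint16_t x = c ^ 0x01;  // 0x0A <-> 0x0B, 0x0D <-> 0x0C
  if (static_cast<uint16_t>(x - 0x0B) <= 0x0C - 0x0B) return true;
  // U+2028/U+2029 fold onto each other; reuse the subtract trick.
  return static_cast<uint16_t>(x - 0x2028) <= 1;
}
```
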
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
index 558ee673f1..5f77ff4021 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
@@ -96,8 +96,7 @@ namespace internal {
* bool direct_call = false,
* Isolate* isolate);
* The call is performed by NativeRegExpMacroAssembler::Execute()
- * (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
- * in arm64/simulator-arm64.h.
+ * (in regexp-macro-assembler.cc) via the GeneratedCode wrapper.
*/
#define __ ACCESS_MASM(masm_)
@@ -116,7 +115,6 @@ RegExpMacroAssemblerARM64::RegExpMacroAssemblerARM64(Isolate* isolate,
success_label_(),
backtrack_label_(),
exit_label_() {
- __ SetStackPointer(csp);
DCHECK_EQ(0, registers_to_save % 2);
// We can cache at most 16 W registers in x0-x7.
STATIC_ASSERT(kNumCachedRegisters <= 16);
@@ -366,7 +364,7 @@ void RegExpMacroAssemblerARM64::CheckNotBackReferenceIgnoreCase(
__ Cmp(current_input_offset().X(), Operand(current_input_offset(), SXTW));
__ Ccmp(current_input_offset(), 0, NoFlag, eq);
// The current input offset should be <= 0, and fit in a W register.
- __ Check(le, kOffsetOutOfRange);
+ __ Check(le, AbortReason::kOffsetOutOfRange);
}
} else {
DCHECK(mode_ == UC16);
@@ -503,7 +501,7 @@ void RegExpMacroAssemblerARM64::CheckNotBackReference(int start_reg,
__ Cmp(current_input_offset().X(), Operand(current_input_offset(), SXTW));
__ Ccmp(current_input_offset(), 0, NoFlag, eq);
// The current input offset should be <= 0, and fit in a W register.
- __ Check(le, kOffsetOutOfRange);
+ __ Check(le, AbortReason::kOffsetOutOfRange);
}
__ Bind(&fallthrough);
}
@@ -588,11 +586,11 @@ bool RegExpMacroAssemblerARM64::CheckSpecialCharacterClass(uc16 type,
if (mode_ == LATIN1) {
// One byte space characters are '\t'..'\r', ' ' and \u00a0.
Label success;
- // Check for ' ' or 0x00a0.
+ // Check for ' ' or 0x00A0.
__ Cmp(current_character(), ' ');
- __ Ccmp(current_character(), 0x00a0, ZFlag, ne);
+ __ Ccmp(current_character(), 0x00A0, ZFlag, ne);
__ B(eq, &success);
- // Check range 0x09..0x0d.
+ // Check range 0x09..0x0D.
__ Sub(w10, current_character(), '\t');
CompareAndBranchOrBacktrack(w10, '\r' - '\t', hi, on_no_match);
__ Bind(&success);
@@ -613,12 +611,12 @@ bool RegExpMacroAssemblerARM64::CheckSpecialCharacterClass(uc16 type,
CompareAndBranchOrBacktrack(w10, '9' - '0', ls, on_no_match);
return true;
case '.': {
- // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+ // Match non-newlines (not 0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029)
// Here we emit the conditional branch only once at the end to make branch
// prediction more efficient, even though we could branch out of here
// as soon as a character matches.
- __ Cmp(current_character(), 0x0a);
- __ Ccmp(current_character(), 0x0d, ZFlag, ne);
+ __ Cmp(current_character(), 0x0A);
+ __ Ccmp(current_character(), 0x0D, ZFlag, ne);
if (mode_ == UC16) {
__ Sub(w10, current_character(), 0x2028);
// If the Z flag was set we clear the flags to force a branch.
@@ -631,11 +629,11 @@ bool RegExpMacroAssemblerARM64::CheckSpecialCharacterClass(uc16 type,
return true;
}
case 'n': {
- // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+ // Match newlines (0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029)
// We have to check all 4 newline characters before emitting
// the conditional branch.
- __ Cmp(current_character(), 0x0a);
- __ Ccmp(current_character(), 0x0d, ZFlag, ne);
+ __ Cmp(current_character(), 0x0A);
+ __ Ccmp(current_character(), 0x0D, ZFlag, ne);
if (mode_ == UC16) {
__ Sub(w10, current_character(), 0x2028);
// If the Z flag was set we clear the flags to force a fall-through.
@@ -791,7 +789,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
// Check that the size of the input string chars is in range.
__ Neg(x11, x10);
__ Cmp(x11, SeqTwoByteString::kMaxCharsSize);
- __ Check(ls, kInputStringTooLong);
+ __ Check(ls, AbortReason::kInputStringTooLong);
}
__ Mov(current_input_offset(), w10);
@@ -855,7 +853,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
if (masm_->emit_debug_code()) {
// Check that the size of the input string chars is in range.
__ Cmp(x10, SeqTwoByteString::kMaxCharsSize);
- __ Check(ls, kInputStringTooLong);
+ __ Check(ls, AbortReason::kInputStringTooLong);
}
// input_start has a start_offset offset on entry. We need to include
// it when computing the length of the whole string.
@@ -1158,7 +1156,7 @@ void RegExpMacroAssemblerARM64::PushBacktrack(Label* label) {
if (masm_->emit_debug_code()) {
__ Cmp(x10, kWRegMask);
// The code offset has to fit in a W register.
- __ Check(ls, kOffsetOutOfRange);
+ __ Check(ls, AbortReason::kOffsetOutOfRange);
}
}
Push(w10);
@@ -1314,7 +1312,7 @@ void RegExpMacroAssemblerARM64::WriteStackPointerToRegister(int reg) {
if (masm_->emit_debug_code()) {
__ Cmp(x10, Operand(w10, SXTW));
// The stack offset needs to fit in a W register.
- __ Check(eq, kOffsetOutOfRange);
+ __ Check(eq, AbortReason::kOffsetOutOfRange);
}
StoreRegister(reg, w10);
}
@@ -1623,7 +1621,7 @@ void RegExpMacroAssemblerARM64::LoadCurrentCharacterUnchecked(int cp_offset,
__ Add(x10, x10, Operand(current_input_offset(), SXTW));
__ Cmp(x10, Operand(w10, SXTW));
// The offset needs to fit in a W register.
- __ Check(eq, kOffsetOutOfRange);
+ __ Check(eq, AbortReason::kOffsetOutOfRange);
} else {
__ Add(w10, current_input_offset(), cp_offset * char_size());
}
diff --git a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
index 99d1466f54..cb240d6c67 100644
--- a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
@@ -531,12 +531,12 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
Label success;
__ cmp(current_character(), ' ');
__ j(equal, &success, Label::kNear);
- // Check range 0x09..0x0d
+ // Check range 0x09..0x0D
__ lea(eax, Operand(current_character(), -'\t'));
__ cmp(eax, '\r' - '\t');
__ j(below_equal, &success, Label::kNear);
// \u00a0 (NBSP).
- __ cmp(eax, 0x00a0 - '\t');
+ __ cmp(eax, 0x00A0 - '\t');
BranchOrBacktrack(not_equal, on_no_match);
__ bind(&success);
return true;
@@ -558,18 +558,18 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
BranchOrBacktrack(below_equal, on_no_match);
return true;
case '.': {
- // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+ // Match non-newlines (not 0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029)
__ mov(eax, current_character());
__ xor_(eax, Immediate(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ sub(eax, Immediate(0x0b));
- __ cmp(eax, 0x0c - 0x0b);
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C
+ __ sub(eax, Immediate(0x0B));
+ __ cmp(eax, 0x0C - 0x0B);
BranchOrBacktrack(below_equal, on_no_match);
if (mode_ == UC16) {
// Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ sub(eax, Immediate(0x2028 - 0x0b));
+ // computed (current_char ^ 0x01 - 0x0B). I.e., check for
+ // 0x201D (0x2028 - 0x0B) or 0x201E.
+ __ sub(eax, Immediate(0x2028 - 0x0B));
__ cmp(eax, 0x2029 - 0x2028);
BranchOrBacktrack(below_equal, on_no_match);
}
@@ -610,13 +610,13 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
// Match any character.
return true;
case 'n': {
- // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 or 0x2029).
+ // Match newlines (0x0A('\n'), 0x0D('\r'), 0x2028 or 0x2029).
// The opposite of '.'.
__ mov(eax, current_character());
__ xor_(eax, Immediate(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ sub(eax, Immediate(0x0b));
- __ cmp(eax, 0x0c - 0x0b);
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C
+ __ sub(eax, Immediate(0x0B));
+ __ cmp(eax, 0x0C - 0x0B);
if (mode_ == LATIN1) {
BranchOrBacktrack(above, on_no_match);
} else {
@@ -624,9 +624,9 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
BranchOrBacktrack(below_equal, &done);
DCHECK_EQ(UC16, mode_);
// Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ sub(eax, Immediate(0x2028 - 0x0b));
+ // computed (current_char ^ 0x01 - 0x0B). I.e., check for
+ // 0x201D (0x2028 - 0x0B) or 0x201E.
+ __ sub(eax, Immediate(0x2028 - 0x0B));
__ cmp(eax, 1);
BranchOrBacktrack(above, on_no_match);
__ bind(&done);
diff --git a/deps/v8/src/regexp/jsregexp.cc b/deps/v8/src/regexp/jsregexp.cc
index 9d56e4cfa3..a26a1d77ce 100644
--- a/deps/v8/src/regexp/jsregexp.cc
+++ b/deps/v8/src/regexp/jsregexp.cc
@@ -98,12 +98,36 @@ ContainedInLattice AddRange(ContainedInLattice containment,
return containment;
}
-// Generic RegExp methods. Dispatches to implementation specific methods.
-
+// A larger value makes code generation slower; a smaller one lowers the
+// V8 benchmark score.
+const int kMaxLookaheadForBoyerMoore = 8;
// In a 3-character pattern you can maximally step forwards 3 characters
// at a time, which is not always enough to pay for the extra logic.
const int kPatternTooShortForBoyerMoore = 2;
+// Identifies the sort of regexps where the regexp engine is faster
+// than the code used for atom matches.
+static bool HasFewDifferentCharacters(Handle<String> pattern) {
+ int length = Min(kMaxLookaheadForBoyerMoore, pattern->length());
+ if (length <= kPatternTooShortForBoyerMoore) return false;
+ const int kMod = 128;
+ bool character_found[kMod];
+ int different = 0;
+ memset(&character_found[0], 0, sizeof(character_found));
+ for (int i = 0; i < length; i++) {
+ int ch = (pattern->Get(i) & (kMod - 1));
+ if (!character_found[ch]) {
+ character_found[ch] = true;
+ different++;
+ // We declare a regexp low-alphabet if it has at least 3 times as many
+ // characters as it has different characters.
+ if (different * 3 > length) return false;
+ }
+ }
+ return true;
+}
+
+// Generic RegExp methods. Dispatches to implementation specific methods.
+
MaybeHandle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
Handle<String> pattern,
JSRegExp::Flags flags) {
@@ -133,7 +157,7 @@ MaybeHandle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
bool has_been_compiled = false;
if (parse_result.simple && !IgnoreCase(flags) && !IsSticky(flags) &&
- pattern->length() <= kPatternTooShortForBoyerMoore) {
+ !HasFewDifferentCharacters(pattern)) {
// Parse-tree is a single atom that is equal to the pattern.
AtomCompile(re, pattern, flags, pattern);
has_been_compiled = true;
@@ -141,12 +165,11 @@ MaybeHandle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
parse_result.capture_count == 0) {
RegExpAtom* atom = parse_result.tree->AsAtom();
Vector<const uc16> atom_pattern = atom->data();
- if (!IgnoreCase(atom->flags()) &&
- atom_pattern.length() <= kPatternTooShortForBoyerMoore) {
- Handle<String> atom_string;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, atom_string,
- isolate->factory()->NewStringFromTwoByte(atom_pattern), Object);
+ Handle<String> atom_string;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, atom_string,
+ isolate->factory()->NewStringFromTwoByte(atom_pattern), Object);
+ if (!IgnoreCase(atom->flags()) && !HasFewDifferentCharacters(atom_string)) {
AtomCompile(re, pattern, flags, atom_string);
has_been_compiled = true;
}
@@ -2433,8 +2456,8 @@ bool RegExpNode::EmitQuickCheck(RegExpCompiler* compiler,
} else {
// For 2-character preloads in one-byte mode or 1-character preloads in
// two-byte mode we also use a 16 bit load with zero extend.
- static const uint32_t kTwoByteMask = 0xffff;
- static const uint32_t kFourByteMask = 0xffffffff;
+ static const uint32_t kTwoByteMask = 0xFFFF;
+ static const uint32_t kFourByteMask = 0xFFFFFFFF;
if (details->characters() == 2 && compiler->one_byte()) {
if ((mask & kTwoByteMask) == kTwoByteMask) need_mask = false;
} else if (details->characters() == 1 && !compiler->one_byte()) {
@@ -2554,6 +2577,7 @@ void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
details->positions(characters_filled_in);
RegExpCharacterClass* tree = elm.char_class();
ZoneList<CharacterRange>* ranges = tree->ranges(zone());
+ DCHECK(!ranges->is_empty());
if (tree->is_negated()) {
// A quick check uses multi-character mask and compare. There is no
// useful way to incorporate a negative char class into this scheme
@@ -2716,12 +2740,11 @@ RegExpNode* SeqRegExpNode::FilterSuccessor(int depth) {
return set_replacement(this);
}
-
-// We need to check for the following characters: 0x39c 0x3bc 0x178.
+// We need to check for the following characters: 0x39C 0x3BC 0x178.
static inline bool RangeContainsLatin1Equivalents(CharacterRange range) {
// TODO(dcarney): this could be a lot more efficient.
- return range.Contains(0x39c) ||
- range.Contains(0x3bc) || range.Contains(0x178);
+ return range.Contains(0x039C) || range.Contains(0x03BC) ||
+ range.Contains(0x0178);
}
@@ -2973,7 +2996,7 @@ static void EmitHat(RegExpCompiler* compiler,
new_trace.backtrack())) {
// Newline means \n, \r, 0x2028 or 0x2029.
if (!compiler->one_byte()) {
- assembler->CheckCharacterAfterAnd(0x2028, 0xfffe, &ok);
+ assembler->CheckCharacterAfterAnd(0x2028, 0xFFFE, &ok);
}
assembler->CheckCharacter('\n', &ok);
assembler->CheckNotCharacter('\r', new_trace.backtrack());
@@ -2982,8 +3005,6 @@ static void EmitHat(RegExpCompiler* compiler,
on_success->Emit(compiler, &new_trace);
}
-// More makes code generation slower, less makes V8 benchmark score lower.
-const int kMaxLookaheadForBoyerMoore = 8;
// Emit the code to handle \b and \B (word-boundary or non-word-boundary).
void AssertionNode::EmitBoundaryCheck(RegExpCompiler* compiler, Trace* trace) {
@@ -3253,9 +3274,9 @@ TextNode* TextNode::CreateForCharacterRanges(Zone* zone,
JSRegExp::Flags flags) {
DCHECK_NOT_NULL(ranges);
ZoneList<TextElement>* elms = new (zone) ZoneList<TextElement>(1, zone);
- elms->Add(
- TextElement::CharClass(new (zone) RegExpCharacterClass(ranges, flags)),
- zone);
+ elms->Add(TextElement::CharClass(
+ new (zone) RegExpCharacterClass(zone, ranges, flags)),
+ zone);
return new (zone) TextNode(elms, read_backward, on_success);
}
@@ -3268,10 +3289,10 @@ TextNode* TextNode::CreateForSurrogatePair(Zone* zone, CharacterRange lead,
ZoneList<CharacterRange>* trail_ranges = CharacterRange::List(zone, trail);
ZoneList<TextElement>* elms = new (zone) ZoneList<TextElement>(2, zone);
elms->Add(TextElement::CharClass(
- new (zone) RegExpCharacterClass(lead_ranges, flags)),
+ new (zone) RegExpCharacterClass(zone, lead_ranges, flags)),
zone);
elms->Add(TextElement::CharClass(
- new (zone) RegExpCharacterClass(trail_ranges, flags)),
+ new (zone) RegExpCharacterClass(zone, trail_ranges, flags)),
zone);
return new (zone) TextNode(elms, read_backward, on_success);
}
@@ -5089,10 +5110,9 @@ RegExpNode* RegExpCharacterClass::ToNode(RegExpCompiler* compiler,
ranges = negated;
}
if (ranges->length() == 0) {
- JSRegExp::Flags default_flags = JSRegExp::Flags();
- ranges->Add(CharacterRange::Everything(), zone);
+ JSRegExp::Flags default_flags;
RegExpCharacterClass* fail =
- new (zone) RegExpCharacterClass(ranges, default_flags, NEGATED);
+ new (zone) RegExpCharacterClass(zone, ranges, default_flags);
return new (zone) TextNode(fail, compiler->read_backward(), on_success);
}
if (standard_type() == '*') {
@@ -5346,8 +5366,8 @@ void RegExpDisjunction::FixSingleCharacterDisjunctions(
if (IsUnicode(flags) && contains_trail_surrogate) {
character_class_flags = RegExpCharacterClass::CONTAINS_SPLIT_SURROGATE;
}
- alternatives->at(write_posn++) =
- new (zone) RegExpCharacterClass(ranges, flags, character_class_flags);
+ alternatives->at(write_posn++) = new (zone)
+ RegExpCharacterClass(zone, ranges, flags, character_class_flags);
} else {
// Just copy any trivial alternatives.
for (int j = first_in_run; j < i; j++) {
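
jsregexp.cc above adds HasFewDifferentCharacters so that repetitive "low-alphabet" atoms keep using the regexp engine, where Boyer-Moore-style skipping pays off. The same heuristic standalone, with std::string standing in for V8's Handle<String> (an assumption for illustration):

```cpp
#include <algorithm>
#include <string>

// A pattern is "low-alphabet" when its first few characters repeat a
// small set of distinct characters at least 3:1.
bool HasFewDifferentCharacters(const std::string& pattern) {
  const int kMaxLookahead = 8;  // mirrors kMaxLookaheadForBoyerMoore
  const int kTooShort = 2;      // mirrors kPatternTooShortForBoyerMoore
  int length = std::min<int>(kMaxLookahead, static_cast<int>(pattern.size()));
  if (length <= kTooShort) return false;
  const int kMod = 128;
  bool character_found[kMod] = {false};
  int different = 0;
  for (int i = 0; i < length; i++) {
    int ch = static_cast<unsigned char>(pattern[i]) & (kMod - 1);
    if (!character_found[ch]) {
      character_found[ch] = true;
      different++;
      // Bail out once the pattern has too many distinct characters.
      if (different * 3 > length) return false;
    }
  }
  return true;
}
```
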
diff --git a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
index e45eeeb492..89046a56f3 100644
--- a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
+++ b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
@@ -85,8 +85,7 @@ namespace internal {
* bool direct_call = false,
* Isolate* isolate);
* The call is performed by NativeRegExpMacroAssembler::Execute()
- * (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
- * in mips/simulator-mips.h.
+ * (in regexp-macro-assembler.cc) via the GeneratedCode wrapper.
*/
#define __ ACCESS_MASM(masm_)
@@ -509,11 +508,11 @@ bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass(uc16 type,
// One byte space characters are '\t'..'\r', ' ' and \u00a0.
Label success;
__ Branch(&success, eq, current_character(), Operand(' '));
- // Check range 0x09..0x0d.
+ // Check range 0x09..0x0D.
__ Subu(a0, current_character(), Operand('\t'));
__ Branch(&success, ls, a0, Operand('\r' - '\t'));
// \u00a0 (NBSP).
- BranchOrBacktrack(on_no_match, ne, a0, Operand(0x00a0 - '\t'));
+ BranchOrBacktrack(on_no_match, ne, a0, Operand(0x00A0 - '\t'));
__ bind(&success);
return true;
}
@@ -532,34 +531,34 @@ bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass(uc16 type,
BranchOrBacktrack(on_no_match, ls, a0, Operand('9' - '0'));
return true;
case '.': {
- // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029).
+ // Match non-newlines (not 0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029).
__ Xor(a0, current_character(), Operand(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c.
- __ Subu(a0, a0, Operand(0x0b));
- BranchOrBacktrack(on_no_match, ls, a0, Operand(0x0c - 0x0b));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C.
+ __ Subu(a0, a0, Operand(0x0B));
+ BranchOrBacktrack(on_no_match, ls, a0, Operand(0x0C - 0x0B));
if (mode_ == UC16) {
// Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ Subu(a0, a0, Operand(0x2028 - 0x0b));
+ // computed (current_char ^ 0x01 - 0x0B). I.e., check for
+ // 0x201D (0x2028 - 0x0B) or 0x201E.
+ __ Subu(a0, a0, Operand(0x2028 - 0x0B));
BranchOrBacktrack(on_no_match, ls, a0, Operand(1));
}
return true;
}
case 'n': {
- // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029).
+ // Match newlines (0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029).
__ Xor(a0, current_character(), Operand(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c.
- __ Subu(a0, a0, Operand(0x0b));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C.
+ __ Subu(a0, a0, Operand(0x0B));
if (mode_ == LATIN1) {
- BranchOrBacktrack(on_no_match, hi, a0, Operand(0x0c - 0x0b));
+ BranchOrBacktrack(on_no_match, hi, a0, Operand(0x0C - 0x0B));
} else {
Label done;
- BranchOrBacktrack(&done, ls, a0, Operand(0x0c - 0x0b));
+ BranchOrBacktrack(&done, ls, a0, Operand(0x0C - 0x0B));
// Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ Subu(a0, a0, Operand(0x2028 - 0x0b));
+ // computed (current_char ^ 0x01 - 0x0B). I.e., check for
+ // 0x201D (0x2028 - 0x0B) or 0x201E.
+ __ Subu(a0, a0, Operand(0x2028 - 0x0B));
BranchOrBacktrack(on_no_match, hi, a0, Operand(1));
__ bind(&done);
}
diff --git a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
index 68a7f87843..841b2931fe 100644
--- a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
+++ b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
@@ -120,8 +120,7 @@ namespace internal {
* bool direct_call = false,
* Isolate* isolate);
* The call is performed by NativeRegExpMacroAssembler::Execute()
- * (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
- * in mips/simulator-mips.h.
+ * (in regexp-macro-assembler.cc) via the GeneratedCode wrapper.
*
* clang-format on
*/
@@ -540,11 +539,11 @@ bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass(uc16 type,
// One byte space characters are '\t'..'\r', ' ' and \u00a0.
Label success;
__ Branch(&success, eq, current_character(), Operand(' '));
- // Check range 0x09..0x0d.
+ // Check range 0x09..0x0D.
__ Dsubu(a0, current_character(), Operand('\t'));
__ Branch(&success, ls, a0, Operand('\r' - '\t'));
// \u00a0 (NBSP).
- BranchOrBacktrack(on_no_match, ne, a0, Operand(0x00a0 - '\t'));
+ BranchOrBacktrack(on_no_match, ne, a0, Operand(0x00A0 - '\t'));
__ bind(&success);
return true;
}
@@ -563,34 +562,34 @@ bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass(uc16 type,
BranchOrBacktrack(on_no_match, ls, a0, Operand('9' - '0'));
return true;
case '.': {
- // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029).
+ // Match non-newlines (not 0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029).
__ Xor(a0, current_character(), Operand(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c.
- __ Dsubu(a0, a0, Operand(0x0b));
- BranchOrBacktrack(on_no_match, ls, a0, Operand(0x0c - 0x0b));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C.
+ __ Dsubu(a0, a0, Operand(0x0B));
+ BranchOrBacktrack(on_no_match, ls, a0, Operand(0x0C - 0x0B));
if (mode_ == UC16) {
// Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ Dsubu(a0, a0, Operand(0x2028 - 0x0b));
+ // computed (current_char ^ 0x01 - 0x0B). I.e., check for
+ // 0x201D (0x2028 - 0x0B) or 0x201E.
+ __ Dsubu(a0, a0, Operand(0x2028 - 0x0B));
BranchOrBacktrack(on_no_match, ls, a0, Operand(1));
}
return true;
}
case 'n': {
- // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029).
+ // Match newlines (0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029).
__ Xor(a0, current_character(), Operand(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c.
- __ Dsubu(a0, a0, Operand(0x0b));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C.
+ __ Dsubu(a0, a0, Operand(0x0B));
if (mode_ == LATIN1) {
- BranchOrBacktrack(on_no_match, hi, a0, Operand(0x0c - 0x0b));
+ BranchOrBacktrack(on_no_match, hi, a0, Operand(0x0C - 0x0B));
} else {
Label done;
- BranchOrBacktrack(&done, ls, a0, Operand(0x0c - 0x0b));
+ BranchOrBacktrack(&done, ls, a0, Operand(0x0C - 0x0B));
// Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ Dsubu(a0, a0, Operand(0x2028 - 0x0b));
+ // computed (current_char ^ 0x01 - 0x0B). I.e., check for
+ // 0x201D (0x2028 - 0x0B) or 0x201E.
+ __ Dsubu(a0, a0, Operand(0x2028 - 0x0B));
BranchOrBacktrack(on_no_match, hi, a0, Operand(1));
__ bind(&done);
}
diff --git a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
index bc3e643369..1187fc04b8 100644
--- a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
@@ -86,8 +86,7 @@ namespace internal {
* bool direct_call = false,
* Isolate* isolate);
* The call is performed by NativeRegExpMacroAssembler::Execute()
- * (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
- * in ppc/simulator-ppc.h.
+ * (in regexp-macro-assembler.cc) via the GeneratedCode wrapper.
*/
#define __ ACCESS_MASM(masm_)
@@ -522,12 +521,12 @@ bool RegExpMacroAssemblerPPC::CheckSpecialCharacterClass(uc16 type,
Label success;
__ cmpi(current_character(), Operand(' '));
__ beq(&success);
- // Check range 0x09..0x0d
+ // Check range 0x09..0x0D
__ subi(r3, current_character(), Operand('\t'));
__ cmpli(r3, Operand('\r' - '\t'));
__ ble(&success);
// \u00a0 (NBSP).
- __ cmpi(r3, Operand(0x00a0 - '\t'));
+ __ cmpi(r3, Operand(0x00A0 - '\t'));
BranchOrBacktrack(ne, on_no_match);
__ bind(&success);
return true;
@@ -549,37 +548,37 @@ bool RegExpMacroAssemblerPPC::CheckSpecialCharacterClass(uc16 type,
BranchOrBacktrack(le, on_no_match);
return true;
case '.': {
- // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+ // Match non-newlines (not 0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029)
__ xori(r3, current_character(), Operand(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ subi(r3, r3, Operand(0x0b));
- __ cmpli(r3, Operand(0x0c - 0x0b));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C
+ __ subi(r3, r3, Operand(0x0B));
+ __ cmpli(r3, Operand(0x0C - 0x0B));
BranchOrBacktrack(le, on_no_match);
if (mode_ == UC16) {
// Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ subi(r3, r3, Operand(0x2028 - 0x0b));
+ // computed (current_char ^ 0x01 - 0x0B). I.e., check for
+ // 0x201D (0x2028 - 0x0B) or 0x201E.
+ __ subi(r3, r3, Operand(0x2028 - 0x0B));
__ cmpli(r3, Operand(1));
BranchOrBacktrack(le, on_no_match);
}
return true;
}
case 'n': {
- // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+ // Match newlines (0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029)
__ xori(r3, current_character(), Operand(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ subi(r3, r3, Operand(0x0b));
- __ cmpli(r3, Operand(0x0c - 0x0b));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C
+ __ subi(r3, r3, Operand(0x0B));
+ __ cmpli(r3, Operand(0x0C - 0x0B));
if (mode_ == LATIN1) {
BranchOrBacktrack(gt, on_no_match);
} else {
Label done;
__ ble(&done);
// Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ subi(r3, r3, Operand(0x2028 - 0x0b));
+ // computed (current_char ^ 0x01 - 0x0B). I.e., check for
+ // 0x201D (0x2028 - 0x0B) or 0x201E.
+ __ subi(r3, r3, Operand(0x2028 - 0x0B));
__ cmpli(r3, Operand(1));
BranchOrBacktrack(gt, on_no_match);
__ bind(&done);
diff --git a/deps/v8/src/regexp/regexp-ast.h b/deps/v8/src/regexp/regexp-ast.h
index e60621f8b6..1a94832f71 100644
--- a/deps/v8/src/regexp/regexp-ast.h
+++ b/deps/v8/src/regexp/regexp-ast.h
@@ -306,11 +306,17 @@ class RegExpCharacterClass final : public RegExpTree {
typedef base::Flags<Flag> CharacterClassFlags;
RegExpCharacterClass(
- ZoneList<CharacterRange>* ranges, JSRegExp::Flags flags,
+ Zone* zone, ZoneList<CharacterRange>* ranges, JSRegExp::Flags flags,
CharacterClassFlags character_class_flags = CharacterClassFlags())
: set_(ranges),
flags_(flags),
- character_class_flags_(character_class_flags) {}
+ character_class_flags_(character_class_flags) {
+ // Convert the empty set of ranges to the negated Everything() range.
+ if (ranges->is_empty()) {
+ ranges->Add(CharacterRange::Everything(), zone);
+ character_class_flags_ ^= NEGATED;
+ }
+ }
RegExpCharacterClass(uc16 type, JSRegExp::Flags flags)
: set_(type),
flags_(flags),
@@ -352,7 +358,7 @@ class RegExpCharacterClass final : public RegExpTree {
private:
CharacterSet set_;
const JSRegExp::Flags flags_;
- const CharacterClassFlags character_class_flags_;
+ CharacterClassFlags character_class_flags_;
};
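
The constructor change above canonicalizes an empty range list into the negation of Everything(), which is what lets jsregexp.cc add its DCHECK(!ranges->is_empty()). The invariant in isolation, with simplified stand-in types:

```cpp
#include <cstdint>
#include <vector>

struct CharacterRange {
  uint32_t from;
  uint32_t to;
};

// An empty class matches nothing; represent it as NOT(everything) so that
// consumers never see an empty range list.
void NormalizeEmptyClass(std::vector<CharacterRange>* ranges, bool* negated) {
  if (ranges->empty()) {
    ranges->push_back({0x0000, 0x10FFFF});  // CharacterRange::Everything()
    *negated = !*negated;                   // flip the NEGATED flag
  }
}
```
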
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.cc b/deps/v8/src/regexp/regexp-macro-assembler.cc
index 600757a72b..af285abcb0 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler.cc
@@ -286,9 +286,15 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Execute(
Address stack_base = stack_scope.stack()->stack_base();
int direct_call = 0;
- int result = CALL_GENERATED_REGEXP_CODE(
- isolate, code->entry(), input, start_offset, input_start, input_end,
- output, output_size, stack_base, direct_call, isolate);
+
+ using RegexpMatcherSig = int(
+ String * input, int start_offset, // NOLINT(readability/casting)
+ const byte* input_start, const byte* input_end, int* output,
+ int output_size, Address stack_base, int direct_call, Isolate* isolate);
+
+ auto fn = GeneratedCode<RegexpMatcherSig>::FromCode(code);
+ int result = fn.Call(input, start_offset, input_start, input_end, output,
+ output_size, stack_base, direct_call, isolate);
DCHECK(result >= RETRY);
if (result == EXCEPTION && !isolate->has_pending_exception()) {
@@ -299,7 +305,7 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Execute(
return static_cast<Result>(result);
}
-
+// clang-format off
const byte NativeRegExpMacroAssembler::word_character_map[] = {
0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
@@ -308,18 +314,18 @@ const byte NativeRegExpMacroAssembler::word_character_map[] = {
0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
- 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // '0' - '7'
- 0xffu, 0xffu, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, // '8' - '9'
-
- 0x00u, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // 'A' - 'G'
- 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // 'H' - 'O'
- 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // 'P' - 'W'
- 0xffu, 0xffu, 0xffu, 0x00u, 0x00u, 0x00u, 0x00u, 0xffu, // 'X' - 'Z', '_'
-
- 0x00u, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // 'a' - 'g'
- 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // 'h' - 'o'
- 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // 'p' - 'w'
- 0xffu, 0xffu, 0xffu, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, // 'x' - 'z'
+ 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, // '0' - '7'
+ 0xFFu, 0xFFu, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, // '8' - '9'
+
+ 0x00u, 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, // 'A' - 'G'
+ 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, // 'H' - 'O'
+ 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, // 'P' - 'W'
+ 0xFFu, 0xFFu, 0xFFu, 0x00u, 0x00u, 0x00u, 0x00u, 0xFFu, // 'X' - 'Z', '_'
+
+ 0x00u, 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, // 'a' - 'g'
+ 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, // 'h' - 'o'
+ 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, // 'p' - 'w'
+ 0xFFu, 0xFFu, 0xFFu, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, // 'x' - 'z'
// Latin-1 range
0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
@@ -341,7 +347,7 @@ const byte NativeRegExpMacroAssembler::word_character_map[] = {
0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
};
-
+// clang-format on
Address NativeRegExpMacroAssembler::GrowStack(Address stack_pointer,
Address* stack_base,
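
word_character_map above is a 256-entry Latin-1 table in which 0xFF marks \w characters (digits, letters, underscore). A sketch of the lookup it supports (the extern declaration here is illustrative):

```cpp
#include <cstdint>

// 0xFF for \w characters, 0x00 otherwise (see the table above).
extern const uint8_t word_character_map[256];

bool IsRegExpWordCharacter(uint16_t c) {
  // Only the Latin-1 range is covered by the table.
  return c < 256 && word_character_map[c] != 0;
}
```
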
diff --git a/deps/v8/src/regexp/regexp-parser.cc b/deps/v8/src/regexp/regexp-parser.cc
index 2c6aa5b23a..a7724c5d42 100644
--- a/deps/v8/src/regexp/regexp-parser.cc
+++ b/deps/v8/src/regexp/regexp-parser.cc
@@ -280,12 +280,12 @@ RegExpTree* RegExpParser::ParseDisjunction() {
// Everything.
CharacterRange::AddClassEscape('*', ranges, false, zone());
} else {
- // Everything except \x0a, \x0d, \u2028 and \u2029
+ // Everything except \x0A, \x0D, \u2028 and \u2029
CharacterRange::AddClassEscape('.', ranges, false, zone());
}
RegExpCharacterClass* cc =
- new (zone()) RegExpCharacterClass(ranges, builder->flags());
+ new (zone()) RegExpCharacterClass(zone(), ranges, builder->flags());
builder->AddCharacterClass(cc);
break;
}
@@ -332,8 +332,8 @@ RegExpTree* RegExpParser::ParseDisjunction() {
new (zone()) ZoneList<CharacterRange>(2, zone());
CharacterRange::AddClassEscape(
c, ranges, unicode() && builder->ignore_case(), zone());
- RegExpCharacterClass* cc =
- new (zone()) RegExpCharacterClass(ranges, builder->flags());
+ RegExpCharacterClass* cc = new (zone())
+ RegExpCharacterClass(zone(), ranges, builder->flags());
builder->AddCharacterClass(cc);
break;
}
@@ -348,8 +348,8 @@ RegExpTree* RegExpParser::ParseDisjunction() {
if (!ParsePropertyClass(ranges, p == 'P')) {
return ReportError(CStrVector("Invalid property name"));
}
- RegExpCharacterClass* cc =
- new (zone()) RegExpCharacterClass(ranges, builder->flags());
+ RegExpCharacterClass* cc = new (zone())
+ RegExpCharacterClass(zone(), ranges, builder->flags());
builder->AddCharacterClass(cc);
} else {
// With /u, no identity escapes except for syntax characters
@@ -451,7 +451,7 @@ RegExpTree* RegExpParser::ParseDisjunction() {
builder->AddCharacter('\\');
} else {
Advance(2);
- builder->AddCharacter(controlLetter & 0x1f);
+ builder->AddCharacter(controlLetter & 0x1F);
}
break;
}
@@ -1145,7 +1145,7 @@ bool RegExpParser::ParseUnicodeEscape(uc32* value) {
if (current() == '{' && unicode()) {
int start = position();
Advance();
- if (ParseUnlimitedLengthHexNumber(0x10ffff, value)) {
+ if (ParseUnlimitedLengthHexNumber(0x10FFFF, value)) {
if (current() == '}') {
Advance();
return true;
@@ -1255,10 +1255,15 @@ bool LookupSpecialPropertyValueName(const char* name,
ZoneList<CharacterRange>* result,
bool negate, Zone* zone) {
if (NameEquals(name, "Any")) {
- if (!negate) result->Add(CharacterRange::Everything(), zone);
+ if (negate) {
+ // Leave the list of character ranges empty, since the negation of 'Any'
+ // is the empty set.
+ } else {
+ result->Add(CharacterRange::Everything(), zone);
+ }
} else if (NameEquals(name, "ASCII")) {
result->Add(negate ? CharacterRange::Range(0x80, String::kMaxCodePoint)
- : CharacterRange::Range(0x0, 0x7f),
+ : CharacterRange::Range(0x0, 0x7F),
zone);
} else if (NameEquals(name, "Assigned")) {
return LookupPropertyValueName(UCHAR_GENERAL_CATEGORY, "Unassigned",
@@ -1486,8 +1491,8 @@ uc32 RegExpParser::ParseClassCharacterEscape() {
if (letter >= 'A' && letter <= 'Z') {
Advance(2);
// Control letters mapped to ASCII control characters in the range
- // 0x00-0x1f.
- return controlLetter & 0x1f;
+ // 0x00-0x1F.
+ return controlLetter & 0x1F;
}
if (unicode()) {
// With /u, invalid escapes are not treated as identity escapes.
@@ -1497,7 +1502,7 @@ uc32 RegExpParser::ParseClassCharacterEscape() {
if ((controlLetter >= '0' && controlLetter <= '9') ||
controlLetter == '_') {
Advance(2);
- return controlLetter & 0x1f;
+ return controlLetter & 0x1F;
}
// We match JSC in reading the backslash as a literal
// character instead of as starting an escape.
@@ -1672,14 +1677,10 @@ RegExpTree* RegExpParser::ParseCharacterClass(const RegExpBuilder* builder) {
return ReportError(CStrVector(kUnterminated));
}
Advance();
- if (ranges->length() == 0) {
- ranges->Add(CharacterRange::Everything(), zone());
- is_negated = !is_negated;
- }
RegExpCharacterClass::CharacterClassFlags character_class_flags;
if (is_negated) character_class_flags = RegExpCharacterClass::NEGATED;
- return new (zone())
- RegExpCharacterClass(ranges, builder->flags(), character_class_flags);
+ return new (zone()) RegExpCharacterClass(zone(), ranges, builder->flags(),
+ character_class_flags);
}
@@ -1853,7 +1854,8 @@ void RegExpBuilder::AddCharacterClass(RegExpCharacterClass* cc) {
void RegExpBuilder::AddCharacterClassForDesugaring(uc32 c) {
AddTerm(new (zone()) RegExpCharacterClass(
- CharacterRange::List(zone(), CharacterRange::Singleton(c)), flags_));
+ zone(), CharacterRange::List(zone(), CharacterRange::Singleton(c)),
+ flags_));
}
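
regexp-parser.cc above drops the parser-side empty-class fixup (now done by the RegExpCharacterClass constructor) and lets \P{Any} produce an empty range list. The 'Any' branch in isolation, with the same stand-in types as before:

```cpp
#include <cstdint>
#include <vector>

struct CharacterRange {
  uint32_t from;
  uint32_t to;
};

void AddAnyPropertyRanges(std::vector<CharacterRange>* result, bool negate) {
  if (!negate) {
    result->push_back({0x0000, 0x10FFFF});  // 'Any' = everything
  }
  // negate == true: \P{Any} matches nothing, so the list stays empty and
  // the character-class constructor canonicalizes it later.
}
```
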
diff --git a/deps/v8/src/regexp/regexp-utils.cc b/deps/v8/src/regexp/regexp-utils.cc
index 16427e2933..d483125dd6 100644
--- a/deps/v8/src/regexp/regexp-utils.cc
+++ b/deps/v8/src/regexp/regexp-utils.cc
@@ -134,7 +134,7 @@ bool RegExpUtils::IsUnmodifiedRegExp(Isolate* isolate, Handle<Object> obj) {
// TODO(ishell): Update this check once map changes for constant field
// tracking are landing.
-#if defined(DEBUG) || defined(ENABLE_SLOWFAST_SWITCH)
+#ifdef V8_ENABLE_FORCE_SLOW_PATH
if (isolate->force_slow_path()) return false;
#endif
diff --git a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
index fc9548fc78..4f8f234171 100644
--- a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
+++ b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
@@ -88,8 +88,7 @@ namespace internal {
* bool direct_call = false,
* Isolate* isolate);
* The call is performed by NativeRegExpMacroAssembler::Execute()
- * (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
- * in s390/simulator-s390.h.
+ * (in regexp-macro-assembler.cc) via the GeneratedCode wrapper.
*/
#define __ ACCESS_MASM(masm_)
@@ -493,12 +492,12 @@ bool RegExpMacroAssemblerS390::CheckSpecialCharacterClass(uc16 type,
Label success;
__ CmpP(current_character(), Operand(' '));
__ beq(&success);
- // Check range 0x09..0x0d
+ // Check range 0x09..0x0D
__ SubP(r2, current_character(), Operand('\t'));
__ CmpLogicalP(r2, Operand('\r' - '\t'));
__ ble(&success);
// \u00a0 (NBSP).
- __ CmpLogicalP(r2, Operand(0x00a0 - '\t'));
+ __ CmpLogicalP(r2, Operand(0x00A0 - '\t'));
BranchOrBacktrack(ne, on_no_match);
__ bind(&success);
return true;
@@ -520,37 +519,37 @@ bool RegExpMacroAssemblerS390::CheckSpecialCharacterClass(uc16 type,
BranchOrBacktrack(le, on_no_match);
return true;
case '.': {
- // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+ // Match non-newlines (not 0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029)
__ XorP(r2, current_character(), Operand(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ SubP(r2, Operand(0x0b));
- __ CmpLogicalP(r2, Operand(0x0c - 0x0b));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C
+ __ SubP(r2, Operand(0x0B));
+ __ CmpLogicalP(r2, Operand(0x0C - 0x0B));
BranchOrBacktrack(le, on_no_match);
if (mode_ == UC16) {
// Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ SubP(r2, Operand(0x2028 - 0x0b));
+ // computed (current_char ^ 0x01 - 0x0B). I.e., check for
+ // 0x201D (0x2028 - 0x0B) or 0x201E.
+ __ SubP(r2, Operand(0x2028 - 0x0B));
__ CmpLogicalP(r2, Operand(1));
BranchOrBacktrack(le, on_no_match);
}
return true;
}
case 'n': {
- // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+ // Match newlines (0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029)
__ XorP(r2, current_character(), Operand(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ SubP(r2, Operand(0x0b));
- __ CmpLogicalP(r2, Operand(0x0c - 0x0b));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C
+ __ SubP(r2, Operand(0x0B));
+ __ CmpLogicalP(r2, Operand(0x0C - 0x0B));
if (mode_ == LATIN1) {
BranchOrBacktrack(gt, on_no_match);
} else {
Label done;
__ ble(&done);
// Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ SubP(r2, Operand(0x2028 - 0x0b));
+ // computed (current_char ^ 0x01 - 0x0B). I.e., check for
+ // 0x201D (0x2028 - 0x0B) or 0x201E.
+ __ SubP(r2, Operand(0x2028 - 0x0B));
__ CmpLogicalP(r2, Operand(1));
BranchOrBacktrack(gt, on_no_match);
__ bind(&done);
@@ -773,7 +772,7 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
// and the following use of that register.
__ lay(r2, MemOperand(r2, num_saved_registers_ * kIntSize));
for (int i = 0; i < num_saved_registers_;) {
- if (false && i < num_saved_registers_ - 4) {
+ if ((false) && i < num_saved_registers_ - 4) {
// TODO(john.yan): Can be optimized by SIMD instructions
__ LoadMultipleP(r3, r6, register_location(i + 3));
if (mode_ == UC16) {
diff --git a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
index 1e21182c35..eb57b29602 100644
--- a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
@@ -551,12 +551,12 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
Label success;
__ cmpl(current_character(), Immediate(' '));
__ j(equal, &success, Label::kNear);
- // Check range 0x09..0x0d
+ // Check range 0x09..0x0D
__ leap(rax, Operand(current_character(), -'\t'));
__ cmpl(rax, Immediate('\r' - '\t'));
__ j(below_equal, &success, Label::kNear);
// \u00a0 (NBSP).
- __ cmpl(rax, Immediate(0x00a0 - '\t'));
+ __ cmpl(rax, Immediate(0x00A0 - '\t'));
BranchOrBacktrack(not_equal, on_no_match);
__ bind(&success);
return true;
@@ -578,39 +578,39 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
BranchOrBacktrack(below_equal, on_no_match);
return true;
case '.': {
- // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+ // Match non-newlines (not 0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029)
__ movl(rax, current_character());
__ xorp(rax, Immediate(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ subl(rax, Immediate(0x0b));
- __ cmpl(rax, Immediate(0x0c - 0x0b));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C
+ __ subl(rax, Immediate(0x0B));
+ __ cmpl(rax, Immediate(0x0C - 0x0B));
BranchOrBacktrack(below_equal, on_no_match);
if (mode_ == UC16) {
// Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ subl(rax, Immediate(0x2028 - 0x0b));
+ // computed (current_char ^ 0x01 - 0x0B). I.e., check for
+ // 0x201D (0x2028 - 0x0B) or 0x201E.
+ __ subl(rax, Immediate(0x2028 - 0x0B));
__ cmpl(rax, Immediate(0x2029 - 0x2028));
BranchOrBacktrack(below_equal, on_no_match);
}
return true;
}
case 'n': {
- // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+ // Match newlines (0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029)
__ movl(rax, current_character());
__ xorp(rax, Immediate(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ subl(rax, Immediate(0x0b));
- __ cmpl(rax, Immediate(0x0c - 0x0b));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C
+ __ subl(rax, Immediate(0x0B));
+ __ cmpl(rax, Immediate(0x0C - 0x0B));
if (mode_ == LATIN1) {
BranchOrBacktrack(above, on_no_match);
} else {
Label done;
BranchOrBacktrack(below_equal, &done);
// Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ subl(rax, Immediate(0x2028 - 0x0b));
+ // computed (current_char ^ 0x01 - 0x0B). I.e., check for
+ // 0x201D (0x2028 - 0x0B) or 0x201E.
+ __ subl(rax, Immediate(0x2028 - 0x0B));
__ cmpl(rax, Immediate(0x2029 - 0x2028));
BranchOrBacktrack(above, on_no_match);
__ bind(&done);
diff --git a/deps/v8/src/runtime/runtime-bigint.cc b/deps/v8/src/runtime/runtime-bigint.cc
index ce513d2f92..47f644f619 100644
--- a/deps/v8/src/runtime/runtime-bigint.cc
+++ b/deps/v8/src/runtime/runtime-bigint.cc
@@ -57,7 +57,6 @@ RUNTIME_FUNCTION(Runtime_BigIntEqualToString) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(BigInt, lhs, 0);
CONVERT_ARG_HANDLE_CHECKED(String, rhs, 1);
- rhs = String::Flatten(rhs);
bool result = BigInt::EqualToString(lhs, rhs);
return *isolate->factory()->ToBoolean(result);
}
@@ -108,7 +107,7 @@ RUNTIME_FUNCTION(Runtime_BigIntBinaryOp) {
result = BigInt::Remainder(left, right);
break;
case Operation::kExponentiate:
- UNIMPLEMENTED();
+ result = BigInt::Exponentiate(left, right);
break;
case Operation::kBitwiseAnd:
result = BigInt::BitwiseAnd(left, right);
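
The kExponentiate arm now calls BigInt::Exponentiate, whose body is not part of this patch. As a sketch only, this is the standard square-and-multiply scheme such a routine typically uses, shown on uint64_t rather than arbitrary-precision limbs (a real BigInt path must also reject negative exponents):

    #include <cassert>
    #include <cstdint>

    uint64_t Exponentiate(uint64_t base, uint64_t exponent) {
      uint64_t result = 1;
      while (exponent != 0) {
        if (exponent & 1) result *= base;  // fold in the low bit
        base *= base;                      // square for the next bit
        exponent >>= 1;
      }
      return result;
    }

    int main() {
      assert(Exponentiate(3, 5) == 243);  // 3n ** 5n
      assert(Exponentiate(2, 0) == 1);    // x ** 0n == 1n
    }
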
diff --git a/deps/v8/src/runtime/runtime-classes.cc b/deps/v8/src/runtime/runtime-classes.cc
index 37e647c7dd..7869e32dd1 100644
--- a/deps/v8/src/runtime/runtime-classes.cc
+++ b/deps/v8/src/runtime/runtime-classes.cc
@@ -339,7 +339,6 @@ bool AddDescriptorsByTemplate(
map->InitializeDescriptors(*descriptors,
LayoutDescriptor::FastPointerLayout());
-
if (elements_dictionary->NumberOfElements() > 0) {
if (!SubstituteValues<NumberDictionary>(isolate, elements_dictionary,
receiver, args)) {
@@ -454,7 +453,6 @@ bool InitClassPrototype(Isolate* isolate,
Map::SetPrototype(map, prototype_parent);
constructor->set_prototype_or_initial_map(*prototype);
map->SetConstructor(*constructor);
-
Handle<FixedArray> computed_properties(
class_boilerplate->instance_computed_properties(), isolate);
Handle<NumberDictionary> elements_dictionary_template(
@@ -467,8 +465,8 @@ bool InitClassPrototype(Isolate* isolate,
Handle<NameDictionary> properties_dictionary_template =
Handle<NameDictionary>::cast(properties_template);
- map->set_dictionary_map(true);
- map->set_migration_target(false);
+ map->set_is_dictionary_map(true);
+ map->set_is_migration_target(false);
map->set_may_have_interesting_symbols(true);
map->set_construction_counter(Map::kNoSlackTracking);
@@ -519,10 +517,10 @@ bool InitClassConstructor(Isolate* isolate,
Handle<NameDictionary> properties_dictionary_template =
Handle<NameDictionary>::cast(properties_template);
- map->set_dictionary_map(true);
+ map->set_is_dictionary_map(true);
map->InitializeDescriptors(isolate->heap()->empty_descriptor_array(),
LayoutDescriptor::FastPointerLayout());
- map->set_migration_target(false);
+ map->set_is_migration_target(false);
map->set_may_have_interesting_symbols(true);
map->set_construction_counter(Map::kNoSlackTracking);
@@ -595,6 +593,14 @@ MaybeHandle<Object> DefineClass(Isolate* isolate,
DCHECK(isolate->has_pending_exception());
return MaybeHandle<Object>();
}
+ if (FLAG_trace_maps) {
+ LOG(isolate,
+ MapEvent("InitialMap", nullptr, constructor->map(),
+ "init class constructor", constructor->shared()->DebugName()));
+ LOG(isolate, MapEvent("InitialMap", nullptr, prototype->map(),
+ "init class prototype"));
+ }
+
return prototype;
}
diff --git a/deps/v8/src/runtime/runtime-compiler.cc b/deps/v8/src/runtime/runtime-compiler.cc
index 92ba3e6c3f..14b61b0ac6 100644
--- a/deps/v8/src/runtime/runtime-compiler.cc
+++ b/deps/v8/src/runtime/runtime-compiler.cc
@@ -57,6 +57,24 @@ RUNTIME_FUNCTION(Runtime_CompileOptimized_Concurrent) {
return function->code();
}
+RUNTIME_FUNCTION(Runtime_FunctionFirstExecution) {
+ HandleScope scope(isolate);
+ StackLimitCheck check(isolate);
+ DCHECK_EQ(1, args.length());
+
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ DCHECK_EQ(function->feedback_vector()->optimization_marker(),
+ OptimizationMarker::kLogFirstExecution);
+ DCHECK(FLAG_log_function_events);
+ Handle<SharedFunctionInfo> sfi(function->shared());
+ LOG(isolate, FunctionEvent("first-execution", Script::cast(sfi->script()), -1,
+ 0, sfi->start_position(), sfi->end_position(),
+ sfi->DebugName()));
+ function->feedback_vector()->ClearOptimizationMarker();
+ // Return the code to continue execution; at this point we don't care
+ // whether this is for lazy compilation or has been eagerly compiled.
+ return function->code();
+}
RUNTIME_FUNCTION(Runtime_CompileOptimized_NotConcurrent) {
HandleScope scope(isolate);
@@ -141,7 +159,6 @@ RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
TRACE_EVENT0("v8", "V8.DeoptimizeCode");
Handle<JSFunction> function = deoptimizer->function();
Deoptimizer::BailoutType type = deoptimizer->bailout_type();
- bool preserve_optimized_code = deoptimizer->preserve_optimized();
// TODO(turbofan): We currently need the native context to materialize
// the arguments object, but only to get to its map.
@@ -157,7 +174,7 @@ RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
isolate->set_context(Context::cast(top_frame->context()));
// Invalidate the underlying optimized code on non-lazy deopts.
- if (type != Deoptimizer::LAZY && !preserve_optimized_code) {
+ if (type != Deoptimizer::LAZY) {
Deoptimizer::DeoptimizeFunction(*function);
}
diff --git a/deps/v8/src/runtime/runtime-debug.cc b/deps/v8/src/runtime/runtime-debug.cc
index d7395c7a7f..d6e028b41e 100644
--- a/deps/v8/src/runtime/runtime-debug.cc
+++ b/deps/v8/src/runtime/runtime-debug.cc
@@ -1554,6 +1554,7 @@ int ScriptLinePosition(Handle<Script> script, int line) {
if (script->type() == Script::TYPE_WASM) {
return WasmCompiledModule::cast(script->wasm_compiled_module())
+ ->shared()
->GetFunctionOffset(line);
}
diff --git a/deps/v8/src/runtime/runtime-function.cc b/deps/v8/src/runtime/runtime-function.cc
index c78ac8f6b1..e9433d2041 100644
--- a/deps/v8/src/runtime/runtime-function.cc
+++ b/deps/v8/src/runtime/runtime-function.cc
@@ -63,7 +63,9 @@ RUNTIME_FUNCTION(Runtime_FunctionGetSourceCode) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, function, 0);
if (function->IsJSFunction()) {
- return *Handle<JSFunction>::cast(function)->shared()->GetSourceCode();
+ Handle<SharedFunctionInfo> shared(
+ Handle<JSFunction>::cast(function)->shared());
+ return *SharedFunctionInfo::GetSourceCode(shared);
}
return isolate->heap()->undefined_value();
}
diff --git a/deps/v8/src/runtime/runtime-internal.cc b/deps/v8/src/runtime/runtime-internal.cc
index 6d0e2b8439..f9e9375543 100644
--- a/deps/v8/src/runtime/runtime-internal.cc
+++ b/deps/v8/src/runtime/runtime-internal.cc
@@ -6,6 +6,7 @@
#include <memory>
+#include "src/api.h"
#include "src/arguments.h"
#include "src/ast/prettyprinter.h"
#include "src/bootstrapper.h"
@@ -650,5 +651,22 @@ RUNTIME_FUNCTION(Runtime_GetTemplateObject) {
description, isolate->native_context());
}
+RUNTIME_FUNCTION(Runtime_ReportMessage) {
+ // Helper to report messages and continue JS execution. This is intended to
+ // behave similarly to reporting exceptions which reach the top-level in
+ // Execution.cc, but allow the JS code to continue. This is useful for
+ // implementing algorithms such as RunMicrotasks in JS.
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+
+ CONVERT_ARG_HANDLE_CHECKED(Object, message_obj, 0);
+
+ DCHECK(!isolate->has_pending_exception());
+ isolate->set_pending_exception(*message_obj);
+ isolate->ReportPendingMessagesFromJavaScript();
+ isolate->clear_pending_exception();
+ return isolate->heap()->undefined_value();
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-module.cc b/deps/v8/src/runtime/runtime-module.cc
index bb16a772c0..a9fb48f887 100644
--- a/deps/v8/src/runtime/runtime-module.cc
+++ b/deps/v8/src/runtime/runtime-module.cc
@@ -19,11 +19,9 @@ RUNTIME_FUNCTION(Runtime_DynamicImportCall) {
Handle<Script> script(Script::cast(function->shared()->script()));
- while (script->eval_from_shared()->IsSharedFunctionInfo()) {
- script = handle(
- Script::cast(
- SharedFunctionInfo::cast(script->eval_from_shared())->script()),
- isolate);
+ while (script->has_eval_from_shared()) {
+ script =
+ handle(Script::cast(script->eval_from_shared()->script()), isolate);
}
RETURN_RESULT_OR_FAILURE(
diff --git a/deps/v8/src/runtime/runtime-numbers.cc b/deps/v8/src/runtime/runtime-numbers.cc
index 8e351b3c74..260e6be45b 100644
--- a/deps/v8/src/runtime/runtime-numbers.cc
+++ b/deps/v8/src/runtime/runtime-numbers.cc
@@ -69,16 +69,6 @@ RUNTIME_FUNCTION(Runtime_StringParseFloat) {
return *isolate->factory()->NewNumber(value);
}
-
-RUNTIME_FUNCTION(Runtime_NumberToString) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_NUMBER_ARG_HANDLE_CHECKED(number, 0);
-
- return *isolate->factory()->NumberToString(number);
-}
-
-
RUNTIME_FUNCTION(Runtime_NumberToStringSkipCache) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index 057ead9407..379472bdbe 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -439,6 +439,61 @@ RUNTIME_FUNCTION(Runtime_OptimizeObjectForAddingMultipleProperties) {
return *object;
}
+RUNTIME_FUNCTION(Runtime_ObjectValues) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
+
+ Handle<FixedArray> values;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, values,
+ JSReceiver::GetOwnValues(receiver, PropertyFilter::ENUMERABLE_STRINGS,
+ true));
+ return *isolate->factory()->NewJSArrayWithElements(values);
+}
+
+RUNTIME_FUNCTION(Runtime_ObjectValuesSkipFastPath) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
+
+ Handle<FixedArray> value;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, value,
+ JSReceiver::GetOwnValues(receiver, PropertyFilter::ENUMERABLE_STRINGS,
+ false));
+ return *isolate->factory()->NewJSArrayWithElements(value);
+}
+
+RUNTIME_FUNCTION(Runtime_ObjectEntries) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
+
+ Handle<FixedArray> entries;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, entries,
+ JSReceiver::GetOwnEntries(receiver, PropertyFilter::ENUMERABLE_STRINGS,
+ true));
+ return *isolate->factory()->NewJSArrayWithElements(entries);
+}
+
+RUNTIME_FUNCTION(Runtime_ObjectEntriesSkipFastPath) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
+
+ Handle<FixedArray> entries;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, entries,
+ JSReceiver::GetOwnEntries(receiver, PropertyFilter::ENUMERABLE_STRINGS,
+ false));
+ return *isolate->factory()->NewJSArrayWithElements(entries);
+}
RUNTIME_FUNCTION(Runtime_GetProperty) {
HandleScope scope(isolate);
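
All four runtime functions above funnel into JSReceiver::GetOwnValues / GetOwnEntries with the ENUMERABLE_STRINGS filter; the trailing boolean selects whether the fast path may be taken, and the SkipFastPath variants exist to exercise the slow path in tests. A minimal model of the filter, assuming only that properties carry an enumerable bit and that symbol keys are excluded:

    #include <string>
    #include <utility>
    #include <vector>

    struct Property {
      std::string key;    // string keys only; symbols never match the filter
      std::string value;
      bool enumerable;
    };

    std::vector<std::pair<std::string, std::string>> OwnEntries(
        const std::vector<Property>& props) {
      std::vector<std::pair<std::string, std::string>> entries;
      for (const Property& p : props) {
        if (p.enumerable) entries.emplace_back(p.key, p.value);
      }
      return entries;
    }

    // OwnEntries({{"a", "1", true}, {"b", "2", false}}) yields {{"a", "1"}}.
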
diff --git a/deps/v8/src/runtime/runtime-promise.cc b/deps/v8/src/runtime/runtime-promise.cc
index 1d8ca623e1..2c28cd3c98 100644
--- a/deps/v8/src/runtime/runtime-promise.cc
+++ b/deps/v8/src/runtime/runtime-promise.cc
@@ -70,22 +70,6 @@ RUNTIME_FUNCTION(Runtime_PromiseRevokeReject) {
return isolate->heap()->undefined_value();
}
-RUNTIME_FUNCTION(Runtime_EnqueuePromiseReactionJob) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(PromiseReactionJobInfo, info, 0);
- isolate->EnqueueMicrotask(info);
- return isolate->heap()->undefined_value();
-}
-
-RUNTIME_FUNCTION(Runtime_EnqueuePromiseResolveThenableJob) {
- HandleScope scope(isolate);
- DCHECK_EQ(args.length(), 1);
- CONVERT_ARG_HANDLE_CHECKED(PromiseResolveThenableJobInfo, info, 0);
- isolate->EnqueueMicrotask(info);
- return isolate->heap()->undefined_value();
-}
-
RUNTIME_FUNCTION(Runtime_EnqueueMicrotask) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
diff --git a/deps/v8/src/runtime/runtime-proxy.cc b/deps/v8/src/runtime/runtime-proxy.cc
index 472cbdf79d..a10260c1e2 100644
--- a/deps/v8/src/runtime/runtime-proxy.cc
+++ b/deps/v8/src/runtime/runtime-proxy.cc
@@ -38,14 +38,6 @@ RUNTIME_FUNCTION(Runtime_JSProxyGetTarget) {
}
-RUNTIME_FUNCTION(Runtime_JSProxyRevoke) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSProxy, proxy, 0);
- JSProxy::Revoke(proxy);
- return isolate->heap()->undefined_value();
-}
-
RUNTIME_FUNCTION(Runtime_GetPropertyWithReceiver) {
HandleScope scope(isolate);
diff --git a/deps/v8/src/runtime/runtime-regexp.cc b/deps/v8/src/runtime/runtime-regexp.cc
index 2ba760b847..d0afcd2636 100644
--- a/deps/v8/src/runtime/runtime-regexp.cc
+++ b/deps/v8/src/runtime/runtime-regexp.cc
@@ -544,7 +544,7 @@ MUST_USE_RESULT static Object* StringReplaceGlobalAtomRegExpWithString(
int pattern_len = pattern->length();
int replacement_len = replacement->length();
- FindStringIndicesDispatch(isolate, *subject, pattern, indices, 0xffffffff);
+ FindStringIndicesDispatch(isolate, *subject, pattern, indices, 0xFFFFFFFF);
if (indices->empty()) return *subject;
@@ -834,7 +834,7 @@ RUNTIME_FUNCTION(Runtime_StringSplit) {
int pattern_length = pattern->length();
CHECK_LT(0, pattern_length);
- if (limit == 0xffffffffu) {
+ if (limit == 0xFFFFFFFFu) {
FixedArray* last_match_cache_unused;
Handle<Object> cached_answer(
RegExpResultsCache::Lookup(isolate->heap(), *subject, *pattern,
@@ -849,7 +849,7 @@ RUNTIME_FUNCTION(Runtime_StringSplit) {
}
}
- // The limit can be very large (0xffffffffu), but since the pattern
+ // The limit can be very large (0xFFFFFFFFu), but since the pattern
// isn't empty, we can never create more parts than ~half the length
// of the subject.
@@ -890,7 +890,7 @@ RUNTIME_FUNCTION(Runtime_StringSplit) {
});
}
- if (limit == 0xffffffffu) {
+ if (limit == 0xFFFFFFFFu) {
if (result->HasObjectElements()) {
RegExpResultsCache::Enter(isolate, subject, pattern, elements,
isolate->factory()->empty_fixed_array(),
@@ -1804,6 +1804,7 @@ RUNTIME_FUNCTION(Runtime_RegExpReplace) {
uint32_t next_source_position = 0;
for (const auto& result : results) {
+ HandleScope handle_scope(isolate);
Handle<Object> captures_length_obj;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, captures_length_obj,
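
The one-line change above puts a HandleScope inside the per-match loop, so handles allocated while building one replacement die at the end of that iteration instead of accumulating across the whole replace. A runnable model of the pattern (Arena and Scope are stand-ins for the isolate's handle area, not V8 types):

    #include <cstddef>
    #include <vector>

    struct Arena {
      std::vector<int> slots;  // one slot per live handle
    };

    struct Scope {
      explicit Scope(Arena* a) : arena(a), mark(a->slots.size()) {}
      ~Scope() { arena->slots.resize(mark); }  // drop handles from this scope
      Arena* arena;
      std::size_t mark;
    };

    int main() {
      Arena arena;
      for (int match = 0; match < 1000; ++match) {
        Scope scope(&arena);
        arena.slots.push_back(match);  // handle created for this match
      }  // released here: peak live handles stays at 1, not 1000
      return static_cast<int>(arena.slots.size());  // 0
    }
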
diff --git a/deps/v8/src/runtime/runtime-scopes.cc b/deps/v8/src/runtime/runtime-scopes.cc
index 61795fc6cb..76f291f90f 100644
--- a/deps/v8/src/runtime/runtime-scopes.cc
+++ b/deps/v8/src/runtime/runtime-scopes.cc
@@ -9,6 +9,7 @@
#include "src/accessors.h"
#include "src/arguments.h"
#include "src/ast/scopes.h"
+#include "src/bootstrapper.h"
#include "src/deoptimizer.h"
#include "src/frames-inl.h"
#include "src/isolate-inl.h"
@@ -726,6 +727,9 @@ RUNTIME_FUNCTION(Runtime_NewScriptContext) {
Handle<JSFunction> closure(function->shared()->IsUserJavaScript()
? native_context->closure()
: *function);
+
+ // We do not need script contexts here during bootstrap.
+ DCHECK(!isolate->bootstrapper()->IsActive());
Handle<Context> result =
isolate->factory()->NewScriptContext(closure, scope_info);
diff --git a/deps/v8/src/runtime/runtime-strings.cc b/deps/v8/src/runtime/runtime-strings.cc
index 1e2d1f5a56..8f6b887f62 100644
--- a/deps/v8/src/runtime/runtime-strings.cc
+++ b/deps/v8/src/runtime/runtime-strings.cc
@@ -490,7 +490,7 @@ static void JoinSparseArrayWithSeparator(FixedArray* elements,
int last_array_index = static_cast<int>(array_length - 1);
// Array length must be representable as a signed 32-bit number,
// otherwise the total string length would have been too large.
- DCHECK_LE(array_length, 0x7fffffff); // Is int32_t.
+ DCHECK_LE(array_length, 0x7FFFFFFF); // Is int32_t.
int repeat = last_array_index - previous_separator_position;
WriteRepeatToFlat<Char>(separator, buffer, cursor, repeat, separator_length);
cursor += repeat * separator_length;
@@ -537,7 +537,7 @@ RUNTIME_FUNCTION(Runtime_SparseJoinWithSeparator) {
int separator_length = separator->length();
if (!overflow && separator_length > 0) {
- if (array_length <= 0x7fffffffu) {
+ if (array_length <= 0x7FFFFFFFu) {
int separator_count = static_cast<int>(array_length) - 1;
int remaining_length = String::kMaxLength - string_length;
if ((remaining_length / separator_length) >= separator_count) {
@@ -549,7 +549,7 @@ RUNTIME_FUNCTION(Runtime_SparseJoinWithSeparator) {
} else {
// Nonempty separator and at least 2^31-1 separators necessary
// means that the string is too large to create.
- STATIC_ASSERT(String::kMaxLength < 0x7fffffff);
+ STATIC_ASSERT(String::kMaxLength < 0x7FFFFFFF);
overflow = true;
}
}
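
The guard in this hunk is overflow-aware by construction: instead of computing separator_count * separator_length (which can overflow int), it divides the remaining length budget by the separator length so every intermediate stays in range. A standalone check of the same shape:

    #include <cassert>

    bool SeparatorsFit(int remaining_length, int separator_length,
                       int separator_count) {
      // Equivalent to remaining_length >= separator_count * separator_length
      // for positive lengths, without the overflowing multiply.
      return (remaining_length / separator_length) >= separator_count;
    }

    int main() {
      assert(SeparatorsFit(1 << 30, 3, 357913941));   // (1 << 30) / 3
      assert(!SeparatorsFit(1 << 30, 3, 357913942));
    }
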
@@ -730,7 +730,7 @@ RUNTIME_FUNCTION(Runtime_StringCharFromCode) {
DCHECK_EQ(1, args.length());
if (args[0]->IsNumber()) {
CONVERT_NUMBER_CHECKED(uint32_t, code, Uint32, args[0]);
- code &= 0xffff;
+ code &= 0xFFFF;
return *isolate->factory()->LookupSingleCharacterStringFromCode(code);
}
return isolate->heap()->empty_string();
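
The code &= 0xFFFF above implements the ToUint16 step of String.fromCharCode: code units above the BMP wrap modulo 2^16 rather than producing surrogate pairs (that is fromCodePoint's job). For instance:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t code = 0x1F600;  // an astral code point
      code &= 0xFFFF;
      assert(code == 0xF600);   // String.fromCharCode(0x1F600) === "\uF600"
    }
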
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index b3cdf3fe67..01e2b198a6 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -15,10 +15,11 @@
#include "src/frames-inl.h"
#include "src/isolate-inl.h"
#include "src/runtime-profiler.h"
-#include "src/snapshot/code-serializer.h"
#include "src/snapshot/natives.h"
#include "src/trap-handler/trap-handler.h"
#include "src/wasm/memory-tracing.h"
+#include "src/wasm/module-compiler.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-serialization.h"
@@ -41,8 +42,9 @@ bool IsWasmCompileAllowed(v8::Isolate* isolate, v8::Local<v8::Value> value,
DCHECK_GT(g_PerIsolateWasmControls.Get().count(isolate), 0);
const WasmCompileControls& ctrls = g_PerIsolateWasmControls.Get().at(isolate);
return (is_async && ctrls.AllowAnySizeForAsync) ||
- (v8::Local<v8::ArrayBuffer>::Cast(value)->ByteLength() <=
- ctrls.MaxWasmBufferSize);
+ (value->IsArrayBuffer() &&
+ v8::Local<v8::ArrayBuffer>::Cast(value)->ByteLength() <=
+ ctrls.MaxWasmBufferSize);
}
// Use the compile controls for instantiation, too
@@ -307,7 +309,8 @@ RUNTIME_FUNCTION(Runtime_NeverOptimizeFunction) {
return isolate->heap()->undefined_value();
}
Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
- function->shared()->DisableOptimization(kOptimizationDisabledForTest);
+ function->shared()->DisableOptimization(
+ BailoutReason::kOptimizationDisabledForTest);
return isolate->heap()->undefined_value();
}
@@ -499,8 +502,8 @@ RUNTIME_FUNCTION(Runtime_CheckWasmWrapperElision) {
: rinfo->target_address();
if (FLAG_wasm_jit_to_native) {
wasm::WasmCode* target =
- isolate->wasm_code_manager()->LookupCode(target_address);
- if (target->kind() == wasm::WasmCode::Function) {
+ isolate->wasm_engine()->code_manager()->LookupCode(target_address);
+ if (target->kind() == wasm::WasmCode::kFunction) {
++count;
export_fct = target;
}
@@ -523,8 +526,8 @@ RUNTIME_FUNCTION(Runtime_CheckWasmWrapperElision) {
RelocInfo* rinfo = it.rinfo();
Address target_address = rinfo->target_address();
wasm::WasmCode* target =
- isolate->wasm_code_manager()->LookupCode(target_address);
- if (target->kind() == wasm::WasmCode::Function) {
+ isolate->wasm_engine()->code_manager()->LookupCode(target_address);
+ if (target->kind() == wasm::WasmCode::kFunction) {
++count;
intermediate_fct = target;
}
@@ -549,8 +552,8 @@ RUNTIME_FUNCTION(Runtime_CheckWasmWrapperElision) {
count = 0;
if (FLAG_wasm_jit_to_native) {
wasm::WasmCode::Kind target_kind = type->value() == 0
- ? wasm::WasmCode::WasmToWasmWrapper
- : wasm::WasmCode::WasmToJsWrapper;
+ ? wasm::WasmCode::kWasmToWasmWrapper
+ : wasm::WasmCode::kWasmToJsWrapper;
for (RelocIterator it(intermediate_fct->instructions(),
intermediate_fct->reloc_info(),
intermediate_fct->constant_pool(),
@@ -559,7 +562,7 @@ RUNTIME_FUNCTION(Runtime_CheckWasmWrapperElision) {
RelocInfo* rinfo = it.rinfo();
Address target_address = rinfo->target_address();
wasm::WasmCode* target =
- isolate->wasm_code_manager()->LookupCode(target_address);
+ isolate->wasm_engine()->code_manager()->LookupCode(target_address);
if (target->kind() == target_kind) {
++count;
}
@@ -614,10 +617,12 @@ RUNTIME_FUNCTION(Runtime_NotifyContextDisposed) {
RUNTIME_FUNCTION(Runtime_SetAllocationTimeout) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 2 || args.length() == 3);
-#ifdef DEBUG
- CONVERT_INT32_ARG_CHECKED(interval, 0);
+#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
CONVERT_INT32_ARG_CHECKED(timeout, 1);
isolate->heap()->set_allocation_timeout(timeout);
+#endif
+#ifdef DEBUG
+ CONVERT_INT32_ARG_CHECKED(interval, 0);
FLAG_gc_interval = interval;
if (args.length() == 3) {
// Enable/disable inline allocation if requested.
@@ -757,8 +762,7 @@ RUNTIME_FUNCTION(Runtime_Abort) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
CONVERT_SMI_ARG_CHECKED(message_id, 0);
- const char* message =
- GetBailoutReason(static_cast<BailoutReason>(message_id));
+ const char* message = GetAbortReason(static_cast<AbortReason>(message_id));
base::OS::PrintError("abort: %s\n", message);
isolate->PrintStack(stderr);
base::OS::Abort();
@@ -936,8 +940,7 @@ RUNTIME_FUNCTION(Runtime_IsWasmCode) {
RUNTIME_FUNCTION(Runtime_IsWasmTrapHandlerEnabled) {
DisallowHeapAllocation no_gc;
DCHECK_EQ(0, args.length());
- bool is_enabled = trap_handler::UseTrapHandler();
- return isolate->heap()->ToBoolean(is_enabled);
+ return isolate->heap()->ToBoolean(trap_handler::IsTrapHandlerEnabled());
}
RUNTIME_FUNCTION(Runtime_GetWasmRecoveredTrapCount) {
@@ -993,24 +996,14 @@ RUNTIME_FUNCTION(Runtime_SerializeWasmModule) {
CONVERT_ARG_HANDLE_CHECKED(WasmModuleObject, module_obj, 0);
Handle<WasmCompiledModule> orig(module_obj->compiled_module());
- if (FLAG_wasm_jit_to_native) {
- std::pair<std::unique_ptr<byte[]>, size_t> serialized_module =
- wasm::NativeModuleSerializer::SerializeWholeModule(isolate, orig);
- int data_size = static_cast<int>(serialized_module.second);
- void* buff = isolate->array_buffer_allocator()->Allocate(data_size);
- Handle<JSArrayBuffer> ret = isolate->factory()->NewJSArrayBuffer();
- JSArrayBuffer::Setup(ret, isolate, false, buff, data_size);
- memcpy(buff, serialized_module.first.get(), data_size);
- return *ret;
- } else {
- std::unique_ptr<ScriptData> data =
- WasmCompiledModuleSerializer::SerializeWasmModule(isolate, orig);
- void* buff = isolate->array_buffer_allocator()->Allocate(data->length());
- Handle<JSArrayBuffer> ret = isolate->factory()->NewJSArrayBuffer();
- JSArrayBuffer::Setup(ret, isolate, false, buff, data->length());
- memcpy(buff, data->data(), data->length());
- return *ret;
- }
+ std::pair<std::unique_ptr<const byte[]>, size_t> serialized_module =
+ wasm::SerializeNativeModule(isolate, orig);
+ int data_size = static_cast<int>(serialized_module.second);
+ void* buff = isolate->array_buffer_allocator()->Allocate(data_size);
+ Handle<JSArrayBuffer> ret = isolate->factory()->NewJSArrayBuffer();
+ JSArrayBuffer::Setup(ret, isolate, false, buff, data_size);
+ memcpy(buff, serialized_module.first.get(), data_size);
+ return *ret;
}
// Take an array buffer and attempt to reconstruct a compiled wasm module.
@@ -1024,39 +1017,28 @@ RUNTIME_FUNCTION(Runtime_DeserializeWasmModule) {
Address mem_start = static_cast<Address>(buffer->backing_store());
size_t mem_size = static_cast<size_t>(buffer->byte_length()->Number());
- // DeserializeWasmModule will allocate. We assume JSArrayBuffer doesn't
- // get relocated.
+ // Note that {wasm::DeserializeNativeModule} will allocate. We assume the
+ // JSArrayBuffer doesn't get relocated.
bool already_external = wire_bytes->is_external();
if (!already_external) {
wire_bytes->set_is_external(true);
isolate->heap()->UnregisterArrayBuffer(*wire_bytes);
}
- MaybeHandle<FixedArray> maybe_compiled_module;
- if (FLAG_wasm_jit_to_native) {
- maybe_compiled_module =
- wasm::NativeModuleDeserializer::DeserializeFullBuffer(
- isolate, {mem_start, mem_size},
- Vector<const uint8_t>(
- reinterpret_cast<uint8_t*>(wire_bytes->backing_store()),
- static_cast<int>(wire_bytes->byte_length()->Number())));
- } else {
- ScriptData sc(mem_start, static_cast<int>(mem_size));
- maybe_compiled_module = WasmCompiledModuleSerializer::DeserializeWasmModule(
- isolate, &sc,
- Vector<const uint8_t>(
- reinterpret_cast<uint8_t*>(wire_bytes->backing_store()),
- static_cast<int>(wire_bytes->byte_length()->Number())));
- }
+ MaybeHandle<WasmCompiledModule> maybe_compiled_module =
+ wasm::DeserializeNativeModule(
+ isolate, {mem_start, mem_size},
+ Vector<const uint8_t>(
+ reinterpret_cast<uint8_t*>(wire_bytes->backing_store()),
+ static_cast<int>(wire_bytes->byte_length()->Number())));
if (!already_external) {
wire_bytes->set_is_external(false);
isolate->heap()->RegisterNewArrayBuffer(*wire_bytes);
}
- Handle<FixedArray> compiled_module;
+ Handle<WasmCompiledModule> compiled_module;
if (!maybe_compiled_module.ToHandle(&compiled_module)) {
return isolate->heap()->undefined_value();
}
- return *WasmModuleObject::New(
- isolate, Handle<WasmCompiledModule>::cast(compiled_module));
+ return *WasmModuleObject::New(isolate, compiled_module);
}
RUNTIME_FUNCTION(Runtime_ValidateWasmInstancesChain) {
@@ -1125,11 +1107,11 @@ RUNTIME_FUNCTION(Runtime_RedirectToWasmInterpreter) {
RUNTIME_FUNCTION(Runtime_WasmTraceMemory) {
HandleScope hs(isolate);
- DCHECK_EQ(4, args.length());
- CONVERT_SMI_ARG_CHECKED(is_store, 0);
- CONVERT_SMI_ARG_CHECKED(mem_rep, 1);
- CONVERT_SMI_ARG_CHECKED(addr_low, 2);
- CONVERT_SMI_ARG_CHECKED(addr_high, 3);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_CHECKED(Smi, info_addr, 0);
+
+ wasm::MemoryTracingInfo* info =
+ reinterpret_cast<wasm::MemoryTracingInfo*>(info_addr);
// Find the caller wasm frame.
StackTraceFrameIterator it(isolate);
@@ -1137,8 +1119,6 @@ RUNTIME_FUNCTION(Runtime_WasmTraceMemory) {
DCHECK(it.is_wasm());
WasmCompiledFrame* frame = WasmCompiledFrame::cast(it.frame());
- uint32_t addr = (static_cast<uint32_t>(addr_low) & 0xffff) |
- (static_cast<uint32_t>(addr_high) << 16);
uint8_t* mem_start = reinterpret_cast<uint8_t*>(frame->wasm_instance()
->memory_object()
->array_buffer()
@@ -1148,9 +1128,11 @@ RUNTIME_FUNCTION(Runtime_WasmTraceMemory) {
// TODO(titzer): eliminate dependency on WasmModule definition here.
int func_start =
frame->wasm_instance()->module()->functions[func_index].code.offset();
- tracing::TraceMemoryOperation(tracing::kWasmCompiled, is_store,
- MachineRepresentation(mem_rep), addr,
- func_index, pos - func_start, mem_start);
+ wasm::ExecutionEngine eng = frame->wasm_code().is_liftoff()
+ ? wasm::ExecutionEngine::kLiftoff
+ : wasm::ExecutionEngine::kTurbofan;
+ wasm::TraceMemoryOperation(eng, info, func_index, pos - func_start,
+ mem_start);
return isolate->heap()->undefined_value();
}
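
The retired convention removed above split the 32-bit address across two Smi arguments (addr_low, addr_high), since a Smi cannot carry a full untagged 32-bit value on 32-bit targets, and reassembled it with a mask and a shift; the new convention passes a single pointer to a MemoryTracingInfo struct instead. The old round trip, for reference:

    #include <cassert>
    #include <cstdint>

    uint32_t Reassemble(uint32_t addr_low, uint32_t addr_high) {
      return (addr_low & 0xFFFF) | (addr_high << 16);
    }

    int main() {
      uint32_t addr = 0xDEADBEEF;
      assert(Reassemble(addr & 0xFFFF, addr >> 16) == addr);
    }
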
@@ -1180,5 +1162,19 @@ RUNTIME_FUNCTION(Runtime_CompleteInobjectSlackTracking) {
return isolate->heap()->undefined_value();
}
+RUNTIME_FUNCTION(Runtime_FreezeWasmLazyCompilation) {
+ DCHECK_EQ(1, args.length());
+ DisallowHeapAllocation no_gc;
+ CONVERT_ARG_CHECKED(WasmInstanceObject, instance, 0);
+
+ WasmSharedModuleData* shared = instance->compiled_module()->shared();
+ CHECK(shared->has_lazy_compilation_orchestrator());
+ auto* orchestrator = Managed<wasm::LazyCompilationOrchestrator>::cast(
+ shared->lazy_compilation_orchestrator())
+ ->get();
+ orchestrator->FreezeLazyCompilationForTesting();
+ return isolate->heap()->undefined_value();
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-typedarray.cc b/deps/v8/src/runtime/runtime-typedarray.cc
index 5820c4b6a4..85fb2d2173 100644
--- a/deps/v8/src/runtime/runtime-typedarray.cc
+++ b/deps/v8/src/runtime/runtime-typedarray.cc
@@ -214,101 +214,6 @@ RUNTIME_FUNCTION(Runtime_TypedArraySpeciesCreateByLength) {
return *result_array;
}
-namespace {
-
-Object* TypedArraySetFromOverlapping(Isolate* isolate,
- Handle<JSTypedArray> target,
- Handle<JSTypedArray> source,
- uint32_t offset) {
-#ifdef DEBUG
- Handle<FixedTypedArrayBase> source_elements(
- FixedTypedArrayBase::cast(source->elements()));
- Handle<FixedTypedArrayBase> target_elements(
- FixedTypedArrayBase::cast(target->elements()));
- uint8_t* source_data = static_cast<uint8_t*>(source_elements->DataPtr());
- uint8_t* target_data = static_cast<uint8_t*>(target_elements->DataPtr());
- size_t source_byte_length = NumberToSize(source->byte_length());
- size_t target_byte_length = NumberToSize(target->byte_length());
-
- CHECK_LE(offset, target->length_value());
- CHECK_LE(source->length_value(), target->length_value() - offset);
- CHECK(source->length()->IsSmi());
-
- CHECK(!target->WasNeutered());
- CHECK(!source->WasNeutered());
-
- // Assert that target and source in fact overlapping.
- CHECK(target_data + target_byte_length > source_data &&
- source_data + source_byte_length > target_data);
-#endif
-
- size_t sourceElementSize = source->element_size();
- size_t targetElementSize = target->element_size();
-
- uint32_t source_length = source->length_value();
- if (source_length == 0) return isolate->heap()->undefined_value();
-
- // Copy left part.
-
- // First un-mutated byte after the next write
- uint32_t target_ptr = 0;
- CHECK(target->byte_offset()->ToUint32(&target_ptr));
- target_ptr += (offset + 1) * targetElementSize;
-
- // Next read at sourcePtr. We do not care for memory changing before
- // sourcePtr - we have already copied it.
- uint32_t source_ptr = 0;
- CHECK(source->byte_offset()->ToUint32(&source_ptr));
-
- ElementsAccessor* source_accessor = source->GetElementsAccessor();
- ElementsAccessor* target_accessor = target->GetElementsAccessor();
-
- uint32_t left_index;
- for (left_index = 0; left_index < source_length && target_ptr <= source_ptr;
- left_index++) {
- Handle<Object> value = source_accessor->Get(source, left_index);
- target_accessor->Set(target, offset + left_index, *value);
-
- target_ptr += targetElementSize;
- source_ptr += sourceElementSize;
- }
-
- // Copy right part;
- // First unmutated byte before the next write
- CHECK(target->byte_offset()->ToUint32(&target_ptr));
- target_ptr += (offset + source_length - 1) * targetElementSize;
-
- // Next read before sourcePtr. We do not care for memory changing after
- // sourcePtr - we have already copied it.
- CHECK(target->byte_offset()->ToUint32(&source_ptr));
- source_ptr += source_length * sourceElementSize;
-
- uint32_t right_index;
- DCHECK_GE(source_length, 1);
- for (right_index = source_length - 1;
- right_index > left_index && target_ptr >= source_ptr; right_index--) {
- Handle<Object> value = source_accessor->Get(source, right_index);
- target_accessor->Set(target, offset + right_index, *value);
-
- target_ptr -= targetElementSize;
- source_ptr -= sourceElementSize;
- }
-
- std::vector<Handle<Object>> temp(right_index + 1 - left_index);
-
- for (uint32_t i = left_index; i <= right_index; i++) {
- temp[i - left_index] = source_accessor->Get(source, i);
- }
-
- for (uint32_t i = left_index; i <= right_index; i++) {
- target_accessor->Set(target, offset + i, *temp[i - left_index]);
- }
-
- return isolate->heap()->undefined_value();
-}
-
-} // namespace
-
// 22.2.3.23 %TypedArray%.prototype.set ( overloaded [ , offset ] )
RUNTIME_FUNCTION(Runtime_TypedArraySet) {
HandleScope scope(isolate);
@@ -317,6 +222,7 @@ RUNTIME_FUNCTION(Runtime_TypedArraySet) {
Handle<Smi> offset = args.at<Smi>(2);
DCHECK(!target->WasNeutered()); // Checked in TypedArrayPrototypeSet.
+ DCHECK(!obj->IsJSTypedArray()); // Should be handled by CSA.
DCHECK_LE(0, offset->value());
const uint32_t uint_offset = static_cast<uint32_t>(offset->value());
@@ -328,10 +234,6 @@ RUNTIME_FUNCTION(Runtime_TypedArraySet) {
// (Consistent with Firefox and Blink/WebKit)
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kInvalidArgument));
- } else if (obj->IsJSTypedArray()) {
- // The non-overlapping case is handled in CSA.
- Handle<JSTypedArray> source = Handle<JSTypedArray>::cast(obj);
- return TypedArraySetFromOverlapping(isolate, target, source, uint_offset);
}
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, obj,
diff --git a/deps/v8/src/runtime/runtime-wasm.cc b/deps/v8/src/runtime/runtime-wasm.cc
index e8aef3fa97..0b002d0ec6 100644
--- a/deps/v8/src/runtime/runtime-wasm.cc
+++ b/deps/v8/src/runtime/runtime-wasm.cc
@@ -16,9 +16,10 @@
#include "src/trap-handler/trap-handler.h"
#include "src/v8memory.h"
#include "src/wasm/module-compiler.h"
-#include "src/wasm/wasm-heap.h"
+#include "src/wasm/wasm-code-manager.h"
+#include "src/wasm/wasm-constants.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-objects.h"
-#include "src/wasm/wasm-opcodes.h"
namespace v8 {
namespace internal {
@@ -33,7 +34,7 @@ WasmInstanceObject* GetWasmInstanceOnStackTop(Isolate* isolate) {
WasmInstanceObject* owning_instance = nullptr;
if (FLAG_wasm_jit_to_native) {
owning_instance = WasmInstanceObject::GetOwningInstance(
- isolate->wasm_code_manager()->LookupCode(pc));
+ isolate->wasm_engine()->code_manager()->LookupCode(pc));
} else {
owning_instance = WasmInstanceObject::GetOwningInstanceGC(
isolate->inner_pointer_to_code_cache()->GetCacheEntry(pc)->code);
@@ -45,14 +46,14 @@ WasmInstanceObject* GetWasmInstanceOnStackTop(Isolate* isolate) {
Context* GetWasmContextOnStackTop(Isolate* isolate) {
return GetWasmInstanceOnStackTop(isolate)
->compiled_module()
- ->ptr_to_native_context();
+ ->native_context();
}
class ClearThreadInWasmScope {
public:
explicit ClearThreadInWasmScope(bool coming_from_wasm)
: coming_from_wasm_(coming_from_wasm) {
- DCHECK_EQ(trap_handler::UseTrapHandler() && coming_from_wasm,
+ DCHECK_EQ(trap_handler::IsTrapHandlerEnabled() && coming_from_wasm,
trap_handler::IsThreadInWasm());
if (coming_from_wasm) trap_handler::ClearThreadInWasm();
}
@@ -79,7 +80,7 @@ RUNTIME_FUNCTION(Runtime_WasmGrowMemory) {
// Set the current isolate's context.
DCHECK_NULL(isolate->context());
- isolate->set_context(instance->compiled_module()->ptr_to_native_context());
+ isolate->set_context(instance->compiled_module()->native_context());
return *isolate->factory()->NewNumberFromInt(
WasmInstanceObject::GrowMemory(isolate, instance, delta_pages));
@@ -170,7 +171,7 @@ RUNTIME_FUNCTION(Runtime_WasmGetExceptionRuntimeId) {
}
}
}
- return Smi::FromInt(wasm::WasmModule::kInvalidExceptionTag);
+ return Smi::FromInt(wasm::kInvalidExceptionTag);
}
RUNTIME_FUNCTION(Runtime_WasmExceptionGetElement) {
@@ -248,7 +249,7 @@ RUNTIME_FUNCTION(Runtime_WasmRunInterpreter) {
// Set the current isolate's context.
DCHECK_NULL(isolate->context());
- isolate->set_context(instance->compiled_module()->ptr_to_native_context());
+ isolate->set_context(instance->compiled_module()->native_context());
// Find the frame pointer of the interpreter entry.
Address frame_pointer = 0;
@@ -275,7 +276,8 @@ RUNTIME_FUNCTION(Runtime_WasmRunInterpreter) {
RUNTIME_FUNCTION(Runtime_WasmStackGuard) {
SealHandleScope shs(isolate);
DCHECK_EQ(0, args.length());
- DCHECK(!trap_handler::UseTrapHandler() || trap_handler::IsThreadInWasm());
+ DCHECK(!trap_handler::IsTrapHandlerEnabled() ||
+ trap_handler::IsThreadInWasm());
ClearThreadInWasmScope wasm_flag(true);
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index da16ee5fc8..487ee675ad 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -120,6 +120,7 @@ namespace internal {
#define FOR_EACH_INTRINSIC_COMPILER(F) \
F(CompileLazy, 1, 1) \
F(CompileOptimized_Concurrent, 1, 1) \
+ F(FunctionFirstExecution, 1, 1) \
F(CompileOptimized_NotConcurrent, 1, 1) \
F(EvictOptimizedCodeSlot, 1, 1) \
F(NotifyDeoptimized, 0, 1) \
@@ -338,7 +339,8 @@ namespace internal {
F(Typeof, 1, 1) \
F(UnwindAndFindExceptionHandler, 0, 1) \
F(AllowDynamicFunction, 1, 1) \
- F(GetTemplateObject, 1, 1)
+ F(GetTemplateObject, 1, 1) \
+ F(ReportMessage, 1, 1)
#define FOR_EACH_INTRINSIC_LITERALS(F) \
F(CreateRegExpLiteral, 4, 1) \
@@ -373,7 +375,6 @@ namespace internal {
F(StringToNumber, 1, 1) \
F(StringParseInt, 2, 1) \
F(StringParseFloat, 1, 1) \
- F(NumberToString, 1, 1) \
F(NumberToStringSkipCache, 1, 1) \
F(NumberToSmi, 1, 1) \
F(SmiLexicographicCompare, 2, 1) \
@@ -390,6 +391,10 @@ namespace internal {
F(ObjectCreate, 2, 1) \
F(InternalSetPrototype, 2, 1) \
F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
+ F(ObjectValues, 1, 1) \
+ F(ObjectValuesSkipFastPath, 1, 1) \
+ F(ObjectEntries, 1, 1) \
+ F(ObjectEntriesSkipFastPath, 1, 1) \
F(GetProperty, 2, 1) \
F(KeyedGetProperty, 2, 1) \
F(AddNamedProperty, 4, 1) \
@@ -461,26 +466,23 @@ namespace internal {
F(GreaterThanOrEqual, 2, 1) \
F(InstanceOf, 2, 1)
-#define FOR_EACH_INTRINSIC_PROMISE(F) \
- F(EnqueueMicrotask, 1, 1) \
- F(EnqueuePromiseReactionJob, 1, 1) \
- F(EnqueuePromiseResolveThenableJob, 1, 1) \
- F(PromiseHookInit, 2, 1) \
- F(PromiseHookResolve, 1, 1) \
- F(PromiseHookBefore, 1, 1) \
- F(PromiseHookAfter, 1, 1) \
- F(PromiseMarkAsHandled, 1, 1) \
- F(PromiseRejectEventFromStack, 2, 1) \
- F(PromiseRevokeReject, 1, 1) \
- F(PromiseResult, 1, 1) \
- F(PromiseStatus, 1, 1) \
+#define FOR_EACH_INTRINSIC_PROMISE(F) \
+ F(EnqueueMicrotask, 1, 1) \
+ F(PromiseHookInit, 2, 1) \
+ F(PromiseHookResolve, 1, 1) \
+ F(PromiseHookBefore, 1, 1) \
+ F(PromiseHookAfter, 1, 1) \
+ F(PromiseMarkAsHandled, 1, 1) \
+ F(PromiseRejectEventFromStack, 2, 1) \
+ F(PromiseRevokeReject, 1, 1) \
+ F(PromiseResult, 1, 1) \
+ F(PromiseStatus, 1, 1) \
F(ReportPromiseReject, 2, 1)
#define FOR_EACH_INTRINSIC_PROXY(F) \
F(IsJSProxy, 1, 1) \
F(JSProxyGetTarget, 1, 1) \
F(JSProxyGetHandler, 1, 1) \
- F(JSProxyRevoke, 1, 1) \
F(GetPropertyWithReceiver, 2, 1) \
F(CheckProxyHasTrap, 2, 1) \
F(SetPropertyWithReceiver, 5, 1) \
@@ -629,9 +631,10 @@ namespace internal {
F(HeapObjectVerify, 1, 1) \
F(WasmNumInterpretedCalls, 1, 1) \
F(RedirectToWasmInterpreter, 2, 1) \
- F(WasmTraceMemory, 4, 1) \
+ F(WasmTraceMemory, 1, 1) \
F(CompleteInobjectSlackTracking, 1, 1) \
- F(IsLiftoffFunction, 1, 1)
+ F(IsLiftoffFunction, 1, 1) \
+ F(FreezeWasmLazyCompilation, 1, 1)
#define FOR_EACH_INTRINSIC_TYPEDARRAY(F) \
F(ArrayBufferGetByteLength, 1, 1) \
@@ -681,6 +684,7 @@ namespace internal {
F(LoadIC_Miss, 4, 1) \
F(LoadPropertyWithInterceptor, 5, 1) \
F(StoreCallbackProperty, 6, 1) \
+ F(StoreGlobalIC_Miss, 4, 1) \
F(StoreGlobalIC_Slow, 5, 1) \
F(StoreIC_Miss, 5, 1) \
F(StorePropertyWithInterceptor, 5, 1) \
diff --git a/deps/v8/src/s390/assembler-s390-inl.h b/deps/v8/src/s390/assembler-s390-inl.h
index d8d7ce4256..6323730b99 100644
--- a/deps/v8/src/s390/assembler-s390-inl.h
+++ b/deps/v8/src/s390/assembler-s390-inl.h
@@ -69,9 +69,9 @@ void RelocInfo::apply(intptr_t delta) {
} else {
// mov sequence
DCHECK(IsInternalReferenceEncoded(rmode_));
- Address target = Assembler::target_address_at(pc_, host_);
- Assembler::set_target_address_at(nullptr, pc_, host_, target + delta,
- SKIP_ICACHE_FLUSH);
+ Address target = Assembler::target_address_at(pc_, constant_pool_);
+ Assembler::set_target_address_at(nullptr, pc_, constant_pool_,
+ target + delta, SKIP_ICACHE_FLUSH);
}
}
@@ -82,7 +82,7 @@ Address RelocInfo::target_internal_reference() {
} else {
// mov sequence
DCHECK(IsInternalReferenceEncoded(rmode_));
- return Assembler::target_address_at(pc_, host_);
+ return Assembler::target_address_at(pc_, constant_pool_);
}
}
@@ -93,7 +93,7 @@ Address RelocInfo::target_internal_reference_address() {
Address RelocInfo::target_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
- return Assembler::target_address_at(pc_, host_);
+ return Assembler::target_address_at(pc_, constant_pool_);
}
Address RelocInfo::target_address_address() {
@@ -118,18 +118,6 @@ Address RelocInfo::constant_pool_entry_address() {
int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
-Address Assembler::target_address_at(Address pc, Code* code) {
- Address constant_pool = code ? code->constant_pool() : nullptr;
- return target_address_at(pc, constant_pool);
-}
-
-void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
- Address target,
- ICacheFlushMode icache_flush_mode) {
- Address constant_pool = code ? code->constant_pool() : nullptr;
- set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
-}
-
Address Assembler::target_address_from_return_address(Address pc) {
// Returns the address of the call target from the return address that will
// be returned to after a call.
@@ -153,15 +141,15 @@ Handle<Object> Assembler::code_target_object_handle_at(Address pc) {
HeapObject* RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return HeapObject::cast(
- reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_)));
+ return HeapObject::cast(reinterpret_cast<Object*>(
+ Assembler::target_address_at(pc_, constant_pool_)));
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
if (rmode_ == EMBEDDED_OBJECT) {
return Handle<HeapObject>(reinterpret_cast<HeapObject**>(
- Assembler::target_address_at(pc_, host_)));
+ Assembler::target_address_at(pc_, constant_pool_)));
} else {
return Handle<HeapObject>::cast(origin->code_target_object_handle_at(pc_));
}
@@ -171,7 +159,7 @@ void RelocInfo::set_target_object(HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(target->GetIsolate(), pc_, host_,
+ Assembler::set_target_address_at(target->GetIsolate(), pc_, constant_pool_,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
@@ -183,7 +171,7 @@ void RelocInfo::set_target_object(HeapObject* target,
Address RelocInfo::target_external_reference() {
DCHECK(rmode_ == EXTERNAL_REFERENCE);
- return Assembler::target_address_at(pc_, host_);
+ return Assembler::target_address_at(pc_, constant_pool_);
}
Address RelocInfo::target_runtime_entry(Assembler* origin) {
@@ -209,10 +197,10 @@ void RelocInfo::WipeOut(Isolate* isolate) {
} else if (IsInternalReferenceEncoded(rmode_)) {
// mov sequence
// Currently used only by deserializer, no need to flush.
- Assembler::set_target_address_at(isolate, pc_, host_, nullptr,
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_, nullptr,
SKIP_ICACHE_FLUSH);
} else {
- Assembler::set_target_address_at(isolate, pc_, host_, nullptr);
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_, nullptr);
}
}
@@ -294,14 +282,14 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
// There is a FIXED_SEQUENCE assumption here
void Assembler::deserialization_set_special_target_at(
Isolate* isolate, Address instruction_payload, Code* code, Address target) {
- set_target_address_at(isolate, instruction_payload, code, target);
+ set_target_address_at(isolate, instruction_payload,
+ code ? code->constant_pool() : nullptr, target);
}
void Assembler::deserialization_set_target_internal_reference_at(
Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
if (RelocInfo::IsInternalReferenceEncoded(mode)) {
- Code* code = nullptr;
- set_target_address_at(isolate, pc, code, target, SKIP_ICACHE_FLUSH);
+ set_target_address_at(isolate, pc, nullptr, target, SKIP_ICACHE_FLUSH);
} else {
Memory::Address_at(pc) = target;
}
diff --git a/deps/v8/src/s390/assembler-s390.cc b/deps/v8/src/s390/assembler-s390.cc
index 70701beb72..166da1c451 100644
--- a/deps/v8/src/s390/assembler-s390.cc
+++ b/deps/v8/src/s390/assembler-s390.cc
@@ -271,22 +271,23 @@ bool RelocInfo::IsCodedSpecially() {
bool RelocInfo::IsInConstantPool() { return false; }
Address RelocInfo::embedded_address() const {
- return Assembler::target_address_at(pc_, host_);
+ return Assembler::target_address_at(pc_, constant_pool_);
}
uint32_t RelocInfo::embedded_size() const {
- return static_cast<uint32_t>(
- reinterpret_cast<intptr_t>(Assembler::target_address_at(pc_, host_)));
+ return static_cast<uint32_t>(reinterpret_cast<intptr_t>(
+ Assembler::target_address_at(pc_, constant_pool_)));
}
void RelocInfo::set_embedded_address(Isolate* isolate, Address address,
ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate, pc_, host_, address, flush_mode);
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_, address,
+ flush_mode);
}
void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate, pc_, host_,
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_,
reinterpret_cast<Address>(size), flush_mode);
}
@@ -441,7 +442,7 @@ int Assembler::target_at(int pos) {
} else if (LLILF == opcode || BRCL == opcode || LARL == opcode ||
BRASL == opcode) {
int32_t imm32 =
- static_cast<int32_t>(instr & (static_cast<uint64_t>(0xffffffff)));
+ static_cast<int32_t>(instr & (static_cast<uint64_t>(0xFFFFFFFF)));
if (LLILF != opcode)
imm32 <<= 1; // BR* + LARL treat immediate in # of halfwords
if (imm32 == 0) return kEndOfChain;
@@ -465,14 +466,14 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
if (BRC == opcode || BRCT == opcode || BRCTG == opcode) {
int16_t imm16 = target_pos - pos;
- instr &= (~0xffff);
+ instr &= (~0xFFFF);
DCHECK(is_int16(imm16));
instr_at_put<FourByteInstr>(pos, instr | (imm16 >> 1));
return;
} else if (BRCL == opcode || LARL == opcode || BRASL == opcode) {
// Immediate is in # of halfwords
int32_t imm32 = target_pos - pos;
- instr &= (~static_cast<uint64_t>(0xffffffff));
+ instr &= (~static_cast<uint64_t>(0xFFFFFFFF));
instr_at_put<SixByteInstr>(pos, instr | (imm32 >> 1));
return;
} else if (LLILF == opcode) {
@@ -480,7 +481,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
// Emitted label constant, not part of a branch.
// Make label relative to Code* of generated Code object.
int32_t imm32 = target_pos + (Code::kHeaderSize - kHeapObjectTag);
- instr &= (~static_cast<uint64_t>(0xffffffff));
+ instr &= (~static_cast<uint64_t>(0xFFFFFFFF));
instr_at_put<SixByteInstr>(pos, instr | imm32);
return;
}
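
target_at and target_at_put above store branch offsets for BRCL/LARL/BRASL as a count of 2-byte halfwords in the low 32 bits of the 6-byte instruction, which is why the immediate is shifted by one on the way in and out. A sketch of that round trip (the opcode bits here are placeholders, not a real s390 encoding):

    #include <cassert>
    #include <cstdint>

    uint64_t PatchOffset(uint64_t instr, int32_t byte_offset) {
      instr &= ~static_cast<uint64_t>(0xFFFFFFFF);             // clear old immediate
      return instr | static_cast<uint32_t>(byte_offset >> 1);  // bytes -> halfwords
    }

    int32_t ReadOffset(uint64_t instr) {
      int32_t imm32 = static_cast<int32_t>(instr & 0xFFFFFFFF);
      return imm32 * 2;  // halfwords -> bytes (the real code shifts left by one)
    }

    int main() {
      uint64_t instr = 0xC0ull << 40;  // placeholder opcode bits
      uint64_t patched = PatchOffset(instr, -64);
      assert(ReadOffset(patched) == -64);
      assert((patched >> 40) == 0xC0);  // opcode bits untouched
    }
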
@@ -1491,8 +1492,8 @@ void Assembler::ark(Register r1, Register r2, Register r3) {
void Assembler::asi(const MemOperand& opnd, const Operand& imm) {
DCHECK(is_int8(imm.immediate()));
DCHECK(is_int20(opnd.offset()));
- siy_form(ASI, Operand(0xff & imm.immediate()), opnd.rb(),
- 0xfffff & opnd.offset());
+ siy_form(ASI, Operand(0xFF & imm.immediate()), opnd.rb(),
+ 0xFFFFF & opnd.offset());
}
// -----------------------
@@ -1515,8 +1516,8 @@ void Assembler::agrk(Register r1, Register r2, Register r3) {
void Assembler::agsi(const MemOperand& opnd, const Operand& imm) {
DCHECK(is_int8(imm.immediate()));
DCHECK(is_int20(opnd.offset()));
- siy_form(AGSI, Operand(0xff & imm.immediate()), opnd.rb(),
- 0xfffff & opnd.offset());
+ siy_form(AGSI, Operand(0xFF & imm.immediate()), opnd.rb(),
+ 0xFFFFF & opnd.offset());
}
// -------------------------------
@@ -2091,9 +2092,9 @@ void Assembler::fidbra(DoubleRegister d1, DoubleRegister d2, FIDBRA_MASK3 m3) {
bool Assembler::IsNop(SixByteInstr instr, int type) {
DCHECK((0 == type) || (DEBUG_BREAK_NOP == type));
if (DEBUG_BREAK_NOP == type) {
- return ((instr & 0xffffffff) == 0xa53b0000); // oill r3, 0
+ return ((instr & 0xFFFFFFFF) == 0xA53B0000); // oill r3, 0
}
- return ((instr & 0xffff) == 0x1800); // lr r0,r0
+ return ((instr & 0xFFFF) == 0x1800); // lr r0,r0
}
// dummy instruction reserved for special use.
@@ -2213,8 +2214,7 @@ void Assembler::EmitRelocations() {
it != relocations_.end(); it++) {
RelocInfo::Mode rmode = it->rmode();
Address pc = buffer_ + it->position();
- Code* code = nullptr;
- RelocInfo rinfo(pc, rmode, it->data(), code);
+ RelocInfo rinfo(pc, rmode, it->data(), nullptr);
// Fix up internal references now that they are guaranteed to be bound.
if (RelocInfo::IsInternalReference(rmode)) {
@@ -2223,8 +2223,8 @@ void Assembler::EmitRelocations() {
Memory::Address_at(pc) = buffer_ + pos;
} else if (RelocInfo::IsInternalReferenceEncoded(rmode)) {
// mov sequence
- intptr_t pos = reinterpret_cast<intptr_t>(target_address_at(pc, code));
- set_target_address_at(nullptr, pc, code, buffer_ + pos,
+ intptr_t pos = reinterpret_cast<intptr_t>(target_address_at(pc, nullptr));
+ set_target_address_at(nullptr, pc, nullptr, buffer_ + pos,
SKIP_ICACHE_FLUSH);
}
diff --git a/deps/v8/src/s390/assembler-s390.h b/deps/v8/src/s390/assembler-s390.h
index e9863197a7..4a5945de87 100644
--- a/deps/v8/src/s390/assembler-s390.h
+++ b/deps/v8/src/s390/assembler-s390.h
@@ -276,6 +276,7 @@ constexpr Register kLithiumScratch = r1; // lithium scratch.
constexpr Register kRootRegister = r10; // Roots array pointer.
constexpr Register cp = r13; // JavaScript context pointer.
+constexpr bool kPadArguments = false;
constexpr bool kSimpleFPAliasing = true;
constexpr bool kSimdMaskRegisters = false;
@@ -556,10 +557,6 @@ class Assembler : public AssemblerBase {
INLINE(static void set_target_address_at(
Isolate* isolate, Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
- INLINE(static Address target_address_at(Address pc, Code* code));
- INLINE(static void set_target_address_at(
- Isolate* isolate, Address pc, Code* code, Address target,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
diff --git a/deps/v8/src/s390/code-stubs-s390.cc b/deps/v8/src/s390/code-stubs-s390.cc
index d33d09c657..783b995c72 100644
--- a/deps/v8/src/s390/code-stubs-s390.cc
+++ b/deps/v8/src/s390/code-stubs-s390.cc
@@ -122,8 +122,8 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// scratch_high LSR 31 equals zero.
// New result = (result eor 0) + 0 = result.
// If the input was negative, we have to negate the result.
- // Input_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1.
- // New result = (result eor 0xffffffff) + 1 = 0 - result.
+ // Input_high ASR 31 equals 0xFFFFFFFF and scratch_high LSR 31 equals 1.
+ // New result = (result eor 0xFFFFFFFF) + 1 = 0 - result.
__ ShiftRightArith(r0, scratch_high, Operand(31));
#if V8_TARGET_ARCH_S390X
__ lgfr(r0, r0);
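
The reworded comment describes conditional two's-complement negation without a branch: with mask = input_high ASR 31 (0 for a non-negative input, all-ones for a negative one) and bit = input_high LSR 31 (0 or 1), (result XOR mask) + bit leaves result unchanged or negates it. In portable C++:

    #include <cassert>
    #include <cstdint>

    int32_t ConditionalNegate(int32_t result, int32_t input_high) {
      uint32_t mask = static_cast<uint32_t>(input_high >> 31);  // 0 or all-ones
      uint32_t bit = static_cast<uint32_t>(input_high) >> 31;   // 0 or 1
      return static_cast<int32_t>((static_cast<uint32_t>(result) ^ mask) + bit);
    }

    int main() {
      assert(ConditionalNegate(42, 123) == 42);    // non-negative: unchanged
      assert(ConditionalNegate(42, -123) == -42);  // negative: negated
    }
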
@@ -495,6 +495,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// kCEntryFPAddress
// Frame type
__ lay(sp, MemOperand(sp, -5 * kPointerSize));
+
// Push a bad frame pointer to fail if it is used.
__ LoadImmP(r10, Operand(-1));
@@ -512,6 +513,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ lay(fp,
MemOperand(sp, -EntryFrameConstants::kCallerFPOffset + kPointerSize));
+ __ InitializeRootRegister();
+
// If this is the outermost JS call, set js_entry_sp value.
Label non_outermost_js;
ExternalReference js_entry_sp(IsolateAddressId::kJSEntrySPAddress, isolate());
@@ -564,12 +567,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// r4: receiver
// r5: argc
// r6: argv
- if (type() == StackFrame::CONSTRUCT_ENTRY) {
- __ Call(BUILTIN_CODE(isolate(), JSConstructEntryTrampoline),
- RelocInfo::CODE_TARGET);
- } else {
- __ Call(BUILTIN_CODE(isolate(), JSEntryTrampoline), RelocInfo::CODE_TARGET);
- }
+ __ Call(EntryTrampoline(), RelocInfo::CODE_TARGET);
// Unlink this frame from the handler chain.
__ PopStackHandler();
@@ -783,7 +781,7 @@ static void CreateArrayDispatch(MacroAssembler* masm,
}
// If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
} else {
UNREACHABLE();
}
@@ -822,7 +820,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
if (FLAG_debug_code) {
__ LoadP(r7, FieldMemOperand(r4, 0));
__ CompareRoot(r7, Heap::kAllocationSiteMapRootIndex);
- __ Assert(eq, kExpectedAllocationSite);
+ __ Assert(eq, AbortReason::kExpectedAllocationSite);
}
// Save the resulting elements kind in type info. We can't just store r5
@@ -846,7 +844,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
}
// If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
} else {
UNREACHABLE();
}
@@ -917,9 +915,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ LoadP(r6, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ TestIfSmi(r6);
- __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, cr0);
__ CompareObjectType(r6, r6, r7, MAP_TYPE);
- __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
// We should either have undefined in r4 or a valid AllocationSite
__ AssertUndefinedOrAllocationSite(r4, r6);
@@ -996,9 +994,9 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
__ LoadP(r5, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ TestIfSmi(r5);
- __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, cr0);
__ CompareObjectType(r5, r5, r6, MAP_TYPE);
- __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
}
// Figure out the right elements kind
@@ -1013,7 +1011,9 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
__ CmpP(r5, Operand(PACKED_ELEMENTS));
__ beq(&done);
__ CmpP(r5, Operand(HOLEY_ELEMENTS));
- __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray);
+ __ Assert(
+ eq,
+ AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray);
__ bind(&done);
}
@@ -1118,7 +1118,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
if (__ emit_debug_code()) {
__ LoadlW(r3, MemOperand(r9, kLevelOffset));
__ CmpP(r3, r8);
- __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
+ __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall);
}
__ SubP(r8, Operand(1));
__ StoreW(r8, MemOperand(r9, kLevelOffset));
diff --git a/deps/v8/src/s390/codegen-s390.cc b/deps/v8/src/s390/codegen-s390.cc
index e6c627da3a..df02570783 100644
--- a/deps/v8/src/s390/codegen-s390.cc
+++ b/deps/v8/src/s390/codegen-s390.cc
@@ -20,8 +20,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
return nullptr;
#else
size_t allocated = 0;
- byte* buffer =
- AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
+ byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@@ -38,8 +37,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, allocated);
- CHECK(base::OS::SetPermissions(buffer, allocated,
- base::OS::MemoryPermission::kReadExecute));
+ CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}
diff --git a/deps/v8/src/s390/constants-s390.cc b/deps/v8/src/s390/constants-s390.cc
index da53613bc7..bda7f61cf4 100644
--- a/deps/v8/src/s390/constants-s390.cc
+++ b/deps/v8/src/s390/constants-s390.cc
@@ -22,12 +22,12 @@ Instruction::OpcodeFormatType Instruction::OpcodeFormatTable[] = {
ONE_BYTE_OPCODE, // 0x07
ONE_BYTE_OPCODE, // 0x08
ONE_BYTE_OPCODE, // 0x09
- ONE_BYTE_OPCODE, // 0x0a
- ONE_BYTE_OPCODE, // 0x0b
- ONE_BYTE_OPCODE, // 0x0c
- ONE_BYTE_OPCODE, // 0x0d
- ONE_BYTE_OPCODE, // 0x0e
- ONE_BYTE_OPCODE, // 0x0f
+ ONE_BYTE_OPCODE, // 0x0A
+ ONE_BYTE_OPCODE, // 0x0B
+ ONE_BYTE_OPCODE, // 0x0C
+ ONE_BYTE_OPCODE, // 0x0D
+ ONE_BYTE_OPCODE, // 0x0E
+ ONE_BYTE_OPCODE, // 0x0F
ONE_BYTE_OPCODE, // 0x10
ONE_BYTE_OPCODE, // 0x11
ONE_BYTE_OPCODE, // 0x12
@@ -38,12 +38,12 @@ Instruction::OpcodeFormatType Instruction::OpcodeFormatTable[] = {
ONE_BYTE_OPCODE, // 0x17
ONE_BYTE_OPCODE, // 0x18
ONE_BYTE_OPCODE, // 0x19
- ONE_BYTE_OPCODE, // 0x1a
- ONE_BYTE_OPCODE, // 0x1b
- ONE_BYTE_OPCODE, // 0x1c
- ONE_BYTE_OPCODE, // 0x1d
- ONE_BYTE_OPCODE, // 0x1e
- ONE_BYTE_OPCODE, // 0x1f
+ ONE_BYTE_OPCODE, // 0x1A
+ ONE_BYTE_OPCODE, // 0x1B
+ ONE_BYTE_OPCODE, // 0x1C
+ ONE_BYTE_OPCODE, // 0x1D
+ ONE_BYTE_OPCODE, // 0x1E
+ ONE_BYTE_OPCODE, // 0x1F
ONE_BYTE_OPCODE, // 0x20
ONE_BYTE_OPCODE, // 0x21
ONE_BYTE_OPCODE, // 0x22
@@ -54,12 +54,12 @@ Instruction::OpcodeFormatType Instruction::OpcodeFormatTable[] = {
ONE_BYTE_OPCODE, // 0x27
ONE_BYTE_OPCODE, // 0x28
ONE_BYTE_OPCODE, // 0x29
- ONE_BYTE_OPCODE, // 0x2a
- ONE_BYTE_OPCODE, // 0x2b
- ONE_BYTE_OPCODE, // 0x2c
- ONE_BYTE_OPCODE, // 0x2d
- ONE_BYTE_OPCODE, // 0x2e
- ONE_BYTE_OPCODE, // 0x2f
+ ONE_BYTE_OPCODE, // 0x2A
+ ONE_BYTE_OPCODE, // 0x2B
+ ONE_BYTE_OPCODE, // 0x2C
+ ONE_BYTE_OPCODE, // 0x2D
+ ONE_BYTE_OPCODE, // 0x2E
+ ONE_BYTE_OPCODE, // 0x2F
ONE_BYTE_OPCODE, // 0x30
ONE_BYTE_OPCODE, // 0x31
ONE_BYTE_OPCODE, // 0x32
@@ -70,12 +70,12 @@ Instruction::OpcodeFormatType Instruction::OpcodeFormatTable[] = {
ONE_BYTE_OPCODE, // 0x37
ONE_BYTE_OPCODE, // 0x38
ONE_BYTE_OPCODE, // 0x39
- ONE_BYTE_OPCODE, // 0x3a
- ONE_BYTE_OPCODE, // 0x3b
- ONE_BYTE_OPCODE, // 0x3c
- ONE_BYTE_OPCODE, // 0x3d
- ONE_BYTE_OPCODE, // 0x3e
- ONE_BYTE_OPCODE, // 0x3f
+ ONE_BYTE_OPCODE, // 0x3A
+ ONE_BYTE_OPCODE, // 0x3B
+ ONE_BYTE_OPCODE, // 0x3C
+ ONE_BYTE_OPCODE, // 0x3D
+ ONE_BYTE_OPCODE, // 0x3E
+ ONE_BYTE_OPCODE, // 0x3F
ONE_BYTE_OPCODE, // 0x40
ONE_BYTE_OPCODE, // 0x41
ONE_BYTE_OPCODE, // 0x42
@@ -86,12 +86,12 @@ Instruction::OpcodeFormatType Instruction::OpcodeFormatTable[] = {
ONE_BYTE_OPCODE, // 0x47
ONE_BYTE_OPCODE, // 0x48
ONE_BYTE_OPCODE, // 0x49
- ONE_BYTE_OPCODE, // 0x4a
- ONE_BYTE_OPCODE, // 0x4b
- ONE_BYTE_OPCODE, // 0x4c
- ONE_BYTE_OPCODE, // 0x4d
- ONE_BYTE_OPCODE, // 0x4e
- ONE_BYTE_OPCODE, // 0x4f
+ ONE_BYTE_OPCODE, // 0x4A
+ ONE_BYTE_OPCODE, // 0x4B
+ ONE_BYTE_OPCODE, // 0x4C
+ ONE_BYTE_OPCODE, // 0x4D
+ ONE_BYTE_OPCODE, // 0x4E
+ ONE_BYTE_OPCODE, // 0x4F
ONE_BYTE_OPCODE, // 0x50
ONE_BYTE_OPCODE, // 0x51
ONE_BYTE_OPCODE, // 0x52
@@ -102,12 +102,12 @@ Instruction::OpcodeFormatType Instruction::OpcodeFormatTable[] = {
ONE_BYTE_OPCODE, // 0x57
ONE_BYTE_OPCODE, // 0x58
ONE_BYTE_OPCODE, // 0x59
- ONE_BYTE_OPCODE, // 0x5a
- ONE_BYTE_OPCODE, // 0x5b
- ONE_BYTE_OPCODE, // 0x5c
- ONE_BYTE_OPCODE, // 0x5d
- ONE_BYTE_OPCODE, // 0x5e
- ONE_BYTE_OPCODE, // 0x5f
+ ONE_BYTE_OPCODE, // 0x5A
+ ONE_BYTE_OPCODE, // 0x5B
+ ONE_BYTE_OPCODE, // 0x5C
+ ONE_BYTE_OPCODE, // 0x5D
+ ONE_BYTE_OPCODE, // 0x5E
+ ONE_BYTE_OPCODE, // 0x5F
ONE_BYTE_OPCODE, // 0x60
ONE_BYTE_OPCODE, // 0x61
ONE_BYTE_OPCODE, // 0x62
@@ -118,12 +118,12 @@ Instruction::OpcodeFormatType Instruction::OpcodeFormatTable[] = {
ONE_BYTE_OPCODE, // 0x67
ONE_BYTE_OPCODE, // 0x68
ONE_BYTE_OPCODE, // 0x69
- ONE_BYTE_OPCODE, // 0x6a
- ONE_BYTE_OPCODE, // 0x6b
- ONE_BYTE_OPCODE, // 0x6c
- ONE_BYTE_OPCODE, // 0x6d
- ONE_BYTE_OPCODE, // 0x6e
- ONE_BYTE_OPCODE, // 0x6f
+ ONE_BYTE_OPCODE, // 0x6A
+ ONE_BYTE_OPCODE, // 0x6B
+ ONE_BYTE_OPCODE, // 0x6C
+ ONE_BYTE_OPCODE, // 0x6D
+ ONE_BYTE_OPCODE, // 0x6E
+ ONE_BYTE_OPCODE, // 0x6F
ONE_BYTE_OPCODE, // 0x70
ONE_BYTE_OPCODE, // 0x71
ONE_BYTE_OPCODE, // 0x72
@@ -134,12 +134,12 @@ Instruction::OpcodeFormatType Instruction::OpcodeFormatTable[] = {
ONE_BYTE_OPCODE, // 0x77
ONE_BYTE_OPCODE, // 0x78
ONE_BYTE_OPCODE, // 0x79
- ONE_BYTE_OPCODE, // 0x7a
- ONE_BYTE_OPCODE, // 0x7b
- ONE_BYTE_OPCODE, // 0x7c
- ONE_BYTE_OPCODE, // 0x7d
- ONE_BYTE_OPCODE, // 0x7e
- ONE_BYTE_OPCODE, // 0x7f
+ ONE_BYTE_OPCODE, // 0x7A
+ ONE_BYTE_OPCODE, // 0x7B
+ ONE_BYTE_OPCODE, // 0x7C
+ ONE_BYTE_OPCODE, // 0x7D
+ ONE_BYTE_OPCODE, // 0x7E
+ ONE_BYTE_OPCODE, // 0x7F
ONE_BYTE_OPCODE, // 0x80
ONE_BYTE_OPCODE, // 0x81
ONE_BYTE_OPCODE, // 0x82
@@ -150,12 +150,12 @@ Instruction::OpcodeFormatType Instruction::OpcodeFormatTable[] = {
ONE_BYTE_OPCODE, // 0x87
ONE_BYTE_OPCODE, // 0x88
ONE_BYTE_OPCODE, // 0x89
- ONE_BYTE_OPCODE, // 0x8a
- ONE_BYTE_OPCODE, // 0x8b
- ONE_BYTE_OPCODE, // 0x8c
- ONE_BYTE_OPCODE, // 0x8d
- ONE_BYTE_OPCODE, // 0x8e
- ONE_BYTE_OPCODE, // 0x8f
+ ONE_BYTE_OPCODE, // 0x8A
+ ONE_BYTE_OPCODE, // 0x8B
+ ONE_BYTE_OPCODE, // 0x8C
+ ONE_BYTE_OPCODE, // 0x8D
+ ONE_BYTE_OPCODE, // 0x8E
+ ONE_BYTE_OPCODE, // 0x8F
ONE_BYTE_OPCODE, // 0x90
ONE_BYTE_OPCODE, // 0x91
ONE_BYTE_OPCODE, // 0x92
@@ -166,108 +166,108 @@ Instruction::OpcodeFormatType Instruction::OpcodeFormatTable[] = {
ONE_BYTE_OPCODE, // 0x97
ONE_BYTE_OPCODE, // 0x98
ONE_BYTE_OPCODE, // 0x99
- ONE_BYTE_OPCODE, // 0x9a
- ONE_BYTE_OPCODE, // 0x9b
- TWO_BYTE_DISJOINT_OPCODE, // 0x9c
- TWO_BYTE_DISJOINT_OPCODE, // 0x9d
- TWO_BYTE_DISJOINT_OPCODE, // 0x9e
- TWO_BYTE_DISJOINT_OPCODE, // 0x9f
- TWO_BYTE_DISJOINT_OPCODE, // 0xa0
- TWO_BYTE_DISJOINT_OPCODE, // 0xa1
- TWO_BYTE_DISJOINT_OPCODE, // 0xa2
- TWO_BYTE_DISJOINT_OPCODE, // 0xa3
- TWO_BYTE_DISJOINT_OPCODE, // 0xa4
- THREE_NIBBLE_OPCODE, // 0xa5
- TWO_BYTE_DISJOINT_OPCODE, // 0xa6
- THREE_NIBBLE_OPCODE, // 0xa7
- ONE_BYTE_OPCODE, // 0xa8
- ONE_BYTE_OPCODE, // 0xa9
- ONE_BYTE_OPCODE, // 0xaa
- ONE_BYTE_OPCODE, // 0xab
- ONE_BYTE_OPCODE, // 0xac
- ONE_BYTE_OPCODE, // 0xad
- ONE_BYTE_OPCODE, // 0xae
- ONE_BYTE_OPCODE, // 0xaf
- ONE_BYTE_OPCODE, // 0xb0
- ONE_BYTE_OPCODE, // 0xb1
- TWO_BYTE_OPCODE, // 0xb2
- TWO_BYTE_OPCODE, // 0xb3
- TWO_BYTE_DISJOINT_OPCODE, // 0xb4
- TWO_BYTE_DISJOINT_OPCODE, // 0xb5
- TWO_BYTE_DISJOINT_OPCODE, // 0xb6
- TWO_BYTE_DISJOINT_OPCODE, // 0xb7
- TWO_BYTE_DISJOINT_OPCODE, // 0xb8
- TWO_BYTE_OPCODE, // 0xb9
- ONE_BYTE_OPCODE, // 0xba
- ONE_BYTE_OPCODE, // 0xbb
- ONE_BYTE_OPCODE, // 0xbc
- ONE_BYTE_OPCODE, // 0xbd
- ONE_BYTE_OPCODE, // 0xbe
- ONE_BYTE_OPCODE, // 0xbf
- THREE_NIBBLE_OPCODE, // 0xc0
- THREE_NIBBLE_OPCODE, // 0xc1
- THREE_NIBBLE_OPCODE, // 0xc2
- THREE_NIBBLE_OPCODE, // 0xc3
- THREE_NIBBLE_OPCODE, // 0xc4
- THREE_NIBBLE_OPCODE, // 0xc5
- THREE_NIBBLE_OPCODE, // 0xc6
- ONE_BYTE_OPCODE, // 0xc7
- THREE_NIBBLE_OPCODE, // 0xc8
- THREE_NIBBLE_OPCODE, // 0xc9
- THREE_NIBBLE_OPCODE, // 0xca
- THREE_NIBBLE_OPCODE, // 0xcb
- THREE_NIBBLE_OPCODE, // 0xcc
- TWO_BYTE_DISJOINT_OPCODE, // 0xcd
- TWO_BYTE_DISJOINT_OPCODE, // 0xce
- TWO_BYTE_DISJOINT_OPCODE, // 0xcf
- ONE_BYTE_OPCODE, // 0xd0
- ONE_BYTE_OPCODE, // 0xd1
- ONE_BYTE_OPCODE, // 0xd2
- ONE_BYTE_OPCODE, // 0xd3
- ONE_BYTE_OPCODE, // 0xd4
- ONE_BYTE_OPCODE, // 0xd5
- ONE_BYTE_OPCODE, // 0xd6
- ONE_BYTE_OPCODE, // 0xd7
- ONE_BYTE_OPCODE, // 0xd8
- ONE_BYTE_OPCODE, // 0xd9
- ONE_BYTE_OPCODE, // 0xda
- ONE_BYTE_OPCODE, // 0xdb
- ONE_BYTE_OPCODE, // 0xdc
- ONE_BYTE_OPCODE, // 0xdd
- ONE_BYTE_OPCODE, // 0xde
- ONE_BYTE_OPCODE, // 0xdf
- ONE_BYTE_OPCODE, // 0xe0
- ONE_BYTE_OPCODE, // 0xe1
- ONE_BYTE_OPCODE, // 0xe2
- TWO_BYTE_DISJOINT_OPCODE, // 0xe3
- TWO_BYTE_DISJOINT_OPCODE, // 0xe4
- TWO_BYTE_OPCODE, // 0xe5
- TWO_BYTE_DISJOINT_OPCODE, // 0xe6
- TWO_BYTE_DISJOINT_OPCODE, // 0xe7
- ONE_BYTE_OPCODE, // 0xe8
- ONE_BYTE_OPCODE, // 0xe9
- ONE_BYTE_OPCODE, // 0xea
- TWO_BYTE_DISJOINT_OPCODE, // 0xeb
- TWO_BYTE_DISJOINT_OPCODE, // 0xec
- TWO_BYTE_DISJOINT_OPCODE, // 0xed
- ONE_BYTE_OPCODE, // 0xee
- ONE_BYTE_OPCODE, // 0xef
- ONE_BYTE_OPCODE, // 0xf0
- ONE_BYTE_OPCODE, // 0xf1
- ONE_BYTE_OPCODE, // 0xf2
- ONE_BYTE_OPCODE, // 0xf3
- ONE_BYTE_OPCODE, // 0xf4
- ONE_BYTE_OPCODE, // 0xf5
- ONE_BYTE_OPCODE, // 0xf6
- ONE_BYTE_OPCODE, // 0xf7
- ONE_BYTE_OPCODE, // 0xf8
- ONE_BYTE_OPCODE, // 0xf9
- ONE_BYTE_OPCODE, // 0xfa
- ONE_BYTE_OPCODE, // 0xfb
- ONE_BYTE_OPCODE, // 0xfc
- ONE_BYTE_OPCODE, // 0xfd
- TWO_BYTE_DISJOINT_OPCODE, // 0xfe
- TWO_BYTE_DISJOINT_OPCODE, // 0xff
+ ONE_BYTE_OPCODE, // 0x9A
+ ONE_BYTE_OPCODE, // 0x9B
+ TWO_BYTE_DISJOINT_OPCODE, // 0x9C
+ TWO_BYTE_DISJOINT_OPCODE, // 0x9D
+ TWO_BYTE_DISJOINT_OPCODE, // 0x9E
+ TWO_BYTE_DISJOINT_OPCODE, // 0x9F
+ TWO_BYTE_DISJOINT_OPCODE, // 0xA0
+ TWO_BYTE_DISJOINT_OPCODE, // 0xA1
+ TWO_BYTE_DISJOINT_OPCODE, // 0xA2
+ TWO_BYTE_DISJOINT_OPCODE, // 0xA3
+ TWO_BYTE_DISJOINT_OPCODE, // 0xA4
+ THREE_NIBBLE_OPCODE, // 0xA5
+ TWO_BYTE_DISJOINT_OPCODE, // 0xA6
+ THREE_NIBBLE_OPCODE, // 0xA7
+ ONE_BYTE_OPCODE, // 0xA8
+ ONE_BYTE_OPCODE, // 0xA9
+ ONE_BYTE_OPCODE, // 0xAA
+ ONE_BYTE_OPCODE, // 0xAB
+ ONE_BYTE_OPCODE, // 0xAC
+ ONE_BYTE_OPCODE, // 0xAD
+ ONE_BYTE_OPCODE, // 0xAE
+ ONE_BYTE_OPCODE, // 0xAF
+ ONE_BYTE_OPCODE, // 0xB0
+ ONE_BYTE_OPCODE, // 0xB1
+ TWO_BYTE_OPCODE, // 0xB2
+ TWO_BYTE_OPCODE, // 0xB3
+ TWO_BYTE_DISJOINT_OPCODE, // 0xB4
+ TWO_BYTE_DISJOINT_OPCODE, // 0xB5
+ TWO_BYTE_DISJOINT_OPCODE, // 0xB6
+ TWO_BYTE_DISJOINT_OPCODE, // 0xB7
+ TWO_BYTE_DISJOINT_OPCODE, // 0xB8
+ TWO_BYTE_OPCODE, // 0xB9
+ ONE_BYTE_OPCODE, // 0xBA
+ ONE_BYTE_OPCODE, // 0xBB
+ ONE_BYTE_OPCODE, // 0xBC
+ ONE_BYTE_OPCODE, // 0xBD
+ ONE_BYTE_OPCODE, // 0xBE
+ ONE_BYTE_OPCODE, // 0xBF
+ THREE_NIBBLE_OPCODE, // 0xC0
+ THREE_NIBBLE_OPCODE, // 0xC1
+ THREE_NIBBLE_OPCODE, // 0xC2
+ THREE_NIBBLE_OPCODE, // 0xC3
+ THREE_NIBBLE_OPCODE, // 0xC4
+ THREE_NIBBLE_OPCODE, // 0xC5
+ THREE_NIBBLE_OPCODE, // 0xC6
+ ONE_BYTE_OPCODE, // 0xC7
+ THREE_NIBBLE_OPCODE, // 0xC8
+ THREE_NIBBLE_OPCODE, // 0xC9
+ THREE_NIBBLE_OPCODE, // 0xCA
+ THREE_NIBBLE_OPCODE, // 0xCB
+ THREE_NIBBLE_OPCODE, // 0xCC
+ TWO_BYTE_DISJOINT_OPCODE, // 0xCD
+ TWO_BYTE_DISJOINT_OPCODE, // 0xCE
+ TWO_BYTE_DISJOINT_OPCODE, // 0xCF
+ ONE_BYTE_OPCODE, // 0xD0
+ ONE_BYTE_OPCODE, // 0xD1
+ ONE_BYTE_OPCODE, // 0xD2
+ ONE_BYTE_OPCODE, // 0xD3
+ ONE_BYTE_OPCODE, // 0xD4
+ ONE_BYTE_OPCODE, // 0xD5
+ ONE_BYTE_OPCODE, // 0xD6
+ ONE_BYTE_OPCODE, // 0xD7
+ ONE_BYTE_OPCODE, // 0xD8
+ ONE_BYTE_OPCODE, // 0xD9
+ ONE_BYTE_OPCODE, // 0xDA
+ ONE_BYTE_OPCODE, // 0xDB
+ ONE_BYTE_OPCODE, // 0xDC
+ ONE_BYTE_OPCODE, // 0xDD
+ ONE_BYTE_OPCODE, // 0xDE
+ ONE_BYTE_OPCODE, // 0xDF
+ ONE_BYTE_OPCODE, // 0xE0
+ ONE_BYTE_OPCODE, // 0xE1
+ ONE_BYTE_OPCODE, // 0xE2
+ TWO_BYTE_DISJOINT_OPCODE, // 0xE3
+ TWO_BYTE_DISJOINT_OPCODE, // 0xE4
+ TWO_BYTE_OPCODE, // 0xE5
+ TWO_BYTE_DISJOINT_OPCODE, // 0xE6
+ TWO_BYTE_DISJOINT_OPCODE, // 0xE7
+ ONE_BYTE_OPCODE, // 0xE8
+ ONE_BYTE_OPCODE, // 0xE9
+ ONE_BYTE_OPCODE, // 0xEA
+ TWO_BYTE_DISJOINT_OPCODE, // 0xEB
+ TWO_BYTE_DISJOINT_OPCODE, // 0xEC
+ TWO_BYTE_DISJOINT_OPCODE, // 0xED
+ ONE_BYTE_OPCODE, // 0xEE
+ ONE_BYTE_OPCODE, // 0xEF
+ ONE_BYTE_OPCODE, // 0xF0
+ ONE_BYTE_OPCODE, // 0xF1
+ ONE_BYTE_OPCODE, // 0xF2
+ ONE_BYTE_OPCODE, // 0xF3
+ ONE_BYTE_OPCODE, // 0xF4
+ ONE_BYTE_OPCODE, // 0xF5
+ ONE_BYTE_OPCODE, // 0xF6
+ ONE_BYTE_OPCODE, // 0xF7
+ ONE_BYTE_OPCODE, // 0xF8
+ ONE_BYTE_OPCODE, // 0xF9
+ ONE_BYTE_OPCODE, // 0xFA
+ ONE_BYTE_OPCODE, // 0xFB
+ ONE_BYTE_OPCODE, // 0xFC
+ ONE_BYTE_OPCODE, // 0xFD
+ TWO_BYTE_DISJOINT_OPCODE, // 0xFE
+ TWO_BYTE_DISJOINT_OPCODE, // 0xFF
};
// These register names are defined in a way to match the native disassembler
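The table above is only a hex-casing cleanup, but its purpose is easy to miss in the noise: the first byte of an instruction indexes the table to pick a decode strategy. A stand-alone sketch of that dispatch; the extraction rules for the disjoint and three-nibble forms are simplified assumptions, not S390's exact encoding:

```cpp
#include <cstdint>

enum OpcodeFormatType {
  ONE_BYTE_OPCODE,           // opcode is instr[0]
  TWO_BYTE_OPCODE,           // opcode spans instr[0..1]
  TWO_BYTE_DISJOINT_OPCODE,  // opcode split across first and last byte
  THREE_NIBBLE_OPCODE,       // opcode is instr[0] plus a later nibble
};

// Given a 256-entry format table and the raw bytes, recover a normalized
// opcode. Simplified: real decoding also depends on instruction length.
uint16_t ExtractOpcode(const OpcodeFormatType* table, const uint8_t* instr,
                       int length) {
  switch (table[instr[0]]) {
    case ONE_BYTE_OPCODE:
      return instr[0];
    case TWO_BYTE_OPCODE:
      return static_cast<uint16_t>((instr[0] << 8) | instr[1]);
    case TWO_BYTE_DISJOINT_OPCODE:
      return static_cast<uint16_t>((instr[0] << 8) | instr[length - 1]);
    case THREE_NIBBLE_OPCODE:
      return static_cast<uint16_t>((instr[0] << 4) | (instr[1] & 0x0F));
  }
  return 0;
}
```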
diff --git a/deps/v8/src/s390/interface-descriptors-s390.cc b/deps/v8/src/s390/interface-descriptors-s390.cc
index a8eb807131..3cb4f2e375 100644
--- a/deps/v8/src/s390/interface-descriptors-s390.cc
+++ b/deps/v8/src/s390/interface-descriptors-s390.cc
@@ -43,8 +43,6 @@ const Register LoadDescriptor::SlotRegister() { return r2; }
const Register LoadWithVectorDescriptor::VectorRegister() { return r5; }
-const Register LoadICProtoArrayDescriptor::HandlerRegister() { return r6; }
-
const Register StoreDescriptor::ReceiverRegister() { return r3; }
const Register StoreDescriptor::NameRegister() { return r4; }
const Register StoreDescriptor::ValueRegister() { return r2; }
@@ -198,6 +196,12 @@ void TransitionElementsKindDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void AbortJSDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
data->InitializePlatformSpecific(0, nullptr, nullptr);
diff --git a/deps/v8/src/s390/macro-assembler-s390.cc b/deps/v8/src/s390/macro-assembler-s390.cc
index 44f1ba5abb..fe24884378 100644
--- a/deps/v8/src/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/s390/macro-assembler-s390.cc
@@ -443,7 +443,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
DCHECK(object != value);
if (emit_debug_code()) {
CmpP(value, MemOperand(address));
- Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+ Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
}
if (remembered_set_action == OMIT_REMEMBERED_SET &&
@@ -1057,9 +1057,10 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
LoadP(cp, MemOperand(ip));
#ifdef DEBUG
+ mov(r1, Operand(Context::kInvalidContext));
mov(ip,
Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate())));
- StoreP(MemOperand(ip), Operand(0, kRelocInfo_NONEPTR), r0);
+ StoreP(r1, MemOperand(ip));
#endif
// Tear down the exit frame, pop the arguments, and return.
@@ -1115,7 +1116,7 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
if (FLAG_debug_code) {
CmpLogicalP(src_reg, dst_reg);
- Check(lt, kStackAccessBelowStackPointer);
+ Check(lt, AbortReason::kStackAccessBelowStackPointer);
}
// Restore caller's frame pointer and return address now as they will be
@@ -1352,7 +1353,7 @@ void MacroAssembler::MaybeDropFrames() {
void MacroAssembler::PushStackHandler() {
// Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
// Link the current handler as the next handler.
@@ -1362,6 +1363,10 @@ void MacroAssembler::PushStackHandler() {
// Buy the full stack frame for 5 slots.
lay(sp, MemOperand(sp, -StackHandlerConstants::kSize));
+ // Store padding.
+ mov(r0, Operand(Smi::kZero));
+ StoreP(r0, MemOperand(sp)); // Padding.
+
// Copy the old handler into the next handler slot.
mvc(MemOperand(sp, StackHandlerConstants::kNextOffset), MemOperand(r7),
kPointerSize);
@@ -1370,15 +1375,16 @@ void MacroAssembler::PushStackHandler() {
}
void MacroAssembler::PopStackHandler() {
- STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
// Pop the Next Handler into r3 and store it into Handler Address reference.
Pop(r3);
mov(ip,
Operand(ExternalReference(IsolateAddressId::kHandlerAddress, isolate())));
-
StoreP(r3, MemOperand(ip));
+
+ Drop(1); // Drop padding.
}
void MacroAssembler::CompareObjectType(Register object, Register map,
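The STATIC_ASSERTs above grow the stack handler from one to two pointer slots, with an explicit zero written into the new slot, presumably to keep the handler frame aligned to a pointer pair. A plain-C++ sketch of the linked-list discipline PushStackHandler/PopStackHandler maintain (illustrative layout, not the generated code):

```cpp
#include <cstdint>

struct StackHandler {
  StackHandler* next;  // kNextOffset == 0 * kPointerSize
  intptr_t padding;    // new second slot: kSize == 2 * kPointerSize
};

thread_local StackHandler* handler_chain = nullptr;

void PushStackHandler(StackHandler* h) {
  h->padding = 0;           // the "Store padding." step above
  h->next = handler_chain;  // the mvc copy of the old handler
  handler_chain = h;
}

void PopStackHandler() {
  handler_chain = handler_chain->next;  // Pop(r3) + StoreP, then Drop(1)
}
```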
@@ -1392,7 +1398,7 @@ void MacroAssembler::CompareObjectType(Register object, Register map,
void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
InstanceType type) {
STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
- STATIC_ASSERT(LAST_TYPE <= 0xffff);
+ STATIC_ASSERT(LAST_TYPE <= 0xFFFF);
LoadHalfWordP(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
CmpP(type_reg, Operand(type));
}
@@ -1547,12 +1553,11 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
}
}
-void TurboAssembler::Assert(Condition cond, BailoutReason reason,
- CRegister cr) {
+void TurboAssembler::Assert(Condition cond, AbortReason reason, CRegister cr) {
if (emit_debug_code()) Check(cond, reason, cr);
}
-void TurboAssembler::Check(Condition cond, BailoutReason reason, CRegister cr) {
+void TurboAssembler::Check(Condition cond, AbortReason reason, CRegister cr) {
Label L;
b(cond, &L);
Abort(reason);
@@ -1560,11 +1565,11 @@ void TurboAssembler::Check(Condition cond, BailoutReason reason, CRegister cr) {
bind(&L);
}
-void TurboAssembler::Abort(BailoutReason reason) {
+void TurboAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
#ifdef DEBUG
- const char* msg = GetBailoutReason(reason);
+ const char* msg = GetAbortReason(reason);
if (msg != nullptr) {
RecordComment("Abort message: ");
RecordComment(msg);
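The rename from BailoutReason to AbortReason threads through the Assert/Check/Abort layering, which is worth spelling out: Assert is compiled in only under --debug-code, Check always verifies, and Abort prints the reason and stops. A stand-alone sketch with hypothetical reason values (the real enum is defined elsewhere in V8):

```cpp
#include <cstdio>
#include <cstdlib>

enum class AbortReason { kOperandIsASmi, kUnexpectedValue };

const char* GetAbortReason(AbortReason reason) {
  switch (reason) {
    case AbortReason::kOperandIsASmi:   return "Operand is a Smi";
    case AbortReason::kUnexpectedValue: return "Unexpected value";
  }
  return "unknown";
}

[[noreturn]] void Abort(AbortReason reason) {
  std::fprintf(stderr, "Abort message: %s\n", GetAbortReason(reason));
  std::abort();
}

void Check(bool cond, AbortReason reason) {
  if (!cond) Abort(reason);  // always enabled
}

void Assert(bool cond, AbortReason reason, bool emit_debug_code) {
  if (emit_debug_code) Check(cond, reason);  // only under --debug-code
}
```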
@@ -1617,7 +1622,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object);
- Check(ne, kOperandIsASmi, cr0);
+ Check(ne, AbortReason::kOperandIsASmi, cr0);
}
}
@@ -1625,7 +1630,7 @@ void MacroAssembler::AssertSmi(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object);
- Check(eq, kOperandIsNotSmi, cr0);
+ Check(eq, AbortReason::kOperandIsNotASmi, cr0);
}
}
@@ -1633,11 +1638,11 @@ void MacroAssembler::AssertFixedArray(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object);
- Check(ne, kOperandIsASmiAndNotAFixedArray, cr0);
+ Check(ne, AbortReason::kOperandIsASmiAndNotAFixedArray, cr0);
push(object);
CompareObjectType(object, object, object, FIXED_ARRAY_TYPE);
pop(object);
- Check(eq, kOperandIsNotAFixedArray);
+ Check(eq, AbortReason::kOperandIsNotAFixedArray);
}
}
@@ -1645,11 +1650,11 @@ void MacroAssembler::AssertFunction(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object);
- Check(ne, kOperandIsASmiAndNotAFunction, cr0);
+ Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, cr0);
push(object);
CompareObjectType(object, object, object, JS_FUNCTION_TYPE);
pop(object);
- Check(eq, kOperandIsNotAFunction);
+ Check(eq, AbortReason::kOperandIsNotAFunction);
}
}
@@ -1657,18 +1662,18 @@ void MacroAssembler::AssertBoundFunction(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object);
- Check(ne, kOperandIsASmiAndNotABoundFunction, cr0);
+ Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction, cr0);
push(object);
CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
pop(object);
- Check(eq, kOperandIsNotABoundFunction);
+ Check(eq, AbortReason::kOperandIsNotABoundFunction);
}
}
void MacroAssembler::AssertGeneratorObject(Register object) {
if (!emit_debug_code()) return;
TestIfSmi(object);
- Check(ne, kOperandIsASmiAndNotAGeneratorObject, cr0);
+ Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, cr0);
// Load map
Register map = object;
@@ -1687,7 +1692,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
bind(&do_check);
// Restore generator object to register and perform assertion
pop(object);
- Check(eq, kOperandIsNotAGeneratorObject);
+ Check(eq, AbortReason::kOperandIsNotAGeneratorObject);
}
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
@@ -1699,7 +1704,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
beq(&done_checking, Label::kNear);
LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
- Assert(eq, kExpectedUndefinedOrCell);
+ Assert(eq, AbortReason::kExpectedUndefinedOrCell);
bind(&done_checking);
}
}
@@ -3301,7 +3306,7 @@ void TurboAssembler::LoadIntLiteral(Register dst, int value) {
void TurboAssembler::LoadSmiLiteral(Register dst, Smi* smi) {
intptr_t value = reinterpret_cast<intptr_t>(smi);
#if V8_TARGET_ARCH_S390X
- DCHECK_EQ(value & 0xffffffff, 0);
+ DCHECK_EQ(value & 0xFFFFFFFF, 0);
// The smi value is loaded into the upper 32 bits. The lower 32 bits are zeros.
llihf(dst, Operand(value >> 32));
#else
@@ -3402,7 +3407,7 @@ void TurboAssembler::SubSmiLiteral(Register dst, Register src, Smi* smi,
void TurboAssembler::AndSmiLiteral(Register dst, Register src, Smi* smi) {
if (dst != src) LoadRR(dst, src);
#if V8_TARGET_ARCH_S390X
- DCHECK_EQ(reinterpret_cast<intptr_t>(smi) & 0xffffffff, 0);
+ DCHECK_EQ(reinterpret_cast<intptr_t>(smi) & 0xFFFFFFFF, 0);
int value = static_cast<int>(reinterpret_cast<intptr_t>(smi) >> 32);
nihf(dst, Operand(value));
#else
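Both DCHECKs above encode the same invariant: on 64-bit S390 a Smi keeps its 32-bit payload entirely in the upper word, so the low 32 bits must be zero. A small sketch of that encoding (assumes a 64-bit target):

```cpp
#include <cassert>
#include <cstdint>

intptr_t ToSmi(int32_t value) {
  // Shift through uint64_t to avoid signed-overflow UB on negatives.
  return static_cast<intptr_t>(
      static_cast<uint64_t>(static_cast<uint32_t>(value)) << 32);
}

int32_t FromSmi(intptr_t smi) {
  assert((smi & 0xFFFFFFFF) == 0);  // the DCHECK_EQ above, verbatim
  return static_cast<int32_t>(static_cast<uint64_t>(smi) >> 32);
}

int main() {
  static_assert(sizeof(intptr_t) == 8, "sketch assumes a 64-bit target");
  assert(FromSmi(ToSmi(-7)) == -7);
  assert((ToSmi(1) & 0xFFFFFFFF) == 0);
  return 0;
}
```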
diff --git a/deps/v8/src/s390/macro-assembler-s390.h b/deps/v8/src/s390/macro-assembler-s390.h
index 4076c171ad..fcc62f21a9 100644
--- a/deps/v8/src/s390/macro-assembler-s390.h
+++ b/deps/v8/src/s390/macro-assembler-s390.h
@@ -873,13 +873,13 @@ class TurboAssembler : public Assembler {
// Calls Abort(msg) if the condition cond is not satisfied.
// Use --debug_code to enable.
- void Assert(Condition cond, BailoutReason reason, CRegister cr = cr7);
+ void Assert(Condition cond, AbortReason reason, CRegister cr = cr7);
// Like Assert(), but always enabled.
- void Check(Condition cond, BailoutReason reason, CRegister cr = cr7);
+ void Check(Condition cond, AbortReason reason, CRegister cr = cr7);
// Print a message to stdout and abort execution.
- void Abort(BailoutReason reason);
+ void Abort(AbortReason reason);
void set_has_frame(bool value) { has_frame_ = value; }
bool has_frame() { return has_frame_; }
diff --git a/deps/v8/src/s390/simulator-s390.cc b/deps/v8/src/s390/simulator-s390.cc
index a130f359f0..f6754bdd4b 100644
--- a/deps/v8/src/s390/simulator-s390.cc
+++ b/deps/v8/src/s390/simulator-s390.cc
@@ -227,7 +227,7 @@ void S390Debugger::Debug() {
// If at a breakpoint, proceed past it.
if ((reinterpret_cast<Instruction*>(sim_->get_pc()))
- ->InstructionBits() == 0x7d821008) {
+ ->InstructionBits() == 0x7D821008) {
sim_->set_pc(sim_->get_pc() + sizeof(FourByteInstr));
} else {
sim_->ExecuteInstruction(
@@ -273,7 +273,7 @@ void S390Debugger::Debug() {
} else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
// If at a breakpoint, proceed past it.
if ((reinterpret_cast<Instruction*>(sim_->get_pc()))
- ->InstructionBits() == 0x7d821008) {
+ ->InstructionBits() == 0x7D821008) {
sim_->set_pc(sim_->get_pc() + sizeof(FourByteInstr));
} else {
// Execute the one instruction we broke at with breakpoints disabled.
@@ -331,7 +331,7 @@ void S390Debugger::Debug() {
PrintF("%3s: %f 0x%08x %08x\n",
GetRegConfig()->GetDoubleRegisterName(i), dvalue,
static_cast<uint32_t>(as_words >> 32),
- static_cast<uint32_t>(as_words & 0xffffffff));
+ static_cast<uint32_t>(as_words & 0xFFFFFFFF));
}
} else if (arg1[0] == 'r' &&
(arg1[1] >= '0' && arg1[1] <= '2' &&
@@ -353,7 +353,7 @@ void S390Debugger::Debug() {
uint64_t as_words = bit_cast<uint64_t>(dvalue);
PrintF("%s: %f 0x%08x %08x\n", arg1, dvalue,
static_cast<uint32_t>(as_words >> 32),
- static_cast<uint32_t>(as_words & 0xffffffff));
+ static_cast<uint32_t>(as_words & 0xFFFFFFFF));
} else {
PrintF("%s unrecognized\n", arg1);
}
@@ -661,6 +661,15 @@ void Simulator::set_last_debugger_input(char* input) {
last_debugger_input_ = input;
}
+void Simulator::SetRedirectInstruction(Instruction* instruction) {
+// we use TRAP4 here (0xB2FF)
+#if V8_TARGET_LITTLE_ENDIAN
+ instruction->SetInstructionBits(0x1000FFB2);
+#else
+ instruction->SetInstructionBits(0xB2FF0000 | kCallRtRedirected);
+#endif
+}
+
void Simulator::FlushICache(base::CustomMatcherHashMap* i_cache,
void* start_addr, size_t size) {
intptr_t start = reinterpret_cast<intptr_t>(start_addr);
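SetRedirectInstruction above writes the TRAP4-based redirect in two byte orders; the two constants are the same four bytes in memory. A quick check of that equivalence, assuming kCallRtRedirected == 0x0010 (implied by the little-endian constant, not confirmed by this hunk):

```cpp
#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kCallRtRedirected = 0x0010;  // assumption for illustration
  uint32_t big_endian_word = 0xB2FF0000 | kCallRtRedirected;
  uint32_t little_endian_word = 0x1000FFB2;
  // GCC/Clang builtin, matching the ByteReverse helpers in this file.
  assert(__builtin_bswap32(big_endian_word) == little_endian_word);
  return 0;
}
```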
@@ -728,15 +737,6 @@ void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
}
}
-void Simulator::Initialize(Isolate* isolate) {
- if (isolate->simulator_initialized()) return;
- isolate->set_simulator_initialized(true);
- ::v8::internal::ExternalReference::set_redirector(isolate,
- &RedirectExternalReference);
- static base::OnceType once = V8_ONCE_INIT;
- base::CallOnce(&once, &Simulator::EvalTableInit);
-}
-
Simulator::EvaluateFuncType Simulator::EvalTable[] = {nullptr};
void Simulator::EvalTableInit() {
@@ -1493,7 +1493,8 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
i_cache_ = new base::CustomMatcherHashMap(&ICacheMatch);
isolate_->set_simulator_i_cache(i_cache_);
}
- Initialize(isolate);
+ static base::OnceType once = V8_ONCE_INIT;
+ base::CallOnce(&once, &Simulator::EvalTableInit);
// Set up simulator support first. Some of this information is needed to
// set up the architecture state.
#if V8_TARGET_ARCH_S390X
@@ -1538,119 +1539,6 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
Simulator::~Simulator() { free(stack_); }
-// When the generated code calls an external reference we need to catch that in
-// the simulator. The external reference will be a function compiled for the
-// host architecture. We need to call that function instead of trying to
-// execute it with the simulator. We do that by redirecting the external
-// reference to a svc (Supervisor Call) instruction that is handled by
-// the simulator. We write the original destination of the jump just at a known
-// offset from the svc instruction so the simulator knows what to call.
-class Redirection {
- public:
- Redirection(Isolate* isolate, void* external_function,
- ExternalReference::Type type)
- : external_function_(external_function),
-// we use TRAP4 here (0xBF22)
-#if V8_TARGET_LITTLE_ENDIAN
- swi_instruction_(0x1000FFB2),
-#else
- swi_instruction_(0xB2FF0000 | kCallRtRedirected),
-#endif
- type_(type),
- next_(nullptr) {
- next_ = isolate->simulator_redirection();
- Simulator::current(isolate)->FlushICache(
- isolate->simulator_i_cache(),
- reinterpret_cast<void*>(&swi_instruction_), sizeof(FourByteInstr));
- isolate->set_simulator_redirection(this);
- if (ABI_USES_FUNCTION_DESCRIPTORS) {
- function_descriptor_[0] = reinterpret_cast<intptr_t>(&swi_instruction_);
- function_descriptor_[1] = 0;
- function_descriptor_[2] = 0;
- }
- }
-
- void* address() {
- if (ABI_USES_FUNCTION_DESCRIPTORS) {
- return reinterpret_cast<void*>(function_descriptor_);
- } else {
- return reinterpret_cast<void*>(&swi_instruction_);
- }
- }
-
- void* external_function() { return external_function_; }
- ExternalReference::Type type() { return type_; }
-
- static Redirection* Get(Isolate* isolate, void* external_function,
- ExternalReference::Type type) {
- Redirection* current = isolate->simulator_redirection();
- for (; current != nullptr; current = current->next_) {
- if (current->external_function_ == external_function &&
- current->type_ == type) {
- return current;
- }
- }
- return new Redirection(isolate, external_function, type);
- }
-
- static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
- char* addr_of_swi = reinterpret_cast<char*>(swi_instruction);
- char* addr_of_redirection =
- addr_of_swi - offsetof(Redirection, swi_instruction_);
- return reinterpret_cast<Redirection*>(addr_of_redirection);
- }
-
- static Redirection* FromAddress(void* address) {
- int delta = ABI_USES_FUNCTION_DESCRIPTORS
- ? offsetof(Redirection, function_descriptor_)
- : offsetof(Redirection, swi_instruction_);
- char* addr_of_redirection = reinterpret_cast<char*>(address) - delta;
- return reinterpret_cast<Redirection*>(addr_of_redirection);
- }
-
- static void* ReverseRedirection(intptr_t reg) {
- Redirection* redirection = FromAddress(reinterpret_cast<void*>(reg));
- return redirection->external_function();
- }
-
- static void DeleteChain(Redirection* redirection) {
- while (redirection != nullptr) {
- Redirection* next = redirection->next_;
- delete redirection;
- redirection = next;
- }
- }
-
- private:
- void* external_function_;
- uint32_t swi_instruction_;
- ExternalReference::Type type_;
- Redirection* next_;
- intptr_t function_descriptor_[3];
-};
-
-// static
-void Simulator::TearDown(base::CustomMatcherHashMap* i_cache,
- Redirection* first) {
- Redirection::DeleteChain(first);
- if (i_cache != nullptr) {
- for (base::HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
- entry = i_cache->Next(entry)) {
- delete static_cast<CachePage*>(entry->value);
- }
- delete i_cache;
- }
-}
-
-void* Simulator::RedirectExternalReference(Isolate* isolate,
- void* external_function,
- ExternalReference::Type type) {
- base::LockGuard<base::Mutex> lock_guard(
- isolate->simulator_redirection_mutex());
- Redirection* redirection = Redirection::Get(isolate, external_function, type);
- return redirection->address();
-}
-
// Get the active Simulator for the current thread.
Simulator* Simulator::current(Isolate* isolate) {
v8::internal::Isolate::PerIsolateThreadData* isolate_data =
@@ -1761,9 +1649,9 @@ void Simulator::SetFpResult(const double& result) {
void Simulator::TrashCallerSaveRegisters() {
// We don't trash the registers with the return value.
#if 0 // A good idea to trash volatile registers, needs to be done
- registers_[2] = 0x50Bad4U;
- registers_[3] = 0x50Bad4U;
- registers_[12] = 0x50Bad4U;
+ registers_[2] = 0x50BAD4U;
+ registers_[3] = 0x50BAD4U;
+ registers_[12] = 0x50BAD4U;
#endif
}
@@ -1884,7 +1772,7 @@ void Simulator::Format(Instruction* instr, const char* format) {
bool Simulator::CarryFrom(int32_t left, int32_t right, int32_t carry) {
uint32_t uleft = static_cast<uint32_t>(left);
uint32_t uright = static_cast<uint32_t>(right);
- uint32_t urest = 0xffffffffU - uleft;
+ uint32_t urest = 0xFFFFFFFFU - uleft;
return (uright > urest) ||
(carry && (((uright + 1) > urest) || (uright > (urest - 1))));
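CarryFrom above detects 32-bit carry without widening: with urest = 0xFFFFFFFF - uleft, the sum carries exactly when uright exceeds urest, with off-by-one variants when a carry-in is present. A self-contained check against a 64-bit oracle:

```cpp
#include <cassert>
#include <cstdint>

bool CarryFrom(int32_t left, int32_t right, int32_t carry = 0) {
  uint32_t uleft = static_cast<uint32_t>(left);
  uint32_t uright = static_cast<uint32_t>(right);
  uint32_t urest = 0xFFFFFFFFU - uleft;
  return (uright > urest) ||
         (carry && (((uright + 1) > urest) || (uright > (urest - 1))));
}

int main() {
  for (uint64_t l : {0ull, 1ull, 0x7FFFFFFFull, 0xFFFFFFFFull})
    for (uint64_t r : {0ull, 1ull, 0x80000000ull, 0xFFFFFFFFull})
      for (int c : {0, 1}) {
        bool expected = (l + r + c) > 0xFFFFFFFFull;  // widened oracle
        assert(CarryFrom(static_cast<int32_t>(l), static_cast<int32_t>(r),
                         c) == expected);
      }
  return 0;
}
```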
@@ -1971,7 +1859,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
bool stack_aligned =
(get_register(sp) & (::v8::internal::FLAG_sim_stack_alignment - 1)) ==
0;
- Redirection* redirection = Redirection::FromSwiInstruction(instr);
+ Redirection* redirection = Redirection::FromInstruction(instr);
const int kArgCount = 9;
const int kRegisterArgCount = 5;
int arg0_regnum = 2;
@@ -2342,7 +2230,7 @@ void Simulator::DisableStop(uint32_t code) {
void Simulator::IncreaseStopCounter(uint32_t code) {
DCHECK_LE(code, kMaxStopCode);
DCHECK(isWatchedStop(code));
- if ((watched_stops_[code].count & ~(1 << 31)) == 0x7fffffff) {
+ if ((watched_stops_[code].count & ~(1 << 31)) == 0x7FFFFFFF) {
PrintF(
"Stop counter for code %i has overflowed.\n"
"Enabling this code and reseting the counter to 0.\n",
@@ -2409,7 +2297,7 @@ int16_t Simulator::ByteReverse(int16_t hword) {
#if defined(__GNUC__)
return __builtin_bswap16(hword);
#else
- return (hword << 8) | ((hword >> 8) & 0x00ff);
+ return (hword << 8) | ((hword >> 8) & 0x00FF);
#endif
}
@@ -2418,9 +2306,9 @@ int32_t Simulator::ByteReverse(int32_t word) {
return __builtin_bswap32(word);
#else
int32_t result = word << 24;
- result |= (word << 8) & 0x00ff0000;
- result |= (word >> 8) & 0x0000ff00;
- result |= (word >> 24) & 0x00000ff;
+ result |= (word << 8) & 0x00FF0000;
+ result |= (word >> 8) & 0x0000FF00;
+ result |= (word >> 24) & 0x00000FF;
return result;
#endif
}
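For the non-builtin path above, the masks changed only in hex-digit casing; the logic is a standard 32-bit byte swap. A quick equivalence check against the compiler builtin, with shifts routed through unsigned to avoid signed-shift UB:

```cpp
#include <cassert>
#include <cstdint>

int32_t ByteReverseFallback(int32_t word) {
  uint32_t w = static_cast<uint32_t>(word);  // unsigned avoids shift UB
  uint32_t result = w << 24;
  result |= (w << 8) & 0x00FF0000;
  result |= (w >> 8) & 0x0000FF00;
  result |= (w >> 24) & 0x000000FF;
  return static_cast<int32_t>(result);
}

int main() {
  for (int32_t w : {0, 1, 0x12345678, static_cast<int32_t>(0xDEADBEEF)}) {
    assert(ByteReverseFallback(w) ==
           static_cast<int32_t>(__builtin_bswap32(static_cast<uint32_t>(w))));
  }
  return 0;
}
```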
@@ -2592,7 +2480,8 @@ void Simulator::CallInternal(byte* entry, int reg_arg_count) {
set_register(r13, r13_val);
}
-intptr_t Simulator::Call(byte* entry, int argument_count, ...) {
+intptr_t Simulator::CallImpl(byte* entry, int argument_count,
+ const intptr_t* arguments) {
// Adjust JS-based stack limit to C-based stack limit.
isolate_->stack_guard()->AdjustStackLimitForSimulator();
@@ -2606,16 +2495,13 @@ intptr_t Simulator::Call(byte* entry, int argument_count, ...) {
int64_t r12_val = get_register(r12);
int64_t r13_val = get_register(r13);
- va_list parameters;
- va_start(parameters, argument_count);
// Set up arguments
// First 5 arguments passed in registers r2-r6.
- int reg_arg_count = (argument_count > 5) ? 5 : argument_count;
+ int reg_arg_count = std::min(5, argument_count);
int stack_arg_count = argument_count - reg_arg_count;
for (int i = 0; i < reg_arg_count; i++) {
- intptr_t value = va_arg(parameters, intptr_t);
- set_register(i + 2, value);
+ set_register(i + 2, arguments[i]);
}
// Remaining arguments passed on stack.
@@ -2631,11 +2517,8 @@ intptr_t Simulator::Call(byte* entry, int argument_count, ...) {
// Store remaining arguments on stack, from low to high memory.
intptr_t* stack_argument =
reinterpret_cast<intptr_t*>(entry_stack + kCalleeRegisterSaveAreaSize);
- for (int i = 0; i < stack_arg_count; i++) {
- intptr_t value = va_arg(parameters, intptr_t);
- stack_argument[i] = value;
- }
- va_end(parameters);
+ memcpy(stack_argument, arguments + reg_arg_count,
+ stack_arg_count * sizeof(*arguments));
set_register(sp, entry_stack);
// Prepare to execute the code at entry
@@ -2716,8 +2599,7 @@ intptr_t Simulator::Call(byte* entry, int argument_count, ...) {
set_register(sp, original_stack);
// Return value register
- intptr_t result = get_register(r2);
- return result;
+ return get_register(r2);
}
void Simulator::CallFP(byte* entry, double d0, double d1) {
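The CallImpl refactor above replaces va_list with a flat intptr_t array; the ABI split is unchanged: the first five arguments ride in r2..r6 and the remainder spill to the stack. A minimal sketch of that split:

```cpp
#include <algorithm>
#include <cstdint>
#include <vector>

// Mirrors "reg_arg_count = std::min(5, argument_count)" above: the first
// five flattened arguments go to r2..r6, the rest to the stack area.
void SplitArgs(const intptr_t* args, int count, intptr_t regs[5],
               std::vector<intptr_t>* stack) {
  int reg_count = std::min(5, count);
  std::copy(args, args + reg_count, regs);
  stack->assign(args + reg_count, args + count);
}
```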
@@ -3663,7 +3545,7 @@ EVALUATE(EX) {
char new_instr_buf[8];
char* addr = reinterpret_cast<char*>(&new_instr_buf[0]);
- the_instr |= static_cast<SixByteInstr>(r1_val & 0xff)
+ the_instr |= static_cast<SixByteInstr>(r1_val & 0xFF)
<< (8 * inst_length - 16);
Instruction::SetInstructionBits<SixByteInstr>(
reinterpret_cast<byte*>(addr), static_cast<SixByteInstr>(the_instr));
@@ -4004,9 +3886,9 @@ EVALUATE(BXH) {
DECODE_RS_A_INSTRUCTION(r1, r3, b2, d2);
// r1_val is the first operand, r3_val is the increment
- int32_t r1_val = r1 == 0 ? 0 : get_register(r1);
- int32_t r3_val = r2 == 0 ? 0 : get_register(r3);
- intptr_t b2_val = b2 == 0 ? 0 : get_register(b2);
+ int32_t r1_val = (r1 == 0) ? 0 : get_register(r1);
+ int32_t r3_val = (r3 == 0) ? 0 : get_register(r3);
+ intptr_t b2_val = (b2 == 0) ? 0 : get_register(b2);
intptr_t branch_address = b2_val + d2;
// increment r1_val
r1_val += r3_val;
@@ -5464,7 +5346,7 @@ EVALUATE(TRAP4) {
int64_t sp_addr = get_register(sp);
for (int i = 0; i < kCalleeRegisterSaveAreaSize / kPointerSize; ++i) {
// we don't want to whack the RA (r14)
- if (i != 14) (reinterpret_cast<intptr_t*>(sp_addr))[i] = 0xdeadbabe;
+ if (i != 14) (reinterpret_cast<intptr_t*>(sp_addr))[i] = 0xDEADBABE;
}
SoftwareInterrupt(instr);
return length;
@@ -6948,7 +6830,7 @@ EVALUATE(LLGFR) {
DCHECK_OPCODE(LLGFR);
DECODE_RRE_INSTRUCTION(r1, r2);
int32_t r2_val = get_low_register<int32_t>(r2);
- uint64_t r2_finalval = (static_cast<uint64_t>(r2_val) & 0x00000000ffffffff);
+ uint64_t r2_finalval = (static_cast<uint64_t>(r2_val) & 0x00000000FFFFFFFF);
set_register(r1, r2_finalval);
return length;
}
@@ -8017,8 +7899,8 @@ EVALUATE(LRVH) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
intptr_t mem_addr = b2_val + x2_val + d2;
int16_t mem_val = ReadH(mem_addr, instr);
- int32_t result = ByteReverse(mem_val) & 0x0000ffff;
- result |= r1_val & 0xffff0000;
+ int32_t result = ByteReverse(mem_val) & 0x0000FFFF;
+ result |= r1_val & 0xFFFF0000;
set_low_register(r1, result);
return length;
}
diff --git a/deps/v8/src/s390/simulator-s390.h b/deps/v8/src/s390/simulator-s390.h
index a214b198df..1ff8020e6a 100644
--- a/deps/v8/src/s390/simulator-s390.h
+++ b/deps/v8/src/s390/simulator-s390.h
@@ -5,7 +5,7 @@
// Declares a Simulator for S390 instructions if we are not generating a native
// S390 binary. This Simulator allows us to run and debug S390 code generation
// on regular desktop machines.
-// V8 calls into generated code by "calling" the CALL_GENERATED_CODE macro,
+// V8 calls into generated code via the GeneratedCode wrapper,
// which will start execution in the Simulator or forward to the real entry
// on an S390 hardware platform.
@@ -14,56 +14,13 @@
#include "src/allocation.h"
-#if !defined(USE_SIMULATOR)
-// Running without a simulator on a native s390 platform.
-
-namespace v8 {
-namespace internal {
-
-// When running without a simulator we call the entry directly.
-#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
- (entry(p0, p1, p2, p3, p4))
-
-typedef int (*s390_regexp_matcher)(String*, int, const byte*, const byte*, int*,
- int, Address, int, Isolate*);
-
-// Call the generated regexp code directly. The code at the entry address
-// should act as a function matching the type ppc_regexp_matcher.
-#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
- p7, p8) \
- (FUNCTION_CAST<s390_regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, \
- p8))
-
-// The stack limit beyond which we will throw stack overflow errors in
-// generated code. Because generated code on s390 uses the C stack, we
-// just use the C stack limit.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
- uintptr_t c_limit) {
- USE(isolate);
- return c_limit;
- }
-
- static inline uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
- uintptr_t try_catch_address) {
- USE(isolate);
- return try_catch_address;
- }
-
- static inline void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
- USE(isolate);
- }
-};
-} // namespace internal
-} // namespace v8
-
-#else // !defined(USE_SIMULATOR)
+#if defined(USE_SIMULATOR)
// Running with a simulator.
#include "src/assembler.h"
#include "src/base/hashmap.h"
#include "src/s390/constants-s390.h"
+#include "src/simulator-base.h"
namespace v8 {
namespace internal {
@@ -94,7 +51,7 @@ class CachePage {
char validity_map_[kValidityMapSize]; // One byte per line.
};
-class Simulator {
+class Simulator : public SimulatorBase {
public:
friend class S390Debugger;
enum Register {
@@ -206,15 +163,11 @@ class Simulator {
// Executes S390 instructions until the PC reaches end_sim_pc.
void Execute();
- // Call on program start.
- static void Initialize(Isolate* isolate);
-
- static void TearDown(base::CustomMatcherHashMap* i_cache, Redirection* first);
+ template <typename Return, typename... Args>
+ Return Call(byte* entry, Args... args) {
+ return VariadicCall<Return>(this, &Simulator::CallImpl, entry, args...);
+ }
- // V8 generally calls into generated JS code with 5 parameters and into
- // generated RegExp code with 7 parameters. This is a convenience function,
- // which sets up the simulator state and grabs the result on return.
- intptr_t Call(byte* entry, int argument_count, ...);
// Alternative: call a 2-argument double function.
void CallFP(byte* entry, double d0, double d1);
int32_t CallFPReturnsInt(byte* entry, double d0, double d1);
@@ -230,6 +183,9 @@ class Simulator {
void set_last_debugger_input(char* input);
char* last_debugger_input() { return last_debugger_input_; }
+ // Redirection support.
+ static void SetRedirectInstruction(Instruction* instruction);
+
// ICache checking.
static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
size_t size);
@@ -250,6 +206,8 @@ class Simulator {
end_sim_pc = -2
};
+ intptr_t CallImpl(byte* entry, int argument_count, const intptr_t* arguments);
+
// Unsupported instructions use Format to print an error and stop execution.
void Format(Instruction* instr, const char* format);
@@ -440,11 +398,6 @@ class Simulator {
static CachePage* GetCachePage(base::CustomMatcherHashMap* i_cache,
void* page);
- // Runtime call support. Uses the isolate in a thread-safe way.
- static void* RedirectExternalReference(
- Isolate* isolate, void* external_function,
- v8::internal::ExternalReference::Type type);
-
// Handle arguments and return value for runtime FP functions.
void GetFpArgs(double* x, double* y, intptr_t* z);
void SetFpResult(const double& result);
@@ -1248,43 +1201,8 @@ class Simulator {
#undef EVALUATE
};
-// When running with the simulator transition into simulated execution at this
-// point.
-#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
- reinterpret_cast<Object*>(Simulator::current(isolate)->Call( \
- FUNCTION_ADDR(entry), 5, (intptr_t)p0, (intptr_t)p1, (intptr_t)p2, \
- (intptr_t)p3, (intptr_t)p4))
-
-#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
- p7, p8) \
- Simulator::current(isolate)->Call( \
- entry, 9, (intptr_t)p0, (intptr_t)p1, (intptr_t)p2, (intptr_t)p3, \
- (intptr_t)p4, (intptr_t)p5, (intptr_t)p6, (intptr_t)p7, (intptr_t)p8)
-
-// The simulator has its own stack. Thus it has a different stack limit from
-// the C-based native code. The JS-based limit normally points near the end of
-// the simulator stack. When the C-based limit is exhausted we reflect that by
-// lowering the JS-based limit as well, to make stack checks trigger.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
- uintptr_t c_limit) {
- return Simulator::current(isolate)->StackLimit(c_limit);
- }
-
- static inline uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
- uintptr_t try_catch_address) {
- Simulator* sim = Simulator::current(isolate);
- return sim->PushAddress(try_catch_address);
- }
-
- static inline void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
- Simulator::current(isolate)->PopAddress();
- }
-};
-
} // namespace internal
} // namespace v8
-#endif // !defined(USE_SIMULATOR)
+#endif // defined(USE_SIMULATOR)
#endif // V8_S390_SIMULATOR_S390_H_
diff --git a/deps/v8/src/safepoint-table.cc b/deps/v8/src/safepoint-table.cc
index 06a6888465..83031a2f36 100644
--- a/deps/v8/src/safepoint-table.cc
+++ b/deps/v8/src/safepoint-table.cc
@@ -52,9 +52,7 @@ SafepointTable::SafepointTable(Address instruction_start,
SafepointTable::SafepointTable(Code* code)
: SafepointTable(code->instruction_start(), code->safepoint_table_offset(),
- code->stack_slots(), true) {
- DCHECK(code->is_turbofanned());
-}
+ code->stack_slots(), true) {}
unsigned SafepointTable::find_return_pc(unsigned pc_offset) {
for (unsigned i = 0; i < length(); i++) {
@@ -134,28 +132,20 @@ Safepoint SafepointTableBuilder::DefineSafepoint(
int arguments,
Safepoint::DeoptMode deopt_mode) {
DCHECK_GE(arguments, 0);
- DeoptimizationInfo info;
- info.pc = assembler->pc_offset();
- info.arguments = arguments;
- info.has_doubles = (kind & Safepoint::kWithDoubles);
- info.trampoline = -1;
- deoptimization_info_.Add(info, zone_);
- deopt_index_list_.Add(Safepoint::kNoDeoptimizationIndex, zone_);
+ deoptimization_info_.Add(
+ DeoptimizationInfo(zone_, assembler->pc_offset(), arguments, kind),
+ zone_);
if (deopt_mode == Safepoint::kNoLazyDeopt) {
- last_lazy_safepoint_ = deopt_index_list_.length();
+ last_lazy_safepoint_ = deoptimization_info_.length();
}
- indexes_.Add(new(zone_) ZoneList<int>(8, zone_), zone_);
- registers_.Add((kind & Safepoint::kWithRegisters)
- ? new (zone_) ZoneList<int>(4, zone_)
- : nullptr,
- zone_);
- return Safepoint(indexes_.last(), registers_.last());
+ DeoptimizationInfo& new_info = deoptimization_info_.last();
+ return Safepoint(new_info.indexes, new_info.registers);
}
void SafepointTableBuilder::RecordLazyDeoptimizationIndex(int index) {
- while (last_lazy_safepoint_ < deopt_index_list_.length()) {
- deopt_index_list_[last_lazy_safepoint_++] = index;
+ while (last_lazy_safepoint_ < deoptimization_info_.length()) {
+ deoptimization_info_[last_lazy_safepoint_++].deopt_index = index;
}
}
@@ -201,17 +191,17 @@ void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
// Emit sorted table of pc offsets together with deoptimization indexes.
for (int i = 0; i < length; i++) {
- assembler->dd(deoptimization_info_[i].pc);
- assembler->dd(EncodeExceptPC(deoptimization_info_[i],
- deopt_index_list_[i]));
- assembler->dd(deoptimization_info_[i].trampoline);
+ const DeoptimizationInfo& info = deoptimization_info_[i];
+ assembler->dd(info.pc);
+ assembler->dd(EncodeExceptPC(info));
+ assembler->dd(info.trampoline);
}
// Emit table of bitmaps.
ZoneList<uint8_t> bits(bytes_per_entry, zone_);
for (int i = 0; i < length; i++) {
- ZoneList<int>* indexes = indexes_[i];
- ZoneList<int>* registers = registers_[i];
+ ZoneList<int>* indexes = deoptimization_info_[i].indexes;
+ ZoneList<int>* registers = deoptimization_info_[i].registers;
bits.Clear();
bits.AddBlock(0, bytes_per_entry, zone_);
@@ -248,13 +238,10 @@ void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
emitted_ = true;
}
-
-uint32_t SafepointTableBuilder::EncodeExceptPC(const DeoptimizationInfo& info,
- unsigned index) {
- uint32_t encoding = SafepointEntry::DeoptimizationIndexField::encode(index);
- encoding |= SafepointEntry::ArgumentsField::encode(info.arguments);
- encoding |= SafepointEntry::SaveDoublesField::encode(info.has_doubles);
- return encoding;
+uint32_t SafepointTableBuilder::EncodeExceptPC(const DeoptimizationInfo& info) {
+ return SafepointEntry::DeoptimizationIndexField::encode(info.deopt_index) |
+ SafepointEntry::ArgumentsField::encode(info.arguments) |
+ SafepointEntry::SaveDoublesField::encode(info.has_doubles);
}
void SafepointTableBuilder::RemoveDuplicates() {
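EncodeExceptPC above packs the deopt index, argument count, and save-doubles flag into one word via BitField encoders. An equivalent stand-alone sketch; the field widths here are assumptions for illustration, since the real layout is defined elsewhere:

```cpp
#include <cassert>
#include <cstdint>

// Assumed layout: deopt index in bits 0..15, argument count in bits
// 16..23, save-doubles flag in bit 24.
uint32_t EncodeExceptPC(unsigned deopt_index, unsigned arguments,
                        bool has_doubles) {
  return (deopt_index & 0xFFFF) | ((arguments & 0xFF) << 16) |
         (static_cast<uint32_t>(has_doubles) << 24);
}

int main() {
  uint32_t e = EncodeExceptPC(42, 3, true);
  assert((e & 0xFFFF) == 42);
  assert(((e >> 16) & 0xFF) == 3);
  assert((e >> 24) == 1);
  return 0;
}
```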
@@ -264,44 +251,36 @@ void SafepointTableBuilder::RemoveDuplicates() {
// pointers and without deoptimization info.
int length = deoptimization_info_.length();
- DCHECK_EQ(length, deopt_index_list_.length());
- DCHECK_EQ(length, indexes_.length());
- DCHECK_EQ(length, registers_.length());
-
if (length < 2) return;
// Check that all entries [1, length) are identical to entry 0.
+ const DeoptimizationInfo& first_info = deoptimization_info_[0];
for (int i = 1; i < length; ++i) {
- if (!IsIdenticalExceptForPc(0, i)) return;
+ if (!IsIdenticalExceptForPc(first_info, deoptimization_info_[i])) return;
}
- // If we get here, all entries were identical. Rewind all lists to just one
+ // If we get here, all entries were identical. Rewind the list to just one
// entry, and set the pc to kMaxUInt32.
deoptimization_info_.Rewind(1);
- deopt_index_list_.Rewind(1);
- indexes_.Rewind(1);
- registers_.Rewind(1);
deoptimization_info_[0].pc = kMaxUInt32;
}
-bool SafepointTableBuilder::IsIdenticalExceptForPc(int index1,
- int index2) const {
- DeoptimizationInfo& deopt_info_1 = deoptimization_info_[index1];
- DeoptimizationInfo& deopt_info_2 = deoptimization_info_[index2];
- if (deopt_info_1.arguments != deopt_info_2.arguments) return false;
- if (deopt_info_1.has_doubles != deopt_info_2.has_doubles) return false;
+bool SafepointTableBuilder::IsIdenticalExceptForPc(
+ const DeoptimizationInfo& info1, const DeoptimizationInfo& info2) const {
+ if (info1.arguments != info2.arguments) return false;
+ if (info1.has_doubles != info2.has_doubles) return false;
- if (deopt_index_list_[index1] != deopt_index_list_[index2]) return false;
+ if (info1.deopt_index != info2.deopt_index) return false;
- ZoneList<int>* indexes1 = indexes_[index1];
- ZoneList<int>* indexes2 = indexes_[index2];
+ ZoneList<int>* indexes1 = info1.indexes;
+ ZoneList<int>* indexes2 = info2.indexes;
if (indexes1->length() != indexes2->length()) return false;
for (int i = 0; i < indexes1->length(); ++i) {
if (indexes1->at(i) != indexes2->at(i)) return false;
}
- ZoneList<int>* registers1 = registers_[index1];
- ZoneList<int>* registers2 = registers_[index2];
+ ZoneList<int>* registers1 = info1.registers;
+ ZoneList<int>* registers2 = info2.registers;
if (registers1) {
if (!registers2) return false;
if (registers1->length() != registers2->length()) return false;
diff --git a/deps/v8/src/safepoint-table.h b/deps/v8/src/safepoint-table.h
index 9f063bac20..5c6b413fa1 100644
--- a/deps/v8/src/safepoint-table.h
+++ b/deps/v8/src/safepoint-table.h
@@ -194,8 +194,8 @@ class Safepoint BASE_EMBEDDED {
private:
Safepoint(ZoneList<int>* indexes, ZoneList<int>* registers)
: indexes_(indexes), registers_(registers) {}
- ZoneList<int>* indexes_;
- ZoneList<int>* registers_;
+ ZoneList<int>* const indexes_;
+ ZoneList<int>* const registers_;
friend class SafepointTableBuilder;
};
@@ -205,9 +205,6 @@ class SafepointTableBuilder BASE_EMBEDDED {
public:
explicit SafepointTableBuilder(Zone* zone)
: deoptimization_info_(32, zone),
- deopt_index_list_(32, zone),
- indexes_(32, zone),
- registers_(32, zone),
emitted_(false),
last_lazy_safepoint_(0),
zone_(zone) { }
@@ -225,7 +222,7 @@ class SafepointTableBuilder BASE_EMBEDDED {
// outstanding safepoints.
void RecordLazyDeoptimizationIndex(int index);
void BumpLastLazySafepointIndex() {
- last_lazy_safepoint_ = deopt_index_list_.length();
+ last_lazy_safepoint_ = deoptimization_info_.length();
}
// Emit the safepoint table after the body. The number of bits per
@@ -244,18 +241,30 @@ class SafepointTableBuilder BASE_EMBEDDED {
unsigned arguments;
bool has_doubles;
int trampoline;
+ ZoneList<int>* indexes;
+ ZoneList<int>* registers;
+ unsigned deopt_index;
+ DeoptimizationInfo(Zone* zone, unsigned pc, unsigned arguments,
+ Safepoint::Kind kind)
+ : pc(pc),
+ arguments(arguments),
+ has_doubles(kind & Safepoint::kWithDoubles),
+ trampoline(-1),
+ indexes(new (zone) ZoneList<int>(8, zone)),
+ registers(kind & Safepoint::kWithRegisters
+ ? new (zone) ZoneList<int>(4, zone)
+ : nullptr),
+ deopt_index(Safepoint::kNoDeoptimizationIndex) {}
};
- uint32_t EncodeExceptPC(const DeoptimizationInfo& info, unsigned index);
+ uint32_t EncodeExceptPC(const DeoptimizationInfo&);
- bool IsIdenticalExceptForPc(int index1, int index2) const;
+ bool IsIdenticalExceptForPc(const DeoptimizationInfo&,
+ const DeoptimizationInfo&) const;
// If all entries are identical, replace them by 1 entry with pc = kMaxUInt32.
void RemoveDuplicates();
ZoneList<DeoptimizationInfo> deoptimization_info_;
- ZoneList<unsigned> deopt_index_list_;
- ZoneList<ZoneList<int>*> indexes_;
- ZoneList<ZoneList<int>*> registers_;
unsigned offset_;
bool emitted_;
diff --git a/deps/v8/src/simulator-base.cc b/deps/v8/src/simulator-base.cc
new file mode 100644
index 0000000000..72a5daefce
--- /dev/null
+++ b/deps/v8/src/simulator-base.cc
@@ -0,0 +1,95 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/simulator-base.h"
+
+#include "src/assembler.h"
+#include "src/isolate.h"
+#include "src/simulator.h"
+
+#if defined(USE_SIMULATOR)
+
+namespace v8 {
+namespace internal {
+
+// static
+base::Mutex* SimulatorBase::redirection_mutex_ = nullptr;
+
+// static
+Redirection* SimulatorBase::redirection_ = nullptr;
+
+// static
+void SimulatorBase::InitializeOncePerProcess() {
+ DCHECK_NULL(redirection_mutex_);
+ redirection_mutex_ = new base::Mutex();
+}
+
+// static
+void SimulatorBase::GlobalTearDown() {
+ delete redirection_mutex_;
+ redirection_mutex_ = nullptr;
+
+ Redirection::DeleteChain(redirection_);
+ redirection_ = nullptr;
+}
+
+// static
+void SimulatorBase::Initialize(Isolate* isolate) {
+ ExternalReference::set_redirector(isolate, &RedirectExternalReference);
+}
+
+// static
+void SimulatorBase::TearDown(base::CustomMatcherHashMap* i_cache) {
+ if (i_cache != nullptr) {
+ for (base::HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
+ entry = i_cache->Next(entry)) {
+ delete static_cast<CachePage*>(entry->value);
+ }
+ delete i_cache;
+ }
+}
+
+// static
+void* SimulatorBase::RedirectExternalReference(Isolate* isolate,
+ void* external_function,
+ ExternalReference::Type type) {
+ base::LockGuard<base::Mutex> lock_guard(Simulator::redirection_mutex());
+ Redirection* redirection = Redirection::Get(isolate, external_function, type);
+ return redirection->address_of_instruction();
+}
+
+Redirection::Redirection(Isolate* isolate, void* external_function,
+ ExternalReference::Type type)
+ : external_function_(external_function), type_(type), next_(nullptr) {
+ next_ = Simulator::redirection();
+ Simulator::SetRedirectInstruction(
+ reinterpret_cast<Instruction*>(address_of_instruction()));
+ Simulator::FlushICache(isolate->simulator_i_cache(),
+ reinterpret_cast<void*>(&instruction_),
+ sizeof(instruction_));
+ Simulator::set_redirection(this);
+#if ABI_USES_FUNCTION_DESCRIPTORS
+ function_descriptor_[0] = reinterpret_cast<intptr_t>(&instruction_);
+ function_descriptor_[1] = 0;
+ function_descriptor_[2] = 0;
+#endif
+}
+
+// static
+Redirection* Redirection::Get(Isolate* isolate, void* external_function,
+ ExternalReference::Type type) {
+ Redirection* current = Simulator::redirection();
+ for (; current != nullptr; current = current->next_) {
+ if (current->external_function_ == external_function &&
+ current->type_ == type) {
+ return current;
+ }
+ }
+ return new Redirection(isolate, external_function, type);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // defined(USE_SIMULATOR)
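Redirection::Get above is a lookup-or-insert over a singly linked chain: reuse an existing (function, type) node or prepend a new one. A minimal stand-alone sketch of that memoization pattern:

```cpp
#include <cassert>

struct Node {
  void* fn;
  int type;
  Node* next;
};

Node* head = nullptr;

Node* GetOrCreate(void* fn, int type) {
  for (Node* cur = head; cur != nullptr; cur = cur->next) {
    if (cur->fn == fn && cur->type == type) return cur;  // cache hit
  }
  head = new Node{fn, type, head};  // prepend, like next_ = redirection()
  return head;
}

int main() {
  int a;
  Node* n1 = GetOrCreate(&a, 0);
  Node* n2 = GetOrCreate(&a, 0);
  assert(n1 == n2);  // the second lookup reuses the first node
  return 0;
}
```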
diff --git a/deps/v8/src/simulator-base.h b/deps/v8/src/simulator-base.h
new file mode 100644
index 0000000000..27dc87d050
--- /dev/null
+++ b/deps/v8/src/simulator-base.h
@@ -0,0 +1,163 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SIMULATOR_BASE_H_
+#define V8_SIMULATOR_BASE_H_
+
+#include <type_traits>
+
+#include "src/assembler.h"
+#include "src/globals.h"
+
+#if defined(USE_SIMULATOR)
+
+namespace v8 {
+namespace internal {
+
+class Instruction;
+class Redirection;
+
+class SimulatorBase {
+ public:
+ // Call on process start and exit.
+ static void InitializeOncePerProcess();
+ static void GlobalTearDown();
+
+ // Call on isolate initialization and teardown.
+ static void Initialize(Isolate* isolate);
+ static void TearDown(base::CustomMatcherHashMap* i_cache);
+
+ static base::Mutex* redirection_mutex() { return redirection_mutex_; }
+ static Redirection* redirection() { return redirection_; }
+ static void set_redirection(Redirection* r) { redirection_ = r; }
+
+ protected:
+ template <typename Return, typename SimT, typename CallImpl, typename... Args>
+ static Return VariadicCall(SimT* sim, CallImpl call, byte* entry,
+ Args... args) {
+ // Convert all arguments to intptr_t. Fails if any argument is not integral
+ // or pointer.
+ std::array<intptr_t, sizeof...(args)> args_arr{ConvertArg(args)...};
+ intptr_t ret = (sim->*call)(entry, args_arr.size(), args_arr.data());
+ return ConvertReturn<Return>(ret);
+ }
+
+ private:
+ // Runtime call support. Uses the isolate in a thread-safe way.
+ static void* RedirectExternalReference(Isolate* isolate,
+ void* external_function,
+ ExternalReference::Type type);
+
+ static base::Mutex* redirection_mutex_;
+ static Redirection* redirection_;
+
+ // Helper methods to convert arbitrary integer or pointer arguments to the
+ // needed generic argument type intptr_t.
+
+ // Convert integral argument to intptr_t.
+ template <typename T>
+ static typename std::enable_if<std::is_integral<T>::value, intptr_t>::type
+ ConvertArg(T arg) {
+ static_assert(sizeof(T) <= sizeof(intptr_t), "type bigger than ptrsize");
+ return static_cast<intptr_t>(arg);
+ }
+
+ // Convert pointer-typed argument to intptr_t.
+ template <typename T>
+ static typename std::enable_if<std::is_pointer<T>::value, intptr_t>::type
+ ConvertArg(T arg) {
+ return reinterpret_cast<intptr_t>(arg);
+ }
+
+ // Convert back integral return types.
+ template <typename T>
+ static typename std::enable_if<std::is_integral<T>::value, T>::type
+ ConvertReturn(intptr_t ret) {
+ static_assert(sizeof(T) <= sizeof(intptr_t), "type bigger than ptrsize");
+ return static_cast<T>(ret);
+ }
+
+ // Convert back pointer-typed return types.
+ template <typename T>
+ static typename std::enable_if<std::is_pointer<T>::value, T>::type
+ ConvertReturn(intptr_t ret) {
+ return reinterpret_cast<T>(ret);
+ }
+
+ // Convert back void return type (i.e. no return).
+ template <typename T>
+ static typename std::enable_if<std::is_void<T>::value, T>::type ConvertReturn(
+ intptr_t ret) {}
+};
+
+// When the generated code calls an external reference we need to catch that in
+// the simulator. The external reference will be a function compiled for the
+// host architecture. We need to call that function instead of trying to
+// execute it with the simulator. We do that by redirecting the external
+// reference to a trapping instruction that is handled by the simulator. We
+// write the original destination of the jump just at a known offset from the
+// trapping instruction so the simulator knows what to call.
+//
+// The following are trapping instructions used for various architectures:
+// - V8_TARGET_ARCH_ARM: svc (Supervisor Call)
+// - V8_TARGET_ARCH_ARM64: svc (Supervisor Call)
+// - V8_TARGET_ARCH_MIPS: swi (software-interrupt)
+// - V8_TARGET_ARCH_MIPS64: swi (software-interrupt)
+// - V8_TARGET_ARCH_PPC: svc (Supervisor Call)
+// - V8_TARGET_ARCH_S390: svc (Supervisor Call)
+class Redirection {
+ public:
+ Redirection(Isolate* isolate, void* external_function,
+ ExternalReference::Type type);
+
+ Address address_of_instruction() {
+#if ABI_USES_FUNCTION_DESCRIPTORS
+ return reinterpret_cast<Address>(function_descriptor_);
+#else
+ return reinterpret_cast<Address>(&instruction_);
+#endif
+ }
+
+ void* external_function() { return external_function_; }
+ ExternalReference::Type type() { return type_; }
+
+ static Redirection* Get(Isolate* isolate, void* external_function,
+ ExternalReference::Type type);
+
+ static Redirection* FromInstruction(Instruction* instruction) {
+ Address addr_of_instruction = reinterpret_cast<Address>(instruction);
+ Address addr_of_redirection =
+ addr_of_instruction - offsetof(Redirection, instruction_);
+ return reinterpret_cast<Redirection*>(addr_of_redirection);
+ }
+
+ static void* ReverseRedirection(intptr_t reg) {
+ Redirection* redirection = FromInstruction(
+ reinterpret_cast<Instruction*>(reinterpret_cast<void*>(reg)));
+ return redirection->external_function();
+ }
+
+ static void DeleteChain(Redirection* redirection) {
+ while (redirection != nullptr) {
+ Redirection* next = redirection->next_;
+ delete redirection;
+ redirection = next;
+ }
+ }
+
+ private:
+ void* external_function_;
+ uint32_t instruction_;
+ ExternalReference::Type type_;
+ Redirection* next_;
+#if ABI_USES_FUNCTION_DESCRIPTORS
+ intptr_t function_descriptor_[3];
+#endif
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // defined(USE_SIMULATOR)
+#endif // V8_SIMULATOR_BASE_H_
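VariadicCall and its ConvertArg/ConvertReturn helpers above flatten every integral or pointer argument to intptr_t, forward them as an array, and convert the intptr_t result back (with a dedicated overload for void). A self-contained sketch of the same machinery with a fake CallImpl:

```cpp
#include <array>
#include <cassert>
#include <cstdint>
#include <type_traits>

template <typename T>
typename std::enable_if<std::is_integral<T>::value, intptr_t>::type
ConvertArg(T arg) {
  return static_cast<intptr_t>(arg);
}
template <typename T>
typename std::enable_if<std::is_pointer<T>::value, intptr_t>::type
ConvertArg(T arg) {
  return reinterpret_cast<intptr_t>(arg);
}

// Fake CallImpl: just sums the flattened arguments.
intptr_t CallImpl(int argc, const intptr_t* argv) {
  intptr_t sum = 0;
  for (int i = 0; i < argc; ++i) sum += argv[i];
  return sum;
}

template <typename Return, typename... Args>
Return VariadicCall(Args... args) {
  std::array<intptr_t, sizeof...(args)> arr{ConvertArg(args)...};
  return static_cast<Return>(
      CallImpl(static_cast<int>(arr.size()), arr.data()));
}

int main() {
  int x = 7;
  assert(VariadicCall<intptr_t>(1, 2L, &x) ==
         3 + reinterpret_cast<intptr_t>(&x));
  return 0;
}
```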
diff --git a/deps/v8/src/simulator.h b/deps/v8/src/simulator.h
index 6eab8cf976..a2af7f59d5 100644
--- a/deps/v8/src/simulator.h
+++ b/deps/v8/src/simulator.h
@@ -5,6 +5,9 @@
#ifndef V8_SIMULATOR_H_
#define V8_SIMULATOR_H_
+#include "src/globals.h"
+#include "src/objects/code.h"
+
#if V8_TARGET_ARCH_IA32
#include "src/ia32/simulator-ia32.h"
#elif V8_TARGET_ARCH_X64
@@ -25,4 +28,109 @@
#error Unsupported target architecture.
#endif
+namespace v8 {
+namespace internal {
+
+#if defined(USE_SIMULATOR)
+// Running with a simulator.
+
+// The simulator has its own stack. Thus it has a different stack limit from
+// the C-based native code. The JS-based limit normally points near the end of
+// the simulator stack. When the C-based limit is exhausted we reflect that by
+// lowering the JS-based limit as well, to make stack checks trigger.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+ static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
+ uintptr_t c_limit) {
+ return Simulator::current(isolate)->StackLimit(c_limit);
+ }
+
+ static inline uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
+ uintptr_t try_catch_address) {
+ return Simulator::current(isolate)->PushAddress(try_catch_address);
+ }
+
+ static inline void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
+ Simulator::current(isolate)->PopAddress();
+ }
+};
+
+#else // defined(USE_SIMULATOR)
+// Running without a simulator on a native platform.
+
+// The stack limit beyond which we will throw stack overflow errors in
+// generated code. Because generated code uses the C stack, we just use
+// the C stack limit.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+ static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
+ uintptr_t c_limit) {
+ USE(isolate);
+ return c_limit;
+ }
+
+ static inline uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
+ uintptr_t try_catch_address) {
+ USE(isolate);
+ return try_catch_address;
+ }
+
+ static inline void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
+ USE(isolate);
+ }
+};
+
+#endif // defined(USE_SIMULATOR)
+
+// Use this class either as {GeneratedCode<ret, arg1, arg2>} or
+// {GeneratedCode<ret(arg1, arg2)>} (see specialization below).
+template <typename Return, typename... Args>
+class GeneratedCode {
+ public:
+ using Signature = Return(Args...);
+
+ template <typename T>
+ static GeneratedCode FromAddress(Isolate* isolate, T* addr) {
+ return GeneratedCode(isolate, reinterpret_cast<Signature*>(addr));
+ }
+
+ static GeneratedCode FromCode(Code* code) {
+ return FromAddress(code->GetIsolate(), code->entry());
+ }
+
+#ifdef USE_SIMULATOR
+ // {Simulator::Call} is defined in simulator-base.h.
+ Return Call(Args... args) {
+ return Simulator::current(isolate_)->template Call<Return>(
+ reinterpret_cast<byte*>(fn_ptr_), args...);
+ }
+#else
+ DISABLE_CFI_ICALL Return Call(Args... args) {
+ // When running without a simulator we call the entry directly.
+ return fn_ptr_(args...);
+ }
+#endif
+
+ private:
+ friend class GeneratedCode<Return(Args...)>;
+ Isolate* isolate_;
+ Signature* fn_ptr_;
+ GeneratedCode(Isolate* isolate, Signature* fn_ptr)
+ : isolate_(isolate), fn_ptr_(fn_ptr) {}
+};
+
+// Allow using {GeneratedCode<ret(arg1, arg2)>} instead of
+// {GeneratedCode<ret, arg1, arg2>}.
+template <typename Return, typename... Args>
+class GeneratedCode<Return(Args...)> : public GeneratedCode<Return, Args...> {
+ public:
+ // Automatically convert from {GeneratedCode<ret, arg1, arg2>} to
+ // {GeneratedCode<ret(arg1, arg2)>}.
+ GeneratedCode(GeneratedCode<Return, Args...> other)
+ : GeneratedCode<Return, Args...>(other.isolate_, other.fn_ptr_) {}
+};
+
+} // namespace internal
+} // namespace v8
+
#endif // V8_SIMULATOR_H_
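
A minimal usage sketch of the new wrapper, assuming {code} points at a
compiled Code object whose entry has an int(int, int) signature; the same
call site works with and without USE_SIMULATOR, since Call() either
round-trips through Simulator::current() or invokes the entry directly:

  auto add = GeneratedCode<int(int, int)>::FromCode(code);
  int sum = add.Call(1, 2);  // runs under the simulator iff USE_SIMULATOR
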
diff --git a/deps/v8/src/snapshot/builtin-deserializer-allocator.h b/deps/v8/src/snapshot/builtin-deserializer-allocator.h
index 6fc7bfaf6b..eb04b54025 100644
--- a/deps/v8/src/snapshot/builtin-deserializer-allocator.h
+++ b/deps/v8/src/snapshot/builtin-deserializer-allocator.h
@@ -69,7 +69,7 @@ class BuiltinDeserializerAllocator final {
// Builtin deserialization does not bake reservations into the snapshot, hence
// this is a nop.
- void DecodeReservation(Vector<const SerializedData::Reservation> res) {}
+ void DecodeReservation(std::vector<SerializedData::Reservation> res) {}
// These methods are used to pre-allocate builtin objects prior to
// deserialization.
diff --git a/deps/v8/src/snapshot/code-serializer.cc b/deps/v8/src/snapshot/code-serializer.cc
index 3350ef3c0f..4210845573 100644
--- a/deps/v8/src/snapshot/code-serializer.cc
+++ b/deps/v8/src/snapshot/code-serializer.cc
@@ -82,7 +82,7 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
case Code::REGEXP: // No regexp literals initialized yet.
case Code::NUMBER_OF_KINDS: // Pseudo enum value.
case Code::BYTECODE_HANDLER: // No direct references to handlers.
- CHECK(false);
+ break; // hit UNREACHABLE below.
case Code::BUILTIN:
SerializeBuiltinReference(code_object, how_to_code, where_to_point, 0);
return;
@@ -106,12 +106,37 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
}
if (obj->IsScript()) {
+ Script* script_obj = Script::cast(obj);
+ DCHECK_NE(script_obj->compilation_type(), Script::COMPILATION_TYPE_EVAL);
// Wrapper object is a context-dependent JSValue. Reset it here.
- Script::cast(obj)->set_wrapper(isolate()->heap()->undefined_value());
+ script_obj->set_wrapper(isolate()->heap()->undefined_value());
+ // We want to differentiate between undefined and uninitialized_symbol for
+ // context_data for now. It is a hack to allow debugging of scripts that
+ // are included as part of a custom snapshot. (see debug::Script::IsEmbedded())
+ Object* context_data = script_obj->context_data();
+ if (context_data != isolate()->heap()->undefined_value() &&
+ context_data != isolate()->heap()->uninitialized_symbol()) {
+ script_obj->set_context_data(isolate()->heap()->undefined_value());
+ }
+ // We don't want to serialize the host options, to avoid serializing an
+ // unnecessary object graph.
+ FixedArray* host_options = script_obj->host_defined_options();
+ script_obj->set_host_defined_options(
+ isolate()->heap()->empty_fixed_array());
+ SerializeGeneric(obj, how_to_code, where_to_point);
+ script_obj->set_host_defined_options(host_options);
+ script_obj->set_context_data(context_data);
+ return;
}
if (obj->IsSharedFunctionInfo()) {
SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
+ // TODO(7110): Enable serializing of Asm modules once the AsmWasmData
+ // is context independent.
+ DCHECK(!sfi->IsApiFunction() && !sfi->HasAsmWasmData());
+ // Do not serialize when a debugger is active.
+ DCHECK(sfi->debug_info()->IsSmi());
+
// Mark SFI to indicate whether the code is cached.
bool was_deserialized = sfi->deserialized();
sfi->set_deserialized(sfi->is_compiled());
@@ -120,6 +145,11 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
return;
}
+ if (obj->IsBytecodeArray()) {
+ // Clear the stack frame cache if present
+ BytecodeArray::cast(obj)->ClearFrameCacheFromSourcePositionTable();
+ }
+
// Past this point we should not see any (context-specific) maps anymore.
CHECK(!obj->IsMap());
// There should be no references to the global object embedded.
@@ -221,8 +251,9 @@ std::unique_ptr<ScriptData> WasmCompiledModuleSerializer::SerializeWasmModule(
Isolate* isolate, Handle<FixedArray> input) {
Handle<WasmCompiledModule> compiled_module =
Handle<WasmCompiledModule>::cast(input);
- WasmCompiledModuleSerializer wasm_cs(isolate, 0, isolate->native_context(),
- handle(compiled_module->module_bytes()));
+ WasmCompiledModuleSerializer wasm_cs(
+ isolate, 0, isolate->native_context(),
+ handle(compiled_module->shared()->module_bytes()));
ScriptData* data = wasm_cs.Serialize(compiled_module);
return std::unique_ptr<ScriptData>(data);
}
@@ -432,11 +463,13 @@ ScriptData* SerializedCodeData::GetScriptData() {
return result;
}
-Vector<const SerializedData::Reservation> SerializedCodeData::Reservations()
+std::vector<SerializedData::Reservation> SerializedCodeData::Reservations()
const {
- return Vector<const Reservation>(
- reinterpret_cast<const Reservation*>(data_ + kHeaderSize),
- GetHeaderValue(kNumReservationsOffset));
+ uint32_t size = GetHeaderValue(kNumReservationsOffset);
+ std::vector<Reservation> reservations(size);
+ memcpy(reservations.data(), data_ + kHeaderSize,
+ size * sizeof(SerializedData::Reservation));
+ return reservations;
}
Vector<const byte> SerializedCodeData::Payload() const {
diff --git a/deps/v8/src/snapshot/code-serializer.h b/deps/v8/src/snapshot/code-serializer.h
index 7f8ff5cc8b..edc1c2bf1d 100644
--- a/deps/v8/src/snapshot/code-serializer.h
+++ b/deps/v8/src/snapshot/code-serializer.h
@@ -129,7 +129,7 @@ class SerializedCodeData : public SerializedData {
// Return ScriptData object and relinquish ownership over it to the caller.
ScriptData* GetScriptData();
- Vector<const Reservation> Reservations() const;
+ std::vector<Reservation> Reservations() const;
Vector<const byte> Payload() const;
Vector<const uint32_t> CodeStubKeys() const;
diff --git a/deps/v8/src/snapshot/default-deserializer-allocator.cc b/deps/v8/src/snapshot/default-deserializer-allocator.cc
index b352409f7e..5b34bfa540 100644
--- a/deps/v8/src/snapshot/default-deserializer-allocator.cc
+++ b/deps/v8/src/snapshot/default-deserializer-allocator.cc
@@ -121,7 +121,7 @@ HeapObject* DefaultDeserializerAllocator::GetObject(AllocationSpace space,
}
void DefaultDeserializerAllocator::DecodeReservation(
- Vector<const SerializedData::Reservation> res) {
+ std::vector<SerializedData::Reservation> res) {
DCHECK_EQ(0, reservations_[NEW_SPACE].size());
STATIC_ASSERT(NEW_SPACE == 0);
int current_space = NEW_SPACE;
diff --git a/deps/v8/src/snapshot/default-deserializer-allocator.h b/deps/v8/src/snapshot/default-deserializer-allocator.h
index 08d9f48cec..124c637fc6 100644
--- a/deps/v8/src/snapshot/default-deserializer-allocator.h
+++ b/deps/v8/src/snapshot/default-deserializer-allocator.h
@@ -44,7 +44,7 @@ class DefaultDeserializerAllocator final {
// ------- Reservation Methods -------
// Methods related to memory reservations (prior to deserialization).
- void DecodeReservation(Vector<const SerializedData::Reservation> res);
+ void DecodeReservation(std::vector<SerializedData::Reservation> res);
bool ReserveSpace();
// Atomically reserves space for the two given deserializers. Guarantees
diff --git a/deps/v8/src/snapshot/deserializer.cc b/deps/v8/src/snapshot/deserializer.cc
index 5d7d551c98..4b51e89e85 100644
--- a/deps/v8/src/snapshot/deserializer.cc
+++ b/deps/v8/src/snapshot/deserializer.cc
@@ -192,14 +192,21 @@ HeapObject* Deserializer<AllocatorT>::PostProcessNewObject(HeapObject* obj,
if (isolate_->external_reference_redirector()) {
call_handler_infos_.push_back(CallHandlerInfo::cast(obj));
}
- } else if (obj->IsExternalOneByteString()) {
- DCHECK(obj->map() == isolate_->heap()->native_source_string_map());
- ExternalOneByteString* string = ExternalOneByteString::cast(obj);
- DCHECK(string->is_short());
- string->set_resource(
- NativesExternalStringResource::DecodeForDeserialization(
- string->resource()));
- isolate_->heap()->RegisterExternalString(string);
+ } else if (obj->IsExternalString()) {
+ if (obj->map() == isolate_->heap()->native_source_string_map()) {
+ ExternalOneByteString* string = ExternalOneByteString::cast(obj);
+ DCHECK(string->is_short());
+ string->set_resource(
+ NativesExternalStringResource::DecodeForDeserialization(
+ string->resource()));
+ } else {
+ ExternalString* string = ExternalString::cast(obj);
+ uint32_t index = string->resource_as_uint32();
+ Address address =
+ reinterpret_cast<Address>(isolate_->api_external_references()[index]);
+ string->set_address_as_resource(address);
+ }
+ isolate_->heap()->RegisterExternalString(String::cast(obj));
} else if (obj->IsJSTypedArray()) {
JSTypedArray* typed_array = JSTypedArray::cast(obj);
CHECK(typed_array->byte_offset()->IsSmi());
@@ -234,6 +241,13 @@ HeapObject* Deserializer<AllocatorT>::PostProcessNewObject(HeapObject* obj,
void* backing_store = off_heap_backing_stores_[store_index->value()];
fta->set_external_pointer(backing_store);
}
+ } else if (obj->IsBytecodeArray()) {
+ // TODO(mythria): Remove these once we store the default values for these
+ // fields in the serializer.
+ BytecodeArray* bytecode_array = BytecodeArray::cast(obj);
+ bytecode_array->set_interrupt_budget(
+ interpreter::Interpreter::kInterruptBudget);
+ bytecode_array->set_osr_loop_nesting_level(0);
}
// Check alignment.
DCHECK_EQ(0, Heap::GetFillToAlign(obj->address(), obj->RequiredAlignment()));
@@ -496,8 +510,7 @@ bool Deserializer<AllocatorT>::ReadData(Object** current, Object** limit,
case kSynchronize:
// If we get here then that indicates that you have a mismatch between
// the number of GC roots when serializing and deserializing.
- CHECK(false);
- break;
+ UNREACHABLE();
// Deserialize raw data of variable length.
case kVariableRawData: {
@@ -635,13 +648,31 @@ bool Deserializer<AllocatorT>::ReadData(Object** current, Object** limit,
#undef SINGLE_CASE
default:
- CHECK(false);
+ UNREACHABLE();
}
}
CHECK_EQ(limit, current);
return true;
}
+namespace {
+
+int FixupJSConstructStub(Isolate* isolate, int builtin_id) {
+ if (isolate->serializer_enabled()) return builtin_id;
+
+ if (FLAG_harmony_restrict_constructor_return &&
+ builtin_id == Builtins::kJSConstructStubGenericUnrestrictedReturn) {
+ return Builtins::kJSConstructStubGenericRestrictedReturn;
+ } else if (!FLAG_harmony_restrict_constructor_return &&
+ builtin_id == Builtins::kJSConstructStubGenericRestrictedReturn) {
+ return Builtins::kJSConstructStubGenericUnrestrictedReturn;
+ } else {
+ return builtin_id;
+ }
+}
+
+} // namespace
+
template <class AllocatorT>
template <int where, int how, int within, int space_number_if_any>
Object** Deserializer<AllocatorT>::ReadDataCase(Isolate* isolate,
@@ -692,7 +723,8 @@ Object** Deserializer<AllocatorT>::ReadDataCase(Isolate* isolate,
emit_write_barrier = isolate->heap()->InNewSpace(new_object);
} else {
DCHECK_EQ(where, kBuiltin);
- int builtin_id = MaybeReplaceWithDeserializeLazy(source_.GetInt());
+ int raw_id = MaybeReplaceWithDeserializeLazy(source_.GetInt());
+ int builtin_id = FixupJSConstructStub(isolate, raw_id);
new_object = isolate->builtins()->builtin(builtin_id);
emit_write_barrier = false;
}
diff --git a/deps/v8/src/snapshot/mksnapshot.cc b/deps/v8/src/snapshot/mksnapshot.cc
index b1ecd61f2f..33c6c4a115 100644
--- a/deps/v8/src/snapshot/mksnapshot.cc
+++ b/deps/v8/src/snapshot/mksnapshot.cc
@@ -94,7 +94,7 @@ class SnapshotWriter {
static void WriteSnapshotData(FILE* fp,
const i::Vector<const i::byte>& blob) {
for (int i = 0; i < blob.length(); i++) {
- if ((i & 0x1f) == 0x1f) fprintf(fp, "\n");
+ if ((i & 0x1F) == 0x1F) fprintf(fp, "\n");
if (i > 0) fprintf(fp, ",");
fprintf(fp, "%u", static_cast<unsigned char>(blob.at(i)));
}
diff --git a/deps/v8/src/snapshot/partial-serializer.cc b/deps/v8/src/snapshot/partial-serializer.cc
index 11b21a17b3..baac565a11 100644
--- a/deps/v8/src/snapshot/partial-serializer.cc
+++ b/deps/v8/src/snapshot/partial-serializer.cc
@@ -17,7 +17,8 @@ PartialSerializer::PartialSerializer(
: Serializer(isolate),
startup_serializer_(startup_serializer),
serialize_embedder_fields_(callback),
- can_be_rehashed_(true) {
+ can_be_rehashed_(true),
+ context_(nullptr) {
InitializeCodeAddressMap();
}
@@ -25,24 +26,23 @@ PartialSerializer::~PartialSerializer() {
OutputStatistics("PartialSerializer");
}
-void PartialSerializer::Serialize(Object** o, bool include_global_proxy) {
- DCHECK((*o)->IsNativeContext());
-
- Context* context = Context::cast(*o);
- reference_map()->AddAttachedReference(context->global_proxy());
+void PartialSerializer::Serialize(Context** o, bool include_global_proxy) {
+ context_ = *o;
+ DCHECK(context_->IsNativeContext());
+ reference_map()->AddAttachedReference(context_->global_proxy());
// The bootstrap snapshot has a code-stub context. When serializing the
// partial snapshot, it is chained into the weak context list on the isolate
// and it's next context pointer may point to the code-stub context. Clear
// it before serializing, it will get re-added to the context list
// explicitly when it's loaded.
- context->set(Context::NEXT_CONTEXT_LINK,
- isolate()->heap()->undefined_value());
- DCHECK(!context->global_object()->IsUndefined(context->GetIsolate()));
+ context_->set(Context::NEXT_CONTEXT_LINK,
+ isolate()->heap()->undefined_value());
+ DCHECK(!context_->global_object()->IsUndefined(context_->GetIsolate()));
// Reset math random cache to get fresh random numbers.
- context->set_math_random_index(Smi::kZero);
- context->set_math_random_cache(isolate()->heap()->undefined_value());
+ context_->set_math_random_index(Smi::kZero);
+ context_->set_math_random_cache(isolate()->heap()->undefined_value());
- VisitRootPointer(Root::kPartialSnapshotCache, o);
+ VisitRootPointer(Root::kPartialSnapshotCache, reinterpret_cast<Object**>(o));
SerializeDeferredObjects();
SerializeEmbedderFields();
Pad();
@@ -87,6 +87,8 @@ void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
DCHECK(!obj->IsInternalizedString());
// Function and object templates are not context specific.
DCHECK(!obj->IsTemplateInfo());
+ // We should not end up at another native context.
+ DCHECK_IMPLIES(obj != context_, !obj->IsNativeContext());
FlushSkip(skip);
diff --git a/deps/v8/src/snapshot/partial-serializer.h b/deps/v8/src/snapshot/partial-serializer.h
index b436c40cbe..3225b004cb 100644
--- a/deps/v8/src/snapshot/partial-serializer.h
+++ b/deps/v8/src/snapshot/partial-serializer.h
@@ -21,7 +21,7 @@ class PartialSerializer : public Serializer<> {
~PartialSerializer() override;
// Serialize the objects reachable from a single object pointer.
- void Serialize(Object** o, bool include_global_proxy);
+ void Serialize(Context** o, bool include_global_proxy);
bool can_be_rehashed() const { return can_be_rehashed_; }
@@ -41,6 +41,7 @@ class PartialSerializer : public Serializer<> {
// Indicates whether we only serialized hash tables that we can rehash.
// TODO(yangguo): generalize rehashing, and remove this flag.
bool can_be_rehashed_;
+ Context* context_;
DISALLOW_COPY_AND_ASSIGN(PartialSerializer);
};
diff --git a/deps/v8/src/snapshot/serializer-common.cc b/deps/v8/src/snapshot/serializer-common.cc
index f201342105..71436fe8fd 100644
--- a/deps/v8/src/snapshot/serializer-common.cc
+++ b/deps/v8/src/snapshot/serializer-common.cc
@@ -55,6 +55,17 @@ ExternalReferenceEncoder::~ExternalReferenceEncoder() {
#endif // DEBUG
}
+Maybe<ExternalReferenceEncoder::Value> ExternalReferenceEncoder::TryEncode(
+ Address address) {
+ Maybe<uint32_t> maybe_index = map_->Get(address);
+ if (maybe_index.IsNothing()) return Nothing<Value>();
+ Value result(maybe_index.FromJust());
+#ifdef DEBUG
+ if (result.is_from_api()) count_[result.index()]++;
+#endif // DEBUG
+ return Just<Value>(result);
+}
+
ExternalReferenceEncoder::Value ExternalReferenceEncoder::Encode(
Address address) {
Maybe<uint32_t> maybe_index = map_->Get(address);
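
A hedged sketch of the intended call pattern, with {encoder},
{resource_address} and the branch bodies as placeholders: unlike Encode(),
which requires the address to be known, TryEncode() reports an unknown
address as Nothing so the caller can fall back (as the external-string
serializer further below does):

  ExternalReferenceEncoder::Value value;
  if (encoder.TryEncode(resource_address).To(&value)) {
    // Registered through the API: serialize the compact index instead.
    DCHECK(value.is_from_api());
  } else {
    // Unknown address: fall back, e.g. serialize the payload itself.
  }
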
diff --git a/deps/v8/src/snapshot/serializer-common.h b/deps/v8/src/snapshot/serializer-common.h
index 6482c350f7..7d3d66a08d 100644
--- a/deps/v8/src/snapshot/serializer-common.h
+++ b/deps/v8/src/snapshot/serializer-common.h
@@ -22,6 +22,7 @@ class ExternalReferenceEncoder {
class Value {
public:
explicit Value(uint32_t raw) : value_(raw) {}
+ Value() : value_(0) {}
static uint32_t Encode(uint32_t index, bool is_from_api) {
return Index::encode(index) | IsFromAPI::encode(is_from_api);
}
@@ -40,6 +41,7 @@ class ExternalReferenceEncoder {
~ExternalReferenceEncoder();
Value Encode(Address key);
+ Maybe<Value> TryEncode(Address key);
const char* NameOfAddress(Isolate* isolate, Address address) const;
@@ -255,6 +257,7 @@ class SerializedData {
public:
class Reservation {
public:
+ Reservation() : reservation_(0) {}
explicit Reservation(uint32_t size)
: reservation_(ChunkSizeBits::encode(size)) {}
diff --git a/deps/v8/src/snapshot/serializer.cc b/deps/v8/src/snapshot/serializer.cc
index fd96850890..87e4fe8fdc 100644
--- a/deps/v8/src/snapshot/serializer.cc
+++ b/deps/v8/src/snapshot/serializer.cc
@@ -454,13 +454,24 @@ void Serializer<AllocatorT>::ObjectSerializer::SerializeJSArrayBuffer() {
template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::SerializeExternalString() {
Heap* heap = serializer_->isolate()->heap();
+ // For external strings with a known resource, we replace the resource field
+ // with the encoded external reference, which we restore on deserialization.
+ // For native source code strings, we replace the resource field with the
+ // native source id.
+ // All other external strings are serialized to look like ordinary
+ // sequential strings.
if (object_->map() != heap->native_source_string_map()) {
- // Usually we cannot recreate resources for external strings. To work
- // around this, external strings are serialized to look like ordinary
- // sequential strings.
- // The exception are native source code strings, since we can recreate
- // their resources.
- SerializeExternalStringAsSequentialString();
+ ExternalString* string = ExternalString::cast(object_);
+ Address resource = string->resource_as_address();
+ ExternalReferenceEncoder::Value reference;
+ if (serializer_->external_reference_encoder_.TryEncode(resource).To(
+ &reference)) {
+ DCHECK(reference.is_from_api());
+ string->set_uint32_as_resource(reference.index());
+ SerializeObject();
+ string->set_address_as_resource(resource);
+ } else {
+ SerializeExternalStringAsSequentialString();
+ }
} else {
ExternalOneByteString* string = ExternalOneByteString::cast(object_);
DCHECK(string->is_short());
diff --git a/deps/v8/src/snapshot/serializer.h b/deps/v8/src/snapshot/serializer.h
index eda25fbd35..22dcb26c8c 100644
--- a/deps/v8/src/snapshot/serializer.h
+++ b/deps/v8/src/snapshot/serializer.h
@@ -252,7 +252,7 @@ class Serializer : public SerializerDeserializer {
AllocatorT allocator_;
#ifdef OBJECT_PRINT
- static const int kInstanceTypes = 256;
+ static const int kInstanceTypes = LAST_TYPE + 1;
int* instance_type_count_;
size_t* instance_type_size_;
#endif // OBJECT_PRINT
diff --git a/deps/v8/src/snapshot/snapshot-common.cc b/deps/v8/src/snapshot/snapshot-common.cc
index e7efd87bd8..2bf50cc748 100644
--- a/deps/v8/src/snapshot/snapshot-common.cc
+++ b/deps/v8/src/snapshot/snapshot-common.cc
@@ -139,7 +139,7 @@ Code* Snapshot::DeserializeHandler(Isolate* isolate,
}
if (isolate->logger()->is_logging_code_events() || isolate->is_profiling()) {
- isolate->logger()->LogCodeObject(code);
+ isolate->logger()->LogBytecodeHandler(bytecode, operand_scale, code);
}
return code;
@@ -358,10 +358,12 @@ SnapshotData::SnapshotData(const Serializer<AllocatorT>* serializer) {
template SnapshotData::SnapshotData(
const Serializer<DefaultSerializerAllocator>* serializer);
-Vector<const SerializedData::Reservation> SnapshotData::Reservations() const {
- return Vector<const Reservation>(
- reinterpret_cast<const Reservation*>(data_ + kHeaderSize),
- GetHeaderValue(kNumReservationsOffset));
+std::vector<SerializedData::Reservation> SnapshotData::Reservations() const {
+ uint32_t size = GetHeaderValue(kNumReservationsOffset);
+ std::vector<SerializedData::Reservation> reservations(size);
+ memcpy(reservations.data(), data_ + kHeaderSize,
+ size * sizeof(SerializedData::Reservation));
+ return reservations;
}
Vector<const byte> SnapshotData::Payload() const {
diff --git a/deps/v8/src/snapshot/snapshot-empty.cc b/deps/v8/src/snapshot/snapshot-empty.cc
index a13f2e8870..c6ea6a2bf1 100644
--- a/deps/v8/src/snapshot/snapshot-empty.cc
+++ b/deps/v8/src/snapshot/snapshot-empty.cc
@@ -15,8 +15,8 @@ namespace internal {
// These are meant for use with snapshot-external.cc. Should this file
// be compiled with those options, we just supply these dummy implementations
// below. This happens when compiling the mksnapshot utility.
-void SetNativesFromFile(StartupData* data) { CHECK(false); }
-void SetSnapshotFromFile(StartupData* data) { CHECK(false); }
+void SetNativesFromFile(StartupData* data) { UNREACHABLE(); }
+void SetSnapshotFromFile(StartupData* data) { UNREACHABLE(); }
void ReadNatives() {}
void DisposeNatives() {}
#endif // V8_USE_EXTERNAL_STARTUP_DATA
diff --git a/deps/v8/src/snapshot/snapshot-source-sink.cc b/deps/v8/src/snapshot/snapshot-source-sink.cc
index 77b19d51a1..49e0f2298a 100644
--- a/deps/v8/src/snapshot/snapshot-source-sink.cc
+++ b/deps/v8/src/snapshot/snapshot-source-sink.cc
@@ -16,14 +16,14 @@ void SnapshotByteSink::PutInt(uintptr_t integer, const char* description) {
DCHECK_LT(integer, 1 << 30);
integer <<= 2;
int bytes = 1;
- if (integer > 0xff) bytes = 2;
- if (integer > 0xffff) bytes = 3;
- if (integer > 0xffffff) bytes = 4;
+ if (integer > 0xFF) bytes = 2;
+ if (integer > 0xFFFF) bytes = 3;
+ if (integer > 0xFFFFFF) bytes = 4;
integer |= (bytes - 1);
- Put(static_cast<int>(integer & 0xff), "IntPart1");
- if (bytes > 1) Put(static_cast<int>((integer >> 8) & 0xff), "IntPart2");
- if (bytes > 2) Put(static_cast<int>((integer >> 16) & 0xff), "IntPart3");
- if (bytes > 3) Put(static_cast<int>((integer >> 24) & 0xff), "IntPart4");
+ Put(static_cast<int>(integer & 0xFF), "IntPart1");
+ if (bytes > 1) Put(static_cast<int>((integer >> 8) & 0xFF), "IntPart2");
+ if (bytes > 2) Put(static_cast<int>((integer >> 16) & 0xFF), "IntPart3");
+ if (bytes > 3) Put(static_cast<int>((integer >> 24) & 0xFF), "IntPart4");
}
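
The scheme above stores the value shifted left by two, with the byte count
minus one in the low two bits of the first byte. A standalone sketch of the
matching reader (the in-tree counterpart is SnapshotByteSource::GetInt, as
used by the deserializer; DecodeInt is a hypothetical name):

  uint32_t DecodeInt(const uint8_t* data) {
    uint32_t answer = data[0];
    int bytes = (answer & 3) + 1;  // length tag in the low two bits
    for (int i = 1; i < bytes; i++) {
      answer |= static_cast<uint32_t>(data[i]) << (i * 8);
    }
    return answer >> 2;  // drop the length tag
  }
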
diff --git a/deps/v8/src/snapshot/snapshot.h b/deps/v8/src/snapshot/snapshot.h
index 2ffe5b6086..8f37e00c4a 100644
--- a/deps/v8/src/snapshot/snapshot.h
+++ b/deps/v8/src/snapshot/snapshot.h
@@ -31,7 +31,7 @@ class SnapshotData : public SerializedData {
: SerializedData(const_cast<byte*>(snapshot.begin()), snapshot.length()) {
}
- Vector<const Reservation> Reservations() const;
+ std::vector<Reservation> Reservations() const;
virtual Vector<const byte> Payload() const;
Vector<const byte> RawData() const {
diff --git a/deps/v8/src/snapshot/startup-deserializer.cc b/deps/v8/src/snapshot/startup-deserializer.cc
index 91432e185a..e6f853fe0e 100644
--- a/deps/v8/src/snapshot/startup-deserializer.cc
+++ b/deps/v8/src/snapshot/startup-deserializer.cc
@@ -37,7 +37,7 @@ void StartupDeserializer::DeserializeInto(Isolate* isolate) {
isolate->heap()->IterateSmiRoots(this);
isolate->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
isolate->heap()->RepairFreeListsAfterDeserialization();
- isolate->heap()->IterateWeakRoots(this, VISIT_ALL);
+ isolate->heap()->IterateWeakRoots(this, VISIT_FOR_SERIALIZATION);
DeserializeDeferredObjects();
RestoreExternalReferenceRedirectors(accessor_infos());
RestoreExternalReferenceRedirectors(call_handler_infos());
diff --git a/deps/v8/src/snapshot/startup-serializer.cc b/deps/v8/src/snapshot/startup-serializer.cc
index 8b4a79b8b1..5ae6e33b87 100644
--- a/deps/v8/src/snapshot/startup-serializer.cc
+++ b/deps/v8/src/snapshot/startup-serializer.cc
@@ -95,7 +95,7 @@ void StartupSerializer::SerializeWeakReferencesAndDeferred() {
// one entry with 'undefined' to terminate the partial snapshot cache.
Object* undefined = isolate()->heap()->undefined_value();
VisitRootPointer(Root::kPartialSnapshotCache, &undefined);
- isolate()->heap()->IterateWeakRoots(this, VISIT_ALL);
+ isolate()->heap()->IterateWeakRoots(this, VISIT_FOR_SERIALIZATION);
SerializeDeferredObjects();
Pad();
}
@@ -122,8 +122,7 @@ void StartupSerializer::SerializeStrongReferences() {
CHECK_NULL(isolate->thread_manager()->FirstThreadStateInUse());
// No active or weak handles.
CHECK(isolate->handle_scope_implementer()->blocks()->empty());
- CHECK_EQ(0, isolate->global_handles()->global_handles_count());
- CHECK_EQ(0, isolate->eternal_handles()->NumberOfHandles());
+
// Visit smi roots.
// Clear the stack limits to make the snapshot reproducible.
// Reset it again afterwards.
@@ -131,8 +130,7 @@ void StartupSerializer::SerializeStrongReferences() {
isolate->heap()->IterateSmiRoots(this);
isolate->heap()->SetStackLimits();
// First visit immortal immovables to make sure they end up in the first page.
- isolate->heap()->IterateStrongRoots(this,
- VISIT_ONLY_STRONG_FOR_SERIALIZATION);
+ isolate->heap()->IterateStrongRoots(this, VISIT_FOR_SERIALIZATION);
}
void StartupSerializer::VisitRootPointers(Root root, Object** start,
@@ -185,5 +183,36 @@ bool StartupSerializer::MustBeDeferred(HeapObject* object) {
return !object->IsMap();
}
+SerializedHandleChecker::SerializedHandleChecker(
+ Isolate* isolate, std::vector<Context*>* contexts)
+ : isolate_(isolate) {
+ AddToSet(isolate->heap()->serialized_objects());
+ for (auto const& context : *contexts) {
+ AddToSet(context->serialized_objects());
+ }
+}
+
+void SerializedHandleChecker::AddToSet(FixedArray* serialized) {
+ int length = serialized->length();
+ for (int i = 0; i < length; i++) serialized_.insert(serialized->get(i));
+}
+
+void SerializedHandleChecker::VisitRootPointers(Root root, Object** start,
+ Object** end) {
+ for (Object** p = start; p < end; p++) {
+ if (serialized_.find(*p) != serialized_.end()) continue;
+ PrintF("%s handle not serialized: ",
+ root == Root::kGlobalHandles ? "global" : "eternal");
+ (*p)->Print();
+ ok_ = false;
+ }
+}
+
+bool SerializedHandleChecker::CheckGlobalAndEternalHandles() {
+ isolate_->global_handles()->IterateAllRoots(this);
+ isolate_->eternal_handles()->IterateAllRoots(this);
+ return ok_;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/snapshot/startup-serializer.h b/deps/v8/src/snapshot/startup-serializer.h
index 69985388e9..ae2a9f49df 100644
--- a/deps/v8/src/snapshot/startup-serializer.h
+++ b/deps/v8/src/snapshot/startup-serializer.h
@@ -83,6 +83,20 @@ class StartupSerializer : public Serializer<> {
DISALLOW_COPY_AND_ASSIGN(StartupSerializer);
};
+class SerializedHandleChecker : public RootVisitor {
+ public:
+ SerializedHandleChecker(Isolate* isolate, std::vector<Context*>* contexts);
+ virtual void VisitRootPointers(Root root, Object** start, Object** end);
+ bool CheckGlobalAndEternalHandles();
+
+ private:
+ void AddToSet(FixedArray* serialized);
+
+ Isolate* isolate_;
+ std::unordered_set<Object*> serialized_;
+ bool ok_ = true;
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/string-stream.cc b/deps/v8/src/string-stream.cc
index be508f4f45..7693a229bf 100644
--- a/deps/v8/src/string-stream.cc
+++ b/deps/v8/src/string-stream.cc
@@ -129,7 +129,7 @@ void StringStream::Add(Vector<const char> format, Vector<FmtElm> elms) {
int value = current.data_.u_int_;
if (0x20 <= value && value <= 0x7F) {
Put(value);
- } else if (value <= 0xff) {
+ } else if (value <= 0xFF) {
Add("\\x%02x", value);
} else {
Add("\\u%04x", value);
diff --git a/deps/v8/src/strtod.cc b/deps/v8/src/strtod.cc
index 4bdd5378fa..8d42b4c202 100644
--- a/deps/v8/src/strtod.cc
+++ b/deps/v8/src/strtod.cc
@@ -35,7 +35,7 @@ static const int kMinDecimalPower = -324;
// 2^64 = 18446744073709551616
static const uint64_t kMaxUint64 = V8_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFF);
-
+// clang-format off
static const double exact_powers_of_ten[] = {
1.0, // 10^0
10.0,
@@ -59,9 +59,10 @@ static const double exact_powers_of_ten[] = {
10000000000000000000.0,
100000000000000000000.0, // 10^20
1000000000000000000000.0,
- // 10^22 = 0x21e19e0c9bab2400000 = 0x878678326eac9 * 2^22
+ // 10^22 = 0x21E19E0C9BAB2400000 = 0x878678326EAC9 * 2^22
10000000000000000000000.0
};
+// clang-format on
static const int kExactPowersOfTenSize = arraysize(exact_powers_of_ten);
// Maximum number of significant digits in the decimal representation.
@@ -162,8 +163,11 @@ static bool DoubleStrtod(Vector<const char> trimmed,
// therefore accurate.
// Note that the ARM and MIPS simulators are compiled for 32 bits. They
// therefore exhibit the same problem.
+ USE(exact_powers_of_ten);
+ USE(kMaxExactDoubleIntegerDecimalDigits);
+ USE(kExactPowersOfTenSize);
return false;
-#endif
+#else
if (trimmed.length() <= kMaxExactDoubleIntegerDecimalDigits) {
int read_digits;
// The trimmed input fits into a double.
@@ -201,6 +205,7 @@ static bool DoubleStrtod(Vector<const char> trimmed,
}
}
return false;
+#endif
}
@@ -213,13 +218,20 @@ static DiyFp AdjustmentPowerOfTen(int exponent) {
// distance.
DCHECK_EQ(PowersOfTenCache::kDecimalExponentDistance, 8);
switch (exponent) {
- case 1: return DiyFp(V8_2PART_UINT64_C(0xa0000000, 00000000), -60);
- case 2: return DiyFp(V8_2PART_UINT64_C(0xc8000000, 00000000), -57);
- case 3: return DiyFp(V8_2PART_UINT64_C(0xfa000000, 00000000), -54);
- case 4: return DiyFp(V8_2PART_UINT64_C(0x9c400000, 00000000), -50);
- case 5: return DiyFp(V8_2PART_UINT64_C(0xc3500000, 00000000), -47);
- case 6: return DiyFp(V8_2PART_UINT64_C(0xf4240000, 00000000), -44);
- case 7: return DiyFp(V8_2PART_UINT64_C(0x98968000, 00000000), -40);
+ case 1:
+ return DiyFp(V8_2PART_UINT64_C(0xA0000000, 00000000), -60);
+ case 2:
+ return DiyFp(V8_2PART_UINT64_C(0xC8000000, 00000000), -57);
+ case 3:
+ return DiyFp(V8_2PART_UINT64_C(0xFA000000, 00000000), -54);
+ case 4:
+ return DiyFp(V8_2PART_UINT64_C(0x9C400000, 00000000), -50);
+ case 5:
+ return DiyFp(V8_2PART_UINT64_C(0xC3500000, 00000000), -47);
+ case 6:
+ return DiyFp(V8_2PART_UINT64_C(0xF4240000, 00000000), -44);
+ case 7:
+ return DiyFp(V8_2PART_UINT64_C(0x98968000, 00000000), -40);
default:
UNREACHABLE();
}
diff --git a/deps/v8/src/third_party/utf8-decoder/LICENSE b/deps/v8/src/third_party/utf8-decoder/LICENSE
new file mode 100644
index 0000000000..b59bef2fb6
--- /dev/null
+++ b/deps/v8/src/third_party/utf8-decoder/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2008-2009 Bjoern Hoehrmann <bjoern@hoehrmann.de>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/deps/v8/src/third_party/utf8-decoder/README.v8 b/deps/v8/src/third_party/utf8-decoder/README.v8
new file mode 100644
index 0000000000..e1e13ce53f
--- /dev/null
+++ b/deps/v8/src/third_party/utf8-decoder/README.v8
@@ -0,0 +1,18 @@
+Name: DFA UTF-8 Decoder
+Short Name: utf8-decoder
+URL: http://bjoern.hoehrmann.de/utf-8/decoder/dfa/
+Version: 0
+License: MIT
+License File: NOT_SHIPPED
+Security Critical: no
+
+Description:
+Decodes UTF-8 bytes using a fast and simple deterministic finite automaton.
+
+Local modifications:
+- Rejection state has been mapped to row 0 (instead of row 1) of the DFA,
+ saving some 50 bytes and making the table easier to reason about.
+- The transitions have been remapped to represent both a state transition and a
+ bit mask for the incoming byte.
+- The caller must now zero out the code point buffer after successful or
+ unsuccessful state transitions.
diff --git a/deps/v8/src/third_party/utf8-decoder/utf8-decoder.h b/deps/v8/src/third_party/utf8-decoder/utf8-decoder.h
new file mode 100644
index 0000000000..5668e5ad9e
--- /dev/null
+++ b/deps/v8/src/third_party/utf8-decoder/utf8-decoder.h
@@ -0,0 +1,78 @@
+// See http://bjoern.hoehrmann.de/utf-8/decoder/dfa/ for details.
+// The remapped transition table is justified at
+// https://docs.google.com/spreadsheets/d/1AZcQwuEL93HmNCljJWUwFMGqf7JAQ0puawZaUgP0E14
+
+#include <stdint.h>
+
+#ifndef __UTF8_DFA_DECODER_H
+#define __UTF8_DFA_DECODER_H
+
+namespace Utf8DfaDecoder {
+
+enum State : uint8_t {
+ kReject = 0,
+ kAccept = 12,
+ kTwoByte = 24,
+ kThreeByte = 36,
+ kThreeByteLowMid = 48,
+ kFourByte = 60,
+ kFourByteLow = 72,
+ kThreeByteHigh = 84,
+ kFourByteMidHigh = 96,
+};
+
+static inline void Decode(uint8_t byte, State* state, uint32_t* buffer) {
+ // This first table maps each byte to a character class, which selects a
+ // transition in the second table below.
+ static constexpr uint8_t transitions[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 00-0F
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 10-1F
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 20-2F
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 30-3F
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 40-4F
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 50-5F
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 60-6F
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 70-7F
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 80-8F
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // 90-9F
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, // A0-AF
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, // B0-BF
+ 9, 9, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, // C0-CF
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, // D0-DF
+ 10, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 5, 5, // E0-EF
+ 11, 7, 7, 7, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, // F0-FF
+ };
+
+ // This second table maps a state to a new state when adding a transition.
+ // 00-7F
+ // | 80-8F
+ // | | 90-9F
+ // | | | A0-BF
+ // | | | | C2-DF
+ // | | | | | E1-EC, EE, EF
+ // | | | | | | ED
+ // | | | | | | | F1-F3
+ // | | | | | | | | F4
+ // | | | | | | | | | C0, C1, F5-FF
+ // | | | | | | | | | | E0
+ // | | | | | | | | | | | F0
+ static constexpr uint8_t states[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // REJECT = 0
+ 12, 0, 0, 0, 24, 36, 48, 60, 72, 0, 84, 96, // ACCEPT = 12
+ 0, 12, 12, 12, 0, 0, 0, 0, 0, 0, 0, 0, // 2-byte = 24
+ 0, 24, 24, 24, 0, 0, 0, 0, 0, 0, 0, 0, // 3-byte = 36
+ 0, 24, 24, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 3-byte low/mid = 48
+ 0, 36, 36, 36, 0, 0, 0, 0, 0, 0, 0, 0, // 4-byte = 60
+ 0, 36, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 4-byte low = 72
+ 0, 0, 0, 24, 0, 0, 0, 0, 0, 0, 0, 0, // 3-byte high = 84
+ 0, 0, 36, 36, 0, 0, 0, 0, 0, 0, 0, 0, // 4-byte mid/high = 96
+ };
+
+ DCHECK_NE(*state, State::kReject);
+ uint8_t type = transitions[byte];
+ *state = static_cast<State>(states[*state + type]);
+ *buffer = (*buffer << 6) | (byte & (0x7F >> (type >> 1)));
+}
+
+} // namespace Utf8DfaDecoder
+
+#endif /* __UTF8_DFA_DECODER_H */
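
A minimal sketch of driving the decoder by hand, decoding the two-byte
sequence 0xC3 0xA9 ("é", U+00E9); per the README, the caller resets the
buffer between code points:

  const uint8_t input[] = {0xC3, 0xA9};
  Utf8DfaDecoder::State state = Utf8DfaDecoder::State::kAccept;
  uint32_t buffer = 0;
  for (uint8_t byte : input) {
    Utf8DfaDecoder::Decode(byte, &state, &buffer);
  }
  // Here state == kAccept and buffer == 0xE9. Reset buffer to zero before
  // the next code point; state == kReject would mean a malformed sequence.
  buffer = 0;
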
diff --git a/deps/v8/src/tracing/traced-value.cc b/deps/v8/src/tracing/traced-value.cc
index 9b2a45c991..de9382e65b 100644
--- a/deps/v8/src/tracing/traced-value.cc
+++ b/deps/v8/src/tracing/traced-value.cc
@@ -43,7 +43,7 @@ void EscapeAndAppendString(const char* value, std::string* result) {
*result += "\\\\";
break;
default:
- if (c < '\040') {
+ if (c < '\x20') {
base::OS::SNPrintF(
number_buffer, arraysize(number_buffer), "\\u%04X",
static_cast<unsigned>(static_cast<unsigned char>(c)));
diff --git a/deps/v8/src/tracing/tracing-category-observer.cc b/deps/v8/src/tracing/tracing-category-observer.cc
index 3e286620dc..28c107d88f 100644
--- a/deps/v8/src/tracing/tracing-category-observer.cc
+++ b/deps/v8/src/tracing/tracing-category-observer.cc
@@ -4,6 +4,7 @@
#include "src/tracing/tracing-category-observer.h"
+#include "src/base/atomic-utils.h"
#include "src/flags.h"
#include "src/tracing/trace-event.h"
#include "src/v8.h"
@@ -37,12 +38,16 @@ void TracingCategoryObserver::OnTraceEnabled() {
TRACE_EVENT_CATEGORY_GROUP_ENABLED(
TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats"), &enabled);
if (enabled) {
- v8::internal::FLAG_runtime_stats |= ENABLED_BY_TRACING;
+ base::AsAtomic32::Relaxed_Store(
+ &v8::internal::FLAG_runtime_stats,
+ (v8::internal::FLAG_runtime_stats | ENABLED_BY_TRACING));
}
TRACE_EVENT_CATEGORY_GROUP_ENABLED(
TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats_sampling"), &enabled);
if (enabled) {
- v8::internal::FLAG_runtime_stats |= ENABLED_BY_SAMPLING;
+ base::AsAtomic32::Relaxed_Store(
+ &v8::internal::FLAG_runtime_stats,
+ v8::internal::FLAG_runtime_stats | ENABLED_BY_SAMPLING);
}
TRACE_EVENT_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT("v8.gc_stats"),
&enabled);
@@ -57,8 +62,10 @@ void TracingCategoryObserver::OnTraceEnabled() {
}
void TracingCategoryObserver::OnTraceDisabled() {
- v8::internal::FLAG_runtime_stats &=
- ~(ENABLED_BY_TRACING | ENABLED_BY_SAMPLING);
+ base::AsAtomic32::Relaxed_Store(
+ &v8::internal::FLAG_runtime_stats,
+ v8::internal::FLAG_runtime_stats &
+ ~(ENABLED_BY_TRACING | ENABLED_BY_SAMPLING));
v8::internal::FLAG_gc_stats &= ~ENABLED_BY_TRACING;
v8::internal::FLAG_ic_stats &= ~ENABLED_BY_TRACING;
}
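
These relaxed stores pair with relaxed reads on the consumer side; a sketch,
assuming base::AsAtomic32 also exposes Relaxed_Load as in
src/base/atomic-utils.h:

  int flags = base::AsAtomic32::Relaxed_Load(&v8::internal::FLAG_runtime_stats);
  if (flags & ENABLED_BY_TRACING) {
    // A tracing session switched runtime-stats collection on.
  }
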
diff --git a/deps/v8/src/transitions.cc b/deps/v8/src/transitions.cc
index 8e087b2e67..1d6f9a05be 100644
--- a/deps/v8/src/transitions.cc
+++ b/deps/v8/src/transitions.cc
@@ -17,7 +17,7 @@ void TransitionsAccessor::Initialize() {
encoding_ = kUninitialized;
} else if (HeapObject::cast(raw_transitions_)->IsWeakCell()) {
encoding_ = kWeakCell;
- } else if (StoreHandler::IsHandler(raw_transitions_)) {
+ } else if (HeapObject::cast(raw_transitions_)->IsStoreHandler()) {
encoding_ = kHandler;
} else if (HeapObject::cast(raw_transitions_)->IsTransitionArray()) {
encoding_ = kFullTransitionArray;
@@ -250,7 +250,7 @@ Object* TransitionsAccessor::SearchHandler(Name* name,
int transition = transitions()->Search(kData, name, NONE);
if (transition == kNotFound) return nullptr;
Object* raw_handler = transitions()->GetRawTarget(transition);
- if (StoreHandler::IsHandler(raw_handler)) {
+ if (raw_handler->IsStoreHandler()) {
return StoreHandler::ValidHandlerOrNull(raw_handler, name,
out_transition);
}
diff --git a/deps/v8/src/trap-handler/trap-handler.h b/deps/v8/src/trap-handler/trap-handler.h
index 612cf51b45..1e02eeb34c 100644
--- a/deps/v8/src/trap-handler/trap-handler.h
+++ b/deps/v8/src/trap-handler/trap-handler.h
@@ -66,7 +66,7 @@ void ReleaseHandlerData(int index);
#define THREAD_LOCAL __thread
#endif
-inline bool UseTrapHandler() {
+inline bool IsTrapHandlerEnabled() {
return FLAG_wasm_trap_handler && V8_TRAP_HANDLER_SUPPORTED;
}
@@ -75,14 +75,14 @@ extern THREAD_LOCAL int g_thread_in_wasm_code;
inline bool IsThreadInWasm() { return g_thread_in_wasm_code; }
inline void SetThreadInWasm() {
- if (UseTrapHandler()) {
+ if (IsTrapHandlerEnabled()) {
DCHECK(!IsThreadInWasm());
g_thread_in_wasm_code = true;
}
}
inline void ClearThreadInWasm() {
- if (UseTrapHandler()) {
+ if (IsTrapHandlerEnabled()) {
DCHECK(IsThreadInWasm());
g_thread_in_wasm_code = false;
}
diff --git a/deps/v8/src/type-hints.cc b/deps/v8/src/type-hints.cc
index 11ce1561f9..d74a913901 100644
--- a/deps/v8/src/type-hints.cc
+++ b/deps/v8/src/type-hints.cc
@@ -23,6 +23,8 @@ std::ostream& operator<<(std::ostream& os, BinaryOperationHint hint) {
return os << "NumberOrOddball";
case BinaryOperationHint::kString:
return os << "String";
+ case BinaryOperationHint::kBigInt:
+ return os << "BigInt";
case BinaryOperationHint::kAny:
return os << "Any";
}
@@ -45,6 +47,8 @@ std::ostream& operator<<(std::ostream& os, CompareOperationHint hint) {
return os << "String";
case CompareOperationHint::kSymbol:
return os << "Symbol";
+ case CompareOperationHint::kBigInt:
+ return os << "BigInt";
case CompareOperationHint::kReceiver:
return os << "Receiver";
case CompareOperationHint::kAny:
diff --git a/deps/v8/src/type-hints.h b/deps/v8/src/type-hints.h
index 66102eae9a..e9ac639723 100644
--- a/deps/v8/src/type-hints.h
+++ b/deps/v8/src/type-hints.h
@@ -20,6 +20,7 @@ enum class BinaryOperationHint : uint8_t {
kNumber,
kNumberOrOddball,
kString,
+ kBigInt,
kAny
};
@@ -38,6 +39,7 @@ enum class CompareOperationHint : uint8_t {
kInternalizedString,
kString,
kSymbol,
+ kBigInt,
kReceiver,
kAny
};
diff --git a/deps/v8/src/unicode-inl.h b/deps/v8/src/unicode-inl.h
index ebebfaa1bd..7c0386ce52 100644
--- a/deps/v8/src/unicode-inl.h
+++ b/deps/v8/src/unicode-inl.h
@@ -113,8 +113,8 @@ unsigned Utf8::Encode(char* str,
uchar Utf8::ValueOf(const byte* bytes, size_t length, size_t* cursor) {
if (length <= 0) return kBadChar;
byte first = bytes[0];
- // Characters between 0000 and 0007F are encoded as a single character
- if (first <= kMaxOneByteChar) {
+ // Characters between 0000 and 007F are encoded as a single character
+ if (V8_LIKELY(first <= kMaxOneByteChar)) {
*cursor += 1;
return first;
}
diff --git a/deps/v8/src/unicode.cc b/deps/v8/src/unicode.cc
index 22e5ca606e..4d7896ec37 100644
--- a/deps/v8/src/unicode.cc
+++ b/deps/v8/src/unicode.cc
@@ -21,7 +21,7 @@ static const uchar kSentinel = static_cast<uchar>(-1);
/**
* \file
- * Implementations of functions for working with unicode.
+ * Implementations of functions for working with Unicode.
*/
typedef signed short int16_t; // NOLINT
@@ -46,7 +46,7 @@ static inline bool IsStart(int32_t entry) {
#ifndef V8_INTL_SUPPORT
/**
- * Look up a character in the unicode table using a mix of binary and
+ * Look up a character in the Unicode table using a mix of binary and
* interpolation search. For a uniformly distributed array
* interpolation search beats binary search by a wide margin. However,
* in this case interpolation search degenerates because of some very
@@ -193,306 +193,91 @@ static int LookupMapping(const int32_t* table,
}
}
-static inline uint8_t NonASCIISequenceLength(byte first) {
- // clang-format off
- static const uint8_t lengths[256] = {
- // The first 128 entries correspond to ASCII characters.
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* OO - Of */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 10 - 1f */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 20 - 2f */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 30 - 3f */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 40 - 4f */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 50 - 5f */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 60 - 6f */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 70 - 7f */
- // The following 64 entries correspond to continuation bytes.
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 80 - 8f */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 90 - 9f */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* a0 - af */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* b0 - bf */
- // The next are two invalid overlong encodings and 30 two-byte sequences.
- 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, /* c0-c1 + c2-cf */
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, /* d0-df */
- // 16 three-byte sequences.
- 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, /* e0-ef */
- // 5 four-byte sequences, followed by sequences that could only encode
- // code points outside of the unicode range.
- 4, 4, 4, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; /* f0-f4 + f5-ff */
- // clang-format on
- return lengths[first];
-}
-
-
-static inline bool IsContinuationCharacter(byte chr) {
- return chr >= 0x80 && chr <= 0xBF;
-}
-
// This method decodes a UTF-8 value according to RFC 3629 and
// https://encoding.spec.whatwg.org/#utf-8-decoder .
uchar Utf8::CalculateValue(const byte* str, size_t max_length, size_t* cursor) {
+ DCHECK_GT(max_length, 0);
DCHECK_GT(str[0], kMaxOneByteChar);
- size_t length = NonASCIISequenceLength(str[0]);
-
- // Check continuation characters.
- size_t max_count = std::min(length, max_length);
- size_t count = 1;
- while (count < max_count && IsContinuationCharacter(str[count])) {
- count++;
- }
+ State state = State::kAccept;
+ Utf8IncrementalBuffer buffer = 0;
+ uchar t;
- if (length >= 3 && count < 2) {
- // Not enough continuation bytes to check overlong sequences.
- *cursor += 1;
- return kBadChar;
- }
+ size_t i = 0;
+ do {
+ t = ValueOfIncremental(str[i], &i, &state, &buffer);
+ } while (i < max_length && t == kIncomplete);
- // Check overly long sequences & other conditions.
- if (length == 3) {
- if (str[0] == 0xE0 && (str[1] < 0xA0 || str[1] > 0xBF)) {
- // Overlong three-byte sequence? The first byte generates a kBadChar.
- *cursor += 1;
- return kBadChar;
- } else if (str[0] == 0xED && (str[1] < 0x80 || str[1] > 0x9F)) {
- // High and low surrogate halves? The first byte generates a kBadChar.
- *cursor += 1;
- return kBadChar;
- }
- } else if (length == 4) {
- if (str[0] == 0xF0 && (str[1] < 0x90 || str[1] > 0xBF)) {
- // Overlong four-byte sequence. The first byte generates a kBadChar.
- *cursor += 1;
- return kBadChar;
- } else if (str[0] == 0xF4 && (str[1] < 0x80 || str[1] > 0x8F)) {
- // Code points outside of the unicode range. The first byte generates a
- // kBadChar.
- *cursor += 1;
- return kBadChar;
- }
- }
-
- *cursor += count;
-
- if (count != length) {
- // Not enough continuation characters.
- return kBadChar;
- }
-
- // All errors have been handled, so we only have to assemble the result.
- switch (length) {
- case 2:
- return ((str[0] << 6) + str[1]) - 0x00003080;
- case 3:
- return ((str[0] << 12) + (str[1] << 6) + str[2]) - 0x000E2080;
- case 4:
- return ((str[0] << 18) + (str[1] << 12) + (str[2] << 6) + str[3]) -
- 0x03C82080;
- }
-
- UNREACHABLE();
+ *cursor += i;
+ return (state == State::kAccept) ? t : kBadChar;
}
-/*
-Overlong sequence detection: Since Blink's TextCodecUTF8 rejects multi-byte
-characters which could be expressed with less bytes, we must too.
-
-Each continuation byte (10xxxxxx) carries 6 bits of payload. The lead bytes of
-1, 2, 3 and 4-byte characters are 0xxxxxxx, 110xxxxx, 1110xxxx and 11110xxx, and
-carry 7, 5, 4, and 3 bits of payload, respectively.
-
-Thus, a two-byte character can contain 11 bits of payload, a three-byte
-character 16, and a four-byte character 21.
-
-If we encounter a two-byte character which contains 7 bits or less, a three-byte
-character which contains 11 bits or less, or a four-byte character which
-contains 16 bits or less, we reject the character and generate a kBadChar for
-each of the bytes. This is because Blink handles overlong sequences by rejecting
-the first byte of the character (returning kBadChar); thus the rest are lonely
-continuation bytes and generate a kBadChar each.
-*/
-
-uchar Utf8::ValueOfIncremental(byte next, Utf8IncrementalBuffer* buffer) {
+// Decodes UTF-8 bytes incrementally as they stream in. This **must** be
+// followed by a call to ValueOfIncrementalFinish when the stream is complete,
+// to ensure a trailing incomplete sequence is handled.
+uchar Utf8::ValueOfIncremental(byte next, size_t* cursor, State* state,
+ Utf8IncrementalBuffer* buffer) {
DCHECK_NOT_NULL(buffer);
+ State old_state = *state;
+ *cursor += 1;
- // The common case: 1-byte Utf8 (and no incomplete char in the buffer)
- if (V8_LIKELY(next <= kMaxOneByteChar && *buffer == 0)) {
+ if (V8_LIKELY(next <= kMaxOneByteChar && old_state == State::kAccept)) {
+ DCHECK_EQ(0u, *buffer);
return static_cast<uchar>(next);
}
- if (*buffer == 0) {
- // We're at the start of a new character.
- uint32_t kind = NonASCIISequenceLength(next);
- CHECK_LE(kind, 4);
- if (kind >= 2) {
- // Start of 2..4 byte character, and no buffer.
-
- // The mask for the lower bits depends on the kind, and is
- // 0x1F, 0x0F, 0x07 for kinds 2, 3, 4 respectively. We can get that
- // with one shift.
- uint8_t mask = 0x7f >> kind;
+ // At this point we're at the lead byte of a 2-, 3- or 4-byte sequence, or
+ // at a continuation byte within such a sequence.
+ Utf8DfaDecoder::Decode(next, state, buffer);
- // Store the kind in the top nibble, and kind - 1 (i.e., remaining bytes)
- // in 2nd nibble, and the value in the bottom three. The 2nd nibble is
- // intended as a counter about how many bytes are still needed.
- uint32_t character_info = kind << 28 | (kind - 1) << 24;
- DCHECK_EQ(character_info & mask, 0);
- *buffer = character_info | (next & mask);
- return kIncomplete;
- } else {
- // No buffer, and not the start of a 1-byte char (handled at the
- // beginning), and not the start of a 2..4 byte char (or the start of an
- // overlong / invalid sequence)? Bad char.
+ switch (*state) {
+ case State::kAccept: {
+ uchar t = *buffer;
*buffer = 0;
- return kBadChar;
- }
- } else if (*buffer <= 0xff) {
- // We have one unprocessed byte left (from the last else case in this if
- // statement).
- uchar previous = *buffer;
- *buffer = 0;
- uchar t = ValueOfIncremental(previous, buffer);
- if (t == kIncomplete) {
- // If we have an incomplete character, process both the previous and the
- // next byte at once.
- return ValueOfIncremental(next, buffer);
- } else {
- // Otherwise, process the previous byte and save the next byte for next
- // time.
- DCHECK_EQ(0u, *buffer);
- *buffer = next;
return t;
}
- } else if (IsContinuationCharacter(next)) {
- // We're inside of a character, as described by buffer.
-
- // How many bytes (excluding this one) do we still expect?
- uint8_t bytes_expected = *buffer >> 28;
- uint8_t bytes_left = (*buffer >> 24) & 0x0f;
- // Two-byte overlong sequence detection is handled by
- // NonASCIISequenceLength, so we don't need to check anything here.
- if (bytes_expected == 3 && bytes_left == 2) {
- // Check that there are at least 12 bytes of payload.
- uint8_t lead_payload = *buffer & (0x7f >> bytes_expected);
- DCHECK_LE(lead_payload, 0xf);
- if (lead_payload == 0 && next < 0xa0) {
- // 0xa0 = 0b10100000 (payload: 100000). Overlong sequence: 0 bits from
- // the first byte, at most 5 from the second byte, and at most 6 from
- // the third -> in total at most 11.
-
- *buffer = next;
- return kBadChar;
- } else if (lead_payload == 0xd && next > 0x9f) {
- // The resulting code point would be on a range which is reserved for
- // UTF-16 surrogate halves.
- *buffer = next;
- return kBadChar;
- }
- } else if (bytes_expected == 4 && bytes_left == 3) {
- // Check that there are at least 17 bytes of payload.
- uint8_t lead_payload = *buffer & (0x7f >> bytes_expected);
+ case State::kReject:
+ *state = State::kAccept;
+ *buffer = 0;
- // If the lead byte was bigger than 0xf4 (payload: 4), it's not a start of
- // any valid character, and this is detected by NonASCIISequenceLength.
- DCHECK_LE(lead_payload, 0x4);
- if (lead_payload == 0 && next < 0x90) {
- // 0x90 = 10010000 (payload 10000). Overlong sequence: 0 bits from the
- // first byte, at most 4 from the second byte, at most 12 from the third
- // and fourth bytes -> in total at most 16.
- *buffer = next;
- return kBadChar;
- } else if (lead_payload == 4 && next > 0x8f) {
- // Invalid code point; value greater than 0b100001111000000000000
- // (0x10ffff).
- *buffer = next;
- return kBadChar;
+ // If we hit a bad byte, we need to determine if we were trying to start
+ // a sequence or continue one. If we were trying to start a sequence,
+ // that means it's just an invalid lead byte and we need to continue to
+ // the next (which we already did above). If we were already in a
+ // sequence, we need to reprocess this same byte after resetting to the
+ // initial state.
+ if (old_state != State::kAccept) {
+ // We were trying to continue a sequence, so let's reprocess this byte
+ // next time.
+ *cursor -= 1;
}
- }
+ return kBadChar;
- bytes_left--;
- // Update the value.
- uint32_t value = ((*buffer & 0xffffff) << 6) | (next & 0x3F);
- if (bytes_left) {
- *buffer = (bytes_expected << 28 | bytes_left << 24 | value);
+ default:
return kIncomplete;
- } else {
-#ifdef DEBUG
- // Check that overlong sequences were already detected.
- bool sequence_was_too_long = (bytes_expected == 2 && value < 0x80) ||
- (bytes_expected == 3 && value < 0x800) ||
- (bytes_expected == 4 && value < 0x8000);
- DCHECK(!sequence_was_too_long);
-#endif
- *buffer = 0;
- return value;
- }
- } else {
- // Within a character, but not a continuation character? Then the
- // previous char was a bad char. But we need to save the current
- // one.
- *buffer = next;
- return kBadChar;
}
}
-uchar Utf8::ValueOfIncrementalFinish(Utf8IncrementalBuffer* buffer) {
- DCHECK_NOT_NULL(buffer);
- if (*buffer == 0) {
+// Finishes the incremental decoding, ensuring that any unfinished sequence
+// is replaced by a replacement char.
+uchar Utf8::ValueOfIncrementalFinish(State* state) {
+ if (*state == State::kAccept) {
return kBufferEmpty;
} else {
- // Process left-over chars. An incomplete char at the end maps to kBadChar.
- uchar t = ValueOfIncremental(0, buffer);
- return (t == kIncomplete) ? kBadChar : t;
+ DCHECK_GT(*state, State::kAccept);
+ *state = State::kAccept;
+ return kBadChar;
}
}
bool Utf8::ValidateEncoding(const byte* bytes, size_t length) {
- const byte* cursor = bytes;
- const byte* end = bytes + length;
-
- while (cursor < end) {
- // Skip over single-byte values.
- if (*cursor <= kMaxOneByteChar) {
- ++cursor;
- continue;
- }
-
- // Get the length the the character.
- size_t seq_length = NonASCIISequenceLength(*cursor);
- // For some invalid characters NonASCIISequenceLength returns 0.
- if (seq_length == 0) return false;
-
- const byte* char_end = cursor + seq_length;
-
- // Return false if we do not have enough bytes for the character.
- if (char_end > end) return false;
-
- // Check if the bytes of the character are continuation bytes.
- for (const byte* i = cursor + 1; i < char_end; ++i) {
- if (!IsContinuationCharacter(*i)) return false;
- }
-
- // Check for overlong sequences & other conditions.
- if (seq_length == 3) {
- if (cursor[0] == 0xE0 && (cursor[1] < 0xA0 || cursor[1] > 0xBF)) {
- // Overlong three-byte sequence?
- return false;
- } else if (cursor[0] == 0xED && (cursor[1] < 0x80 || cursor[1] > 0x9F)) {
- // High and low surrogate halves?
- return false;
- }
- } else if (seq_length == 4) {
- if (cursor[0] == 0xF0 && (cursor[1] < 0x90 || cursor[1] > 0xBF)) {
- // Overlong four-byte sequence.
- return false;
- } else if (cursor[0] == 0xF4 && (cursor[1] < 0x80 || cursor[1] > 0x8F)) {
- // Code points outside of the unicode range.
- return false;
- }
- }
- cursor = char_end;
+ State state = State::kAccept;
+ Utf8IncrementalBuffer throw_away = 0;
+ for (size_t i = 0; i < length && state != State::kReject; i++) {
+ Utf8DfaDecoder::Decode(bytes[i], &state, &throw_away);
}
- return true;
+ return state == State::kAccept;
}
// Uppercase: point.category == 'Lu'
@@ -3333,7 +3118,7 @@ int CanonicalizationRange::Convert(uchar c,
}
-const uchar UnicodeData::kMaxCodePoint = 65533;
+const uchar UnicodeData::kMaxCodePoint = 0xFFFD;
int UnicodeData::GetByteCount() {
#ifndef V8_INTL_SUPPORT // NOLINT
diff --git a/deps/v8/src/unicode.h b/deps/v8/src/unicode.h
index 04d58f3650..c6ce9a8eb2 100644
--- a/deps/v8/src/unicode.h
+++ b/deps/v8/src/unicode.h
@@ -7,6 +7,7 @@
#include <sys/types.h>
#include "src/globals.h"
+#include "src/third_party/utf8-decoder/utf8-decoder.h"
#include "src/utils.h"
/**
* \file
@@ -129,6 +130,8 @@ class Utf16 {
class V8_EXPORT_PRIVATE Utf8 {
public:
+ using State = Utf8DfaDecoder::State;
+
static inline uchar Length(uchar chr, int previous);
static inline unsigned EncodeOneByte(char* out, uint8_t c);
static inline unsigned Encode(char* out,
@@ -158,9 +161,9 @@ class V8_EXPORT_PRIVATE Utf8 {
static inline uchar ValueOf(const byte* str, size_t length, size_t* cursor);
typedef uint32_t Utf8IncrementalBuffer;
- static uchar ValueOfIncremental(byte next_byte,
+ static uchar ValueOfIncremental(byte next_byte, size_t* cursor, State* state,
Utf8IncrementalBuffer* buffer);
- static uchar ValueOfIncrementalFinish(Utf8IncrementalBuffer* buffer);
+ static uchar ValueOfIncrementalFinish(State* state);
// Excludes non-characters from the set of valid code points.
static inline bool IsValidCharacter(uchar c);
diff --git a/deps/v8/src/uri.cc b/deps/v8/src/uri.cc
index 3ebf58857b..775c0ede2c 100644
--- a/deps/v8/src/uri.cc
+++ b/deps/v8/src/uri.cc
@@ -38,8 +38,8 @@ bool IsReservedPredicate(uc16 c) {
bool IsReplacementCharacter(const uint8_t* octets, int length) {
// The replacement character is at codepoint U+FFFD in the Unicode Specials
// table. Its UTF-8 encoding is 0xEF 0xBF 0xBD.
- if (length != 3 || octets[0] != 0xef || octets[1] != 0xbf ||
- octets[2] != 0xbd) {
+ if (length != 3 || octets[0] != 0xEF || octets[1] != 0xBF ||
+ octets[2] != 0xBD) {
return false;
}
return true;
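As a quick sanity check on these constants: a three-byte UTF-8 sequence carries 4 + 6 + 6 payload bits, and reassembling 0xEF 0xBF 0xBD does yield U+FFFD (a standalone check, not part of the patch):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t cp = ((0xEFu & 0x0F) << 12)   // lead byte: 4 payload bits
                    | ((0xBFu & 0x3F) << 6)  // continuation: 6 bits
                    | (0xBDu & 0x3F);        // continuation: 6 bits
      assert(cp == 0xFFFD);
    }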
@@ -444,7 +444,7 @@ static MaybeHandle<String> EscapePrivate(Isolate* isolate,
}
// We don't allow strings that are longer than a maximal length.
- DCHECK_LT(String::kMaxLength, 0x7fffffff - 6); // Cannot overflow.
+ DCHECK_LT(String::kMaxLength, 0x7FFFFFFF - 6); // Cannot overflow.
if (escaped_length > String::kMaxLength) break; // Provoke exception.
}
}
@@ -468,10 +468,10 @@ static MaybeHandle<String> EscapePrivate(Isolate* isolate,
dest->SeqOneByteStringSet(dest_position + 1, 'u');
dest->SeqOneByteStringSet(dest_position + 2, HexCharOfValue(c >> 12));
dest->SeqOneByteStringSet(dest_position + 3,
- HexCharOfValue((c >> 8) & 0xf));
+ HexCharOfValue((c >> 8) & 0xF));
dest->SeqOneByteStringSet(dest_position + 4,
- HexCharOfValue((c >> 4) & 0xf));
- dest->SeqOneByteStringSet(dest_position + 5, HexCharOfValue(c & 0xf));
+ HexCharOfValue((c >> 4) & 0xF));
+ dest->SeqOneByteStringSet(dest_position + 5, HexCharOfValue(c & 0xF));
dest_position += 6;
} else if (IsNotEscaped(c)) {
dest->SeqOneByteStringSet(dest_position, c);
@@ -479,7 +479,7 @@ static MaybeHandle<String> EscapePrivate(Isolate* isolate,
} else {
dest->SeqOneByteStringSet(dest_position, '%');
dest->SeqOneByteStringSet(dest_position + 1, HexCharOfValue(c >> 4));
- dest->SeqOneByteStringSet(dest_position + 2, HexCharOfValue(c & 0xf));
+ dest->SeqOneByteStringSet(dest_position + 2, HexCharOfValue(c & 0xF));
dest_position += 3;
}
}
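For reference, the two escape forms written here are "%uXXXX" for code units above 0xFF and "%XX" otherwise. A standalone sketch of the formatting, assuming HexCharOfValue maps 0..15 to uppercase hex as the surrounding code suggests (the redefinition below is illustrative, not V8's):

    #include <cstdint>

    char HexCharOfValue(int v) { return v < 10 ? '0' + v : 'A' + v - 10; }

    // Writes "%u03B1" for U+03B1, "%20" for a space, etc.
    void EscapeChar(uint16_t c, char* out) {
      if (c > 0xFF) {
        out[0] = '%'; out[1] = 'u';
        out[2] = HexCharOfValue(c >> 12);
        out[3] = HexCharOfValue((c >> 8) & 0xF);
        out[4] = HexCharOfValue((c >> 4) & 0xF);
        out[5] = HexCharOfValue(c & 0xF);
        out[6] = '\0';
      } else {
        out[0] = '%';
        out[1] = HexCharOfValue(c >> 4);
        out[2] = HexCharOfValue(c & 0xF);
        out[3] = '\0';
      }
    }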
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index e6e98fabba..5b5d95ce9a 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -384,9 +384,9 @@ class BitField64 : public BitFieldBase<T, shift, size, uint64_t> { };
#define DEFINE_BIT_FIELD_RANGE_TYPE(Name, Type, Size, _) \
k##Name##Start, k##Name##End = k##Name##Start + Size - 1,
-#define DEFINE_BIT_RANGES(LIST_MACRO) \
- struct LIST_MACRO##_Ranges { \
- enum { LIST_MACRO(DEFINE_BIT_FIELD_RANGE_TYPE, _) }; \
+#define DEFINE_BIT_RANGES(LIST_MACRO) \
+ struct LIST_MACRO##_Ranges { \
+ enum { LIST_MACRO(DEFINE_BIT_FIELD_RANGE_TYPE, _) kBitsCount }; \
};
#define DEFINE_BIT_FIELD_TYPE(Name, Type, Size, RangesName) \
@@ -641,7 +641,7 @@ class Access {
template<typename T>
class SetOncePointer {
public:
- SetOncePointer() : pointer_(nullptr) {}
+ SetOncePointer() = default;
bool is_set() const { return pointer_ != nullptr; }
@@ -655,8 +655,16 @@ class SetOncePointer {
pointer_ = value;
}
+ T* operator=(T* value) {
+ set(value);
+ return value;
+ }
+
+ bool operator==(std::nullptr_t) const { return pointer_ == nullptr; }
+ bool operator!=(std::nullptr_t) const { return pointer_ != nullptr; }
+
private:
- T* pointer_;
+ T* pointer_ = nullptr;
};
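With the new operator= and the nullptr comparisons, call sites can treat a SetOncePointer much like a raw pointer. A hypothetical usage sketch (Code stands in for any pointee type):

    void InstallOnce(SetOncePointer<Code>& slot, Code* code) {
      if (slot == nullptr) {  // new operator==(std::nullptr_t)
        slot = code;          // new operator=; set() still enforces set-once
      }
      DCHECK(slot != nullptr);
    }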
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index 16107fdefc..a6d97e8ff1 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -19,6 +19,7 @@
#include "src/objects-inl.h"
#include "src/profiler/heap-profiler.h"
#include "src/runtime-profiler.h"
+#include "src/simulator.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/snapshot.h"
#include "src/tracing/tracing-category-observer.h"
@@ -42,6 +43,9 @@ bool V8::Initialize() {
void V8::TearDown() {
+#if defined(USE_SIMULATOR)
+ Simulator::GlobalTearDown();
+#endif
Bootstrapper::TearDownExtensions();
ElementsAccessor::TearDown();
RegisteredExtension::UnregisterAll();
@@ -65,10 +69,15 @@ void V8::InitializeOncePerProcessImpl() {
FLAG_max_semi_space_size = 1;
}
- base::OS::Initialize(FLAG_random_seed, FLAG_hard_abort, FLAG_gc_fake_mmap);
+ base::OS::Initialize(FLAG_hard_abort, FLAG_gc_fake_mmap);
+
+ if (FLAG_random_seed) SetRandomMmapSeed(FLAG_random_seed);
Isolate::InitializeOncePerProcess();
+#if defined(USE_SIMULATOR)
+ Simulator::InitializeOncePerProcess();
+#endif
sampler::Sampler::SetUp();
CpuFeatures::Probe(false);
ElementsAccessor::InitializeOncePerProcess();
@@ -116,7 +125,7 @@ void V8::SetNativesBlob(StartupData* natives_blob) {
#ifdef V8_USE_EXTERNAL_STARTUP_DATA
base::CallOnce(&init_natives_once, &SetNativesFromFile, natives_blob);
#else
- CHECK(false);
+ UNREACHABLE();
#endif
}
@@ -125,7 +134,7 @@ void V8::SetSnapshotBlob(StartupData* snapshot_blob) {
#ifdef V8_USE_EXTERNAL_STARTUP_DATA
base::CallOnce(&init_snapshot_once, &SetSnapshotFromFile, snapshot_blob);
#else
- CHECK(false);
+ UNREACHABLE();
#endif
}
} // namespace internal
diff --git a/deps/v8/src/v8.gyp b/deps/v8/src/v8.gyp
index a7ce858022..218b173af2 100644
--- a/deps/v8/src/v8.gyp
+++ b/deps/v8/src/v8.gyp
@@ -1044,6 +1044,10 @@
'heap/spaces.h',
'heap/store-buffer.cc',
'heap/store-buffer.h',
+ 'heap/stress-marking-observer.cc',
+ 'heap/stress-marking-observer.h',
+ 'heap/stress-scavenge-observer.cc',
+ 'heap/stress-scavenge-observer.h',
'heap/sweeper.cc',
'heap/sweeper.h',
'heap/worklist.h',
@@ -1160,11 +1164,15 @@
'objects/code.h',
'objects/compilation-cache.h',
'objects/compilation-cache-inl.h',
+ 'objects/data-handler.h',
+ 'objects/data-handler-inl.h',
'objects/debug-objects-inl.h',
'objects/debug-objects.cc',
'objects/debug-objects.h',
'objects/descriptor-array.h',
'objects/dictionary.h',
+ 'objects/fixed-array.h',
+ 'objects/fixed-array-inl.h',
'objects/frame-array.h',
'objects/frame-array-inl.h',
'objects/hash-table-inl.h',
@@ -1173,6 +1181,8 @@
'objects/intl-objects.h',
'objects/js-array.h',
'objects/js-array-inl.h',
+ 'objects/js-collection.h',
+ 'objects/js-collection-inl.h',
'objects/js-regexp.h',
'objects/js-regexp-inl.h',
'objects/literal-objects.cc',
@@ -1336,6 +1346,8 @@
'safepoint-table.h',
'setup-isolate.h',
'signature.h',
+ 'simulator-base.cc',
+ 'simulator-base.h',
'simulator.h',
'snapshot/builtin-deserializer-allocator.cc',
'snapshot/builtin-deserializer-allocator.h',
@@ -1396,6 +1408,7 @@
'strtod.h',
'ic/stub-cache.cc',
'ic/stub-cache.h',
+ 'third_party/utf8-decoder/utf8-decoder.h',
'tracing/trace-event.cc',
'tracing/trace-event.h',
'tracing/traced-value.cc',
@@ -1430,6 +1443,8 @@
'v8threads.h',
'value-serializer.cc',
'value-serializer.h',
+ 'vector-slot-pair.cc',
+ 'vector-slot-pair.h',
'vector.h',
'version.cc',
'version.h',
@@ -1437,9 +1452,11 @@
'visitors.h',
'vm-state-inl.h',
'vm-state.h',
+ 'wasm/baseline/liftoff-assembler-defs.h',
'wasm/baseline/liftoff-assembler.cc',
'wasm/baseline/liftoff-assembler.h',
'wasm/baseline/liftoff-compiler.cc',
+ 'wasm/baseline/liftoff-register.h',
'wasm/compilation-manager.cc',
'wasm/compilation-manager.h',
'wasm/decoder.h',
@@ -1461,15 +1478,18 @@
'wasm/streaming-decoder.h',
'wasm/wasm-api.cc',
'wasm/wasm-api.h',
+ 'wasm/wasm-code-manager.cc',
+ 'wasm/wasm-code-manager.h',
'wasm/wasm-code-specialization.cc',
'wasm/wasm-code-specialization.h',
'wasm/wasm-code-wrapper.cc',
'wasm/wasm-code-wrapper.h',
+ 'wasm/wasm-constants.h',
'wasm/wasm-debug.cc',
+ 'wasm/wasm-engine.cc',
+ 'wasm/wasm-engine.h',
'wasm/wasm-external-refs.cc',
'wasm/wasm-external-refs.h',
- 'wasm/wasm-heap.cc',
- 'wasm/wasm-heap.h',
'wasm/wasm-js.cc',
'wasm/wasm-js.h',
'wasm/wasm-limits.h',
@@ -1895,6 +1915,8 @@
'base/once.cc',
'base/once.h',
'base/optional.h',
+ 'base/page-allocator.cc',
+ 'base/page-allocator.h',
'base/platform/elapsed-timer.h',
'base/platform/time.cc',
'base/platform/time.h',
@@ -2068,10 +2090,9 @@
'-L/usr/local/lib -lexecinfo',
]},
'sources': [
- 'base/debug/stack_trace_posix.cc',
'base/platform/platform-openbsd.cc',
'base/platform/platform-posix.h',
- 'base/platform/platform-posix.cc',
+ 'base/platform/platform-posix.cc',
'base/platform/platform-posix-time.h',
'base/platform/platform-posix-time.cc',
],
@@ -2337,12 +2358,10 @@
'js/macros.py',
'messages.h',
'js/prologue.js',
- 'js/v8natives.js',
'js/array.js',
'js/typedarray.js',
'js/messages.js',
'js/spread.js',
- 'js/proxy.js',
'debug/mirrors.js',
'debug/debug.js',
'debug/liveedit.js',
@@ -2466,6 +2485,8 @@
'objects-inl.h',
'objects/code.h',
'objects/code-inl.h',
+ 'objects/fixed-array.h',
+ 'objects/fixed-array-inl.h',
'objects/js-array.h',
'objects/js-array-inl.h',
'objects/js-regexp.h',
@@ -2568,5 +2589,41 @@
},
],
},
+ {
+ 'target_name': 'v8_monolith',
+ 'type': 'static_library',
+ 'direct_dependent_settings': {
+ 'include_dirs': [
+ '../include',
+ ],
+ },
+ 'actions': [
+ {
+ 'action_name': 'build_with_gn',
+ 'inputs': [
+ '../tools/node/build_gn.py',
+ ],
+ 'outputs': [
+ '<(INTERMEDIATE_DIR)/obj/libv8_monolith.a',
+ '<(INTERMEDIATE_DIR)/args.gn',
+ ],
+ 'action': [
+ '../tools/node/build_gn.py',
+ '<(CONFIGURATION_NAME)',
+ '../',
+ '<(INTERMEDIATE_DIR)',
+ 'v8_promise_internal_field_count=<(v8_promise_internal_field_count)',
+ 'target_cpu="<(target_arch)"',
+ 'target_os="<(OS)"',
+ 'v8_target_cpu="<(v8_target_arch)"',
+ 'v8_embedder_string="<(v8_embedder_string)"',
+ 'v8_use_snapshot=<(v8_use_snapshot)',
+ 'v8_optimized_debug=<(v8_optimized_debug)',
+ 'v8_enable_disassembler=<(v8_enable_disassembler)',
+ 'v8_postmortem_support=<(v8_postmortem_support)',
+ ],
+ },
+ ],
+ },
],
}
diff --git a/deps/v8/src/value-serializer.cc b/deps/v8/src/value-serializer.cc
index 974ee2c76d..5e2ab19877 100644
--- a/deps/v8/src/value-serializer.cc
+++ b/deps/v8/src/value-serializer.cc
@@ -214,11 +214,11 @@ void ValueSerializer::WriteVarint(T value) {
uint8_t stack_buffer[sizeof(T) * 8 / 7 + 1];
uint8_t* next_byte = &stack_buffer[0];
do {
- *next_byte = (value & 0x7f) | 0x80;
+ *next_byte = (value & 0x7F) | 0x80;
next_byte++;
value >>= 7;
} while (value);
- *(next_byte - 1) &= 0x7f;
+ *(next_byte - 1) &= 0x7F;
WriteRawBytes(stack_buffer, next_byte - stack_buffer);
}
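The loop writes little-endian base-128 groups, using the high bit of each byte as a continuation flag; the trailing `&= 0x7F` clears that flag on the final byte. A standalone sketch with a worked value (EncodeVarint is an illustrative name; 300 = 0b1_0010_1100 encodes as 0xAC 0x02):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    size_t EncodeVarint(uint32_t value, uint8_t* out) {
      uint8_t* p = out;
      do {
        *p++ = (value & 0x7F) | 0x80;  // 7 payload bits + continuation flag
        value >>= 7;
      } while (value);
      p[-1] &= 0x7F;  // Last byte: clear the continuation flag.
      return p - out;
    }

    int main() {
      uint8_t buf[5];
      size_t n = EncodeVarint(300, buf);
      for (size_t i = 0; i < n; i++) printf("%02X ", buf[i]);  // AC 02
    }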
@@ -848,7 +848,7 @@ Maybe<bool> ValueSerializer::WriteWasmModule(Handle<WasmModuleObject> object) {
WriteTag(SerializationTag::kWasmModule);
WriteRawBytes(&encoding_tag, sizeof(encoding_tag));
- Handle<String> wire_bytes(compiled_part->module_bytes(), isolate_);
+ Handle<String> wire_bytes(compiled_part->shared()->module_bytes(), isolate_);
int wire_bytes_length = wire_bytes->length();
WriteVarint<uint32_t>(wire_bytes_length);
uint8_t* destination;
@@ -856,20 +856,10 @@ Maybe<bool> ValueSerializer::WriteWasmModule(Handle<WasmModuleObject> object) {
String::WriteToFlat(*wire_bytes, destination, 0, wire_bytes_length);
}
- if (FLAG_wasm_jit_to_native) {
- std::pair<std::unique_ptr<byte[]>, size_t> serialized_module =
- wasm::NativeModuleSerializer::SerializeWholeModule(isolate_,
- compiled_part);
- WriteVarint<uint32_t>(static_cast<uint32_t>(serialized_module.second));
- WriteRawBytes(serialized_module.first.get(), serialized_module.second);
- } else {
- std::unique_ptr<ScriptData> script_data =
- WasmCompiledModuleSerializer::SerializeWasmModule(isolate_,
- compiled_part);
- int script_data_length = script_data->length();
- WriteVarint<uint32_t>(script_data_length);
- WriteRawBytes(script_data->data(), script_data_length);
- }
+ std::pair<std::unique_ptr<const byte[]>, size_t> serialized_module =
+ wasm::SerializeNativeModule(isolate_, compiled_part);
+ WriteVarint<uint32_t>(static_cast<uint32_t>(serialized_module.second));
+ WriteRawBytes(serialized_module.first.get(), serialized_module.second);
return ThrowIfOutOfMemory();
}
@@ -1032,7 +1022,7 @@ Maybe<T> ValueDeserializer::ReadVarint() {
if (position_ >= end_) return Nothing<T>();
uint8_t byte = *position_;
if (V8_LIKELY(shift < sizeof(T) * 8)) {
- value |= static_cast<T>(byte & 0x7f) << shift;
+ value |= static_cast<T>(byte & 0x7F) << shift;
shift += 7;
}
has_another_byte = byte & 0x80;
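The matching read side mirrors that masking and shifting; a sketch under illustrative names (the real deserializer additionally bounds-checks position_ and reports failure via Nothing<T>):

    #include <cstddef>
    #include <cstdint>

    bool DecodeVarint(const uint8_t* p, const uint8_t* end, uint32_t* out) {
      uint32_t value = 0;
      unsigned shift = 0;
      while (p < end) {
        uint8_t byte = *p++;
        if (shift < 32) value |= static_cast<uint32_t>(byte & 0x7F) << shift;
        shift += 7;
        if (!(byte & 0x80)) {  // No continuation flag: varint is complete.
          *out = value;
          return true;
        }
      }
      return false;  // Input ended mid-varint.
    }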
@@ -1716,23 +1706,11 @@ MaybeHandle<JSObject> ValueDeserializer::ReadWasmModule() {
}
// Try to deserialize the compiled module first.
- Handle<FixedArray> compiled_part;
+ Handle<WasmCompiledModule> compiled_module;
MaybeHandle<JSObject> result;
- if (FLAG_wasm_jit_to_native) {
- if (wasm::NativeModuleDeserializer::DeserializeFullBuffer(
- isolate_, compiled_bytes, wire_bytes)
- .ToHandle(&compiled_part)) {
- result = WasmModuleObject::New(
- isolate_, Handle<WasmCompiledModule>::cast(compiled_part));
- }
- } else {
- ScriptData script_data(compiled_bytes.start(), compiled_bytes.length());
- if (WasmCompiledModuleSerializer::DeserializeWasmModule(
- isolate_, &script_data, wire_bytes)
- .ToHandle(&compiled_part)) {
- result = WasmModuleObject::New(
- isolate_, Handle<WasmCompiledModule>::cast(compiled_part));
- }
+ if (wasm::DeserializeNativeModule(isolate_, compiled_bytes, wire_bytes)
+ .ToHandle(&compiled_module)) {
+ result = WasmModuleObject::New(isolate_, compiled_module);
}
if (result.is_null()) {
wasm::ErrorThrower thrower(isolate_, "ValueDeserializer::ReadWasmModule");
diff --git a/deps/v8/src/vector-slot-pair.cc b/deps/v8/src/vector-slot-pair.cc
new file mode 100644
index 0000000000..e639a9037e
--- /dev/null
+++ b/deps/v8/src/vector-slot-pair.cc
@@ -0,0 +1,39 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/vector-slot-pair.h"
+
+#include "src/feedback-vector.h"
+
+namespace v8 {
+namespace internal {
+
+VectorSlotPair::VectorSlotPair() {}
+
+int VectorSlotPair::index() const {
+ return vector_.is_null() ? -1 : FeedbackVector::GetIndex(slot_);
+}
+
+bool operator==(VectorSlotPair const& lhs, VectorSlotPair const& rhs) {
+ return lhs.slot() == rhs.slot() &&
+ lhs.vector().location() == rhs.vector().location();
+}
+
+bool operator!=(VectorSlotPair const& lhs, VectorSlotPair const& rhs) {
+ return !(lhs == rhs);
+}
+
+std::ostream& operator<<(std::ostream& os, const VectorSlotPair& pair) {
+ if (pair.IsValid()) {
+ return os << "VectorSlotPair(" << pair.slot() << ")";
+ }
+ return os << "VectorSlotPair(INVALID)";
+}
+
+size_t hash_value(VectorSlotPair const& p) {
+ return base::hash_combine(p.slot(), p.vector().location());
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/vector-slot-pair.h b/deps/v8/src/vector-slot-pair.h
new file mode 100644
index 0000000000..cd9434c630
--- /dev/null
+++ b/deps/v8/src/vector-slot-pair.h
@@ -0,0 +1,47 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_VECTOR_SLOT_PAIR_H_
+#define V8_VECTOR_SLOT_PAIR_H_
+
+#include "src/globals.h"
+#include "src/handles.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+class FeedbackVector;
+
+// Defines a pair of {FeedbackVector} and {FeedbackSlot}, which
+// is used to access the type feedback for a certain {Node}.
+class V8_EXPORT_PRIVATE VectorSlotPair {
+ public:
+ VectorSlotPair();
+ VectorSlotPair(Handle<FeedbackVector> vector, FeedbackSlot slot)
+ : vector_(vector), slot_(slot) {}
+
+ bool IsValid() const { return !vector_.is_null() && !slot_.IsInvalid(); }
+
+ Handle<FeedbackVector> vector() const { return vector_; }
+ FeedbackSlot slot() const { return slot_; }
+
+ int index() const;
+
+ private:
+ Handle<FeedbackVector> vector_;
+ FeedbackSlot slot_;
+};
+
+bool operator==(VectorSlotPair const&, VectorSlotPair const&);
+bool operator!=(VectorSlotPair const&, VectorSlotPair const&);
+
+std::ostream& operator<<(std::ostream& os, const VectorSlotPair& pair);
+
+size_t hash_value(VectorSlotPair const&);
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_VECTOR_SLOT_PAIR_H_
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm-defs.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm-defs.h
deleted file mode 100644
index d115b3f83d..0000000000
--- a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm-defs.h
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM_DEFS_H_
-#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM_DEFS_H_
-
-#include "src/reglist.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-// TODO(clemensh): Implement the LiftoffAssembler on this platform.
-static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
-
-static constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
-
-#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM_DEFS_H_
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index d632e39aff..7f7993d34f 100644
--- a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -11,52 +11,168 @@ namespace v8 {
namespace internal {
namespace wasm {
-void LiftoffAssembler::ReserveStackSpace(uint32_t space) { USE(stack_space_); }
+void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) { UNIMPLEMENTED(); }
-void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {}
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
+ UNIMPLEMENTED();
+}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
- int size) {}
-
-void LiftoffAssembler::SpillContext(Register context) {}
-
-void LiftoffAssembler::Load(Register dst, Register src_addr,
- uint32_t offset_imm, int size,
- PinnedRegisterScope pinned) {}
-
-void LiftoffAssembler::Store(Register dst_addr, uint32_t offset_imm,
- Register src, int size,
- PinnedRegisterScope pinned) {}
-
-void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
- uint32_t caller_slot_idx) {}
-
-void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {}
-
-void LiftoffAssembler::MoveToReturnRegister(Register reg) {}
-
-void LiftoffAssembler::Spill(uint32_t index, Register reg) {}
-
-void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {}
-
-void LiftoffAssembler::Fill(Register reg, uint32_t index) {}
-
-#define DEFAULT_I32_BINOP(name, internal_name) \
- void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
- Register rhs) {}
-
-// clang-format off
-DEFAULT_I32_BINOP(add, add)
-DEFAULT_I32_BINOP(sub, sub)
-DEFAULT_I32_BINOP(mul, imul)
-DEFAULT_I32_BINOP(and, and)
-DEFAULT_I32_BINOP(or, or)
-DEFAULT_I32_BINOP(xor, xor)
-// clang-format on
-
-#undef DEFAULT_I32_BINOP
-
-void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {}
+ int size) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::SpillContext(Register context) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::FillContextInto(Register dst) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uint32_t offset_imm,
+ LoadType type, LiftoffRegList pinned,
+ uint32_t* protected_load_pc) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
+ uint32_t offset_imm, LiftoffRegister src,
+ StoreType type, LiftoffRegList pinned,
+ uint32_t* protected_store_pc) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
+ uint32_t caller_slot_idx) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
+ UNIMPLEMENTED();
+}
+
+#define UNIMPLEMENTED_GP_BINOP(name) \
+ void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
+ Register rhs) { \
+ UNIMPLEMENTED(); \
+ }
+#define UNIMPLEMENTED_GP_UNOP(name) \
+ bool LiftoffAssembler::emit_##name(Register dst, Register src) { \
+ UNIMPLEMENTED(); \
+ }
+#define UNIMPLEMENTED_FP_BINOP(name) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
+ DoubleRegister rhs) { \
+ UNIMPLEMENTED(); \
+ }
+
+UNIMPLEMENTED_GP_BINOP(i32_add)
+UNIMPLEMENTED_GP_BINOP(i32_sub)
+UNIMPLEMENTED_GP_BINOP(i32_mul)
+UNIMPLEMENTED_GP_BINOP(i32_and)
+UNIMPLEMENTED_GP_BINOP(i32_or)
+UNIMPLEMENTED_GP_BINOP(i32_xor)
+UNIMPLEMENTED_GP_BINOP(i32_shl)
+UNIMPLEMENTED_GP_BINOP(i32_sar)
+UNIMPLEMENTED_GP_BINOP(i32_shr)
+UNIMPLEMENTED_GP_UNOP(i32_eqz)
+UNIMPLEMENTED_GP_UNOP(i32_clz)
+UNIMPLEMENTED_GP_UNOP(i32_ctz)
+UNIMPLEMENTED_GP_UNOP(i32_popcnt)
+UNIMPLEMENTED_GP_BINOP(ptrsize_add)
+UNIMPLEMENTED_FP_BINOP(f32_add)
+UNIMPLEMENTED_FP_BINOP(f32_sub)
+UNIMPLEMENTED_FP_BINOP(f32_mul)
+
+#undef UNIMPLEMENTED_GP_BINOP
+#undef UNIMPLEMENTED_GP_UNOP
+#undef UNIMPLEMENTED_FP_BINOP
+
+void LiftoffAssembler::emit_i32_test(Register reg) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::emit_jump(Label* label) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::StackCheck(Label* ool_code) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::CallTrapCallbackForTesting() { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
+ uint32_t src_index) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
+ uint32_t num_params) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
+ uint32_t param_idx,
+ uint32_t num_params) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::CallNativeWasmCode(Address addr) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { UNIMPLEMENTED(); }
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64-defs.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64-defs.h
deleted file mode 100644
index 18f49fae68..0000000000
--- a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64-defs.h
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM64_DEFS_H_
-#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM64_DEFS_H_
-
-#include "src/reglist.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-// TODO(clemensh): Implement the LiftoffAssembler on this platform.
-static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
-
-static constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
-
-#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM64_DEFS_H_
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index 2578301ad5..8d28c2b21c 100644
--- a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -11,52 +11,168 @@ namespace v8 {
namespace internal {
namespace wasm {
-void LiftoffAssembler::ReserveStackSpace(uint32_t space) { USE(stack_space_); }
+void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) { UNIMPLEMENTED(); }
-void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {}
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
+ UNIMPLEMENTED();
+}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
- int size) {}
-
-void LiftoffAssembler::SpillContext(Register context) {}
-
-void LiftoffAssembler::Load(Register dst, Register src_addr,
- uint32_t offset_imm, int size,
- PinnedRegisterScope pinned) {}
-
-void LiftoffAssembler::Store(Register dst_addr, uint32_t offset_imm,
- Register src, int size,
- PinnedRegisterScope pinned) {}
-
-void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
- uint32_t caller_slot_idx) {}
-
-void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {}
-
-void LiftoffAssembler::MoveToReturnRegister(Register reg) {}
-
-void LiftoffAssembler::Spill(uint32_t index, Register reg) {}
-
-void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {}
-
-void LiftoffAssembler::Fill(Register reg, uint32_t index) {}
-
-#define DEFAULT_I32_BINOP(name, internal_name) \
- void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
- Register rhs) {}
-
-// clang-format off
-DEFAULT_I32_BINOP(add, add)
-DEFAULT_I32_BINOP(sub, sub)
-DEFAULT_I32_BINOP(mul, imul)
-DEFAULT_I32_BINOP(and, and)
-DEFAULT_I32_BINOP(or, or)
-DEFAULT_I32_BINOP(xor, xor)
-// clang-format on
-
-#undef DEFAULT_I32_BINOP
-
-void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {}
+ int size) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::SpillContext(Register context) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::FillContextInto(Register dst) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uint32_t offset_imm,
+ LoadType type, LiftoffRegList pinned,
+ uint32_t* protected_load_pc) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
+ uint32_t offset_imm, LiftoffRegister src,
+ StoreType type, LiftoffRegList pinned,
+ uint32_t* protected_store_pc) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
+ uint32_t caller_slot_idx) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
+ UNIMPLEMENTED();
+}
+
+#define UNIMPLEMENTED_GP_BINOP(name) \
+ void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
+ Register rhs) { \
+ UNIMPLEMENTED(); \
+ }
+#define UNIMPLEMENTED_GP_UNOP(name) \
+ bool LiftoffAssembler::emit_##name(Register dst, Register src) { \
+ UNIMPLEMENTED(); \
+ }
+#define UNIMPLEMENTED_FP_BINOP(name) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
+ DoubleRegister rhs) { \
+ UNIMPLEMENTED(); \
+ }
+
+UNIMPLEMENTED_GP_BINOP(i32_add)
+UNIMPLEMENTED_GP_BINOP(i32_sub)
+UNIMPLEMENTED_GP_BINOP(i32_mul)
+UNIMPLEMENTED_GP_BINOP(i32_and)
+UNIMPLEMENTED_GP_BINOP(i32_or)
+UNIMPLEMENTED_GP_BINOP(i32_xor)
+UNIMPLEMENTED_GP_BINOP(i32_shl)
+UNIMPLEMENTED_GP_BINOP(i32_sar)
+UNIMPLEMENTED_GP_BINOP(i32_shr)
+UNIMPLEMENTED_GP_UNOP(i32_eqz)
+UNIMPLEMENTED_GP_UNOP(i32_clz)
+UNIMPLEMENTED_GP_UNOP(i32_ctz)
+UNIMPLEMENTED_GP_UNOP(i32_popcnt)
+UNIMPLEMENTED_GP_BINOP(ptrsize_add)
+UNIMPLEMENTED_FP_BINOP(f32_add)
+UNIMPLEMENTED_FP_BINOP(f32_sub)
+UNIMPLEMENTED_FP_BINOP(f32_mul)
+
+#undef UNIMPLEMENTED_GP_BINOP
+#undef UNIMPLEMENTED_GP_UNOP
+#undef UNIMPLEMENTED_FP_BINOP
+
+void LiftoffAssembler::emit_i32_test(Register reg) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::emit_jump(Label* label) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::StackCheck(Label* ool_code) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::CallTrapCallbackForTesting() { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
+ uint32_t src_index) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
+ uint32_t num_params) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
+ uint32_t param_idx,
+ uint32_t num_params) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::CallNativeWasmCode(Address addr) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { UNIMPLEMENTED(); }
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32-defs.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32-defs.h
deleted file mode 100644
index 6fd95caf41..0000000000
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32-defs.h
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_IA32_DEFS_H_
-#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_IA32_DEFS_H_
-
-#include "src/reglist.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = true;
-
-static constexpr RegList kLiftoffAssemblerGpCacheRegs =
- Register::ListOf<eax, ecx, edx, ebx, esi, edi>();
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
-
-#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_IA32_DEFS_H_
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index 696e2544c0..a8b5b32bdc 100644
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -19,32 +19,50 @@ namespace liftoff {
inline Operand GetStackSlot(uint32_t index) {
// ebp-8 holds the stack marker, ebp-16 is the wasm context, first stack slot
// is located at ebp-24.
- constexpr int32_t kStackSlotSize = 8;
constexpr int32_t kFirstStackSlotOffset = -24;
- return Operand(ebp, kFirstStackSlotOffset - index * kStackSlotSize);
+ return Operand(
+ ebp, kFirstStackSlotOffset - index * LiftoffAssembler::kStackSlotSize);
}
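A worked check of that slot arithmetic, assuming LiftoffAssembler::kStackSlotSize is 8 (consistent with the doubles spilled into these slots later in this file):

    #include <cassert>
    #include <cstdint>

    int32_t StackSlotOffset(uint32_t index) {
      constexpr int32_t kFirstStackSlotOffset = -24;  // ebp-8 marker, ebp-16 context
      constexpr int32_t kStackSlotSize = 8;           // assumed slot size
      return kFirstStackSlotOffset - static_cast<int32_t>(index) * kStackSlotSize;
    }

    int main() {
      assert(StackSlotOffset(0) == -24);
      assert(StackSlotOffset(5) == -64);
    }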
// TODO(clemensh): Make this a constexpr variable once Operand is constexpr.
inline Operand GetContextOperand() { return Operand(ebp, -16); }
+static constexpr LiftoffRegList kByteRegs =
+ LiftoffRegList::FromBits<Register::ListOf<eax, ecx, edx, ebx>()>();
+static_assert(kByteRegs.GetNumRegsSet() == 4, "should have four byte regs");
+static_assert((kByteRegs & kGpCacheRegList) == kByteRegs,
+ "kByteRegs only contains gp cache registers");
+
+// Use this register to store the address of the last argument pushed on the
+// stack for a call to C.
+static constexpr Register kCCallLastArgAddrReg = eax;
+
} // namespace liftoff
-void LiftoffAssembler::ReserveStackSpace(uint32_t space) {
- stack_space_ = space;
- sub(esp, Immediate(space));
+static constexpr DoubleRegister kScratchDoubleReg = xmm7;
+
+void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) {
+ DCHECK_LE(bytes, kMaxInt);
+ sub(esp, Immediate(bytes));
}
-void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
switch (value.type()) {
case kWasmI32:
if (value.to_i32() == 0) {
- xor_(reg, reg);
+ xor_(reg.gp(), reg.gp());
} else {
- mov(reg, Immediate(value.to_i32()));
+ mov(reg.gp(), Immediate(value.to_i32()));
}
break;
+ case kWasmF32: {
+ Register tmp = GetUnusedRegister(kGpReg).gp();
+ mov(tmp, Immediate(value.to_f32_boxed().get_bits()));
+ movd(reg.fp(), tmp);
+ break;
+ }
default:
- UNIMPLEMENTED();
+ UNREACHABLE();
}
}
@@ -60,46 +78,109 @@ void LiftoffAssembler::SpillContext(Register context) {
mov(liftoff::GetContextOperand(), context);
}
-void LiftoffAssembler::Load(Register dst, Register src_addr,
- uint32_t offset_imm, int size,
- PinnedRegisterScope pinned) {
- Operand src_op = Operand(src_addr, offset_imm);
+void LiftoffAssembler::FillContextInto(Register dst) {
+ mov(dst, liftoff::GetContextOperand());
+}
+
+void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uint32_t offset_imm,
+ LoadType type, LiftoffRegList pinned,
+ uint32_t* protected_load_pc) {
+ Operand src_op = offset_reg == no_reg
+ ? Operand(src_addr, offset_imm)
+ : Operand(src_addr, offset_reg, times_1, offset_imm);
if (offset_imm > kMaxInt) {
// The immediate can not be encoded in the operand. Load it to a register
// first.
- Register src = GetUnusedRegister(kGpReg, pinned);
+ Register src = GetUnusedRegister(kGpReg, pinned).gp();
mov(src, Immediate(offset_imm));
+ if (offset_reg != no_reg) {
+ emit_ptrsize_add(src, src, offset_reg);
+ }
src_op = Operand(src_addr, src, times_1, 0);
}
- DCHECK_EQ(4, size);
- mov(dst, src_op);
+ if (protected_load_pc) *protected_load_pc = pc_offset();
+ switch (type.value()) {
+ case LoadType::kI32Load8U:
+ movzx_b(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load8S:
+ movsx_b(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load16U:
+ movzx_w(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load16S:
+ movsx_w(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load:
+ mov(dst.gp(), src_op);
+ break;
+ case LoadType::kF32Load:
+ movss(dst.fp(), src_op);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
-void LiftoffAssembler::Store(Register dst_addr, uint32_t offset_imm,
- Register src, int size,
- PinnedRegisterScope pinned) {
- Operand dst_op = Operand(dst_addr, offset_imm);
+void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
+ uint32_t offset_imm, LiftoffRegister src,
+ StoreType type, LiftoffRegList pinned,
+ uint32_t* protected_store_pc) {
+ Operand dst_op = offset_reg == no_reg
+ ? Operand(dst_addr, offset_imm)
+ : Operand(dst_addr, offset_reg, times_1, offset_imm);
if (offset_imm > kMaxInt) {
// The immediate can not be encoded in the operand. Load it to a register
// first.
- Register dst = GetUnusedRegister(kGpReg, pinned);
+ Register dst = pinned.set(GetUnusedRegister(kGpReg, pinned).gp());
mov(dst, Immediate(offset_imm));
+ if (offset_reg != no_reg) {
+ emit_ptrsize_add(dst, dst, offset_reg);
+ }
dst_op = Operand(dst_addr, dst, times_1, 0);
}
- DCHECK_EQ(4, size);
- mov(dst_op, src);
+ if (protected_store_pc) *protected_store_pc = pc_offset();
+ switch (type.value()) {
+ case StoreType::kI32Store8:
+ // Only the lower 4 registers can be addressed as 8-bit registers.
+ if (src.gp().is_byte_register()) {
+ mov_b(dst_op, src.gp());
+ } else {
+ Register byte_src = GetUnusedRegister(liftoff::kByteRegs, pinned).gp();
+ mov(byte_src, src.gp());
+ mov_b(dst_op, byte_src);
+ }
+ break;
+ case StoreType::kI32Store16:
+ mov_w(dst_op, src.gp());
+ break;
+ case StoreType::kI32Store:
+ mov(dst_op, src.gp());
+ break;
+ case StoreType::kF32Store:
+ movss(dst_op, src.fp());
+ break;
+ default:
+ UNREACHABLE();
+ }
}
-void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
+void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx) {
- constexpr int32_t kCallerStackSlotSize = 4;
- mov(dst, Operand(ebp, kCallerStackSlotSize * (caller_slot_idx + 1)));
+ Operand src(ebp, kPointerSize * (caller_slot_idx + 1));
+ if (dst.is_gp()) {
+ mov(dst.gp(), src);
+ } else {
+ movss(dst.fp(), src);
+ }
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
DCHECK_NE(dst_index, src_index);
- if (cache_state_.has_unused_register()) {
- Register reg = GetUnusedRegister(kGpReg);
+ if (cache_state_.has_unused_register(kGpReg)) {
+ LiftoffRegister reg = GetUnusedRegister(kGpReg);
Fill(reg, src_index);
Spill(dst_index, reg);
} else {
@@ -108,23 +189,60 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
}
}
-void LiftoffAssembler::MoveToReturnRegister(Register reg) {
- if (reg != eax) mov(eax, reg);
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
+ // TODO(wasm): Extract the destination register from the CallDescriptor.
+ // TODO(wasm): Add multi-return support.
+ LiftoffRegister dst =
+ reg.is_gp() ? LiftoffRegister(eax) : LiftoffRegister(xmm1);
+ if (reg != dst) Move(dst, reg);
+}
+
+void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
+ // The caller should check that the registers are not equal. For most
+ // occurences, this is already guaranteed, so no need to check within this
+ // method.
+ DCHECK_NE(dst, src);
+ DCHECK_EQ(dst.reg_class(), src.reg_class());
+ // TODO(clemensh): Handle different sizes here.
+ if (dst.is_gp()) {
+ mov(dst.gp(), src.gp());
+ } else {
+ movsd(dst.fp(), src.fp());
+ }
}
-void LiftoffAssembler::Spill(uint32_t index, Register reg) {
- // TODO(clemensh): Handle different types here.
- mov(liftoff::GetStackSlot(index), reg);
+void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
+ Operand dst = liftoff::GetStackSlot(index);
+ // TODO(clemensh): Handle different sizes here.
+ if (reg.is_gp()) {
+ mov(dst, reg.gp());
+ } else {
+ movsd(dst, reg.fp());
+ }
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
- // TODO(clemensh): Handle different types here.
- mov(liftoff::GetStackSlot(index), Immediate(value.to_i32()));
+ Operand dst = liftoff::GetStackSlot(index);
+ switch (value.type()) {
+ case kWasmI32:
+ mov(dst, Immediate(value.to_i32()));
+ break;
+ case kWasmF32:
+ mov(dst, Immediate(value.to_f32_boxed().get_bits()));
+ break;
+ default:
+ UNREACHABLE();
+ }
}
-void LiftoffAssembler::Fill(Register reg, uint32_t index) {
- // TODO(clemensh): Handle different types here.
- mov(reg, liftoff::GetStackSlot(index));
+void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
+ Operand src = liftoff::GetStackSlot(index);
+ // TODO(clemensh): Handle different sizes here.
+ if (reg.is_gp()) {
+ mov(reg.gp(), src);
+ } else {
+ movsd(reg.fp(), src);
+ }
}
void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {
@@ -163,11 +281,303 @@ COMMUTATIVE_I32_BINOP(or, or_)
COMMUTATIVE_I32_BINOP(xor, xor_)
// clang-format on
-#undef DEFAULT_I32_BINOP
+#undef COMMUTATIVE_I32_BINOP
+
+namespace liftoff {
+inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
+ Register lhs, Register rhs,
+ void (Assembler::*emit_shift)(Register)) {
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(dst, lhs, rhs);
+ // If dst is ecx, compute into a tmp register first, then move to ecx.
+ if (dst == ecx) {
+ Register tmp = assm->GetUnusedRegister(kGpReg, pinned).gp();
+ assm->mov(tmp, lhs);
+ if (rhs != ecx) assm->mov(ecx, rhs);
+ (assm->*emit_shift)(tmp);
+ assm->mov(ecx, tmp);
+ return;
+ }
+
+ // Move rhs into ecx. If ecx is in use, move its content to a tmp register
+ // first. If lhs is ecx, lhs is now the tmp register.
+ Register tmp_reg = no_reg;
+ if (rhs != ecx) {
+ if (lhs == ecx || assm->cache_state()->is_used(LiftoffRegister(ecx))) {
+ tmp_reg = assm->GetUnusedRegister(kGpReg, pinned).gp();
+ assm->mov(tmp_reg, ecx);
+ if (lhs == ecx) lhs = tmp_reg;
+ }
+ assm->mov(ecx, rhs);
+ }
+
+ // Do the actual shift.
+ if (dst != lhs) assm->mov(dst, lhs);
+ (assm->*emit_shift)(dst);
+
+ // Restore ecx if needed.
+ if (tmp_reg.is_valid()) assm->mov(ecx, tmp_reg);
+}
+} // namespace liftoff
+
+void LiftoffAssembler::emit_i32_shl(Register dst, Register lhs, Register rhs) {
+ liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shl_cl);
+}
+
+void LiftoffAssembler::emit_i32_sar(Register dst, Register lhs, Register rhs) {
+ liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::sar_cl);
+}
+
+void LiftoffAssembler::emit_i32_shr(Register dst, Register lhs, Register rhs) {
+ liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shr_cl);
+}
+
+bool LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
+ Register tmp_byte_reg = dst;
+ // Only the lower 4 registers can be addressed as 8-bit registers.
+ if (!dst.is_byte_register()) {
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(src);
+ tmp_byte_reg = GetUnusedRegister(liftoff::kByteRegs, pinned).gp();
+ }
+
+ test(src, src);
+ setcc(zero, tmp_byte_reg);
+ movzx_b(dst, tmp_byte_reg);
+ return true;
+}
+
+bool LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
+ Label nonzero_input;
+ Label continuation;
+ test(src, src);
+ j(not_zero, &nonzero_input, Label::kNear);
+ mov(dst, Immediate(32));
+ jmp(&continuation, Label::kNear);
+
+ bind(&nonzero_input);
+ // Get most significant bit set (MSBS).
+ bsr(dst, src);
+ // CLZ = 31 - MSBS = MSBS ^ 31.
+ xor_(dst, 31);
+
+ bind(&continuation);
+ return true;
+}
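The `CLZ = 31 - MSBS = MSBS ^ 31` step relies on the identity that subtracting a 5-bit value from 0b11111 is the same as complementing its five bits. A quick standalone check:

    #include <cassert>

    int main() {
      for (int x = 0; x <= 31; ++x) assert(31 - x == (x ^ 31));
    }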
+
+bool LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
+ Label nonzero_input;
+ Label continuation;
+ test(src, src);
+ j(not_zero, &nonzero_input, Label::kNear);
+ mov(dst, Immediate(32));
+ jmp(&continuation, Label::kNear);
+
+ bind(&nonzero_input);
+ // Get least significant bit set, which equals number of trailing zeros.
+ bsf(dst, src);
+
+ bind(&continuation);
+ return true;
+}
+
+bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
+ if (!CpuFeatures::IsSupported(POPCNT)) return false;
+ CpuFeatureScope scope(this, POPCNT);
+ popcnt(dst, src);
+ return true;
+}
+
+void LiftoffAssembler::emit_ptrsize_add(Register dst, Register lhs,
+ Register rhs) {
+ emit_i32_add(dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vaddss(dst, lhs, rhs);
+ } else if (dst == rhs) {
+ addss(dst, lhs);
+ } else {
+ if (dst != lhs) movss(dst, lhs);
+ addss(dst, rhs);
+ }
+}
+
+void LiftoffAssembler::emit_f32_sub(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vsubss(dst, lhs, rhs);
+ } else if (dst == rhs) {
+ movss(kScratchDoubleReg, rhs);
+ movss(dst, lhs);
+ subss(dst, kScratchDoubleReg);
+ } else {
+ if (dst != lhs) movss(dst, lhs);
+ subss(dst, rhs);
+ }
+}
+
+void LiftoffAssembler::emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmulss(dst, lhs, rhs);
+ } else if (dst == rhs) {
+ mulss(dst, lhs);
+ } else {
+ if (dst != lhs) movss(dst, lhs);
+ mulss(dst, rhs);
+ }
+}
+
+void LiftoffAssembler::emit_i32_test(Register reg) { test(reg, reg); }
+
+void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
+ cmp(lhs, rhs);
+}
+
+void LiftoffAssembler::emit_jump(Label* label) { jmp(label); }
+
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
+ j(cond, label);
+}
+
+void LiftoffAssembler::StackCheck(Label* ool_code) {
+ Register limit = GetUnusedRegister(kGpReg).gp();
+ mov(limit, Immediate(ExternalReference::address_of_stack_limit(isolate())));
+ cmp(esp, Operand(limit, 0));
+ j(below_equal, ool_code);
+}
+
+void LiftoffAssembler::CallTrapCallbackForTesting() {
+ PrepareCallCFunction(0, GetUnusedRegister(kGpReg).gp());
+ CallCFunction(
+ ExternalReference::wasm_call_trap_callback_for_testing(isolate()), 0);
+}
+
+void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
+ TurboAssembler::AssertUnreachable(reason);
+}
+
+void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
+ uint32_t src_index) {
+ switch (src.loc()) {
+ case VarState::kStack:
+ DCHECK_NE(kWasmF64, src.type()); // TODO(clemensh): Implement this.
+ push(liftoff::GetStackSlot(src_index));
+ break;
+ case VarState::kRegister:
+ PushCallerFrameSlot(src.reg());
+ break;
+ case VarState::kI32Const:
+ push(Immediate(src.i32_const()));
+ break;
+ }
+}
+
+void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg) {
+ if (reg.is_gp()) {
+ push(reg.gp());
+ } else {
+ sub(esp, Immediate(kPointerSize));
+ movss(Operand(esp, 0), reg.fp());
+ }
+}
+
+void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
+ LiftoffRegList gp_regs = regs & kGpCacheRegList;
+ while (!gp_regs.is_empty()) {
+ LiftoffRegister reg = gp_regs.GetFirstRegSet();
+ push(reg.gp());
+ gp_regs.clear(reg);
+ }
+ LiftoffRegList fp_regs = regs & kFpCacheRegList;
+ unsigned num_fp_regs = fp_regs.GetNumRegsSet();
+ if (num_fp_regs) {
+ sub(esp, Immediate(num_fp_regs * kStackSlotSize));
+ unsigned offset = 0;
+ while (!fp_regs.is_empty()) {
+ LiftoffRegister reg = fp_regs.GetFirstRegSet();
+ movsd(Operand(esp, offset), reg.fp());
+ fp_regs.clear(reg);
+ offset += sizeof(double);
+ }
+ DCHECK_EQ(offset, num_fp_regs * sizeof(double));
+ }
+}
+
+void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
+ LiftoffRegList fp_regs = regs & kFpCacheRegList;
+ unsigned fp_offset = 0;
+ while (!fp_regs.is_empty()) {
+ LiftoffRegister reg = fp_regs.GetFirstRegSet();
+ movsd(reg.fp(), Operand(esp, fp_offset));
+ fp_regs.clear(reg);
+ fp_offset += sizeof(double);
+ }
+ if (fp_offset) add(esp, Immediate(fp_offset));
+ LiftoffRegList gp_regs = regs & kGpCacheRegList;
+ while (!gp_regs.is_empty()) {
+ LiftoffRegister reg = gp_regs.GetLastRegSet();
+ pop(reg.gp());
+ gp_regs.clear(reg);
+ }
+}
+
+void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
+ DCHECK_LT(num_stack_slots, (1 << 16) / kPointerSize); // 16 bit immediate
+ ret(static_cast<int>(num_stack_slots * kPointerSize));
+}
+
+void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
+ for (size_t param = 0; param < num_params; ++param) {
+ push(args[param]);
+ }
+ mov(liftoff::kCCallLastArgAddrReg, esp);
+ constexpr Register kScratch = ebx;
+ static_assert(kScratch != liftoff::kCCallLastArgAddrReg, "collision");
+ PrepareCallCFunction(num_params, kScratch);
+}
+
+void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
+ uint32_t num_params) {
+ int offset = kPointerSize * static_cast<int>(num_params - 1 - param_idx);
+ lea(dst, Operand(liftoff::kCCallLastArgAddrReg, offset));
+}
+
+void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
+ uint32_t param_idx,
+ uint32_t num_params) {
+ constexpr Register kScratch = ebx;
+ static_assert(kScratch != liftoff::kCCallLastArgAddrReg, "collision");
+ int offset = kPointerSize * static_cast<int>(num_params - 1 - param_idx);
+ lea(kScratch, Operand(liftoff::kCCallLastArgAddrReg, offset));
+ mov(Operand(esp, param_idx * kPointerSize), kScratch);
+}
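A worked check of the parameter-address arithmetic in the two functions above: PrepareCCall pushes the parameters in order, so kCCallLastArgAddrReg (esp at that point) addresses the last parameter, and parameter i lives (num_params - 1 - i) pointer slots above it:

    #include <cassert>

    int main() {
      constexpr int kPointerSize = 4;  // ia32
      constexpr int num_params = 3;
      // The last parameter sits exactly at the saved esp ...
      assert((num_params - 1 - 2) * kPointerSize == 0);
      // ... and the first parameter was pushed deepest, two slots above.
      assert((num_params - 1 - 0) * kPointerSize == 8);
    }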
+
+void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
+ CallCFunction(ext_ref, static_cast<int>(num_params));
+}
+
+void LiftoffAssembler::CallNativeWasmCode(Address addr) {
+ wasm_call(addr, RelocInfo::WASM_CALL);
+}
+
+void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
+ // Set context to zero.
+ xor_(esi, esi);
+ CallRuntimeDelayed(zone, fid);
+}
+
+void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
+ sub(esp, Immediate(size));
+ mov(addr, esp);
+}
-void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {
- test(reg, reg);
- j(zero, label);
+void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
+ add(esp, Immediate(size));
}
} // namespace wasm
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h b/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
new file mode 100644
index 0000000000..3eef1e1960
--- /dev/null
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
@@ -0,0 +1,64 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_DEFS_H_
+#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_DEFS_H_
+
+#include "src/reglist.h"
+
+#if V8_TARGET_ARCH_IA32
+#include "src/ia32/assembler-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "src/x64/assembler-x64.h"
+#endif
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+#if V8_TARGET_ARCH_IA32
+
+constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = true;
+
+constexpr RegList kLiftoffAssemblerGpCacheRegs =
+ Register::ListOf<eax, ecx, edx, ebx, esi, edi>();
+
+// Omit xmm7, which is the kScratchDoubleReg.
+constexpr RegList kLiftoffAssemblerFpCacheRegs =
+ DoubleRegister::ListOf<xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6>();
+
+#elif V8_TARGET_ARCH_X64
+
+constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = true;
+
+constexpr RegList kLiftoffAssemblerGpCacheRegs =
+ Register::ListOf<rax, rcx, rdx, rbx, rsi, rdi>();
+
+constexpr RegList kLiftoffAssemblerFpCacheRegs =
+ DoubleRegister::ListOf<xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7>();
+
+#else
+
+constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
+
+constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
+
+constexpr RegList kLiftoffAssemblerFpCacheRegs = 0xff;
+
+#endif
+
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
+constexpr Condition kEqual = equal;
+constexpr Condition kUnsignedGreaterEqual = above_equal;
+#else
+// On unimplemented platforms, just make this compile.
+constexpr Condition kEqual = static_cast<Condition>(0);
+constexpr Condition kUnsignedGreaterEqual = static_cast<Condition>(0);
+#endif
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_DEFS_H_
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.cc b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
index 8a68fe4d91..121cfeea6a 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
@@ -29,20 +29,21 @@ namespace {
class StackTransferRecipe {
struct RegisterMove {
- Register dst;
- Register src;
- constexpr RegisterMove(Register dst, Register src) : dst(dst), src(src) {}
+ LiftoffRegister dst;
+ LiftoffRegister src;
+ constexpr RegisterMove(LiftoffRegister dst, LiftoffRegister src)
+ : dst(dst), src(src) {}
};
struct RegisterLoad {
- Register dst;
+ LiftoffRegister dst;
bool is_constant_load; // otherwise load it from the stack.
union {
uint32_t stack_slot;
WasmValue constant;
};
- RegisterLoad(Register dst, WasmValue constant)
+ RegisterLoad(LiftoffRegister dst, WasmValue constant)
: dst(dst), is_constant_load(true), constant(constant) {}
- RegisterLoad(Register dst, uint32_t stack_slot)
+ RegisterLoad(LiftoffRegister dst, uint32_t stack_slot)
: dst(dst), is_constant_load(false), stack_slot(stack_slot) {}
};
@@ -54,18 +55,16 @@ class StackTransferRecipe {
// First, execute register moves. Then load constants and stack values into
// registers.
- if ((move_dst_regs & move_src_regs) == 0) {
+ if ((move_dst_regs & move_src_regs).is_empty()) {
// No overlap in src and dst registers. Just execute the moves in any
// order.
for (RegisterMove& rm : register_moves) asm_->Move(rm.dst, rm.src);
register_moves.clear();
} else {
// Keep use counters of src registers.
- constexpr size_t kRegArrSize =
- LiftoffAssembler::CacheState::kMaxRegisterCode + 1;
- uint32_t src_reg_use_count[kRegArrSize] = {0};
+ uint32_t src_reg_use_count[kAfterMaxLiftoffRegCode] = {0};
for (RegisterMove& rm : register_moves) {
- ++src_reg_use_count[rm.src.code()];
+ ++src_reg_use_count[rm.src.liftoff_code()];
}
// Now repeatedly iterate the list of register moves, and execute those
// whose dst register does not appear as src any more. The remaining moves
@@ -77,11 +76,11 @@ class StackTransferRecipe {
while (!register_moves.empty()) {
int executed_moves = 0;
for (auto& rm : register_moves) {
- if (src_reg_use_count[rm.dst.code()] == 0) {
+ if (src_reg_use_count[rm.dst.liftoff_code()] == 0) {
asm_->Move(rm.dst, rm.src);
++executed_moves;
- DCHECK_LT(0, src_reg_use_count[rm.src.code()]);
- --src_reg_use_count[rm.src.code()];
+ DCHECK_LT(0, src_reg_use_count[rm.src.liftoff_code()]);
+ --src_reg_use_count[rm.src.liftoff_code()];
} else if (executed_moves) {
// Compaction: Move not-executed moves to the beginning of the list.
(&rm)[-executed_moves] = rm;
@@ -89,17 +88,18 @@ class StackTransferRecipe {
}
if (executed_moves == 0) {
// There is a cycle. Spill one register, then continue.
- Register spill_reg = register_moves.back().src;
+ // TODO(clemensh): Use an unused register if available.
+ LiftoffRegister spill_reg = register_moves.back().src;
asm_->Spill(next_spill_slot, spill_reg);
// Remember to reload into the destination register later.
LoadStackSlot(register_moves.back().dst, next_spill_slot);
- DCHECK_EQ(1, src_reg_use_count[spill_reg.code()]);
- src_reg_use_count[spill_reg.code()] = 0;
+ DCHECK_EQ(1, src_reg_use_count[spill_reg.liftoff_code()]);
+ src_reg_use_count[spill_reg.liftoff_code()] = 0;
++next_spill_slot;
executed_moves = 1;
}
- constexpr RegisterMove dummy(no_reg, no_reg);
- register_moves.resize(register_moves.size() - executed_moves, dummy);
+ register_moves.erase(register_moves.end() - executed_moves,
+ register_moves.end());
}
}
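Reviewer note: the hunk above is a classic parallel-move resolver. Moves whose destination is no longer needed as a source execute immediately and are compacted out; when only cycles remain, one source is spilled to a stack slot and reloaded into its destination afterwards. A self-contained sketch of the same algorithm, using plain ints for registers (a toy model, not V8's API):

#include <cstdio>
#include <utility>
#include <vector>

struct Move { int dst, src; };

// Resolve parallel moves among registers 0..num_regs-1, breaking cycles by
// spilling one source value into spill_area and reloading it at the end.
void ExecuteMoves(std::vector<Move> moves, int* regs, int num_regs,
                  int* spill_area) {
  std::vector<int> src_use(num_regs, 0);
  for (const Move& m : moves) ++src_use[m.src];
  int next_spill = 0;
  std::vector<std::pair<int, int>> reloads;  // (dst, spill slot)
  while (!moves.empty()) {
    size_t executed = 0;
    std::vector<Move> remaining;
    for (const Move& m : moves) {
      if (src_use[m.dst] == 0) {   // dst not needed as a source any more
        regs[m.dst] = regs[m.src];
        --src_use[m.src];
        ++executed;
      } else {
        remaining.push_back(m);
      }
    }
    if (executed == 0) {           // pure cycle: spill one source
      Move m = remaining.back();
      spill_area[next_spill] = regs[m.src];
      reloads.push_back({m.dst, next_spill});
      src_use[m.src] = 0;          // unblocks the move targeting m.src
      ++next_spill;
      remaining.pop_back();
    }
    moves.swap(remaining);
  }
  for (auto& r : reloads) regs[r.first] = spill_area[r.second];
}

int main() {
  int regs[3] = {10, 20, 30};
  int spill[1];
  // r0 <-> r1 is a cycle; r2 <- r0 must still see the old r0.
  ExecuteMoves({{0, 1}, {1, 0}, {2, 0}}, regs, 3, spill);
  std::printf("%d %d %d\n", regs[0], regs[1], regs[2]);  // 20 10 10
}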
@@ -127,43 +127,50 @@ class StackTransferRecipe {
case VarState::kRegister:
asm_->Spill(dst_index, src.reg());
break;
- case VarState::kConstant:
- // TODO(clemensh): Handle other types than i32.
+ case VarState::kI32Const:
asm_->Spill(dst_index, WasmValue(src.i32_const()));
break;
}
break;
case VarState::kRegister:
- switch (src.loc()) {
- case VarState::kStack:
- LoadStackSlot(dst.reg(), src_index);
- break;
- case VarState::kRegister:
- if (dst.reg() != src.reg()) MoveRegister(dst.reg(), src.reg());
- break;
- case VarState::kConstant:
- LoadConstant(dst.reg(), WasmValue(src.i32_const()));
- break;
- }
+ LoadIntoRegister(dst.reg(), src, src_index);
break;
- case VarState::kConstant:
+ case VarState::kI32Const:
DCHECK_EQ(dst, src);
break;
}
}
- void MoveRegister(Register dst, Register src) {
- DCHECK_EQ(0, move_dst_regs & dst.bit());
- move_dst_regs |= dst.bit();
- move_src_regs |= src.bit();
+ void LoadIntoRegister(LiftoffRegister dst,
+ const LiftoffAssembler::VarState& src,
+ uint32_t src_index) {
+ switch (src.loc()) {
+ case VarState::kStack:
+ LoadStackSlot(dst, src_index);
+ break;
+ case VarState::kRegister:
+ DCHECK_EQ(dst.reg_class(), src.reg_class());
+ if (dst != src.reg()) MoveRegister(dst, src.reg());
+ break;
+ case VarState::kI32Const:
+ LoadConstant(dst, WasmValue(src.i32_const()));
+ break;
+ }
+ }
+
+ void MoveRegister(LiftoffRegister dst, LiftoffRegister src) {
+ DCHECK_NE(dst, src);
+ DCHECK(!move_dst_regs.has(dst));
+ move_dst_regs.set(dst);
+ move_src_regs.set(src);
register_moves.emplace_back(dst, src);
}
- void LoadConstant(Register dst, WasmValue value) {
+ void LoadConstant(LiftoffRegister dst, WasmValue value) {
register_loads.emplace_back(dst, value);
}
- void LoadStackSlot(Register dst, uint32_t stack_index) {
+ void LoadStackSlot(LiftoffRegister dst, uint32_t stack_index) {
register_loads.emplace_back(dst, stack_index);
}
@@ -171,8 +178,8 @@ class StackTransferRecipe {
// TODO(clemensh): Avoid unconditionally allocating on the heap.
std::vector<RegisterMove> register_moves;
std::vector<RegisterLoad> register_loads;
- RegList move_dst_regs = 0;
- RegList move_src_regs = 0;
+ LiftoffRegList move_dst_regs;
+ LiftoffRegList move_src_regs;
LiftoffAssembler* const asm_;
};
@@ -199,14 +206,15 @@ void LiftoffAssembler::CacheState::InitMerge(const CacheState& source,
for (; src_idx < src_end; ++src_idx, ++dst_idx) {
auto& dst = stack_state[dst_idx];
auto& src = source.stack_state[src_idx];
- Register reg = no_reg;
+ // Just initialize to any register; will be overwritten before use.
+ LiftoffRegister reg(Register::from_code<0>());
+ RegClass rc = src.is_reg() ? src.reg_class() : reg_class_for(src.type());
if (src.is_reg() && is_free(src.reg())) {
reg = src.reg();
- } else if (has_unused_register()) {
- reg = unused_register();
+ } else if (has_unused_register(rc)) {
+ reg = unused_register(rc);
} else {
// Make this a stack slot.
- DCHECK(src.is_stack());
dst = VarState(src.type());
continue;
}
@@ -224,20 +232,19 @@ void LiftoffAssembler::CacheState::InitMerge(const CacheState& source,
if (is_used(src.reg())) {
// Make this a stack slot.
dst = VarState(src.type());
- continue;
+ } else {
+ dst = VarState(src.type(), src.reg());
+ inc_used(src.reg());
}
- dst = VarState(src.type(), src.reg());
- inc_used(src.reg());
} else if (src.is_const()) {
dst = src;
} else {
- // Keep this a stack slot (which is the initial value).
DCHECK(src.is_stack());
- DCHECK(dst.is_stack());
- continue;
+ // Make this a stack slot.
+ dst = VarState(src.type());
}
}
- last_spilled_reg = source.last_spilled_reg;
+ last_spilled_regs = source.last_spilled_regs;
}
void LiftoffAssembler::CacheState::Steal(CacheState& source) {
@@ -250,6 +257,8 @@ void LiftoffAssembler::CacheState::Split(const CacheState& source) {
*this = source;
}
+// TODO(clemensh): Provide a reasonably sized buffer, based on wasm function
+// size.
LiftoffAssembler::LiftoffAssembler(Isolate* isolate)
: TurboAssembler(isolate, nullptr, 0, CodeObjectRequired::kYes) {}
@@ -259,35 +268,48 @@ LiftoffAssembler::~LiftoffAssembler() {
}
}
-Register LiftoffAssembler::GetBinaryOpTargetRegister(
- RegClass rc, PinnedRegisterScope pinned) {
+LiftoffRegister LiftoffAssembler::GetBinaryOpTargetRegister(
+ RegClass rc, LiftoffRegList pinned) {
auto& slot_lhs = *(cache_state_.stack_state.end() - 2);
if (slot_lhs.is_reg() && GetNumUses(slot_lhs.reg()) == 1) {
+ DCHECK_EQ(rc, slot_lhs.reg().reg_class());
return slot_lhs.reg();
}
auto& slot_rhs = *(cache_state_.stack_state.end() - 1);
if (slot_rhs.is_reg() && GetNumUses(slot_rhs.reg()) == 1) {
+ DCHECK_EQ(rc, slot_rhs.reg().reg_class());
return slot_rhs.reg();
}
return GetUnusedRegister(rc, pinned);
}
-Register LiftoffAssembler::PopToRegister(RegClass rc,
- PinnedRegisterScope pinned) {
+LiftoffRegister LiftoffAssembler::GetUnaryOpTargetRegister(
+ RegClass rc, LiftoffRegList pinned) {
+ auto& slot_src = cache_state_.stack_state.back();
+ if (slot_src.is_reg() && GetNumUses(slot_src.reg()) == 1) {
+ DCHECK_EQ(rc, slot_src.reg().reg_class());
+ return slot_src.reg();
+ }
+ return GetUnusedRegister(rc, pinned);
+}
+
+LiftoffRegister LiftoffAssembler::PopToRegister(RegClass rc,
+ LiftoffRegList pinned) {
DCHECK(!cache_state_.stack_state.empty());
VarState slot = cache_state_.stack_state.back();
cache_state_.stack_state.pop_back();
switch (slot.loc()) {
case VarState::kStack: {
- Register reg = GetUnusedRegister(rc, pinned);
+ LiftoffRegister reg = GetUnusedRegister(rc, pinned);
Fill(reg, cache_state_.stack_height());
return reg;
}
case VarState::kRegister:
+ DCHECK_EQ(rc, slot.reg_class());
cache_state_.dec_used(slot.reg());
return slot.reg();
- case VarState::kConstant: {
- Register reg = GetUnusedRegister(rc, pinned);
+ case VarState::kI32Const: {
+ LiftoffRegister reg = GetUnusedRegister(rc, pinned);
LoadConstant(reg, WasmValue(slot.i32_const()));
return reg;
}
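Reviewer note: PopToRegister handles the three places a value-stack slot can live: already in a register (reuse it), on the machine stack (fill into a fresh register), or an i32 constant (materialize it). The slot itself is a tagged union in the style of the VarState class further down, kept trivially copyable. A toy model of both, with a deliberately naive register allocator and illustrative names:

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <type_traits>
#include <vector>

enum class Loc : uint8_t { kStack, kRegister, kI32Const };

// Tagged union in the style of VarState: explicit tag plus union payload.
struct Slot {
  Loc loc;
  union {
    int reg;             // used if loc == Loc::kRegister
    uint32_t i32_const;  // used if loc == Loc::kI32Const
  };
};
static_assert(std::is_trivially_copyable<Slot>::value,
              "Slot should be trivially copyable");

struct MiniAssembler {
  std::vector<Slot> stack;           // the value stack (tags only)
  uint32_t machine_stack[16] = {0};  // spilled values live here
  uint32_t regs[4] = {0};
  int next_free_reg = 0;

  int GetUnusedRegister() { return next_free_reg++ % 4; }  // toy allocator

  int PopToRegister() {
    assert(!stack.empty());
    Slot slot = stack.back();
    stack.pop_back();
    switch (slot.loc) {
      case Loc::kStack: {            // fill from the machine stack
        int r = GetUnusedRegister();
        regs[r] = machine_stack[stack.size()];
        return r;
      }
      case Loc::kRegister:           // value is already cached in a register
        return slot.reg;
      case Loc::kI32Const: {         // materialize the constant
        int r = GetUnusedRegister();
        regs[r] = slot.i32_const;
        return r;
      }
    }
    return -1;  // unreachable
  }
};

int main() {
  MiniAssembler a;
  Slot c;
  c.loc = Loc::kI32Const;
  c.i32_const = 42;
  a.stack.push_back(c);
  int r = a.PopToRegister();
  std::printf("r%d = %u\n", r, a.regs[r]);
}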
@@ -333,7 +355,7 @@ void LiftoffAssembler::Spill(uint32_t index) {
Spill(index, slot.reg());
cache_state_.dec_used(slot.reg());
break;
- case VarState::kConstant:
+ case VarState::kI32Const:
Spill(index, WasmValue(slot.i32_const()));
break;
}
@@ -346,25 +368,112 @@ void LiftoffAssembler::SpillLocals() {
}
}
-Register LiftoffAssembler::SpillOneRegister(RegClass rc,
- PinnedRegisterScope pinned_regs) {
- DCHECK_EQ(kGpReg, rc);
+void LiftoffAssembler::SpillAllRegisters() {
+ for (uint32_t i = 0, e = cache_state_.stack_height(); i < e; ++i) {
+ auto& slot = cache_state_.stack_state[i];
+ if (!slot.is_reg()) continue;
+ Spill(i, slot.reg());
+ slot.MakeStack();
+ }
+ cache_state_.reset_used_registers();
+}
+void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig,
+ compiler::CallDescriptor* call_desc) {
+ uint32_t num_params = static_cast<uint32_t>(sig->parameter_count());
+ // Parameter 0 is the wasm context.
+ constexpr size_t kFirstActualParameter = 1;
+ DCHECK_EQ(kFirstActualParameter + num_params, call_desc->ParameterCount());
+
+ // Input 0 is the call target.
+ constexpr size_t kInputShift = 1;
+
+ // Spill all cache slots which are not being used as parameters.
+ // Don't update any register use counters, they will be reset later anyway.
+ for (uint32_t idx = 0, end = cache_state_.stack_height() - num_params;
+ idx < end; ++idx) {
+ VarState& slot = cache_state_.stack_state[idx];
+ if (!slot.is_reg()) continue;
+ Spill(idx, slot.reg());
+ slot.MakeStack();
+ }
+
+ StackTransferRecipe stack_transfers(this);
+
+ // Now move all parameter values into the right slot for the call.
+ // Process parameters backward, such that we can just pop values from the
+ // stack.
+ for (uint32_t i = num_params; i > 0; --i) {
+ uint32_t param = i - 1;
+ ValueType type = sig->GetParam(param);
+ RegClass rc = reg_class_for(type);
+ compiler::LinkageLocation loc = call_desc->GetInputLocation(
+ param + kFirstActualParameter + kInputShift);
+ const VarState& slot = cache_state_.stack_state.back();
+ uint32_t stack_idx = cache_state_.stack_height() - 1;
+ if (loc.IsRegister()) {
+ DCHECK(!loc.IsAnyRegister());
+ int reg_code = loc.AsRegister();
+ LiftoffRegister reg = LiftoffRegister::from_code(rc, reg_code);
+ stack_transfers.LoadIntoRegister(reg, slot, stack_idx);
+ } else {
+ DCHECK(loc.IsCallerFrameSlot());
+ PushCallerFrameSlot(slot, stack_idx);
+ }
+ cache_state_.stack_state.pop_back();
+ }
+
+ // Execute the stack transfers before filling the context register.
+ stack_transfers.Execute();
+
+ // Reset register use counters.
+ cache_state_.reset_used_registers();
+
+ // Fill the wasm context into the right register.
+ compiler::LinkageLocation context_loc =
+ call_desc->GetInputLocation(kInputShift);
+ DCHECK(context_loc.IsRegister() && !context_loc.IsAnyRegister());
+ int context_reg_code = context_loc.AsRegister();
+ LiftoffRegister context_reg(Register::from_code(context_reg_code));
+ FillContextInto(context_reg.gp());
+}
+
+void LiftoffAssembler::FinishCall(wasm::FunctionSig* sig,
+ compiler::CallDescriptor* call_desc) {
+ size_t return_count = call_desc->ReturnCount();
+ DCHECK_EQ(return_count, sig->return_count());
+ if (return_count != 0) {
+ DCHECK_EQ(1, return_count);
+ compiler::LinkageLocation return_loc = call_desc->GetReturnLocation(0);
+ int return_reg_code = return_loc.AsRegister();
+ ValueType return_type = sig->GetReturn(0);
+ LiftoffRegister return_reg =
+ LiftoffRegister::from_code(reg_class_for(return_type), return_reg_code);
+ DCHECK(!cache_state_.is_used(return_reg));
+ PushRegister(return_type, return_reg);
+ }
+}
+
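Reviewer note: PrepareCall walks the signature back to front so that the value to bind is always the current stack top, routes register parameters through the stack-transfer recipe, pushes stack parameters as caller frame slots, and fills fixed registers such as the wasm context only after all transfers have executed, so they cannot be clobbered. A simplified sketch of that ordering, using a hypothetical descriptor type rather than V8's CallDescriptor:

#include <cstdio>
#include <vector>

// Hypothetical parameter location: either a register number or a caller
// frame slot index.
struct ParamLoc { bool in_register; int code; };

void PrepareCall(std::vector<int>& value_stack,
                 const std::vector<ParamLoc>& locs) {
  // Process parameters backward, so each value to bind is the stack top.
  for (size_t i = locs.size(); i > 0; --i) {
    const ParamLoc& loc = locs[i - 1];
    int value = value_stack.back();
    value_stack.pop_back();
    if (loc.in_register) {
      std::printf("move value %d into register r%d\n", value, loc.code);
    } else {
      std::printf("push value %d to caller frame slot %d\n", value, loc.code);
    }
  }
  // Only now fill fixed registers like the wasm context, after all
  // parameter transfers are done and cannot clobber them.
  std::puts("fill context register");
}

int main() {
  std::vector<int> stack = {11, 22, 33};
  PrepareCall(stack, {{true, 0}, {true, 1}, {false, 0}});
}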
+LiftoffRegister LiftoffAssembler::SpillOneRegister(LiftoffRegList candidates,
+ LiftoffRegList pinned) {
// Spill one cached value to free a register.
- Register spill_reg = cache_state_.GetNextSpillReg(pinned_regs);
- int remaining_uses = cache_state_.register_use_count[spill_reg.code()];
+ LiftoffRegister spill_reg = cache_state_.GetNextSpillReg(candidates, pinned);
+ SpillRegister(spill_reg);
+ return spill_reg;
+}
+
+void LiftoffAssembler::SpillRegister(LiftoffRegister reg) {
+ int remaining_uses = cache_state_.get_use_count(reg);
DCHECK_LT(0, remaining_uses);
for (uint32_t idx = cache_state_.stack_height() - 1;; --idx) {
DCHECK_GT(cache_state_.stack_height(), idx);
- auto& slot = cache_state_.stack_state[idx];
- if (!slot.is_reg() || slot.reg() != spill_reg) continue;
- Spill(idx, spill_reg);
- slot.MakeStack();
+ auto* slot = &cache_state_.stack_state[idx];
+ if (!slot->is_reg() || slot->reg() != reg) continue;
+ Spill(idx, reg);
+ slot->MakeStack();
if (--remaining_uses == 0) break;
}
- cache_state_.register_use_count[spill_reg.code()] = 0;
- cache_state_.used_registers &= ~spill_reg.bit();
- return spill_reg;
+ cache_state_.clear_used(reg);
}
void LiftoffAssembler::set_num_locals(uint32_t num_locals) {
@@ -378,7 +487,20 @@ void LiftoffAssembler::set_num_locals(uint32_t num_locals) {
}
uint32_t LiftoffAssembler::GetTotalFrameSlotCount() const {
- return kPointerSize * (num_locals() + kMaxValueStackHeight);
+ return num_locals() + kMaxValueStackHeight;
+}
+
+std::ostream& operator<<(std::ostream& os, VarState slot) {
+ os << WasmOpcodes::TypeName(slot.type()) << ":";
+ switch (slot.loc()) {
+ case VarState::kStack:
+ return os << "s";
+ case VarState::kRegister:
+ return os << slot.reg();
+ case VarState::kI32Const:
+ return os << "c" << slot.i32_const();
+ }
+ UNREACHABLE();
}
#undef __
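Reviewer note on the unit change in GetTotalFrameSlotCount: it now returns a slot count rather than bytes, and callers convert once using kStackSlotSize (8 bytes per slot, per the header change below; see the ReserveStackSpace call in liftoff-compiler.cc). In sketch form:

#include <cstdint>
#include <cstdio>

// Slots vs. bytes: the assembler reports frame size in 8-byte slots and
// the caller converts exactly once when reserving stack space.
constexpr uint32_t kStackSlotSize = 8;

uint32_t GetTotalFrameSlotCount(uint32_t num_locals,
                                uint32_t max_value_stack_height) {
  return num_locals + max_value_stack_height;
}

int main() {
  uint32_t slots = GetTotalFrameSlotCount(3, 8);
  std::printf("reserve %u bytes for %u slots\n", kStackSlotSize * slots,
              slots);
}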
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h
index 55deb593f8..b91f6d7c88 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h
@@ -5,38 +5,21 @@
#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_H_
#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_H_
+#include <iosfwd>
#include <memory>
// Clients of this interface shouldn't depend on lots of compiler internals.
// Do not include anything from src/compiler here!
+#include "src/base/bits.h"
#include "src/frames.h"
#include "src/macro-assembler.h"
+#include "src/wasm/baseline/liftoff-assembler-defs.h"
+#include "src/wasm/baseline/liftoff-register.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-value.h"
-// Include platform specific definitions.
-#if V8_TARGET_ARCH_IA32
-#include "src/wasm/baseline/ia32/liftoff-assembler-ia32-defs.h"
-#elif V8_TARGET_ARCH_X64
-#include "src/wasm/baseline/x64/liftoff-assembler-x64-defs.h"
-#elif V8_TARGET_ARCH_ARM64
-#include "src/wasm/baseline/arm64/liftoff-assembler-arm64-defs.h"
-#elif V8_TARGET_ARCH_ARM
-#include "src/wasm/baseline/arm/liftoff-assembler-arm-defs.h"
-#elif V8_TARGET_ARCH_PPC
-#include "src/wasm/baseline/ppc/liftoff-assembler-ppc-defs.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "src/wasm/baseline/mips/liftoff-assembler-mips-defs.h"
-#elif V8_TARGET_ARCH_MIPS64
-#include "src/wasm/baseline/mips64/liftoff-assembler-mips64-defs.h"
-#elif V8_TARGET_ARCH_S390
-#include "src/wasm/baseline/s390/liftoff-assembler-s390-defs.h"
-#else
-#error Unsupported architecture.
-#endif
-
namespace v8 {
namespace internal {
namespace wasm {
@@ -44,51 +27,26 @@ namespace wasm {
// Forward declarations.
struct ModuleEnv;
-enum RegClass { kNoReg, kGpReg, kFpReg };
-
-// TODO(clemensh): Switch to a switch once we require C++14 support.
-static constexpr RegClass reg_class_for(ValueType type) {
- return type == kWasmI32 || type == kWasmI64 // int types
- ? kGpReg
- : type == kWasmF32 || type == kWasmF64 // float types
- ? kFpReg
- : kNoReg; // other (unsupported) types
-}
-
class LiftoffAssembler : public TurboAssembler {
public:
// TODO(clemensh): Remove this limitation by allocating more stack space if
// needed.
static constexpr int kMaxValueStackHeight = 8;
- class PinnedRegisterScope {
- public:
- PinnedRegisterScope() : pinned_regs_(0) {}
- explicit PinnedRegisterScope(RegList regs) : pinned_regs_(regs) {}
-
- Register pin(Register reg) {
- pinned_regs_ |= reg.bit();
- return reg;
- }
-
- RegList pinned_regs() const { return pinned_regs_; }
- bool has(Register reg) const { return (pinned_regs_ & reg.bit()) != 0; }
-
- private:
- RegList pinned_regs_ = 0;
- };
- static_assert(IS_TRIVIALLY_COPYABLE(PinnedRegisterScope),
- "PinnedRegisterScope can be passed by value");
+ // Each slot in our stack frame currently has exactly 8 bytes.
+ static constexpr uint32_t kStackSlotSize = 8;
class VarState {
public:
- enum Location : uint8_t { kStack, kRegister, kConstant };
+ enum Location : uint8_t { kStack, kRegister, kI32Const };
explicit VarState(ValueType type) : loc_(kStack), type_(type) {}
- explicit VarState(ValueType type, Register r)
- : loc_(kRegister), type_(type), reg_(r) {}
+ explicit VarState(ValueType type, LiftoffRegister r)
+ : loc_(kRegister), type_(type), reg_(r) {
+ DCHECK_EQ(r.reg_class(), reg_class_for(type));
+ }
explicit VarState(ValueType type, uint32_t i32_const)
- : loc_(kConstant), type_(type), i32_const_(i32_const) {
+ : loc_(kI32Const), type_(type), i32_const_(i32_const) {
DCHECK(type_ == kWasmI32 || type_ == kWasmI64);
}
@@ -99,29 +57,33 @@ class LiftoffAssembler : public TurboAssembler {
return true;
case kRegister:
return reg_ == other.reg_;
- case kConstant:
+ case kI32Const:
return i32_const_ == other.i32_const_;
}
UNREACHABLE();
}
bool is_stack() const { return loc_ == kStack; }
+ bool is_gp_reg() const { return loc_ == kRegister && reg_.is_gp(); }
+ bool is_fp_reg() const { return loc_ == kRegister && reg_.is_fp(); }
bool is_reg() const { return loc_ == kRegister; }
- bool is_const() const { return loc_ == kConstant; }
+ bool is_const() const { return loc_ == kI32Const; }
ValueType type() const { return type_; }
Location loc() const { return loc_; }
uint32_t i32_const() const {
- DCHECK_EQ(loc_, kConstant);
+ DCHECK_EQ(loc_, kI32Const);
return i32_const_;
}
-
- Register reg() const {
+ Register gp_reg() const { return reg().gp(); }
+ DoubleRegister fp_reg() const { return reg().fp(); }
+ LiftoffRegister reg() const {
DCHECK_EQ(loc_, kRegister);
return reg_;
}
+ RegClass reg_class() const { return reg().reg_class(); }
void MakeStack() { loc_ = kStack; }
@@ -132,10 +94,11 @@ class LiftoffAssembler : public TurboAssembler {
ValueType type_;
union {
- Register reg_; // used if loc_ == kRegister
- uint32_t i32_const_; // used if loc_ == kConstant
+ LiftoffRegister reg_; // used if loc_ == kRegister
+ uint32_t i32_const_; // used if loc_ == kI32Const
};
};
+
static_assert(IS_TRIVIALLY_COPYABLE(VarState),
"VarState should be trivially copyable");
@@ -147,80 +110,102 @@ class LiftoffAssembler : public TurboAssembler {
// TODO(clemensh): Improve memory management here; avoid std::vector.
std::vector<VarState> stack_state;
- RegList used_registers = 0;
- // TODO(clemensh): Replace this by CountLeadingZeros(kGpCacheRegs) once that
- // method is constexpr.
- static constexpr int kMaxRegisterCode = 7;
- uint32_t register_use_count[kMaxRegisterCode + 1] = {0};
+ LiftoffRegList used_registers;
+ uint32_t register_use_count[kAfterMaxLiftoffRegCode] = {0};
+ LiftoffRegList last_spilled_regs;
// TODO(clemensh): Remove stack_base; use ControlBase::stack_depth.
uint32_t stack_base = 0;
- Register last_spilled_reg = Register::from_code<0>();
-
- // InitMerge: Initialize this CacheState from the {source} cache state, but
- // make sure that other code paths can still jump here (i.e. avoid constants
- // in the locals or the merge region as specified by {arity}).
- // TODO(clemensh): Don't copy the full parent state (this makes us N^2).
- void InitMerge(const CacheState& source, uint32_t num_locals,
- uint32_t arity);
- void Steal(CacheState& source);
+ bool has_unused_register(RegClass rc, LiftoffRegList pinned = {}) const {
+ DCHECK(rc == kGpReg || rc == kFpReg);
+ LiftoffRegList candidates = GetCacheRegList(rc);
+ return has_unused_register(candidates, pinned);
+ }
- void Split(const CacheState& source);
+ bool has_unused_register(LiftoffRegList candidates,
+ LiftoffRegList pinned = {}) const {
+ LiftoffRegList available_regs = candidates & ~used_registers & ~pinned;
+ return !available_regs.is_empty();
+ }
- bool has_unused_register(PinnedRegisterScope pinned_scope = {}) const {
- RegList available_regs =
- kGpCacheRegs & ~used_registers & ~pinned_scope.pinned_regs();
- return available_regs != 0;
+ LiftoffRegister unused_register(RegClass rc,
+ LiftoffRegList pinned = {}) const {
+ DCHECK(rc == kGpReg || rc == kFpReg);
+ LiftoffRegList candidates = GetCacheRegList(rc);
+ return unused_register(candidates);
}
- Register unused_register(PinnedRegisterScope pinned_scope = {}) const {
- RegList available_regs =
- kGpCacheRegs & ~used_registers & ~pinned_scope.pinned_regs();
- Register reg =
- Register::from_code(base::bits::CountTrailingZeros(available_regs));
- DCHECK_EQ(0, used_registers & reg.bit());
- return reg;
+ LiftoffRegister unused_register(LiftoffRegList candidates,
+ LiftoffRegList pinned = {}) const {
+ LiftoffRegList available_regs = candidates & ~used_registers & ~pinned;
+ return available_regs.GetFirstRegSet();
}
- void inc_used(Register reg) {
- used_registers |= reg.bit();
- DCHECK_GE(kMaxRegisterCode, reg.code());
- ++register_use_count[reg.code()];
+ void inc_used(LiftoffRegister reg) {
+ used_registers.set(reg);
+ DCHECK_GT(kMaxInt, register_use_count[reg.liftoff_code()]);
+ ++register_use_count[reg.liftoff_code()];
}
// Returns whether this was the last use.
- bool dec_used(Register reg) {
+ bool dec_used(LiftoffRegister reg) {
DCHECK(is_used(reg));
- DCHECK_GE(kMaxRegisterCode, reg.code());
- if (--register_use_count[reg.code()] == 0) {
- used_registers &= ~reg.bit();
- return true;
- }
- return false;
+ int code = reg.liftoff_code();
+ DCHECK_LT(0, register_use_count[code]);
+ if (--register_use_count[code] != 0) return false;
+ used_registers.clear(reg);
+ return true;
}
- bool is_used(Register reg) const {
- DCHECK_GE(kMaxRegisterCode, reg.code());
- bool used = used_registers & reg.bit();
- DCHECK_EQ(used, register_use_count[reg.code()] != 0);
+ bool is_used(LiftoffRegister reg) const {
+ bool used = used_registers.has(reg);
+ DCHECK_EQ(used, register_use_count[reg.liftoff_code()] != 0);
return used;
}
- bool is_free(Register reg) const { return !is_used(reg); }
+ uint32_t get_use_count(LiftoffRegister reg) const {
+ DCHECK_GT(arraysize(register_use_count), reg.liftoff_code());
+ return register_use_count[reg.liftoff_code()];
+ }
+
+ void clear_used(LiftoffRegister reg) {
+ register_use_count[reg.liftoff_code()] = 0;
+ used_registers.clear(reg);
+ }
- uint32_t stack_height() const {
- return static_cast<uint32_t>(stack_state.size());
+ bool is_free(LiftoffRegister reg) const { return !is_used(reg); }
+
+ void reset_used_registers() {
+ used_registers = {};
+ memset(register_use_count, 0, sizeof(register_use_count));
+ }
+
+ LiftoffRegister GetNextSpillReg(LiftoffRegList candidates,
+ LiftoffRegList pinned = {}) {
+ LiftoffRegList unpinned = candidates.MaskOut(pinned);
+ DCHECK(!unpinned.is_empty());
+ // This method should only be called if none of the candidates is free.
+ DCHECK(unpinned.MaskOut(used_registers).is_empty());
+ LiftoffRegList unspilled = unpinned.MaskOut(last_spilled_regs);
+ if (unspilled.is_empty()) {
+ unspilled = unpinned;
+ last_spilled_regs = {};
+ }
+ LiftoffRegister reg = unspilled.GetFirstRegSet();
+ last_spilled_regs.set(reg);
+ return reg;
}
- Register GetNextSpillReg(PinnedRegisterScope scope = {}) {
- uint32_t mask = (1u << (last_spilled_reg.code() + 1)) - 1;
- RegList unpinned_regs = kGpCacheRegs & ~scope.pinned_regs();
- DCHECK_NE(0, unpinned_regs);
- RegList remaining_regs = unpinned_regs & ~mask;
- if (!remaining_regs) remaining_regs = unpinned_regs;
- last_spilled_reg =
- Register::from_code(base::bits::CountTrailingZeros(remaining_regs));
- return last_spilled_reg;
+ // TODO(clemensh): Don't copy the full parent state (this makes us N^2).
+ void InitMerge(const CacheState& source, uint32_t num_locals,
+ uint32_t arity);
+
+ void Steal(CacheState& source);
+
+ void Split(const CacheState& source);
+
+ uint32_t stack_height() const {
+ return static_cast<uint32_t>(stack_state.size());
}
private:
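Reviewer note: LiftoffRegList, used throughout these hunks, behaves like a small bitset keyed by "liftoff codes" that put gp and fp registers in one numbering space: set/clear/has on single registers, intersection and MaskOut on whole sets, and GetFirstRegSet to pick the lowest available register. A portable sketch of such a bitset (assumed semantics; V8's real class additionally encodes the gp/fp split):

#include <cassert>
#include <cstdint>
#include <cstdio>

class RegList32 {
 public:
  constexpr RegList32() : bits_(0) {}
  void set(int code) { bits_ |= 1u << code; }
  void clear(int code) { bits_ &= ~(1u << code); }
  bool has(int code) const { return (bits_ >> code) & 1u; }
  bool is_empty() const { return bits_ == 0; }
  RegList32 MaskOut(RegList32 other) const {
    RegList32 r;
    r.bits_ = bits_ & ~other.bits_;
    return r;
  }
  RegList32 operator&(RegList32 other) const {
    RegList32 r;
    r.bits_ = bits_ & other.bits_;
    return r;
  }
  int GetFirstRegSet() const {  // lowest set bit = lowest register code
    assert(bits_ != 0);
    int code = 0;
    while (!((bits_ >> code) & 1u)) ++code;
    return code;
  }

 private:
  uint32_t bits_;
};

int main() {
  RegList32 used, pinned, cache;
  for (int c = 0; c < 6; ++c) cache.set(c);  // registers usable for caching
  used.set(0);
  used.set(2);
  pinned.set(1);
  RegList32 available = cache.MaskOut(used).MaskOut(pinned);
  std::printf("first free register: r%d\n", available.GetFirstRegSet());  // r3
}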
@@ -233,27 +218,39 @@ class LiftoffAssembler : public TurboAssembler {
explicit LiftoffAssembler(Isolate* isolate);
~LiftoffAssembler();
- Register GetBinaryOpTargetRegister(RegClass, PinnedRegisterScope = {});
+ LiftoffRegister GetBinaryOpTargetRegister(RegClass,
+ LiftoffRegList pinned = {});
+ LiftoffRegister GetUnaryOpTargetRegister(RegClass,
+ LiftoffRegList pinned = {});
- Register PopToRegister(RegClass, PinnedRegisterScope = {});
+ LiftoffRegister PopToRegister(RegClass, LiftoffRegList pinned = {});
- void PushRegister(ValueType type, Register reg) {
+ void PushRegister(ValueType type, LiftoffRegister reg) {
+ DCHECK_EQ(reg_class_for(type), reg.reg_class());
cache_state_.inc_used(reg);
cache_state_.stack_state.emplace_back(type, reg);
}
- uint32_t GetNumUses(Register reg) const {
- DCHECK_GE(CacheState::kMaxRegisterCode, reg.code());
- return cache_state_.register_use_count[reg.code()];
+ void SpillRegister(LiftoffRegister);
+
+ uint32_t GetNumUses(LiftoffRegister reg) {
+ return cache_state_.get_use_count(reg);
}
- Register GetUnusedRegister(RegClass rc,
- PinnedRegisterScope pinned_regs = {}) {
- DCHECK_EQ(kGpReg, rc);
- if (cache_state_.has_unused_register(pinned_regs)) {
- return cache_state_.unused_register(pinned_regs);
+ // Get an unused register for class {rc}, potentially spilling to free one.
+ LiftoffRegister GetUnusedRegister(RegClass rc, LiftoffRegList pinned = {}) {
+ DCHECK(rc == kGpReg || rc == kFpReg);
+ LiftoffRegList candidates = GetCacheRegList(rc);
+ return GetUnusedRegister(candidates, pinned);
+ }
+
+ // Get an unused register of {candidates}, potentially spilling to free one.
+ LiftoffRegister GetUnusedRegister(LiftoffRegList candidates,
+ LiftoffRegList pinned = {}) {
+ if (cache_state_.has_unused_register(candidates, pinned)) {
+ return cache_state_.unused_register(candidates, pinned);
}
- return SpillOneRegister(rc, pinned_regs);
+ return SpillOneRegister(candidates, pinned);
}
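Reviewer note: GetUnusedRegister above encodes the allocation policy: take a free register from the candidate set minus pinned registers if one exists, otherwise spill a victim chosen round-robin via last_spilled_regs (see GetNextSpillReg in the previous hunk). A sketch of both halves with bitmask register sets, a toy simplification of the code above:

#include <cstdint>
#include <cstdio>

struct Allocator {
  uint32_t used = 0;
  uint32_t last_spilled = 0;

  int FirstSet(uint32_t bits) {
    int c = 0;
    while (!((bits >> c) & 1u)) ++c;
    return c;
  }

  int GetUnusedRegister(uint32_t candidates, uint32_t pinned) {
    uint32_t available = candidates & ~used & ~pinned;
    if (available != 0) return FirstSet(available);
    return SpillOneRegister(candidates & ~pinned);
  }

  int SpillOneRegister(uint32_t unpinned) {
    // Round robin: avoid registers spilled since the last full rotation.
    uint32_t unspilled = unpinned & ~last_spilled;
    if (unspilled == 0) {  // full rotation done, start over
      unspilled = unpinned;
      last_spilled = 0;
    }
    int victim = FirstSet(unspilled);
    last_spilled |= 1u << victim;
    std::printf("spill r%d to its stack slot(s)\n", victim);
    used &= ~(1u << victim);
    return victim;
  }
};

int main() {
  Allocator a;
  a.used = 0b0011;  // r0 and r1 hold cached values
  int r = a.GetUnusedRegister(/*candidates=*/0b0011, /*pinned=*/0);
  a.used |= 1u << r;  // the caller now caches a value in r
  std::printf("got r%d\n", r);
  r = a.GetUnusedRegister(0b0011, 0);
  std::printf("got r%d\n", r);  // round robin picks a different victim
}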
void DropStackSlot(VarState* slot) {
@@ -271,40 +268,102 @@ class LiftoffAssembler : public TurboAssembler {
void Spill(uint32_t index);
void SpillLocals();
+ void SpillAllRegisters();
+
+ // Load parameters into the right registers / stack slots for the call.
+ void PrepareCall(wasm::FunctionSig*, compiler::CallDescriptor*);
+ // Process return values of the call.
+ void FinishCall(wasm::FunctionSig*, compiler::CallDescriptor*);
////////////////////////////////////
// Platform-specific part. //
////////////////////////////////////
- inline void ReserveStackSpace(uint32_t);
+ inline void ReserveStackSpace(uint32_t bytes);
- inline void LoadConstant(Register, WasmValue);
+ inline void LoadConstant(LiftoffRegister, WasmValue);
inline void LoadFromContext(Register dst, uint32_t offset, int size);
inline void SpillContext(Register context);
- inline void Load(Register dst, Register src_addr, uint32_t offset_imm,
- int size, PinnedRegisterScope = {});
- inline void Store(Register dst_addr, uint32_t offset_imm, Register src,
- int size, PinnedRegisterScope = {});
- inline void LoadCallerFrameSlot(Register, uint32_t caller_slot_idx);
+ inline void FillContextInto(Register dst);
+ inline void Load(LiftoffRegister dst, Register src_addr, Register offset_reg,
+ uint32_t offset_imm, LoadType type, LiftoffRegList pinned,
+ uint32_t* protected_load_pc = nullptr);
+ inline void Store(Register dst_addr, Register offset_reg, uint32_t offset_imm,
+ LiftoffRegister src, StoreType type, LiftoffRegList pinned,
+ uint32_t* protected_store_pc = nullptr);
+ inline void LoadCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx);
inline void MoveStackValue(uint32_t dst_index, uint32_t src_index);
- inline void MoveToReturnRegister(Register);
+ inline void MoveToReturnRegister(LiftoffRegister);
+ // TODO(clemensh): Pass the type to {Move}, to emit more efficient code.
+ inline void Move(LiftoffRegister dst, LiftoffRegister src);
- inline void Spill(uint32_t index, Register);
+ inline void Spill(uint32_t index, LiftoffRegister);
inline void Spill(uint32_t index, WasmValue);
- inline void Fill(Register, uint32_t index);
+ inline void Fill(LiftoffRegister, uint32_t index);
+ // i32 binops.
inline void emit_i32_add(Register dst, Register lhs, Register rhs);
inline void emit_i32_sub(Register dst, Register lhs, Register rhs);
inline void emit_i32_mul(Register dst, Register lhs, Register rhs);
inline void emit_i32_and(Register dst, Register lhs, Register rhs);
inline void emit_i32_or(Register dst, Register lhs, Register rhs);
inline void emit_i32_xor(Register dst, Register lhs, Register rhs);
+ inline void emit_i32_shl(Register dst, Register lhs, Register rhs);
+ inline void emit_i32_sar(Register dst, Register lhs, Register rhs);
+ inline void emit_i32_shr(Register dst, Register lhs, Register rhs);
+
+ // i32 unops.
+ inline bool emit_i32_eqz(Register dst, Register src);
+ inline bool emit_i32_clz(Register dst, Register src);
+ inline bool emit_i32_ctz(Register dst, Register src);
+ inline bool emit_i32_popcnt(Register dst, Register src);
+
+ inline void emit_ptrsize_add(Register dst, Register lhs, Register rhs);
+
+ inline void emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs);
+ inline void emit_f32_sub(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs);
+ inline void emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs);
+
+ inline void emit_i32_test(Register);
+ inline void emit_i32_compare(Register, Register);
+ inline void emit_jump(Label*);
+ inline void emit_cond_jump(Condition, Label*);
- inline void JumpIfZero(Register, Label*);
+ inline void StackCheck(Label* ool_code);
- // Platform-specific constant.
- static constexpr RegList kGpCacheRegs = kLiftoffAssemblerGpCacheRegs;
+ inline void CallTrapCallbackForTesting();
+
+ inline void AssertUnreachable(AbortReason reason);
+
+ // Push a value to the stack (will become a caller frame slot).
+ inline void PushCallerFrameSlot(const VarState& src, uint32_t src_index);
+ inline void PushCallerFrameSlot(LiftoffRegister reg);
+ inline void PushRegisters(LiftoffRegList);
+ inline void PopRegisters(LiftoffRegList);
+
+ inline void DropStackSlotsAndRet(uint32_t num_stack_slots);
+
+ // Push arguments on the stack (in the caller frame), then align the stack.
+  // The address of each argument can afterwards be obtained via
+  // {SetCCallRegParamAddr} / {SetCCallStackParamAddr}; arguments occupy
+  // pointer-sized slots.
+ inline void PrepareCCall(uint32_t num_params, const Register* args);
+ inline void SetCCallRegParamAddr(Register dst, uint32_t param_idx,
+ uint32_t num_params);
+ inline void SetCCallStackParamAddr(uint32_t stack_param_idx,
+ uint32_t param_idx, uint32_t num_params);
+ inline void CallC(ExternalReference ext_ref, uint32_t num_params);
+
+ inline void CallNativeWasmCode(Address addr);
+
+ inline void CallRuntime(Zone* zone, Runtime::FunctionId fid);
+
+ // Reserve space in the current frame, store address to space in {addr}.
+ inline void AllocateStackSlot(Register addr, uint32_t size);
+ inline void DeallocateStackSlot(uint32_t size);
////////////////////////////////////
// End of platform-specific part. //
@@ -314,7 +373,6 @@ class LiftoffAssembler : public TurboAssembler {
void set_num_locals(uint32_t num_locals);
uint32_t GetTotalFrameSlotCount() const;
- size_t GetSafepointTableOffset() const { return 0; }
ValueType local_type(uint32_t index) {
DCHECK_GT(num_locals_, index);
@@ -332,12 +390,7 @@ class LiftoffAssembler : public TurboAssembler {
CacheState* cache_state() { return &cache_state_; }
private:
- static_assert(
- base::bits::CountPopulation(kGpCacheRegs) >= 2,
- "We need at least two cache registers to execute binary operations");
-
uint32_t num_locals_ = 0;
- uint32_t stack_space_ = 0;
static constexpr uint32_t kInlineLocalTypes = 8;
union {
ValueType local_types_[kInlineLocalTypes];
@@ -347,9 +400,12 @@ class LiftoffAssembler : public TurboAssembler {
"Reconsider this inlining if ValueType gets bigger");
CacheState cache_state_;
- Register SpillOneRegister(RegClass, PinnedRegisterScope = {});
+ LiftoffRegister SpillOneRegister(LiftoffRegList candidates,
+ LiftoffRegList pinned);
};
+std::ostream& operator<<(std::ostream& os, LiftoffAssembler::VarState);
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index a0aea7503a..255ee0347e 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -5,11 +5,13 @@
#include "src/wasm/baseline/liftoff-assembler.h"
#include "src/assembler-inl.h"
+#include "src/base/optional.h"
#include "src/compiler/linkage.h"
#include "src/compiler/wasm-compiler.h"
#include "src/counters.h"
#include "src/macro-assembler-inl.h"
#include "src/wasm/function-body-decoder-impl.h"
+#include "src/wasm/memory-tracing.h"
#include "src/wasm/wasm-objects.h"
#include "src/wasm/wasm-opcodes.h"
@@ -18,7 +20,7 @@ namespace internal {
namespace wasm {
constexpr auto kRegister = LiftoffAssembler::VarState::kRegister;
-constexpr auto kConstant = LiftoffAssembler::VarState::kConstant;
+constexpr auto kI32Const = LiftoffAssembler::VarState::kI32Const;
constexpr auto kStack = LiftoffAssembler::VarState::kStack;
namespace {
@@ -37,9 +39,13 @@ namespace {
class MovableLabel {
public:
Label* get() { return label_.get(); }
+ MovableLabel() : MovableLabel(new Label()) {}
+
+ static MovableLabel None() { return MovableLabel(nullptr); }
private:
- std::unique_ptr<Label> label_ = base::make_unique<Label>();
+ std::unique_ptr<Label> label_;
+ explicit MovableLabel(Label* label) : label_(label) {}
};
#else
// On all other platforms, just store the Label directly.
@@ -47,6 +53,8 @@ class MovableLabel {
public:
Label* get() { return &label_; }
+ static MovableLabel None() { return MovableLabel(); }
+
private:
Label label_;
};
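Reviewer note: the unique_ptr indirection in the DEBUG variant exists because emitted jumps hold the Label's address until it is bound; if the Control entries containing the label move (for example when a vector reallocates), an inline Label would dangle. A toy demonstration of why the heap-allocated Label stays valid:

#include <memory>
#include <vector>

struct Label { bool bound = false; };
struct Fixup { Label* target; };  // a jump records the label's address

struct MovableLabel {
  Label* get() { return label_.get(); }

 private:
  std::unique_ptr<Label> label_ = std::make_unique<Label>();
};

int main() {
  std::vector<MovableLabel> controls;
  controls.emplace_back();
  Fixup f{controls[0].get()};  // address recorded by an emitted jump
  for (int i = 0; i < 100; ++i) controls.emplace_back();  // forces realloc
  f.target->bound = true;  // still valid: the heap Label never moved
  return f.target->bound ? 0 : 1;
}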
@@ -62,18 +70,68 @@ class LiftoffCompiler {
using Value = ValueBase;
+ struct ElseState {
+ MovableLabel label;
+ LiftoffAssembler::CacheState state;
+ };
+
struct Control : public ControlWithNamedConstructors<Control, Value> {
MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(Control);
+ std::unique_ptr<ElseState> else_state;
LiftoffAssembler::CacheState label_state;
MovableLabel label;
};
using Decoder = WasmFullDecoder<validate, LiftoffCompiler>;
+ struct OutOfLineCode {
+ MovableLabel label;
+ MovableLabel continuation;
+ Builtins::Name builtin;
+ wasm::WasmCodePosition position;
+ LiftoffRegList regs_to_save;
+ uint32_t pc; // for trap handler.
+
+ // Named constructors:
+ static OutOfLineCode Trap(Builtins::Name b, wasm::WasmCodePosition pos,
+ uint32_t pc) {
+ return {{}, {}, b, pos, {}, pc};
+ }
+ static OutOfLineCode StackCheck(wasm::WasmCodePosition pos,
+ LiftoffRegList regs) {
+ return {{}, MovableLabel::None(), Builtins::kWasmStackGuard, pos, regs,
+ 0};
+ }
+ };
+
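Reviewer note: OutOfLineCode captures everything needed to emit a slow path later: its label, the builtin to call, the source position, and the registers to save. The main pass only records an entry and emits a conditional jump; FinishFunction then materializes all slow paths in one batch, keeping the hot path short. A string-based sketch of the pattern (fake "assembly", not V8's emitter):

#include <cstdio>
#include <string>
#include <vector>

struct OutOfLine {
  std::string label;
  std::string builtin;
};

struct Compiler {
  std::vector<OutOfLine> ool;

  void EmitBoundsCheck(int id) {
    char buf[32];
    std::snprintf(buf, sizeof buf, "ool_%d", id);
    ool.push_back({buf, "ThrowWasmTrapMemOutOfBounds"});
    // Hot path: only the comparison and a forward jump.
    std::printf("  cmp index, limit\n  jae %s\n", buf);
  }

  void FinishFunction() {  // emit all deferred slow paths together
    for (const OutOfLine& o : ool) {
      std::printf("%s:\n  call %s\n", o.label.c_str(), o.builtin.c_str());
    }
  }
};

int main() {
  Compiler c;
  c.EmitBoundsCheck(0);
  c.EmitBoundsCheck(1);
  c.FinishFunction();
}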
LiftoffCompiler(LiftoffAssembler* liftoff_asm,
- compiler::CallDescriptor* call_desc, compiler::ModuleEnv* env)
- : asm_(liftoff_asm), call_desc_(call_desc), env_(env) {}
+ compiler::CallDescriptor* call_desc, compiler::ModuleEnv* env,
+ compiler::RuntimeExceptionSupport runtime_exception_support,
+ SourcePositionTableBuilder* source_position_table_builder,
+ std::vector<trap_handler::ProtectedInstructionData>*
+ protected_instructions,
+ Zone* compilation_zone, std::unique_ptr<Zone>* codegen_zone)
+ : asm_(liftoff_asm),
+ call_desc_(call_desc),
+ env_(env),
+ min_size_(env_->module->initial_pages * wasm::kWasmPageSize),
+ max_size_((env_->module->has_maximum_pages
+ ? env_->module->maximum_pages
+ : wasm::kV8MaxWasmMemoryPages) *
+ wasm::kWasmPageSize),
+ runtime_exception_support_(runtime_exception_support),
+ source_position_table_builder_(source_position_table_builder),
+ protected_instructions_(protected_instructions),
+ compilation_zone_(compilation_zone),
+ codegen_zone_(codegen_zone),
+ safepoint_table_builder_(compilation_zone_) {
+ // Check for overflow in max_size_.
+ DCHECK_EQ(max_size_, uint64_t{env_->module->has_maximum_pages
+ ? env_->module->maximum_pages
+ : wasm::kV8MaxWasmMemoryPages} *
+ wasm::kWasmPageSize);
+ }
bool ok() const { return ok_; }
@@ -84,16 +142,27 @@ class LiftoffCompiler {
BindUnboundLabels(decoder);
}
+ int GetSafepointTableOffset() const {
+ return safepoint_table_builder_.GetCodeOffset();
+ }
+
void BindUnboundLabels(Decoder* decoder) {
-#ifndef DEBUG
- return;
-#endif
+#ifdef DEBUG
// Bind all labels now, otherwise their destructor will fire a DCHECK error
    // if they were referenced before.
for (uint32_t i = 0, e = decoder->control_depth(); i < e; ++i) {
- Label* label = decoder->control_at(i)->label.get();
+ Control* c = decoder->control_at(i);
+ Label* label = c->label.get();
if (!label->is_bound()) __ bind(label);
+ if (c->else_state) {
+ Label* else_label = c->else_state->label.get();
+ if (!else_label->is_bound()) __ bind(else_label);
+ }
+ }
+ for (auto& ool : out_of_line_code_) {
+ if (!ool.label.get()->is_bound()) __ bind(ool.label.get());
}
+#endif
}
void CheckStackSizeLimit(Decoder* decoder) {
@@ -112,21 +181,75 @@ class LiftoffCompiler {
}
}
+ void ProcessParameter(uint32_t param_idx, uint32_t input_location) {
+ ValueType type = __ local_type(param_idx);
+ RegClass rc = reg_class_for(type);
+ compiler::LinkageLocation param_loc =
+ call_desc_->GetInputLocation(input_location);
+ if (param_loc.IsRegister()) {
+ DCHECK(!param_loc.IsAnyRegister());
+ int reg_code = param_loc.AsRegister();
+ LiftoffRegister reg =
+ rc == kGpReg ? LiftoffRegister(Register::from_code(reg_code))
+ : LiftoffRegister(DoubleRegister::from_code(reg_code));
+ LiftoffRegList cache_regs =
+ rc == kGpReg ? kGpCacheRegList : kFpCacheRegList;
+ if (cache_regs.has(reg)) {
+ // This is a cache register, just use it.
+ __ PushRegister(type, reg);
+ return;
+ }
+ // Move to a cache register.
+ LiftoffRegister cache_reg = __ GetUnusedRegister(rc);
+ __ Move(cache_reg, reg);
+      __ PushRegister(type, cache_reg);
+ return;
+ }
+ if (param_loc.IsCallerFrameSlot()) {
+ LiftoffRegister tmp_reg = __ GetUnusedRegister(rc);
+ __ LoadCallerFrameSlot(tmp_reg, -param_loc.AsCallerFrameSlot());
+ __ PushRegister(type, tmp_reg);
+ return;
+ }
+ UNREACHABLE();
+ }
+
+ void StackCheck(wasm::WasmCodePosition position) {
+ if (FLAG_wasm_no_stack_checks || !runtime_exception_support_) return;
+ out_of_line_code_.push_back(
+ OutOfLineCode::StackCheck(position, __ cache_state()->used_registers));
+ OutOfLineCode& ool = out_of_line_code_.back();
+ __ StackCheck(ool.label.get());
+ __ bind(ool.continuation.get());
+ }
+
void StartFunctionBody(Decoder* decoder, Control* block) {
if (!kLiftoffAssemblerImplementedOnThisPlatform) {
unsupported(decoder, "platform");
return;
}
__ EnterFrame(StackFrame::WASM_COMPILED);
- __ ReserveStackSpace(__ GetTotalFrameSlotCount());
+ __ set_has_frame(true);
+ __ ReserveStackSpace(LiftoffAssembler::kStackSlotSize *
+ __ GetTotalFrameSlotCount());
// Parameter 0 is the wasm context.
uint32_t num_params =
static_cast<uint32_t>(call_desc_->ParameterCount()) - 1;
for (uint32_t i = 0; i < __ num_locals(); ++i) {
- // We can currently only handle i32 parameters and locals.
- if (__ local_type(i) != kWasmI32) {
- unsupported(decoder, "non-i32 param/local");
- return;
+ switch (__ local_type(i)) {
+ case kWasmI32:
+ case kWasmF32:
+ // supported.
+ break;
+ case kWasmI64:
+ unsupported(decoder, "i64 param/local");
+ return;
+ case kWasmF64:
+ unsupported(decoder, "f64 param/local");
+ return;
+ default:
+ unsupported(decoder, "exotic param/local");
+ return;
}
}
// Input 0 is the call target, the context is at 1.
@@ -140,87 +263,153 @@ class LiftoffCompiler {
__ SpillContext(context_reg);
uint32_t param_idx = 0;
for (; param_idx < num_params; ++param_idx) {
- constexpr uint32_t kFirstActualParamIndex = kContextParameterIndex + 1;
- ValueType type = __ local_type(param_idx);
- compiler::LinkageLocation param_loc =
- call_desc_->GetInputLocation(param_idx + kFirstActualParamIndex);
- if (param_loc.IsRegister()) {
- DCHECK(!param_loc.IsAnyRegister());
- Register param_reg = Register::from_code(param_loc.AsRegister());
- if (param_reg.bit() & __ kGpCacheRegs) {
- // This is a cache register, just use it.
- __ PushRegister(type, param_reg);
- } else {
- // No cache register. Push to the stack.
- __ Spill(param_idx, param_reg);
- __ cache_state()->stack_state.emplace_back(type);
- }
- } else if (param_loc.IsCallerFrameSlot()) {
- Register tmp_reg = __ GetUnusedRegister(reg_class_for(type));
- __ LoadCallerFrameSlot(tmp_reg, -param_loc.AsCallerFrameSlot());
- __ PushRegister(type, tmp_reg);
- } else {
- UNIMPLEMENTED();
- }
+ constexpr int kFirstActualParameterIndex = kContextParameterIndex + 1;
+ ProcessParameter(param_idx, param_idx + kFirstActualParameterIndex);
}
+ // Set to a gp register, to mark this uninitialized.
+ LiftoffRegister zero_double_reg(Register::from_code<0>());
+ DCHECK(zero_double_reg.is_gp());
for (; param_idx < __ num_locals(); ++param_idx) {
ValueType type = decoder->GetLocalType(param_idx);
switch (type) {
case kWasmI32:
__ cache_state()->stack_state.emplace_back(kWasmI32, uint32_t{0});
break;
+ case kWasmF32:
+ if (zero_double_reg.is_gp()) {
+ // Note: This might spill one of the registers used to hold
+ // parameters.
+ zero_double_reg = __ GetUnusedRegister(kFpReg);
+ __ LoadConstant(zero_double_reg, WasmValue(0.f));
+ }
+ __ PushRegister(kWasmF32, zero_double_reg);
+ break;
default:
UNIMPLEMENTED();
}
}
block->label_state.stack_base = __ num_locals();
+
+ // The function-prologue stack check is associated with position 0, which
+ // is never a position of any instruction in the function.
+ StackCheck(0);
+
DCHECK_EQ(__ num_locals(), param_idx);
DCHECK_EQ(__ num_locals(), __ cache_state()->stack_height());
CheckStackSizeLimit(decoder);
}
- void FinishFunction(Decoder* decoder) {}
+ void GenerateOutOfLineCode(OutOfLineCode& ool) {
+ __ bind(ool.label.get());
+ const bool is_stack_check = ool.builtin == Builtins::kWasmStackGuard;
+ if (!runtime_exception_support_) {
+ // We cannot test calls to the runtime in cctest/test-run-wasm.
+ // Therefore we emit a call to C here instead of a call to the runtime.
+ // In this mode, we never generate stack checks.
+ DCHECK(!is_stack_check);
+ __ CallTrapCallbackForTesting();
+ __ LeaveFrame(StackFrame::WASM_COMPILED);
+ __ Ret();
+ return;
+ }
+
+ if (!is_stack_check && env_->use_trap_handler) {
+ uint32_t pc = static_cast<uint32_t>(__ pc_offset());
+ DCHECK_EQ(pc, __ pc_offset());
+ protected_instructions_->emplace_back(
+ trap_handler::ProtectedInstructionData{ool.pc, pc});
+ }
+
+ if (!ool.regs_to_save.is_empty()) __ PushRegisters(ool.regs_to_save);
+
+ source_position_table_builder_->AddPosition(
+ __ pc_offset(), SourcePosition(ool.position), false);
+ __ Call(__ isolate()->builtins()->builtin_handle(ool.builtin),
+ RelocInfo::CODE_TARGET);
+ safepoint_table_builder_.DefineSafepoint(asm_, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ DCHECK_EQ(ool.continuation.get()->is_bound(), is_stack_check);
+ if (!ool.regs_to_save.is_empty()) __ PopRegisters(ool.regs_to_save);
+ if (is_stack_check) {
+ __ emit_jump(ool.continuation.get());
+ } else {
+ __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
+ }
+ }
+
+ void FinishFunction(Decoder* decoder) {
+ for (OutOfLineCode& ool : out_of_line_code_) {
+ GenerateOutOfLineCode(ool);
+ }
+ safepoint_table_builder_.Emit(asm_, __ GetTotalFrameSlotCount());
+ }
void OnFirstError(Decoder* decoder) {
ok_ = false;
BindUnboundLabels(decoder);
}
- void Block(Decoder* decoder, Control* new_block) {
- // Note: This is called for blocks and loops.
- DCHECK_EQ(new_block, decoder->control_at(0));
+ void NextInstruction(Decoder* decoder, WasmOpcode) {
+ TraceCacheState(decoder);
+ }
+
+ void Block(Decoder* decoder, Control* block) {
+ block->label_state.stack_base = __ cache_state()->stack_height();
+ }
- new_block->label_state.stack_base = __ cache_state()->stack_height();
+ void Loop(Decoder* decoder, Control* loop) {
+ loop->label_state.stack_base = __ cache_state()->stack_height();
- if (new_block->is_loop()) {
- // Before entering a loop, spill all locals to the stack, in order to free
- // the cache registers, and to avoid unnecessarily reloading stack values
- // into registers at branches.
- // TODO(clemensh): Come up with a better strategy here, involving
- // pre-analysis of the function.
- __ SpillLocals();
+ // Before entering a loop, spill all locals to the stack, in order to free
+ // the cache registers, and to avoid unnecessarily reloading stack values
+ // into registers at branches.
+ // TODO(clemensh): Come up with a better strategy here, involving
+ // pre-analysis of the function.
+ __ SpillLocals();
- // Loop labels bind at the beginning of the block, block labels at the
- // end.
- __ bind(new_block->label.get());
+ // Loop labels bind at the beginning of the block.
+ __ bind(loop->label.get());
- new_block->label_state.Split(*__ cache_state());
- }
- }
+ // Save the current cache state for the merge when jumping to this loop.
+ loop->label_state.Split(*__ cache_state());
- void Loop(Decoder* decoder, Control* block) { Block(decoder, block); }
+ // Execute a stack check in the loop header.
+ StackCheck(decoder->position());
+ }
void Try(Decoder* decoder, Control* block) { unsupported(decoder, "try"); }
+
void If(Decoder* decoder, const Value& cond, Control* if_block) {
- unsupported(decoder, "if");
+ DCHECK_EQ(if_block, decoder->control_at(0));
+ DCHECK(if_block->is_if());
+
+ if (if_block->start_merge.arity > 0 || if_block->end_merge.arity > 1)
+ return unsupported(decoder, "multi-value if");
+
+ // Allocate the else state.
+ if_block->else_state = base::make_unique<ElseState>();
+
+ // Test the condition, jump to else if zero.
+ Register value = __ PopToRegister(kGpReg).gp();
+ __ emit_i32_test(value);
+ __ emit_cond_jump(kEqual, if_block->else_state->label.get());
+
+ if_block->label_state.stack_base = __ cache_state()->stack_height();
+ // Store the state (after popping the value) for executing the else branch.
+ if_block->else_state->state.Split(*__ cache_state());
}
void FallThruTo(Decoder* decoder, Control* c) {
if (c->end_merge.reached) {
__ MergeFullStackWith(c->label_state);
+ } else if (c->is_onearmed_if()) {
+ c->label_state.InitMerge(*__ cache_state(), __ num_locals(),
+ c->br_merge()->arity);
+ __ MergeFullStackWith(c->label_state);
} else {
c->label_state.Split(*__ cache_state());
}
+ TraceCacheState(decoder);
}
void PopControl(Decoder* decoder, Control* c) {
@@ -234,36 +423,148 @@ class LiftoffCompiler {
void EndControl(Decoder* decoder, Control* c) {}
+ void GenerateCCall(Register res_reg, uint32_t num_args,
+ const Register* arg_regs, ExternalReference ext_ref) {
+ static constexpr int kNumReturns = 1;
+ static constexpr int kMaxArgs = 2;
+ static constexpr MachineType kReps[]{
+ MachineType::Uint32(), MachineType::Pointer(), MachineType::Pointer()};
+ static_assert(arraysize(kReps) == kNumReturns + kMaxArgs, "mismatch");
+ DCHECK_LE(num_args, kMaxArgs);
+
+ MachineSignature sig(kNumReturns, num_args, kReps);
+ compiler::CallDescriptor* desc =
+ compiler::Linkage::GetSimplifiedCDescriptor(compilation_zone_, &sig);
+
+ // Before making a call, spill all cache registers.
+ __ SpillAllRegisters();
+
+ // Store arguments on our stack, then align the stack for calling to C.
+ uint32_t num_params = static_cast<uint32_t>(desc->ParameterCount());
+ __ PrepareCCall(num_params, arg_regs);
+
+ // Set parameters (in sp[0], sp[8], ...).
+ uint32_t num_stack_params = 0;
+ for (uint32_t param = 0; param < num_params; ++param) {
+ constexpr size_t kInputShift = 1; // Input 0 is the call target.
+
+ compiler::LinkageLocation loc =
+ desc->GetInputLocation(param + kInputShift);
+ if (loc.IsRegister()) {
+ Register reg = Register::from_code(loc.AsRegister());
+ // Load address of that parameter to the register.
+ __ SetCCallRegParamAddr(reg, param, num_params);
+ } else {
+ DCHECK(loc.IsCallerFrameSlot());
+ __ SetCCallStackParamAddr(num_stack_params, param, num_params);
+ ++num_stack_params;
+ }
+ }
+
+ // Now execute the call.
+ __ CallC(ext_ref, num_params);
+
+ // Load return value.
+ compiler::LinkageLocation return_loc = desc->GetReturnLocation(0);
+ DCHECK(return_loc.IsRegister());
+ Register return_reg = Register::from_code(return_loc.AsRegister());
+ if (return_reg != res_reg) {
+ __ Move(LiftoffRegister(res_reg), LiftoffRegister(return_reg));
+ }
+ }
+
+ void I32UnOp(bool (LiftoffAssembler::*emit_fn)(Register, Register),
+ ExternalReference (*fallback_fn)(Isolate*)) {
+ LiftoffRegList pinned;
+ LiftoffRegister dst_reg = pinned.set(__ GetUnaryOpTargetRegister(kGpReg));
+ LiftoffRegister src_reg = pinned.set(__ PopToRegister(kGpReg, pinned));
+ if (!emit_fn || !(asm_->*emit_fn)(dst_reg.gp(), src_reg.gp())) {
+ ExternalReference ext_ref = fallback_fn(asm_->isolate());
+ Register args[] = {src_reg.gp()};
+ GenerateCCall(dst_reg.gp(), arraysize(args), args, ext_ref);
+ }
+ __ PushRegister(kWasmI32, dst_reg);
+ }
+
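Reviewer note: I32UnOp above tries the platform emitter first; the emit function returns false when the target lacks a suitable instruction (popcnt being the canonical case), and the operation then falls back to a C helper through GenerateCCall. A sketch of that two-tier dispatch with toy function pointers standing in for the assembler and the external reference:

#include <cstdint>
#include <cstdio>

using EmitFn = bool (*)(uint32_t* dst, uint32_t src);
using CFallback = uint32_t (*)(uint32_t);

bool EmitPopcntNative(uint32_t* dst, uint32_t src) {
  (void)dst;
  (void)src;
  return false;  // pretend this target has no popcnt instruction
}

uint32_t PopcntHelper(uint32_t v) {  // the out-of-line C implementation
  uint32_t n = 0;
  for (; v != 0; v &= v - 1) ++n;
  return n;
}

uint32_t CompileUnOp(EmitFn emit, CFallback fallback, uint32_t src) {
  uint32_t dst = 0;
  if (emit && emit(&dst, src)) return dst;  // fast path: native instruction
  return fallback(src);                     // slow path: call into C
}

int main() {
  std::printf("popcnt(0xF0F0) = %u\n",
              CompileUnOp(EmitPopcntNative, PopcntHelper, 0xF0F0));  // 8
}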
void UnOp(Decoder* decoder, WasmOpcode opcode, FunctionSig*,
const Value& value, Value* result) {
- unsupported(decoder, "unary operation");
+#define CASE_UNOP(opcode, type, fn, ext_ref_fn) \
+ case WasmOpcode::kExpr##opcode: \
+ type##UnOp(&LiftoffAssembler::emit_##fn, ext_ref_fn); \
+ break;
+ switch (opcode) {
+ CASE_UNOP(I32Eqz, I32, i32_eqz, nullptr)
+ CASE_UNOP(I32Clz, I32, i32_clz, nullptr)
+ CASE_UNOP(I32Ctz, I32, i32_ctz, nullptr)
+ CASE_UNOP(I32Popcnt, I32, i32_popcnt,
+ &ExternalReference::wasm_word32_popcnt)
+ default:
+ return unsupported(decoder, WasmOpcodes::OpcodeName(opcode));
+ }
+#undef CASE_UNOP
+ }
+
+ void I32BinOp(void (LiftoffAssembler::*emit_fn)(Register, Register,
+ Register)) {
+ LiftoffRegList pinned;
+ LiftoffRegister dst_reg = pinned.set(__ GetBinaryOpTargetRegister(kGpReg));
+ LiftoffRegister rhs_reg = pinned.set(__ PopToRegister(kGpReg, pinned));
+ LiftoffRegister lhs_reg = __ PopToRegister(kGpReg, pinned);
+ (asm_->*emit_fn)(dst_reg.gp(), lhs_reg.gp(), rhs_reg.gp());
+ __ PushRegister(kWasmI32, dst_reg);
+ }
+
+ void I32CCallBinOp(ExternalReference ext_ref) {
+ LiftoffRegList pinned;
+ LiftoffRegister dst_reg = pinned.set(__ GetBinaryOpTargetRegister(kGpReg));
+ LiftoffRegister rhs_reg = pinned.set(__ PopToRegister(kGpReg, pinned));
+ LiftoffRegister lhs_reg = __ PopToRegister(kGpReg, pinned);
+ Register args[] = {lhs_reg.gp(), rhs_reg.gp()};
+ GenerateCCall(dst_reg.gp(), arraysize(args), args, ext_ref);
+ __ PushRegister(kWasmI32, dst_reg);
+ }
+
+ void F32BinOp(void (LiftoffAssembler::*emit_fn)(DoubleRegister,
+ DoubleRegister,
+ DoubleRegister)) {
+ LiftoffRegList pinned;
+ LiftoffRegister target_reg =
+ pinned.set(__ GetBinaryOpTargetRegister(kFpReg));
+ LiftoffRegister rhs_reg = pinned.set(__ PopToRegister(kFpReg, pinned));
+ LiftoffRegister lhs_reg = __ PopToRegister(kFpReg, pinned);
+ (asm_->*emit_fn)(target_reg.fp(), lhs_reg.fp(), rhs_reg.fp());
+ __ PushRegister(kWasmF32, target_reg);
}
void BinOp(Decoder* decoder, WasmOpcode opcode, FunctionSig*,
const Value& lhs, const Value& rhs, Value* result) {
- void (LiftoffAssembler::*emit_fn)(Register, Register, Register);
-#define CASE_EMIT_FN(opcode, fn) \
- case WasmOpcode::kExpr##opcode: \
- emit_fn = &LiftoffAssembler::emit_##fn; \
+#define CASE_BINOP(opcode, type, fn) \
+ case WasmOpcode::kExpr##opcode: \
+ return type##BinOp(&LiftoffAssembler::emit_##fn);
+#define CASE_CCALL_BINOP(opcode, type, ext_ref_fn) \
+ case WasmOpcode::kExpr##opcode: \
+ type##CCallBinOp(ExternalReference::ext_ref_fn(asm_->isolate())); \
break;
switch (opcode) {
- CASE_EMIT_FN(I32Add, i32_add)
- CASE_EMIT_FN(I32Sub, i32_sub)
- CASE_EMIT_FN(I32Mul, i32_mul)
- CASE_EMIT_FN(I32And, i32_and)
- CASE_EMIT_FN(I32Ior, i32_or)
- CASE_EMIT_FN(I32Xor, i32_xor)
+ CASE_BINOP(I32Add, I32, i32_add)
+ CASE_BINOP(I32Sub, I32, i32_sub)
+ CASE_BINOP(I32Mul, I32, i32_mul)
+ CASE_BINOP(I32And, I32, i32_and)
+ CASE_BINOP(I32Ior, I32, i32_or)
+ CASE_BINOP(I32Xor, I32, i32_xor)
+ CASE_BINOP(I32Shl, I32, i32_shl)
+ CASE_BINOP(I32ShrS, I32, i32_sar)
+ CASE_BINOP(I32ShrU, I32, i32_shr)
+ CASE_CCALL_BINOP(I32Rol, I32, wasm_word32_rol)
+ CASE_CCALL_BINOP(I32Ror, I32, wasm_word32_ror)
+ CASE_BINOP(F32Add, F32, f32_add)
+ CASE_BINOP(F32Sub, F32, f32_sub)
+ CASE_BINOP(F32Mul, F32, f32_mul)
default:
return unsupported(decoder, WasmOpcodes::OpcodeName(opcode));
}
-#undef CASE_EMIT_FN
-
- LiftoffAssembler::PinnedRegisterScope pinned_regs;
- Register target_reg = pinned_regs.pin(__ GetBinaryOpTargetRegister(kGpReg));
- Register rhs_reg = pinned_regs.pin(__ PopToRegister(kGpReg, pinned_regs));
- Register lhs_reg = __ PopToRegister(kGpReg, pinned_regs);
- (asm_->*emit_fn)(target_reg, lhs_reg, rhs_reg);
- __ PushRegister(kWasmI32, target_reg);
+#undef CASE_BINOP
+#undef CASE_CCALL_BINOP
}
void I32Const(Decoder* decoder, Value* result, int32_t value) {
@@ -274,9 +575,14 @@ class LiftoffCompiler {
void I64Const(Decoder* decoder, Value* result, int64_t value) {
unsupported(decoder, "i64.const");
}
+
void F32Const(Decoder* decoder, Value* result, float value) {
- unsupported(decoder, "f32.const");
+ LiftoffRegister reg = __ GetUnusedRegister(kFpReg);
+ __ LoadConstant(reg, WasmValue(value));
+ __ PushRegister(kWasmF32, reg);
+ CheckStackSizeLimit(decoder);
}
+
void F64Const(Decoder* decoder, Value* result, double value) {
unsupported(decoder, "f64.const");
}
@@ -295,37 +601,58 @@ class LiftoffCompiler {
}
if (!values.is_empty()) {
if (values.size() > 1) return unsupported(decoder, "multi-return");
- // TODO(clemensh): Handle other types.
- if (values[0].type != kWasmI32)
- return unsupported(decoder, "non-i32 return");
- Register reg = __ PopToRegister(kGpReg);
+ RegClass rc = reg_class_for(values[0].type);
+ LiftoffRegister reg = __ PopToRegister(rc);
__ MoveToReturnRegister(reg);
}
__ LeaveFrame(StackFrame::WASM_COMPILED);
- __ Ret();
+ __ DropStackSlotsAndRet(
+ static_cast<uint32_t>(call_desc_->StackParameterCount()));
}
void GetLocal(Decoder* decoder, Value* result,
const LocalIndexOperand<validate>& operand) {
auto& slot = __ cache_state()->stack_state[operand.index];
+ DCHECK_EQ(slot.type(), operand.type);
switch (slot.loc()) {
case kRegister:
- __ PushRegister(operand.type, slot.reg());
+ __ PushRegister(slot.type(), slot.reg());
break;
- case kConstant:
+ case kI32Const:
__ cache_state()->stack_state.emplace_back(operand.type,
slot.i32_const());
break;
case kStack: {
auto rc = reg_class_for(operand.type);
- Register reg = __ GetUnusedRegister(rc);
+ LiftoffRegister reg = __ GetUnusedRegister(rc);
__ Fill(reg, operand.index);
- __ PushRegister(operand.type, reg);
- } break;
+ __ PushRegister(slot.type(), reg);
+ break;
+ }
}
CheckStackSizeLimit(decoder);
}
+ void SetLocalFromStackSlot(LiftoffAssembler::VarState& dst_slot,
+ uint32_t local_index) {
+ auto& state = *__ cache_state();
+ if (dst_slot.is_reg()) {
+ LiftoffRegister slot_reg = dst_slot.reg();
+ if (state.get_use_count(slot_reg) == 1) {
+ __ Fill(dst_slot.reg(), state.stack_height() - 1);
+ return;
+ }
+ state.dec_used(slot_reg);
+ }
+ ValueType type = dst_slot.type();
+ DCHECK_EQ(type, __ local_type(local_index));
+ RegClass rc = reg_class_for(type);
+ LiftoffRegister dst_reg = __ GetUnusedRegister(rc);
+ __ Fill(dst_reg, __ cache_state()->stack_height() - 1);
+ dst_slot = LiftoffAssembler::VarState(type, dst_reg);
+ __ cache_state()->inc_used(dst_reg);
+ }
+
void SetLocal(uint32_t local_index, bool is_tee) {
auto& state = *__ cache_state();
auto& source_slot = state.stack_state.back();
@@ -336,31 +663,13 @@ class LiftoffCompiler {
target_slot = source_slot;
if (is_tee) state.inc_used(target_slot.reg());
break;
- case kConstant:
+ case kI32Const:
__ DropStackSlot(&target_slot);
target_slot = source_slot;
break;
- case kStack: {
- switch (target_slot.loc()) {
- case kRegister:
- if (state.register_use_count[target_slot.reg().code()] == 1) {
- __ Fill(target_slot.reg(), state.stack_height() - 1);
- break;
- } else {
- state.dec_used(target_slot.reg());
- // and fall through to use a new register.
- }
- case kConstant:
- case kStack: {
- ValueType type = __ local_type(local_index);
- Register target_reg = __ GetUnusedRegister(reg_class_for(type));
- __ Fill(target_reg, state.stack_height() - 1);
- target_slot = LiftoffAssembler::VarState(type, target_reg);
- state.inc_used(target_reg);
- } break;
- }
+ case kStack:
+ SetLocalFromStackSlot(target_slot, local_index);
break;
- }
}
if (!is_tee) __ cache_state()->stack_state.pop_back();
}
@@ -380,31 +689,34 @@ class LiftoffCompiler {
const auto* global = &env_->module->globals[operand.index];
if (global->type != kWasmI32 && global->type != kWasmI64)
return unsupported(decoder, "non-int global");
- LiftoffAssembler::PinnedRegisterScope pinned;
- Register addr = pinned.pin(__ GetUnusedRegister(kGpReg));
+ LiftoffRegList pinned;
+ Register addr = pinned.set(__ GetUnusedRegister(kGpReg)).gp();
__ LoadFromContext(addr, offsetof(WasmContext, globals_start),
kPointerSize);
- Register value =
- pinned.pin(__ GetUnusedRegister(reg_class_for(global->type), pinned));
- int size = 1 << ElementSizeLog2Of(global->type);
- if (size > kPointerSize)
+ LiftoffRegister value =
+ pinned.set(__ GetUnusedRegister(reg_class_for(global->type), pinned));
+ LoadType type =
+ global->type == kWasmI32 ? LoadType::kI32Load : LoadType::kI64Load;
+ if (type.size() > kPointerSize)
return unsupported(decoder, "global > kPointerSize");
- __ Load(value, addr, global->offset, size, pinned);
+ __ Load(value, addr, no_reg, global->offset, type, pinned);
__ PushRegister(global->type, value);
+ CheckStackSizeLimit(decoder);
}
void SetGlobal(Decoder* decoder, const Value& value,
const GlobalIndexOperand<validate>& operand) {
auto* global = &env_->module->globals[operand.index];
if (global->type != kWasmI32) return unsupported(decoder, "non-i32 global");
- LiftoffAssembler::PinnedRegisterScope pinned;
- Register addr = pinned.pin(__ GetUnusedRegister(kGpReg));
+ LiftoffRegList pinned;
+ Register addr = pinned.set(__ GetUnusedRegister(kGpReg)).gp();
__ LoadFromContext(addr, offsetof(WasmContext, globals_start),
kPointerSize);
- Register reg =
- pinned.pin(__ PopToRegister(reg_class_for(global->type), pinned));
- int size = 1 << ElementSizeLog2Of(global->type);
- __ Store(addr, global->offset, reg, size, pinned);
+ LiftoffRegister reg =
+ pinned.set(__ PopToRegister(reg_class_for(global->type), pinned));
+ StoreType type =
+ global->type == kWasmI32 ? StoreType::kI32Store : StoreType::kI64Store;
+ __ Store(addr, no_reg, global->offset, reg, type, pinned);
}
void Unreachable(Decoder* decoder) { unsupported(decoder, "unreachable"); }
@@ -414,7 +726,7 @@ class LiftoffCompiler {
unsupported(decoder, "select");
}
- void Br(Decoder* decoder, Control* target) {
+ void Br(Control* target) {
if (!target->br_merge()->reached) {
target->label_state.InitMerge(*__ cache_state(), __ num_locals(),
target->br_merge()->arity);
@@ -423,12 +735,17 @@ class LiftoffCompiler {
__ jmp(target->label.get());
}
+ void Br(Decoder* decoder, Control* target) {
+ Br(target);
+ }
+
void BrIf(Decoder* decoder, const Value& cond, Control* target) {
Label cont_false;
- Register value = __ PopToRegister(kGpReg);
- __ JumpIfZero(value, &cont_false);
+ Register value = __ PopToRegister(kGpReg).gp();
+ __ emit_i32_test(value);
+ __ emit_cond_jump(kEqual, &cont_false);
- Br(decoder, target);
+ Br(target);
__ bind(&cont_false);
}
@@ -436,30 +753,221 @@ class LiftoffCompiler {
const Value& key) {
unsupported(decoder, "br_table");
}
+
void Else(Decoder* decoder, Control* if_block) {
- unsupported(decoder, "else");
+ if (if_block->reachable()) __ emit_jump(if_block->label.get());
+ __ bind(if_block->else_state->label.get());
+ __ cache_state()->Steal(if_block->else_state->state);
+ }
+
+ Label* AddOutOfLineTrap(wasm::WasmCodePosition position, uint32_t pc = 0) {
+ DCHECK(!FLAG_wasm_no_bounds_checks);
+    // The pc is needed exactly when trap handlers are enabled.
+ DCHECK_EQ(pc != 0, env_->use_trap_handler);
+
+ out_of_line_code_.push_back(OutOfLineCode::Trap(
+ Builtins::kThrowWasmTrapMemOutOfBounds, position, pc));
+ return out_of_line_code_.back().label.get();
+ }
+
+ void BoundsCheckMem(uint32_t access_size, uint32_t offset, Register index,
+ wasm::WasmCodePosition position, LiftoffRegList pinned) {
+ DCHECK(!env_->use_trap_handler);
+ if (FLAG_wasm_no_bounds_checks) return;
+
+ Label* trap_label = AddOutOfLineTrap(position);
+
+ if (access_size > max_size_ || offset > max_size_ - access_size) {
+ // The access will be out of bounds, even for the largest memory.
+ __ emit_jump(trap_label);
+ return;
+ }
+ uint32_t end_offset = offset + access_size - 1;
+
+ // If the end offset is larger than the smallest memory, dynamically check
+ // the end offset against the actual memory size, which is not known at
+ // compile time. Otherwise, only one check is required (see below).
+ LiftoffRegister end_offset_reg =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ LiftoffRegister mem_size = __ GetUnusedRegister(kGpReg, pinned);
+ __ LoadFromContext(mem_size.gp(), offsetof(WasmContext, mem_size), 4);
+ __ LoadConstant(end_offset_reg, WasmValue(end_offset));
+ if (end_offset >= min_size_) {
+ __ emit_i32_compare(end_offset_reg.gp(), mem_size.gp());
+ __ emit_cond_jump(kUnsignedGreaterEqual, trap_label);
+ }
+
+ // Just reuse the end_offset register for computing the effective size.
+ LiftoffRegister effective_size_reg = end_offset_reg;
+ __ emit_i32_sub(effective_size_reg.gp(), mem_size.gp(),
+ end_offset_reg.gp());
+
+ __ emit_i32_compare(index, effective_size_reg.gp());
+ __ emit_cond_jump(kUnsignedGreaterEqual, trap_label);
+ }
+
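+
The bounds check above folds the constant offset and access size into a single end_offset so that at most two dynamic comparisons are emitted. A minimal scalar sketch of the condition the generated code enforces (illustrative C++, not V8 code; the names are hypothetical):

    // In bounds iff the last accessed byte, index + offset + access_size - 1,
    // lies below mem_size (end_offset = offset + access_size - 1).
    static bool InBounds(uint32_t index, uint32_t offset, uint32_t access_size,
                         uint32_t mem_size) {
      uint32_t end_offset = offset + access_size - 1;
      if (end_offset >= mem_size) return false;  // first emit_cond_jump
      return index < mem_size - end_offset;      // effective-size check
    }

When end_offset < min_size_, end_offset is below every possible memory size, so the first comparison is skipped and only the effective-size check is emitted.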
+ void TraceMemoryOperation(bool is_store, MachineRepresentation rep,
+ Register index, uint32_t offset,
+ WasmCodePosition position) {
+ // Before making the runtime call, spill all cache registers.
+ __ SpillAllRegisters();
+
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
+ // Get one register for computing the address (offset + index).
+ LiftoffRegister address = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ // Compute offset+index in address.
+ __ LoadConstant(address, WasmValue(offset));
+ __ emit_i32_add(address.gp(), address.gp(), index);
+
+ // Get a register to hold the stack slot for wasm::MemoryTracingInfo.
+ LiftoffRegister info = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ // Allocate stack slot for wasm::MemoryTracingInfo.
+ __ AllocateStackSlot(info.gp(), sizeof(wasm::MemoryTracingInfo));
+
+ // Now store all information into the wasm::MemoryTracingInfo struct.
+ __ Store(info.gp(), no_reg, offsetof(wasm::MemoryTracingInfo, address),
+ address, StoreType::kI32Store, pinned);
+ __ LoadConstant(address, WasmValue(is_store ? 1 : 0));
+ __ Store(info.gp(), no_reg, offsetof(wasm::MemoryTracingInfo, is_store),
+ address, StoreType::kI32Store8, pinned);
+ __ LoadConstant(address, WasmValue(static_cast<int>(rep)));
+ __ Store(info.gp(), no_reg, offsetof(wasm::MemoryTracingInfo, mem_rep),
+ address, StoreType::kI32Store8, pinned);
+
+ source_position_table_builder_->AddPosition(
+ __ pc_offset(), SourcePosition(position), false);
+
+ Register args[] = {info.gp()};
+ GenerateRuntimeCall(arraysize(args), args);
+ }
+
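+
The three stores above fill a wasm::MemoryTracingInfo stack slot field by field. A hypothetical layout consistent with the offsets and store types used here (the real struct is defined elsewhere in V8 and may differ):

    struct MemoryTracingInfo {
      uint32_t address;  // effective address, offset + index (kI32Store)
      uint8_t is_store;  // 0 for loads, 1 for stores (kI32Store8)
      uint8_t mem_rep;   // MachineRepresentation of the access (kI32Store8)
    };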
+ void GenerateRuntimeCall(int num_args, Register* args) {
+ compiler::CallDescriptor* desc =
+ compiler::Linkage::GetRuntimeCallDescriptor(
+ compilation_zone_, Runtime::kWasmTraceMemory, num_args,
+ compiler::Operator::kNoProperties,
+ compiler::CallDescriptor::kNoFlags);
+ // Currently, only one argument is supported. More arguments require some
+ // caution for the parallel register moves (reuse StackTransferRecipe).
+ DCHECK_EQ(1, num_args);
+ constexpr size_t kInputShift = 1; // Input 0 is the call target.
+ compiler::LinkageLocation param_loc = desc->GetInputLocation(kInputShift);
+ if (param_loc.IsRegister()) {
+ Register reg = Register::from_code(param_loc.AsRegister());
+ __ Move(LiftoffRegister(reg), LiftoffRegister(args[0]));
+ } else {
+ DCHECK(param_loc.IsCallerFrameSlot());
+ __ PushCallerFrameSlot(LiftoffRegister(args[0]));
+ }
+
+ // Allocate the codegen zone if not done before.
+ if (!*codegen_zone_) {
+ codegen_zone_->reset(
+ new Zone(__ isolate()->allocator(), "LiftoffCodegenZone"));
+ }
+ __ CallRuntime(codegen_zone_->get(), Runtime::kWasmTraceMemory);
+ __ DeallocateStackSlot(sizeof(wasm::MemoryTracingInfo));
}
- void LoadMem(Decoder* decoder, ValueType type, MachineType mem_type,
- const MemoryAccessOperand<validate>& operand, const Value& index,
- Value* result) {
- unsupported(decoder, "memory load");
+
+ void LoadMem(Decoder* decoder, LoadType type,
+ const MemoryAccessOperand<validate>& operand,
+ const Value& index_val, Value* result) {
+ ValueType value_type = type.value_type();
+ if (value_type != kWasmI32 && value_type != kWasmF32)
+ return unsupported(decoder, "unsupported load type");
+ LiftoffRegList pinned;
+ Register index = pinned.set(__ PopToRegister(kGpReg)).gp();
+ if (!env_->use_trap_handler) {
+ // Emit an explicit bounds check.
+ BoundsCheckMem(type.size(), operand.offset, index, decoder->position(),
+ pinned);
+ }
+ Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+ __ LoadFromContext(addr, offsetof(WasmContext, mem_start), kPointerSize);
+ RegClass rc = reg_class_for(value_type);
+ LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
+ uint32_t protected_load_pc = 0;
+ __ Load(value, addr, index, operand.offset, type, pinned,
+ &protected_load_pc);
+ if (env_->use_trap_handler) {
+ AddOutOfLineTrap(decoder->position(), protected_load_pc);
+ }
+ __ PushRegister(value_type, value);
+ CheckStackSizeLimit(decoder);
+
+ if (FLAG_wasm_trace_memory) {
+ TraceMemoryOperation(false, type.mem_type().representation(), index,
+ operand.offset, decoder->position());
+ }
}
- void StoreMem(Decoder* decoder, ValueType type, MachineType mem_type,
+
+ void StoreMem(Decoder* decoder, StoreType type,
const MemoryAccessOperand<validate>& operand,
- const Value& index, const Value& value) {
- unsupported(decoder, "memory store");
+ const Value& index_val, const Value& value_val) {
+ ValueType value_type = type.value_type();
+ if (value_type != kWasmI32 && value_type != kWasmF32)
+ return unsupported(decoder, "unsupported store type");
+ RegClass rc = reg_class_for(value_type);
+ LiftoffRegList pinned;
+ LiftoffRegister value = pinned.set(__ PopToRegister(rc));
+ Register index = pinned.set(__ PopToRegister(kGpReg, pinned)).gp();
+ if (!env_->use_trap_handler) {
+ // Emit an explicit bounds check.
+ BoundsCheckMem(type.size(), operand.offset, index, decoder->position(),
+ pinned);
+ }
+ Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+ __ LoadFromContext(addr, offsetof(WasmContext, mem_start), kPointerSize);
+ uint32_t protected_store_pc = 0;
+ __ Store(addr, index, operand.offset, value, type, pinned,
+ &protected_store_pc);
+ if (env_->use_trap_handler) {
+ AddOutOfLineTrap(decoder->position(), protected_store_pc);
+ }
+ if (FLAG_wasm_trace_memory) {
+ TraceMemoryOperation(true, type.mem_rep(), index, operand.offset,
+ decoder->position());
+ }
}
+
void CurrentMemoryPages(Decoder* decoder, Value* result) {
unsupported(decoder, "current_memory");
}
void GrowMemory(Decoder* decoder, const Value& value, Value* result) {
unsupported(decoder, "grow_memory");
}
+
void CallDirect(Decoder* decoder,
const CallFunctionOperand<validate>& operand,
const Value args[], Value returns[]) {
- unsupported(decoder, "call");
+ if (operand.sig->return_count() > 1)
+ return unsupported(decoder, "multi-return");
+
+ compiler::CallDescriptor* call_desc =
+ compiler::GetWasmCallDescriptor(compilation_zone_, operand.sig);
+
+ __ PrepareCall(operand.sig, call_desc);
+
+ source_position_table_builder_->AddPosition(
+ __ pc_offset(), SourcePosition(decoder->position()), false);
+
+ if (FLAG_wasm_jit_to_native) {
+ // Just encode the function index. This will be patched at instantiation.
+ Address addr = reinterpret_cast<Address>(operand.index);
+ __ CallNativeWasmCode(addr);
+ } else {
+ Handle<Code> target = operand.index < env_->function_code.size()
+ ? env_->function_code[operand.index]
+ : env_->default_function_code;
+ __ Call(target, RelocInfo::CODE_TARGET);
+ }
+
+ safepoint_table_builder_.DefineSafepoint(asm_, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+
+ __ FinishCall(operand.sig, call_desc);
}
+
void CallIndirect(Decoder* decoder, const Value& index,
const CallIndirectOperand<validate>& operand,
const Value args[], Value returns[]) {
@@ -500,10 +1008,46 @@ class LiftoffCompiler {
}
private:
- LiftoffAssembler* asm_;
- compiler::CallDescriptor* call_desc_;
- compiler::ModuleEnv* env_;
+ LiftoffAssembler* const asm_;
+ compiler::CallDescriptor* const call_desc_;
+ compiler::ModuleEnv* const env_;
+ // {min_size_} and {max_size_} are cached values computed from the ModuleEnv.
+ const uint32_t min_size_;
+ const uint32_t max_size_;
+ const compiler::RuntimeExceptionSupport runtime_exception_support_;
bool ok_ = true;
+ std::vector<OutOfLineCode> out_of_line_code_;
+ SourcePositionTableBuilder* const source_position_table_builder_;
+ std::vector<trap_handler::ProtectedInstructionData>* protected_instructions_;
+ // Zone used to store information during compilation. The result will be
+ // stored independently, such that this zone can die together with the
+ // LiftoffCompiler after compilation.
+ Zone* compilation_zone_;
+ // This zone is allocated when needed, held externally, and survives until
+ // code generation (in FinishCompilation).
+ std::unique_ptr<Zone>* codegen_zone_;
+ SafepointTableBuilder safepoint_table_builder_;
+
+ void TraceCacheState(Decoder* decoder) const {
+#ifdef DEBUG
+ if (!FLAG_trace_liftoff || !FLAG_trace_wasm_decoder) return;
+ OFStream os(stdout);
+ for (int control_depth = decoder->control_depth() - 1; control_depth >= -1;
+ --control_depth) {
+ LiftoffAssembler::CacheState* cache_state =
+ control_depth == -1
+ ? asm_->cache_state()
+ : &decoder->control_at(control_depth)->label_state;
+ bool first = true;
+ for (LiftoffAssembler::VarState& slot : cache_state->stack_state) {
+ os << (first ? "" : "-") << slot;
+ first = false;
+ }
+ if (control_depth != -1) PrintF("; ");
+ }
+ os << "\n";
+#endif
+ }
};
} // namespace
@@ -518,9 +1062,15 @@ bool compiler::WasmCompilationUnit::ExecuteLiftoffCompilation() {
Zone zone(isolate_->allocator(), "LiftoffCompilationZone");
const wasm::WasmModule* module = env_ ? env_->module : nullptr;
auto* call_desc = compiler::GetWasmCallDescriptor(&zone, func_body_.sig);
+ base::Optional<TimedHistogramScope> liftoff_compile_time_scope(
+ base::in_place, counters()->liftoff_compile_time());
wasm::WasmFullDecoder<wasm::Decoder::kValidate, wasm::LiftoffCompiler>
- decoder(&zone, module, func_body_, &liftoff_.asm_, call_desc, env_);
+ decoder(&zone, module, func_body_, &liftoff_.asm_, call_desc, env_,
+ runtime_exception_support_,
+ &liftoff_.source_position_table_builder_,
+ protected_instructions_.get(), &zone, &liftoff_.codegen_zone_);
decoder.Decode();
+ liftoff_compile_time_scope.reset();
if (!decoder.interface().ok()) {
// Liftoff compilation failed.
isolate_->counters()->liftoff_unsupported_functions()->Increment();
@@ -539,6 +1089,8 @@ bool compiler::WasmCompilationUnit::ExecuteLiftoffCompilation() {
// Record the memory cost this unit places on the system until
// it is finalized.
memory_cost_ = liftoff_.asm_.pc_offset();
+ liftoff_.safepoint_table_offset_ =
+ decoder.interface().GetSafepointTableOffset();
isolate_->counters()->liftoff_compiled_functions()->Increment();
return true;
}
diff --git a/deps/v8/src/wasm/baseline/liftoff-register.h b/deps/v8/src/wasm/baseline/liftoff-register.h
new file mode 100644
index 0000000000..bb5ef5be4a
--- /dev/null
+++ b/deps/v8/src/wasm/baseline/liftoff-register.h
@@ -0,0 +1,242 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_BASELINE_LIFTOFF_REGISTER_H_
+#define V8_WASM_BASELINE_LIFTOFF_REGISTER_H_
+
+#include <iosfwd>
+#include <memory>
+
+// Clients of this interface shouldn't depend on lots of compiler internals.
+// Do not include anything from src/compiler here!
+#include "src/base/bits.h"
+#include "src/wasm/baseline/liftoff-assembler-defs.h"
+#include "src/wasm/wasm-opcodes.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+enum RegClass { kNoReg, kGpReg, kFpReg };
+
+// TODO(clemensh): Use a switch once we require C++14 support.
+static inline constexpr RegClass reg_class_for(ValueType type) {
+ return type == kWasmI32 || type == kWasmI64 // int types
+ ? kGpReg
+ : type == kWasmF32 || type == kWasmF64 // float types
+ ? kFpReg
+ : kNoReg; // other (unsupported) types
+}
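+
Since reg_class_for is constexpr, the mapping can be spot-checked at compile time; these assertions follow directly from the definition above:

    static_assert(reg_class_for(kWasmI32) == kGpReg, "i32 is a gp type");
    static_assert(reg_class_for(kWasmI64) == kGpReg, "i64 is a gp type");
    static_assert(reg_class_for(kWasmF32) == kFpReg, "f32 is an fp type");
    static_assert(reg_class_for(kWasmF64) == kFpReg, "f64 is an fp type");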
+
+// RegForClass<rc>: Register for rc==kGpReg, DoubleRegister for rc==kFpReg, void
+// for all other values of rc.
+template <RegClass rc>
+using RegForClass = typename std::conditional<
+ rc == kGpReg, Register,
+ typename std::conditional<rc == kFpReg, DoubleRegister, void>::type>::type;
+
+// Maximum code of a gp cache register.
+static constexpr int kMaxGpRegCode =
+ 8 * sizeof(kLiftoffAssemblerGpCacheRegs) -
+ base::bits::CountLeadingZeros(kLiftoffAssemblerGpCacheRegs);
+// Maximum code of an fp cache register.
+static constexpr int kMaxFpRegCode =
+ 8 * sizeof(kLiftoffAssemblerFpCacheRegs) -
+ base::bits::CountLeadingZeros(kLiftoffAssemblerFpCacheRegs);
+// LiftoffRegister encodes both gp and fp in a unified index space.
+// [0 .. kMaxGpRegCode] encodes gp registers,
+// [kMaxGpRegCode+1 .. kMaxGpRegCode+1 + kMaxFpRegCode] encodes fp registers.
+static constexpr int kAfterMaxLiftoffGpRegCode = kMaxGpRegCode + 1;
+static constexpr int kAfterMaxLiftoffFpRegCode =
+ kAfterMaxLiftoffGpRegCode + kMaxFpRegCode + 1;
+static constexpr int kAfterMaxLiftoffRegCode = kAfterMaxLiftoffFpRegCode;
+static_assert(kAfterMaxLiftoffRegCode < 256,
+ "liftoff register codes can be stored in one uint8_t");
+
+class LiftoffRegister {
+ public:
+ explicit LiftoffRegister(Register reg) : LiftoffRegister(reg.code()) {
+ DCHECK_EQ(reg, gp());
+ }
+ explicit LiftoffRegister(DoubleRegister reg)
+ : LiftoffRegister(kAfterMaxLiftoffGpRegCode + reg.code()) {
+ DCHECK_EQ(reg, fp());
+ }
+
+ static LiftoffRegister from_liftoff_code(int code) {
+ DCHECK_LE(0, code);
+ DCHECK_GT(kAfterMaxLiftoffRegCode, code);
+ return LiftoffRegister(code);
+ }
+
+ static LiftoffRegister from_code(RegClass rc, int code) {
+ switch (rc) {
+ case kGpReg:
+ return LiftoffRegister(Register::from_code(code));
+ case kFpReg:
+ return LiftoffRegister(DoubleRegister::from_code(code));
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ constexpr bool is_gp() const { return code_ < kAfterMaxLiftoffGpRegCode; }
+ constexpr bool is_fp() const {
+ return code_ >= kAfterMaxLiftoffGpRegCode &&
+ code_ < kAfterMaxLiftoffFpRegCode;
+ }
+
+ Register gp() const {
+ DCHECK(is_gp());
+ return Register::from_code(code_);
+ }
+
+ DoubleRegister fp() const {
+ DCHECK(is_fp());
+ return DoubleRegister::from_code(code_ - kAfterMaxLiftoffGpRegCode);
+ }
+
+ int liftoff_code() const { return code_; }
+
+ RegClass reg_class() const {
+ DCHECK(is_gp() || is_fp());
+ return is_gp() ? kGpReg : kFpReg;
+ }
+
+ bool operator==(const LiftoffRegister other) const {
+ return code_ == other.code_;
+ }
+ bool operator!=(const LiftoffRegister other) const {
+ return code_ != other.code_;
+ }
+
+ private:
+ uint8_t code_;
+
+ explicit constexpr LiftoffRegister(uint8_t code) : code_(code) {}
+};
+static_assert(IS_TRIVIALLY_COPYABLE(LiftoffRegister),
+ "LiftoffRegister can efficiently be passed by value");
+
+inline std::ostream& operator<<(std::ostream& os, LiftoffRegister reg) {
+ return reg.is_gp() ? os << "gp" << reg.gp().code()
+ : os << "fp" << reg.fp().code();
+}
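+
A small round trip through the unified code space (a sketch assuming the declarations above; the concrete codes depend on the platform's cache-register masks):

    LiftoffRegister r = LiftoffRegister::from_code(kFpReg, 3);
    // r.is_fp() holds, r.liftoff_code() == kAfterMaxLiftoffGpRegCode + 3,
    // and r.fp().code() == 3 again.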
+
+class LiftoffRegList {
+ public:
+ static constexpr bool use_u16 = kAfterMaxLiftoffRegCode <= 16;
+ static constexpr bool use_u32 = !use_u16 && kAfterMaxLiftoffRegCode <= 32;
+ using storage_t = std::conditional<
+ use_u16, uint16_t,
+ std::conditional<use_u32, uint32_t, uint64_t>::type>::type;
+
+ static constexpr storage_t kGpMask = storage_t{kLiftoffAssemblerGpCacheRegs};
+ static constexpr storage_t kFpMask = storage_t{kLiftoffAssemblerFpCacheRegs}
+ << kAfterMaxLiftoffGpRegCode;
+
+ constexpr LiftoffRegList() = default;
+
+ Register set(Register reg) { return set(LiftoffRegister(reg)).gp(); }
+ DoubleRegister set(DoubleRegister reg) {
+ return set(LiftoffRegister(reg)).fp();
+ }
+
+ LiftoffRegister set(LiftoffRegister reg) {
+ regs_ |= storage_t{1} << reg.liftoff_code();
+ return reg;
+ }
+
+ LiftoffRegister clear(LiftoffRegister reg) {
+ regs_ &= ~(storage_t{1} << reg.liftoff_code());
+ return reg;
+ }
+
+ bool has(LiftoffRegister reg) const {
+ return (regs_ & (storage_t{1} << reg.liftoff_code())) != 0;
+ }
+
+ constexpr bool is_empty() const { return regs_ == 0; }
+
+ constexpr unsigned GetNumRegsSet() const {
+ return base::bits::CountPopulation(regs_);
+ }
+
+ constexpr LiftoffRegList operator&(const LiftoffRegList other) const {
+ return LiftoffRegList(regs_ & other.regs_);
+ }
+
+ constexpr LiftoffRegList operator~() const {
+ return LiftoffRegList(~regs_ & (kGpMask | kFpMask));
+ }
+
+ constexpr bool operator==(const LiftoffRegList other) const {
+ return regs_ == other.regs_;
+ }
+ constexpr bool operator!=(const LiftoffRegList other) const {
+ return regs_ != other.regs_;
+ }
+
+ LiftoffRegister GetFirstRegSet() const {
+ DCHECK(!is_empty());
+ unsigned first_code = base::bits::CountTrailingZeros(regs_);
+ return LiftoffRegister::from_liftoff_code(first_code);
+ }
+
+ LiftoffRegister GetLastRegSet() const {
+ DCHECK(!is_empty());
+ unsigned last_code =
+ 8 * sizeof(regs_) - 1 - base::bits::CountLeadingZeros(regs_);
+ return LiftoffRegister::from_liftoff_code(last_code);
+ }
+
+ LiftoffRegList MaskOut(const LiftoffRegList mask) const {
+ // Masking out is guaranteed to return a correct reg list, hence no checks
+ // needed.
+ return FromBits(regs_ & ~mask.regs_);
+ }
+
+ static LiftoffRegList FromBits(storage_t bits) {
+ DCHECK_EQ(bits, bits & (kGpMask | kFpMask));
+ return LiftoffRegList(bits);
+ }
+
+ template <storage_t bits>
+ static constexpr LiftoffRegList FromBits() {
+ static_assert(bits == (bits & (kGpMask | kFpMask)), "illegal reg list");
+ return LiftoffRegList(bits);
+ }
+
+ template <typename... Regs>
+ static LiftoffRegList ForRegs(Regs... regs) {
+ std::array<LiftoffRegister, sizeof...(regs)> regs_arr{
+ LiftoffRegister(regs)...};
+ LiftoffRegList list;
+ for (LiftoffRegister reg : regs_arr) list.set(reg);
+ return list;
+ }
+
+ private:
+ storage_t regs_ = 0;
+
+ // Unchecked constructor. Only use for valid bits.
+ explicit constexpr LiftoffRegList(storage_t bits) : regs_(bits) {}
+};
+static_assert(IS_TRIVIALLY_COPYABLE(LiftoffRegList),
+ "LiftoffRegList can be passed by value");
+
+static constexpr LiftoffRegList kGpCacheRegList =
+ LiftoffRegList::FromBits<LiftoffRegList::kGpMask>();
+static constexpr LiftoffRegList kFpCacheRegList =
+ LiftoffRegList::FromBits<LiftoffRegList::kFpMask>();
+
+static constexpr LiftoffRegList GetCacheRegList(RegClass rc) {
+ return rc == kGpReg ? kGpCacheRegList : kFpCacheRegList;
+}
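+
A typical pinning pattern, as used throughout the LiftoffCompiler above (a sketch; rax stands in for any x64 gp cache register):

    LiftoffRegList pinned;
    Register addr = pinned.set(LiftoffRegister(rax)).gp();  // pin rax
    // Registers of the gp class still available for allocation:
    LiftoffRegList free_gp = kGpCacheRegList.MaskOut(pinned);
    LiftoffRegister next = free_gp.GetFirstRegSet();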
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_BASELINE_LIFTOFF_REGISTER_H_
diff --git a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips-defs.h b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips-defs.h
deleted file mode 100644
index edc52d74b6..0000000000
--- a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips-defs.h
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS_DEFS_H_
-#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS_DEFS_H_
-
-#include "src/reglist.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-// TODO(clemensh): Implement the LiftoffAssembler on this platform.
-static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
-
-static constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
-
-#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS_DEFS_H_
diff --git a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index bc3ec1667e..50ab1e82c8 100644
--- a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -11,52 +11,168 @@ namespace v8 {
namespace internal {
namespace wasm {
-void LiftoffAssembler::ReserveStackSpace(uint32_t space) { USE(stack_space_); }
+void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) { UNIMPLEMENTED(); }
-void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {}
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
+ UNIMPLEMENTED();
+}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
- int size) {}
-
-void LiftoffAssembler::SpillContext(Register context) {}
-
-void LiftoffAssembler::Load(Register dst, Register src_addr,
- uint32_t offset_imm, int size,
- PinnedRegisterScope pinned) {}
-
-void LiftoffAssembler::Store(Register dst_addr, uint32_t offset_imm,
- Register src, int size,
- PinnedRegisterScope pinned) {}
-
-void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
- uint32_t caller_slot_idx) {}
-
-void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {}
-
-void LiftoffAssembler::MoveToReturnRegister(Register reg) {}
-
-void LiftoffAssembler::Spill(uint32_t index, Register reg) {}
-
-void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {}
-
-void LiftoffAssembler::Fill(Register reg, uint32_t index) {}
-
-#define DEFAULT_I32_BINOP(name, internal_name) \
- void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
- Register rhs) {}
-
-// clang-format off
-DEFAULT_I32_BINOP(add, add)
-DEFAULT_I32_BINOP(sub, sub)
-DEFAULT_I32_BINOP(mul, imul)
-DEFAULT_I32_BINOP(and, and)
-DEFAULT_I32_BINOP(or, or)
-DEFAULT_I32_BINOP(xor, xor)
-// clang-format on
-
-#undef DEFAULT_I32_BINOP
-
-void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {}
+ int size) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::SpillContext(Register context) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::FillContextInto(Register dst) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uint32_t offset_imm,
+ LoadType type, LiftoffRegList pinned,
+ uint32_t* protected_load_pc) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
+ uint32_t offset_imm, LiftoffRegister src,
+ StoreType type, LiftoffRegList pinned,
+ uint32_t* protected_store_pc) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
+ uint32_t caller_slot_idx) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
+ UNIMPLEMENTED();
+}
+
+#define UNIMPLEMENTED_GP_BINOP(name) \
+ void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
+ Register rhs) { \
+ UNIMPLEMENTED(); \
+ }
+#define UNIMPLEMENTED_GP_UNOP(name) \
+ bool LiftoffAssembler::emit_##name(Register dst, Register src) { \
+ UNIMPLEMENTED(); \
+ }
+#define UNIMPLEMENTED_FP_BINOP(name) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
+ DoubleRegister rhs) { \
+ UNIMPLEMENTED(); \
+ }
+
+UNIMPLEMENTED_GP_BINOP(i32_add)
+UNIMPLEMENTED_GP_BINOP(i32_sub)
+UNIMPLEMENTED_GP_BINOP(i32_mul)
+UNIMPLEMENTED_GP_BINOP(i32_and)
+UNIMPLEMENTED_GP_BINOP(i32_or)
+UNIMPLEMENTED_GP_BINOP(i32_xor)
+UNIMPLEMENTED_GP_BINOP(i32_shl)
+UNIMPLEMENTED_GP_BINOP(i32_sar)
+UNIMPLEMENTED_GP_BINOP(i32_shr)
+UNIMPLEMENTED_GP_UNOP(i32_eqz)
+UNIMPLEMENTED_GP_UNOP(i32_clz)
+UNIMPLEMENTED_GP_UNOP(i32_ctz)
+UNIMPLEMENTED_GP_UNOP(i32_popcnt)
+UNIMPLEMENTED_GP_BINOP(ptrsize_add)
+UNIMPLEMENTED_FP_BINOP(f32_add)
+UNIMPLEMENTED_FP_BINOP(f32_sub)
+UNIMPLEMENTED_FP_BINOP(f32_mul)
+
+#undef UNIMPLEMENTED_GP_BINOP
+#undef UNIMPLEMENTED_GP_UNOP
+#undef UNIMPLEMENTED_FP_BINOP
+
+void LiftoffAssembler::emit_i32_test(Register reg) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::emit_jump(Label* label) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::StackCheck(Label* ool_code) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::CallTrapCallbackForTesting() { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
+ uint32_t src_index) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
+ uint32_t num_params) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
+ uint32_t param_idx,
+ uint32_t num_params) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::CallNativeWasmCode(Address addr) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { UNIMPLEMENTED(); }
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64-defs.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64-defs.h
deleted file mode 100644
index 1652562515..0000000000
--- a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64-defs.h
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS64_DEFS_H_
-#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS64_DEFS_H_
-
-#include "src/reglist.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-// TODO(clemensh): Implement the LiftoffAssembler on this platform.
-static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
-
-static constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
-
-#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS64_DEFS_H_
diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index 2a10d0712e..fd63198e24 100644
--- a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -11,52 +11,168 @@ namespace v8 {
namespace internal {
namespace wasm {
-void LiftoffAssembler::ReserveStackSpace(uint32_t space) { USE(stack_space_); }
+void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) { UNIMPLEMENTED(); }
-void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {}
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
+ UNIMPLEMENTED();
+}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
- int size) {}
-
-void LiftoffAssembler::SpillContext(Register context) {}
-
-void LiftoffAssembler::Load(Register dst, Register src_addr,
- uint32_t offset_imm, int size,
- PinnedRegisterScope pinned) {}
-
-void LiftoffAssembler::Store(Register dst_addr, uint32_t offset_imm,
- Register src, int size,
- PinnedRegisterScope pinned) {}
-
-void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
- uint32_t caller_slot_idx) {}
-
-void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {}
-
-void LiftoffAssembler::MoveToReturnRegister(Register reg) {}
-
-void LiftoffAssembler::Spill(uint32_t index, Register reg) {}
-
-void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {}
-
-void LiftoffAssembler::Fill(Register reg, uint32_t index) {}
-
-#define DEFAULT_I32_BINOP(name, internal_name) \
- void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
- Register rhs) {}
-
-// clang-format off
-DEFAULT_I32_BINOP(add, add)
-DEFAULT_I32_BINOP(sub, sub)
-DEFAULT_I32_BINOP(mul, imul)
-DEFAULT_I32_BINOP(and, and)
-DEFAULT_I32_BINOP(or, or)
-DEFAULT_I32_BINOP(xor, xor)
-// clang-format on
-
-#undef DEFAULT_I32_BINOP
-
-void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {}
+ int size) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::SpillContext(Register context) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::FillContextInto(Register dst) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uint32_t offset_imm,
+ LoadType type, LiftoffRegList pinned,
+ uint32_t* protected_load_pc) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
+ uint32_t offset_imm, LiftoffRegister src,
+ StoreType type, LiftoffRegList pinned,
+ uint32_t* protected_store_pc) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
+ uint32_t caller_slot_idx) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
+ UNIMPLEMENTED();
+}
+
+#define UNIMPLEMENTED_GP_BINOP(name) \
+ void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
+ Register rhs) { \
+ UNIMPLEMENTED(); \
+ }
+#define UNIMPLEMENTED_GP_UNOP(name) \
+ bool LiftoffAssembler::emit_##name(Register dst, Register src) { \
+ UNIMPLEMENTED(); \
+ }
+#define UNIMPLEMENTED_FP_BINOP(name) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
+ DoubleRegister rhs) { \
+ UNIMPLEMENTED(); \
+ }
+
+UNIMPLEMENTED_GP_BINOP(i32_add)
+UNIMPLEMENTED_GP_BINOP(i32_sub)
+UNIMPLEMENTED_GP_BINOP(i32_mul)
+UNIMPLEMENTED_GP_BINOP(i32_and)
+UNIMPLEMENTED_GP_BINOP(i32_or)
+UNIMPLEMENTED_GP_BINOP(i32_xor)
+UNIMPLEMENTED_GP_BINOP(i32_shl)
+UNIMPLEMENTED_GP_BINOP(i32_sar)
+UNIMPLEMENTED_GP_BINOP(i32_shr)
+UNIMPLEMENTED_GP_UNOP(i32_eqz)
+UNIMPLEMENTED_GP_UNOP(i32_clz)
+UNIMPLEMENTED_GP_UNOP(i32_ctz)
+UNIMPLEMENTED_GP_UNOP(i32_popcnt)
+UNIMPLEMENTED_GP_BINOP(ptrsize_add)
+UNIMPLEMENTED_FP_BINOP(f32_add)
+UNIMPLEMENTED_FP_BINOP(f32_sub)
+UNIMPLEMENTED_FP_BINOP(f32_mul)
+
+#undef UNIMPLEMENTED_GP_BINOP
+#undef UNIMPLEMENTED_GP_UNOP
+#undef UNIMPLEMENTED_FP_BINOP
+
+void LiftoffAssembler::emit_i32_test(Register reg) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::emit_jump(Label* label) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::StackCheck(Label* ool_code) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::CallTrapCallbackForTesting() { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
+ uint32_t src_index) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
+ uint32_t num_params) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
+ uint32_t param_idx,
+ uint32_t num_params) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::CallNativeWasmCode(Address addr) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { UNIMPLEMENTED(); }
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc-defs.h b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc-defs.h
deleted file mode 100644
index b0d1317166..0000000000
--- a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc-defs.h
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_PPC_DEFS_H_
-#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_PPC_DEFS_H_
-
-#include "src/reglist.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-// TODO(clemensh): Implement the LiftoffAssembler on this platform.
-static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
-
-static constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
-
-#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_PPC_DEFS_H_
diff --git a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index 55a1475efe..2d62d88dec 100644
--- a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -11,52 +11,168 @@ namespace v8 {
namespace internal {
namespace wasm {
-void LiftoffAssembler::ReserveStackSpace(uint32_t space) { USE(stack_space_); }
+void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) { UNIMPLEMENTED(); }
-void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {}
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
+ UNIMPLEMENTED();
+}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
- int size) {}
-
-void LiftoffAssembler::SpillContext(Register context) {}
-
-void LiftoffAssembler::Load(Register dst, Register src_addr,
- uint32_t offset_imm, int size,
- PinnedRegisterScope pinned) {}
-
-void LiftoffAssembler::Store(Register dst_addr, uint32_t offset_imm,
- Register src, int size,
- PinnedRegisterScope pinned) {}
-
-void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
- uint32_t caller_slot_idx) {}
-
-void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {}
-
-void LiftoffAssembler::MoveToReturnRegister(Register reg) {}
-
-void LiftoffAssembler::Spill(uint32_t index, Register reg) {}
-
-void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {}
-
-void LiftoffAssembler::Fill(Register reg, uint32_t index) {}
-
-#define DEFAULT_I32_BINOP(name, internal_name) \
- void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
- Register rhs) {}
-
-// clang-format off
-DEFAULT_I32_BINOP(add, add)
-DEFAULT_I32_BINOP(sub, sub)
-DEFAULT_I32_BINOP(mul, imul)
-DEFAULT_I32_BINOP(and, and)
-DEFAULT_I32_BINOP(or, or)
-DEFAULT_I32_BINOP(xor, xor)
-// clang-format on
-
-#undef DEFAULT_I32_BINOP
-
-void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {}
+ int size) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::SpillContext(Register context) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::FillContextInto(Register dst) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uint32_t offset_imm,
+ LoadType type, LiftoffRegList pinned,
+ uint32_t* protected_load_pc) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
+ uint32_t offset_imm, LiftoffRegister src,
+ StoreType type, LiftoffRegList pinned,
+ uint32_t* protected_store_pc) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
+ uint32_t caller_slot_idx) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
+ UNIMPLEMENTED();
+}
+
+#define UNIMPLEMENTED_GP_BINOP(name) \
+ void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
+ Register rhs) { \
+ UNIMPLEMENTED(); \
+ }
+#define UNIMPLEMENTED_GP_UNOP(name) \
+ bool LiftoffAssembler::emit_##name(Register dst, Register src) { \
+ UNIMPLEMENTED(); \
+ }
+#define UNIMPLEMENTED_FP_BINOP(name) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
+ DoubleRegister rhs) { \
+ UNIMPLEMENTED(); \
+ }
+
+UNIMPLEMENTED_GP_BINOP(i32_add)
+UNIMPLEMENTED_GP_BINOP(i32_sub)
+UNIMPLEMENTED_GP_BINOP(i32_mul)
+UNIMPLEMENTED_GP_BINOP(i32_and)
+UNIMPLEMENTED_GP_BINOP(i32_or)
+UNIMPLEMENTED_GP_BINOP(i32_xor)
+UNIMPLEMENTED_GP_BINOP(i32_shl)
+UNIMPLEMENTED_GP_BINOP(i32_sar)
+UNIMPLEMENTED_GP_BINOP(i32_shr)
+UNIMPLEMENTED_GP_UNOP(i32_eqz)
+UNIMPLEMENTED_GP_UNOP(i32_clz)
+UNIMPLEMENTED_GP_UNOP(i32_ctz)
+UNIMPLEMENTED_GP_UNOP(i32_popcnt)
+UNIMPLEMENTED_GP_BINOP(ptrsize_add)
+UNIMPLEMENTED_FP_BINOP(f32_add)
+UNIMPLEMENTED_FP_BINOP(f32_sub)
+UNIMPLEMENTED_FP_BINOP(f32_mul)
+
+#undef UNIMPLEMENTED_GP_BINOP
+#undef UNIMPLEMENTED_GP_UNOP
+#undef UNIMPLEMENTED_FP_BINOP
+
+void LiftoffAssembler::emit_i32_test(Register reg) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::emit_jump(Label* label) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::StackCheck(Label* ool_code) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::CallTrapCallbackForTesting() { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
+ uint32_t src_index) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
+ uint32_t num_params) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
+ uint32_t param_idx,
+ uint32_t num_params) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::CallNativeWasmCode(Address addr) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { UNIMPLEMENTED(); }
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390-defs.h b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390-defs.h
deleted file mode 100644
index e60dfb923b..0000000000
--- a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390-defs.h
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_S390_DEFS_H_
-#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_S390_DEFS_H_
-
-#include "src/reglist.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-// TODO(clemensh): Implement the LiftoffAssembler on this platform.
-static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
-
-static constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
-
-#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_S390_DEFS_H_
diff --git a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index 1c56971a20..eebb8e4720 100644
--- a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -11,52 +11,168 @@ namespace v8 {
namespace internal {
namespace wasm {
-void LiftoffAssembler::ReserveStackSpace(uint32_t space) { USE(stack_space_); }
+void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) { UNIMPLEMENTED(); }
-void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {}
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
+ UNIMPLEMENTED();
+}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
- int size) {}
-
-void LiftoffAssembler::SpillContext(Register context) {}
-
-void LiftoffAssembler::Load(Register dst, Register src_addr,
- uint32_t offset_imm, int size,
- PinnedRegisterScope pinned) {}
-
-void LiftoffAssembler::Store(Register dst_addr, uint32_t offset_imm,
- Register src, int size,
- PinnedRegisterScope pinned) {}
-
-void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
- uint32_t caller_slot_idx) {}
-
-void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {}
-
-void LiftoffAssembler::MoveToReturnRegister(Register reg) {}
-
-void LiftoffAssembler::Spill(uint32_t index, Register reg) {}
-
-void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {}
-
-void LiftoffAssembler::Fill(Register reg, uint32_t index) {}
-
-#define DEFAULT_I32_BINOP(name, internal_name) \
- void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
- Register rhs) {}
-
-// clang-format off
-DEFAULT_I32_BINOP(add, add)
-DEFAULT_I32_BINOP(sub, sub)
-DEFAULT_I32_BINOP(mul, imul)
-DEFAULT_I32_BINOP(and, and)
-DEFAULT_I32_BINOP(or, or)
-DEFAULT_I32_BINOP(xor, xor)
-// clang-format on
-
-#undef DEFAULT_I32_BINOP
-
-void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {}
+ int size) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::SpillContext(Register context) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::FillContextInto(Register dst) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uint32_t offset_imm,
+ LoadType type, LiftoffRegList pinned,
+ uint32_t* protected_load_pc) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
+ uint32_t offset_imm, LiftoffRegister src,
+ StoreType type, LiftoffRegList pinned,
+ uint32_t* protected_store_pc) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
+ uint32_t caller_slot_idx) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
+ UNIMPLEMENTED();
+}
+
+#define UNIMPLEMENTED_GP_BINOP(name) \
+ void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
+ Register rhs) { \
+ UNIMPLEMENTED(); \
+ }
+#define UNIMPLEMENTED_GP_UNOP(name) \
+ bool LiftoffAssembler::emit_##name(Register dst, Register src) { \
+ UNIMPLEMENTED(); \
+ }
+#define UNIMPLEMENTED_FP_BINOP(name) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
+ DoubleRegister rhs) { \
+ UNIMPLEMENTED(); \
+ }
+
+UNIMPLEMENTED_GP_BINOP(i32_add)
+UNIMPLEMENTED_GP_BINOP(i32_sub)
+UNIMPLEMENTED_GP_BINOP(i32_mul)
+UNIMPLEMENTED_GP_BINOP(i32_and)
+UNIMPLEMENTED_GP_BINOP(i32_or)
+UNIMPLEMENTED_GP_BINOP(i32_xor)
+UNIMPLEMENTED_GP_BINOP(i32_shl)
+UNIMPLEMENTED_GP_BINOP(i32_sar)
+UNIMPLEMENTED_GP_BINOP(i32_shr)
+UNIMPLEMENTED_GP_UNOP(i32_eqz)
+UNIMPLEMENTED_GP_UNOP(i32_clz)
+UNIMPLEMENTED_GP_UNOP(i32_ctz)
+UNIMPLEMENTED_GP_UNOP(i32_popcnt)
+UNIMPLEMENTED_GP_BINOP(ptrsize_add)
+UNIMPLEMENTED_FP_BINOP(f32_add)
+UNIMPLEMENTED_FP_BINOP(f32_sub)
+UNIMPLEMENTED_FP_BINOP(f32_mul)
+
+#undef UNIMPLEMENTED_GP_BINOP
+#undef UNIMPLEMENTED_GP_UNOP
+#undef UNIMPLEMENTED_FP_BINOP
+
+void LiftoffAssembler::emit_i32_test(Register reg) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::emit_jump(Label* label) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::StackCheck(Label* ool_code) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::CallTrapCallbackForTesting() { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
+ uint32_t src_index) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
+ uint32_t num_params) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
+ uint32_t param_idx,
+ uint32_t num_params) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::CallNativeWasmCode(Address addr) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { UNIMPLEMENTED(); }
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64-defs.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64-defs.h
deleted file mode 100644
index ce568eab97..0000000000
--- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64-defs.h
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_X64_DEFS_H_
-#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_X64_DEFS_H_
-
-#include "src/reglist.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = true;
-
-static constexpr RegList kLiftoffAssemblerGpCacheRegs =
- Register::ListOf<rax, rcx, rdx, rbx, rsi, rdi>();
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
-
-#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_X64_DEFS_H_
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index 559965ab96..2b3b750fc4 100644
--- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -19,32 +19,39 @@ namespace liftoff {
inline Operand GetStackSlot(uint32_t index) {
// rbp-8 holds the stack marker, rbp-16 is the wasm context, first stack slot
// is located at rbp-24.
- constexpr int32_t kStackSlotSize = 8;
constexpr int32_t kFirstStackSlotOffset = -24;
- return Operand(rbp, kFirstStackSlotOffset - index * kStackSlotSize);
+ return Operand(
+ rbp, kFirstStackSlotOffset - index * LiftoffAssembler::kStackSlotSize);
}
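Assuming LiftoffAssembler::kStackSlotSize keeps the old local constant's value of 8, slot i lives at rbp - (24 + 8 * i):

    liftoff::GetStackSlot(0);  // Operand(rbp, -24)
    liftoff::GetStackSlot(2);  // Operand(rbp, -40)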
// TODO(clemensh): Make this a constexpr variable once Operand is constexpr.
inline Operand GetContextOperand() { return Operand(rbp, -16); }
+// Use this register to store the address of the last argument pushed on the
+// stack for a call to C.
+static constexpr Register kCCallLastArgAddrReg = rax;
+
} // namespace liftoff
-void LiftoffAssembler::ReserveStackSpace(uint32_t space) {
- stack_space_ = space;
- subl(rsp, Immediate(space));
+void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) {
+ DCHECK_LE(bytes, kMaxInt);
+ subp(rsp, Immediate(bytes));
}
-void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
switch (value.type()) {
case kWasmI32:
if (value.to_i32() == 0) {
- xorl(reg, reg);
+ xorl(reg.gp(), reg.gp());
} else {
- movl(reg, Immediate(value.to_i32()));
+ movl(reg.gp(), Immediate(value.to_i32()));
}
break;
+ case kWasmF32:
+ TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
+ break;
default:
- UNIMPLEMENTED();
+ UNREACHABLE();
}
}
@@ -64,54 +71,109 @@ void LiftoffAssembler::SpillContext(Register context) {
movp(liftoff::GetContextOperand(), context);
}
-void LiftoffAssembler::Load(Register dst, Register src_addr,
- uint32_t offset_imm, int size,
- PinnedRegisterScope pinned) {
- Operand src_op = Operand(src_addr, offset_imm);
+void LiftoffAssembler::FillContextInto(Register dst) {
+ movp(dst, liftoff::GetContextOperand());
+}
+
+void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uint32_t offset_imm,
+ LoadType type, LiftoffRegList pinned,
+ uint32_t* protected_load_pc) {
+ Operand src_op = offset_reg == no_reg
+ ? Operand(src_addr, offset_imm)
+ : Operand(src_addr, offset_reg, times_1, offset_imm);
if (offset_imm > kMaxInt) {
   // The immediate cannot be encoded in the operand. Load it to a register
// first.
- Register src = GetUnusedRegister(kGpReg, pinned);
+ Register src = GetUnusedRegister(kGpReg, pinned).gp();
movl(src, Immediate(offset_imm));
+ if (offset_reg != no_reg) {
+ emit_ptrsize_add(src, src, offset_reg);
+ }
src_op = Operand(src_addr, src, times_1, 0);
}
- DCHECK(size == 4 || size == 8);
- if (size == 4) {
- movl(dst, src_op);
- } else {
- movq(dst, src_op);
+ if (protected_load_pc) *protected_load_pc = pc_offset();
+ switch (type.value()) {
+ case LoadType::kI32Load8U:
+ movzxbl(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load8S:
+ movsxbl(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load16U:
+ movzxwl(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load16S:
+ movsxwl(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load:
+ movl(dst.gp(), src_op);
+ break;
+ case LoadType::kI64Load:
+ movq(dst.gp(), src_op);
+ break;
+ case LoadType::kF32Load:
+ Movss(dst.fp(), src_op);
+ break;
+ default:
+ UNREACHABLE();
}
}
-void LiftoffAssembler::Store(Register dst_addr, uint32_t offset_imm,
- Register src, int size,
- PinnedRegisterScope pinned) {
- Operand dst_op = Operand(dst_addr, offset_imm);
+void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
+ uint32_t offset_imm, LiftoffRegister src,
+ StoreType type, LiftoffRegList pinned,
+ uint32_t* protected_store_pc) {
+ Operand dst_op = offset_reg == no_reg
+ ? Operand(dst_addr, offset_imm)
+ : Operand(dst_addr, offset_reg, times_1, offset_imm);
if (offset_imm > kMaxInt) {
   // The immediate cannot be encoded in the operand. Load it to a register
// first.
- Register dst = GetUnusedRegister(kGpReg, pinned);
+ Register dst = GetUnusedRegister(kGpReg, pinned).gp();
movl(dst, Immediate(offset_imm));
+ if (offset_reg != no_reg) {
+ emit_ptrsize_add(dst, dst, offset_reg);
+ }
dst_op = Operand(dst_addr, dst, times_1, 0);
}
- DCHECK(size == 4 || size == 8);
- if (size == 4) {
- movl(dst_op, src);
- } else {
- movp(dst_op, src);
+ if (protected_store_pc) *protected_store_pc = pc_offset();
+ switch (type.value()) {
+ case StoreType::kI32Store8:
+ movb(dst_op, src.gp());
+ break;
+ case StoreType::kI32Store16:
+ movw(dst_op, src.gp());
+ break;
+ case StoreType::kI32Store:
+ movl(dst_op, src.gp());
+ break;
+ case StoreType::kI64Store:
+ movq(dst_op, src.gp());
+ break;
+ case StoreType::kF32Store:
+ Movss(dst_op, src.fp());
+ break;
+ default:
+ UNREACHABLE();
}
}
-void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
+void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx) {
- constexpr int32_t kStackSlotSize = 8;
- movl(dst, Operand(rbp, kStackSlotSize * (caller_slot_idx + 1)));
+ Operand src(rbp, kPointerSize * (caller_slot_idx + 1));
+ // TODO(clemensh): Handle different sizes here.
+ if (dst.is_gp()) {
+ movq(dst.gp(), src);
+ } else {
+ Movsd(dst.fp(), src);
+ }
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
DCHECK_NE(dst_index, src_index);
- if (cache_state_.has_unused_register()) {
- Register reg = GetUnusedRegister(kGpReg);
+ if (cache_state_.has_unused_register(kGpReg)) {
+ LiftoffRegister reg = GetUnusedRegister(kGpReg);
Fill(reg, src_index);
Spill(dst_index, reg);
} else {
@@ -120,24 +182,60 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
}
}
-void LiftoffAssembler::MoveToReturnRegister(Register reg) {
- // TODO(clemensh): Handle different types here.
- if (reg != rax) movl(rax, reg);
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
+ // TODO(wasm): Extract the destination register from the CallDescriptor.
+ // TODO(wasm): Add multi-return support.
+ LiftoffRegister dst =
+ reg.is_gp() ? LiftoffRegister(rax) : LiftoffRegister(xmm1);
+ if (reg != dst) Move(dst, reg);
+}
+
+void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
+ // The caller should check that the registers are not equal. For most
+  // occurrences, this is already guaranteed, so no need to check within this
+ // method.
+ DCHECK_NE(dst, src);
+ DCHECK_EQ(dst.reg_class(), src.reg_class());
+ // TODO(clemensh): Handle different sizes here.
+ if (dst.is_gp()) {
+ movq(dst.gp(), src.gp());
+ } else {
+ Movsd(dst.fp(), src.fp());
+ }
}
-void LiftoffAssembler::Spill(uint32_t index, Register reg) {
- // TODO(clemensh): Handle different types here.
- movl(liftoff::GetStackSlot(index), reg);
+void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
+ Operand dst = liftoff::GetStackSlot(index);
+ // TODO(clemensh): Handle different sizes here.
+ if (reg.is_gp()) {
+ movq(dst, reg.gp());
+ } else {
+ Movsd(dst, reg.fp());
+ }
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
- // TODO(clemensh): Handle different types here.
- movl(liftoff::GetStackSlot(index), Immediate(value.to_i32()));
+ Operand dst = liftoff::GetStackSlot(index);
+ switch (value.type()) {
+ case kWasmI32:
+ movl(dst, Immediate(value.to_i32()));
+ break;
+ case kWasmF32:
+ movl(dst, Immediate(value.to_f32_boxed().get_bits()));
+ break;
+ default:
+ UNREACHABLE();
+ }
}
-void LiftoffAssembler::Fill(Register reg, uint32_t index) {
- // TODO(clemensh): Handle different types here.
- movl(reg, liftoff::GetStackSlot(index));
+void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
+ Operand src = liftoff::GetStackSlot(index);
+ // TODO(clemensh): Handle different sizes here.
+ if (reg.is_gp()) {
+ movq(reg.gp(), src);
+ } else {
+ Movsd(reg.fp(), src);
+ }
}
void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {
@@ -176,11 +274,291 @@ COMMUTATIVE_I32_BINOP(or, or)
COMMUTATIVE_I32_BINOP(xor, xor)
// clang-format on
-#undef DEFAULT_I32_BINOP
+#undef COMMUTATIVE_I32_BINOP
+
+namespace liftoff {
+inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
+ Register lhs, Register rhs,
+ void (Assembler::*emit_shift)(Register)) {
+ // If dst is rcx, compute into the scratch register first, then move to rcx.
+ if (dst == rcx) {
+ assm->movl(kScratchRegister, lhs);
+ if (rhs != rcx) assm->movl(rcx, rhs);
+ (assm->*emit_shift)(kScratchRegister);
+ assm->movl(rcx, kScratchRegister);
+ return;
+ }
+
+ // Move rhs into rcx. If rcx is in use, move its content into the scratch
+ // register. If lhs is rcx, lhs is now the scratch register.
+ bool use_scratch = false;
+ if (rhs != rcx) {
+ use_scratch =
+ lhs == rcx || assm->cache_state()->is_used(LiftoffRegister(rcx));
+ if (use_scratch) assm->movl(kScratchRegister, rcx);
+ if (lhs == rcx) lhs = kScratchRegister;
+ assm->movl(rcx, rhs);
+ }
+
+ // Do the actual shift.
+ if (dst != lhs) assm->movl(dst, lhs);
+ (assm->*emit_shift)(dst);
+
+ // Restore rcx if needed.
+ if (use_scratch) assm->movl(rcx, kScratchRegister);
+}
+} // namespace liftoff
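
A note on why this helper exists: x86 variable shifts take their count only in the cl register, and 32-bit shifts use just the low five bits of that count, which matches wasm's shift-count-mod-32 semantics, so no explicit masking instruction is needed. A minimal standalone model of those semantics (plain C++, not part of this patch):

#include <cstdint>
#include <cstdio>

// Models wasm i32.shl: the count is taken mod 32, which is exactly what a
// hardware shll-by-cl does for 32-bit operands, so the emitter above only
// has to get the count into rcx, never to mask it.
uint32_t wasm_i32_shl(uint32_t lhs, uint32_t rhs) {
  return lhs << (rhs & 31);
}

int main() {
  std::printf("%u\n", wasm_i32_shl(1, 33));  // prints 2: count 33 wraps to 1
}
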
+
+void LiftoffAssembler::emit_i32_shl(Register dst, Register lhs, Register rhs) {
+ liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shll_cl);
+}
+
+void LiftoffAssembler::emit_i32_sar(Register dst, Register lhs, Register rhs) {
+ liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::sarl_cl);
+}
+
+void LiftoffAssembler::emit_i32_shr(Register dst, Register lhs, Register rhs) {
+ liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shrl_cl);
+}
+
+bool LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
+ testl(src, src);
+ setcc(zero, dst);
+ movzxbl(dst, dst);
+ return true;
+}
+
+bool LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
+ Label nonzero_input;
+ Label continuation;
+ testl(src, src);
+ j(not_zero, &nonzero_input, Label::kNear);
+ movl(dst, Immediate(32));
+ jmp(&continuation, Label::kNear);
+
+ bind(&nonzero_input);
+ // Get most significant bit set (MSBS).
+ bsrl(dst, src);
+ // CLZ = 31 - MSBS = MSBS ^ 31.
+ xorl(dst, Immediate(31));
+
+ bind(&continuation);
+ return true;
+}
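
The xorl trick relies on a small identity: bsrl returns the index of the most significant set bit, an index in [0, 31], and for such values 31 - index == index ^ 31 because 31 is 0b11111. A hedged portable sketch of the same computation:

#include <cassert>
#include <cstdint>

// Portable model of the bsr-based clz above: msb is what bsrl computes,
// and msb ^ 31 == 31 - msb for any msb in [0, 31].
uint32_t clz32(uint32_t x) {
  if (x == 0) return 32;             // the zero-input branch in the assembly
  uint32_t msb = 31;
  while (!(x & (1u << msb))) --msb;  // index of the most significant set bit
  return msb ^ 31;                   // == 31 - msb
}

int main() {
  assert(clz32(0) == 32);
  assert(clz32(1) == 31);
  assert(clz32(0x80000000u) == 0);
}
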
+
+bool LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
+ Label nonzero_input;
+ Label continuation;
+ testl(src, src);
+ j(not_zero, &nonzero_input, Label::kNear);
+ movl(dst, Immediate(32));
+ jmp(&continuation, Label::kNear);
+
+ bind(&nonzero_input);
+  // Get the least significant bit set, which equals the number of trailing zeros.
+ bsfl(dst, src);
+
+ bind(&continuation);
+ return true;
+}
+
+bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
+ if (!CpuFeatures::IsSupported(POPCNT)) return false;
+ CpuFeatureScope scope(this, POPCNT);
+ popcntl(dst, src);
+ return true;
+}
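
Returning false here follows the convention of these bool-returning emitters: signal that the instruction is unavailable so a generic fallback can be used instead (the fallback itself lives outside this hunk). For reference, a classic portable popcount of the kind such a fallback would compute:

#include <cstdint>

// SWAR popcount (Hacker's Delight): counts bits in parallel within the
// word; shown as an assumed fallback, not code from this patch.
uint32_t popcount32(uint32_t x) {
  x = x - ((x >> 1) & 0x55555555u);
  x = (x & 0x33333333u) + ((x >> 2) & 0x33333333u);
  x = (x + (x >> 4)) & 0x0f0f0f0fu;
  return (x * 0x01010101u) >> 24;
}

int main() { return popcount32(0xffu) == 8 ? 0 : 1; }
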
+
+void LiftoffAssembler::emit_ptrsize_add(Register dst, Register lhs,
+ Register rhs) {
+ if (lhs != dst) {
+ leap(dst, Operand(lhs, rhs, times_1, 0));
+ } else {
+ addp(dst, rhs);
+ }
+}
+
+void LiftoffAssembler::emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vaddss(dst, lhs, rhs);
+ } else if (dst == rhs) {
+ addss(dst, lhs);
+ } else {
+ if (dst != lhs) movss(dst, lhs);
+ addss(dst, rhs);
+ }
+}
+
+void LiftoffAssembler::emit_f32_sub(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vsubss(dst, lhs, rhs);
+ } else if (dst == rhs) {
+ movss(kScratchDoubleReg, rhs);
+ movss(dst, lhs);
+ subss(dst, kScratchDoubleReg);
+ } else {
+ if (dst != lhs) movss(dst, lhs);
+ subss(dst, rhs);
+ }
+}
+
+void LiftoffAssembler::emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmulss(dst, lhs, rhs);
+ } else if (dst == rhs) {
+ mulss(dst, lhs);
+ } else {
+ if (dst != lhs) movss(dst, lhs);
+ mulss(dst, rhs);
+ }
+}
+
+void LiftoffAssembler::emit_i32_test(Register reg) { testl(reg, reg); }
+
+void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
+ cmpl(lhs, rhs);
+}
+
+void LiftoffAssembler::emit_jump(Label* label) { jmp(label); }
+
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
+ j(cond, label);
+}
+
+void LiftoffAssembler::StackCheck(Label* ool_code) {
+ Register limit = GetUnusedRegister(kGpReg).gp();
+ LoadAddress(limit, ExternalReference::address_of_stack_limit(isolate()));
+ cmpp(rsp, Operand(limit, 0));
+ j(below_equal, ool_code);
+}
+
+void LiftoffAssembler::CallTrapCallbackForTesting() {
+ PrepareCallCFunction(0);
+ CallCFunction(
+ ExternalReference::wasm_call_trap_callback_for_testing(isolate()), 0);
+}
+
+void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
+ TurboAssembler::AssertUnreachable(reason);
+}
+
+void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
+ uint32_t src_index) {
+ switch (src.loc()) {
+ case VarState::kStack:
+ pushq(liftoff::GetStackSlot(src_index));
+ break;
+ case VarState::kRegister:
+ PushCallerFrameSlot(src.reg());
+ break;
+ case VarState::kI32Const:
+ pushq(Immediate(src.i32_const()));
+ break;
+ }
+}
+
+void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg) {
+ if (reg.is_gp()) {
+ pushq(reg.gp());
+ } else {
+ subp(rsp, Immediate(kPointerSize));
+ Movsd(Operand(rsp, 0), reg.fp());
+ }
+}
+
+void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
+ LiftoffRegList gp_regs = regs & kGpCacheRegList;
+ while (!gp_regs.is_empty()) {
+ LiftoffRegister reg = gp_regs.GetFirstRegSet();
+ pushq(reg.gp());
+ gp_regs.clear(reg);
+ }
+ LiftoffRegList fp_regs = regs & kFpCacheRegList;
+ unsigned num_fp_regs = fp_regs.GetNumRegsSet();
+ if (num_fp_regs) {
+ subp(rsp, Immediate(num_fp_regs * kStackSlotSize));
+ unsigned offset = 0;
+ while (!fp_regs.is_empty()) {
+ LiftoffRegister reg = fp_regs.GetFirstRegSet();
+ Movsd(Operand(rsp, offset), reg.fp());
+ fp_regs.clear(reg);
+ offset += sizeof(double);
+ }
+ DCHECK_EQ(offset, num_fp_regs * sizeof(double));
+ }
+}
+
+void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
+ LiftoffRegList fp_regs = regs & kFpCacheRegList;
+ unsigned fp_offset = 0;
+ while (!fp_regs.is_empty()) {
+ LiftoffRegister reg = fp_regs.GetFirstRegSet();
+ Movsd(reg.fp(), Operand(rsp, fp_offset));
+ fp_regs.clear(reg);
+ fp_offset += sizeof(double);
+ }
+ if (fp_offset) addp(rsp, Immediate(fp_offset));
+ LiftoffRegList gp_regs = regs & kGpCacheRegList;
+ while (!gp_regs.is_empty()) {
+ LiftoffRegister reg = gp_regs.GetLastRegSet();
+ popq(reg.gp());
+ gp_regs.clear(reg);
+ }
+}
+
+void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
+ DCHECK_LT(num_stack_slots, (1 << 16) / kPointerSize); // 16 bit immediate
+ ret(static_cast<int>(num_stack_slots * kPointerSize));
+}
+
+void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
+ for (size_t param = 0; param < num_params; ++param) {
+ pushq(args[param]);
+ }
+ movq(liftoff::kCCallLastArgAddrReg, rsp);
+ PrepareCallCFunction(num_params);
+}
+
+void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
+ uint32_t num_params) {
+ int offset = kPointerSize * static_cast<int>(num_params - 1 - param_idx);
+ leaq(dst, Operand(liftoff::kCCallLastArgAddrReg, offset));
+}
+
+void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
+ uint32_t param_idx,
+ uint32_t num_params) {
+ // On x64, all C call arguments fit in registers.
+ UNREACHABLE();
+}
+
+void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
+ CallCFunction(ext_ref, static_cast<int>(num_params));
+}
+
+void LiftoffAssembler::CallNativeWasmCode(Address addr) {
+ near_call(addr, RelocInfo::WASM_CALL);
+}
+
+void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
+ // Set context to zero.
+ xorp(rsi, rsi);
+ CallRuntimeDelayed(zone, fid);
+}
+
+void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
+ subp(rsp, Immediate(size));
+ movp(addr, rsp);
+}
-void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {
- testl(reg, reg);
- j(zero, label);
+void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
+ addp(rsp, Immediate(size));
}
} // namespace wasm
diff --git a/deps/v8/src/wasm/decoder.h b/deps/v8/src/wasm/decoder.h
index 9c0fa268f3..242130b035 100644
--- a/deps/v8/src/wasm/decoder.h
+++ b/deps/v8/src/wasm/decoder.h
@@ -19,7 +19,6 @@ namespace v8 {
namespace internal {
namespace wasm {
-#if DEBUG
#define TRACE(...) \
do { \
if (FLAG_trace_wasm_decoder) PrintF(__VA_ARGS__); \
@@ -28,10 +27,6 @@ namespace wasm {
do { \
if (FLAG_trace_wasm_decoder && (cond)) PrintF(__VA_ARGS__); \
} while (false)
-#else
-#define TRACE(...)
-#define TRACE_IF(...)
-#endif
// A {DecodeResult} only stores the failure / success status, but no data. Thus
// we use {nullptr_t} as data value, such that the only valid data stored in
@@ -50,16 +45,21 @@ class Decoder {
enum TraceFlag : bool { kTrace = true, kNoTrace = false };
Decoder(const byte* start, const byte* end, uint32_t buffer_offset = 0)
- : start_(start), pc_(start), end_(end), buffer_offset_(buffer_offset) {}
+ : Decoder(start, start, end, buffer_offset) {}
Decoder(const byte* start, const byte* pc, const byte* end,
uint32_t buffer_offset = 0)
- : start_(start), pc_(pc), end_(end), buffer_offset_(buffer_offset) {}
+ : start_(start), pc_(pc), end_(end), buffer_offset_(buffer_offset) {
+ DCHECK_LE(start, pc);
+ DCHECK_LE(pc, end);
+ DCHECK_EQ(static_cast<uint32_t>(end - start), end - start);
+ }
virtual ~Decoder() {}
inline bool validate_size(const byte* pc, uint32_t length, const char* msg) {
DCHECK_LE(start_, pc);
- if (V8_UNLIKELY(pc + length > end_)) {
+ DCHECK_LE(pc, end_);
+ if (V8_UNLIKELY(length > static_cast<uint32_t>(end_ - pc))) {
error(pc, msg);
return false;
}
@@ -166,16 +166,12 @@ class Decoder {
// Check that at least {size} bytes exist between {pc_} and {end_}.
bool checkAvailable(uint32_t size) {
- uintptr_t pc_overflow_value = std::numeric_limits<uintptr_t>::max() - size;
- if ((uintptr_t)pc_ > pc_overflow_value) {
- errorf(pc_, "reading %u bytes would underflow/overflow", size);
- return false;
- } else if (pc_ < start_ || end_ < (pc_ + size)) {
+ DCHECK_LE(pc_, end_);
+ if (V8_UNLIKELY(size > static_cast<uint32_t>(end_ - pc_))) {
errorf(pc_, "expected %u bytes, fell off end", size);
return false;
- } else {
- return true;
}
+ return true;
}
void error(const char* msg) { errorf(pc_, "%s", msg); }
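
The rewritten check is the standard overflow-safe form of a bounds test: rather than computing pc_ + size, which can wrap the pointer, it compares size against the remaining length end_ - pc_, which the new DCHECK guarantees is non-negative. Both patterns side by side, as an illustrative sketch:

#include <cstddef>
#include <cstdint>

// Unsafe pattern: pc + size may overflow and wrap, so the comparison can
// pass even though fewer than `size` bytes remain.
bool available_unsafe(const uint8_t* pc, const uint8_t* end, uint32_t size) {
  return pc + size <= end;  // undefined behavior once pc + size overflows
}

// Safe pattern (what the patch adopts): end - pc is a valid distance
// whenever pc <= end, and comparing sizes cannot overflow.
bool available_safe(const uint8_t* pc, const uint8_t* end, uint32_t size) {
  return size <= static_cast<size_t>(end - pc);
}

int main() {
  uint8_t buf[16] = {0};
  return available_safe(buf, buf + 16, 8) ? 0 : 1;
}
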
@@ -232,6 +228,8 @@ class Decoder {
// Resets the boundaries of this decoder.
void Reset(const byte* start, const byte* end, uint32_t buffer_offset = 0) {
+ DCHECK_LE(start, end);
+ DCHECK_EQ(static_cast<uint32_t>(end - start), end - start);
start_ = start;
pc_ = start;
end_ = end;
@@ -316,7 +314,8 @@ class Decoder {
static_assert(byte_index < kMaxLength, "invalid template instantiation");
constexpr int shift = byte_index * 7;
constexpr bool is_last_byte = byte_index == kMaxLength - 1;
- const bool at_end = validate && pc >= end_;
+ DCHECK_LE(pc, end_);
+ const bool at_end = validate && pc == end_;
byte b = 0;
if (!at_end) {
DCHECK_LT(pc, end_);
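
Background for the shift = byte_index * 7 logic in this hunk: wasm integers are LEB128-encoded, seven payload bits per byte with the top bit as a continuation flag, so a u32 needs at most five bytes. A self-contained sketch of the loop this template unrolls (it assumes the bounds checks of the real decoder have already run):

#include <cstdint>

// Minimal LEB128 u32 decoder: each byte contributes 7 payload bits and the
// high bit marks continuation; kMaxLength is 5 because ceil(32 / 7) == 5.
// Assumes at least 5 readable bytes.
uint32_t read_u32_leb(const uint8_t* pc, unsigned* length) {
  uint32_t result = 0;
  for (unsigned i = 0; i < 5; ++i) {
    uint8_t b = pc[i];
    result |= static_cast<uint32_t>(b & 0x7f) << (i * 7);
    if ((b & 0x80) == 0) {
      *length = i + 1;
      return result;
    }
  }
  *length = 5;
  return result;  // a validating decoder would flag the missing terminator
}

int main() {
  const uint8_t bytes[5] = {0xE5, 0x8E, 0x26, 0, 0};  // 624485 in LEB128
  unsigned len = 0;
  return read_u32_leb(bytes, &len) == 624485 && len == 3 ? 0 : 1;
}
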
diff --git a/deps/v8/src/wasm/function-body-decoder-impl.h b/deps/v8/src/wasm/function-body-decoder-impl.h
index ffbf85cde8..04d918b0a4 100644
--- a/deps/v8/src/wasm/function-body-decoder-impl.h
+++ b/deps/v8/src/wasm/function-body-decoder-impl.h
@@ -22,14 +22,12 @@ namespace wasm {
struct WasmGlobal;
struct WasmException;
-#if DEBUG
#define TRACE(...) \
do { \
if (FLAG_trace_wasm_decoder) PrintF(__VA_ARGS__); \
} while (false)
-#else
-#define TRACE(...)
-#endif
+
+#define TRACE_INST_FORMAT " @%-8d #%-20s|"
// Return the evaluation of `condition` if validate==true, DCHECK that it's
// true and always return true otherwise.
@@ -250,10 +248,11 @@ struct CallIndirectOperand {
uint32_t table_index;
uint32_t index;
FunctionSig* sig = nullptr;
- unsigned length;
+ unsigned length = 0;
inline CallIndirectOperand(Decoder* decoder, const byte* pc) {
unsigned len = 0;
index = decoder->read_u32v<validate>(pc + 1, &len, "signature index");
+ if (!VALIDATE(decoder->ok())) return;
table_index = decoder->read_u8<validate>(pc + 1 + len, "table index");
if (!VALIDATE(table_index == 0)) {
decoder->errorf(pc + 1 + len, "expected table index 0, found %u",
@@ -342,7 +341,7 @@ template <Decoder::ValidateFlag validate>
struct MemoryAccessOperand {
uint32_t alignment;
uint32_t offset;
- unsigned length;
+ unsigned length = 0;
inline MemoryAccessOperand(Decoder* decoder, const byte* pc,
uint32_t max_alignment) {
unsigned alignment_length;
@@ -354,6 +353,7 @@ struct MemoryAccessOperand {
"actual alignment is %u",
max_alignment, alignment);
}
+ if (!VALIDATE(decoder->ok())) return;
unsigned offset_length;
offset = decoder->read_u32v<validate>(pc + 1 + alignment_length,
&offset_length, "offset");
@@ -386,11 +386,12 @@ struct SimdShiftOperand {
// Operand for SIMD S8x16 shuffle operations.
template <Decoder::ValidateFlag validate>
struct Simd8x16ShuffleOperand {
- uint8_t shuffle[kSimd128Size];
+ uint8_t shuffle[kSimd128Size] = {0};
inline Simd8x16ShuffleOperand(Decoder* decoder, const byte* pc) {
for (uint32_t i = 0; i < kSimd128Size; ++i) {
shuffle[i] = decoder->read_u8<validate>(pc + 2 + i, "shuffle");
+ if (!VALIDATE(decoder->ok())) return;
}
}
};
@@ -550,6 +551,7 @@ struct ControlWithNamedConstructors : public ControlBase<Value> {
F(StartFunctionBody, Control* block) \
F(FinishFunction) \
F(OnFirstError) \
+ F(NextInstruction, WasmOpcode) \
/* Control: */ \
F(Block, Control* block) \
F(Loop, Control* block) \
@@ -582,12 +584,10 @@ struct ControlWithNamedConstructors : public ControlBase<Value> {
F(BrIf, const Value& cond, Control* target) \
F(BrTable, const BranchTableOperand<validate>& operand, const Value& key) \
F(Else, Control* if_block) \
- F(LoadMem, ValueType type, MachineType mem_type, \
- const MemoryAccessOperand<validate>& operand, const Value& index, \
- Value* result) \
- F(StoreMem, ValueType type, MachineType mem_type, \
- const MemoryAccessOperand<validate>& operand, const Value& index, \
- const Value& value) \
+ F(LoadMem, LoadType type, const MemoryAccessOperand<validate>& operand, \
+ const Value& index, Value* result) \
+ F(StoreMem, StoreType type, const MemoryAccessOperand<validate>& operand, \
+ const Value& index, const Value& value) \
F(CurrentMemoryPages, Value* result) \
F(GrowMemory, const Value& value, Value* result) \
F(CallDirect, const CallFunctionOperand<validate>& operand, \
@@ -974,6 +974,8 @@ class WasmDecoder : public Decoder {
return 5;
case kExprF64Const:
return 9;
+ case kNumericPrefix:
+ return 2;
case kSimdPrefix: {
byte simd_index = decoder->read_u8<validate>(pc + 1, "simd_index");
WasmOpcode opcode =
@@ -1026,9 +1028,6 @@ class WasmDecoder : public Decoder {
std::pair<uint32_t, uint32_t> StackEffect(const byte* pc) {
WasmOpcode opcode = static_cast<WasmOpcode>(*pc);
- if (WasmOpcodes::IsPrefixOpcode(opcode)) {
- opcode = static_cast<WasmOpcode>(opcode << 8 | *(pc + 1));
- }
// Handle "simple" opcodes with a fixed signature first.
FunctionSig* sig = WasmOpcodes::Signature(opcode);
if (!sig) sig = WasmOpcodes::AsmjsSignature(opcode);
@@ -1039,10 +1038,8 @@ class WasmDecoder : public Decoder {
switch (opcode) {
case kExprSelect:
return {3, 1};
- case kExprS128StoreMem:
FOREACH_STORE_MEM_OPCODE(DECLARE_OPCODE_CASE)
return {2, 0};
- case kExprS128LoadMem:
FOREACH_LOAD_MEM_OPCODE(DECLARE_OPCODE_CASE)
case kExprTeeLocal:
case kExprGrowMemory:
@@ -1083,6 +1080,24 @@ class WasmDecoder : public Decoder {
case kExprReturn:
case kExprUnreachable:
return {0, 0};
+ case kNumericPrefix:
+ case kAtomicPrefix:
+ case kSimdPrefix: {
+ opcode = static_cast<WasmOpcode>(opcode << 8 | *(pc + 1));
+ switch (opcode) {
+ case kExprI32AtomicStore:
+ case kExprI32AtomicStore8U:
+ case kExprI32AtomicStore16U:
+ case kExprS128StoreMem:
+ return {2, 0};
+ default: {
+ sig = WasmOpcodes::Signature(opcode);
+ if (sig) {
+ return {sig->parameter_count(), sig->return_count()};
+ }
+ }
+ }
+ }
default:
V8_Fatal(__FILE__, __LINE__, "unimplemented opcode: %x (%s)", opcode,
WasmOpcodes::OpcodeName(opcode));
@@ -1142,9 +1157,6 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DCHECK(stack_.empty());
DCHECK(control_.empty());
- if (FLAG_wasm_code_fuzzer_gen_test) {
- PrintRawWasmCode(this->start_, this->end_);
- }
base::ElapsedTimer decode_timer;
if (FLAG_trace_wasm_decode_time) {
decode_timer.Start();
@@ -1273,6 +1285,32 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return true;
}
+ class TraceLine {
+ public:
+ static constexpr int kMaxLen = 512;
+ ~TraceLine() {
+ if (!FLAG_trace_wasm_decoder) return;
+ PrintF("%.*s\n", len_, buffer_);
+ }
+
+ // Appends a formatted string.
+ PRINTF_FORMAT(2, 3)
+ void Append(const char* format, ...) {
+ if (!FLAG_trace_wasm_decoder) return;
+ va_list va_args;
+ va_start(va_args, format);
+ size_t remaining_len = kMaxLen - len_;
+ Vector<char> remaining_msg_space(buffer_ + len_, remaining_len);
+ int len = VSNPrintF(remaining_msg_space, format, va_args);
+ va_end(va_args);
+ len_ += len < 0 ? remaining_len : len;
+ }
+
+ private:
+ char buffer_[kMaxLen];
+ int len_ = 0;
+ };
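
TraceLine exists so that the many Append calls made while decoding one instruction come out as a single line, printed only in the destructor and only when --trace-wasm-decoder is set. A standalone, simplified model of the same RAII buffering pattern (always enabled, hypothetical names):

#include <cstdarg>
#include <cstdio>

// Pieces accumulate in a fixed buffer and are emitted as one line when the
// object goes out of scope.
class LineBuffer {
 public:
  ~LineBuffer() { std::printf("%.*s\n", len_, buf_); }

  void Append(const char* fmt, ...) {
    va_list args;
    va_start(args, fmt);
    int avail = static_cast<int>(sizeof buf_) - len_;
    int n = std::vsnprintf(buf_ + len_, avail, fmt, args);
    va_end(args);
    if (n < 0) return;
    // vsnprintf truncates its output but returns the would-be length.
    len_ += n < avail ? n : avail - 1;
  }

 private:
  char buf_[512];
  int len_ = 0;
};

int main() {
  LineBuffer line;
  line.Append(" @%-8d #%-20s|", 0, "i32.add");
  line.Append(" stack: %d", 2);
}  // destructor prints both pieces as a single line
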
+
// Decodes the body of a function.
void DecodeFunctionBody() {
TRACE("wasm-decode %p...%p (module+%u, %d bytes)\n",
@@ -1294,11 +1332,18 @@ class WasmFullDecoder : public WasmDecoder<validate> {
while (this->pc_ < this->end_) { // decoding loop.
unsigned len = 1;
WasmOpcode opcode = static_cast<WasmOpcode>(*this->pc_);
+
+ CALL_INTERFACE_IF_REACHABLE(NextInstruction, opcode);
+
#if DEBUG
- if (FLAG_trace_wasm_decoder && !WasmOpcodes::IsPrefixOpcode(opcode)) {
- TRACE(" @%-8d #%-20s|", startrel(this->pc_),
- WasmOpcodes::OpcodeName(opcode));
+ TraceLine trace_msg;
+#define TRACE_PART(...) trace_msg.Append(__VA_ARGS__)
+ if (!WasmOpcodes::IsPrefixOpcode(opcode)) {
+ TRACE_PART(TRACE_INST_FORMAT, startrel(this->pc_),
+ WasmOpcodes::OpcodeName(opcode));
}
+#else
+#define TRACE_PART(...)
#endif
FunctionSig* sig = WasmOpcodes::Signature(opcode);
@@ -1430,8 +1475,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
this->error(this->pc_, "else already present for if");
break;
}
- c->kind = kControlIfElse;
FallThruTo(c);
+ c->kind = kControlIfElse;
CALL_INTERFACE_IF_PARENT_REACHABLE(Else, c);
PushMergeValues(c, &c->start_merge);
c->reachability = control_at(1)->innerReachability();
@@ -1450,6 +1495,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (c->is_onearmed_if()) {
// Emulate empty else arm.
FallThruTo(c);
+ if (this->failed()) break;
CALL_INTERFACE_IF_PARENT_REACHABLE(Else, c);
PushMergeValues(c, &c->start_merge);
c->reachability = control_at(1)->innerReachability();
@@ -1467,10 +1513,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
last_end_found_ = true;
// The result of the block is the return value.
- TRACE(" @%-8d #xx:%-20s|", startrel(this->pc_),
- "(implicit) return");
+ TRACE_PART("\n" TRACE_INST_FORMAT, startrel(this->pc_),
+ "(implicit) return");
DoReturn(c, true);
- TRACE("\n");
}
PopControl(c);
@@ -1630,73 +1675,73 @@ class WasmFullDecoder : public WasmDecoder<validate> {
break;
}
case kExprI32LoadMem8S:
- len = DecodeLoadMem(kWasmI32, MachineType::Int8());
+ len = 1 + DecodeLoadMem(LoadType::kI32Load8S);
break;
case kExprI32LoadMem8U:
- len = DecodeLoadMem(kWasmI32, MachineType::Uint8());
+ len = 1 + DecodeLoadMem(LoadType::kI32Load8U);
break;
case kExprI32LoadMem16S:
- len = DecodeLoadMem(kWasmI32, MachineType::Int16());
+ len = 1 + DecodeLoadMem(LoadType::kI32Load16S);
break;
case kExprI32LoadMem16U:
- len = DecodeLoadMem(kWasmI32, MachineType::Uint16());
+ len = 1 + DecodeLoadMem(LoadType::kI32Load16U);
break;
case kExprI32LoadMem:
- len = DecodeLoadMem(kWasmI32, MachineType::Int32());
+ len = 1 + DecodeLoadMem(LoadType::kI32Load);
break;
case kExprI64LoadMem8S:
- len = DecodeLoadMem(kWasmI64, MachineType::Int8());
+ len = 1 + DecodeLoadMem(LoadType::kI64Load8S);
break;
case kExprI64LoadMem8U:
- len = DecodeLoadMem(kWasmI64, MachineType::Uint8());
+ len = 1 + DecodeLoadMem(LoadType::kI64Load8U);
break;
case kExprI64LoadMem16S:
- len = DecodeLoadMem(kWasmI64, MachineType::Int16());
+ len = 1 + DecodeLoadMem(LoadType::kI64Load16S);
break;
case kExprI64LoadMem16U:
- len = DecodeLoadMem(kWasmI64, MachineType::Uint16());
+ len = 1 + DecodeLoadMem(LoadType::kI64Load16U);
break;
case kExprI64LoadMem32S:
- len = DecodeLoadMem(kWasmI64, MachineType::Int32());
+ len = 1 + DecodeLoadMem(LoadType::kI64Load32S);
break;
case kExprI64LoadMem32U:
- len = DecodeLoadMem(kWasmI64, MachineType::Uint32());
+ len = 1 + DecodeLoadMem(LoadType::kI64Load32U);
break;
case kExprI64LoadMem:
- len = DecodeLoadMem(kWasmI64, MachineType::Int64());
+ len = 1 + DecodeLoadMem(LoadType::kI64Load);
break;
case kExprF32LoadMem:
- len = DecodeLoadMem(kWasmF32, MachineType::Float32());
+ len = 1 + DecodeLoadMem(LoadType::kF32Load);
break;
case kExprF64LoadMem:
- len = DecodeLoadMem(kWasmF64, MachineType::Float64());
+ len = 1 + DecodeLoadMem(LoadType::kF64Load);
break;
case kExprI32StoreMem8:
- len = DecodeStoreMem(kWasmI32, MachineType::Int8());
+ len = 1 + DecodeStoreMem(StoreType::kI32Store8);
break;
case kExprI32StoreMem16:
- len = DecodeStoreMem(kWasmI32, MachineType::Int16());
+ len = 1 + DecodeStoreMem(StoreType::kI32Store16);
break;
case kExprI32StoreMem:
- len = DecodeStoreMem(kWasmI32, MachineType::Int32());
+ len = 1 + DecodeStoreMem(StoreType::kI32Store);
break;
case kExprI64StoreMem8:
- len = DecodeStoreMem(kWasmI64, MachineType::Int8());
+ len = 1 + DecodeStoreMem(StoreType::kI64Store8);
break;
case kExprI64StoreMem16:
- len = DecodeStoreMem(kWasmI64, MachineType::Int16());
+ len = 1 + DecodeStoreMem(StoreType::kI64Store16);
break;
case kExprI64StoreMem32:
- len = DecodeStoreMem(kWasmI64, MachineType::Int32());
+ len = 1 + DecodeStoreMem(StoreType::kI64Store32);
break;
case kExprI64StoreMem:
- len = DecodeStoreMem(kWasmI64, MachineType::Int64());
+ len = 1 + DecodeStoreMem(StoreType::kI64Store);
break;
case kExprF32StoreMem:
- len = DecodeStoreMem(kWasmF32, MachineType::Float32());
+ len = 1 + DecodeStoreMem(StoreType::kF32Store);
break;
case kExprF64StoreMem:
- len = DecodeStoreMem(kWasmF64, MachineType::Float64());
+ len = 1 + DecodeStoreMem(StoreType::kF64Store);
break;
case kExprGrowMemory: {
if (!CheckHasMemory()) break;
@@ -1742,14 +1787,31 @@ class WasmFullDecoder : public WasmDecoder<validate> {
args_.data(), returns);
break;
}
+ case kNumericPrefix: {
+ CHECK_PROTOTYPE_OPCODE(sat_f2i_conversions);
+ ++len;
+ byte numeric_index = this->template read_u8<validate>(
+ this->pc_ + 1, "numeric index");
+ opcode = static_cast<WasmOpcode>(opcode << 8 | numeric_index);
+ TRACE_PART(TRACE_INST_FORMAT, startrel(this->pc_),
+ WasmOpcodes::OpcodeName(opcode));
+ sig = WasmOpcodes::Signature(opcode);
+ if (sig == nullptr) {
+ this->errorf(this->pc_, "Unrecognized numeric opcode: %x\n",
+ opcode);
+ return;
+ }
+ BuildSimpleOperator(opcode, sig);
+ break;
+ }
case kSimdPrefix: {
CHECK_PROTOTYPE_OPCODE(simd);
len++;
byte simd_index =
this->template read_u8<validate>(this->pc_ + 1, "simd index");
opcode = static_cast<WasmOpcode>(opcode << 8 | simd_index);
- TRACE(" @%-4d #%-20s|", startrel(this->pc_),
- WasmOpcodes::OpcodeName(opcode));
+ TRACE_PART(TRACE_INST_FORMAT, startrel(this->pc_),
+ WasmOpcodes::OpcodeName(opcode));
len += DecodeSimdOpcode(opcode);
break;
}
@@ -1760,8 +1822,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
byte atomic_index =
this->template read_u8<validate>(this->pc_ + 1, "atomic index");
opcode = static_cast<WasmOpcode>(opcode << 8 | atomic_index);
- TRACE(" @%-4d #%-20s|", startrel(this->pc_),
- WasmOpcodes::OpcodeName(opcode));
+ TRACE_PART(TRACE_INST_FORMAT, startrel(this->pc_),
+ WasmOpcodes::OpcodeName(opcode));
len += DecodeAtomicOpcode(opcode);
break;
}
@@ -1782,62 +1844,64 @@ class WasmFullDecoder : public WasmDecoder<validate> {
#if DEBUG
if (FLAG_trace_wasm_decoder) {
- PrintF(" ");
+ TRACE_PART(" ");
for (Control& c : control_) {
switch (c.kind) {
case kControlIf:
- PrintF("I");
+ TRACE_PART("I");
break;
case kControlBlock:
- PrintF("B");
+ TRACE_PART("B");
break;
case kControlLoop:
- PrintF("L");
+ TRACE_PART("L");
break;
case kControlTry:
- PrintF("T");
+ TRACE_PART("T");
break;
default:
break;
}
- if (c.start_merge.arity) PrintF("%u-", c.start_merge.arity);
- PrintF("%u", c.end_merge.arity);
- if (!c.reachable()) PrintF("%c", c.unreachable() ? '*' : '#');
+ if (c.start_merge.arity) TRACE_PART("%u-", c.start_merge.arity);
+ TRACE_PART("%u", c.end_merge.arity);
+ if (!c.reachable()) TRACE_PART("%c", c.unreachable() ? '*' : '#');
}
- PrintF(" | ");
+ TRACE_PART(" | ");
for (size_t i = 0; i < stack_.size(); ++i) {
auto& val = stack_[i];
WasmOpcode opcode = static_cast<WasmOpcode>(*val.pc);
if (WasmOpcodes::IsPrefixOpcode(opcode)) {
opcode = static_cast<WasmOpcode>(opcode << 8 | *(val.pc + 1));
}
- PrintF(" %c@%d:%s", WasmOpcodes::ShortNameOf(val.type),
- static_cast<int>(val.pc - this->start_),
- WasmOpcodes::OpcodeName(opcode));
+ TRACE_PART(" %c@%d:%s", WasmOpcodes::ShortNameOf(val.type),
+ static_cast<int>(val.pc - this->start_),
+ WasmOpcodes::OpcodeName(opcode));
+ // If the decoder failed, don't try to decode the operands, as this
+ // can trigger a DCHECK failure.
+ if (this->failed()) continue;
switch (opcode) {
case kExprI32Const: {
- ImmI32Operand<validate> operand(this, val.pc);
- PrintF("[%d]", operand.value);
+ ImmI32Operand<Decoder::kNoValidate> operand(this, val.pc);
+ TRACE_PART("[%d]", operand.value);
break;
}
case kExprGetLocal:
case kExprSetLocal:
case kExprTeeLocal: {
- LocalIndexOperand<Decoder::kValidate> operand(this, val.pc);
- PrintF("[%u]", operand.index);
+ LocalIndexOperand<Decoder::kNoValidate> operand(this, val.pc);
+ TRACE_PART("[%u]", operand.index);
break;
}
case kExprGetGlobal:
case kExprSetGlobal: {
- GlobalIndexOperand<validate> operand(this, val.pc);
- PrintF("[%u]", operand.index);
+ GlobalIndexOperand<Decoder::kNoValidate> operand(this, val.pc);
+ TRACE_PART("[%u]", operand.index);
break;
}
default:
break;
}
}
- PrintF("\n");
}
#endif
this->pc_ += len;
@@ -1941,49 +2005,23 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
}
- int DecodeLoadMem(ValueType type, MachineType mem_type) {
+ int DecodeLoadMem(LoadType type, int prefix_len = 0) {
if (!CheckHasMemory()) return 0;
- MemoryAccessOperand<validate> operand(
- this, this->pc_, ElementSizeLog2Of(mem_type.representation()));
-
- auto index = Pop(0, kWasmI32);
- auto* result = Push(type);
- CALL_INTERFACE_IF_REACHABLE(LoadMem, type, mem_type, operand, index,
- result);
- return 1 + operand.length;
- }
-
- int DecodeStoreMem(ValueType type, MachineType mem_type) {
- if (!CheckHasMemory()) return 0;
- MemoryAccessOperand<validate> operand(
- this, this->pc_, ElementSizeLog2Of(mem_type.representation()));
- auto value = Pop(1, type);
- auto index = Pop(0, kWasmI32);
- CALL_INTERFACE_IF_REACHABLE(StoreMem, type, mem_type, operand, index,
- value);
- return 1 + operand.length;
- }
-
- int DecodePrefixedLoadMem(ValueType type, MachineType mem_type) {
- if (!CheckHasMemory()) return 0;
- MemoryAccessOperand<validate> operand(
- this, this->pc_ + 1, ElementSizeLog2Of(mem_type.representation()));
-
+ MemoryAccessOperand<validate> operand(this, this->pc_ + prefix_len,
+ type.size_log_2());
auto index = Pop(0, kWasmI32);
- auto* result = Push(type);
- CALL_INTERFACE_IF_REACHABLE(LoadMem, type, mem_type, operand, index,
- result);
+ auto* result = Push(type.value_type());
+ CALL_INTERFACE_IF_REACHABLE(LoadMem, type, operand, index, result);
return operand.length;
}
- int DecodePrefixedStoreMem(ValueType type, MachineType mem_type) {
+ int DecodeStoreMem(StoreType store, int prefix_len = 0) {
if (!CheckHasMemory()) return 0;
- MemoryAccessOperand<validate> operand(
- this, this->pc_ + 1, ElementSizeLog2Of(mem_type.representation()));
- auto value = Pop(1, type);
+ MemoryAccessOperand<validate> operand(this, this->pc_ + prefix_len,
+ store.size_log_2());
+ auto value = Pop(1, store.value_type());
auto index = Pop(0, kWasmI32);
- CALL_INTERFACE_IF_REACHABLE(StoreMem, type, mem_type, operand, index,
- value);
+ CALL_INTERFACE_IF_REACHABLE(StoreMem, store, operand, index, value);
return operand.length;
}
@@ -2073,10 +2111,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
break;
}
case kExprS128LoadMem:
- len = DecodePrefixedLoadMem(kWasmS128, MachineType::Simd128());
+ len = DecodeLoadMem(LoadType::kS128Load, 1);
break;
case kExprS128StoreMem:
- len = DecodePrefixedStoreMem(kWasmS128, MachineType::Simd128());
+ len = DecodeStoreMem(StoreType::kS128Store, 1);
break;
default: {
FunctionSig* sig = WasmOpcodes::Signature(opcode);
@@ -2347,6 +2385,7 @@ class EmptyInterface {
};
#undef TRACE
+#undef TRACE_INST_FORMAT
#undef VALIDATE
#undef CHECK_PROTOTYPE_OPCODE
#undef OPCODE_ERROR
diff --git a/deps/v8/src/wasm/function-body-decoder.cc b/deps/v8/src/wasm/function-body-decoder.cc
index e9130f001d..57ee78f91c 100644
--- a/deps/v8/src/wasm/function-body-decoder.cc
+++ b/deps/v8/src/wasm/function-body-decoder.cc
@@ -142,11 +142,11 @@ class WasmGraphBuildingInterface {
block->end_env = break_env;
}
- void FinishFunction(Decoder* decoder) {
- builder_->PatchInStackCheckIfNeeded();
- }
+ void FinishFunction(Decoder*) { builder_->PatchInStackCheckIfNeeded(); }
+
+ void OnFirstError(Decoder*) {}
- void OnFirstError(Decoder* decoder) {}
+ void NextInstruction(Decoder*, WasmOpcode) {}
void Block(Decoder* decoder, Control* block) {
// The break environment is the outer environment.
@@ -215,8 +215,8 @@ class WasmGraphBuildingInterface {
void BinOp(Decoder* decoder, WasmOpcode opcode, FunctionSig* sig,
const Value& lhs, const Value& rhs, Value* result) {
- result->node =
- BUILD(Binop, opcode, lhs.node, rhs.node, decoder->position());
+ auto node = BUILD(Binop, opcode, lhs.node, rhs.node, decoder->position());
+ if (result) result->node = node;
}
void I32Const(Decoder* decoder, Value* result, int32_t value) {
@@ -340,18 +340,20 @@ class WasmGraphBuildingInterface {
SetEnv(if_block->false_env);
}
- void LoadMem(Decoder* decoder, ValueType type, MachineType mem_type,
+ void LoadMem(Decoder* decoder, LoadType type,
const MemoryAccessOperand<validate>& operand, const Value& index,
Value* result) {
- result->node = BUILD(LoadMem, type, mem_type, index.node, operand.offset,
- operand.alignment, decoder->position());
+ result->node =
+ BUILD(LoadMem, type.value_type(), type.mem_type(), index.node,
+ operand.offset, operand.alignment, decoder->position());
}
- void StoreMem(Decoder* decoder, ValueType type, MachineType mem_type,
+ void StoreMem(Decoder* decoder, StoreType type,
const MemoryAccessOperand<validate>& operand,
const Value& index, const Value& value) {
- BUILD(StoreMem, mem_type, index.node, operand.offset, operand.alignment,
- value.node, decoder->position(), type);
+ BUILD(StoreMem, type.mem_rep(), index.node, operand.offset,
+ operand.alignment, value.node, decoder->position(),
+ type.value_type());
}
void CurrentMemoryPages(Decoder* decoder, Value* result) {
@@ -729,13 +731,12 @@ class WasmGraphBuildingInterface {
return loop_body_env;
}
- // Create a complete copy of the {from}.
+ // Create a complete copy of {from}.
SsaEnv* Split(Decoder* decoder, SsaEnv* from) {
DCHECK_NOT_NULL(from);
SsaEnv* result =
reinterpret_cast<SsaEnv*>(decoder->zone()->New(sizeof(SsaEnv)));
- // The '+ 2' here is to accommodate for mem_size and mem_start nodes.
- size_t size = sizeof(TFNode*) * (decoder->NumLocals());
+ size_t size = sizeof(TFNode*) * decoder->NumLocals();
result->control = from->control;
result->effect = from->effect;
@@ -878,7 +879,8 @@ std::pair<uint32_t, uint32_t> StackEffect(const WasmModule* module,
void PrintRawWasmCode(const byte* start, const byte* end) {
AccountingAllocator allocator;
- PrintRawWasmCode(&allocator, FunctionBodyForTesting(start, end), nullptr);
+ PrintRawWasmCode(&allocator, FunctionBody{nullptr, 0, start, end}, nullptr,
+ kPrintLocals);
}
namespace {
@@ -897,7 +899,8 @@ const char* RawOpcodeName(WasmOpcode opcode) {
} // namespace
bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
- const wasm::WasmModule* module) {
+ const wasm::WasmModule* module,
+ PrintLocals print_locals) {
OFStream os(stdout);
Zone zone(allocator, ZONE_NAME);
WasmDecoder<Decoder::kNoValidate> decoder(module, body.sig, body.start,
@@ -913,7 +916,7 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
// Print the local declarations.
BodyLocalDecls decls(&zone);
BytecodeIterator i(body.start, body.end, &decls);
- if (body.start != i.pc() && !FLAG_wasm_code_fuzzer_gen_test) {
+ if (body.start != i.pc() && print_locals == kPrintLocals) {
os << "// locals: ";
if (!decls.type_list.empty()) {
ValueType type = decls.type_list[0];
diff --git a/deps/v8/src/wasm/function-body-decoder.h b/deps/v8/src/wasm/function-body-decoder.h
index 8df1c8a09e..50eb2295c9 100644
--- a/deps/v8/src/wasm/function-body-decoder.h
+++ b/deps/v8/src/wasm/function-body-decoder.h
@@ -34,12 +34,11 @@ struct FunctionBody {
uint32_t offset; // offset in the module bytes, for error reporting
const byte* start; // start of the function body
const byte* end; // end of the function body
-};
-static inline FunctionBody FunctionBodyForTesting(const byte* start,
- const byte* end) {
- return {nullptr, 0, start, end};
-}
+ FunctionBody(FunctionSig* sig, uint32_t offset, const byte* start,
+ const byte* end)
+ : sig(sig), offset(offset), start(start), end(end) {}
+};
V8_EXPORT_PRIVATE DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
const wasm::WasmModule* module,
@@ -54,8 +53,10 @@ DecodeResult VerifyWasmCodeWithStats(AccountingAllocator* allocator,
DecodeResult BuildTFGraph(AccountingAllocator* allocator, TFBuilder* builder,
FunctionBody& body);
+enum PrintLocals { kPrintLocals, kOmitLocals };
+V8_EXPORT_PRIVATE
bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
- const wasm::WasmModule* module);
+ const wasm::WasmModule* module, PrintLocals print_locals);
// A simplified form of AST printing, e.g. from a debugger.
void PrintRawWasmCode(const byte* start, const byte* end);
@@ -63,14 +64,14 @@ void PrintRawWasmCode(const byte* start, const byte* end);
inline DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
const WasmModule* module, FunctionSig* sig,
const byte* start, const byte* end) {
- FunctionBody body = {sig, 0, start, end};
+ FunctionBody body(sig, 0, start, end);
return VerifyWasmCode(allocator, module, body);
}
inline DecodeResult BuildTFGraph(AccountingAllocator* allocator,
TFBuilder* builder, FunctionSig* sig,
const byte* start, const byte* end) {
- FunctionBody body = {sig, 0, start, end};
+ FunctionBody body(sig, 0, start, end);
return BuildTFGraph(allocator, builder, body);
}
diff --git a/deps/v8/src/wasm/memory-tracing.cc b/deps/v8/src/wasm/memory-tracing.cc
index d6e7891fc0..75a790db50 100644
--- a/deps/v8/src/wasm/memory-tracing.cc
+++ b/deps/v8/src/wasm/memory-tracing.cc
@@ -8,18 +8,18 @@
namespace v8 {
namespace internal {
-namespace tracing {
+namespace wasm {
-void TraceMemoryOperation(ExecutionEngine engine, bool is_store,
- MachineRepresentation rep, uint32_t addr,
+void TraceMemoryOperation(ExecutionEngine engine, const MemoryTracingInfo* info,
int func_index, int position, uint8_t* mem_start) {
EmbeddedVector<char, 64> value;
- switch (rep) {
-#define TRACE_TYPE(rep, str, format, ctype1, ctype2) \
- case MachineRepresentation::rep: \
- SNPrintF(value, str ":" format, \
- ReadLittleEndianValue<ctype1>(mem_start + addr), \
- ReadLittleEndianValue<ctype2>(mem_start + addr)); \
+ auto mem_rep = static_cast<MachineRepresentation>(info->mem_rep);
+ switch (mem_rep) {
+#define TRACE_TYPE(rep, str, format, ctype1, ctype2) \
+ case MachineRepresentation::rep: \
+ SNPrintF(value, str ":" format, \
+ ReadLittleEndianValue<ctype1>(mem_start + info->address), \
+ ReadLittleEndianValue<ctype2>(mem_start + info->address)); \
break;
TRACE_TYPE(kWord8, " i8", "%d / %02x", uint8_t, uint8_t)
TRACE_TYPE(kWord16, "i16", "%d / %04x", uint16_t, uint16_t)
@@ -33,17 +33,20 @@ void TraceMemoryOperation(ExecutionEngine engine, bool is_store,
}
char eng_c = '?';
switch (engine) {
- case kWasmCompiled:
- eng_c = 'C';
+ case ExecutionEngine::kTurbofan:
+ eng_c = 'T';
break;
- case kWasmInterpreted:
+ case ExecutionEngine::kLiftoff:
+ eng_c = 'L';
+ break;
+ case ExecutionEngine::kInterpreter:
eng_c = 'I';
break;
}
printf("%c %8d+0x%-6x %s @%08x %s\n", eng_c, func_index, position,
- is_store ? "store" : "read ", addr, value.start());
+ info->is_store ? "store" : "load ", info->address, value.start());
}
-} // namespace tracing
+} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/memory-tracing.h b/deps/v8/src/wasm/memory-tracing.h
index 7d7bc288c0..33170aefbe 100644
--- a/deps/v8/src/wasm/memory-tracing.h
+++ b/deps/v8/src/wasm/memory-tracing.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MEMORY_TRACING_H
-#define V8_MEMORY_TRACING_H
+#ifndef V8_WASM_MEMORY_TRACING_H_
+#define V8_WASM_MEMORY_TRACING_H_
#include <cstdint>
@@ -11,18 +11,31 @@
namespace v8 {
namespace internal {
-namespace tracing {
+namespace wasm {
-enum ExecutionEngine { kWasmCompiled, kWasmInterpreted };
+enum class ExecutionEngine { kTurbofan, kLiftoff, kInterpreter };
+
+// This struct is created in generated code, hence the use of low-level types.
+struct MemoryTracingInfo {
+ uint32_t address;
+ uint8_t is_store; // 0 or 1
+ uint8_t mem_rep;
+ static_assert(
+ std::is_same<decltype(mem_rep),
+ std::underlying_type<MachineRepresentation>::type>::value,
+ "MachineRepresentation uses uint8_t");
+
+ MemoryTracingInfo(uint32_t addr, bool is_store, MachineRepresentation rep)
+ : address(addr), is_store(is_store), mem_rep(static_cast<uint8_t>(rep)) {}
+};
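
The static_assert above pins the width of mem_rep to the underlying type of MachineRepresentation, so the layout assumed by generated code breaks loudly at compile time if that enum ever grows. The same guard in standalone form (hypothetical types):

#include <cstdint>
#include <type_traits>

enum class Color : uint8_t { kRed, kGreen };

struct Packet {
  uint8_t color;  // raw storage for a Color, kept trivially copyable
  static_assert(
      std::is_same<decltype(color), std::underlying_type<Color>::type>::value,
      "Color must keep uint8_t as its underlying type");
};

int main() {
  Packet p{static_cast<uint8_t>(Color::kGreen)};
  return p.color;  // 1
}
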
// Callback for tracing a memory operation for debugging.
// Triggered by --wasm-trace-memory.
-void TraceMemoryOperation(ExecutionEngine, bool is_store, MachineRepresentation,
- uint32_t addr, int func_index, int position,
- uint8_t* mem_start);
+void TraceMemoryOperation(ExecutionEngine, const MemoryTracingInfo* info,
+ int func_index, int position, uint8_t* mem_start);
-} // namespace tracing
+} // namespace wasm
} // namespace internal
} // namespace v8
-#endif /* !V8_MEMORY_TRACING_H */
+#endif // V8_WASM_MEMORY_TRACING_H_
diff --git a/deps/v8/src/wasm/module-compiler.cc b/deps/v8/src/wasm/module-compiler.cc
index 4bd52a2a8f..4a2e610b99 100644
--- a/deps/v8/src/wasm/module-compiler.cc
+++ b/deps/v8/src/wasm/module-compiler.cc
@@ -19,8 +19,9 @@
#include "src/trap-handler/trap-handler.h"
#include "src/wasm/compilation-manager.h"
#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-code-specialization.h"
-#include "src/wasm/wasm-heap.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-objects-inl.h"
@@ -51,12 +52,12 @@
if (FLAG_trace_wasm_lazy_compilation) PrintF(__VA_ARGS__); \
} while (false)
-static const int kInvalidSigIndex = -1;
-
namespace v8 {
namespace internal {
namespace wasm {
+static constexpr int kInvalidSigIndex = -1;
+
// A class compiling an entire module.
class ModuleCompiler {
public:
@@ -103,7 +104,9 @@ class ModuleCompiler {
compiler_->counters()));
}
- void Commit() {
+ bool Commit() {
+ if (units_.empty()) return false;
+
{
base::LockGuard<base::Mutex> guard(
&compiler_->compilation_units_mutex_);
@@ -113,6 +116,7 @@ class ModuleCompiler {
std::make_move_iterator(units_.end()));
}
units_.clear();
+ return true;
}
void Clear() { units_.clear(); }
@@ -165,8 +169,13 @@ class ModuleCompiler {
bool CanAcceptWork() const { return executed_units_.CanAcceptWork(); }
- bool ShouldIncreaseWorkload() const {
- return executed_units_.ShouldIncreaseWorkload();
+ bool ShouldIncreaseWorkload() {
+ if (executed_units_.ShouldIncreaseWorkload()) {
+ // Check if it actually makes sense to increase the workload.
+ base::LockGuard<base::Mutex> guard(&compilation_units_mutex_);
+ return !compilation_units_.empty();
+ }
+ return false;
}
size_t InitializeCompilationUnits(const std::vector<WasmFunction>& functions,
@@ -241,7 +250,8 @@ class JSToWasmWrapperCache {
Handle<Code> CloneOrCompileJSToWasmWrapper(Isolate* isolate,
wasm::WasmModule* module,
WasmCodeWrapper wasm_code,
- uint32_t index) {
+ uint32_t index,
+ bool use_trap_handler) {
const wasm::WasmFunction* func = &module->functions[index];
int cached_idx = sig_map_.Find(func->sig);
if (cached_idx >= 0) {
@@ -263,22 +273,19 @@ class JSToWasmWrapperCache {
}
}
} else {
- for (RelocIterator it(*code,
- RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL));
- ; it.next()) {
- DCHECK(!it.done());
- it.rinfo()->set_js_to_wasm_address(
- isolate, wasm_code.is_null()
- ? nullptr
- : wasm_code.GetWasmCode()->instructions().start());
- break;
- }
+ RelocIterator it(*code,
+ RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL));
+ DCHECK(!it.done());
+ it.rinfo()->set_js_to_wasm_address(
+ isolate, wasm_code.is_null()
+ ? nullptr
+ : wasm_code.GetWasmCode()->instructions().start());
}
return code;
}
Handle<Code> code = compiler::CompileJSToWasmWrapper(
- isolate, module, wasm_code, index, context_address_);
+ isolate, module, wasm_code, index, context_address_, use_trap_handler);
uint32_t new_cache_idx = sig_map_.FindOrInsert(func->sig);
DCHECK_EQ(code_cache_.size(), new_cache_idx);
USE(new_cache_idx);
@@ -312,8 +319,7 @@ class InstanceBuilder {
struct TableInstance {
Handle<WasmTableObject> table_object; // WebAssembly.Table instance
Handle<FixedArray> js_wrappers; // JSFunctions exported
- Handle<FixedArray> function_table; // internal code array
- Handle<FixedArray> signature_table; // internal sig array
+ Handle<FixedArray> function_table; // internal array of <sig,code> pairs
};
// A pre-evaluated value to use in import binding.
@@ -343,6 +349,8 @@ class InstanceBuilder {
}
Counters* counters() const { return async_counters().get(); }
+ bool use_trap_handler() const { return compiled_module_->use_trap_handler(); }
+
// Helper routines to print out errors with imports.
#define ERROR_THROWER_WITH_MESSAGE(TYPE) \
void Report##TYPE(const char* error, uint32_t index, \
@@ -435,12 +443,13 @@ void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
} else {
TRACE("Finalizing %d {\n", compiled_module->instance_id());
- if (trap_handler::UseTrapHandler()) {
+ if (compiled_module->use_trap_handler()) {
// TODO(6792): No longer needed once WebAssembly code is off heap.
CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
- Handle<FixedArray> code_table = compiled_module->code_table();
+ DisallowHeapAllocation no_gc;
+ FixedArray* code_table = compiled_module->code_table();
for (int i = 0; i < code_table->length(); ++i) {
- Handle<Code> code = code_table->GetValueChecked<Code>(isolate, i);
+ Code* code = Code::cast(code_table->get(i));
int index = code->trap_handler_index()->value();
if (index >= 0) {
trap_handler::ReleaseHandlerData(index);
@@ -450,7 +459,7 @@ void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
}
}
}
- WeakCell* weak_wasm_module = compiled_module->ptr_to_weak_wasm_module();
+ WeakCell* weak_wasm_module = compiled_module->weak_wasm_module();
// Since the order of finalizers is not guaranteed, it can be the case
// that {instance->compiled_module()->module()}, which is a
@@ -483,7 +492,7 @@ void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
WasmCompiledModule::Reset(isolate, compiled_module);
} else {
WasmModuleObject::cast(wasm_module)
- ->set_compiled_module(compiled_module->ptr_to_next_instance());
+ ->set_compiled_module(compiled_module->next_instance());
}
}
}
@@ -500,14 +509,29 @@ void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
TRACE("}\n");
}
-} // namespace
+// This is used in ProcessImports.
+// When importing other modules' exports, we need to ask
+// the exporter for a WasmToWasm wrapper. To do that, we need to
+// switch that module to RW. To avoid flip-flopping the same module
+// RW <-> RX, we create a scope for a set of NativeModules.
+class SetOfNativeModuleModificationScopes final {
+ public:
+ void Add(NativeModule* module) {
+ module->SetExecutable(false);
+ native_modules_.insert(module);
+ }
-bool SyncValidate(Isolate* isolate, const ModuleWireBytes& bytes) {
- if (bytes.start() == nullptr || bytes.length() == 0) return false;
- ModuleResult result = SyncDecodeWasmModule(isolate, bytes.start(),
- bytes.end(), true, kWasmOrigin);
- return result.ok();
-}
+ ~SetOfNativeModuleModificationScopes() {
+ for (NativeModule* module : native_modules_) {
+ module->SetExecutable(true);
+ }
+ }
+
+ private:
+ std::unordered_set<NativeModule*> native_modules_;
+};
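
This class is a set-flavored RAII guard: every Add flips a module writable, and the set guarantees each distinct module is flipped back to executable exactly once at scope exit. A runnable model of the pattern with a stand-in module type:

#include <cstdio>
#include <unordered_set>

// Stand-in for NativeModule, just enough to make the pattern runnable.
struct FakeModule {
  void SetExecutable(bool on) { std::printf("executable=%d\n", on); }
};

class ModificationScopes {
 public:
  void Add(FakeModule* m) {
    m->SetExecutable(false);  // flip to writable
    modules_.insert(m);       // the set ensures one restore per module
  }
  ~ModificationScopes() {
    for (FakeModule* m : modules_) m->SetExecutable(true);
  }

 private:
  std::unordered_set<FakeModule*> modules_;
};

int main() {
  FakeModule a, b;
  {
    ModificationScopes scopes;
    scopes.Add(&a);
    scopes.Add(&a);  // flipped writable again, but restored only once
    scopes.Add(&b);
  }  // both modules re-protected as executable here
}
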
+
+} // namespace
MaybeHandle<WasmModuleObject> SyncCompileTranslatedAsmJs(
Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
@@ -530,13 +554,8 @@ MaybeHandle<WasmModuleObject> SyncCompileTranslatedAsmJs(
MaybeHandle<WasmModuleObject> SyncCompile(Isolate* isolate,
ErrorThrower* thrower,
const ModuleWireBytes& bytes) {
- // TODO(titzer): only make a copy of the bytes if SharedArrayBuffer
- std::unique_ptr<byte[]> copy(new byte[bytes.length()]);
- memcpy(copy.get(), bytes.start(), bytes.length());
- ModuleWireBytes bytes_copy(copy.get(), copy.get() + bytes.length());
-
- ModuleResult result = SyncDecodeWasmModule(
- isolate, bytes_copy.start(), bytes_copy.end(), false, kWasmOrigin);
+ ModuleResult result = SyncDecodeWasmModule(isolate, bytes.start(),
+ bytes.end(), false, kWasmOrigin);
if (result.failed()) {
thrower->CompileFailed("Wasm decoding failed", result);
return {};
@@ -545,7 +564,7 @@ MaybeHandle<WasmModuleObject> SyncCompile(Isolate* isolate,
// Transfer ownership of the WasmModule to the {WasmModuleWrapper} generated
// in {CompileToModuleObject}.
return ModuleCompiler::CompileToModuleObject(
- isolate, thrower, std::move(result.val), bytes_copy, Handle<Script>(),
+ isolate, thrower, std::move(result.val), bytes, Handle<Script>(),
Vector<const byte>());
}
@@ -602,12 +621,22 @@ void AsyncInstantiate(Isolate* isolate, Handle<JSPromise> promise,
}
void AsyncCompile(Isolate* isolate, Handle<JSPromise> promise,
- const ModuleWireBytes& bytes) {
+ const ModuleWireBytes& bytes, bool is_shared) {
if (!FLAG_wasm_async_compilation) {
+ // Asynchronous compilation disabled; fall back on synchronous compilation.
ErrorThrower thrower(isolate, "WasmCompile");
- // Compile the module.
- MaybeHandle<WasmModuleObject> module_object =
- SyncCompile(isolate, &thrower, bytes);
+ MaybeHandle<WasmModuleObject> module_object;
+ if (is_shared) {
+ // Make a copy of the wire bytes to avoid concurrent modification.
+ std::unique_ptr<uint8_t[]> copy(new uint8_t[bytes.length()]);
+ memcpy(copy.get(), bytes.start(), bytes.length());
+ i::wasm::ModuleWireBytes bytes_copy(copy.get(),
+ copy.get() + bytes.length());
+ module_object = SyncCompile(isolate, &thrower, bytes_copy);
+ } else {
+ // The wire bytes are not shared, OK to use them directly.
+ module_object = SyncCompile(isolate, &thrower, bytes);
+ }
if (thrower.error()) {
RejectPromise(isolate, handle(isolate->context()), thrower, promise);
return;
@@ -619,8 +648,10 @@ void AsyncCompile(Isolate* isolate, Handle<JSPromise> promise,
if (FLAG_wasm_test_streaming) {
std::shared_ptr<StreamingDecoder> streaming_decoder =
- isolate->wasm_compilation_manager()->StartStreamingCompilation(
- isolate, handle(isolate->context()), promise);
+ isolate->wasm_engine()
+ ->compilation_manager()
+ ->StartStreamingCompilation(isolate, handle(isolate->context()),
+ promise);
streaming_decoder->OnBytesReceived(bytes.module_bytes());
streaming_decoder->Finish();
return;
@@ -629,7 +660,7 @@ void AsyncCompile(Isolate* isolate, Handle<JSPromise> promise,
// during asynchronous compilation.
std::unique_ptr<byte[]> copy(new byte[bytes.length()]);
memcpy(copy.get(), bytes.start(), bytes.length());
- isolate->wasm_compilation_manager()->StartAsyncCompileJob(
+ isolate->wasm_engine()->compilation_manager()->StartAsyncCompileJob(
isolate, std::move(copy), bytes.length(), handle(isolate->context()),
promise);
}
@@ -692,6 +723,8 @@ Handle<Code> CompileLazyOnGCHeap(Isolate* isolate) {
->shared()
->lazy_compilation_orchestrator())
->get();
+ DCHECK(!orchestrator->IsFrozenForTesting());
+
Handle<Code> compiled_code = orchestrator->CompileLazyOnGCHeap(
isolate, instance, caller_code, offset, func_index, patch_caller);
if (!exp_deopt_data.is_null() && exp_deopt_data->length() > 2) {
@@ -705,8 +738,9 @@ Handle<Code> CompileLazyOnGCHeap(Isolate* isolate) {
if (exp_deopt_data->get(idx)->IsUndefined(isolate)) break;
FixedArray* exp_table = FixedArray::cast(exp_deopt_data->get(idx));
int exp_index = Smi::ToInt(exp_deopt_data->get(idx + 1));
- DCHECK(exp_table->get(exp_index) == *lazy_compile_code);
- exp_table->set(exp_index, *compiled_code);
+ int table_index = compiler::FunctionTableCodeOffset(exp_index);
+ DCHECK(exp_table->get(table_index) == *lazy_compile_code);
+ exp_table->set(table_index, *compiled_code);
}
// TODO(6792): No longer needed once WebAssembly code is off heap.
CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
@@ -737,11 +771,13 @@ Address CompileLazy(Isolate* isolate) {
Maybe<uint32_t> func_index_to_compile = Nothing<uint32_t>();
Handle<Object> exp_deopt_data_entry;
const wasm::WasmCode* lazy_stub_or_copy =
- isolate->wasm_code_manager()->LookupCode(it.frame()->pc());
- DCHECK_EQ(wasm::WasmCode::LazyStub, lazy_stub_or_copy->kind());
+ isolate->wasm_engine()->code_manager()->LookupCode(it.frame()->pc());
+ DCHECK_EQ(wasm::WasmCode::kLazyStub, lazy_stub_or_copy->kind());
if (!lazy_stub_or_copy->IsAnonymous()) {
// Then it's an indirect call or via JS->wasm wrapper.
- instance = lazy_stub_or_copy->owner()->compiled_module()->owning_instance();
+ instance =
+ handle(lazy_stub_or_copy->owner()->compiled_module()->owning_instance(),
+ isolate);
func_index_to_compile = Just(lazy_stub_or_copy->index());
exp_deopt_data_entry =
handle(instance->compiled_module()->lazy_compile_data()->get(
@@ -761,15 +797,16 @@ Address CompileLazy(Isolate* isolate) {
js_to_wasm_caller_code = handle(it.frame()->LookupCode(), isolate);
} else {
wasm_caller_code =
- isolate->wasm_code_manager()->LookupCode(it.frame()->pc());
+ isolate->wasm_engine()->code_manager()->LookupCode(it.frame()->pc());
offset = Just(static_cast<uint32_t>(
it.frame()->pc() - wasm_caller_code->instructions().start()));
if (instance.is_null()) {
// Then this is a direct call (otherwise we would have attached the
// instance via deopt data to the lazy compile stub). Just use the
// instance of the caller.
- instance =
- wasm_caller_code->owner()->compiled_module()->owning_instance();
+ instance = handle(
+ wasm_caller_code->owner()->compiled_module()->owning_instance(),
+ isolate);
}
}
@@ -779,6 +816,11 @@ Address CompileLazy(Isolate* isolate) {
Managed<wasm::LazyCompilationOrchestrator>::cast(
compiled_module->shared()->lazy_compilation_orchestrator())
->get();
+ DCHECK(!orchestrator->IsFrozenForTesting());
+
+ NativeModuleModificationScope native_module_modification_scope(
+ compiled_module->GetNativeModule());
+
const wasm::WasmCode* result = nullptr;
// The caller may be js to wasm calling a function
// also available for indirect calls.
@@ -812,13 +854,15 @@ Address CompileLazy(Isolate* isolate) {
// See EnsureExportedLazyDeoptData: exp_deopt_data[0...(len-1)] are pairs
// of <export_table, index> followed by undefined values. Use this
// information here to patch all export tables.
+ Handle<Foreign> foreign_holder =
+ isolate->factory()->NewForeign(result->instructions().start(), TENURED);
for (int idx = 0, end = exp_deopt_data->length(); idx < end; idx += 2) {
if (exp_deopt_data->get(idx)->IsUndefined(isolate)) break;
- FixedArray* exp_table = FixedArray::cast(exp_deopt_data->get(idx));
+ DisallowHeapAllocation no_gc;
int exp_index = Smi::ToInt(exp_deopt_data->get(idx + 1));
- Handle<Foreign> foreign_holder = isolate->factory()->NewForeign(
- result->instructions().start(), TENURED);
- exp_table->set(exp_index, *foreign_holder);
+ FixedArray* exp_table = FixedArray::cast(exp_deopt_data->get(idx));
+ exp_table->set(compiler::FunctionTableCodeOffset(exp_index),
+ *foreign_holder);
}
// TODO(6792): No longer needed once WebAssembly code is off heap.
CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
@@ -834,42 +878,31 @@ Address CompileLazy(Isolate* isolate) {
compiler::ModuleEnv CreateModuleEnvFromCompiledModule(
Isolate* isolate, Handle<WasmCompiledModule> compiled_module) {
DisallowHeapAllocation no_gc;
- WasmModule* module = compiled_module->module();
- std::vector<Handle<Code>> empty_code;
+ WasmModule* module = compiled_module->shared()->module();
if (FLAG_wasm_jit_to_native) {
NativeModule* native_module = compiled_module->GetNativeModule();
- std::vector<GlobalHandleAddress> function_tables =
- native_module->function_tables();
- std::vector<GlobalHandleAddress> signature_tables =
- native_module->signature_tables();
-
- compiler::ModuleEnv result = {module, // --
- function_tables, // --
- signature_tables, // --
- empty_code,
- BUILTIN_CODE(isolate, WasmCompileLazy)};
+ compiler::ModuleEnv result(module, native_module->function_tables(),
+ std::vector<Handle<Code>>{},
+ BUILTIN_CODE(isolate, WasmCompileLazy),
+ compiled_module->use_trap_handler());
return result;
- } else {
- std::vector<GlobalHandleAddress> function_tables;
- std::vector<GlobalHandleAddress> signature_tables;
-
- int num_function_tables = static_cast<int>(module->function_tables.size());
- for (int i = 0; i < num_function_tables; ++i) {
- FixedArray* ft = compiled_module->ptr_to_function_tables();
- FixedArray* st = compiled_module->ptr_to_signature_tables();
+ }
- // TODO(clemensh): defer these handles for concurrent compilation.
- function_tables.push_back(WasmCompiledModule::GetTableValue(ft, i));
- signature_tables.push_back(WasmCompiledModule::GetTableValue(st, i));
- }
+ std::vector<GlobalHandleAddress> function_tables;
- compiler::ModuleEnv result = {module, // --
- function_tables, // --
- signature_tables, // --
- empty_code, // --
- BUILTIN_CODE(isolate, WasmCompileLazy)};
- return result;
+ int num_function_tables = static_cast<int>(module->function_tables.size());
+ FixedArray* ft =
+ num_function_tables == 0 ? nullptr : compiled_module->function_tables();
+ for (int i = 0; i < num_function_tables; ++i) {
+ // TODO(clemensh): defer these handles for concurrent compilation.
+ function_tables.push_back(WasmCompiledModule::GetTableValue(ft, i));
}
+
+ compiler::ModuleEnv result(module, std::move(function_tables),
+ std::vector<Handle<Code>>{},
+ BUILTIN_CODE(isolate, WasmCompileLazy),
+ compiled_module->use_trap_handler());
+ return result;
}
const wasm::WasmCode* LazyCompilationOrchestrator::CompileFunction(
@@ -882,7 +915,7 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileFunction(
wasm::WasmCode* existing_code = compiled_module->GetNativeModule()->GetCode(
static_cast<uint32_t>(func_index));
if (existing_code != nullptr &&
- existing_code->kind() == wasm::WasmCode::Function) {
+ existing_code->kind() == wasm::WasmCode::kFunction) {
TRACE_LAZY("Function %d already compiled.\n", func_index);
return existing_code;
}
@@ -897,7 +930,8 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileFunction(
compiler::ModuleEnv module_env =
CreateModuleEnvFromCompiledModule(isolate, compiled_module);
- const uint8_t* module_start = compiled_module->module_bytes()->GetChars();
+ const uint8_t* module_start =
+ compiled_module->shared()->module_bytes()->GetChars();
const WasmFunction* func = &module_env.module->functions[func_index];
FunctionBody body{func->sig, func->code.offset(),
@@ -908,7 +942,7 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileFunction(
std::string func_name;
{
WasmName name = Vector<const char>::cast(
- compiled_module->GetRawFunctionName(func_index));
+ compiled_module->shared()->GetRawFunctionName(func_index));
// Copy to std::string, because the underlying string object might move on
// the heap.
func_name.assign(name.start(), static_cast<size_t>(name.length()));
@@ -1000,6 +1034,40 @@ Code* ExtractWasmToWasmCallee(Code* wasm_to_wasm) {
return callee;
}
+const WasmCode* WasmExtractWasmToWasmCallee(const WasmCodeManager* code_manager,
+ const WasmCode* wasm_to_wasm) {
+ DCHECK_EQ(WasmCode::kWasmToWasmWrapper, wasm_to_wasm->kind());
+ // Find the one code target in this wrapper.
+ RelocIterator it(wasm_to_wasm->instructions(), wasm_to_wasm->reloc_info(),
+ wasm_to_wasm->constant_pool(),
+ RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL));
+ DCHECK(!it.done());
+ const WasmCode* callee =
+ code_manager->LookupCode(it.rinfo()->js_to_wasm_address());
+#ifdef DEBUG
+ it.next();
+ DCHECK(it.done());
+#endif
+ return callee;
+}
+
+// TODO(mtrofin): this should be a function again, when chromium:761307
+// is addressed. chromium:771171 is also related.
+#define WasmPatchWasmToWasmWrapper(isolate, wasm_to_wasm, new_target) \
+ do { \
+ TRACE_LAZY("Patching wasm-to-wasm wrapper.\n"); \
+ DCHECK_EQ(WasmCode::kWasmToWasmWrapper, wasm_to_wasm->kind()); \
+ NativeModuleModificationScope scope(wasm_to_wasm->owner()); \
+ RelocIterator it(wasm_to_wasm->instructions(), wasm_to_wasm->reloc_info(), \
+ wasm_to_wasm->constant_pool(), \
+ RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL)); \
+ DCHECK(!it.done()); \
+ it.rinfo()->set_js_to_wasm_address(isolate, \
+ new_target->instructions().start()); \
+ it.next(); \
+ DCHECK(it.done()); \
+ } while (0)
+
void PatchWasmToWasmWrapper(Isolate* isolate, Code* wasm_to_wasm,
Code* new_target) {
DCHECK_EQ(Code::WASM_TO_WASM_FUNCTION, wasm_to_wasm->kind());
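
The do { ... } while (0) wrapper in WasmPatchWasmToWasmWrapper is the standard idiom for making a multi-statement macro behave as a single statement. A minimal sketch of the idiom (Prepare and Apply are placeholders, not V8 functions):

    #include <cstdio>

    void Prepare(int x) { std::printf("prepare %d\n", x); }
    void Apply(int x) { std::printf("apply %d\n", x); }

    // Wrapped in do { } while (0), the macro body parses as one statement,
    // so it composes safely with unbraced if/else.
    #define PATCH(x) \
      do {           \
        Prepare(x);  \
        Apply(x);    \
      } while (0)

    int main() {
      bool should_patch = true;
      if (should_patch)
        PATCH(42);  // expands to a single statement; the else binds correctly
      else
        Apply(0);
    }
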
@@ -1051,9 +1119,10 @@ Handle<Code> LazyCompilationOrchestrator::CompileLazyOnGCHeap(
Handle<WasmCompiledModule> caller_module(
caller_func_info.instance.ToHandleChecked()->compiled_module(),
isolate);
- SeqOneByteString* module_bytes = caller_module->module_bytes();
+ SeqOneByteString* module_bytes = caller_module->shared()->module_bytes();
const byte* func_bytes =
- module_bytes->GetChars() + caller_module->module()
+ module_bytes->GetChars() + caller_module->shared()
+ ->module()
->functions[caller_func_info.func_index]
.code.offset();
Code* lazy_callee = nullptr;
@@ -1182,24 +1251,32 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileFromJsToWasm(
CompileFunction(isolate, instance, exported_func_index);
{
DisallowHeapAllocation no_gc;
- int idx = 0;
- for (RelocIterator it(*js_to_wasm_caller,
- RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL));
- !it.done(); it.next()) {
- ++idx;
- const wasm::WasmCode* callee_compiled =
- compiled_module->GetNativeModule()->GetCode(exported_func_index);
- DCHECK_NOT_NULL(callee_compiled);
+ CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
+ RelocIterator it(*js_to_wasm_caller,
+ RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL));
+ DCHECK(!it.done());
+ wasm::WasmCode* current_callee =
+ isolate->wasm_engine()->code_manager()->LookupCode(
+ it.rinfo()->js_to_wasm_address());
+ const wasm::WasmCode* callee_compiled =
+ compiled_module->GetNativeModule()->GetCode(exported_func_index);
+ DCHECK_NOT_NULL(callee_compiled);
+ if (current_callee->kind() == WasmCode::kWasmToWasmWrapper) {
+ WasmPatchWasmToWasmWrapper(isolate, current_callee, callee_compiled);
+ } else {
it.rinfo()->set_js_to_wasm_address(
isolate, callee_compiled->instructions().start());
}
- DCHECK_EQ(1, idx);
+#ifdef DEBUG
+ it.next();
+ DCHECK(it.done());
+#endif
}
wasm::WasmCode* ret =
compiled_module->GetNativeModule()->GetCode(exported_func_index);
DCHECK_NOT_NULL(ret);
- DCHECK_EQ(wasm::WasmCode::Function, ret->kind());
+ DCHECK_EQ(wasm::WasmCode::kFunction, ret->kind());
return ret;
}
@@ -1217,36 +1294,30 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileDirectCall(
Isolate* isolate, Handle<WasmInstanceObject> instance,
Maybe<uint32_t> maybe_func_to_return_idx, const wasm::WasmCode* wasm_caller,
int call_offset) {
- struct WasmDirectCallData {
- uint32_t offset = 0;
- uint32_t func_index = 0;
- };
- std::vector<Maybe<WasmDirectCallData>> non_compiled_functions;
+ std::vector<Maybe<uint32_t>> non_compiled_functions;
Decoder decoder(nullptr, nullptr);
+ WasmCode* last_callee = nullptr;
+
{
DisallowHeapAllocation no_gc;
Handle<WasmCompiledModule> caller_module(
wasm_caller->owner()->compiled_module(), isolate);
- SeqOneByteString* module_bytes = caller_module->module_bytes();
+ SeqOneByteString* module_bytes = caller_module->shared()->module_bytes();
uint32_t caller_func_index = wasm_caller->index();
SourcePositionTableIterator source_pos_iterator(
Handle<ByteArray>(ByteArray::cast(
caller_module->source_positions()->get(caller_func_index))));
const byte* func_bytes =
- module_bytes->GetChars() +
- caller_module->module()->functions[caller_func_index].code.offset();
+ module_bytes->GetChars() + caller_module->shared()
+ ->module()
+ ->functions[caller_func_index]
+ .code.offset();
for (RelocIterator it(wasm_caller->instructions(),
wasm_caller->reloc_info(),
wasm_caller->constant_pool(),
RelocInfo::ModeMask(RelocInfo::WASM_CALL));
!it.done(); it.next()) {
- const WasmCode* callee = isolate->wasm_code_manager()->LookupCode(
- it.rinfo()->target_address());
- if (callee->kind() != WasmCode::LazyStub) {
- non_compiled_functions.push_back(Nothing<WasmDirectCallData>());
- continue;
- }
// TODO(clemensh): Introduce safe_cast<T, bool> which (D)CHECKS
// (depending on the bool) against limits of T and then static_casts.
size_t offset_l = it.rinfo()->pc() - wasm_caller->instructions().start();
@@ -1254,14 +1325,19 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileDirectCall(
int offset = static_cast<int>(offset_l);
int byte_pos =
AdvanceSourcePositionTableIterator(source_pos_iterator, offset);
+
+ WasmCode* callee = isolate->wasm_engine()->code_manager()->LookupCode(
+ it.rinfo()->target_address());
+ if (offset < call_offset) last_callee = callee;
+ if (callee->kind() != WasmCode::kLazyStub) {
+ non_compiled_functions.push_back(Nothing<uint32_t>());
+ continue;
+ }
uint32_t called_func_index =
ExtractDirectCallIndex(decoder, func_bytes + byte_pos);
DCHECK_LT(called_func_index,
caller_module->GetNativeModule()->FunctionCount());
- WasmDirectCallData data;
- data.offset = offset;
- data.func_index = called_func_index;
- non_compiled_functions.push_back(Just<WasmDirectCallData>(data));
+ non_compiled_functions.push_back(Just(called_func_index));
// The call offset is one instruction after the call. Remember the last
// called function before that offset.
if (offset < call_offset) {
@@ -1269,7 +1345,15 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileDirectCall(
}
}
}
- uint32_t func_to_return_idx = maybe_func_to_return_idx.ToChecked();
+ uint32_t func_to_return_idx = 0;
+
+ if (last_callee->kind() == WasmCode::kWasmToWasmWrapper) {
+ const WasmCode* actual_callee = WasmExtractWasmToWasmCallee(
+ isolate->wasm_engine()->code_manager(), last_callee);
+ func_to_return_idx = actual_callee->index();
+ } else {
+ func_to_return_idx = maybe_func_to_return_idx.ToChecked();
+ }
TRACE_LAZY(
"Starting lazy compilation (func %u @%d, js_to_wasm: false, patch "
@@ -1278,15 +1362,16 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileDirectCall(
// TODO(clemensh): compile all functions in non_compiled_functions in
// background, wait for func_to_return_idx.
- CompileFunction(isolate, instance, func_to_return_idx);
-
- Handle<WasmCompiledModule> compiled_module(instance->compiled_module(),
- isolate);
- WasmCode* ret =
- compiled_module->GetNativeModule()->GetCode(func_to_return_idx);
-
+ const WasmCode* ret = CompileFunction(isolate, instance, func_to_return_idx);
DCHECK_NOT_NULL(ret);
- {
+
+ if (last_callee->kind() == WasmCode::kWasmToWasmWrapper) {
+ // We can finish it all here by compiling the target wasm function and
+ // patching the wasm_to_wasm caller.
+ WasmPatchWasmToWasmWrapper(isolate, last_callee, ret);
+ } else {
+ Handle<WasmCompiledModule> compiled_module(instance->compiled_module(),
+ isolate);
DisallowHeapAllocation no_gc;
// Now patch the code object with all functions which are now compiled. This
// will pick up any other compiled functions, not only {ret}.
@@ -1299,10 +1384,10 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileDirectCall(
!it.done(); it.next(), ++idx) {
auto& info = non_compiled_functions[idx];
if (info.IsNothing()) continue;
- uint32_t lookup = info.ToChecked().func_index;
+ uint32_t lookup = info.ToChecked();
const WasmCode* callee_compiled =
compiled_module->GetNativeModule()->GetCode(lookup);
- if (callee_compiled->kind() != WasmCode::Function) continue;
+ if (callee_compiled->kind() != WasmCode::kFunction) continue;
it.rinfo()->set_wasm_call_address(
isolate, callee_compiled->instructions().start());
++patched;
@@ -1721,7 +1806,7 @@ WasmCodeWrapper EnsureExportedLazyDeoptData(Isolate* isolate,
} else {
wasm::WasmCode* code = native_module->GetCode(func_index);
// {code} will be nullptr when exporting imports.
- if (code == nullptr || code->kind() != wasm::WasmCode::LazyStub ||
+ if (code == nullptr || code->kind() != wasm::WasmCode::kLazyStub ||
!code->IsAnonymous()) {
return WasmCodeWrapper(code);
}
@@ -1787,7 +1872,7 @@ WasmCodeWrapper EnsureTableExportLazyDeoptData(
EnsureExportedLazyDeoptData(isolate, instance, code_table,
native_module, func_index)
.GetWasmCode();
- if (code == nullptr || code->kind() != wasm::WasmCode::LazyStub)
+ if (code == nullptr || code->kind() != wasm::WasmCode::kLazyStub)
return WasmCodeWrapper(code);
// deopt_data:
@@ -1866,7 +1951,7 @@ WasmCodeWrapper MakeWasmToWasmWrapper(
new_wasm_context_address);
return WasmCodeWrapper(
instance->compiled_module()->GetNativeModule()->AddCodeCopy(
- code, wasm::WasmCode::WasmToWasmWrapper, index));
+ code, wasm::WasmCode::kWasmToWasmWrapper, index));
}
}
@@ -1885,13 +1970,15 @@ WasmCodeWrapper UnwrapExportOrCompileImportWrapper(
// signature.
if (FLAG_wasm_jit_to_native) {
Handle<Code> temp_code = compiler::CompileWasmToJSWrapper(
- isolate, target, sig, import_index, origin, js_imports_table);
+ isolate, target, sig, import_index, origin,
+ instance->compiled_module()->use_trap_handler(), js_imports_table);
return WasmCodeWrapper(
instance->compiled_module()->GetNativeModule()->AddCodeCopy(
- temp_code, wasm::WasmCode::WasmToJsWrapper, import_index));
+ temp_code, wasm::WasmCode::kWasmToJsWrapper, import_index));
} else {
return WasmCodeWrapper(compiler::CompileWasmToJSWrapper(
- isolate, target, sig, import_index, origin, js_imports_table));
+ isolate, target, sig, import_index, origin,
+ instance->compiled_module()->use_trap_handler(), js_imports_table));
}
}
@@ -1908,33 +1995,21 @@ void FunctionTableFinalizer(const v8::WeakCallbackInfo<void>& data) {
std::unique_ptr<compiler::ModuleEnv> CreateDefaultModuleEnv(
Isolate* isolate, WasmModule* module, Handle<Code> illegal_builtin) {
std::vector<GlobalHandleAddress> function_tables;
- std::vector<GlobalHandleAddress> signature_tables;
- for (size_t i = 0; i < module->function_tables.size(); i++) {
+ for (size_t i = module->function_tables.size(); i > 0; --i) {
Handle<Object> func_table =
isolate->global_handles()->Create(isolate->heap()->undefined_value());
- Handle<Object> sig_table =
- isolate->global_handles()->Create(isolate->heap()->undefined_value());
GlobalHandles::MakeWeak(func_table.location(), func_table.location(),
&FunctionTableFinalizer,
v8::WeakCallbackType::kFinalizer);
- GlobalHandles::MakeWeak(sig_table.location(), sig_table.location(),
- &FunctionTableFinalizer,
- v8::WeakCallbackType::kFinalizer);
function_tables.push_back(func_table.address());
- signature_tables.push_back(sig_table.address());
}
- std::vector<Handle<Code>> empty_code;
-
- compiler::ModuleEnv result = {
- module, // --
- function_tables, // --
- signature_tables, // --
- empty_code, // --
- illegal_builtin // --
- };
- return std::unique_ptr<compiler::ModuleEnv>(new compiler::ModuleEnv(result));
+ // TODO(kschimpf): Add module-specific policy handling here (see v8:7143)?
+ bool use_trap_handler = trap_handler::IsTrapHandlerEnabled();
+ return base::make_unique<compiler::ModuleEnv>(
+ module, function_tables, std::vector<Handle<Code>>{}, illegal_builtin,
+ use_trap_handler);
}
// TODO(mtrofin): remove code_table when we don't need FLAG_wasm_jit_to_native
@@ -1945,7 +2020,7 @@ Handle<WasmCompiledModule> NewCompiledModule(Isolate* isolate,
compiler::ModuleEnv* env) {
Handle<WasmCompiledModule> compiled_module =
WasmCompiledModule::New(isolate, module, code_table, export_wrappers,
- env->function_tables, env->signature_tables);
+ env->function_tables, env->use_trap_handler);
return compiled_module;
}
@@ -2047,8 +2122,9 @@ MaybeHandle<WasmModuleObject> ModuleCompiler::CompileToModuleObjectInternal(
native_module_ = compiled_module->GetNativeModule();
compiled_module->OnWasmModuleDecodingComplete(shared);
if (lazy_compile && FLAG_wasm_jit_to_native) {
- compiled_module->set_lazy_compile_data(isolate_->factory()->NewFixedArray(
- static_cast<int>(module_->functions.size()), TENURED));
+ Handle<FixedArray> lazy_compile_data = isolate_->factory()->NewFixedArray(
+ static_cast<int>(module_->functions.size()), TENURED);
+ compiled_module->set_lazy_compile_data(*lazy_compile_data);
}
if (!lazy_compile) {
@@ -2122,7 +2198,7 @@ InstanceBuilder::InstanceBuilder(
MaybeHandle<JSArrayBuffer> memory,
WeakCallbackInfo<void>::Callback instance_finalizer_callback)
: isolate_(isolate),
- module_(module_object->compiled_module()->module()),
+ module_(module_object->compiled_module()->shared()->module()),
async_counters_(isolate->async_counters()),
thrower_(thrower),
module_object_(module_object),
@@ -2205,13 +2281,13 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
TRACE("Cloning from %zu\n", original->GetNativeModule()->instance_id);
compiled_module_ = WasmCompiledModule::Clone(isolate_, original);
native_module = compiled_module_->GetNativeModule();
- wrapper_table = compiled_module_->export_wrappers();
+ wrapper_table = handle(compiled_module_->export_wrappers(), isolate_);
} else {
TRACE("Cloning from %d\n", original->instance_id());
- old_code_table = original->code_table();
+ old_code_table = handle(original->code_table(), isolate_);
compiled_module_ = WasmCompiledModule::Clone(isolate_, original);
- code_table = compiled_module_->code_table();
- wrapper_table = compiled_module_->export_wrappers();
+ code_table = handle(compiled_module_->code_table(), isolate_);
+ wrapper_table = handle(compiled_module_->export_wrappers(), isolate_);
// Avoid creating too many handles in the outer scope.
HandleScope scope(isolate_);
@@ -2261,21 +2337,27 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
} else {
// There was no owner, so we can reuse the original.
compiled_module_ = original;
- wrapper_table = compiled_module_->export_wrappers();
+ wrapper_table = handle(compiled_module_->export_wrappers(), isolate_);
if (FLAG_wasm_jit_to_native) {
old_module = compiled_module_->GetNativeModule();
native_module = old_module;
TRACE("Reusing existing instance %zu\n",
compiled_module_->GetNativeModule()->instance_id);
} else {
- old_code_table =
- factory->CopyFixedArray(compiled_module_->code_table());
- code_table = compiled_module_->code_table();
+ code_table = handle(compiled_module_->code_table(), isolate_);
+ old_code_table = factory->CopyFixedArray(code_table);
TRACE("Reusing existing instance %d\n",
compiled_module_->instance_id());
}
}
- compiled_module_->set_native_context(isolate_->native_context());
+ Handle<WeakCell> weak_native_context =
+ isolate_->factory()->NewWeakCell(isolate_->native_context());
+ compiled_module_->set_weak_native_context(*weak_native_context);
+ }
+ base::Optional<wasm::NativeModuleModificationScope>
+ native_module_modification_scope;
+ if (native_module != nullptr) {
+ native_module_modification_scope.emplace(native_module);
}
//--------------------------------------------------------------------------
@@ -2312,9 +2394,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
int function_table_count = static_cast<int>(module_->function_tables.size());
table_instances_.reserve(module_->function_tables.size());
for (int index = 0; index < function_table_count; ++index) {
- table_instances_.push_back(
- {Handle<WasmTableObject>::null(), Handle<FixedArray>::null(),
- Handle<FixedArray>::null(), Handle<FixedArray>::null()});
+ table_instances_.emplace_back();
}
//--------------------------------------------------------------------------
@@ -2348,7 +2428,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
Handle<JSArrayBuffer> memory = memory_.ToHandleChecked();
memory->set_is_neuterable(false);
- DCHECK_IMPLIES(trap_handler::UseTrapHandler(),
+ DCHECK_IMPLIES(use_trap_handler(),
module_->is_asm_js() || memory->has_guard_region());
} else if (initial_pages > 0) {
// Allocate memory if the initial size is more than 0 pages.
@@ -2389,7 +2469,8 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
DCHECK(table_init.table_index < table_instances_.size());
uint32_t base = EvalUint32InitExpr(table_init.offset);
uint32_t table_size =
- table_instances_[table_init.table_index].function_table->length();
+ table_instances_[table_init.table_index].function_table->length() /
+ compiler::kFunctionTableEntrySize;
if (!in_bounds(base, static_cast<uint32_t>(table_init.entries.size()),
table_size)) {
thrower_->LinkError("table initializer is out of bounds");
@@ -2477,7 +2558,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
//--------------------------------------------------------------------------
// Unpack and notify signal handler of protected instructions.
//--------------------------------------------------------------------------
- if (trap_handler::UseTrapHandler()) {
+ if (use_trap_handler()) {
if (FLAG_wasm_jit_to_native) {
UnpackAndRegisterProtectedInstructions(isolate_, native_module);
} else {
@@ -2498,7 +2579,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
compiled_module_->InsertInChain(*module_object_);
}
module_object_->set_compiled_module(*compiled_module_);
- compiled_module_->set_weak_owning_instance(link_to_owning_instance);
+ compiled_module_->set_weak_owning_instance(*link_to_owning_instance);
GlobalHandles::MakeWeak(global_handle.location(), global_handle.location(),
instance_finalizer_callback_,
v8::WeakCallbackType::kFinalizer);
@@ -2508,8 +2589,8 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
// Debugging support.
//--------------------------------------------------------------------------
// Set all breakpoints that were set on the shared module.
- WasmSharedModuleData::SetBreakpointsOnNewInstance(compiled_module_->shared(),
- instance);
+ WasmSharedModuleData::SetBreakpointsOnNewInstance(
+ handle(compiled_module_->shared(), isolate_), instance);
if (FLAG_wasm_interpret_all && module_->is_wasm()) {
Handle<WasmDebugInfo> debug_info =
@@ -2535,15 +2616,17 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
isolate_, instance, code_table, native_module, start_index);
FunctionSig* sig = module_->functions[start_index].sig;
Handle<Code> wrapper_code = js_to_wasm_cache_.CloneOrCompileJSToWasmWrapper(
- isolate_, module_, startup_code, start_index);
+ isolate_, module_, startup_code, start_index,
+ compiled_module_->use_trap_handler());
Handle<WasmExportedFunction> startup_fct = WasmExportedFunction::New(
isolate_, instance, MaybeHandle<String>(), start_index,
static_cast<int>(sig->parameter_count()), wrapper_code);
RecordStats(startup_code, counters());
// Call the JS function.
Handle<Object> undefined = factory->undefined_value();
- // Close the CodeSpaceMemoryModificationScope to execute the start function.
+ // Close the modification scopes, so we can execute the start function.
modification_scope.reset();
+ native_module_modification_scope.reset();
{
// We're OK with JS execution here. The instance is fully set up.
AllowJavascriptExecution allow_js(isolate_);
@@ -2658,8 +2741,8 @@ uint32_t InstanceBuilder::EvalUint32InitExpr(const WasmInitExpr& expr) {
// Load data segments into the memory.
void InstanceBuilder::LoadDataSegments(WasmContext* wasm_context) {
- Handle<SeqOneByteString> module_bytes(compiled_module_->module_bytes(),
- isolate_);
+ Handle<SeqOneByteString> module_bytes(
+ compiled_module_->shared()->module_bytes(), isolate_);
for (const WasmDataSegment& segment : module_->data_segments) {
uint32_t source_size = segment.source.length();
// Segments of size == 0 are just nops.
@@ -2700,13 +2783,13 @@ void InstanceBuilder::WriteGlobalValue(WasmGlobal& global,
void InstanceBuilder::SanitizeImports() {
Handle<SeqOneByteString> module_bytes(
- module_object_->compiled_module()->module_bytes());
+ module_object_->compiled_module()->shared()->module_bytes());
for (size_t index = 0; index < module_->import_table.size(); ++index) {
WasmImport& import = module_->import_table[index];
Handle<String> module_name;
MaybeHandle<String> maybe_module_name =
- WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
+ WasmSharedModuleData::ExtractUtf8StringFromModuleBytes(
isolate_, module_bytes, import.module_name);
if (!maybe_module_name.ToHandle(&module_name)) {
thrower_->LinkError("Could not resolve module name for import %zu",
@@ -2716,7 +2799,7 @@ void InstanceBuilder::SanitizeImports() {
Handle<String> import_name;
MaybeHandle<String> maybe_import_name =
- WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
+ WasmSharedModuleData::ExtractUtf8StringFromModuleBytes(
isolate_, module_bytes, import.field_name);
if (!maybe_import_name.ToHandle(&import_name)) {
thrower_->LinkError("Could not resolve import name for import %zu",
@@ -2764,10 +2847,15 @@ Handle<FixedArray> InstanceBuilder::SetupWasmToJSImportsTable(
// functions.
int InstanceBuilder::ProcessImports(Handle<FixedArray> code_table,
Handle<WasmInstanceObject> instance) {
+ using compiler::kFunctionTableSignatureOffset;
+ using compiler::kFunctionTableCodeOffset;
+ using compiler::kFunctionTableEntrySize;
int num_imported_functions = 0;
int num_imported_tables = 0;
Handle<FixedArray> js_imports_table = SetupWasmToJSImportsTable(instance);
WasmInstanceMap imported_wasm_instances(isolate_->heap());
+ SetOfNativeModuleModificationScopes set_of_native_module_scopes;
+
DCHECK_EQ(module_->import_table.size(), sanitized_imports_.size());
for (int index = 0; index < static_cast<int>(module_->import_table.size());
++index) {
@@ -2842,19 +2930,18 @@ int InstanceBuilder::ProcessImports(Handle<FixedArray> code_table,
}
}
- // Allocate a new dispatch table and signature table.
- int table_size = imported_cur_size;
+ // Allocate a new dispatch table, containing <smi(sig), code> pairs.
+ CHECK_GE(kMaxInt / kFunctionTableEntrySize, imported_cur_size);
+ int table_size = kFunctionTableEntrySize * imported_cur_size;
table_instance.function_table =
isolate_->factory()->NewFixedArray(table_size);
- table_instance.signature_table =
- isolate_->factory()->NewFixedArray(table_size);
- for (int i = 0; i < table_size; ++i) {
- table_instance.signature_table->set(i,
- Smi::FromInt(kInvalidSigIndex));
+ for (int i = kFunctionTableSignatureOffset; i < table_size;
+ i += kFunctionTableEntrySize) {
+ table_instance.function_table->set(i, Smi::FromInt(kInvalidSigIndex));
}
// Initialize the dispatch table with the (foreign) JS functions
// that are already in the table.
- for (int i = 0; i < table_size; ++i) {
+ for (int i = 0; i < imported_cur_size; ++i) {
Handle<Object> val(table_instance.js_wrappers->get(i), isolate_);
// TODO(mtrofin): this is the same logic as WasmTableObject::Set:
// insert in the local table a wrapper from the other module, and add
@@ -2876,8 +2963,10 @@ int InstanceBuilder::ProcessImports(Handle<FixedArray> code_table,
&imported_wasm_instances, instance, 0)
.GetCode();
int sig_index = module_->signature_map.Find(sig);
- table_instance.signature_table->set(i, Smi::FromInt(sig_index));
- table_instance.function_table->set(i, *code);
+ table_instance.function_table->set(
+ compiler::FunctionTableSigOffset(i), Smi::FromInt(sig_index));
+ table_instance.function_table->set(
+ compiler::FunctionTableCodeOffset(i), *code);
} else {
const wasm::WasmCode* exported_code =
target->GetWasmCode().GetWasmCode();
@@ -2896,14 +2985,17 @@ int InstanceBuilder::ProcessImports(Handle<FixedArray> code_table,
Handle<Code> wrapper = compiler::CompileWasmToWasmWrapper(
isolate_, target->GetWasmCode(), sig,
reinterpret_cast<Address>(other_context));
+ set_of_native_module_scopes.Add(exporting_module);
wrapper_code = exporting_module->AddExportedWrapper(
wrapper, exported_code->index());
}
int sig_index = module_->signature_map.Find(sig);
- table_instance.signature_table->set(i, Smi::FromInt(sig_index));
Handle<Foreign> foreign_holder = isolate_->factory()->NewForeign(
wrapper_code->instructions().start(), TENURED);
- table_instance.function_table->set(i, *foreign_holder);
+ table_instance.function_table->set(
+ compiler::FunctionTableSigOffset(i), Smi::FromInt(sig_index));
+ table_instance.function_table->set(
+ compiler::FunctionTableCodeOffset(i), *foreign_holder);
}
}
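
The recurring <smi(sig), code> change above merges the old separate signature and function tables into one interleaved dispatch table. A self-contained sketch of that layout (the constants and helpers mirror the compiler::kFunctionTable* and FunctionTable*Offset names used in the diff, but are illustrative, not the V8 API):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    constexpr int kEntrySize = 2;    // one <sig, code> pair per logical entry
    constexpr int kSigOffset = 0;
    constexpr int kCodeOffset = 1;
    constexpr intptr_t kInvalidSig = -1;

    inline int SigSlot(int i) { return i * kEntrySize + kSigOffset; }
    inline int CodeSlot(int i) { return i * kEntrySize + kCodeOffset; }

    std::vector<intptr_t> NewDispatchTable(int num_entries) {
      // The backing array is twice the logical entry count.
      std::vector<intptr_t> table(num_entries * kEntrySize, 0);
      // Invalid signature ids make uninitialized entries fail the sig check.
      for (int i = 0; i < num_entries; ++i) table[SigSlot(i)] = kInvalidSig;
      return table;
    }

    int main() {
      std::vector<intptr_t> table = NewDispatchTable(4);
      assert(table.size() == 8);
      assert(table[SigSlot(3)] == kInvalidSig);
      table[CodeSlot(3)] = 0x1234;  // install a (fake) code address
      assert(table[CodeSlot(3)] == 0x1234);
    }
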
@@ -2924,7 +3016,7 @@ int InstanceBuilder::ProcessImports(Handle<FixedArray> code_table,
Handle<JSArrayBuffer> buffer(memory->array_buffer(), isolate_);
memory_ = buffer;
uint32_t imported_cur_pages = static_cast<uint32_t>(
- buffer->byte_length()->Number() / WasmModule::kPageSize);
+ buffer->byte_length()->Number() / kWasmPageSize);
if (imported_cur_pages < module_->initial_pages) {
thrower_->LinkError(
"memory import %d is smaller than initial %u, got %u", index,
@@ -3060,9 +3152,12 @@ Handle<JSArrayBuffer> InstanceBuilder::AllocateMemory(uint32_t num_pages) {
thrower_->RangeError("Out of memory: wasm memory too large");
return Handle<JSArrayBuffer>::null();
}
- const bool enable_guard_regions = trap_handler::UseTrapHandler();
+ const bool enable_guard_regions = use_trap_handler();
+ const bool is_shared_memory =
+ module_->has_shared_memory && i::FLAG_experimental_wasm_threads;
Handle<JSArrayBuffer> mem_buffer = NewArrayBuffer(
- isolate_, num_pages * WasmModule::kPageSize, enable_guard_regions);
+ isolate_, num_pages * kWasmPageSize, enable_guard_regions,
+ is_shared_memory ? i::SharedFlag::kShared : i::SharedFlag::kNotShared);
if (mem_buffer.is_null()) {
thrower_->RangeError("Out of memory: wasm memory");
@@ -3086,7 +3181,8 @@ bool InstanceBuilder::NeedsWrappers() const {
void InstanceBuilder::ProcessExports(
Handle<WasmInstanceObject> instance,
Handle<WasmCompiledModule> compiled_module) {
- Handle<FixedArray> wrapper_table = compiled_module->export_wrappers();
+ Handle<FixedArray> wrapper_table(compiled_module->export_wrappers(),
+ isolate_);
if (NeedsWrappers()) {
// Fill the table to cache the exported JSFunction wrappers.
js_wrappers_.insert(js_wrappers_.begin(), module_->functions.size(),
@@ -3117,22 +3213,24 @@ void InstanceBuilder::ProcessExports(
// Store weak references to all exported functions.
Handle<FixedArray> weak_exported_functions;
if (compiled_module->has_weak_exported_functions()) {
- weak_exported_functions = compiled_module->weak_exported_functions();
+ weak_exported_functions =
+ handle(compiled_module->weak_exported_functions(), isolate_);
} else {
int export_count = 0;
for (WasmExport& exp : module_->export_table) {
if (exp.kind == kExternalFunction) ++export_count;
}
weak_exported_functions = isolate_->factory()->NewFixedArray(export_count);
- compiled_module->set_weak_exported_functions(weak_exported_functions);
+ compiled_module->set_weak_exported_functions(*weak_exported_functions);
}
// Process each export in the export table.
int export_index = 0; // Index into {weak_exported_functions}.
for (WasmExport& exp : module_->export_table) {
- Handle<String> name = WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
- isolate_, compiled_module_, exp.name)
- .ToHandleChecked();
+ Handle<String> name =
+ WasmSharedModuleData::ExtractUtf8StringFromModuleBytes(
+ isolate_, handle(compiled_module_->shared(), isolate_), exp.name)
+ .ToHandleChecked();
Handle<JSObject> export_to;
if (module_->is_asm_js() && exp.kind == kExternalFunction &&
String::Equals(name, single_function_name)) {
@@ -3153,9 +3251,11 @@ void InstanceBuilder::ProcessExports(
MaybeHandle<String> func_name;
if (module_->is_asm_js()) {
// For modules arising from asm.js, honor the names section.
- func_name = WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
- isolate_, compiled_module_, function.name)
- .ToHandleChecked();
+ func_name =
+ WasmSharedModuleData::ExtractUtf8StringFromModuleBytes(
+ isolate_, handle(compiled_module_->shared(), isolate_),
+ function.name)
+ .ToHandleChecked();
}
js_function = WasmExportedFunction::New(
isolate_, instance, func_name, function.func_index,
@@ -3246,21 +3346,16 @@ void InstanceBuilder::InitializeTables(
CodeSpecialization* code_specialization) {
size_t function_table_count = module_->function_tables.size();
std::vector<GlobalHandleAddress> new_function_tables(function_table_count);
- std::vector<GlobalHandleAddress> new_signature_tables(function_table_count);
wasm::NativeModule* native_module = compiled_module_->GetNativeModule();
std::vector<GlobalHandleAddress> empty;
std::vector<GlobalHandleAddress>& old_function_tables =
FLAG_wasm_jit_to_native ? native_module->function_tables() : empty;
- std::vector<GlobalHandleAddress>& old_signature_tables =
- FLAG_wasm_jit_to_native ? native_module->signature_tables() : empty;
Handle<FixedArray> old_function_tables_gc =
- FLAG_wasm_jit_to_native ? Handle<FixedArray>::null()
- : compiled_module_->function_tables();
- Handle<FixedArray> old_signature_tables_gc =
- FLAG_wasm_jit_to_native ? Handle<FixedArray>::null()
- : compiled_module_->signature_tables();
+ FLAG_wasm_jit_to_native
+ ? Handle<FixedArray>::null()
+ : handle(compiled_module_->function_tables(), isolate_);
// function_table_count is 0 or 1, so we just create these objects even if not
// needed for native wasm.
@@ -3269,58 +3364,52 @@ void InstanceBuilder::InitializeTables(
Handle<FixedArray> new_function_tables_gc =
isolate_->factory()->NewFixedArray(static_cast<int>(function_table_count),
TENURED);
- Handle<FixedArray> new_signature_tables_gc =
- isolate_->factory()->NewFixedArray(static_cast<int>(function_table_count),
- TENURED);
// These go on the instance.
Handle<FixedArray> rooted_function_tables =
isolate_->factory()->NewFixedArray(static_cast<int>(function_table_count),
TENURED);
- Handle<FixedArray> rooted_signature_tables =
- isolate_->factory()->NewFixedArray(static_cast<int>(function_table_count),
- TENURED);
instance->set_function_tables(*rooted_function_tables);
- instance->set_signature_tables(*rooted_signature_tables);
if (FLAG_wasm_jit_to_native) {
DCHECK_EQ(old_function_tables.size(), new_function_tables.size());
- DCHECK_EQ(old_signature_tables.size(), new_signature_tables.size());
} else {
DCHECK_EQ(old_function_tables_gc->length(),
new_function_tables_gc->length());
- DCHECK_EQ(old_signature_tables_gc->length(),
- new_signature_tables_gc->length());
}
for (size_t index = 0; index < function_table_count; ++index) {
WasmIndirectFunctionTable& table = module_->function_tables[index];
TableInstance& table_instance = table_instances_[index];
- int table_size = static_cast<int>(table.initial_size);
+ // The table holds <smi(sig), code> pairs.
+ CHECK_GE(kMaxInt / compiler::kFunctionTableEntrySize, table.initial_size);
+ int num_table_entries = static_cast<int>(table.initial_size);
+ int table_size = compiler::kFunctionTableEntrySize * num_table_entries;
if (table_instance.function_table.is_null()) {
// Create a new dispatch table if necessary.
table_instance.function_table =
isolate_->factory()->NewFixedArray(table_size);
- table_instance.signature_table =
- isolate_->factory()->NewFixedArray(table_size);
- for (int i = 0; i < table_size; ++i) {
+ for (int i = compiler::kFunctionTableSignatureOffset; i < table_size;
+ i += compiler::kFunctionTableEntrySize) {
// Fill the table with invalid signature indexes so that
// uninitialized entries will always fail the signature check.
- table_instance.signature_table->set(i, Smi::FromInt(kInvalidSigIndex));
+ table_instance.function_table->set(i, Smi::FromInt(kInvalidSigIndex));
}
} else {
// Table is imported, patch table bounds check
- DCHECK_LE(table_size, table_instance.function_table->length());
- code_specialization->PatchTableSize(
- table_size, table_instance.function_table->length());
+ int existing_table_size = table_instance.function_table->length();
+ DCHECK_EQ(0, existing_table_size % compiler::kFunctionTableEntrySize);
+ int existing_num_table_entries =
+ existing_table_size / compiler::kFunctionTableEntrySize;
+ DCHECK_LE(num_table_entries, existing_num_table_entries);
+ code_specialization->PatchTableSize(num_table_entries,
+ existing_num_table_entries);
}
int int_index = static_cast<int>(index);
Handle<FixedArray> global_func_table =
isolate_->global_handles()->Create(*table_instance.function_table);
- Handle<FixedArray> global_sig_table =
- isolate_->global_handles()->Create(*table_instance.signature_table);
// Make the handles weak. The table objects are rooted on the instance, as
// they belong to it. We need the global handles in order to have stable
// pointers to embed in the instance's specialization (wasm compiled code).
@@ -3333,47 +3422,30 @@ void InstanceBuilder::InitializeTables(
reinterpret_cast<Object**>(global_func_table.location()),
global_func_table.location(), &FunctionTableFinalizer,
v8::WeakCallbackType::kFinalizer);
- GlobalHandles::MakeWeak(
- reinterpret_cast<Object**>(global_sig_table.location()),
- global_sig_table.location(), &FunctionTableFinalizer,
- v8::WeakCallbackType::kFinalizer);
rooted_function_tables->set(int_index, *global_func_table);
- rooted_signature_tables->set(int_index, *global_sig_table);
GlobalHandleAddress new_func_table_addr = global_func_table.address();
- GlobalHandleAddress new_sig_table_addr = global_sig_table.address();
GlobalHandleAddress old_func_table_addr;
- GlobalHandleAddress old_sig_table_addr;
if (!FLAG_wasm_jit_to_native) {
WasmCompiledModule::SetTableValue(isolate_, new_function_tables_gc,
int_index, new_func_table_addr);
- WasmCompiledModule::SetTableValue(isolate_, new_signature_tables_gc,
- int_index, new_sig_table_addr);
old_func_table_addr =
WasmCompiledModule::GetTableValue(*old_function_tables_gc, int_index);
- old_sig_table_addr = WasmCompiledModule::GetTableValue(
- *old_signature_tables_gc, int_index);
} else {
new_function_tables[int_index] = new_func_table_addr;
- new_signature_tables[int_index] = new_sig_table_addr;
old_func_table_addr = old_function_tables[int_index];
- old_sig_table_addr = old_signature_tables[int_index];
}
code_specialization->RelocatePointer(old_func_table_addr,
new_func_table_addr);
- code_specialization->RelocatePointer(old_sig_table_addr,
- new_sig_table_addr);
}
if (FLAG_wasm_jit_to_native) {
native_module->function_tables() = new_function_tables;
- native_module->signature_tables() = new_signature_tables;
} else {
- compiled_module_->set_function_tables(new_function_tables_gc);
- compiled_module_->set_signature_tables(new_signature_tables_gc);
+ compiled_module_->set_function_tables(*new_function_tables_gc);
}
}
@@ -3384,13 +3456,6 @@ void InstanceBuilder::LoadTableSegments(Handle<FixedArray> code_table,
for (int index = 0; index < function_table_count; ++index) {
TableInstance& table_instance = table_instances_[index];
- Handle<FixedArray> all_dispatch_tables;
- if (!table_instance.table_object.is_null()) {
- // Get the existing dispatch table(s) with the WebAssembly.Table object.
- all_dispatch_tables =
- handle(table_instance.table_object->dispatch_tables());
- }
-
// Count the number of table exports for each function (needed for lazy
// compilation).
std::unordered_map<uint32_t, uint32_t> num_table_exports;
@@ -3402,14 +3467,20 @@ void InstanceBuilder::LoadTableSegments(Handle<FixedArray> code_table,
Code::cast(code_table->get(static_cast<int>(func_index)));
// Only increase the counter for lazy compile builtins (it's not
// needed otherwise).
- if (code->is_wasm_code()) continue;
- DCHECK_EQ(Builtins::kWasmCompileLazy, code->builtin_index());
+ if (code->builtin_index() != Builtins::kWasmCompileLazy) {
+ DCHECK(code->kind() == Code::WASM_FUNCTION ||
+ code->kind() == Code::WASM_TO_JS_FUNCTION);
+ continue;
+ }
} else {
const wasm::WasmCode* code = native_module->GetCode(func_index);
// Only increase the counter for lazy compile builtins (it's not
// needed otherwise).
- if (code->kind() == wasm::WasmCode::Function) continue;
- DCHECK_EQ(wasm::WasmCode::LazyStub, code->kind());
+ if (code->kind() != wasm::WasmCode::kLazyStub) {
+ DCHECK(code->kind() == wasm::WasmCode::kFunction ||
+ code->kind() == wasm::WasmCode::kWasmToJsWrapper);
+ continue;
+ }
}
++num_table_exports[func_index];
}
@@ -3422,14 +3493,16 @@ void InstanceBuilder::LoadTableSegments(Handle<FixedArray> code_table,
uint32_t base = EvalUint32InitExpr(table_init.offset);
uint32_t num_entries = static_cast<uint32_t>(table_init.entries.size());
DCHECK(in_bounds(base, num_entries,
- table_instance.function_table->length()));
+ table_instance.function_table->length() /
+ compiler::kFunctionTableEntrySize));
for (uint32_t i = 0; i < num_entries; ++i) {
uint32_t func_index = table_init.entries[i];
WasmFunction* function = &module_->functions[func_index];
int table_index = static_cast<int>(i + base);
uint32_t sig_index = module_->signature_ids[function->sig_index];
- table_instance.signature_table->set(table_index,
- Smi::FromInt(sig_index));
+ table_instance.function_table->set(
+ compiler::FunctionTableSigOffset(table_index),
+ Smi::FromInt(sig_index));
WasmCodeWrapper wasm_code = EnsureTableExportLazyDeoptData(
isolate_, instance, code_table, native_module, func_index,
table_instance.function_table, table_index, &num_table_exports);
@@ -3437,13 +3510,14 @@ void InstanceBuilder::LoadTableSegments(Handle<FixedArray> code_table,
if (!wasm_code.IsCodeObject()) {
Handle<Foreign> as_foreign = isolate_->factory()->NewForeign(
wasm_code.GetWasmCode()->instructions().start(), TENURED);
- table_instance.function_table->set(table_index, *as_foreign);
value_to_update_with = as_foreign;
} else {
- table_instance.function_table->set(table_index, *wasm_code.GetCode());
value_to_update_with = wasm_code.GetCode();
}
- if (!all_dispatch_tables.is_null()) {
+ table_instance.function_table->set(
+ compiler::FunctionTableCodeOffset(table_index),
+ *value_to_update_with);
+ if (!table_instance.table_object.is_null()) {
if (js_wrappers_[func_index].is_null()) {
// No JSFunction entry yet exists for this function. Create one.
// TODO(titzer): We compile JS->wasm wrappers for functions are
@@ -3452,13 +3526,16 @@ void InstanceBuilder::LoadTableSegments(Handle<FixedArray> code_table,
Handle<Code> wrapper_code =
js_to_wasm_cache_.CloneOrCompileJSToWasmWrapper(
- isolate_, module_, wasm_code, func_index);
+ isolate_, module_, wasm_code, func_index,
+ instance->compiled_module()->use_trap_handler());
MaybeHandle<String> func_name;
if (module_->is_asm_js()) {
// For modules arising from asm.js, honor the names section.
- func_name = WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
- isolate_, compiled_module_, function->name)
- .ToHandleChecked();
+ func_name =
+ WasmSharedModuleData::ExtractUtf8StringFromModuleBytes(
+ isolate_, handle(compiled_module_->shared(), isolate_),
+ function->name)
+ .ToHandleChecked();
}
Handle<WasmExportedFunction> js_function =
WasmExportedFunction::New(
@@ -3486,13 +3563,14 @@ void InstanceBuilder::LoadTableSegments(Handle<FixedArray> code_table,
Code::WASM_TO_WASM_FUNCTION);
} else {
DCHECK(wasm_code.GetWasmCode()->kind() ==
- WasmCode::WasmToJsWrapper ||
+ WasmCode::kWasmToJsWrapper ||
wasm_code.GetWasmCode()->kind() ==
- WasmCode::WasmToWasmWrapper);
+ WasmCode::kWasmToWasmWrapper);
}
}
- UpdateDispatchTables(isolate_, all_dispatch_tables, table_index,
- function, value_to_update_with);
+ WasmTableObject::UpdateDispatchTables(table_instance.table_object,
+ table_index, function->sig,
+ value_to_update_with);
}
}
}
@@ -3510,9 +3588,9 @@ void InstanceBuilder::LoadTableSegments(Handle<FixedArray> code_table,
// initialized.
if (!table_instance.table_object.is_null()) {
// Add the new dispatch table to the WebAssembly.Table object.
- all_dispatch_tables = WasmTableObject::AddDispatchTable(
- isolate_, table_instance.table_object, instance, index,
- table_instance.function_table, table_instance.signature_table);
+ WasmTableObject::AddDispatchTable(isolate_, table_instance.table_object,
+ instance, index,
+ table_instance.function_table);
}
}
}
@@ -3544,7 +3622,7 @@ void AsyncCompileJob::Abort() {
background_task_manager_.CancelAndWait();
if (num_pending_foreground_tasks_ == 0) {
// No task is pending, so we can just remove the AsyncCompileJob.
- isolate_->wasm_compilation_manager()->RemoveJob(this);
+ isolate_->wasm_engine()->compilation_manager()->RemoveJob(this);
} else {
// There is still a compilation task in the task queue. We enter the
// AbortCompilation state and wait for this compilation task to abort the
@@ -3582,6 +3660,8 @@ class AsyncStreamingProcessor final : public StreamingProcessor {
// Finishes the AsyncCompileJob with an error.
void FinishAsyncCompileJobWithError(ResultBase result);
+ void CommitCompilationUnits();
+
ModuleDecoder decoder_;
AsyncCompileJob* job_;
std::unique_ptr<ModuleCompiler::CompilationUnitBuilder>
@@ -3605,14 +3685,14 @@ void AsyncCompileJob::AsyncCompileFailed(ErrorThrower& thrower) {
if (stream_) stream_->NotifyError();
// {job} keeps the {this} pointer alive.
std::shared_ptr<AsyncCompileJob> job =
- isolate_->wasm_compilation_manager()->RemoveJob(this);
+ isolate_->wasm_engine()->compilation_manager()->RemoveJob(this);
RejectPromise(isolate_, context_, thrower, module_promise_);
}
void AsyncCompileJob::AsyncCompileSucceeded(Handle<Object> result) {
// {job} keeps the {this} pointer alive.
std::shared_ptr<AsyncCompileJob> job =
- isolate_->wasm_compilation_manager()->RemoveJob(this);
+ isolate_->wasm_engine()->compilation_manager()->RemoveJob(this);
ResolvePromise(isolate_, context_, module_promise_, result);
}
@@ -3888,7 +3968,6 @@ class AsyncCompileJob::ExecuteAndFinishCompilationUnits : public CompileStep {
DisallowHeapAllocation no_allocation;
if (!job_->compiler_->FetchAndExecuteCompilationUnit(
StartFinishCompilationUnit)) {
- finished_ = true;
break;
}
}
@@ -3913,7 +3992,7 @@ class AsyncCompileJob::ExecuteAndFinishCompilationUnits : public CompileStep {
double deadline = MonotonicallyIncreasingTimeInMs() + 1.0;
while (true) {
- if (!finished_ && job_->compiler_->ShouldIncreaseWorkload()) {
+ if (job_->compiler_->ShouldIncreaseWorkload()) {
job_->RestartBackgroundTasks();
}
@@ -3969,7 +4048,6 @@ class AsyncCompileJob::ExecuteAndFinishCompilationUnits : public CompileStep {
private:
std::atomic<bool> failed_{false};
- std::atomic<bool> finished_{false};
};
//==========================================================================
@@ -4026,7 +4104,7 @@ class AsyncCompileJob::FinishCompile : public CompileStep {
// Finish the wasm script now and make it public to the debugger.
job_->isolate_->debug()->OnAfterCompile(
- handle(job_->compiled_module_->script()));
+ handle(job_->compiled_module_->shared()->script()));
// TODO(wasm): compiling wrappers should be made async as well.
job_->DoSync<CompileWrappers>();
@@ -4066,7 +4144,7 @@ class AsyncCompileJob::FinishModule : public CompileStep {
class AsyncCompileJob::AbortCompilation : public CompileStep {
void RunInForeground() override {
TRACE_COMPILE("Abort asynchronous compilation ...\n");
- job_->isolate_->wasm_compilation_manager()->RemoveJob(job_);
+ job_->isolate_->wasm_engine()->compilation_manager()->RemoveJob(job_);
}
};
@@ -4095,7 +4173,10 @@ void AsyncStreamingProcessor::FinishAsyncCompileJobWithError(ResultBase error) {
job_->DoSync<AsyncCompileJob::DecodeFail>(std::move(result));
}
- compilation_unit_builder_->Clear();
+ // Clear the {compilation_unit_builder_} if it exists. This is needed
+ // because there is a check in the destructor of the
+ // {CompilationUnitBuilder} that it is empty.
+ if (compilation_unit_builder_) compilation_unit_builder_->Clear();
} else {
job_->DoSync<AsyncCompileJob::DecodeFail>(std::move(result));
}
@@ -4119,6 +4200,12 @@ bool AsyncStreamingProcessor::ProcessSection(SectionCode section_code,
Vector<const uint8_t> bytes,
uint32_t offset) {
TRACE_STREAMING("Process section %d ...\n", section_code);
+ if (compilation_unit_builder_) {
+ // We reached a section after the code section, so we no longer need the
+ // compilation_unit_builder_.
+ CommitCompilationUnits();
+ compilation_unit_builder_.reset();
+ }
if (section_code == SectionCode::kUnknownSectionCode) {
// No need to decode unknown sections, even the names section. If decoding
// of the unknown section fails, compilation should succeed anyway, and
@@ -4186,14 +4273,19 @@ bool AsyncStreamingProcessor::ProcessFunctionBody(Vector<const uint8_t> bytes,
return true;
}
-void AsyncStreamingProcessor::OnFinishedChunk() {
- // TRACE_STREAMING("FinishChunk...\n");
- if (compilation_unit_builder_) {
- compilation_unit_builder_->Commit();
+void AsyncStreamingProcessor::CommitCompilationUnits() {
+ DCHECK(compilation_unit_builder_);
+ if (compilation_unit_builder_->Commit()) {
+ // Only restart background tasks when compilation units were committed.
job_->RestartBackgroundTasks();
}
}
+void AsyncStreamingProcessor::OnFinishedChunk() {
+ TRACE_STREAMING("FinishChunk...\n");
+ if (compilation_unit_builder_) CommitCompilationUnits();
+}
+
// Finish the processing of the stream.
void AsyncStreamingProcessor::OnFinishedStream(std::unique_ptr<uint8_t[]> bytes,
size_t length) {
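
CommitCompilationUnits above gates RestartBackgroundTasks on whether Commit() actually handed off new units. A minimal sketch of that commit-then-restart pattern (UnitBuilder is illustrative, not the V8 class):

    #include <vector>

    class UnitBuilder {
     public:
      void Add(int unit) { pending_.push_back(unit); }
      // Returns true iff at least one unit was handed off.
      bool Commit() {
        if (pending_.empty()) return false;
        committed_.insert(committed_.end(), pending_.begin(), pending_.end());
        pending_.clear();
        return true;
      }

     private:
      std::vector<int> pending_;
      std::vector<int> committed_;
    };

    void OnChunk(UnitBuilder* builder, bool* restarted_tasks) {
      // Only wake the background tasks when there is new work to pick up.
      if (builder->Commit()) *restarted_tasks = true;
    }
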
@@ -4234,15 +4326,18 @@ void CompileJsToWasmWrappers(Isolate* isolate,
Counters* counters) {
JSToWasmWrapperCache js_to_wasm_cache;
int wrapper_index = 0;
- Handle<FixedArray> export_wrappers = compiled_module->export_wrappers();
+ Handle<FixedArray> export_wrappers(compiled_module->export_wrappers(),
+ isolate);
+ Handle<FixedArray> code_table(compiled_module->code_table(), isolate);
NativeModule* native_module = compiled_module->GetNativeModule();
- for (auto exp : compiled_module->module()->export_table) {
+ for (auto exp : compiled_module->shared()->module()->export_table) {
if (exp.kind != kExternalFunction) continue;
- WasmCodeWrapper wasm_code = EnsureExportedLazyDeoptData(
- isolate, Handle<WasmInstanceObject>::null(),
- compiled_module->code_table(), native_module, exp.index);
+ WasmCodeWrapper wasm_code =
+ EnsureExportedLazyDeoptData(isolate, Handle<WasmInstanceObject>::null(),
+ code_table, native_module, exp.index);
Handle<Code> wrapper_code = js_to_wasm_cache.CloneOrCompileJSToWasmWrapper(
- isolate, compiled_module->module(), wasm_code, exp.index);
+ isolate, compiled_module->shared()->module(), wasm_code, exp.index,
+ compiled_module->use_trap_handler());
export_wrappers->set(wrapper_index, *wrapper_code);
RecordStats(*wrapper_code, counters);
++wrapper_index;
@@ -4283,6 +4378,7 @@ Handle<Script> CreateWasmScript(Isolate* isolate,
} // namespace internal
} // namespace v8
+#undef WasmPatchWasmToWasmWrapper
#undef TRACE
#undef TRACE_CHAIN
#undef TRACE_COMPILE
diff --git a/deps/v8/src/wasm/module-compiler.h b/deps/v8/src/wasm/module-compiler.h
index 864af287cf..3a8b1972d6 100644
--- a/deps/v8/src/wasm/module-compiler.h
+++ b/deps/v8/src/wasm/module-compiler.h
@@ -23,9 +23,6 @@ namespace wasm {
class ModuleCompiler;
class WasmCode;
-V8_EXPORT_PRIVATE bool SyncValidate(Isolate* isolate,
- const ModuleWireBytes& bytes);
-
V8_EXPORT_PRIVATE MaybeHandle<WasmModuleObject> SyncCompileTranslatedAsmJs(
Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
Handle<Script> asm_js_script, Vector<const byte> asm_js_offset_table_bytes);
@@ -43,7 +40,8 @@ V8_EXPORT_PRIVATE MaybeHandle<WasmInstanceObject> SyncCompileAndInstantiate(
MaybeHandle<JSReceiver> imports, MaybeHandle<JSArrayBuffer> memory);
V8_EXPORT_PRIVATE void AsyncCompile(Isolate* isolate, Handle<JSPromise> promise,
- const ModuleWireBytes& bytes);
+ const ModuleWireBytes& bytes,
+ bool is_shared);
V8_EXPORT_PRIVATE void AsyncInstantiate(Isolate* isolate,
Handle<JSPromise> promise,
@@ -95,6 +93,20 @@ class LazyCompilationOrchestrator {
const wasm::WasmCode* CompileIndirectCall(Isolate*,
Handle<WasmInstanceObject>,
uint32_t func_index);
+
+#ifdef DEBUG
+ // Call this method in tests to disallow any further lazy compilation; then
+ // call into the wasm instance again to verify that no lazy compilation is
+ // triggered.
+ void FreezeLazyCompilationForTesting() { frozen_ = true; }
+ bool IsFrozenForTesting() const { return frozen_; }
+
+ private:
+ bool frozen_ = false;
+#else
+ void FreezeLazyCompilationForTesting() {}
+ bool IsFrozenForTesting() { return false; }
+#endif
};
// Encapsulates all the state and steps of an asynchronous compilation.
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index 1176c56935..010f191263 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -21,14 +21,11 @@ namespace v8 {
namespace internal {
namespace wasm {
-#if DEBUG
#define TRACE(...) \
do { \
if (FLAG_trace_wasm_decoder) PrintF(__VA_ARGS__); \
} while (false)
-#else
-#define TRACE(...)
-#endif
+
namespace {
constexpr char kNameString[] = "name";
@@ -40,6 +37,20 @@ constexpr size_t num_chars(const char (&)[N]) {
return N - 1; // remove null character at end.
}
+const char* ExternalKindName(ImportExportKindCode kind) {
+ switch (kind) {
+ case kExternalFunction:
+ return "function";
+ case kExternalTable:
+ return "table";
+ case kExternalMemory:
+ return "memory";
+ case kExternalGlobal:
+ return "global";
+ }
+ return "unknown";
+}
+
} // namespace
const char* SectionName(SectionCode code) {
@@ -299,7 +310,7 @@ class ModuleDecoderImpl : public Decoder {
const byte* pos = pc_;
uint32_t magic_word = consume_u32("wasm magic");
-#define BYTES(x) (x & 0xff), (x >> 8) & 0xff, (x >> 16) & 0xff, (x >> 24) & 0xff
+#define BYTES(x) (x & 0xFF), (x >> 8) & 0xFF, (x >> 16) & 0xFF, (x >> 24) & 0xFF
if (magic_word != kWasmMagic) {
errorf(pos,
"expected magic word %02x %02x %02x %02x, "
@@ -446,7 +457,8 @@ class ModuleDecoderImpl : public Decoder {
const byte* pos = pc_;
import->module_name = consume_string(true, "module name");
import->field_name = consume_string(true, "field name");
- import->kind = static_cast<WasmExternalKind>(consume_u8("import kind"));
+ import->kind =
+ static_cast<ImportExportKindCode>(consume_u8("import kind"));
switch (import->kind) {
case kExternalFunction: {
// ===== Imported function =======================================
@@ -472,7 +484,7 @@ class ModuleDecoderImpl : public Decoder {
module_->function_tables.emplace_back();
WasmIndirectFunctionTable* table = &module_->function_tables.back();
table->imported = true;
- expect_u8("element type", kWasmAnyFunctionTypeForm);
+ expect_u8("element type", kWasmAnyFunctionTypeCode);
consume_resizable_limits(
"element count", "elements", FLAG_wasm_max_table_size,
&table->initial_size, &table->has_maximum_size,
@@ -538,7 +550,7 @@ class ModuleDecoderImpl : public Decoder {
if (!AddTable(module_.get())) break;
module_->function_tables.emplace_back();
WasmIndirectFunctionTable* table = &module_->function_tables.back();
- expect_u8("table type", kWasmAnyFunctionTypeForm);
+ expect_u8("table type", kWasmAnyFunctionTypeCode);
consume_resizable_limits("table elements", "elements",
FLAG_wasm_max_table_size, &table->initial_size,
&table->has_maximum_size,
@@ -591,7 +603,7 @@ class ModuleDecoderImpl : public Decoder {
exp->name = consume_string(true, "field name");
const byte* pos = pc();
- exp->kind = static_cast<WasmExternalKind>(consume_u8("export kind"));
+ exp->kind = static_cast<ImportExportKindCode>(consume_u8("export kind"));
switch (exp->kind) {
case kExternalFunction: {
WasmFunction* func = nullptr;
@@ -784,12 +796,12 @@ class ModuleDecoderImpl : public Decoder {
// Decode function names, ignore the rest.
// Local names will be decoded when needed.
switch (name_type) {
- case NameSectionType::kModule: {
+ case NameSectionKindCode::kModule: {
WireBytesRef name = wasm::consume_string(inner, false, "module name");
if (inner.ok() && validate_utf8(&inner, name)) module_->name = name;
break;
}
- case NameSectionType::kFunction: {
+ case NameSectionKindCode::kFunction: {
uint32_t functions_count = inner.consume_u32v("functions count");
for (; inner.ok() && functions_count > 0; --functions_count) {
@@ -1118,7 +1130,7 @@ class ModuleDecoderImpl : public Decoder {
if (FLAG_experimental_wasm_threads) {
bool is_memory = (strcmp(name, "memory") == 0);
- if (flags & 0xfc || (!is_memory && (flags & 0xfe))) {
+ if (flags & 0xFC || (!is_memory && (flags & 0xFE))) {
errorf(pos - 1, "invalid %s limits flags", name);
}
if (flags == 3) {
@@ -1130,7 +1142,7 @@ class ModuleDecoderImpl : public Decoder {
name);
}
} else {
- if (flags & 0xfe) {
+ if (flags & 0xFE) {
errorf(pos - 1, "invalid %s limits flags", name);
}
}
@@ -1292,7 +1304,7 @@ class ModuleDecoderImpl : public Decoder {
private:
FunctionSig* consume_sig_internal(Zone* zone, bool has_return_values) {
- if (has_return_values && !expect_u8("type form", kWasmFunctionTypeForm))
+ if (has_return_values && !expect_u8("type form", kWasmFunctionTypeCode))
return nullptr;
// parse parameter types
uint32_t param_count =
@@ -1586,7 +1598,7 @@ void DecodeLocalNames(const byte* module_start, const byte* module_end,
uint32_t name_payload_len = decoder.consume_u32v("name payload length");
if (!decoder.checkAvailable(name_payload_len)) break;
- if (name_type != NameSectionType::kLocal) {
+ if (name_type != NameSectionKindCode::kLocal) {
decoder.consume_bytes(name_payload_len, "name subsection payload");
continue;
}
diff --git a/deps/v8/src/wasm/module-decoder.h b/deps/v8/src/wasm/module-decoder.h
index 8b36205ed3..f98a5ed66d 100644
--- a/deps/v8/src/wasm/module-decoder.h
+++ b/deps/v8/src/wasm/module-decoder.h
@@ -7,6 +7,7 @@
#include "src/globals.h"
#include "src/wasm/function-body-decoder.h"
+#include "src/wasm/wasm-constants.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-result.h"
@@ -19,41 +20,6 @@ struct ModuleEnv;
namespace wasm {
-const uint8_t kWasmFunctionTypeForm = 0x60;
-const uint8_t kWasmAnyFunctionTypeForm = 0x70;
-const uint8_t kHasMaximumFlag = 1;
-const uint8_t kNoMaximumFlag = 0;
-
-enum MemoryFlags : uint8_t {
- kNoMaximum = 0,
- kMaximum = 1,
- kSharedNoMaximum = 2,
- kSharedAndMaximum = 3
-};
-
-enum SectionCode : int8_t {
- kUnknownSectionCode = 0, // code for unknown sections
- kTypeSectionCode = 1, // Function signature declarations
- kImportSectionCode = 2, // Import declarations
- kFunctionSectionCode = 3, // Function declarations
- kTableSectionCode = 4, // Indirect function table and other tables
- kMemorySectionCode = 5, // Memory attributes
- kGlobalSectionCode = 6, // Global declarations
- kExportSectionCode = 7, // Exports
- kStartSectionCode = 8, // Start function declaration
- kElementSectionCode = 9, // Elements section
- kCodeSectionCode = 10, // Function code
- kDataSectionCode = 11, // Data segments
- kNameSectionCode = 12, // Name section (encoded as a string)
- kExceptionSectionCode = 13, // Exception section
-
- // Helper values
- kFirstSectionInModule = kTypeSectionCode,
- kLastKnownModuleSection = kExceptionSectionCode,
-};
-
-enum NameSectionType : uint8_t { kModule = 0, kFunction = 1, kLocal = 2 };
-
inline bool IsValidSectionCode(uint8_t byte) {
return kTypeSectionCode <= byte && byte <= kLastKnownModuleSection;
}
diff --git a/deps/v8/src/wasm/streaming-decoder.cc b/deps/v8/src/wasm/streaming-decoder.cc
index 2387edba34..1b5eaab332 100644
--- a/deps/v8/src/wasm/streaming-decoder.cc
+++ b/deps/v8/src/wasm/streaming-decoder.cc
@@ -66,7 +66,7 @@ void StreamingDecoder::Finish() {
std::unique_ptr<uint8_t[]> bytes(new uint8_t[total_size_]);
uint8_t* cursor = bytes.get();
{
-#define BYTES(x) (x & 0xff), (x >> 8) & 0xff, (x >> 16) & 0xff, (x >> 24) & 0xff
+#define BYTES(x) (x & 0xFF), (x >> 8) & 0xFF, (x >> 16) & 0xFF, (x >> 24) & 0xFF
uint8_t module_header[]{BYTES(kWasmMagic), BYTES(kWasmVersion)};
#undef BYTES
memcpy(cursor, module_header, arraysize(module_header));
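// The BYTES macro above emits each 32-bit constant in little-endian byte
// order, so the synthesized prefix is the standard 8-byte wasm module header.
// A self-contained check, with kWasmMagic and kWasmVersion as defined in
// wasm-constants.h:
#include <cstdint>
#include <cstdio>

int main() {
  constexpr uint32_t kWasmMagic = 0x6d736100;  // "\0asm"
  constexpr uint32_t kWasmVersion = 0x01;
#define BYTES(x) (x & 0xFF), (x >> 8) & 0xFF, (x >> 16) & 0xFF, (x >> 24) & 0xFF
  const uint8_t header[]{BYTES(kWasmMagic), BYTES(kWasmVersion)};
#undef BYTES
  for (uint8_t b : header) std::printf("%02x ", b);
  // Prints: 00 61 73 6d 01 00 00 00
}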
@@ -92,7 +92,9 @@ class StreamingDecoder::DecodeVarInt32 : public DecodingState {
public:
explicit DecodeVarInt32(size_t max_value, const char* field_name)
: max_value_(max_value), field_name_(field_name) {}
+
uint8_t* buffer() override { return byte_buffer_; }
+
size_t size() const override { return kMaxVarInt32Size; }
size_t ReadBytes(StreamingDecoder* streaming,
@@ -103,10 +105,7 @@ class StreamingDecoder::DecodeVarInt32 : public DecodingState {
virtual std::unique_ptr<DecodingState> NextWithValue(
StreamingDecoder* streaming) = 0;
- size_t value() const { return value_; }
- size_t bytes_consumed() const { return bytes_consumed_; }
-
- private:
+ protected:
uint8_t byte_buffer_[kMaxVarInt32Size];
// The maximum valid value decoded in this state. {Next} returns an error if
// this value is exceeded.
@@ -141,10 +140,6 @@ class StreamingDecoder::DecodeSectionID : public DecodingState {
uint8_t* buffer() override { return &id_; }
bool is_finishing_allowed() const override { return true; }
- uint8_t id() const { return id_; }
-
- uint32_t module_offset() const { return module_offset_; }
-
std::unique_ptr<DecodingState> Next(StreamingDecoder* streaming) override;
private:
@@ -160,10 +155,6 @@ class StreamingDecoder::DecodeSectionLength : public DecodeVarInt32 {
section_id_(id),
module_offset_(module_offset) {}
- uint8_t section_id() const { return section_id_; }
-
- uint32_t module_offset() const { return module_offset_; }
-
std::unique_ptr<DecodingState> NextWithValue(
StreamingDecoder* streaming) override;
@@ -179,14 +170,13 @@ class StreamingDecoder::DecodeSectionPayload : public DecodingState {
: section_buffer_(section_buffer) {}
size_t size() const override { return section_buffer_->payload_length(); }
+
uint8_t* buffer() override {
return section_buffer_->bytes() + section_buffer_->payload_offset();
}
std::unique_ptr<DecodingState> Next(StreamingDecoder* streaming) override;
- SectionBuffer* section_buffer() const { return section_buffer_; }
-
private:
SectionBuffer* section_buffer_;
};
@@ -197,8 +187,6 @@ class StreamingDecoder::DecodeNumberOfFunctions : public DecodeVarInt32 {
: DecodeVarInt32(kV8MaxWasmFunctions, "functions count"),
section_buffer_(section_buffer) {}
- SectionBuffer* section_buffer() const { return section_buffer_; }
-
std::unique_ptr<DecodingState> NextWithValue(
StreamingDecoder* streaming) override;
@@ -219,10 +207,6 @@ class StreamingDecoder::DecodeFunctionLength : public DecodeVarInt32 {
DCHECK_GT(num_remaining_functions, 0);
}
- size_t num_remaining_functions() const { return num_remaining_functions_; }
- size_t buffer_offset() const { return buffer_offset_; }
- SectionBuffer* section_buffer() const { return section_buffer_; }
-
std::unique_ptr<DecodingState> NextWithValue(
StreamingDecoder* streaming) override;
@@ -244,14 +228,11 @@ class StreamingDecoder::DecodeFunctionBody : public DecodingState {
num_remaining_functions_(num_remaining_functions),
module_offset_(module_offset) {}
- size_t buffer_offset() const { return buffer_offset_; }
size_t size() const override { return size_; }
+
uint8_t* buffer() override {
return section_buffer_->bytes() + buffer_offset_;
}
- size_t num_remaining_functions() const { return num_remaining_functions_; }
- uint32_t module_offset() const { return module_offset_; }
- SectionBuffer* section_buffer() const { return section_buffer_; }
std::unique_ptr<DecodingState> Next(StreamingDecoder* streaming) override;
@@ -297,9 +278,9 @@ StreamingDecoder::DecodeVarInt32::Next(StreamingDecoder* streaming) {
if (!streaming->ok()) {
return nullptr;
}
- if (value() > max_value_) {
+ if (value_ > max_value_) {
std::ostringstream oss;
- oss << "function size > maximum function size: " << value() << " < "
+ oss << "function size > maximum function size: " << value_ << " < "
<< max_value_;
return streaming->Error(oss.str());
}
@@ -320,32 +301,32 @@ StreamingDecoder::DecodeModuleHeader::Next(StreamingDecoder* streaming) {
std::unique_ptr<StreamingDecoder::DecodingState>
StreamingDecoder::DecodeSectionID::Next(StreamingDecoder* streaming) {
TRACE_STREAMING("DecodeSectionID: %s section\n",
- SectionName(static_cast<SectionCode>(id())));
- return base::make_unique<DecodeSectionLength>(id(), module_offset());
+ SectionName(static_cast<SectionCode>(id_)));
+ return base::make_unique<DecodeSectionLength>(id_, module_offset_);
}
std::unique_ptr<StreamingDecoder::DecodingState>
StreamingDecoder::DecodeSectionLength::NextWithValue(
StreamingDecoder* streaming) {
- TRACE_STREAMING("DecodeSectionLength(%zu)\n", value());
+ TRACE_STREAMING("DecodeSectionLength(%zu)\n", value_);
SectionBuffer* buf = streaming->CreateNewBuffer(
- module_offset(), section_id(), value(),
- Vector<const uint8_t>(buffer(), static_cast<int>(bytes_consumed())));
+ module_offset_, section_id_, value_,
+ Vector<const uint8_t>(buffer(), static_cast<int>(bytes_consumed_)));
if (!buf) return nullptr;
- if (value() == 0) {
- if (section_id() == SectionCode::kCodeSectionCode) {
+ if (value_ == 0) {
+ if (section_id_ == SectionCode::kCodeSectionCode) {
return streaming->Error("Code section cannot have size 0");
} else {
streaming->ProcessSection(buf);
if (streaming->ok()) {
// There is no payload, so we go to the next section immediately.
- return base::make_unique<DecodeSectionID>(streaming->module_offset());
+ return base::make_unique<DecodeSectionID>(streaming->module_offset_);
} else {
return nullptr;
}
}
} else {
- if (section_id() == SectionCode::kCodeSectionCode) {
+ if (section_id_ == SectionCode::kCodeSectionCode) {
// We reached the code section. All functions of the code section are put
// into the same SectionBuffer.
return base::make_unique<DecodeNumberOfFunctions>(buf);
@@ -358,7 +339,7 @@ StreamingDecoder::DecodeSectionLength::NextWithValue(
std::unique_ptr<StreamingDecoder::DecodingState>
StreamingDecoder::DecodeSectionPayload::Next(StreamingDecoder* streaming) {
TRACE_STREAMING("DecodeSectionPayload\n");
- streaming->ProcessSection(section_buffer());
+ streaming->ProcessSection(section_buffer_);
if (streaming->ok()) {
return base::make_unique<DecodeSectionID>(streaming->module_offset());
}
@@ -368,24 +349,24 @@ StreamingDecoder::DecodeSectionPayload::Next(StreamingDecoder* streaming) {
std::unique_ptr<StreamingDecoder::DecodingState>
StreamingDecoder::DecodeNumberOfFunctions::NextWithValue(
StreamingDecoder* streaming) {
- TRACE_STREAMING("DecodeNumberOfFunctions(%zu)\n", value());
+ TRACE_STREAMING("DecodeNumberOfFunctions(%zu)\n", value_);
// Copy the bytes we read into the section buffer.
- if (section_buffer()->payload_length() >= bytes_consumed()) {
- memcpy(section_buffer()->bytes() + section_buffer()->payload_offset(),
- buffer(), bytes_consumed());
+ if (section_buffer_->payload_length() >= bytes_consumed_) {
+ memcpy(section_buffer_->bytes() + section_buffer_->payload_offset(),
+ buffer(), bytes_consumed_);
} else {
return streaming->Error("Invalid code section length");
}
// {value} is the number of functions.
- if (value() > 0) {
- streaming->StartCodeSection(value());
+ if (value_ > 0) {
+ streaming->StartCodeSection(value_);
if (!streaming->ok()) return nullptr;
return base::make_unique<DecodeFunctionLength>(
- section_buffer(), section_buffer()->payload_offset() + bytes_consumed(),
- value());
+ section_buffer_, section_buffer_->payload_offset() + bytes_consumed_,
+ value_);
} else {
- if (section_buffer()->payload_length() != bytes_consumed()) {
+ if (section_buffer_->payload_length() != bytes_consumed_) {
return streaming->Error("not all code section bytes were consumed");
}
return base::make_unique<DecodeSectionID>(streaming->module_offset());
@@ -395,27 +376,27 @@ StreamingDecoder::DecodeNumberOfFunctions::NextWithValue(
std::unique_ptr<StreamingDecoder::DecodingState>
StreamingDecoder::DecodeFunctionLength::NextWithValue(
StreamingDecoder* streaming) {
- TRACE_STREAMING("DecodeFunctionLength(%zu)\n", value());
+ TRACE_STREAMING("DecodeFunctionLength(%zu)\n", value_);
// Copy the bytes we consumed into the section buffer.
- if (section_buffer_->length() >= buffer_offset_ + bytes_consumed()) {
+ if (section_buffer_->length() >= buffer_offset_ + bytes_consumed_) {
memcpy(section_buffer_->bytes() + buffer_offset_, buffer(),
- bytes_consumed());
+ bytes_consumed_);
} else {
return streaming->Error("Invalid code section length");
}
// {value} is the length of the function.
- if (value() == 0) {
+ if (value_ == 0) {
return streaming->Error("Invalid function length (0)");
- } else if (buffer_offset() + bytes_consumed() + value() >
- section_buffer()->length()) {
+ } else if (buffer_offset_ + bytes_consumed_ + value_ >
+ section_buffer_->length()) {
streaming->Error("not enough code section bytes");
return nullptr;
}
return base::make_unique<DecodeFunctionBody>(
- section_buffer(), buffer_offset() + bytes_consumed(), value(),
- num_remaining_functions(), streaming->module_offset());
+ section_buffer_, buffer_offset_ + bytes_consumed_, value_,
+ num_remaining_functions_, streaming->module_offset());
}
std::unique_ptr<StreamingDecoder::DecodingState>
@@ -423,15 +404,15 @@ StreamingDecoder::DecodeFunctionBody::Next(StreamingDecoder* streaming) {
TRACE_STREAMING("DecodeFunctionBody\n");
streaming->ProcessFunctionBody(
Vector<const uint8_t>(buffer(), static_cast<int>(size())),
- module_offset());
+ module_offset_);
if (!streaming->ok()) {
return nullptr;
}
- if (num_remaining_functions() != 0) {
+ if (num_remaining_functions_ != 0) {
return base::make_unique<DecodeFunctionLength>(
- section_buffer(), buffer_offset() + size(), num_remaining_functions());
+ section_buffer_, buffer_offset_ + size(), num_remaining_functions_);
} else {
- if (buffer_offset() + size() != section_buffer()->length()) {
+ if (buffer_offset_ + size() != section_buffer_->length()) {
return streaming->Error("not all code section bytes were used");
}
return base::make_unique<DecodeSectionID>(streaming->module_offset());
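With the per-state accessor methods removed, each state now reads the protected members of its DecodeVarInt32 base directly. For orientation, the states in this file chain as a small state machine; a sketch of the Next/NextWithValue transitions shown above:

    DecodeModuleHeader -> DecodeSectionID -> DecodeSectionLength
    DecodeSectionLength -> DecodeSectionPayload -> DecodeSectionID   (ordinary section)
    DecodeSectionLength -> DecodeNumberOfFunctions                   (code section)
    DecodeNumberOfFunctions -> DecodeFunctionLength -> DecodeFunctionBody
    DecodeFunctionBody -> DecodeFunctionLength                       (functions remaining)
    DecodeFunctionBody -> DecodeSectionID                            (last function)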
diff --git a/deps/v8/src/wasm/wasm-heap.cc b/deps/v8/src/wasm/wasm-code-manager.cc
index e111ec55f5..8e46f33b01 100644
--- a/deps/v8/src/wasm/wasm-heap.cc
+++ b/deps/v8/src/wasm/wasm-code-manager.cc
@@ -2,7 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/wasm/wasm-heap.h"
+#include "src/wasm/wasm-code-manager.h"
+
+#include <iomanip>
#include "src/assembler-inl.h"
#include "src/base/atomic-utils.h"
@@ -39,6 +41,15 @@ void GenerateJumpTrampoline(MacroAssembler* masm, Address target) {
__ jmp(kScratchRegister);
}
#undef __
+#elif V8_TARGET_ARCH_S390X
+#define __ masm->
+constexpr bool kModuleCanAllocateMoreMemory = false;
+
+void GenerateJumpTrampoline(MacroAssembler* masm, Address target) {
+ __ mov(ip, Operand(bit_cast<intptr_t, Address>(target)));
+ __ b(ip);
+}
+#undef __
#else
const bool kModuleCanAllocateMoreMemory = true;
#endif
@@ -53,7 +64,7 @@ void PatchTrampolineAndStubCalls(
new_code->constant_pool(), RelocInfo::kCodeTargetMask);
!it.done(); it.next(), orig_it.next()) {
Address old_target = orig_it.rinfo()->target_address();
-#if V8_TARGET_ARCH_X64
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390X
auto found = reverse_lookup.find(old_target);
DCHECK(found != reverse_lookup.end());
Address new_target = found->second;
@@ -176,18 +187,72 @@ bool WasmCode::HasTrapHandlerIndex() const { return trap_handler_index_ >= 0; }
void WasmCode::ResetTrapHandlerIndex() { trap_handler_index_ = -1; }
-// TODO(mtrofin): rework the dependency on isolate and code in
-// Disassembler::Decode.
-void WasmCode::Disassemble(Isolate* isolate, const char* name,
+void WasmCode::Print(Isolate* isolate) const {
+ OFStream os(stdout);
+ Disassemble(nullptr, isolate, os);
+}
+
+void WasmCode::Disassemble(const char* name, Isolate* isolate,
std::ostream& os) const {
- os << name << std::endl;
+ if (name) os << "name: " << name << "\n";
+ if (index_.IsJust()) os << "index: " << index_.FromJust() << "\n";
+ os << "kind: " << GetWasmCodeKindAsString(kind_) << "\n";
+ os << "compiler: " << (is_liftoff() ? "Liftoff" : "TurboFan") << "\n";
+ size_t body_size = instructions().size();
+ os << "Body (size = " << body_size << ")\n";
+
+#ifdef ENABLE_DISASSEMBLER
+
+ size_t instruction_size =
+ std::min(constant_pool_offset_, safepoint_table_offset_);
+ os << "Instructions (size = " << instruction_size << ")\n";
+ // TODO(mtrofin): rework the dependency on isolate and code in
+ // Disassembler::Decode.
Disassembler::Decode(isolate, &os, instructions().start(),
- instructions().end(), nullptr);
+ instructions().start() + instruction_size, nullptr);
+ os << "\n";
+
+ Object* source_positions_or_undef =
+ owner_->compiled_module()->source_positions()->get(index());
+ if (!source_positions_or_undef->IsUndefined(isolate)) {
+ os << "Source positions:\n pc offset position\n";
+ for (SourcePositionTableIterator it(
+ ByteArray::cast(source_positions_or_undef));
+ !it.done(); it.Advance()) {
+ os << std::setw(10) << std::hex << it.code_offset() << std::dec
+ << std::setw(10) << it.source_position().ScriptOffset()
+ << (it.is_statement() ? " statement" : "") << "\n";
+ }
+ os << "\n";
+ }
+
+ os << "RelocInfo (size = " << reloc_size_ << ")\n";
+ for (RelocIterator it(instructions(), reloc_info(), constant_pool());
+ !it.done(); it.next()) {
+ it.rinfo()->Print(isolate, os);
+ }
+ os << "\n";
+#endif // ENABLE_DISASSEMBLER
}
-void WasmCode::Print(Isolate* isolate) const {
- OFStream os(stdout);
- Disassemble(isolate, "", os);
+const char* GetWasmCodeKindAsString(WasmCode::Kind kind) {
+ switch (kind) {
+ case WasmCode::kFunction:
+ return "wasm function";
+ case WasmCode::kWasmToWasmWrapper:
+ return "wasm-to-wasm";
+ case WasmCode::kWasmToJsWrapper:
+ return "wasm-to-js";
+ case WasmCode::kLazyStub:
+ return "lazy-compile";
+ case WasmCode::kInterpreterStub:
+ return "interpreter-entry";
+ case WasmCode::kCopiedStub:
+ return "copied stub";
+ case WasmCode::kTrampoline:
+ return "trampoline";
+ }
+ return "unknown kind";
}
WasmCode::~WasmCode() {
@@ -226,12 +291,16 @@ void NativeModule::ResizeCodeTableForTest(size_t last_index) {
code_table_.resize(new_size);
int grow_by = static_cast<int>(new_size) -
compiled_module()->source_positions()->length();
- compiled_module()->set_source_positions(
- isolate->factory()->CopyFixedArrayAndGrow(
- compiled_module()->source_positions(), grow_by, TENURED));
- compiled_module()->set_handler_table(
- isolate->factory()->CopyFixedArrayAndGrow(
- compiled_module()->handler_table(), grow_by, TENURED));
+ Handle<FixedArray> source_positions(compiled_module()->source_positions(),
+ isolate);
+ source_positions = isolate->factory()->CopyFixedArrayAndGrow(
+ source_positions, grow_by, TENURED);
+ compiled_module()->set_source_positions(*source_positions);
+ Handle<FixedArray> handler_table(compiled_module()->handler_table(),
+ isolate);
+ handler_table = isolate->factory()->CopyFixedArrayAndGrow(handler_table,
+ grow_by, TENURED);
+ compiled_module()->set_handler_table(*handler_table);
}
}
@@ -246,7 +315,7 @@ uint32_t NativeModule::FunctionCount() const {
WasmCode* NativeModule::AddOwnedCode(
Vector<const byte> orig_instructions,
- std::unique_ptr<const byte[]>&& reloc_info, size_t reloc_size,
+ std::unique_ptr<const byte[]> reloc_info, size_t reloc_size,
Maybe<uint32_t> index, WasmCode::Kind kind, size_t constant_pool_offset,
uint32_t stack_slots, size_t safepoint_table_offset,
std::shared_ptr<ProtectedInstructions> protected_instructions,
@@ -261,7 +330,7 @@ WasmCode* NativeModule::AddOwnedCode(
std::unique_ptr<WasmCode> code(new WasmCode(
{executable_buffer, orig_instructions.size()}, std::move(reloc_info),
reloc_size, this, index, kind, constant_pool_offset, stack_slots,
- safepoint_table_offset, protected_instructions, is_liftoff));
+ safepoint_table_offset, std::move(protected_instructions), is_liftoff));
WasmCode* ret = code.get();
// TODO(mtrofin): We allocate in increasing address order, and
@@ -270,6 +339,9 @@ WasmCode* NativeModule::AddOwnedCode(
auto insert_before = std::upper_bound(owned_code_.begin(), owned_code_.end(),
code, owned_code_comparer_);
owned_code_.insert(insert_before, std::move(code));
+ wasm_code_manager_->FlushICache(ret->instructions().start(),
+ ret->instructions().size());
+
return ret;
}
@@ -278,23 +350,23 @@ WasmCode* NativeModule::AddCodeCopy(Handle<Code> code, WasmCode::Kind kind,
WasmCode* ret = AddAnonymousCode(code, kind);
SetCodeTable(index, ret);
ret->index_ = Just(index);
- compiled_module()->ptr_to_source_positions()->set(
- static_cast<int>(index), code->source_position_table());
- compiled_module()->ptr_to_handler_table()->set(static_cast<int>(index),
- code->handler_table());
+ compiled_module()->source_positions()->set(static_cast<int>(index),
+ code->source_position_table());
+ compiled_module()->handler_table()->set(static_cast<int>(index),
+ code->handler_table());
return ret;
}
WasmCode* NativeModule::AddInterpreterWrapper(Handle<Code> code,
uint32_t index) {
- WasmCode* ret = AddAnonymousCode(code, WasmCode::InterpreterStub);
+ WasmCode* ret = AddAnonymousCode(code, WasmCode::kInterpreterStub);
ret->index_ = Just(index);
return ret;
}
WasmCode* NativeModule::SetLazyBuiltin(Handle<Code> code) {
DCHECK_NULL(lazy_builtin_);
- lazy_builtin_ = AddAnonymousCode(code, WasmCode::LazyStub);
+ lazy_builtin_ = AddAnonymousCode(code, WasmCode::kLazyStub);
for (uint32_t i = num_imported_functions(), e = FunctionCount(); i < e; ++i) {
SetCodeTable(i, lazy_builtin_);
@@ -325,8 +397,8 @@ WasmCode* NativeModule::AddAnonymousCode(Handle<Code> code,
static_cast<size_t>(code->instruction_size())},
std::move(reloc_info), static_cast<size_t>(code->relocation_size()),
Nothing<uint32_t>(), kind, code->constant_pool_offset(),
- (code->is_turbofanned() ? code->stack_slots() : 0),
- (code->is_turbofanned() ? code->safepoint_table_offset() : 0), {});
+ (code->has_safepoint_info() ? code->stack_slots() : 0),
+ (code->has_safepoint_info() ? code->safepoint_table_offset() : 0), {});
if (ret == nullptr) return nullptr;
intptr_t delta = ret->instructions().start() - code->instruction_start();
int mask = RelocInfo::kApplyMask | RelocInfo::kCodeTargetMask |
@@ -356,7 +428,7 @@ WasmCode* NativeModule::AddAnonymousCode(Handle<Code> code,
WasmCode* NativeModule::AddCode(
const CodeDesc& desc, uint32_t frame_slots, uint32_t index,
size_t safepoint_table_offset,
- std::shared_ptr<ProtectedInstructions> protected_instructions,
+ std::unique_ptr<ProtectedInstructions> protected_instructions,
bool is_liftoff) {
std::unique_ptr<byte[]> reloc_info;
if (desc.reloc_size) {
@@ -368,8 +440,9 @@ WasmCode* NativeModule::AddCode(
WasmCode* ret = AddOwnedCode(
{desc.buffer, static_cast<size_t>(desc.instr_size)},
std::move(reloc_info), static_cast<size_t>(desc.reloc_size), Just(index),
- WasmCode::Function, desc.instr_size - desc.constant_pool_size,
- frame_slots, safepoint_table_offset, protected_instructions, is_liftoff);
+ WasmCode::kFunction, desc.instr_size - desc.constant_pool_size,
+ frame_slots, safepoint_table_offset, std::move(protected_instructions),
+ is_liftoff);
if (ret == nullptr) return nullptr;
SetCodeTable(index, ret);
@@ -408,7 +481,7 @@ WasmCode* NativeModule::AddCode(
return ret;
}
-#if V8_TARGET_ARCH_X64
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390X
Address NativeModule::CreateTrampolineTo(Handle<Code> code) {
MacroAssembler masm(code->GetIsolate(), nullptr, 0, CodeObjectRequired::kNo);
Address dest = code->instruction_start();
@@ -417,7 +490,7 @@ Address NativeModule::CreateTrampolineTo(Handle<Code> code) {
masm.GetCode(nullptr, &code_desc);
WasmCode* wasm_code = AddOwnedCode(
{code_desc.buffer, static_cast<size_t>(code_desc.instr_size)}, nullptr, 0,
- Nothing<uint32_t>(), WasmCode::Trampoline, 0, 0, 0, {});
+ Nothing<uint32_t>(), WasmCode::kTrampoline, 0, 0, 0, {});
if (wasm_code == nullptr) return nullptr;
Address ret = wasm_code->instructions().start();
trampolines_.emplace(std::make_pair(dest, ret));
@@ -438,7 +511,7 @@ Address NativeModule::GetLocalAddressFor(Handle<Code> code) {
uint32_t key = code->stub_key();
auto copy = stubs_.find(key);
if (copy == stubs_.end()) {
- WasmCode* ret = AddAnonymousCode(code, WasmCode::CopiedStub);
+ WasmCode* ret = AddAnonymousCode(code, WasmCode::kCopiedStub);
copy = stubs_.emplace(std::make_pair(key, ret)).first;
}
return copy->second->instructions().start();
@@ -462,7 +535,7 @@ WasmCode* NativeModule::GetExportedWrapper(uint32_t index) {
}
WasmCode* NativeModule::AddExportedWrapper(Handle<Code> code, uint32_t index) {
- WasmCode* ret = AddAnonymousCode(code, WasmCode::WasmToWasmWrapper);
+ WasmCode* ret = AddAnonymousCode(code, WasmCode::kWasmToWasmWrapper);
ret->index_ = Just(index);
exported_wasm_to_wasm_wrappers_.insert(std::make_pair(index, ret));
return ret;
@@ -482,8 +555,7 @@ void NativeModule::Link(uint32_t index) {
for (RelocIterator it(code->instructions(), code->reloc_info(),
code->constant_pool(), mode_mask);
!it.done(); it.next()) {
- uint32_t index =
- *(reinterpret_cast<uint32_t*>(it.rinfo()->target_address_address()));
+ uint32_t index = GetWasmCalleeTag(it.rinfo());
const WasmCode* target = GetCode(index);
if (target == nullptr) continue;
Address target_addr = target->instructions().start();
@@ -519,8 +591,8 @@ Address NativeModule::AllocateForCode(size_t size) {
}
Address ret = mem.ranges().front().first;
Address end = ret + size;
- Address commit_start = RoundUp(ret, base::OS::AllocatePageSize());
- Address commit_end = RoundUp(end, base::OS::AllocatePageSize());
+ Address commit_start = RoundUp(ret, AllocatePageSize());
+ Address commit_end = RoundUp(end, AllocatePageSize());
// {commit_start} will be either ret or the start of the next page.
// {commit_end} will be the start of the page after the one in which
// the allocation ends.
@@ -542,7 +614,7 @@ Address NativeModule::AllocateForCode(size_t size) {
Address start =
std::max(commit_start, reinterpret_cast<Address>(it->address()));
size_t commit_size = static_cast<size_t>(commit_end - start);
- DCHECK(IsAligned(commit_size, base::OS::AllocatePageSize()));
+ DCHECK(IsAligned(commit_size, AllocatePageSize()));
if (!wasm_code_manager_->Commit(start, commit_size)) {
return nullptr;
}
@@ -551,7 +623,7 @@ Address NativeModule::AllocateForCode(size_t size) {
}
#else
size_t commit_size = static_cast<size_t>(commit_end - commit_start);
- DCHECK(IsAligned(commit_size, base::OS::AllocatePageSize()));
+ DCHECK(IsAligned(commit_size, AllocatePageSize()));
if (!wasm_code_manager_->Commit(commit_start, commit_size)) {
return nullptr;
}
@@ -651,9 +723,8 @@ WasmCodeManager::WasmCodeManager(v8::Isolate* isolate, size_t max_committed)
}
bool WasmCodeManager::Commit(Address start, size_t size) {
- DCHECK(
- IsAligned(reinterpret_cast<size_t>(start), base::OS::AllocatePageSize()));
- DCHECK(IsAligned(size, base::OS::AllocatePageSize()));
+ DCHECK(IsAligned(reinterpret_cast<size_t>(start), AllocatePageSize()));
+ DCHECK(IsAligned(size, AllocatePageSize()));
if (size > static_cast<size_t>(std::numeric_limits<intptr_t>::max())) {
return false;
}
@@ -663,9 +734,14 @@ bool WasmCodeManager::Commit(Address start, size_t size) {
remaining_uncommitted_.Increment(size);
return false;
}
- // TODO(v8:7105) Enable W^X instead of setting W|X permissions below.
- bool ret = base::OS::SetPermissions(
- start, size, base::OS::MemoryPermission::kReadWriteExecute);
+ PageAllocator::Permission permission = FLAG_wasm_write_protect_code_memory
+ ? PageAllocator::kReadWrite
+ : PageAllocator::kReadWriteExecute;
+
+ bool ret = SetPermissions(start, size, permission);
+ TRACE_HEAP("Setting rw permissions for %p:%p\n",
+ reinterpret_cast<void*>(start),
+ reinterpret_cast<void*>(start + size));
if (!ret) {
// Highly unlikely.
remaining_uncommitted_.Increment(size);
@@ -704,11 +780,11 @@ void WasmCodeManager::AssignRanges(void* start, void* end,
void WasmCodeManager::TryAllocate(size_t size, VirtualMemory* ret, void* hint) {
DCHECK_GT(size, 0);
- size = RoundUp(size, base::OS::AllocatePageSize());
- if (hint == nullptr) hint = base::OS::GetRandomMmapAddr();
+ size = RoundUp(size, AllocatePageSize());
+ if (hint == nullptr) hint = GetRandomMmapAddr();
- if (!AlignedAllocVirtualMemory(
- size, static_cast<size_t>(base::OS::AllocatePageSize()), hint, ret)) {
+ if (!AlignedAllocVirtualMemory(size, static_cast<size_t>(AllocatePageSize()),
+ hint, ret)) {
DCHECK(!ret->IsReserved());
}
TRACE_HEAP("VMem alloc: %p:%p (%zu)\n", ret->address(), ret->end(),
@@ -720,7 +796,7 @@ size_t WasmCodeManager::GetAllocationChunk(const WasmModule& module) {
// from something embedder-provided
if (kRequiresCodeRange) return kMaxWasmCodeMemory;
DCHECK(kModuleCanAllocateMoreMemory);
- size_t ret = base::OS::AllocatePageSize();
+ size_t ret = AllocatePageSize();
// A ballpark guesstimate of the native code size inflation factor.
constexpr size_t kMultiplier = 4;
@@ -756,9 +832,56 @@ std::unique_ptr<NativeModule> WasmCodeManager::NewNativeModule(
return ret;
}
+ V8::FatalProcessOutOfMemory("WasmCodeManager::NewNativeModule");
return nullptr;
}
+bool NativeModule::SetExecutable(bool executable) {
+ if (is_executable_ == executable) return true;
+ TRACE_HEAP("Setting module %zu as executable: %d.\n", instance_id,
+ executable);
+ PageAllocator::Permission permission =
+ executable ? PageAllocator::kReadExecute : PageAllocator::kReadWrite;
+
+ if (FLAG_wasm_write_protect_code_memory) {
+#if V8_OS_WIN
+ // On Windows, we need to switch permissions per separate virtual memory
+ // reservation. This is really only a problem when the NativeModule is
+ // growable (meaning can_request_more_memory_), which is the case on 32-bit
+ // in production, and in unittests.
+ // For now, in that case, we commit at reserved memory granularity.
+ // Technically, that may be a waste, because we may reserve more than we
+ // use, but on 32-bit the scarce resource is address space, committed or
+ // not.
+ if (can_request_more_memory_) {
+ for (auto& vmem : owned_memory_) {
+ if (!SetPermissions(vmem.address(), vmem.size(), permission)) {
+ return false;
+ }
+ TRACE_HEAP("Set %p:%p to executable:%d\n", vmem.address(), vmem.end(),
+ executable);
+ }
+ is_executable_ = executable;
+ return true;
+ }
+#endif
+ for (auto& range : allocated_memory_.ranges()) {
+ // allocated_memory_ is fine-grained, so we need to
+ // page-align it.
+ size_t range_size = RoundUp(
+ static_cast<size_t>(range.second - range.first), AllocatePageSize());
+ if (!SetPermissions(range.first, range_size, permission)) {
+ return false;
+ }
+ TRACE_HEAP("Set %p:%p to executable:%d\n",
+ reinterpret_cast<void*>(range.first),
+ reinterpret_cast<void*>(range.second), executable);
+ }
+ }
+ is_executable_ = executable;
+ return true;
+}
+
std::unique_ptr<NativeModule> NativeModule::Clone() {
std::unique_ptr<NativeModule> ret = wasm_code_manager_->NewNativeModule(
owned_memory_.front().size(), FunctionCount(), num_imported_functions(),
@@ -802,14 +925,14 @@ std::unique_ptr<NativeModule> NativeModule::Clone() {
for (uint32_t i = num_imported_functions(), e = FunctionCount(); i < e; ++i) {
const WasmCode* original_code = GetCode(i);
switch (original_code->kind()) {
- case WasmCode::LazyStub: {
+ case WasmCode::kLazyStub: {
if (original_code->IsAnonymous()) {
ret->SetCodeTable(i, ret->lazy_builtin());
} else {
if (!ret->CloneLazyBuiltinInto(i)) return nullptr;
}
} break;
- case WasmCode::Function: {
+ case WasmCode::kFunction: {
WasmCode* new_code = ret->CloneCode(original_code);
if (new_code == nullptr) return nullptr;
PatchTrampolineAndStubCalls(original_code, new_code, reverse_lookup);
@@ -835,7 +958,7 @@ void WasmCodeManager::FreeNativeModuleMemories(NativeModule* native_module) {
// which we currently indicate by having the isolate_ as null
if (isolate_ == nullptr) return;
size_t freed_mem = native_module->committed_memory_;
- DCHECK(IsAligned(freed_mem, base::OS::AllocatePageSize()));
+ DCHECK(IsAligned(freed_mem, AllocatePageSize()));
remaining_uncommitted_.Increment(freed_mem);
isolate_->AdjustAmountOfExternalAllocatedMemory(
-static_cast<int64_t>(freed_mem));
@@ -847,7 +970,11 @@ void WasmCodeManager::FreeNativeModuleMemories(NativeModule* native_module) {
// easily identify those places where we know we have the first
// instruction PC.
WasmCode* WasmCodeManager::GetCodeFromStartAddress(Address pc) const {
- return LookupCode(pc);
+ WasmCode* code = LookupCode(pc);
+ // This method can only be called for valid instruction start addresses.
+ DCHECK_NOT_NULL(code);
+ DCHECK_EQ(pc, code->instructions().start());
+ return code;
}
WasmCode* WasmCodeManager::LookupCode(Address pc) const {
@@ -880,6 +1007,50 @@ intptr_t WasmCodeManager::remaining_uncommitted() const {
return remaining_uncommitted_.Value();
}
+void WasmCodeManager::FlushICache(Address start, size_t size) {
+ Assembler::FlushICache(reinterpret_cast<internal::Isolate*>(isolate_), start,
+ size);
+}
+
+NativeModuleModificationScope::NativeModuleModificationScope(
+ NativeModule* native_module)
+ : native_module_(native_module) {
+ if (native_module_) {
+ bool success = native_module_->SetExecutable(false);
+ CHECK(success);
+ }
+}
+
+NativeModuleModificationScope::~NativeModuleModificationScope() {
+ if (native_module_) {
+ bool success = native_module_->SetExecutable(true);
+ CHECK(success);
+ }
+}
+
+// On Intel, call sites are encoded as a displacement. For linking
+// and for serialization/deserialization, we want to store/retrieve
+// a tag (the function index). On Intel, that means accessing the
+// raw displacement. Everywhere else, that simply means accessing
+// the target address.
+void SetWasmCalleeTag(RelocInfo* rinfo, uint32_t tag) {
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
+ *(reinterpret_cast<uint32_t*>(rinfo->target_address_address())) = tag;
+#else
+ rinfo->set_target_address(nullptr, reinterpret_cast<Address>(tag),
+ SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
+#endif
+}
+
+uint32_t GetWasmCalleeTag(RelocInfo* rinfo) {
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
+ return *(reinterpret_cast<uint32_t*>(rinfo->target_address_address()));
+#else
+ return static_cast<uint32_t>(
+ reinterpret_cast<size_t>(rinfo->target_address()));
+#endif
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
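On Intel, the callee-tag round-trip above reduces to writing and reading a raw 32-bit slot (the call's displacement). A self-contained sketch of that invariant, using local stand-ins rather than V8's RelocInfo API:

#include <cassert>
#include <cstdint>
#include <cstring>

void SetTag(uint8_t* displacement_slot, uint32_t tag) {
  // Overwrite the 4-byte displacement with the tag.
  std::memcpy(displacement_slot, &tag, sizeof tag);
}

uint32_t GetTag(const uint8_t* displacement_slot) {
  uint32_t tag;
  std::memcpy(&tag, displacement_slot, sizeof tag);
  return tag;
}

int main() {
  uint8_t call_site[4] = {};  // stands in for rinfo->target_address_address()
  SetTag(call_site, 42);      // store the callee's function index
  assert(GetTag(call_site) == 42);
}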
diff --git a/deps/v8/src/wasm/wasm-heap.h b/deps/v8/src/wasm/wasm-code-manager.h
index 9775f18b9b..3e2a0918fb 100644
--- a/deps/v8/src/wasm/wasm-heap.h
+++ b/deps/v8/src/wasm/wasm-code-manager.h
@@ -88,13 +88,13 @@ using ProtectedInstructions =
class V8_EXPORT_PRIVATE WasmCode final {
public:
enum Kind {
- Function,
- WasmToWasmWrapper,
- WasmToJsWrapper,
- LazyStub,
- InterpreterStub,
- CopiedStub,
- Trampoline
+ kFunction,
+ kWasmToWasmWrapper,
+ kWasmToJsWrapper,
+ kLazyStub,
+ kInterpreterStub,
+ kCopiedStub,
+ kTrampoline
};
Vector<byte> instructions() const { return instructions_; }
@@ -123,8 +123,8 @@ class V8_EXPORT_PRIVATE WasmCode final {
return *protected_instructions_.get();
}
- void Disassemble(Isolate* isolate, const char* name, std::ostream& os) const;
void Print(Isolate* isolate) const;
+ void Disassemble(const char* name, Isolate* isolate, std::ostream& os) const;
~WasmCode();
@@ -151,7 +151,7 @@ class V8_EXPORT_PRIVATE WasmCode final {
constant_pool_offset_(constant_pool_offset),
stack_slots_(stack_slots),
safepoint_table_offset_(safepoint_table_offset),
- protected_instructions_(protected_instructions),
+ protected_instructions_(std::move(protected_instructions)),
is_liftoff_(is_liftoff) {}
WasmCode(const WasmCode&) = delete;
@@ -174,6 +174,9 @@ class V8_EXPORT_PRIVATE WasmCode final {
bool is_liftoff_;
};
+// Return a textual description of the kind.
+const char* GetWasmCodeKindAsString(WasmCode::Kind);
+
class WasmCodeManager;
// Note that we currently need to add code on the main thread, because we may
@@ -187,7 +190,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
WasmCode* AddCode(const CodeDesc& desc, uint32_t frame_count, uint32_t index,
size_t safepoint_table_offset,
- std::shared_ptr<ProtectedInstructions>,
+ std::unique_ptr<ProtectedInstructions>,
bool is_liftoff = false);
// A way to copy over JS-allocated code. This is because we compile
@@ -228,6 +231,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
// this change.
WasmCode* CloneLazyBuiltinInto(uint32_t);
+ bool SetExecutable(bool executable);
+
// For cctests, where we build both WasmModule and the runtime objects
// on the fly, and bypass the instance builder pipeline.
void ResizeCodeTableForTest(size_t);
@@ -243,16 +248,10 @@ class V8_EXPORT_PRIVATE NativeModule final {
std::vector<wasm::GlobalHandleAddress>& function_tables() {
return specialization_data_.function_tables;
}
- std::vector<wasm::GlobalHandleAddress>& signature_tables() {
- return specialization_data_.signature_tables;
- }
std::vector<wasm::GlobalHandleAddress>& empty_function_tables() {
return specialization_data_.empty_function_tables;
}
- std::vector<wasm::GlobalHandleAddress>& empty_signature_tables() {
- return specialization_data_.empty_signature_tables;
- }
uint32_t num_imported_functions() const { return num_imported_functions_; }
size_t num_function_tables() const {
@@ -292,7 +291,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
// code is obtained (CodeDesc vs, as a point in time, Code*), the kind,
// whether it has an index or is anonymous, etc.
WasmCode* AddOwnedCode(Vector<const byte> orig_instructions,
- std::unique_ptr<const byte[]>&& reloc_info,
+ std::unique_ptr<const byte[]> reloc_info,
size_t reloc_size, Maybe<uint32_t> index,
WasmCode::Kind kind, size_t constant_pool_offset,
uint32_t stack_slots, size_t safepoint_table_offset,
@@ -325,15 +324,14 @@ class V8_EXPORT_PRIVATE NativeModule final {
Handle<WasmCompiledModule> compiled_module_;
size_t committed_memory_ = 0;
bool can_request_more_memory_;
+ bool is_executable_ = false;
// Specialization data that needs to be serialized and cloned.
// Keeping it grouped together because it makes cloning of all these
// elements a one-line copy.
struct {
std::vector<wasm::GlobalHandleAddress> function_tables;
- std::vector<wasm::GlobalHandleAddress> signature_tables;
std::vector<wasm::GlobalHandleAddress> empty_function_tables;
- std::vector<wasm::GlobalHandleAddress> empty_signature_tables;
} specialization_data_;
};
@@ -358,6 +356,10 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
WasmCode* GetCodeFromStartAddress(Address pc) const;
intptr_t remaining_uncommitted() const;
+ // TODO(mtrofin): replace this API with an alternative that is Isolate-
+ // independent.
+ void FlushICache(Address start, size_t size);
+
private:
friend class NativeModule;
@@ -380,9 +382,37 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
// worth requesting a GC on memory pressure.
size_t active_ = 0;
base::AtomicNumber<intptr_t> remaining_uncommitted_;
+
+ // TODO(mtrofin): remove the dependency on isolate.
v8::Isolate* isolate_;
};
+// Within the scope, the native_module is writable and not executable.
+// At the scope's destruction, the native_module is executable and not writable.
+// The states inside the scope and at the scope's termination are independent
+// of the native_module's state when entering the scope.
+// We currently mark the entire module's memory W^X:
+// - for AOT, that's as efficient as it can be.
+// - for Lazy, we don't have a heuristic for functions that may need patching,
+//   and even if we did, the resulting set of pages may be fragmented;
+//   for now we try to keep the number of syscalls low.
+// - a similar argument applies at debug time.
+class NativeModuleModificationScope final {
+ public:
+ explicit NativeModuleModificationScope(NativeModule* native_module);
+ ~NativeModuleModificationScope();
+
+ private:
+ NativeModule* native_module_;
+};
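// A toy, self-contained illustration of the RAII contract documented above
// (writable inside the scope, executable again on exit). The types here are
// stand-ins, not V8 API:
#include <cstdio>

struct Module { bool executable = true; };

struct ModificationScope {
  explicit ModificationScope(Module* m) : m_(m) { m_->executable = false; }
  ~ModificationScope() { m_->executable = true; }
  Module* m_;
};

int main() {
  Module m;
  {
    ModificationScope scope(&m);
    std::printf("inside: executable=%d\n", m.executable);  // 0
  }
  std::printf("after:  executable=%d\n", m.executable);    // 1
}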
+
+// Utilities specific to wasm code generation. We embed a tag - the index of
+// the called function - at call sites, both when the code is first created
+// and when it is serialized. These APIs provide the accessors; the
+// implementation has platform-specific nuances.
+void SetWasmCalleeTag(RelocInfo* rinfo, uint32_t tag);
+uint32_t GetWasmCalleeTag(RelocInfo* rinfo);
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-code-specialization.cc b/deps/v8/src/wasm/wasm-code-specialization.cc
index 40a9dac9a3..416d1d600a 100644
--- a/deps/v8/src/wasm/wasm-code-specialization.cc
+++ b/deps/v8/src/wasm/wasm-code-specialization.cc
@@ -9,9 +9,9 @@
#include "src/objects-inl.h"
#include "src/source-position-table.h"
#include "src/wasm/decoder.h"
+#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
-#include "src/wasm/wasm-opcodes.h"
namespace v8 {
namespace internal {
@@ -50,18 +50,19 @@ class PatchDirectCallsHelper {
decoder(nullptr, nullptr) {
uint32_t func_index = code->index();
WasmCompiledModule* comp_mod = instance->compiled_module();
- func_bytes = comp_mod->module_bytes()->GetChars() +
- comp_mod->module()->functions[func_index].code.offset();
+ func_bytes =
+ comp_mod->shared()->module_bytes()->GetChars() +
+ comp_mod->shared()->module()->functions[func_index].code.offset();
}
PatchDirectCallsHelper(WasmInstanceObject* instance, Code* code)
: source_pos_it(code->SourcePositionTable()), decoder(nullptr, nullptr) {
FixedArray* deopt_data = code->deoptimization_data();
DCHECK_EQ(2, deopt_data->length());
- WasmCompiledModule* comp_mod = instance->compiled_module();
+ WasmSharedModuleData* shared = instance->compiled_module()->shared();
int func_index = Smi::ToInt(deopt_data->get(1));
- func_bytes = comp_mod->module_bytes()->GetChars() +
- comp_mod->module()->functions[func_index].code.offset();
+ func_bytes = shared->module_bytes()->GetChars() +
+ shared->module()->functions[func_index].code.offset();
}
SourcePositionTableIterator source_pos_it;
@@ -115,12 +116,12 @@ bool CodeSpecialization::ApplyToWholeInstance(
DisallowHeapAllocation no_gc;
WasmCompiledModule* compiled_module = instance->compiled_module();
NativeModule* native_module = compiled_module->GetNativeModule();
- FixedArray* code_table = compiled_module->ptr_to_code_table();
- WasmModule* module = compiled_module->module();
- std::vector<WasmFunction>* wasm_functions =
- &compiled_module->module()->functions;
+ FixedArray* code_table = compiled_module->code_table();
+ WasmSharedModuleData* shared = compiled_module->shared();
+ WasmModule* module = shared->module();
+ std::vector<WasmFunction>* wasm_functions = &shared->module()->functions;
DCHECK_EQ(compiled_module->export_wrappers()->length(),
- compiled_module->module()->num_exported_functions);
+ shared->module()->num_exported_functions);
bool changed = false;
int func_index = module->num_imported_functions;
@@ -131,7 +132,7 @@ bool CodeSpecialization::ApplyToWholeInstance(
WasmCodeWrapper wrapper;
if (FLAG_wasm_jit_to_native) {
const WasmCode* wasm_function = native_module->GetCode(func_index);
- if (wasm_function->kind() != WasmCode::Function) {
+ if (wasm_function->kind() != WasmCode::kFunction) {
continue;
}
wrapper = WasmCodeWrapper(wasm_function);
@@ -206,7 +207,7 @@ bool CodeSpecialization::ApplyToWasmCode(WasmCodeWrapper code,
if (code.IsCodeObject()) {
DCHECK_EQ(Code::WASM_FUNCTION, code.GetCode()->kind());
} else {
- DCHECK_EQ(wasm::WasmCode::Function, code.GetWasmCode()->kind());
+ DCHECK_EQ(wasm::WasmCode::kFunction, code.GetWasmCode()->kind());
}
bool patch_table_size = old_function_table_size || new_function_table_size;
@@ -261,8 +262,7 @@ bool CodeSpecialization::ApplyToWasmCode(WasmCodeWrapper code,
patch_direct_calls_helper->decoder,
patch_direct_calls_helper->func_bytes + byte_pos);
FixedArray* code_table =
- relocate_direct_calls_instance->compiled_module()
- ->ptr_to_code_table();
+ relocate_direct_calls_instance->compiled_module()->code_table();
Code* new_code = Code::cast(code_table->get(called_func_index));
it.rinfo()->set_target_address(new_code->GetIsolate(),
new_code->instruction_start(),
diff --git a/deps/v8/src/wasm/wasm-code-wrapper.cc b/deps/v8/src/wasm/wasm-code-wrapper.cc
index 28a96d16bf..9256391543 100644
--- a/deps/v8/src/wasm/wasm-code-wrapper.cc
+++ b/deps/v8/src/wasm/wasm-code-wrapper.cc
@@ -4,8 +4,10 @@
#include "src/wasm/wasm-code-wrapper.h"
-#include "src/objects.h"
+#include "src/objects-inl.h"
#include "src/objects/code.h"
+#include "src/wasm/wasm-code-manager.h"
+#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
@@ -34,5 +36,34 @@ const wasm::WasmCode* WasmCodeWrapper::GetWasmCode() const {
bool WasmCodeWrapper::IsCodeObject() const { return !FLAG_wasm_jit_to_native; }
+#ifdef ENABLE_DISASSEMBLER
+void WasmCodeWrapper::Disassemble(const char* name, Isolate* isolate,
+ std::ostream& os) const {
+ if (IsCodeObject()) {
+ GetCode()->Disassemble(name, os);
+ } else {
+ GetWasmCode()->Disassemble(name, isolate, os);
+ }
+}
+#endif
+
+bool WasmCodeWrapper::is_liftoff() const {
+ return IsCodeObject() ? !GetCode()->is_turbofanned()
+ : GetWasmCode()->is_liftoff();
+}
+
+Vector<uint8_t> WasmCodeWrapper::instructions() const {
+ if (!IsCodeObject()) return GetWasmCode()->instructions();
+ Handle<Code> code = GetCode();
+ return {code->instruction_start(),
+ static_cast<size_t>(code->instruction_size())};
+}
+
+Handle<WasmInstanceObject> WasmCodeWrapper::wasm_instance() const {
+ return IsCodeObject()
+ ? handle(WasmInstanceObject::GetOwningInstanceGC(*GetCode()))
+ : handle(WasmInstanceObject::GetOwningInstance(GetWasmCode()));
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-code-wrapper.h b/deps/v8/src/wasm/wasm-code-wrapper.h
index f80aee8056..7d978152f1 100644
--- a/deps/v8/src/wasm/wasm-code-wrapper.h
+++ b/deps/v8/src/wasm/wasm-code-wrapper.h
@@ -13,6 +13,7 @@ class WasmCode;
} // namespace wasm
class Code;
+class WasmInstanceObject;
// TODO(mtrofin): remove once we remove FLAG_wasm_jit_to_native
class WasmCodeWrapper {
@@ -25,6 +26,15 @@ class WasmCodeWrapper {
const wasm::WasmCode* GetWasmCode() const;
bool is_null() const { return code_ptr_.wasm_code_ == nullptr; }
bool IsCodeObject() const;
+ bool is_liftoff() const;
+
+ Vector<uint8_t> instructions() const;
+
+ Handle<WasmInstanceObject> wasm_instance() const;
+
+#ifdef ENABLE_DISASSEMBLER
+ void Disassemble(const char* name, Isolate* isolate, std::ostream& os) const;
+#endif
private:
union {
diff --git a/deps/v8/src/wasm/wasm-constants.h b/deps/v8/src/wasm/wasm-constants.h
new file mode 100644
index 0000000000..5e7ce1e4f5
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-constants.h
@@ -0,0 +1,83 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_CONSTANTS_H_
+#define V8_WASM_CONSTANTS_H_
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// Binary encoding of the module header.
+constexpr uint32_t kWasmMagic = 0x6d736100;
+constexpr uint32_t kWasmVersion = 0x01;
+
+// Binary encoding of local types.
+enum ValueTypeCode : uint8_t {
+ kLocalVoid = 0x40,
+ kLocalI32 = 0x7f,
+ kLocalI64 = 0x7e,
+ kLocalF32 = 0x7d,
+ kLocalF64 = 0x7c,
+ kLocalS128 = 0x7b
+};
+// Binary encoding of other types.
+constexpr uint8_t kWasmFunctionTypeCode = 0x60;
+constexpr uint8_t kWasmAnyFunctionTypeCode = 0x70;
+
+// Binary encoding of import/export kinds.
+enum ImportExportKindCode : uint8_t {
+ kExternalFunction = 0,
+ kExternalTable = 1,
+ kExternalMemory = 2,
+ kExternalGlobal = 3
+};
+
+// Binary encoding of maximum and shared flags for memories.
+enum MaximumFlag : uint8_t { kNoMaximumFlag = 0, kHasMaximumFlag = 1 };
+
+enum MemoryFlags : uint8_t {
+ kNoMaximum = 0,
+ kMaximum = 1,
+ kSharedNoMaximum = 2,
+ kSharedAndMaximum = 3
+};
+
+// Binary encoding of section identifiers.
+enum SectionCode : int8_t {
+ kUnknownSectionCode = 0, // code for unknown sections
+ kTypeSectionCode = 1, // Function signature declarations
+ kImportSectionCode = 2, // Import declarations
+ kFunctionSectionCode = 3, // Function declarations
+ kTableSectionCode = 4, // Indirect function table and other tables
+ kMemorySectionCode = 5, // Memory attributes
+ kGlobalSectionCode = 6, // Global declarations
+ kExportSectionCode = 7, // Exports
+ kStartSectionCode = 8, // Start function declaration
+ kElementSectionCode = 9, // Elements section
+ kCodeSectionCode = 10, // Function code
+ kDataSectionCode = 11, // Data segments
+ kNameSectionCode = 12, // Name section (encoded as a string)
+ kExceptionSectionCode = 13, // Exception section
+
+ // Helper values
+ kFirstSectionInModule = kTypeSectionCode,
+ kLastKnownModuleSection = kExceptionSectionCode,
+};
+
+// Binary encoding of name section kinds.
+enum NameSectionKindCode : uint8_t { kModule = 0, kFunction = 1, kLocal = 2 };
+
+constexpr uint32_t kWasmPageSize = 0x10000;
+constexpr int kInvalidExceptionTag = -1;
+
+// TODO(wasm): Wrap WasmCodePosition in a struct.
+using WasmCodePosition = int;
+constexpr WasmCodePosition kNoCodePosition = -1;
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_CONSTANTS_H_
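For orientation, the constants in this new header are enough to spell out the smallest interesting module by hand. A sketch; the trailing section is a type section declaring zero signatures:

#include <cstdint>

// 8-byte header, then one section: id, varint payload length, payload.
const uint8_t kTinyModule[] = {
    0x00, 0x61, 0x73, 0x6d,  // kWasmMagic, little-endian ("\0asm")
    0x01, 0x00, 0x00, 0x00,  // kWasmVersion = 1
    0x01,                    // kTypeSectionCode
    0x01,                    // payload length: 1 byte
    0x00,                    // number of signatures: 0
};
static_assert(sizeof(kTinyModule) == 11, "8-byte header + 3-byte type section");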
diff --git a/deps/v8/src/wasm/wasm-debug.cc b/deps/v8/src/wasm/wasm-debug.cc
index 49ca995f5d..87995df4e6 100644
--- a/deps/v8/src/wasm/wasm-debug.cc
+++ b/deps/v8/src/wasm/wasm-debug.cc
@@ -6,6 +6,7 @@
#include "src/assembler-inl.h"
#include "src/assert-scope.h"
+#include "src/base/optional.h"
#include "src/compiler/wasm-compiler.h"
#include "src/debug/debug-scopes.h"
#include "src/debug/debug.h"
@@ -14,6 +15,7 @@
#include "src/identity-map.h"
#include "src/isolate.h"
#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-interpreter.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
@@ -68,10 +70,9 @@ MaybeHandle<String> GetLocalName(Isolate* isolate,
DCHECK_LE(0, func_index);
DCHECK_LE(0, local_index);
if (!debug_info->has_locals_names()) {
- Handle<WasmCompiledModule> compiled_module(
- debug_info->wasm_instance()->compiled_module(), isolate);
- Handle<FixedArray> locals_names =
- wasm::DecodeLocalNames(isolate, compiled_module);
+ Handle<WasmSharedModuleData> shared(
+ debug_info->wasm_instance()->compiled_module()->shared(), isolate);
+ Handle<FixedArray> locals_names = wasm::DecodeLocalNames(isolate, shared);
debug_info->set_locals_names(*locals_names);
}
@@ -131,15 +132,18 @@ class InterpreterHandle {
static Vector<const byte> GetBytes(WasmDebugInfo* debug_info) {
// Return raw pointer into heap. The WasmInterpreter will make its own copy
// of this data anyway, and there is no heap allocation in-between.
- SeqOneByteString* bytes_str =
- debug_info->wasm_instance()->compiled_module()->module_bytes();
+ SeqOneByteString* bytes_str = debug_info->wasm_instance()
+ ->compiled_module()
+ ->shared()
+ ->module_bytes();
return {bytes_str->GetChars(), static_cast<size_t>(bytes_str->length())};
}
public:
InterpreterHandle(Isolate* isolate, WasmDebugInfo* debug_info)
: isolate_(isolate),
- module_(debug_info->wasm_instance()->compiled_module()->module()),
+ module_(
+ debug_info->wasm_instance()->compiled_module()->shared()->module()),
interpreter_(isolate, module_, GetBytes(debug_info),
debug_info->wasm_instance()->wasm_context()->get()) {}
@@ -305,11 +309,12 @@ class InterpreterHandle {
// Check whether we hit a breakpoint.
if (isolate_->debug()->break_points_active()) {
- Handle<WasmCompiledModule> compiled_module(
- GetInstanceObject()->compiled_module(), isolate_);
- int position = GetTopPosition(compiled_module);
+ Handle<WasmSharedModuleData> shared(
+ GetInstanceObject()->compiled_module()->shared(), isolate_);
+ int position = GetTopPosition(shared);
Handle<FixedArray> breakpoints;
- if (compiled_module->CheckBreakPoints(position).ToHandle(&breakpoints)) {
+ if (WasmSharedModuleData::CheckBreakPoints(isolate_, shared, position)
+ .ToHandle(&breakpoints)) {
// We hit one or several breakpoints. Clear stepping, notify the
// listeners and return.
ClearStepping();
@@ -341,13 +346,13 @@ class InterpreterHandle {
isolate_->debug()->OnDebugBreak(isolate_->factory()->empty_fixed_array());
}
- int GetTopPosition(Handle<WasmCompiledModule> compiled_module) {
+ int GetTopPosition(Handle<WasmSharedModuleData> shared) {
DCHECK_EQ(1, interpreter()->GetThreadCount());
WasmInterpreter::Thread* thread = interpreter()->GetThread(0);
DCHECK_LT(0, thread->GetFrameCount());
auto frame = thread->GetFrame(thread->GetFrameCount() - 1);
- return compiled_module->GetFunctionOffset(frame->function()->func_index) +
+ return shared->GetFunctionOffset(frame->function()->func_index) +
frame->pc();
}
@@ -368,8 +373,8 @@ class InterpreterHandle {
return stack;
}
- std::unique_ptr<wasm::InterpretedFrame> GetInterpretedFrame(
- Address frame_pointer, int idx) {
+ WasmInterpreter::FramePtr GetInterpretedFrame(Address frame_pointer,
+ int idx) {
DCHECK_EQ(1, interpreter()->GetThreadCount());
WasmInterpreter::Thread* thread = interpreter()->GetThread(0);
@@ -557,7 +562,7 @@ wasm::InterpreterHandle* GetInterpreterHandleOrNull(WasmDebugInfo* debug_info) {
int GetNumFunctions(WasmInstanceObject* instance) {
size_t num_functions =
- instance->compiled_module()->module()->functions.size();
+ instance->compiled_module()->shared()->module()->functions.size();
DCHECK_GE(kMaxInt, num_functions);
return static_cast<int>(num_functions);
}
@@ -622,7 +627,7 @@ void RedirectCallsitesInInstanceGC(Isolate* isolate,
CodeRelocationMapGC& map) {
DisallowHeapAllocation no_gc;
// Redirect all calls in wasm functions.
- FixedArray* code_table = instance->compiled_module()->ptr_to_code_table();
+ FixedArray* code_table = instance->compiled_module()->code_table();
for (int i = 0, e = GetNumFunctions(instance); i < e; ++i) {
RedirectCallsitesInCodeGC(Code::cast(code_table->get(i)), map);
}
@@ -630,7 +635,7 @@ void RedirectCallsitesInInstanceGC(Isolate* isolate,
// Redirect all calls in exported functions.
FixedArray* weak_exported_functions =
- instance->compiled_module()->ptr_to_weak_exported_functions();
+ instance->compiled_module()->weak_exported_functions();
for (int i = 0, e = weak_exported_functions->length(); i != e; ++i) {
WeakCell* weak_function = WeakCell::cast(weak_exported_functions->get(i));
if (weak_function->cleared()) continue;
@@ -652,7 +657,7 @@ void RedirectCallsitesInInstance(Isolate* isolate, WasmInstanceObject* instance,
// Redirect all calls in exported functions.
FixedArray* weak_exported_functions =
- instance->compiled_module()->ptr_to_weak_exported_functions();
+ instance->compiled_module()->weak_exported_functions();
for (int i = 0, e = weak_exported_functions->length(); i != e; ++i) {
WeakCell* weak_function = WeakCell::cast(weak_exported_functions->get(i));
if (weak_function->cleared()) continue;
@@ -723,22 +728,25 @@ void WasmDebugInfo::RedirectToInterpreter(Handle<WasmDebugInfo> debug_info,
Handle<WasmInstanceObject> instance(debug_info->wasm_instance(), isolate);
wasm::NativeModule* native_module =
instance->compiled_module()->GetNativeModule();
+ wasm::WasmModule* module = instance->module();
CodeRelocationMap code_to_relocate;
- Handle<FixedArray> code_table = instance->compiled_module()->code_table();
+ Handle<FixedArray> code_table(instance->compiled_module()->code_table(),
+ isolate);
CodeRelocationMapGC code_to_relocate_gc(isolate->heap());
- // TODO(6792): No longer needed once WebAssembly code is off heap.
+ // We may modify JS wrappers as well as wasm functions, hence the two
+ // modification scopes.
CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
+ wasm::NativeModuleModificationScope native_module_modification_scope(
+ native_module);
+
for (int func_index : func_indexes) {
DCHECK_LE(0, func_index);
- DCHECK_GT(debug_info->wasm_instance()->module()->functions.size(),
- func_index);
+ DCHECK_GT(module->functions.size(), func_index);
if (!interpreted_functions->get(func_index)->IsUndefined(isolate)) continue;
Handle<Code> new_code = compiler::CompileWasmInterpreterEntry(
- isolate, func_index,
- instance->compiled_module()->module()->functions[func_index].sig,
- instance);
+ isolate, func_index, module->functions[func_index].sig, instance);
if (FLAG_wasm_jit_to_native) {
const wasm::WasmCode* wasm_new_code =
native_module->AddInterpreterWrapper(new_code, func_index);
@@ -782,7 +790,7 @@ std::vector<std::pair<uint32_t, int>> WasmDebugInfo::GetInterpretedStack(
return GetInterpreterHandle(this)->GetInterpretedStack(frame_pointer);
}
-std::unique_ptr<wasm::InterpretedFrame> WasmDebugInfo::GetInterpretedFrame(
+wasm::WasmInterpreter::FramePtr WasmDebugInfo::GetInterpretedFrame(
Address frame_pointer, int idx) {
return GetInterpreterHandle(this)->GetInterpretedFrame(frame_pointer, idx);
}
@@ -858,7 +866,7 @@ Handle<JSFunction> WasmDebugInfo::GetCWasmEntry(
name, new_entry_code, isolate->sloppy_function_map());
Handle<JSFunction> new_entry = isolate->factory()->NewFunction(args);
new_entry->set_context(
- *debug_info->wasm_instance()->compiled_module()->native_context());
+ debug_info->wasm_instance()->compiled_module()->native_context());
new_entry->set_shared(*shared);
entries->set(index, *new_entry);
}
diff --git a/deps/v8/src/wasm/wasm-engine.cc b/deps/v8/src/wasm/wasm-engine.cc
new file mode 100644
index 0000000000..4c84b70dbd
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-engine.cc
@@ -0,0 +1,23 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/wasm-engine.h"
+#include "src/objects-inl.h"
+#include "src/wasm/module-compiler.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+bool WasmEngine::SyncValidate(Isolate* isolate, const ModuleWireBytes& bytes) {
+ // TODO(titzer): remove dependency on the isolate.
+ if (bytes.start() == nullptr || bytes.length() == 0) return false;
+ ModuleResult result = SyncDecodeWasmModule(isolate, bytes.start(),
+ bytes.end(), true, kWasmOrigin);
+ return result.ok();
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-engine.h b/deps/v8/src/wasm/wasm-engine.h
new file mode 100644
index 0000000000..bf06b47ed7
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-engine.h
@@ -0,0 +1,46 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef WASM_ENGINE_H_
+#define WASM_ENGINE_H_
+
+#include <memory>
+
+#include "src/wasm/compilation-manager.h"
+#include "src/wasm/wasm-code-manager.h"
+#include "src/wasm/wasm-memory.h"
+
+namespace v8 {
+namespace internal {
+
+namespace wasm {
+
+// The central data structure that represents an engine instance capable of
+// loading, instantiating, and executing WASM code.
+class V8_EXPORT_PRIVATE WasmEngine {
+ public:
+ explicit WasmEngine(std::unique_ptr<WasmCodeManager> code_manager)
+ : code_manager_(std::move(code_manager)) {}
+
+ bool SyncValidate(Isolate* isolate, const ModuleWireBytes& bytes);
+
+ CompilationManager* compilation_manager() { return &compilation_manager_; }
+
+ WasmCodeManager* code_manager() const { return code_manager_.get(); }
+
+ WasmAllocationTracker* allocation_tracker() { return &allocation_tracker_; }
+
+ private:
+ CompilationManager compilation_manager_;
+ std::unique_ptr<WasmCodeManager> code_manager_;
+ WasmAllocationTracker allocation_tracker_;
+
+ DISALLOW_COPY_AND_ASSIGN(WasmEngine);
+};
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif  // WASM_ENGINE_H_
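A sketch of how the pieces above are meant to compose, using only the constructor and methods declared in this header. The isolate and the wire bytes are assumed to come from the surrounding embedder code, and the one-gigabyte commit budget is an arbitrary value for the example:

#include <memory>
#include "src/wasm/wasm-engine.h"

namespace v8 {
namespace internal {
namespace wasm {

bool ValidateWithFreshEngine(v8::Isolate* isolate, Isolate* i_isolate,
                             const ModuleWireBytes& bytes) {
  // WasmCodeManager(v8::Isolate*, size_t max_committed), as defined in
  // wasm-code-manager.cc above.
  auto code_manager =
      std::make_unique<WasmCodeManager>(isolate, size_t{1} << 30);
  WasmEngine engine(std::move(code_manager));
  return engine.SyncValidate(i_isolate, bytes);
}

}  // namespace wasm
}  // namespace internal
}  // namespace v8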
diff --git a/deps/v8/src/wasm/wasm-external-refs.cc b/deps/v8/src/wasm/wasm-external-refs.cc
index 238785ca3c..0a9d1401e3 100644
--- a/deps/v8/src/wasm/wasm-external-refs.cc
+++ b/deps/v8/src/wasm/wasm-external-refs.cc
@@ -55,7 +55,7 @@ void uint64_to_float32_wrapper(uint64_t* input, float* output) {
// achieve proper rounding in all cases we have to adjust the high_word
// with a "rounding bit" sometimes. The rounding bit is stored in the LSB of
// the high_word if the low_word may affect the rounding of the high_word.
- uint32_t low_word = static_cast<uint32_t>(*input & 0xffffffff);
+ uint32_t low_word = static_cast<uint32_t>(*input & 0xFFFFFFFF);
uint32_t high_word = static_cast<uint32_t>(*input >> 32);
float shift = static_cast<float>(1ull << 32);
@@ -65,7 +65,7 @@ void uint64_to_float32_wrapper(uint64_t* input, float* output) {
shift = static_cast<float>(1ull << 31);
}
- if ((high_word & 0xfe000000) && low_word) {
+ if ((high_word & 0xFE000000) && low_word) {
// Set the rounding bit.
high_word |= 1;
}
@@ -91,7 +91,7 @@ void uint64_to_float64_wrapper(uint64_t* input, double* output) {
// static_cast<double>(uint64_t) to achieve round-to-nearest-ties-even
// semantics. The idea is to calculate
// static_cast<double>(high_word) * 2^32 + static_cast<double>(low_word).
- uint32_t low_word = static_cast<uint32_t>(*input & 0xffffffff);
+ uint32_t low_word = static_cast<uint32_t>(*input & 0xFFFFFFFF);
uint32_t high_word = static_cast<uint32_t>(*input >> 32);
double shift = static_cast<double>(1ull << 32);
@@ -201,21 +201,29 @@ int32_t uint64_mod_wrapper(uint64_t* dst, uint64_t* src) {
}
uint32_t word32_ctz_wrapper(uint32_t* input) {
- return static_cast<uint32_t>(base::bits::CountTrailingZeros(*input));
+ return base::bits::CountTrailingZeros(*input);
}
uint32_t word64_ctz_wrapper(uint64_t* input) {
- return static_cast<uint32_t>(
- base::bits::CountTrailingZeros(ReadUnalignedValue<uint64_t>(input)));
+ return base::bits::CountTrailingZeros(ReadUnalignedValue<uint64_t>(input));
}
uint32_t word32_popcnt_wrapper(uint32_t* input) {
- return static_cast<uint32_t>(base::bits::CountPopulation(*input));
+ return base::bits::CountPopulation(*input);
}
uint32_t word64_popcnt_wrapper(uint64_t* input) {
- return static_cast<uint32_t>(
- base::bits::CountPopulation(ReadUnalignedValue<uint64_t>(input)));
+ return base::bits::CountPopulation(ReadUnalignedValue<uint64_t>(input));
+}
+
+uint32_t word32_rol_wrapper(uint32_t* input_p, uint32_t* shift_p) {
+ uint32_t shift = (*shift_p & 31);
+ return (*input_p << shift) | (*input_p >> (32 - shift));
+}
+
+uint32_t word32_ror_wrapper(uint32_t* input_p, uint32_t* shift_p) {
+ uint32_t shift = (*shift_p & 31);
+ return (*input_p >> shift) | (*input_p << (32 - shift));
}
void float64_pow_wrapper(double* param0, double* param1) {
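The new word32_rol/ror wrappers mask the shift count into [0, 31] before shifting. Note that when the masked count is 0, the complementary shift in the wrappers above becomes a shift by 32; a fully defined-behavior formulation masks that side as well. A standalone sketch of such a rotate (an illustration, not the patch's exact code):

    #include <cstdint>

    // Rotate left; (32 - s) & 31 keeps the right shift in [0, 31], so
    // s == 0 never produces a shift by 32 (undefined for uint32_t).
    uint32_t rotl32(uint32_t x, uint32_t s) {
      s &= 31;
      return (x << s) | (x >> ((32 - s) & 31));
    }
    // Example: rotl32(0x80000001u, 1) == 0x00000003u.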
diff --git a/deps/v8/src/wasm/wasm-external-refs.h b/deps/v8/src/wasm/wasm-external-refs.h
index e4e88de0db..dea620338a 100644
--- a/deps/v8/src/wasm/wasm-external-refs.h
+++ b/deps/v8/src/wasm/wasm-external-refs.h
@@ -59,6 +59,10 @@ uint32_t word32_popcnt_wrapper(uint32_t* input);
uint32_t word64_popcnt_wrapper(uint64_t* input);
+uint32_t word32_rol_wrapper(uint32_t* input_p, uint32_t* shift_p);
+
+uint32_t word32_ror_wrapper(uint32_t* input_p, uint32_t* shift_p);
+
void float64_pow_wrapper(double* param0, double* param1);
void set_thread_in_wasm_flag();
diff --git a/deps/v8/src/wasm/wasm-interpreter.cc b/deps/v8/src/wasm/wasm-interpreter.cc
index 80d56a05f8..2f8fb0bf4a 100644
--- a/deps/v8/src/wasm/wasm-interpreter.cc
+++ b/deps/v8/src/wasm/wasm-interpreter.cc
@@ -19,6 +19,7 @@
#include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/memory-tracing.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-external-refs.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
@@ -31,14 +32,10 @@ namespace v8 {
namespace internal {
namespace wasm {
-#if DEBUG
#define TRACE(...) \
do { \
if (FLAG_trace_wasm_interpreter) PrintF(__VA_ARGS__); \
} while (false)
-#else
-#define TRACE(...)
-#endif
#define FOREACH_INTERNAL_OPCODE(V) V(Breakpoint, 0xFF)
@@ -129,6 +126,12 @@ namespace wasm {
V(F32CopySign, Float32) \
V(F64CopySign, Float64)
+#define FOREACH_I32CONV_FLOATOP(V) \
+ V(I32SConvertF32, int32_t, float) \
+ V(I32SConvertF64, int32_t, double) \
+ V(I32UConvertF32, uint32_t, float) \
+ V(I32UConvertF64, uint32_t, double)
+
#define FOREACH_OTHER_UNOP(V) \
V(I32Clz, uint32_t) \
V(I32Ctz, uint32_t) \
@@ -150,10 +153,6 @@ namespace wasm {
V(F64Floor, double) \
V(F64Trunc, double) \
V(F64NearestInt, double) \
- V(I32SConvertF32, float) \
- V(I32SConvertF64, double) \
- V(I32UConvertF32, float) \
- V(I32UConvertF64, double) \
V(I32ConvertI64, int64_t) \
V(I64SConvertF32, float) \
V(I64SConvertF64, double) \
@@ -223,15 +222,15 @@ inline uint32_t ExecuteI32RemU(uint32_t a, uint32_t b, TrapReason* trap) {
}
inline uint32_t ExecuteI32Shl(uint32_t a, uint32_t b, TrapReason* trap) {
- return a << (b & 0x1f);
+ return a << (b & 0x1F);
}
inline uint32_t ExecuteI32ShrU(uint32_t a, uint32_t b, TrapReason* trap) {
- return a >> (b & 0x1f);
+ return a >> (b & 0x1F);
}
inline int32_t ExecuteI32ShrS(int32_t a, int32_t b, TrapReason* trap) {
- return a >> (b & 0x1f);
+ return a >> (b & 0x1F);
}
inline int64_t ExecuteI64DivS(int64_t a, int64_t b, TrapReason* trap) {
@@ -272,34 +271,34 @@ inline uint64_t ExecuteI64RemU(uint64_t a, uint64_t b, TrapReason* trap) {
}
inline uint64_t ExecuteI64Shl(uint64_t a, uint64_t b, TrapReason* trap) {
- return a << (b & 0x3f);
+ return a << (b & 0x3F);
}
inline uint64_t ExecuteI64ShrU(uint64_t a, uint64_t b, TrapReason* trap) {
- return a >> (b & 0x3f);
+ return a >> (b & 0x3F);
}
inline int64_t ExecuteI64ShrS(int64_t a, int64_t b, TrapReason* trap) {
- return a >> (b & 0x3f);
+ return a >> (b & 0x3F);
}
inline uint32_t ExecuteI32Ror(uint32_t a, uint32_t b, TrapReason* trap) {
- uint32_t shift = (b & 0x1f);
+ uint32_t shift = (b & 0x1F);
return (a >> shift) | (a << (32 - shift));
}
inline uint32_t ExecuteI32Rol(uint32_t a, uint32_t b, TrapReason* trap) {
- uint32_t shift = (b & 0x1f);
+ uint32_t shift = (b & 0x1F);
return (a << shift) | (a >> (32 - shift));
}
inline uint64_t ExecuteI64Ror(uint64_t a, uint64_t b, TrapReason* trap) {
- uint32_t shift = (b & 0x3f);
+ uint32_t shift = (b & 0x3F);
return (a >> shift) | (a << (64 - shift));
}
inline uint64_t ExecuteI64Rol(uint64_t a, uint64_t b, TrapReason* trap) {
- uint32_t shift = (b & 0x3f);
+ uint32_t shift = (b & 0x3F);
return (a << shift) | (a >> (64 - shift));
}
@@ -444,59 +443,26 @@ inline double ExecuteF64NearestInt(double a, TrapReason* trap) {
inline double ExecuteF64Sqrt(double a, TrapReason* trap) { return sqrt(a); }
-int32_t ExecuteI32SConvertF32(float a, TrapReason* trap) {
- // The upper bound is (INT32_MAX + 1), which is the lowest float-representable
- // number above INT32_MAX which cannot be represented as int32.
- float upper_bound = 2147483648.0f;
- // We use INT32_MIN as a lower bound because (INT32_MIN - 1) is not
- // representable as float, and no number between (INT32_MIN - 1) and INT32_MIN
- // is.
- float lower_bound = static_cast<float>(INT32_MIN);
- if (a < upper_bound && a >= lower_bound) {
- return static_cast<int32_t>(a);
- }
- *trap = kTrapFloatUnrepresentable;
- return 0;
-}
-
-int32_t ExecuteI32SConvertF64(double a, TrapReason* trap) {
- // The upper bound is (INT32_MAX + 1), which is the lowest double-
- // representable number above INT32_MAX which cannot be represented as int32.
- double upper_bound = 2147483648.0;
- // The lower bound is (INT32_MIN - 1), which is the greatest double-
- // representable number below INT32_MIN which cannot be represented as int32.
- double lower_bound = -2147483649.0;
- if (a < upper_bound && a > lower_bound) {
- return static_cast<int32_t>(a);
- }
- *trap = kTrapFloatUnrepresentable;
- return 0;
-}
-
-uint32_t ExecuteI32UConvertF32(float a, TrapReason* trap) {
- // The upper bound is (UINT32_MAX + 1), which is the lowest
- // float-representable number above UINT32_MAX which cannot be represented as
- // uint32.
- double upper_bound = 4294967296.0f;
- double lower_bound = -1.0f;
- if (a < upper_bound && a > lower_bound) {
- return static_cast<uint32_t>(a);
+template <typename int_type, typename float_type>
+int_type ExecuteConvert(float_type a, TrapReason* trap) {
+ if (is_inbounds<int_type>(a)) {
+ return static_cast<int_type>(a);
}
*trap = kTrapFloatUnrepresentable;
return 0;
}
-uint32_t ExecuteI32UConvertF64(double a, TrapReason* trap) {
- // The upper bound is (UINT32_MAX + 1), which is the lowest
- // double-representable number above UINT32_MAX which cannot be represented as
- // uint32.
- double upper_bound = 4294967296.0;
- double lower_bound = -1.0;
- if (a < upper_bound && a > lower_bound) {
- return static_cast<uint32_t>(a);
+template <typename int_type, typename float_type>
+int_type ExecuteConvertSaturate(float_type a) {
+ TrapReason base_trap = kTrapCount;
+ int32_t val = ExecuteConvert<int_type>(a, &base_trap);
+ if (base_trap == kTrapCount) {
+ return val;
}
- *trap = kTrapFloatUnrepresentable;
- return 0;
+ return std::isnan(a) ? 0
+ : (a < static_cast<float_type>(0.0)
+ ? std::numeric_limits<int_type>::min()
+ : std::numeric_limits<int_type>::max());
}
inline uint32_t ExecuteI32ConvertI64(int64_t a, TrapReason* trap) {
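ExecuteConvertSaturate above first runs the trapping conversion and only clamps on failure: NaN maps to 0, negative overflow to the type's minimum, positive overflow to its maximum. A standalone double-to-int32 sketch with the same clamping order, using explicit bounds in place of is_inbounds (both bounds are exactly representable as doubles):

    #include <cmath>
    #include <cstdint>
    #include <limits>

    int32_t SatConvertF64ToI32(double a) {
      // In-range values convert exactly as the trapping path would.
      if (a > -2147483649.0 && a < 2147483648.0) return static_cast<int32_t>(a);
      if (std::isnan(a)) return 0;  // NaN saturates to 0
      return a < 0.0 ? std::numeric_limits<int32_t>::min()
                     : std::numeric_limits<int32_t>::max();
    }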
@@ -643,7 +609,7 @@ Handle<HeapObject> UnwrapWasmToJSWrapper(Isolate* isolate,
DCHECK_GT(js_imports_table->length(), index);
} else {
const wasm::WasmCode* wasm_code = wrapper.GetWasmCode();
- DCHECK_EQ(wasm::WasmCode::WasmToJsWrapper, wasm_code->kind());
+ DCHECK_EQ(wasm::WasmCode::kWasmToJsWrapper, wasm_code->kind());
js_imports_table = Handle<FixedArray>(wasm_code->owner()
->compiled_module()
->owning_instance()
@@ -1005,7 +971,7 @@ class CodeMap {
Code* GetImportedFunctionGC(uint32_t function_index) {
DCHECK(has_instance());
DCHECK_GT(module_->num_imported_functions, function_index);
- FixedArray* code_table = instance()->compiled_module()->ptr_to_code_table();
+ FixedArray* code_table = instance()->compiled_module()->code_table();
return Code::cast(code_table->get(static_cast<int>(function_index)));
}
@@ -1221,7 +1187,7 @@ class ThreadImpl {
}
WasmValue GetReturnValue(uint32_t index) {
- if (state_ == WasmInterpreter::TRAPPED) return WasmValue(0xdeadbeef);
+ if (state_ == WasmInterpreter::TRAPPED) return WasmValue(0xDEADBEEF);
DCHECK_EQ(WasmInterpreter::FINISHED, state_);
Activation act = current_activation();
// Current activation must be finished.
@@ -1510,10 +1476,10 @@ class ThreadImpl {
len = 1 + operand.length;
if (FLAG_wasm_trace_memory) {
- tracing::TraceMemoryOperation(
- tracing::kWasmInterpreted, false, rep, operand.offset + index,
- code->function->func_index, static_cast<int>(pc),
- wasm_context_->mem_start);
+ wasm::MemoryTracingInfo info(operand.offset + index, false, rep);
+ TraceMemoryOperation(ExecutionEngine::kInterpreter, &info,
+ code->function->func_index, static_cast<int>(pc),
+ wasm_context_->mem_start);
}
return true;
@@ -1536,22 +1502,22 @@ class ThreadImpl {
len = 1 + operand.length;
if (FLAG_wasm_trace_memory) {
- tracing::TraceMemoryOperation(
- tracing::kWasmInterpreted, true, rep, operand.offset + index,
- code->function->func_index, static_cast<int>(pc),
- wasm_context_->mem_start);
+ wasm::MemoryTracingInfo info(operand.offset + index, true, rep);
+ TraceMemoryOperation(ExecutionEngine::kInterpreter, &info,
+ code->function->func_index, static_cast<int>(pc),
+ wasm_context_->mem_start);
}
return true;
}
template <typename type>
- bool ExtractAtomicBinOpParams(Decoder* decoder, InterpreterCode* code,
- Address& address, pc_t pc, type& val,
- int& len) {
+ bool ExtractAtomicOpParams(Decoder* decoder, InterpreterCode* code,
+ Address& address, pc_t pc, int& len,
+ type* val = nullptr) {
MemoryAccessOperand<Decoder::kNoValidate> operand(decoder, code->at(pc + 1),
sizeof(type));
- val = Pop().to<uint32_t>();
+ if (val) *val = Pop().to<uint32_t>();
uint32_t index = Pop().to<uint32_t>();
address = BoundsCheckMem<type>(operand.offset, index);
if (!address) {
@@ -1562,42 +1528,48 @@ class ThreadImpl {
return true;
}
+ bool ExecuteNumericOp(WasmOpcode opcode, Decoder* decoder,
+ InterpreterCode* code, pc_t pc, int& len) {
+ switch (opcode) {
+ case kExprI32SConvertSatF32:
+ Push(WasmValue(ExecuteConvertSaturate<int32_t>(Pop().to<float>())));
+ return true;
+ case kExprI32UConvertSatF32:
+ Push(WasmValue(ExecuteConvertSaturate<uint32_t>(Pop().to<float>())));
+ return true;
+ case kExprI32SConvertSatF64:
+ Push(WasmValue(ExecuteConvertSaturate<int32_t>(Pop().to<double>())));
+ return true;
+ case kExprI32UConvertSatF64:
+ Push(WasmValue(ExecuteConvertSaturate<uint32_t>(Pop().to<double>())));
+ return true;
+ default:
+ V8_Fatal(__FILE__, __LINE__, "Unknown or unimplemented opcode #%d:%s",
+ code->start[pc], OpcodeName(code->start[pc]));
+ UNREACHABLE();
+ }
+ return false;
+ }
+
bool ExecuteAtomicOp(WasmOpcode opcode, Decoder* decoder,
InterpreterCode* code, pc_t pc, int& len) {
WasmValue result;
switch (opcode) {
-// TODO(gdeepti): Remove work-around when the bots are upgraded to a more
-// recent gcc version. The gcc bots (Android ARM, linux) currently use
-// gcc 4.8, in which atomics are insufficiently supported, also Bug#58016
-// (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58016)
-#if __GNUG__ && __GNUC__ < 5
-#define ATOMIC_BINOP_CASE(name, type, operation) \
- case kExpr##name: { \
- type val; \
- Address addr; \
- if (!ExtractAtomicBinOpParams<type>(decoder, code, addr, pc, val, len)) { \
- return false; \
- } \
- result = WasmValue( \
- __##operation(reinterpret_cast<type*>(addr), val, __ATOMIC_SEQ_CST)); \
- break; \
- }
-#else
-#define ATOMIC_BINOP_CASE(name, type, operation) \
- case kExpr##name: { \
- type val; \
- Address addr; \
- if (!ExtractAtomicBinOpParams<type>(decoder, code, addr, pc, val, len)) { \
- return false; \
- } \
- static_assert(sizeof(std::atomic<std::type>) == sizeof(type), \
- "Size mismatch for types std::atomic<std::" #type \
- ">, and " #type); \
- result = WasmValue( \
- std::operation(reinterpret_cast<std::atomic<std::type>*>(addr), val)); \
- break; \
+#define ATOMIC_BINOP_CASE(name, type, operation) \
+ case kExpr##name: { \
+ type val; \
+ Address addr; \
+ if (!ExtractAtomicOpParams<type>(decoder, code, addr, pc, len, &val)) { \
+ return false; \
+ } \
+ static_assert(sizeof(std::atomic<type>) == sizeof(type), \
+ "Size mismatch for types std::atomic<" #type \
+ ">, and " #type); \
+ result = WasmValue( \
+ std::operation(reinterpret_cast<std::atomic<type>*>(addr), val)); \
+ Push(result); \
+ break; \
}
-#endif
ATOMIC_BINOP_CASE(I32AtomicAdd, uint32_t, atomic_fetch_add);
ATOMIC_BINOP_CASE(I32AtomicAdd8U, uint8_t, atomic_fetch_add);
ATOMIC_BINOP_CASE(I32AtomicAdd16U, uint16_t, atomic_fetch_add);
@@ -1613,20 +1585,48 @@ class ThreadImpl {
ATOMIC_BINOP_CASE(I32AtomicXor, uint32_t, atomic_fetch_xor);
ATOMIC_BINOP_CASE(I32AtomicXor8U, uint8_t, atomic_fetch_xor);
ATOMIC_BINOP_CASE(I32AtomicXor16U, uint16_t, atomic_fetch_xor);
-#if __GNUG__ && __GNUC__ < 5
- ATOMIC_BINOP_CASE(I32AtomicExchange, uint32_t, atomic_exchange_n);
- ATOMIC_BINOP_CASE(I32AtomicExchange8U, uint8_t, atomic_exchange_n);
- ATOMIC_BINOP_CASE(I32AtomicExchange16U, uint16_t, atomic_exchange_n);
-#else
ATOMIC_BINOP_CASE(I32AtomicExchange, uint32_t, atomic_exchange);
ATOMIC_BINOP_CASE(I32AtomicExchange8U, uint8_t, atomic_exchange);
ATOMIC_BINOP_CASE(I32AtomicExchange16U, uint16_t, atomic_exchange);
-#endif
#undef ATOMIC_BINOP_CASE
+#define ATOMIC_LOAD_CASE(name, type, operation) \
+ case kExpr##name: { \
+ Address addr; \
+ if (!ExtractAtomicOpParams<type>(decoder, code, addr, pc, len)) { \
+ return false; \
+ } \
+ static_assert(sizeof(std::atomic<type>) == sizeof(type), \
+ "Size mismatch for types std::atomic<" #type \
+ ">, and " #type); \
+ result = \
+ WasmValue(std::operation(reinterpret_cast<std::atomic<type>*>(addr))); \
+ Push(result); \
+ break; \
+ }
+ ATOMIC_LOAD_CASE(I32AtomicLoad, uint32_t, atomic_load);
+ ATOMIC_LOAD_CASE(I32AtomicLoad8U, uint8_t, atomic_load);
+ ATOMIC_LOAD_CASE(I32AtomicLoad16U, uint16_t, atomic_load);
+#undef ATOMIC_LOAD_CASE
+#define ATOMIC_STORE_CASE(name, type, operation) \
+ case kExpr##name: { \
+ type val; \
+ Address addr; \
+ if (!ExtractAtomicOpParams<type>(decoder, code, addr, pc, len, &val)) { \
+ return false; \
+ } \
+ static_assert(sizeof(std::atomic<type>) == sizeof(type), \
+ "Size mismatch for types std::atomic<" #type \
+ ">, and " #type); \
+ std::operation(reinterpret_cast<std::atomic<type>*>(addr), val); \
+ break; \
+ }
+ ATOMIC_STORE_CASE(I32AtomicStore, uint32_t, atomic_store);
+ ATOMIC_STORE_CASE(I32AtomicStore8U, uint8_t, atomic_store);
+ ATOMIC_STORE_CASE(I32AtomicStore16U, uint16_t, atomic_store);
+#undef ATOMIC_STORE_CASE
default:
return false;
}
- Push(result);
return true;
}
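With the gcc 4.8 workaround gone, every atomic case funnels through the std::atomic free functions on the bounds-checked address, and the static_assert documents the layout assumption that makes the reinterpret_cast workable. A reduced sketch of that shape (the cast from raw interpreter memory relies on layout compatibility, exactly as the macros above do):

    #include <atomic>
    #include <cstdint>

    uint32_t AtomicAddSketch(void* addr, uint32_t val) {
      static_assert(sizeof(std::atomic<uint32_t>) == sizeof(uint32_t),
                    "std::atomic<uint32_t> must not add storage");
      // fetch_add returns the value held before the addition.
      return std::atomic_fetch_add(
          reinterpret_cast<std::atomic<uint32_t>*>(addr), val);
    }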
@@ -2076,8 +2076,8 @@ class ThreadImpl {
case kExprMemorySize: {
MemoryIndexOperand<Decoder::kNoValidate> operand(&decoder,
code->at(pc));
- Push(WasmValue(static_cast<uint32_t>(wasm_context_->mem_size /
- WasmModule::kPageSize)));
+ Push(WasmValue(
+ static_cast<uint32_t>(wasm_context_->mem_size / kWasmPageSize)));
len = 1 + operand.length;
break;
}
@@ -2094,6 +2094,11 @@ class ThreadImpl {
Push(WasmValue(ExecuteI64ReinterpretF64(val)));
break;
}
+ case kNumericPrefix: {
+ ++len;
+ if (!ExecuteNumericOp(opcode, &decoder, code, pc, len)) return;
+ break;
+ }
case kAtomicPrefix: {
if (!ExecuteAtomicOp(opcode, &decoder, code, pc, len)) return;
break;
@@ -2125,19 +2130,27 @@ class ThreadImpl {
FOREACH_OTHER_BINOP(EXECUTE_OTHER_BINOP)
#undef EXECUTE_OTHER_BINOP
-#define EXECUTE_OTHER_UNOP(name, ctype) \
+#define EXECUTE_UNOP(name, ctype, exec_fn) \
case kExpr##name: { \
TrapReason trap = kTrapCount; \
ctype val = Pop().to<ctype>(); \
- auto result = Execute##name(val, &trap); \
+ auto result = exec_fn(val, &trap); \
possible_nondeterminism_ |= has_nondeterminism(result); \
if (trap != kTrapCount) return DoTrap(trap, pc); \
Push(WasmValue(result)); \
break; \
}
+
+#define EXECUTE_OTHER_UNOP(name, ctype) EXECUTE_UNOP(name, ctype, Execute##name)
FOREACH_OTHER_UNOP(EXECUTE_OTHER_UNOP)
#undef EXECUTE_OTHER_UNOP
+#define EXECUTE_I32CONV_FLOATOP(name, out_type, in_type) \
+ EXECUTE_UNOP(name, in_type, ExecuteConvert<out_type>)
+ FOREACH_I32CONV_FLOATOP(EXECUTE_I32CONV_FLOATOP)
+#undef EXECUTE_I32CONV_FLOATOP
+#undef EXECUTE_UNOP
+
default:
V8_Fatal(__FILE__, __LINE__, "Unknown or unimplemented opcode #%d:%s",
code->start[pc], OpcodeName(code->start[pc]));
@@ -2452,14 +2465,14 @@ class ThreadImpl {
DCHECK(AllowHandleAllocation::IsAllowed());
DCHECK(AllowHeapAllocation::IsAllowed());
- if (code->kind() == wasm::WasmCode::Function) {
- DCHECK_EQ(*code->owner()->compiled_module()->owning_instance(),
+ if (code->kind() == wasm::WasmCode::kFunction) {
+ DCHECK_EQ(code->owner()->compiled_module()->owning_instance(),
codemap()->instance());
return {ExternalCallResult::INTERNAL, codemap()->GetCode(code->index())};
}
- if (code->kind() == wasm::WasmCode::WasmToJsWrapper) {
+ if (code->kind() == wasm::WasmCode::kWasmToJsWrapper) {
return CallExternalJSFunction(isolate, WasmCodeWrapper(code), signature);
- } else if (code->kind() == wasm::WasmCode::WasmToWasmWrapper) {
+ } else if (code->kind() == wasm::WasmCode::kWasmToWasmWrapper) {
return CallExternalWasmFunction(isolate, WasmCodeWrapper(code),
signature);
}
@@ -2540,66 +2553,61 @@ class ThreadImpl {
if (!FLAG_wasm_jit_to_native) {
// Check signature.
- FixedArray* sig_tables = compiled_module->ptr_to_signature_tables();
- if (table_index >= static_cast<uint32_t>(sig_tables->length())) {
+ FixedArray* fun_tables = compiled_module->function_tables();
+ if (table_index >= static_cast<uint32_t>(fun_tables->length())) {
return {ExternalCallResult::INVALID_FUNC};
}
- // Reconstitute the global handle to sig_table, and, further below,
- // to the function table, from the address stored in the
- // respective table of tables.
+ // Reconstitute the global handle to the function table, from the
+ // address stored in the respective table of tables.
int table_index_as_int = static_cast<int>(table_index);
- Handle<FixedArray> sig_table(reinterpret_cast<FixedArray**>(
- WasmCompiledModule::GetTableValue(sig_tables, table_index_as_int)));
- if (entry_index >= static_cast<uint32_t>(sig_table->length())) {
+ FixedArray* fun_table = *reinterpret_cast<FixedArray**>(
+ WasmCompiledModule::GetTableValue(fun_tables, table_index_as_int));
+ // Function tables store <smi, code> pairs.
+ int num_funcs_in_table =
+ fun_table->length() / compiler::kFunctionTableEntrySize;
+ if (entry_index >= static_cast<uint32_t>(num_funcs_in_table)) {
return {ExternalCallResult::INVALID_FUNC};
}
- int found_sig =
- Smi::ToInt(sig_table->get(static_cast<int>(entry_index)));
+ int found_sig = Smi::ToInt(fun_table->get(
+ compiler::FunctionTableSigOffset(static_cast<int>(entry_index))));
if (static_cast<uint32_t>(found_sig) != canonical_sig_index) {
return {ExternalCallResult::SIGNATURE_MISMATCH};
}
// Get code object.
- FixedArray* fun_tables = compiled_module->ptr_to_function_tables();
- DCHECK_EQ(sig_tables->length(), fun_tables->length());
- Handle<FixedArray> fun_table(reinterpret_cast<FixedArray**>(
- WasmCompiledModule::GetTableValue(fun_tables, table_index_as_int)));
- DCHECK_EQ(sig_table->length(), fun_table->length());
- target_gc = Code::cast(fun_table->get(static_cast<int>(entry_index)));
+ target_gc = Code::cast(fun_table->get(
+ compiler::FunctionTableCodeOffset(static_cast<int>(entry_index))));
} else {
// Check signature.
- std::vector<GlobalHandleAddress>& sig_tables =
- compiled_module->GetNativeModule()->signature_tables();
- if (table_index >= sig_tables.size()) {
+ std::vector<GlobalHandleAddress>& fun_tables =
+ compiled_module->GetNativeModule()->function_tables();
+ if (table_index >= fun_tables.size()) {
return {ExternalCallResult::INVALID_FUNC};
}
- // Reconstitute the global handle to sig_table, and, further below,
- // to the function table, from the address stored in the
- // respective table of tables.
- int table_index_as_int = static_cast<int>(table_index);
- Handle<FixedArray> sig_table(
- reinterpret_cast<FixedArray**>(sig_tables[table_index_as_int]));
- if (entry_index >= static_cast<uint32_t>(sig_table->length())) {
+ // Reconstitute the global handle to the function table, from the
+ // address stored in the respective table of tables.
+ FixedArray* fun_table =
+ *reinterpret_cast<FixedArray**>(fun_tables[table_index]);
+ // Function tables store <smi, code> pairs.
+ int num_funcs_in_table =
+ fun_table->length() / compiler::kFunctionTableEntrySize;
+ if (entry_index >= static_cast<uint32_t>(num_funcs_in_table)) {
return {ExternalCallResult::INVALID_FUNC};
}
- int found_sig =
- Smi::ToInt(sig_table->get(static_cast<int>(entry_index)));
+ int found_sig = Smi::ToInt(fun_table->get(
+ compiler::FunctionTableSigOffset(static_cast<int>(entry_index))));
if (static_cast<uint32_t>(found_sig) != canonical_sig_index) {
return {ExternalCallResult::SIGNATURE_MISMATCH};
}
// Get code object.
- std::vector<GlobalHandleAddress>& fun_tables =
- compiled_module->GetNativeModule()->function_tables();
- DCHECK_EQ(sig_tables.size(), fun_tables.size());
- Handle<FixedArray> fun_table(
- reinterpret_cast<FixedArray**>(fun_tables[table_index_as_int]));
- DCHECK_EQ(sig_table->length(), fun_table->length());
Address first_instr =
- Foreign::cast(fun_table->get(static_cast<int>(entry_index)))
+ Foreign::cast(fun_table->get(compiler::FunctionTableCodeOffset(
+ static_cast<int>(entry_index))))
->foreign_address();
target =
- isolate->wasm_code_manager()->GetCodeFromStartAddress(first_instr);
+ isolate->wasm_engine()->code_manager()->GetCodeFromStartAddress(
+ first_instr);
}
}
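The "<smi, code> pairs" comment above describes the flattened table layout this hunk switches to. A hypothetical sketch of the index arithmetic, assuming each entry occupies two consecutive FixedArray slots (the real values come from compiler::kFunctionTableEntrySize and the FunctionTableSigOffset/FunctionTableCodeOffset helpers, which this diff does not show):

    // Assumed layout: entry i -> [2*i] signature smi, [2*i + 1] code.
    constexpr int kEntrySize = 2;
    constexpr int SigOffset(int i) { return i * kEntrySize; }
    constexpr int CodeOffset(int i) { return i * kEntrySize + 1; }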
@@ -2761,11 +2769,10 @@ pc_t WasmInterpreter::Thread::GetBreakpointPc() {
int WasmInterpreter::Thread::GetFrameCount() {
return ToImpl(this)->GetFrameCount();
}
-std::unique_ptr<InterpretedFrame> WasmInterpreter::Thread::GetFrame(int index) {
+WasmInterpreter::FramePtr WasmInterpreter::Thread::GetFrame(int index) {
DCHECK_LE(0, index);
DCHECK_GT(GetFrameCount(), index);
- return std::unique_ptr<InterpretedFrame>(
- ToFrame(new InterpretedFrameImpl(ToImpl(this), index)));
+ return FramePtr(ToFrame(new InterpretedFrameImpl(ToImpl(this), index)));
}
WasmValue WasmInterpreter::Thread::GetReturnValue(int index) {
return ToImpl(this)->GetReturnValue(index);
@@ -2926,6 +2933,9 @@ WasmValue InterpretedFrame::GetLocalValue(int index) const {
WasmValue InterpretedFrame::GetStackValue(int index) const {
return ToImpl(this)->GetStackValue(index);
}
+void InterpretedFrame::Deleter::operator()(InterpretedFrame* ptr) {
+ delete ToImpl(ptr);
+}
//============================================================================
// Public API of the heap objects scope.
@@ -2945,6 +2955,7 @@ WasmInterpreter::HeapObjectsScope::~HeapObjectsScope() {
#undef WASM_CTYPES
#undef FOREACH_SIMPLE_BINOP
#undef FOREACH_OTHER_BINOP
+#undef FOREACH_I32CONV_FLOATOP
#undef FOREACH_OTHER_UNOP
} // namespace wasm
diff --git a/deps/v8/src/wasm/wasm-interpreter.h b/deps/v8/src/wasm/wasm-interpreter.h
index cdfa74cfad..b0c100b5a9 100644
--- a/deps/v8/src/wasm/wasm-interpreter.h
+++ b/deps/v8/src/wasm/wasm-interpreter.h
@@ -71,6 +71,12 @@ class InterpretedFrame {
WasmValue GetLocalValue(int index) const;
WasmValue GetStackValue(int index) const;
+ // Deleter struct to delete the underlying InterpretedFrameImpl without
+ // violating language specifications.
+ struct Deleter {
+ void operator()(InterpretedFrame* ptr);
+ };
+
private:
friend class WasmInterpreter;
  // Don't instantiate InterpretedFrames; they will be allocated as
@@ -113,6 +119,8 @@ class V8_EXPORT_PRIVATE WasmInterpreter {
AfterCall = 1 << 1
};
+ using FramePtr = std::unique_ptr<InterpretedFrame, InterpretedFrame::Deleter>;
+
// Representation of a thread in the interpreter.
class V8_EXPORT_PRIVATE Thread {
  // Don't instantiate Threads; they will be allocated as ThreadImpl in the
@@ -139,7 +147,7 @@ class V8_EXPORT_PRIVATE WasmInterpreter {
// TODO(clemensh): Make this uint32_t.
int GetFrameCount();
// The InterpretedFrame is only valid as long as the Thread is paused.
- std::unique_ptr<InterpretedFrame> GetFrame(int index);
+ FramePtr GetFrame(int index);
WasmValue GetReturnValue(int index = 0);
TrapReason GetTrapReason();
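The FramePtr change gives unique_ptr a custom Deleter so the opaque InterpretedFrame can be destroyed through its implementation type without a virtual destructor. A minimal sketch of the pattern with hypothetical names (Frame and FrameImpl stand in for the real classes):

    #include <memory>

    class FrameImpl { /* owns the actual state */ };

    class Frame {  // opaque handle type exposed to clients
     public:
      struct Deleter {
        void operator()(Frame* ptr);  // defined where FrameImpl is visible
      };
    };

    using FramePtr = std::unique_ptr<Frame, Frame::Deleter>;

    // In the implementation file, the handle is cast back to its impl:
    void Frame::Deleter::operator()(Frame* ptr) {
      delete reinterpret_cast<FrameImpl*>(ptr);
    }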
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index 03cc26e017..ce2bf42455 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -17,13 +17,11 @@
#include "src/parsing/parse-info.h"
#include "src/trap-handler/trap-handler.h"
#include "src/wasm/module-compiler.h"
-#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-api.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-memory.h"
-#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
-#include "src/wasm/wasm-result.h"
using v8::internal::wasm::ErrorThrower;
@@ -63,7 +61,8 @@ i::MaybeHandle<i::WasmModuleObject> GetFirstArgumentAsModule(
}
i::wasm::ModuleWireBytes GetFirstArgumentAsBytes(
- const v8::FunctionCallbackInfo<v8::Value>& args, ErrorThrower* thrower) {
+ const v8::FunctionCallbackInfo<v8::Value>& args, ErrorThrower* thrower,
+ bool* is_shared) {
const uint8_t* start = nullptr;
size_t length = 0;
v8::Local<v8::Value> source = args[0];
@@ -74,6 +73,7 @@ i::wasm::ModuleWireBytes GetFirstArgumentAsBytes(
start = reinterpret_cast<const uint8_t*>(contents.Data());
length = contents.ByteLength();
+ *is_shared = buffer->IsSharedArrayBuffer();
} else if (source->IsTypedArray()) {
// A TypedArray was passed.
Local<TypedArray> array = Local<TypedArray>::Cast(source);
@@ -84,6 +84,7 @@ i::wasm::ModuleWireBytes GetFirstArgumentAsBytes(
start =
reinterpret_cast<const uint8_t*>(contents.Data()) + array->ByteOffset();
length = array->ByteLength();
+ *is_shared = buffer->IsSharedArrayBuffer();
} else {
thrower->TypeError("Argument 0 must be a buffer source");
}
@@ -154,7 +155,8 @@ void WebAssemblyCompile(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
return_value.Set(resolver->GetPromise());
- auto bytes = GetFirstArgumentAsBytes(args, &thrower);
+ bool is_shared = false;
+ auto bytes = GetFirstArgumentAsBytes(args, &thrower, &is_shared);
if (thrower.error()) {
auto maybe = resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
CHECK_IMPLIES(!maybe.FromMaybe(false),
@@ -162,7 +164,8 @@ void WebAssemblyCompile(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
i::Handle<i::JSPromise> promise = Utils::OpenHandle(*resolver->GetPromise());
- i::wasm::AsyncCompile(i_isolate, promise, bytes);
+ // Asynchronous compilation handles copying wire bytes if necessary.
+ i::wasm::AsyncCompile(i_isolate, promise, bytes, is_shared);
}
// WebAssembly.validate(bytes) -> bool
@@ -172,16 +175,31 @@ void WebAssemblyValidate(const v8::FunctionCallbackInfo<v8::Value>& args) {
HandleScope scope(isolate);
i::wasm::ScheduledErrorThrower thrower(i_isolate, "WebAssembly.validate()");
- auto bytes = GetFirstArgumentAsBytes(args, &thrower);
+ bool is_shared = false;
+ auto bytes = GetFirstArgumentAsBytes(args, &thrower, &is_shared);
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
- if (!thrower.error() &&
- i::wasm::SyncValidate(reinterpret_cast<i::Isolate*>(isolate), bytes)) {
- return_value.Set(v8::True(isolate));
- } else {
+
+ if (thrower.error()) {
if (thrower.wasm_error()) thrower.Reset(); // Clear error.
return_value.Set(v8::False(isolate));
+ return;
}
+
+ bool validated = false;
+ if (is_shared) {
+ // Make a copy of the wire bytes to avoid concurrent modification.
+ std::unique_ptr<uint8_t[]> copy(new uint8_t[bytes.length()]);
+ memcpy(copy.get(), bytes.start(), bytes.length());
+ i::wasm::ModuleWireBytes bytes_copy(copy.get(),
+ copy.get() + bytes.length());
+ validated = i_isolate->wasm_engine()->SyncValidate(i_isolate, bytes_copy);
+ } else {
+ // The wire bytes are not shared, OK to use them directly.
+ validated = i_isolate->wasm_engine()->SyncValidate(i_isolate, bytes);
+ }
+
+ return_value.Set(Boolean::New(isolate, validated));
}
// new WebAssembly.Module(bytes) -> WebAssembly.Module
@@ -202,13 +220,25 @@ void WebAssemblyModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
- auto bytes = GetFirstArgumentAsBytes(args, &thrower);
+ bool is_shared = false;
+ auto bytes = GetFirstArgumentAsBytes(args, &thrower, &is_shared);
if (thrower.error()) {
return;
}
- i::MaybeHandle<i::Object> module_obj =
- i::wasm::SyncCompile(i_isolate, &thrower, bytes);
+ i::MaybeHandle<i::Object> module_obj;
+ if (is_shared) {
+ // Make a copy of the wire bytes to avoid concurrent modification.
+ std::unique_ptr<uint8_t[]> copy(new uint8_t[bytes.length()]);
+ memcpy(copy.get(), bytes.start(), bytes.length());
+ i::wasm::ModuleWireBytes bytes_copy(copy.get(),
+ copy.get() + bytes.length());
+ module_obj = i::wasm::SyncCompile(i_isolate, &thrower, bytes_copy);
+ } else {
+ // The wire bytes are not shared, OK to use them directly.
+ module_obj = i::wasm::SyncCompile(i_isolate, &thrower, bytes);
+ }
+
if (module_obj.is_null()) return;
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
@@ -598,10 +628,12 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
}
- size_t size = static_cast<size_t>(i::wasm::WasmModule::kPageSize) *
+ size_t size = static_cast<size_t>(i::wasm::kWasmPageSize) *
static_cast<size_t>(initial);
+ const bool enable_guard_regions =
+ internal::trap_handler::IsTrapHandlerEnabled();
i::Handle<i::JSArrayBuffer> buffer = i::wasm::NewArrayBuffer(
- i_isolate, size, internal::trap_handler::UseTrapHandler(),
+ i_isolate, size, enable_guard_regions,
is_shared_memory ? i::SharedFlag::kShared : i::SharedFlag::kNotShared);
if (buffer.is_null()) {
thrower.RangeError("could not allocate memory");
@@ -751,7 +783,20 @@ void WebAssemblyTableSet(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
- i::WasmTableObject::Set(i_isolate, receiver, static_cast<int32_t>(index),
+ // TODO(v8:7232) Allow reset/mutation after addressing referenced issue.
+ int32_t int_index = static_cast<int32_t>(index);
+ if (receiver->functions()->get(int_index) !=
+ i_isolate->heap()->undefined_value() &&
+ receiver->functions()->get(int_index) !=
+ i_isolate->heap()->null_value()) {
+ for (i::StackFrameIterator it(i_isolate); !it.done(); it.Advance()) {
+ if (it.frame()->type() == i::StackFrame::WASM_TO_JS) {
+ thrower.RangeError("Modifying existing entry in table not supported.");
+ return;
+ }
+ }
+ }
+ i::WasmTableObject::Set(i_isolate, receiver, static_cast<int32_t>(int_index),
value->IsNull(i_isolate)
? i::Handle<i::JSFunction>::null()
: i::Handle<i::JSFunction>::cast(value));
@@ -794,18 +839,6 @@ void WebAssemblyMemoryGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
thrower.RangeError("Unable to grow instance memory.");
return;
}
- if (!old_buffer->is_shared()) {
- // When delta_size == 0, or guard pages are enabled, the same backing store
- // is used. To be spec compliant, the buffer associated with the memory
- // object needs to be detached. Setup a new buffer with the same backing
- // store, detach the old buffer, and do not free backing store memory.
- bool free_memory = delta_size != 0 && !old_buffer->has_guard_region();
- if ((!free_memory && old_size != 0) || new_size64 == 0) {
- i::WasmMemoryObject::SetupNewBufferWithSameBackingStore(
- i_isolate, receiver, static_cast<uint32_t>(new_size64));
- }
- i::wasm::DetachMemoryBuffer(i_isolate, old_buffer, free_memory);
- }
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
return_value.Set(ret);
}
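Both validate and the Module constructor now snapshot wire bytes that come from a SharedArrayBuffer, since another thread may mutate shared memory while compilation reads it. A minimal sketch of just the snapshot step (a hypothetical helper, not the patch's API):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <memory>

    // Copy the bytes when they are shared so decoding sees a stable
    // sequence; non-shared bytes can be used in place.
    std::unique_ptr<uint8_t[]> SnapshotIfShared(const uint8_t* start,
                                                size_t length, bool is_shared) {
      if (!is_shared) return nullptr;  // caller keeps using `start`
      std::unique_ptr<uint8_t[]> copy(new uint8_t[length]);
      memcpy(copy.get(), start, length);
      return copy;
    }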
diff --git a/deps/v8/src/wasm/wasm-limits.h b/deps/v8/src/wasm/wasm-limits.h
index f298fd3fe1..184b6329ba 100644
--- a/deps/v8/src/wasm/wasm-limits.h
+++ b/deps/v8/src/wasm/wasm-limits.h
@@ -9,6 +9,8 @@
#include <cstdint>
#include <limits>
+#include "src/wasm/wasm-constants.h"
+
namespace v8 {
namespace internal {
namespace wasm {
@@ -41,10 +43,11 @@ constexpr size_t kV8MaxWasmTables = 1;
constexpr size_t kV8MaxWasmMemories = 1;
constexpr size_t kSpecMaxWasmMemoryPages = 65536;
+static_assert(kV8MaxWasmMemoryPages <= kSpecMaxWasmMemoryPages,
+ "v8 should not be more permissive than the spec");
constexpr size_t kSpecMaxWasmTableSize = 0xFFFFFFFFu;
-// TODO(titzer): move WASM page size constant here.
-constexpr size_t kV8MaxWasmMemoryBytes = kV8MaxWasmMemoryPages * 65536;
+constexpr size_t kV8MaxWasmMemoryBytes = kV8MaxWasmMemoryPages * kWasmPageSize;
constexpr uint64_t kWasmMaxHeapOffset =
static_cast<uint64_t>(
diff --git a/deps/v8/src/wasm/wasm-memory.cc b/deps/v8/src/wasm/wasm-memory.cc
index 9f037c898d..fcbe60ae0e 100644
--- a/deps/v8/src/wasm/wasm-memory.cc
+++ b/deps/v8/src/wasm/wasm-memory.cc
@@ -4,6 +4,7 @@
#include "src/wasm/wasm-memory.h"
#include "src/objects-inl.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
@@ -11,30 +12,70 @@ namespace v8 {
namespace internal {
namespace wasm {
+WasmAllocationTracker::~WasmAllocationTracker() {
+ // All reserved address space should be released before the allocation tracker
+ // is destroyed.
+ DCHECK_EQ(allocated_address_space_, 0u);
+}
+
+bool WasmAllocationTracker::ReserveAddressSpace(size_t num_bytes) {
+// Address space reservations are currently only meaningful using guard
+// regions, which is currently only supported on 64-bit systems. On other
+// platforms, we always fall back on bounds checks.
+#if V8_TARGET_ARCH_64_BIT
+ static constexpr size_t kAddressSpaceLimit = 0x10000000000L; // 1 TiB
+
+ size_t const old_count = allocated_address_space_.fetch_add(num_bytes);
+ DCHECK_GE(old_count + num_bytes, old_count);
+ if (old_count + num_bytes <= kAddressSpaceLimit) {
+ return true;
+ }
+ allocated_address_space_ -= num_bytes;
+#endif
+ return false;
+}
+
+void WasmAllocationTracker::ReleaseAddressSpace(size_t num_bytes) {
+ DCHECK_LE(num_bytes, allocated_address_space_);
+ allocated_address_space_ -= num_bytes;
+}
+
void* TryAllocateBackingStore(Isolate* isolate, size_t size,
- bool enable_guard_regions, void*& allocation_base,
- size_t& allocation_length) {
- // TODO(eholk): Right now enable_guard_regions has no effect on 32-bit
+ bool require_guard_regions,
+ void** allocation_base,
+ size_t* allocation_length) {
+ // TODO(eholk): Right now require_guard_regions has no effect on 32-bit
// systems. It may be safer to fail instead, given that other code might do
// things that would be unsafe if they expected guard pages where there
// weren't any.
- if (enable_guard_regions) {
+ if (require_guard_regions) {
// TODO(eholk): On Windows we want to make sure we don't commit the guard
// pages yet.
// We always allocate the largest possible offset into the heap, so the
// addressable memory after the guard page can be made inaccessible.
- allocation_length = RoundUp(kWasmMaxHeapOffset, base::OS::CommitPageSize());
- DCHECK_EQ(0, size % base::OS::CommitPageSize());
+ *allocation_length = RoundUp(kWasmMaxHeapOffset, CommitPageSize());
+ DCHECK_EQ(0, size % CommitPageSize());
+
+ WasmAllocationTracker* const allocation_tracker =
+ isolate->wasm_engine()->allocation_tracker();
+
+ // Let the WasmAllocationTracker know we are going to reserve a bunch of
+ // address space.
+ if (!allocation_tracker->ReserveAddressSpace(*allocation_length)) {
+ // If we are over the address space limit, fail.
+ return nullptr;
+ }
// The Reserve makes the whole region inaccessible by default.
- allocation_base =
- isolate->array_buffer_allocator()->Reserve(allocation_length);
- if (allocation_base == nullptr) {
+ *allocation_base =
+ isolate->array_buffer_allocator()->Reserve(*allocation_length);
+ if (*allocation_base == nullptr) {
+ allocation_tracker->ReleaseAddressSpace(*allocation_length);
return nullptr;
}
- void* memory = allocation_base;
+ void* memory = *allocation_base;
// Make the part we care about accessible.
isolate->array_buffer_allocator()->SetProtection(
@@ -47,13 +88,13 @@ void* TryAllocateBackingStore(Isolate* isolate, size_t size,
} else {
// TODO(titzer): use guard regions for minicage and merge with above code.
CHECK_LE(size, kV8MaxWasmMemoryBytes);
- allocation_length =
+ *allocation_length =
base::bits::RoundUpToPowerOfTwo32(static_cast<uint32_t>(size));
void* memory =
size == 0
? nullptr
- : isolate->array_buffer_allocator()->Allocate(allocation_length);
- allocation_base = memory;
+ : isolate->array_buffer_allocator()->Allocate(*allocation_length);
+ *allocation_base = memory;
return memory;
}
}
@@ -78,13 +119,12 @@ Handle<JSArrayBuffer> SetupArrayBuffer(Isolate* isolate, void* allocation_base,
}
Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
- bool enable_guard_regions,
+ bool require_guard_regions,
SharedFlag shared) {
// Check against kMaxInt, since the byte length is stored as int in the
// JSArrayBuffer. Note that wasm_max_mem_pages can be raised from the command
// line, and we don't want to fail a CHECK then.
- if (size > FLAG_wasm_max_mem_pages * WasmModule::kPageSize ||
- size > kMaxInt) {
+ if (size > FLAG_wasm_max_mem_pages * kWasmPageSize || size > kMaxInt) {
// TODO(titzer): lift restriction on maximum memory allocated here.
return Handle<JSArrayBuffer>::null();
}
@@ -92,10 +132,10 @@ Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
void* allocation_base = nullptr; // Set by TryAllocateBackingStore
size_t allocation_length = 0; // Set by TryAllocateBackingStore
// Do not reserve memory till non zero memory is encountered.
- void* memory =
- (size == 0) ? nullptr
- : TryAllocateBackingStore(isolate, size, enable_guard_regions,
- allocation_base, allocation_length);
+ void* memory = (size == 0) ? nullptr
+ : TryAllocateBackingStore(
+ isolate, size, require_guard_regions,
+ &allocation_base, &allocation_length);
if (size > 0 && memory == nullptr) {
return Handle<JSArrayBuffer>::null();
@@ -111,11 +151,14 @@ Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
constexpr bool is_external = false;
return SetupArrayBuffer(isolate, allocation_base, allocation_length, memory,
- size, is_external, enable_guard_regions, shared);
+ size, is_external, require_guard_regions, shared);
}
void DetachMemoryBuffer(Isolate* isolate, Handle<JSArrayBuffer> buffer,
bool free_memory) {
+ if (buffer->is_shared()) return; // Detaching shared buffers is impossible.
+ DCHECK(!buffer->is_neuterable());
+
const bool is_external = buffer->is_external();
DCHECK(!buffer->is_neuterable());
if (!is_external) {
@@ -130,6 +173,8 @@ void DetachMemoryBuffer(Isolate* isolate, Handle<JSArrayBuffer> buffer,
buffer->FreeBackingStore();
}
}
+
+ DCHECK(buffer->is_external());
buffer->set_is_neuterable(true);
buffer->Neuter();
}
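ReserveAddressSpace above is an optimistic reservation: it bumps the atomic counter first and rolls back if the 1 TiB budget was exceeded, so concurrent reservations never over-commit. A standalone sketch of the same protocol:

    #include <atomic>
    #include <cstddef>

    std::atomic<size_t> reserved{0};
    constexpr size_t kLimit = size_t{1} << 40;  // 1 TiB, as in the patch

    bool TryReserve(size_t num_bytes) {
      size_t old_count = reserved.fetch_add(num_bytes);
      if (old_count + num_bytes <= kLimit) return true;
      reserved.fetch_sub(num_bytes);  // undo the failed reservation
      return false;
    }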
diff --git a/deps/v8/src/wasm/wasm-memory.h b/deps/v8/src/wasm/wasm-memory.h
index 2676f3ade7..c5d6ef5154 100644
--- a/deps/v8/src/wasm/wasm-memory.h
+++ b/deps/v8/src/wasm/wasm-memory.h
@@ -13,8 +13,28 @@ namespace v8 {
namespace internal {
namespace wasm {
+class WasmAllocationTracker {
+ public:
+ WasmAllocationTracker() {}
+ ~WasmAllocationTracker();
+
+ // ReserveAddressSpace attempts to increase the reserved address space counter
+ // to determine whether there is enough headroom to allocate another guarded
+ // Wasm memory. Returns true if successful (meaning it is okay to go ahead and
+ // allocate the buffer), false otherwise.
+ bool ReserveAddressSpace(size_t num_bytes);
+
+ // Reduces the address space counter so that the space can be reused.
+ void ReleaseAddressSpace(size_t num_bytes);
+
+ private:
+ std::atomic_size_t allocated_address_space_{0};
+
+ DISALLOW_COPY_AND_ASSIGN(WasmAllocationTracker);
+};
+
Handle<JSArrayBuffer> NewArrayBuffer(
- Isolate*, size_t size, bool enable_guard_regions,
+ Isolate*, size_t size, bool require_guard_regions,
SharedFlag shared = SharedFlag::kNotShared);
Handle<JSArrayBuffer> SetupArrayBuffer(
diff --git a/deps/v8/src/wasm/wasm-module-builder.cc b/deps/v8/src/wasm/wasm-module-builder.cc
index 407ef08700..90b1d702cf 100644
--- a/deps/v8/src/wasm/wasm-module-builder.cc
+++ b/deps/v8/src/wasm/wasm-module-builder.cc
@@ -11,7 +11,7 @@
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/leb-helper.h"
-#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-constants.h"
#include "src/wasm/wasm-module-builder.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
@@ -140,8 +140,8 @@ void WasmFunctionBuilder::EmitDirectCallIndex(uint32_t index) {
void WasmFunctionBuilder::SetName(Vector<const char> name) { name_ = name; }
-void WasmFunctionBuilder::AddAsmWasmOffset(int call_position,
- int to_number_position) {
+void WasmFunctionBuilder::AddAsmWasmOffset(size_t call_position,
+ size_t to_number_position) {
// We only want to emit one mapping per byte offset.
DCHECK(asm_offsets_.size() == 0 || body_.size() > last_asm_byte_offset_);
@@ -150,21 +150,25 @@ void WasmFunctionBuilder::AddAsmWasmOffset(int call_position,
asm_offsets_.write_u32v(byte_offset - last_asm_byte_offset_);
last_asm_byte_offset_ = byte_offset;
- DCHECK_GE(call_position, 0);
- asm_offsets_.write_i32v(call_position - last_asm_source_position_);
+ DCHECK_GE(std::numeric_limits<uint32_t>::max(), call_position);
+ uint32_t call_position_u32 = static_cast<uint32_t>(call_position);
+ asm_offsets_.write_i32v(call_position_u32 - last_asm_source_position_);
- DCHECK_GE(to_number_position, 0);
- asm_offsets_.write_i32v(to_number_position - call_position);
- last_asm_source_position_ = to_number_position;
+ DCHECK_GE(std::numeric_limits<uint32_t>::max(), to_number_position);
+ uint32_t to_number_position_u32 = static_cast<uint32_t>(to_number_position);
+ asm_offsets_.write_i32v(to_number_position_u32 - call_position_u32);
+ last_asm_source_position_ = to_number_position_u32;
}
-void WasmFunctionBuilder::SetAsmFunctionStartPosition(int position) {
+void WasmFunctionBuilder::SetAsmFunctionStartPosition(
+ size_t function_position) {
DCHECK_EQ(0, asm_func_start_source_position_);
- DCHECK_LE(0, position);
+ DCHECK_GE(std::numeric_limits<uint32_t>::max(), function_position);
+ uint32_t function_position_u32 = static_cast<uint32_t>(function_position);
// Must be called before emitting any asm.js source position.
DCHECK_EQ(0, asm_offsets_.size());
- asm_func_start_source_position_ = position;
- last_asm_source_position_ = position;
+ asm_func_start_source_position_ = function_position_u32;
+ last_asm_source_position_ = function_position_u32;
}
void WasmFunctionBuilder::DeleteCodeAfter(size_t position) {
@@ -339,7 +343,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
buffer.write_size(signatures_.size());
for (FunctionSig* sig : signatures_) {
- buffer.write_u8(kWasmFunctionTypeForm);
+ buffer.write_u8(kWasmFunctionTypeCode);
buffer.write_size(sig->parameter_count());
for (auto param : sig->parameters()) {
buffer.write_u8(WasmOpcodes::ValueTypeCodeFor(param));
@@ -388,7 +392,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
if (indirect_functions_.size() > 0) {
size_t start = EmitSection(kTableSectionCode, buffer);
buffer.write_u8(1); // table count
- buffer.write_u8(kWasmAnyFunctionTypeForm);
+ buffer.write_u8(kWasmAnyFunctionTypeCode);
buffer.write_u8(kHasMaximumFlag);
buffer.write_size(indirect_functions_.size());
buffer.write_size(indirect_functions_.size());
@@ -550,7 +554,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
buffer.write_size(4);
buffer.write(reinterpret_cast<const byte*>("name"), 4);
// Emit a subsection for the function names.
- buffer.write_u8(NameSectionType::kFunction);
+ buffer.write_u8(NameSectionKindCode::kFunction);
// Emit a placeholder for the subsection length.
size_t functions_start = buffer.reserve_u32v();
// Emit the function names.
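AddAsmWasmOffset now takes size_t positions, checks they fit in 32 bits, and writes deltas against the previous entry (signed varints for source positions), which keeps the offset table compact because consecutive positions are close together. A small sketch of the delta bookkeeping alone, with the varint encoding elided:

    #include <cstdint>
    #include <vector>

    struct DeltaEncoder {
      uint32_t last = 0;
      std::vector<int32_t> deltas;  // fed to a signed varint writer
      void Add(uint32_t position) {
        // Unsigned subtraction wraps, matching the patch's arithmetic;
        // reinterpreted as int32_t it yields the signed delta.
        deltas.push_back(static_cast<int32_t>(position - last));
        last = position;
      }
    };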
diff --git a/deps/v8/src/wasm/wasm-module-builder.h b/deps/v8/src/wasm/wasm-module-builder.h
index 898f996cd3..0beae76513 100644
--- a/deps/v8/src/wasm/wasm-module-builder.h
+++ b/deps/v8/src/wasm/wasm-module-builder.h
@@ -172,8 +172,8 @@ class V8_EXPORT_PRIVATE WasmFunctionBuilder : public ZoneObject {
void EmitWithU32V(WasmOpcode opcode, uint32_t immediate);
void EmitDirectCallIndex(uint32_t index);
void SetName(Vector<const char> name);
- void AddAsmWasmOffset(int call_position, int to_number_position);
- void SetAsmFunctionStartPosition(int position);
+ void AddAsmWasmOffset(size_t call_position, size_t to_number_position);
+ void SetAsmFunctionStartPosition(size_t function_position);
size_t GetPosition() const { return body_.size(); }
void FixupByte(size_t position, byte value) {
diff --git a/deps/v8/src/wasm/wasm-module.cc b/deps/v8/src/wasm/wasm-module.cc
index bfeeb0fbff..b6b9117ae5 100644
--- a/deps/v8/src/wasm/wasm-module.cc
+++ b/deps/v8/src/wasm/wasm-module.cc
@@ -20,8 +20,8 @@
#include "src/wasm/compilation-manager.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-code-specialization.h"
-#include "src/wasm/wasm-heap.h"
#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
@@ -31,21 +31,6 @@ namespace v8 {
namespace internal {
namespace wasm {
-#define TRACE(...) \
- do { \
- if (FLAG_trace_wasm_instances) PrintF(__VA_ARGS__); \
- } while (false)
-
-#define TRACE_CHAIN(instance) \
- do { \
- instance->PrintInstancesChain(); \
- } while (false)
-
-#define TRACE_COMPILE(...) \
- do { \
- if (FLAG_trace_wasm_compiler) PrintF(__VA_ARGS__); \
- } while (false)
-
// static
const WasmExceptionSig WasmException::empty_sig_(0, 0, nullptr);
@@ -109,8 +94,8 @@ void UnpackAndRegisterProtectedInstructionsGC(Isolate* isolate,
}
}
-void UnpackAndRegisterProtectedInstructions(Isolate* isolate,
- wasm::NativeModule* native_module) {
+void UnpackAndRegisterProtectedInstructions(
+ Isolate* isolate, const wasm::NativeModule* native_module) {
DisallowHeapAllocation no_gc;
for (uint32_t i = native_module->num_imported_functions(),
@@ -118,7 +103,7 @@ void UnpackAndRegisterProtectedInstructions(Isolate* isolate,
i < e; ++i) {
wasm::WasmCode* code = native_module->GetCode(i);
- if (code == nullptr || code->kind() != wasm::WasmCode::Function) {
+ if (code == nullptr || code->kind() != wasm::WasmCode::kFunction) {
continue;
}
@@ -177,7 +162,7 @@ Handle<Object> GetOrCreateIndirectCallWrapper(
reinterpret_cast<Address>(owning_instance->wasm_context()->get());
if (!wasm_code.IsCodeObject()) {
DCHECK_NE(wasm_code.GetWasmCode()->kind(),
- wasm::WasmCode::WasmToWasmWrapper);
+ wasm::WasmCode::kWasmToWasmWrapper);
wasm::NativeModule* native_module = wasm_code.GetWasmCode()->owner();
// The only reason we pass owning_instance is for the GC case. Check
// that the values match.
@@ -203,30 +188,6 @@ Handle<Object> GetOrCreateIndirectCallWrapper(
return code;
}
-void UpdateDispatchTables(Isolate* isolate, Handle<FixedArray> dispatch_tables,
- int index, WasmFunction* function,
- Handle<Object> code_or_foreign) {
- DCHECK_EQ(0, dispatch_tables->length() % 4);
- for (int i = 0; i < dispatch_tables->length(); i += 4) {
- Handle<FixedArray> function_table(
- FixedArray::cast(dispatch_tables->get(i + 2)), isolate);
- Handle<FixedArray> signature_table(
- FixedArray::cast(dispatch_tables->get(i + 3)), isolate);
- if (function) {
- Handle<WasmInstanceObject> instance(
- WasmInstanceObject::cast(dispatch_tables->get(i)), isolate);
- // Note that {SignatureMap::Find} may return {-1} if the signature is
- // not found; it will simply never match any check.
- auto sig_index = instance->module()->signature_map.Find(function->sig);
- signature_table->set(index, Smi::FromInt(sig_index));
- function_table->set(index, *code_or_foreign);
- } else {
- signature_table->set(index, Smi::FromInt(-1));
- function_table->set(index, Smi::kZero);
- }
- }
-}
-
bool IsWasmCodegenAllowed(Isolate* isolate, Handle<Context> context) {
// TODO(wasm): Once wasm has its own CSP policy, we should introduce a
// separate callback that includes information about the module about to be
@@ -246,8 +207,8 @@ bool IsWasmCodegenAllowed(Isolate* isolate, Handle<Context> context) {
Handle<JSArray> GetImports(Isolate* isolate,
Handle<WasmModuleObject> module_object) {
- Handle<WasmCompiledModule> compiled_module(module_object->compiled_module(),
- isolate);
+ Handle<WasmSharedModuleData> shared(
+ module_object->compiled_module()->shared(), isolate);
Factory* factory = isolate->factory();
Handle<String> module_string = factory->InternalizeUtf8String("module");
@@ -260,7 +221,7 @@ Handle<JSArray> GetImports(Isolate* isolate,
Handle<String> global_string = factory->InternalizeUtf8String("global");
// Create the result array.
- WasmModule* module = compiled_module->module();
+ WasmModule* module = shared->module();
int num_imports = static_cast<int>(module->import_table.size());
Handle<JSArray> array_object = factory->NewJSArray(PACKED_ELEMENTS, 0, 0);
Handle<FixedArray> storage = factory->NewFixedArray(num_imports);
@@ -295,12 +256,12 @@ Handle<JSArray> GetImports(Isolate* isolate,
}
MaybeHandle<String> import_module =
- WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
- isolate, compiled_module, import.module_name);
+ WasmSharedModuleData::ExtractUtf8StringFromModuleBytes(
+ isolate, shared, import.module_name);
MaybeHandle<String> import_name =
- WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
- isolate, compiled_module, import.field_name);
+ WasmSharedModuleData::ExtractUtf8StringFromModuleBytes(
+ isolate, shared, import.field_name);
JSObject::AddProperty(entry, module_string, import_module.ToHandleChecked(),
NONE);
@@ -316,8 +277,8 @@ Handle<JSArray> GetImports(Isolate* isolate,
Handle<JSArray> GetExports(Isolate* isolate,
Handle<WasmModuleObject> module_object) {
- Handle<WasmCompiledModule> compiled_module(module_object->compiled_module(),
- isolate);
+ Handle<WasmSharedModuleData> shared(
+ module_object->compiled_module()->shared(), isolate);
Factory* factory = isolate->factory();
Handle<String> name_string = factory->InternalizeUtf8String("name");
@@ -329,7 +290,7 @@ Handle<JSArray> GetExports(Isolate* isolate,
Handle<String> global_string = factory->InternalizeUtf8String("global");
// Create the result array.
- WasmModule* module = compiled_module->module();
+ WasmModule* module = shared->module();
int num_exports = static_cast<int>(module->export_table.size());
Handle<JSArray> array_object = factory->NewJSArray(PACKED_ELEMENTS, 0, 0);
Handle<FixedArray> storage = factory->NewFixedArray(num_exports);
@@ -364,8 +325,8 @@ Handle<JSArray> GetExports(Isolate* isolate,
Handle<JSObject> entry = factory->NewJSObject(object_function);
MaybeHandle<String> export_name =
- WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
- isolate, compiled_module, exp.name);
+ WasmSharedModuleData::ExtractUtf8StringFromModuleBytes(isolate, shared,
+ exp.name);
JSObject::AddProperty(entry, name_string, export_name.ToHandleChecked(),
NONE);
@@ -380,15 +341,14 @@ Handle<JSArray> GetExports(Isolate* isolate,
Handle<JSArray> GetCustomSections(Isolate* isolate,
Handle<WasmModuleObject> module_object,
Handle<String> name, ErrorThrower* thrower) {
- Handle<WasmCompiledModule> compiled_module(module_object->compiled_module(),
- isolate);
+ Handle<WasmSharedModuleData> shared(
+ module_object->compiled_module()->shared(), isolate);
Factory* factory = isolate->factory();
std::vector<CustomSectionOffset> custom_sections;
{
DisallowHeapAllocation no_gc; // for raw access to string bytes.
- Handle<SeqOneByteString> module_bytes(compiled_module->module_bytes(),
- isolate);
+ Handle<SeqOneByteString> module_bytes(shared->module_bytes(), isolate);
const byte* start =
reinterpret_cast<const byte*>(module_bytes->GetCharsAddress());
const byte* end = start + module_bytes->length();
@@ -400,8 +360,8 @@ Handle<JSArray> GetCustomSections(Isolate* isolate,
// Gather matching sections.
for (auto& section : custom_sections) {
MaybeHandle<String> section_name =
- WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
- isolate, compiled_module, section.name);
+ WasmSharedModuleData::ExtractUtf8StringFromModuleBytes(isolate, shared,
+ section.name);
if (!name->Equals(*section_name.ToHandleChecked())) continue;
@@ -419,8 +379,7 @@ Handle<JSArray> GetCustomSections(Isolate* isolate,
JSArrayBuffer::Setup(buffer, isolate, is_external, memory, size, memory,
size);
DisallowHeapAllocation no_gc; // for raw access to string bytes.
- Handle<SeqOneByteString> module_bytes(compiled_module->module_bytes(),
- isolate);
+ Handle<SeqOneByteString> module_bytes(shared->module_bytes(), isolate);
const byte* start =
reinterpret_cast<const byte*>(module_bytes->GetCharsAddress());
memcpy(memory, start + section.payload.offset(), section.payload.length());
@@ -441,9 +400,9 @@ Handle<JSArray> GetCustomSections(Isolate* isolate,
return array_object;
}
-Handle<FixedArray> DecodeLocalNames(
- Isolate* isolate, Handle<WasmCompiledModule> compiled_module) {
- Handle<SeqOneByteString> wire_bytes(compiled_module->module_bytes(), isolate);
+Handle<FixedArray> DecodeLocalNames(Isolate* isolate,
+ Handle<WasmSharedModuleData> shared) {
+ Handle<SeqOneByteString> wire_bytes(shared->module_bytes(), isolate);
LocalNames decoded_locals;
{
DisallowHeapAllocation no_gc;
@@ -459,33 +418,14 @@ Handle<FixedArray> DecodeLocalNames(
locals_names->set(func.function_index, *func_locals_names);
for (LocalName& name : func.names) {
Handle<String> name_str =
- WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
- isolate, compiled_module, name.name)
+ WasmSharedModuleData::ExtractUtf8StringFromModuleBytes(
+ isolate, shared, name.name)
.ToHandleChecked();
func_locals_names->set(name.local_index, *name_str);
}
}
return locals_names;
}
-
-const char* ExternalKindName(WasmExternalKind kind) {
- switch (kind) {
- case kExternalFunction:
- return "function";
- case kExternalTable:
- return "table";
- case kExternalMemory:
- return "memory";
- case kExternalGlobal:
- return "global";
- }
- return "unknown";
-}
-
-#undef TRACE
-#undef TRACE_CHAIN
-#undef TRACE_COMPILE
-
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index e44ca995b0..492c51487f 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -15,18 +15,18 @@
#include "src/wasm/decoder.h"
#include "src/wasm/signature-map.h"
-#include "src/wasm/wasm-heap.h"
-#include "src/wasm/wasm-opcodes.h"
+#include "src/wasm/wasm-constants.h"
namespace v8 {
namespace internal {
class WasmCompiledModule;
class WasmDebugInfo;
-class WasmModuleObject;
class WasmInstanceObject;
-class WasmTableObject;
class WasmMemoryObject;
+class WasmModuleObject;
+class WasmSharedModuleData;
+class WasmTableObject;
namespace compiler {
class CallDescriptor;
@@ -34,13 +34,7 @@ class CallDescriptor;
namespace wasm {
class ErrorThrower;
-
-enum WasmExternalKind {
- kExternalFunction = 0,
- kExternalTable = 1,
- kExternalMemory = 2,
- kExternalGlobal = 3
-};
+class NativeModule;
// Static representation of a wasm function.
struct WasmFunction {
@@ -117,14 +111,14 @@ struct WasmTableInit {
struct WasmImport {
WireBytesRef module_name; // module name.
WireBytesRef field_name; // import name.
- WasmExternalKind kind; // kind of the import.
+ ImportExportKindCode kind; // kind of the import.
uint32_t index; // index into the respective space.
};
// Static representation of a wasm export.
struct WasmExport {
WireBytesRef name; // exported name.
- WasmExternalKind kind; // kind of the export.
+ ImportExportKindCode kind; // kind of the export.
uint32_t index; // index into the respective space.
};
@@ -136,11 +130,6 @@ struct ModuleWireBytes;
struct V8_EXPORT_PRIVATE WasmModule {
MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(WasmModule);
- static const uint32_t kPageSize = 0x10000; // Page size, 64kb.
- static const uint32_t kMinMemPages = 1; // Minimum memory size = 64kb
-
- static constexpr int kInvalidExceptionTag = -1;
-
std::unique_ptr<Zone> signature_zone;
uint32_t initial_pages = 0; // initial size of the memory in 64k pages
uint32_t maximum_pages = 0; // maximum size of the memory in 64k pages
@@ -247,7 +236,7 @@ struct WasmFunctionName {
: function_(function), name_(name) {}
const WasmFunction* function_;
- WasmName name_;
+ const WasmName name_;
};
std::ostream& operator<<(std::ostream& os, const WasmFunctionName& name);
@@ -275,7 +264,7 @@ V8_EXPORT_PRIVATE Handle<JSArray> GetCustomSections(
// Decode local variable names from the names section. Return FixedArray of
// FixedArray of <undefined|String>. The outer fixed array is indexed by the
// function index, the inner one by the local index.
-Handle<FixedArray> DecodeLocalNames(Isolate*, Handle<WasmCompiledModule>);
+Handle<FixedArray> DecodeLocalNames(Isolate*, Handle<WasmSharedModuleData>);
// If the target is an export wrapper, return the {WasmFunction*} corresponding
// to the wrapped wasm function; in all other cases, return nullptr.
@@ -284,10 +273,6 @@ Handle<FixedArray> DecodeLocalNames(Isolate*, Handle<WasmCompiledModule>);
// TODO(titzer): move this to WasmExportedFunction.
WasmFunction* GetWasmFunctionForExport(Isolate* isolate, Handle<Object> target);
-void UpdateDispatchTables(Isolate* isolate, Handle<FixedArray> dispatch_tables,
- int index, WasmFunction* function,
- Handle<Object> code_or_foreign);
-
Handle<Object> GetOrCreateIndirectCallWrapper(
Isolate* isolate, Handle<WasmInstanceObject> owning_instance,
WasmCodeWrapper wasm_code, uint32_t index, FunctionSig* sig);
@@ -295,10 +280,8 @@ Handle<Object> GetOrCreateIndirectCallWrapper(
void UnpackAndRegisterProtectedInstructionsGC(Isolate* isolate,
Handle<FixedArray> code_table);
-void UnpackAndRegisterProtectedInstructions(Isolate* isolate,
- wasm::NativeModule* native_module);
-
-const char* ExternalKindName(WasmExternalKind);
+void UnpackAndRegisterProtectedInstructions(
+ Isolate* isolate, const wasm::NativeModule* native_module);
// TruncatedUserString makes it easy to output names up to a certain length,
// emitting a truncated name followed by '...' if it exceeds that limit.
@@ -332,7 +315,7 @@ class TruncatedUserString {
private:
const char* start_;
- int length_;
+ const int length_;
char buffer_[kMaxLen];
};
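// Illustrative use (a sketch, not part of this patch), assuming a WasmName
// {name} holding the raw bytes of a user-provided name:
//   TruncatedUserString<> trunc_name(name);
//   PrintF("%.*s", trunc_name.length(), trunc_name.start());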
diff --git a/deps/v8/src/wasm/wasm-objects-inl.h b/deps/v8/src/wasm/wasm-objects-inl.h
index 27f7d68d17..0a85862174 100644
--- a/deps/v8/src/wasm/wasm-objects-inl.h
+++ b/deps/v8/src/wasm/wasm-objects-inl.h
@@ -56,8 +56,6 @@ OPTIONAL_ACCESSORS(WasmInstanceObject, table_object, WasmTableObject,
kTableObjectOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, function_tables, FixedArray,
kFunctionTablesOffset)
-OPTIONAL_ACCESSORS(WasmInstanceObject, signature_tables, FixedArray,
- kSignatureTablesOffset)
ACCESSORS(WasmInstanceObject, directly_called_instances, FixedArray,
kDirectlyCalledInstancesOffset)
ACCESSORS(WasmInstanceObject, js_imports_table, FixedArray,
@@ -83,31 +81,14 @@ OPTIONAL_ACCESSORS(WasmDebugInfo, c_wasm_entry_map, Managed<wasm::SignatureMap>,
#undef OPTIONAL_ACCESSORS
-#define FORWARD_SHARED(type, name) \
- type WasmCompiledModule::name() { return shared()->name(); }
-FORWARD_SHARED(SeqOneByteString*, module_bytes)
-FORWARD_SHARED(wasm::WasmModule*, module)
-FORWARD_SHARED(Script*, script)
-FORWARD_SHARED(bool, is_asm_js)
-#undef FORWARD_SHARED
-
#define WCM_OBJECT_OR_WEAK(TYPE, NAME, ID, TYPE_CHECK, SETTER_MODIFIER) \
- Handle<TYPE> WasmCompiledModule::NAME() const { \
- return handle(ptr_to_##NAME()); \
- } \
- \
- MaybeHandle<TYPE> WasmCompiledModule::maybe_##NAME() const { \
- if (has_##NAME()) return NAME(); \
- return MaybeHandle<TYPE>(); \
- } \
- \
- TYPE* WasmCompiledModule::maybe_ptr_to_##NAME() const { \
+ TYPE* WasmCompiledModule::maybe_##NAME() const { \
Object* obj = get(ID); \
if (!(TYPE_CHECK)) return nullptr; \
return TYPE::cast(obj); \
} \
\
- TYPE* WasmCompiledModule::ptr_to_##NAME() const { \
+ TYPE* WasmCompiledModule::NAME() const { \
Object* obj = get(ID); \
DCHECK(TYPE_CHECK); \
return TYPE::cast(obj); \
@@ -120,10 +101,7 @@ FORWARD_SHARED(bool, is_asm_js)
\
void WasmCompiledModule::reset_##NAME() { set_undefined(ID); } \
\
- void WasmCompiledModule::set_##NAME(Handle<TYPE> value) { \
- set_ptr_to_##NAME(*value); \
- } \
- void WasmCompiledModule::set_ptr_to_##NAME(TYPE* value) { set(ID, value); }
+ void WasmCompiledModule::set_##NAME(TYPE* value) { set(ID, value); }
#define WCM_OBJECT(TYPE, NAME) \
WCM_OBJECT_OR_WEAK(TYPE, NAME, kID_##NAME, obj->Is##TYPE(), public)
@@ -147,8 +125,9 @@ FORWARD_SHARED(bool, is_asm_js)
WCM_OBJECT_OR_WEAK(WeakCell, weak_##NAME, kID_##NAME, obj->IsWeakCell(), \
public) \
\
- Handle<TYPE> WasmCompiledModule::NAME() const { \
- return handle(TYPE::cast(weak_##NAME()->value())); \
+ TYPE* WasmCompiledModule::NAME() const { \
+ DCHECK(!weak_##NAME()->cleared()); \
+ return TYPE::cast(weak_##NAME()->value()); \
}
#define DEFINITION(KIND, TYPE, NAME) WCM_##KIND(TYPE, NAME)
@@ -171,7 +150,7 @@ bool WasmMemoryObject::has_maximum_pages() { return maximum_pages() >= 0; }
void WasmCompiledModule::ReplaceCodeTableForTesting(
Handle<FixedArray> testing_table) {
- set_code_table(testing_table);
+ set_code_table(*testing_table);
}
#include "src/objects/object-macros-undef.h"
diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc
index 565f38a9e7..c92a51716a 100644
--- a/deps/v8/src/wasm/wasm-objects.cc
+++ b/deps/v8/src/wasm/wasm-objects.cc
@@ -14,7 +14,9 @@
#include "src/trap-handler/trap-handler.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-code-specialization.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
@@ -45,7 +47,9 @@ class CompiledModulesIterator
Handle<WasmCompiledModule> start_module, bool at_end)
: isolate_(isolate),
start_module_(start_module),
- current_(at_end ? Handle<WasmCompiledModule>::null() : start_module) {}
+ current_(
+ at_end ? Handle<WasmCompiledModule>::null()
+ : Handle<WasmCompiledModule>::New(*start_module, isolate)) {}
Handle<WasmCompiledModule> operator*() const {
DCHECK(!current_.is_null());
@@ -64,7 +68,7 @@ class CompiledModulesIterator
DCHECK(!current_.is_null());
if (!is_backwards_) {
if (current_->has_next_instance()) {
- current_ = current_->next_instance();
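+        // Write through the existing handle's slot instead of binding a new
+        // handle, so {current_} keeps pointing at the same location.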
+ *current_.location() = current_->next_instance();
return;
}
// No more modules in next-links, now try the previous-links.
@@ -72,7 +76,7 @@ class CompiledModulesIterator
current_ = start_module_;
}
if (current_->has_prev_instance()) {
- current_ = current_->prev_instance();
+ *current_.location() = current_->prev_instance();
return;
}
current_ = Handle<WasmCompiledModule>::null();
@@ -118,7 +122,7 @@ class CompiledModuleInstancesIterator
bool NeedToAdvance() {
return !it.current_.is_null() &&
(!it.current_->has_weak_owning_instance() ||
- it.current_->ptr_to_weak_owning_instance()->cleared());
+ it.current_->weak_owning_instance()->cleared());
}
CompiledModulesIterator it;
};
@@ -131,14 +135,14 @@ iterate_compiled_module_instance_chain(
}
#ifdef DEBUG
-bool IsBreakablePosition(Handle<WasmCompiledModule> compiled_module,
- int func_index, int offset_in_func) {
+bool IsBreakablePosition(WasmSharedModuleData* shared, int func_index,
+ int offset_in_func) {
DisallowHeapAllocation no_gc;
AccountingAllocator alloc;
Zone tmp(&alloc, ZONE_NAME);
wasm::BodyLocalDecls locals(&tmp);
- const byte* module_start = compiled_module->module_bytes()->GetChars();
- WasmFunction& func = compiled_module->module()->functions[func_index];
+ const byte* module_start = shared->module_bytes()->GetChars();
+ WasmFunction& func = shared->module()->functions[func_index];
wasm::BytecodeIterator iterator(module_start + func.code.offset(),
module_start + func.code.end_offset(),
&locals);
@@ -159,6 +163,14 @@ void CompiledModuleFinalizer(const v8::WeakCallbackInfo<void>& data) {
GlobalHandles::Destroy(reinterpret_cast<Object**>(p));
}
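+// Layout of the dispatch_tables FixedArray of a WasmTableObject: each user of
+// the table contributes one (instance, table index, function table) triple.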
+enum DispatchTableElements : int {
+ kDispatchTableInstanceOffset,
+ kDispatchTableIndexOffset,
+ kDispatchTableFunctionTableOffset,
+ // Marker:
+ kDispatchTableNumElements
+};
+
} // namespace
Handle<WasmModuleObject> WasmModuleObject::New(
@@ -170,7 +182,7 @@ Handle<WasmModuleObject> WasmModuleObject::New(
module_object->set_compiled_module(*compiled_module);
Handle<WeakCell> link_to_module =
isolate->factory()->NewWeakCell(module_object);
- compiled_module->set_weak_wasm_module(link_to_module);
+ compiled_module->set_weak_wasm_module(*link_to_module);
return module_object;
}
@@ -179,7 +191,7 @@ void WasmModuleObject::ValidateStateForTesting(
DisallowHeapAllocation no_gc;
WasmCompiledModule* compiled_module = module_obj->compiled_module();
CHECK(compiled_module->has_weak_wasm_module());
- CHECK_EQ(compiled_module->ptr_to_weak_wasm_module()->value(), *module_obj);
+ CHECK_EQ(compiled_module->weak_wasm_module()->value(), *module_obj);
CHECK(!compiled_module->has_prev_instance());
CHECK(!compiled_module->has_next_instance());
CHECK(!compiled_module->has_weak_owning_instance());
@@ -203,62 +215,61 @@ Handle<WasmTableObject> WasmTableObject::New(Isolate* isolate, uint32_t initial,
Handle<Object> max = isolate->factory()->NewNumber(maximum);
table_obj->set_maximum_length(*max);
- Handle<FixedArray> dispatch_tables = isolate->factory()->NewFixedArray(0);
- table_obj->set_dispatch_tables(*dispatch_tables);
+ table_obj->set_dispatch_tables(isolate->heap()->empty_fixed_array());
return Handle<WasmTableObject>::cast(table_obj);
}
-Handle<FixedArray> WasmTableObject::AddDispatchTable(
- Isolate* isolate, Handle<WasmTableObject> table_obj,
- Handle<WasmInstanceObject> instance, int table_index,
- Handle<FixedArray> function_table, Handle<FixedArray> signature_table) {
+void WasmTableObject::AddDispatchTable(Isolate* isolate,
+ Handle<WasmTableObject> table_obj,
+ Handle<WasmInstanceObject> instance,
+ int table_index,
+ Handle<FixedArray> function_table) {
+ DCHECK_EQ(0, function_table->length() % compiler::kFunctionTableEntrySize);
Handle<FixedArray> dispatch_tables(table_obj->dispatch_tables());
- DCHECK_EQ(0, dispatch_tables->length() % 4);
+ int old_length = dispatch_tables->length();
+ DCHECK_EQ(0, old_length % kDispatchTableNumElements);
- if (instance.is_null()) return dispatch_tables;
+ if (instance.is_null()) return;
// TODO(titzer): use weak cells here to avoid leaking instances.
// Grow the dispatch table and add a new entry at the end.
Handle<FixedArray> new_dispatch_tables =
- isolate->factory()->CopyFixedArrayAndGrow(dispatch_tables, 4);
+ isolate->factory()->CopyFixedArrayAndGrow(dispatch_tables,
+ kDispatchTableNumElements);
- new_dispatch_tables->set(dispatch_tables->length() + 0, *instance);
- new_dispatch_tables->set(dispatch_tables->length() + 1,
+ new_dispatch_tables->set(old_length + kDispatchTableInstanceOffset,
+ *instance);
+ new_dispatch_tables->set(old_length + kDispatchTableIndexOffset,
Smi::FromInt(table_index));
- new_dispatch_tables->set(dispatch_tables->length() + 2, *function_table);
- new_dispatch_tables->set(dispatch_tables->length() + 3, *signature_table);
+ new_dispatch_tables->set(old_length + kDispatchTableFunctionTableOffset,
+ *function_table);
table_obj->set_dispatch_tables(*new_dispatch_tables);
-
- return new_dispatch_tables;
}
void WasmTableObject::Grow(Isolate* isolate, uint32_t count) {
// TODO(6792): No longer needed once WebAssembly code is off heap.
CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
Handle<FixedArray> dispatch_tables(this->dispatch_tables());
- DCHECK_EQ(0, dispatch_tables->length() % 4);
+ DCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements);
uint32_t old_size = functions()->length();
Zone specialization_zone(isolate->allocator(), ZONE_NAME);
- for (int i = 0; i < dispatch_tables->length(); i += 4) {
- Handle<FixedArray> old_function_table(
- FixedArray::cast(dispatch_tables->get(i + 2)));
- Handle<FixedArray> old_signature_table(
- FixedArray::cast(dispatch_tables->get(i + 3)));
+ for (int i = 0; i < dispatch_tables->length();
+ i += kDispatchTableNumElements) {
+ Handle<FixedArray> old_function_table(FixedArray::cast(
+ dispatch_tables->get(i + kDispatchTableFunctionTableOffset)));
Handle<FixedArray> new_function_table = isolate->global_handles()->Create(
- *isolate->factory()->CopyFixedArrayAndGrow(old_function_table, count));
- Handle<FixedArray> new_signature_table = isolate->global_handles()->Create(
- *isolate->factory()->CopyFixedArrayAndGrow(old_signature_table, count));
+ *isolate->factory()->CopyFixedArrayAndGrow(
+ old_function_table, count * compiler::kFunctionTableEntrySize));
GlobalHandleAddress new_function_table_addr = new_function_table.address();
- GlobalHandleAddress new_signature_table_addr =
- new_signature_table.address();
- int table_index = Smi::cast(dispatch_tables->get(i + 1))->value();
- // Update dispatch tables with new function/signature tables
- dispatch_tables->set(i + 2, *new_function_table);
- dispatch_tables->set(i + 3, *new_signature_table);
+ int table_index =
+ Smi::cast(dispatch_tables->get(i + kDispatchTableIndexOffset))->value();
+ // Update dispatch tables with new function tables.
+ dispatch_tables->set(i + kDispatchTableFunctionTableOffset,
+ *new_function_table);
// Patch the code of the respective instance.
if (FLAG_wasm_jit_to_native) {
@@ -269,18 +280,15 @@ void WasmTableObject::Grow(Isolate* isolate, uint32_t count) {
WasmInstanceObject::cast(dispatch_tables->get(i));
WasmCompiledModule* compiled_module = instance->compiled_module();
wasm::NativeModule* native_module = compiled_module->GetNativeModule();
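+      // Wasm code may be write-protected; this scope makes the native
+      // module's code writable while it is patched below.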
+ wasm::NativeModuleModificationScope native_module_modification_scope(
+ native_module);
GlobalHandleAddress old_function_table_addr =
native_module->function_tables()[table_index];
- GlobalHandleAddress old_signature_table_addr =
- native_module->signature_tables()[table_index];
code_specialization.PatchTableSize(old_size, old_size + count);
code_specialization.RelocatePointer(old_function_table_addr,
new_function_table_addr);
- code_specialization.RelocatePointer(old_signature_table_addr,
- new_signature_table_addr);
code_specialization.ApplyToWholeInstance(instance);
native_module->function_tables()[table_index] = new_function_table_addr;
- native_module->signature_tables()[table_index] = new_signature_table_addr;
} else {
DisallowHeapAllocation no_gc;
wasm::CodeSpecialization code_specialization(isolate,
@@ -289,23 +297,15 @@ void WasmTableObject::Grow(Isolate* isolate, uint32_t count) {
WasmInstanceObject::cast(dispatch_tables->get(i));
WasmCompiledModule* compiled_module = instance->compiled_module();
GlobalHandleAddress old_function_table_addr =
- WasmCompiledModule::GetTableValue(
- compiled_module->ptr_to_function_tables(), table_index);
- GlobalHandleAddress old_signature_table_addr =
- WasmCompiledModule::GetTableValue(
- compiled_module->ptr_to_signature_tables(), table_index);
+ WasmCompiledModule::GetTableValue(compiled_module->function_tables(),
+ table_index);
code_specialization.PatchTableSize(old_size, old_size + count);
code_specialization.RelocatePointer(old_function_table_addr,
new_function_table_addr);
- code_specialization.RelocatePointer(old_signature_table_addr,
- new_signature_table_addr);
code_specialization.ApplyToWholeInstance(instance);
- WasmCompiledModule::UpdateTableValue(
- compiled_module->ptr_to_function_tables(), table_index,
- new_function_table_addr);
- WasmCompiledModule::UpdateTableValue(
- compiled_module->ptr_to_signature_tables(), table_index,
- new_signature_table_addr);
+ WasmCompiledModule::UpdateTableValue(compiled_module->function_tables(),
+ table_index,
+ new_function_table_addr);
}
}
}
@@ -316,34 +316,69 @@ void WasmTableObject::Set(Isolate* isolate, Handle<WasmTableObject> table,
Handle<FixedArray> dispatch_tables(table->dispatch_tables(), isolate);
- WasmFunction* wasm_function = nullptr;
+ wasm::FunctionSig* sig = nullptr;
Handle<Object> code = Handle<Object>::null();
Handle<Object> value = isolate->factory()->null_value();
if (!function.is_null()) {
auto exported_function = Handle<WasmExportedFunction>::cast(function);
- wasm_function = wasm::GetWasmFunctionForExport(isolate, function);
+ auto* wasm_function = wasm::GetWasmFunctionForExport(isolate, function);
// The verification that {function} is an export was done
// by the caller.
- DCHECK_NOT_NULL(wasm_function);
+ DCHECK(wasm_function != nullptr && wasm_function->sig != nullptr);
+ sig = wasm_function->sig;
value = function;
    // TODO(titzer): Make JSToWasm wrappers just call the WASM-to-WASM wrapper,
    // so that the WASM-to-WASM wrapper can simply be reused here.
WasmCodeWrapper wasm_code = exported_function->GetWasmCode();
+ wasm::NativeModule* native_module =
+ wasm_code.IsCodeObject() ? nullptr : wasm_code.GetWasmCode()->owner();
CodeSpaceMemoryModificationScope gc_modification_scope(isolate->heap());
+ wasm::NativeModuleModificationScope native_modification_scope(
+ native_module);
code = wasm::GetOrCreateIndirectCallWrapper(
isolate, handle(exported_function->instance()), wasm_code,
- exported_function->function_index(), wasm_function->sig);
+ exported_function->function_index(), sig);
}
- UpdateDispatchTables(isolate, dispatch_tables, index, wasm_function, code);
+ UpdateDispatchTables(table, index, sig, code);
array->set(index, *value);
}
+void WasmTableObject::UpdateDispatchTables(Handle<WasmTableObject> table,
+ int index, wasm::FunctionSig* sig,
+ Handle<Object> code_or_foreign) {
+ DisallowHeapAllocation no_gc;
+ FixedArray* dispatch_tables = table->dispatch_tables();
+ DCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements);
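+  // Walk every (instance, table index, function table) triple and patch the
+  // function table entry at {index} in each of them.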
+ for (int i = 0; i < dispatch_tables->length();
+ i += kDispatchTableNumElements) {
+ FixedArray* function_table = FixedArray::cast(
+ dispatch_tables->get(i + kDispatchTableFunctionTableOffset));
+ Smi* sig_smi = Smi::FromInt(-1);
+ Object* code = Smi::kZero;
+ if (sig) {
+ DCHECK(code_or_foreign->IsCode() || code_or_foreign->IsForeign());
+ WasmInstanceObject* instance = WasmInstanceObject::cast(
+ dispatch_tables->get(i + kDispatchTableInstanceOffset));
+ // Note that {SignatureMap::Find} may return {-1} if the signature is
+ // not found; it will simply never match any check.
+ auto sig_index = instance->module()->signature_map.Find(sig);
+ sig_smi = Smi::FromInt(sig_index);
+ code = *code_or_foreign;
+ } else {
+ DCHECK(code_or_foreign.is_null());
+ }
+ function_table->set(compiler::FunctionTableSigOffset(index), sig_smi);
+ function_table->set(compiler::FunctionTableCodeOffset(index), code);
+ }
+}
+
namespace {
Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
Handle<JSArrayBuffer> old_buffer,
- uint32_t pages, uint32_t maximum_pages) {
+ uint32_t pages, uint32_t maximum_pages,
+ bool use_trap_handler) {
if (!old_buffer->is_growable()) return Handle<JSArrayBuffer>::null();
Address old_mem_start = nullptr;
uint32_t old_size = 0;
@@ -351,38 +386,64 @@ Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
old_mem_start = static_cast<Address>(old_buffer->backing_store());
CHECK(old_buffer->byte_length()->ToUint32(&old_size));
}
- DCHECK_EQ(0, old_size % WasmModule::kPageSize);
- uint32_t old_pages = old_size / WasmModule::kPageSize;
+ DCHECK_EQ(0, old_size % wasm::kWasmPageSize);
+ uint32_t old_pages = old_size / wasm::kWasmPageSize;
DCHECK_GE(std::numeric_limits<uint32_t>::max(),
- old_size + pages * WasmModule::kPageSize);
+ old_size + pages * wasm::kWasmPageSize);
if (old_pages > maximum_pages || pages > maximum_pages - old_pages) {
return Handle<JSArrayBuffer>::null();
}
- const bool enable_guard_regions = old_buffer.is_null()
- ? trap_handler::UseTrapHandler()
- : old_buffer->has_guard_region();
+ const bool enable_guard_regions =
+ old_buffer.is_null() ? use_trap_handler : old_buffer->has_guard_region();
size_t new_size =
- static_cast<size_t>(old_pages + pages) * WasmModule::kPageSize;
- if (enable_guard_regions && old_size != 0) {
+ static_cast<size_t>(old_pages + pages) * wasm::kWasmPageSize;
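+  // Give up if the new size would exceed the configured page limit or could
+  // not be represented as an int.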
+ if (new_size > FLAG_wasm_max_mem_pages * wasm::kWasmPageSize ||
+ new_size > kMaxInt) {
+ return Handle<JSArrayBuffer>::null();
+ }
+ if ((enable_guard_regions || old_size == new_size) && old_size != 0) {
DCHECK_NOT_NULL(old_buffer->backing_store());
- if (new_size > FLAG_wasm_max_mem_pages * WasmModule::kPageSize ||
- new_size > kMaxInt) {
- return Handle<JSArrayBuffer>::null();
+ if (old_size != new_size) {
+ isolate->array_buffer_allocator()->SetProtection(
+ old_mem_start, new_size,
+ v8::ArrayBuffer::Allocator::Protection::kReadWrite);
+ reinterpret_cast<v8::Isolate*>(isolate)
+ ->AdjustAmountOfExternalAllocatedMemory(pages * wasm::kWasmPageSize);
}
- isolate->array_buffer_allocator()->SetProtection(
- old_mem_start, new_size,
- v8::ArrayBuffer::Allocator::Protection::kReadWrite);
- reinterpret_cast<v8::Isolate*>(isolate)
- ->AdjustAmountOfExternalAllocatedMemory(pages * WasmModule::kPageSize);
- Handle<Object> length_obj = isolate->factory()->NewNumberFromSize(new_size);
- old_buffer->set_byte_length(*length_obj);
- return old_buffer;
+ // NOTE: We must allocate a new array buffer here because the spec
+ // assumes that ArrayBuffers do not change size.
+ void* allocation_base = old_buffer->allocation_base();
+ size_t allocation_length = old_buffer->allocation_length();
+ void* backing_store = old_buffer->backing_store();
+ bool has_guard_region = old_buffer->has_guard_region();
+ bool is_external = old_buffer->is_external();
+ // Disconnect buffer early so GC won't free it.
+ i::wasm::DetachMemoryBuffer(isolate, old_buffer, false);
+ Handle<JSArrayBuffer> new_buffer = wasm::SetupArrayBuffer(
+ isolate, allocation_base, allocation_length, backing_store, new_size,
+ is_external, has_guard_region);
+ return new_buffer;
} else {
+ bool free_memory = false;
Handle<JSArrayBuffer> new_buffer;
- new_buffer = wasm::NewArrayBuffer(isolate, new_size, enable_guard_regions);
- if (new_buffer.is_null() || old_size == 0) return new_buffer;
- Address new_mem_start = static_cast<Address>(new_buffer->backing_store());
- memcpy(new_mem_start, old_mem_start, old_size);
+ if (pages != 0) {
+ // Allocate a new buffer and memcpy the old contents.
+ free_memory = true;
+ new_buffer =
+ wasm::NewArrayBuffer(isolate, new_size, enable_guard_regions);
+ if (new_buffer.is_null() || old_size == 0) return new_buffer;
+ Address new_mem_start = static_cast<Address>(new_buffer->backing_store());
+ memcpy(new_mem_start, old_mem_start, old_size);
+ DCHECK(old_buffer.is_null() || !old_buffer->is_shared());
+ DCHECK(old_buffer.is_null() || !old_buffer->has_guard_region());
+ } else {
+ // Reuse the prior backing store, but allocate a new array buffer.
+ new_buffer = wasm::SetupArrayBuffer(
+ isolate, old_buffer->allocation_base(),
+ old_buffer->allocation_length(), old_buffer->backing_store(),
+ new_size, old_buffer->is_external(), old_buffer->has_guard_region());
+ }
+ i::wasm::DetachMemoryBuffer(isolate, old_buffer, free_memory);
return new_buffer;
}
}
@@ -397,7 +458,7 @@ void SetInstanceMemory(Isolate* isolate, Handle<WasmInstanceObject> instance,
// To flush out bugs earlier, in DEBUG mode, check that all pages of the
// memory are accessible by reading and writing one byte on each page.
for (uint32_t offset = 0; offset < wasm_context->mem_size;
- offset += WasmModule::kPageSize) {
+ offset += wasm::kWasmPageSize) {
byte val = wasm_context->mem_start[offset];
wasm_context->mem_start[offset] = val;
}
@@ -409,6 +470,10 @@ void SetInstanceMemory(Isolate* isolate, Handle<WasmInstanceObject> instance,
Handle<WasmMemoryObject> WasmMemoryObject::New(
Isolate* isolate, MaybeHandle<JSArrayBuffer> maybe_buffer,
int32_t maximum) {
+  // TODO(kschimpf): Do we need to add an argument that defines the
+  // style of memory the user prefers (with/without trap handling), so
+  // that the memory will match the style of the compiled wasm module?
+  // See issue v8:7143.
Handle<JSFunction> memory_ctor(
isolate->native_context()->wasm_memory_constructor());
auto memory_obj = Handle<WasmMemoryObject>::cast(
@@ -417,8 +482,11 @@ Handle<WasmMemoryObject> WasmMemoryObject::New(
Handle<JSArrayBuffer> buffer;
if (maybe_buffer.is_null()) {
// If no buffer was provided, create a 0-length one.
+
+      // TODO(kschimpf): Modify to use an argument defining the style of
+      // memory (see above).
buffer = wasm::SetupArrayBuffer(isolate, nullptr, 0, nullptr, 0, false,
- trap_handler::UseTrapHandler());
+ trap_handler::IsTrapHandlerEnabled());
} else {
buffer = maybe_buffer.ToHandleChecked();
// Paranoid check that the buffer size makes sense.
@@ -434,7 +502,7 @@ Handle<WasmMemoryObject> WasmMemoryObject::New(
uint32_t WasmMemoryObject::current_pages() {
uint32_t byte_length;
CHECK(array_buffer()->byte_length()->ToUint32(&byte_length));
- return byte_length / WasmModule::kPageSize;
+ return byte_length / wasm::kWasmPageSize;
}
void WasmMemoryObject::AddInstance(Isolate* isolate,
@@ -459,32 +527,6 @@ void WasmMemoryObject::RemoveInstance(Isolate* isolate,
}
}
-void WasmMemoryObject::SetupNewBufferWithSameBackingStore(
- Isolate* isolate, Handle<WasmMemoryObject> memory_object, uint32_t size) {
- // In case of Memory.Grow(0), or Memory.Grow(delta) with guard pages enabled,
- // Setup a new buffer, update memory object, and instances associated with the
- // memory object, as the current buffer will be detached.
- Handle<JSArrayBuffer> old_buffer(memory_object->array_buffer());
- Handle<JSArrayBuffer> new_buffer;
-
- constexpr bool is_external = false;
- new_buffer = wasm::SetupArrayBuffer(
- isolate, old_buffer->allocation_base(), old_buffer->allocation_length(),
- old_buffer->backing_store(), size * WasmModule::kPageSize, is_external,
- old_buffer->has_guard_region());
- if (memory_object->has_instances()) {
- Handle<WeakFixedArray> instances(memory_object->instances(), isolate);
- for (int i = 0; i < instances->Length(); i++) {
- Object* elem = instances->Get(i);
- if (!elem->IsWasmInstanceObject()) continue;
- Handle<WasmInstanceObject> instance(WasmInstanceObject::cast(elem),
- isolate);
- SetInstanceMemory(isolate, instance, new_buffer);
- }
- }
- memory_object->set_array_buffer(*new_buffer);
-}
-
// static
int32_t WasmMemoryObject::Grow(Isolate* isolate,
Handle<WasmMemoryObject> memory_object,
@@ -493,17 +535,18 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
if (!old_buffer->is_growable()) return -1;
uint32_t old_size = 0;
CHECK(old_buffer->byte_length()->ToUint32(&old_size));
- DCHECK_EQ(0, old_size % WasmModule::kPageSize);
+ DCHECK_EQ(0, old_size % wasm::kWasmPageSize);
Handle<JSArrayBuffer> new_buffer;
- // Return current size if grow by 0.
- if (pages == 0) return old_size / WasmModule::kPageSize;
uint32_t maximum_pages = FLAG_wasm_max_mem_pages;
if (memory_object->has_maximum_pages()) {
maximum_pages = Min(FLAG_wasm_max_mem_pages,
static_cast<uint32_t>(memory_object->maximum_pages()));
}
- new_buffer = GrowMemoryBuffer(isolate, old_buffer, pages, maximum_pages);
+ // TODO(kschimpf): We need to fix this by adding a field to WasmMemoryObject
+ // that defines the style of memory being used.
+ new_buffer = GrowMemoryBuffer(isolate, old_buffer, pages, maximum_pages,
+ trap_handler::IsTrapHandlerEnabled());
if (new_buffer.is_null()) return -1;
if (memory_object->has_instances()) {
@@ -517,14 +560,16 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
}
}
memory_object->set_array_buffer(*new_buffer);
- return old_size / WasmModule::kPageSize;
+ return old_size / wasm::kWasmPageSize;
}
WasmModuleObject* WasmInstanceObject::module_object() {
- return *compiled_module()->wasm_module();
+ return compiled_module()->wasm_module();
}
-WasmModule* WasmInstanceObject::module() { return compiled_module()->module(); }
+WasmModule* WasmInstanceObject::module() {
+ return compiled_module()->shared()->module();
+}
Handle<WasmDebugInfo> WasmInstanceObject::GetOrCreateDebugInfo(
Handle<WasmInstanceObject> instance) {
@@ -553,46 +598,21 @@ Handle<WasmInstanceObject> WasmInstanceObject::New(
return instance;
}
-int32_t WasmInstanceObject::GetMemorySize() {
- if (!has_memory_object()) return 0;
- uint32_t bytes = memory_object()->array_buffer()->byte_length()->Number();
- DCHECK_EQ(0, bytes % WasmModule::kPageSize);
- return bytes / WasmModule::kPageSize;
-}
-
int32_t WasmInstanceObject::GrowMemory(Isolate* isolate,
Handle<WasmInstanceObject> instance,
uint32_t pages) {
- if (pages == 0) return instance->GetMemorySize();
DCHECK(instance->has_memory_object());
return WasmMemoryObject::Grow(
isolate, handle(instance->memory_object(), isolate), pages);
}
-uint32_t WasmInstanceObject::GetMaxMemoryPages() {
- if (has_memory_object()) {
- if (memory_object()->has_maximum_pages()) {
- uint32_t maximum =
- static_cast<uint32_t>(memory_object()->maximum_pages());
- if (maximum < FLAG_wasm_max_mem_pages) return maximum;
- }
- }
- uint32_t compiled_maximum_pages = compiled_module()->module()->maximum_pages;
- Isolate* isolate = GetIsolate();
- assert(compiled_module()->module()->is_wasm());
- isolate->counters()->wasm_wasm_max_mem_pages_count()->AddSample(
- compiled_maximum_pages);
- if (compiled_maximum_pages != 0) return compiled_maximum_pages;
- return FLAG_wasm_max_mem_pages;
-}
-
WasmInstanceObject* WasmInstanceObject::GetOwningInstance(
const wasm::WasmCode* code) {
DisallowHeapAllocation no_gc;
Object* weak_link = nullptr;
- DCHECK(code->kind() == wasm::WasmCode::Function ||
- code->kind() == wasm::WasmCode::InterpreterStub);
- weak_link = code->owner()->compiled_module()->ptr_to_weak_owning_instance();
+ DCHECK(code->kind() == wasm::WasmCode::kFunction ||
+ code->kind() == wasm::WasmCode::kInterpreterStub);
+ weak_link = code->owner()->compiled_module()->weak_owning_instance();
DCHECK(weak_link->IsWeakCell());
WeakCell* cell = WeakCell::cast(weak_link);
if (cell->cleared()) return nullptr;
@@ -618,21 +638,21 @@ void WasmInstanceObject::ValidateInstancesChainForTesting(
CHECK_GE(instance_count, 0);
DisallowHeapAllocation no_gc;
WasmCompiledModule* compiled_module = module_obj->compiled_module();
- CHECK_EQ(JSObject::cast(compiled_module->ptr_to_weak_wasm_module()->value()),
+ CHECK_EQ(JSObject::cast(compiled_module->weak_wasm_module()->value()),
*module_obj);
Object* prev = nullptr;
int found_instances = compiled_module->has_weak_owning_instance() ? 1 : 0;
WasmCompiledModule* current_instance = compiled_module;
while (current_instance->has_next_instance()) {
CHECK((prev == nullptr && !current_instance->has_prev_instance()) ||
- current_instance->ptr_to_prev_instance() == prev);
- CHECK_EQ(current_instance->ptr_to_weak_wasm_module()->value(), *module_obj);
- CHECK(current_instance->ptr_to_weak_owning_instance()
+ current_instance->prev_instance() == prev);
+ CHECK_EQ(current_instance->weak_wasm_module()->value(), *module_obj);
+ CHECK(current_instance->weak_owning_instance()
->value()
->IsWasmInstanceObject());
prev = current_instance;
current_instance =
- WasmCompiledModule::cast(current_instance->ptr_to_next_instance());
+ WasmCompiledModule::cast(current_instance->next_instance());
++found_instances;
CHECK_LE(found_instances, instance_count);
}
@@ -644,7 +664,7 @@ void WasmInstanceObject::ValidateOrphanedInstanceForTesting(
DisallowHeapAllocation no_gc;
WasmCompiledModule* compiled_module = instance->compiled_module();
CHECK(compiled_module->has_weak_wasm_module());
- CHECK(compiled_module->ptr_to_weak_wasm_module()->cleared());
+ CHECK(compiled_module->weak_wasm_module()->cleared());
}
bool WasmExportedFunction::IsWasmExportedFunction(Object* object) {
@@ -735,8 +755,9 @@ WasmCodeWrapper WasmExportedFunction::GetWasmCode() {
DCHECK(!it.done());
WasmCodeWrapper target;
if (FLAG_wasm_jit_to_native) {
- target = WasmCodeWrapper(GetIsolate()->wasm_code_manager()->LookupCode(
- it.rinfo()->js_to_wasm_address()));
+ target = WasmCodeWrapper(
+ GetIsolate()->wasm_engine()->code_manager()->LookupCode(
+ it.rinfo()->js_to_wasm_address()));
} else {
Code* code = Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
if (!IsWasmFunctionCode(code)) continue;
@@ -953,8 +974,6 @@ void WasmSharedModuleData::SetBreakpointsOnNewInstance(
Handle<WasmSharedModuleData> shared, Handle<WasmInstanceObject> instance) {
if (!shared->has_breakpoint_infos()) return;
Isolate* isolate = shared->GetIsolate();
- Handle<WasmCompiledModule> compiled_module(instance->compiled_module(),
- isolate);
Handle<WasmDebugInfo> debug_info =
WasmInstanceObject::GetOrCreateDebugInfo(instance);
@@ -974,9 +993,9 @@ void WasmSharedModuleData::SetBreakpointsOnNewInstance(
int position = breakpoint_info->source_position();
// Find the function for this breakpoint, and set the breakpoint.
- int func_index = compiled_module->GetContainingFunction(position);
+ int func_index = shared->GetContainingFunction(position);
DCHECK_LE(0, func_index);
- WasmFunction& func = compiled_module->module()->functions[func_index];
+ WasmFunction& func = shared->module()->functions[func_index];
int offset_in_func = position - func.code.offset();
WasmDebugInfo::SetBreakpoint(debug_info, func_index, offset_in_func);
}
@@ -991,62 +1010,286 @@ void WasmSharedModuleData::PrepareForLazyCompilation(
shared->set_lazy_compilation_orchestrator(*orch_handle);
}
+namespace {
+
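+// Layout of one entry in the decoded asm.js offset table: the module-relative
+// byte offset, the source position of the call, and the source position of
+// the number conversion at that offset.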
+enum AsmJsOffsetTableEntryLayout {
+ kOTEByteOffset,
+ kOTECallPosition,
+ kOTENumberConvPosition,
+ kOTESize
+};
+
+Handle<ByteArray> GetDecodedAsmJsOffsetTable(
+ Handle<WasmSharedModuleData> shared, Isolate* isolate) {
+ DCHECK(shared->is_asm_js());
+ Handle<ByteArray> offset_table(shared->asm_js_offset_table(), isolate);
+
+  // The last byte in the asm_js_offset_table ByteArray tells whether it is
+ // still encoded (0) or decoded (1).
+ enum AsmJsTableType : int { Encoded = 0, Decoded = 1 };
+ int table_type = offset_table->get(offset_table->length() - 1);
+ DCHECK(table_type == Encoded || table_type == Decoded);
+ if (table_type == Decoded) return offset_table;
+
+ wasm::AsmJsOffsetsResult asm_offsets;
+ {
+ DisallowHeapAllocation no_gc;
+ const byte* bytes_start = offset_table->GetDataStartAddress();
+ const byte* bytes_end = bytes_start + offset_table->length() - 1;
+ asm_offsets = wasm::DecodeAsmJsOffsets(bytes_start, bytes_end);
+ }
+  // Wasm bytes must be valid and must contain an asm.js offset table.
+ DCHECK(asm_offsets.ok());
+ DCHECK_GE(kMaxInt, asm_offsets.val.size());
+ int num_functions = static_cast<int>(asm_offsets.val.size());
+ int num_imported_functions =
+ static_cast<int>(shared->module()->num_imported_functions);
+ DCHECK_EQ(shared->module()->functions.size(),
+ static_cast<size_t>(num_functions) + num_imported_functions);
+ int num_entries = 0;
+ for (int func = 0; func < num_functions; ++func) {
+ size_t new_size = asm_offsets.val[func].size();
+ DCHECK_LE(new_size, static_cast<size_t>(kMaxInt) - num_entries);
+ num_entries += static_cast<int>(new_size);
+ }
+ // One byte to encode that this is a decoded table.
+ DCHECK_GE(kMaxInt,
+ 1 + static_cast<uint64_t>(num_entries) * kOTESize * kIntSize);
+ int total_size = 1 + num_entries * kOTESize * kIntSize;
+ Handle<ByteArray> decoded_table =
+ isolate->factory()->NewByteArray(total_size, TENURED);
+ decoded_table->set(total_size - 1, AsmJsTableType::Decoded);
+ shared->set_asm_js_offset_table(*decoded_table);
+
+ int idx = 0;
+ std::vector<WasmFunction>& wasm_funs = shared->module()->functions;
+ for (int func = 0; func < num_functions; ++func) {
+ std::vector<wasm::AsmJsOffsetEntry>& func_asm_offsets =
+ asm_offsets.val[func];
+ if (func_asm_offsets.empty()) continue;
+ int func_offset = wasm_funs[num_imported_functions + func].code.offset();
+ for (wasm::AsmJsOffsetEntry& e : func_asm_offsets) {
+      // Byte offsets must be strictly monotonically increasing:
+ DCHECK_IMPLIES(idx > 0, func_offset + e.byte_offset >
+ decoded_table->get_int(idx - kOTESize));
+ decoded_table->set_int(idx + kOTEByteOffset, func_offset + e.byte_offset);
+ decoded_table->set_int(idx + kOTECallPosition, e.source_position_call);
+ decoded_table->set_int(idx + kOTENumberConvPosition,
+ e.source_position_number_conversion);
+ idx += kOTESize;
+ }
+ }
+ DCHECK_EQ(total_size, idx * kIntSize + 1);
+ return decoded_table;
+}
+
+} // namespace
+
+int WasmSharedModuleData::GetSourcePosition(Handle<WasmSharedModuleData> shared,
+ uint32_t func_index,
+ uint32_t byte_offset,
+ bool is_at_number_conversion) {
+ Isolate* isolate = shared->GetIsolate();
+ const WasmModule* module = shared->module();
+
+ if (!module->is_asm_js()) {
+    // For non-asm.js modules, we just add the function's start offset
+    // to make a module-relative position.
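+    // E.g. (illustrative numbers): if the function's body starts at module
+    // offset 120, a byte offset of 7 yields source position 127.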
+ return byte_offset + shared->GetFunctionOffset(func_index);
+ }
+
+ // asm.js modules have an additional offset table that must be searched.
+ Handle<ByteArray> offset_table = GetDecodedAsmJsOffsetTable(shared, isolate);
+
+ DCHECK_LT(func_index, module->functions.size());
+ uint32_t func_code_offset = module->functions[func_index].code.offset();
+ uint32_t total_offset = func_code_offset + byte_offset;
+
+ // Binary search for the total byte offset.
+ int left = 0; // inclusive
+ int right = offset_table->length() / kIntSize / kOTESize; // exclusive
+ DCHECK_LT(left, right);
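+  // Invariant (given the offset is present in the table): the entry at
+  // {left} is <= {total_offset} and the entry at {right}, if any, is greater.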
+ while (right - left > 1) {
+ int mid = left + (right - left) / 2;
+ int mid_entry = offset_table->get_int(kOTESize * mid);
+ DCHECK_GE(kMaxInt, mid_entry);
+ if (static_cast<uint32_t>(mid_entry) <= total_offset) {
+ left = mid;
+ } else {
+ right = mid;
+ }
+ }
+ // There should be an entry for each position that could show up on the stack
+ // trace:
+ DCHECK_EQ(total_offset, offset_table->get_int(kOTESize * left));
+ int idx = is_at_number_conversion ? kOTENumberConvPosition : kOTECallPosition;
+ return offset_table->get_int(kOTESize * left + idx);
+}
+
+v8::debug::WasmDisassembly WasmSharedModuleData::DisassembleFunction(
+ int func_index) {
+ DisallowHeapAllocation no_gc;
+
+ if (func_index < 0 ||
+ static_cast<uint32_t>(func_index) >= module()->functions.size())
+ return {};
+
+ SeqOneByteString* module_bytes_str = module_bytes();
+ Vector<const byte> module_bytes(module_bytes_str->GetChars(),
+ module_bytes_str->length());
+
+ std::ostringstream disassembly_os;
+ v8::debug::WasmDisassembly::OffsetTable offset_table;
+
+ PrintWasmText(module(), module_bytes, static_cast<uint32_t>(func_index),
+ disassembly_os, &offset_table);
+
+ return {disassembly_os.str(), std::move(offset_table)};
+}
+
+bool WasmSharedModuleData::GetPossibleBreakpoints(
+ const v8::debug::Location& start, const v8::debug::Location& end,
+ std::vector<v8::debug::BreakLocation>* locations) {
+ DisallowHeapAllocation no_gc;
+
+ std::vector<WasmFunction>& functions = module()->functions;
+ if (start.GetLineNumber() < 0 || start.GetColumnNumber() < 0 ||
+ (!end.IsEmpty() &&
+ (end.GetLineNumber() < 0 || end.GetColumnNumber() < 0)))
+ return false;
+
+  // start_func_index, start_offset and end_func_index are inclusive.
+ // end_offset is exclusive.
+ // start_offset and end_offset are module-relative byte offsets.
+ uint32_t start_func_index = start.GetLineNumber();
+ if (start_func_index >= functions.size()) return false;
+ int start_func_len = functions[start_func_index].code.length();
+ if (start.GetColumnNumber() > start_func_len) return false;
+ uint32_t start_offset =
+ functions[start_func_index].code.offset() + start.GetColumnNumber();
+ uint32_t end_func_index;
+ uint32_t end_offset;
+ if (end.IsEmpty()) {
+ // Default: everything till the end of the Script.
+ end_func_index = static_cast<uint32_t>(functions.size() - 1);
+ end_offset = functions[end_func_index].code.end_offset();
+ } else {
+ // If end is specified: Use it and check for valid input.
+ end_func_index = static_cast<uint32_t>(end.GetLineNumber());
+
+    // Special case: stop before the start of the next function. Change this to
+    // stopping at the end of the previous function, so that we do not also
+    // disassemble the next function.
+ if (end.GetColumnNumber() == 0 && end_func_index > 0) {
+ --end_func_index;
+ end_offset = functions[end_func_index].code.end_offset();
+ } else {
+ if (end_func_index >= functions.size()) return false;
+ end_offset =
+ functions[end_func_index].code.offset() + end.GetColumnNumber();
+ if (end_offset > functions[end_func_index].code.end_offset())
+ return false;
+ }
+ }
+
+ AccountingAllocator alloc;
+ Zone tmp(&alloc, ZONE_NAME);
+ const byte* module_start = module_bytes()->GetChars();
+
+ for (uint32_t func_idx = start_func_index; func_idx <= end_func_index;
+ ++func_idx) {
+ WasmFunction& func = functions[func_idx];
+ if (func.code.length() == 0) continue;
+
+ wasm::BodyLocalDecls locals(&tmp);
+ wasm::BytecodeIterator iterator(module_start + func.code.offset(),
+ module_start + func.code.end_offset(),
+ &locals);
+ DCHECK_LT(0u, locals.encoded_size);
+ for (uint32_t offset : iterator.offsets()) {
+ uint32_t total_offset = func.code.offset() + offset;
+ if (total_offset >= end_offset) {
+ DCHECK_EQ(end_func_index, func_idx);
+ break;
+ }
+ if (total_offset < start_offset) continue;
+ locations->emplace_back(func_idx, offset, debug::kCommonBreakLocation);
+ }
+ }
+ return true;
+}
+
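+// Check whether a breakpoint is set at exactly {position}; if so, return the
+// breakpoint objects that were hit, otherwise an empty handle.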
+MaybeHandle<FixedArray> WasmSharedModuleData::CheckBreakPoints(
+ Isolate* isolate, Handle<WasmSharedModuleData> shared, int position) {
+ if (!shared->has_breakpoint_infos()) return {};
+
+ Handle<FixedArray> breakpoint_infos(shared->breakpoint_infos(), isolate);
+ int insert_pos =
+ FindBreakpointInfoInsertPos(isolate, breakpoint_infos, position);
+ if (insert_pos >= breakpoint_infos->length()) return {};
+
+ Handle<Object> maybe_breakpoint_info(breakpoint_infos->get(insert_pos),
+ isolate);
+ if (maybe_breakpoint_info->IsUndefined(isolate)) return {};
+ Handle<BreakPointInfo> breakpoint_info =
+ Handle<BreakPointInfo>::cast(maybe_breakpoint_info);
+ if (breakpoint_info->source_position() != position) return {};
+
+ Handle<Object> breakpoint_objects(breakpoint_info->break_point_objects(),
+ isolate);
+ return isolate->debug()->GetHitBreakPointObjects(breakpoint_objects);
+}
+
Handle<WasmCompiledModule> WasmCompiledModule::New(
Isolate* isolate, WasmModule* module, Handle<FixedArray> code_table,
Handle<FixedArray> export_wrappers,
const std::vector<GlobalHandleAddress>& function_tables,
- const std::vector<GlobalHandleAddress>& signature_tables) {
- DCHECK_EQ(function_tables.size(), signature_tables.size());
+ bool use_trap_handler) {
Handle<FixedArray> ret =
isolate->factory()->NewFixedArray(PropertyIndices::Count, TENURED);
// WasmCompiledModule::cast would fail since fields are not set yet.
Handle<WasmCompiledModule> compiled_module(
reinterpret_cast<WasmCompiledModule*>(*ret), isolate);
- compiled_module->set_native_context(isolate->native_context());
+ Handle<WeakCell> weak_native_context =
+ isolate->factory()->NewWeakCell(isolate->native_context());
+ compiled_module->set_weak_native_context(*weak_native_context);
+ compiled_module->set_use_trap_handler(use_trap_handler);
if (!FLAG_wasm_jit_to_native) {
compiled_module->InitId();
- compiled_module->set_native_context(isolate->native_context());
- compiled_module->set_code_table(code_table);
- compiled_module->set_export_wrappers(export_wrappers);
+ compiled_module->set_code_table(*code_table);
+ compiled_module->set_export_wrappers(*export_wrappers);
// TODO(mtrofin): we copy these because the order of finalization isn't
// reliable, and we need these at Reset (which is called at
// finalization). If the order were reliable, and top-down, we could instead
// just get them from shared().
- compiled_module->set_initial_pages(module->initial_pages);
compiled_module->set_num_imported_functions(module->num_imported_functions);
int num_function_tables = static_cast<int>(function_tables.size());
if (num_function_tables > 0) {
- Handle<FixedArray> st =
- isolate->factory()->NewFixedArray(num_function_tables, TENURED);
Handle<FixedArray> ft =
isolate->factory()->NewFixedArray(num_function_tables, TENURED);
for (int i = 0; i < num_function_tables; ++i) {
- size_t index = static_cast<size_t>(i);
- SetTableValue(isolate, ft, i, function_tables[index]);
- SetTableValue(isolate, st, i, signature_tables[index]);
+ SetTableValue(isolate, ft, i, function_tables[i]);
}
// TODO(wasm): setting the empty tables this way is OK under the
// assumption that we compile and then instantiate. It needs rework if we
// do direct instantiation. The empty tables are used as a default when
// resetting the compiled module.
- compiled_module->set_signature_tables(st);
- compiled_module->set_empty_signature_tables(st);
- compiled_module->set_function_tables(ft);
- compiled_module->set_empty_function_tables(ft);
+ compiled_module->set_function_tables(*ft);
+ compiled_module->set_empty_function_tables(*ft);
}
} else {
if (!export_wrappers.is_null()) {
- compiled_module->set_export_wrappers(export_wrappers);
+ compiled_module->set_export_wrappers(*export_wrappers);
}
wasm::NativeModule* native_module = nullptr;
{
std::unique_ptr<wasm::NativeModule> native_module_ptr =
- isolate->wasm_code_manager()->NewNativeModule(*module);
+ isolate->wasm_engine()->code_manager()->NewNativeModule(*module);
native_module = native_module_ptr.release();
Handle<Foreign> native_module_wrapper =
Managed<wasm::NativeModule>::From(isolate, native_module);
- compiled_module->set_native_module(native_module_wrapper);
+ compiled_module->set_native_module(*native_module_wrapper);
Handle<WasmCompiledModule> weak_link =
isolate->global_handles()->Create(*compiled_module);
GlobalHandles::MakeWeak(Handle<Object>::cast(weak_link).location(),
@@ -1058,18 +1301,18 @@ Handle<WasmCompiledModule> WasmCompiledModule::New(
// This is here just because it is easier for APIs that need to work with
// either code_table or native_module. Otherwise we would need to check
// has_code_table and pass undefined.
- compiled_module->set_code_table(code_table);
+ compiled_module->set_code_table(*code_table);
native_module->function_tables() = function_tables;
- native_module->signature_tables() = signature_tables;
native_module->empty_function_tables() = function_tables;
- native_module->empty_signature_tables() = signature_tables;
int function_count = static_cast<int>(module->functions.size());
- compiled_module->set_handler_table(
- isolate->factory()->NewFixedArray(function_count, TENURED));
- compiled_module->set_source_positions(
- isolate->factory()->NewFixedArray(function_count, TENURED));
+ Handle<FixedArray> handler_table =
+ isolate->factory()->NewFixedArray(function_count, TENURED);
+ compiled_module->set_handler_table(*handler_table);
+ Handle<FixedArray> source_positions =
+ isolate->factory()->NewFixedArray(function_count, TENURED);
+ compiled_module->set_source_positions(*source_positions);
}
// TODO(mtrofin): copy the rest of the specialization parameters over.
// We're currently OK because we're only using defaults.
@@ -1080,7 +1323,8 @@ Handle<WasmCompiledModule> WasmCompiledModule::Clone(
Isolate* isolate, Handle<WasmCompiledModule> module) {
Handle<FixedArray> code_copy;
if (!FLAG_wasm_jit_to_native) {
- code_copy = isolate->factory()->CopyFixedArray(module->code_table());
+ code_copy = isolate->factory()->CopyFixedArray(
+ handle(module->code_table(), isolate));
}
Handle<WasmCompiledModule> ret = Handle<WasmCompiledModule>::cast(
isolate->factory()->CopyFixedArray(module));
@@ -1090,7 +1334,7 @@ Handle<WasmCompiledModule> WasmCompiledModule::Clone(
ret->reset_weak_exported_functions();
if (!FLAG_wasm_jit_to_native) {
ret->InitId();
- ret->set_code_table(code_copy);
+ ret->set_code_table(*code_copy);
return ret;
}
@@ -1100,7 +1344,7 @@ Handle<WasmCompiledModule> WasmCompiledModule::Clone(
// which would shift the this pointer in set_native_module.
Handle<Foreign> native_module_wrapper =
Managed<wasm::NativeModule>::From(isolate, native_module.release());
- ret->set_native_module(native_module_wrapper);
+ ret->set_native_module(*native_module_wrapper);
Handle<WasmCompiledModule> weak_link =
isolate->global_handles()->Create(*ret);
GlobalHandles::MakeWeak(Handle<Object>::cast(weak_link).location(),
@@ -1112,7 +1356,7 @@ Handle<WasmCompiledModule> WasmCompiledModule::Clone(
if (module->has_lazy_compile_data()) {
Handle<FixedArray> lazy_comp_data = isolate->factory()->NewFixedArray(
module->lazy_compile_data()->length(), TENURED);
- ret->set_lazy_compile_data(lazy_comp_data);
+ ret->set_lazy_compile_data(*lazy_comp_data);
}
return ret;
}
@@ -1140,7 +1384,7 @@ Address WasmCompiledModule::GetTableValue(FixedArray* table, int index) {
wasm::NativeModule* WasmCompiledModule::GetNativeModule() const {
if (!has_native_module()) return nullptr;
- return Managed<wasm::NativeModule>::cast(ptr_to_native_module())->get();
+ return Managed<wasm::NativeModule>::cast(native_module())->get();
}
void WasmCompiledModule::ResetGCModel(Isolate* isolate,
@@ -1148,7 +1392,7 @@ void WasmCompiledModule::ResetGCModel(Isolate* isolate,
DisallowHeapAllocation no_gc;
TRACE("Resetting %d\n", compiled_module->instance_id());
Object* undefined = *isolate->factory()->undefined_value();
- Object* fct_obj = compiled_module->ptr_to_code_table();
+ Object* fct_obj = compiled_module->code_table();
if (fct_obj != nullptr && fct_obj != undefined) {
// Patch code to update memory references, global references, and function
// table references.
@@ -1157,28 +1401,19 @@ void WasmCompiledModule::ResetGCModel(Isolate* isolate,
// Reset function tables.
if (compiled_module->has_function_tables()) {
- FixedArray* function_tables = compiled_module->ptr_to_function_tables();
- FixedArray* signature_tables = compiled_module->ptr_to_signature_tables();
+ FixedArray* function_tables = compiled_module->function_tables();
FixedArray* empty_function_tables =
- compiled_module->ptr_to_empty_function_tables();
- FixedArray* empty_signature_tables =
- compiled_module->ptr_to_empty_signature_tables();
+ compiled_module->empty_function_tables();
if (function_tables != empty_function_tables) {
DCHECK_EQ(function_tables->length(), empty_function_tables->length());
for (int i = 0, e = function_tables->length(); i < e; ++i) {
GlobalHandleAddress func_addr =
WasmCompiledModule::GetTableValue(function_tables, i);
- GlobalHandleAddress sig_addr =
- WasmCompiledModule::GetTableValue(signature_tables, i);
code_specialization.RelocatePointer(
func_addr,
WasmCompiledModule::GetTableValue(empty_function_tables, i));
- code_specialization.RelocatePointer(
- sig_addr,
- WasmCompiledModule::GetTableValue(empty_signature_tables, i));
}
- compiled_module->set_ptr_to_function_tables(empty_function_tables);
- compiled_module->set_ptr_to_signature_tables(empty_signature_tables);
+ compiled_module->set_function_tables(empty_function_tables);
}
}
@@ -1226,8 +1461,10 @@ void WasmCompiledModule::Reset(Isolate* isolate,
compiled_module->reset_next_instance();
wasm::NativeModule* native_module = compiled_module->GetNativeModule();
if (native_module == nullptr) return;
+ native_module->SetExecutable(false);
+
TRACE("Resetting %zu\n", native_module->instance_id);
- if (trap_handler::UseTrapHandler()) {
+ if (compiled_module->use_trap_handler()) {
for (uint32_t i = native_module->num_imported_functions(),
e = native_module->FunctionCount();
i < e; ++i) {
@@ -1258,23 +1495,16 @@ void WasmCompiledModule::Reset(Isolate* isolate,
if (native_module->function_tables().size() > 0) {
std::vector<GlobalHandleAddress>& function_tables =
native_module->function_tables();
- std::vector<GlobalHandleAddress>& signature_tables =
- native_module->signature_tables();
std::vector<GlobalHandleAddress>& empty_function_tables =
native_module->empty_function_tables();
- std::vector<GlobalHandleAddress>& empty_signature_tables =
- native_module->empty_signature_tables();
if (function_tables != empty_function_tables) {
DCHECK_EQ(function_tables.size(), empty_function_tables.size());
for (size_t i = 0, e = function_tables.size(); i < e; ++i) {
code_specialization.RelocatePointer(function_tables[i],
empty_function_tables[i]);
- code_specialization.RelocatePointer(signature_tables[i],
- empty_signature_tables[i]);
}
native_module->function_tables() = empty_function_tables;
- native_module->signature_tables() = empty_signature_tables;
}
}
@@ -1283,7 +1513,7 @@ void WasmCompiledModule::Reset(Isolate* isolate,
i < end; ++i) {
wasm::WasmCode* code = native_module->GetCode(i);
// Skip lazy compile stubs.
- if (code == nullptr || code->kind() != wasm::WasmCode::Function) continue;
+ if (code == nullptr || code->kind() != wasm::WasmCode::kFunction) continue;
bool changed = code_specialization.ApplyToWasmCode(WasmCodeWrapper(code),
SKIP_ICACHE_FLUSH);
// TODO(wasm): Check if this is faster than passing FLUSH_ICACHE_IF_NEEDED
@@ -1295,17 +1525,15 @@ void WasmCompiledModule::Reset(Isolate* isolate,
}
}
-MaybeHandle<String> WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
- Isolate* isolate, Handle<WasmCompiledModule> compiled_module,
+MaybeHandle<String> WasmSharedModuleData::ExtractUtf8StringFromModuleBytes(
+ Isolate* isolate, Handle<WasmSharedModuleData> shared,
wasm::WireBytesRef ref) {
// TODO(wasm): cache strings from modules if it's a performance win.
- Handle<SeqOneByteString> module_bytes(compiled_module->module_bytes(),
- isolate);
- return WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
- isolate, module_bytes, ref);
+ Handle<SeqOneByteString> module_bytes(shared->module_bytes(), isolate);
+ return ExtractUtf8StringFromModuleBytes(isolate, module_bytes, ref);
}
-MaybeHandle<String> WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
+MaybeHandle<String> WasmSharedModuleData::ExtractUtf8StringFromModuleBytes(
Isolate* isolate, Handle<SeqOneByteString> module_bytes,
wasm::WireBytesRef ref) {
DCHECK_GE(module_bytes->length(), ref.end_offset());
@@ -1370,7 +1598,7 @@ void WasmCompiledModule::PrintInstancesChain() {
PrintF("->%d", current->instance_id());
}
if (!current->has_next_instance()) break;
- current = current->ptr_to_next_instance();
+ current = current->next_instance();
}
PrintF("\n");
#endif
@@ -1379,8 +1607,8 @@ void WasmCompiledModule::PrintInstancesChain() {
void WasmCompiledModule::InsertInChain(WasmModuleObject* module) {
DisallowHeapAllocation no_gc;
WasmCompiledModule* original = module->compiled_module();
- set_ptr_to_next_instance(original);
- original->set_ptr_to_prev_instance(this);
+ set_next_instance(original);
+ original->set_prev_instance(this);
set_weak_wasm_module(original->weak_wasm_module());
}
@@ -1401,7 +1629,7 @@ void WasmCompiledModule::RemoveFromChain() {
void WasmCompiledModule::OnWasmModuleDecodingComplete(
Handle<WasmSharedModuleData> shared) {
- set_shared(shared);
+ set_shared(*shared);
}
void WasmCompiledModule::ReinitializeAfterDeserialization(
@@ -1417,7 +1645,7 @@ void WasmCompiledModule::ReinitializeAfterDeserialization(
WasmSharedModuleData::ReinitializeAfterDeserialization(isolate, shared);
}
size_t function_table_count =
- compiled_module->module()->function_tables.size();
+ compiled_module->shared()->module()->function_tables.size();
wasm::NativeModule* native_module = compiled_module->GetNativeModule();
if (function_table_count > 0) {
@@ -1425,34 +1653,23 @@ void WasmCompiledModule::ReinitializeAfterDeserialization(
// addresses. Produce new global handles for the empty tables, then reset,
// which will relocate the code. We end up with a WasmCompiledModule as if
// it were just compiled.
+ Handle<FixedArray> function_tables;
if (!FLAG_wasm_jit_to_native) {
DCHECK(compiled_module->has_function_tables());
- DCHECK(compiled_module->has_signature_tables());
- DCHECK(compiled_module->has_empty_signature_tables());
- DCHECK(compiled_module->has_empty_function_tables());
+ function_tables =
+ handle(compiled_module->empty_function_tables(), isolate);
} else {
DCHECK_GT(native_module->function_tables().size(), 0);
- DCHECK_GT(native_module->signature_tables().size(), 0);
- DCHECK_EQ(native_module->empty_signature_tables().size(),
- native_module->function_tables().size());
- DCHECK_EQ(native_module->empty_function_tables().size(),
- native_module->function_tables().size());
}
for (size_t i = 0; i < function_table_count; ++i) {
Handle<Object> global_func_table_handle =
isolate->global_handles()->Create(isolate->heap()->undefined_value());
- Handle<Object> global_sig_table_handle =
- isolate->global_handles()->Create(isolate->heap()->undefined_value());
GlobalHandleAddress new_func_table = global_func_table_handle.address();
- GlobalHandleAddress new_sig_table = global_sig_table_handle.address();
if (!FLAG_wasm_jit_to_native) {
- SetTableValue(isolate, compiled_module->empty_function_tables(),
- static_cast<int>(i), new_func_table);
- SetTableValue(isolate, compiled_module->empty_signature_tables(),
- static_cast<int>(i), new_sig_table);
+ SetTableValue(isolate, function_tables, static_cast<int>(i),
+ new_func_table);
} else {
native_module->empty_function_tables()[i] = new_func_table;
- native_module->empty_signature_tables()[i] = new_sig_table;
}
}
}
@@ -1463,38 +1680,31 @@ void WasmCompiledModule::ReinitializeAfterDeserialization(
DCHECK(WasmSharedModuleData::IsWasmSharedModuleData(*shared));
}
-uint32_t WasmCompiledModule::default_mem_size() const {
- return initial_pages() * WasmModule::kPageSize;
-}
-
-MaybeHandle<String> WasmCompiledModule::GetModuleNameOrNull(
- Isolate* isolate, Handle<WasmCompiledModule> compiled_module) {
- WasmModule* module = compiled_module->module();
+MaybeHandle<String> WasmSharedModuleData::GetModuleNameOrNull(
+ Isolate* isolate, Handle<WasmSharedModuleData> shared) {
+ WasmModule* module = shared->module();
if (!module->name.is_set()) return {};
- return WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
- isolate, compiled_module, module->name);
+ return ExtractUtf8StringFromModuleBytes(isolate, shared, module->name);
}
-MaybeHandle<String> WasmCompiledModule::GetFunctionNameOrNull(
- Isolate* isolate, Handle<WasmCompiledModule> compiled_module,
+MaybeHandle<String> WasmSharedModuleData::GetFunctionNameOrNull(
+ Isolate* isolate, Handle<WasmSharedModuleData> shared,
uint32_t func_index) {
- DCHECK_LT(func_index, compiled_module->module()->functions.size());
- WasmFunction& function = compiled_module->module()->functions[func_index];
+ DCHECK_LT(func_index, shared->module()->functions.size());
+ WasmFunction& function = shared->module()->functions[func_index];
if (!function.name.is_set()) return {};
- return WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
- isolate, compiled_module, function.name);
+ return ExtractUtf8StringFromModuleBytes(isolate, shared, function.name);
}
-Handle<String> WasmCompiledModule::GetFunctionName(
- Isolate* isolate, Handle<WasmCompiledModule> compiled_module,
+Handle<String> WasmSharedModuleData::GetFunctionName(
+ Isolate* isolate, Handle<WasmSharedModuleData> shared,
uint32_t func_index) {
- MaybeHandle<String> name =
- GetFunctionNameOrNull(isolate, compiled_module, func_index);
+ MaybeHandle<String> name = GetFunctionNameOrNull(isolate, shared, func_index);
if (!name.is_null()) return name.ToHandleChecked();
return isolate->factory()->NewStringFromStaticChars("<WASM UNNAMED>");
}
-Vector<const uint8_t> WasmCompiledModule::GetRawFunctionName(
+Vector<const uint8_t> WasmSharedModuleData::GetRawFunctionName(
uint32_t func_index) {
DCHECK_GT(module()->functions.size(), func_index);
WasmFunction& function = module()->functions[func_index];
@@ -1505,14 +1715,14 @@ Vector<const uint8_t> WasmCompiledModule::GetRawFunctionName(
function.name.length());
}
-int WasmCompiledModule::GetFunctionOffset(uint32_t func_index) {
+int WasmSharedModuleData::GetFunctionOffset(uint32_t func_index) {
std::vector<WasmFunction>& functions = module()->functions;
if (static_cast<uint32_t>(func_index) >= functions.size()) return -1;
DCHECK_GE(kMaxInt, functions[func_index].code.offset());
return static_cast<int>(functions[func_index].code.offset());
}
-int WasmCompiledModule::GetContainingFunction(uint32_t byte_offset) {
+int WasmSharedModuleData::GetContainingFunction(uint32_t byte_offset) {
std::vector<WasmFunction>& functions = module()->functions;
// Binary search for a function containing the given position.
@@ -1537,8 +1747,8 @@ int WasmCompiledModule::GetContainingFunction(uint32_t byte_offset) {
return left;
}
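
The predecessor-style binary search above returns the last function whose start offset is at or below the given byte offset. A minimal self-contained sketch of the same search over hypothetical function start offsets (the names and the trailing end-offset check are assumptions, not V8 code):

    #include <vector>

    // Returns the index of the last function starting at or before
    // byte_offset, or -1 if the offset precedes all functions. The caller
    // must still verify byte_offset against that function's end offset.
    int FindContainingFunction(const std::vector<unsigned>& starts,
                               unsigned byte_offset) {
      if (starts.empty() || byte_offset < starts[0]) return -1;
      int left = 0;                                 // inclusive
      int right = static_cast<int>(starts.size());  // exclusive
      while (right - left > 1) {
        int mid = left + (right - left) / 2;
        if (starts[mid] <= byte_offset) left = mid; else right = mid;
      }
      return left;
    }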
-bool WasmCompiledModule::GetPositionInfo(uint32_t position,
- Script::PositionInfo* info) {
+bool WasmSharedModuleData::GetPositionInfo(uint32_t position,
+ Script::PositionInfo* info) {
int func_index = GetContainingFunction(position);
if (func_index < 0) return false;
@@ -1551,234 +1761,25 @@ bool WasmCompiledModule::GetPositionInfo(uint32_t position,
return true;
}
-namespace {
-
-enum AsmJsOffsetTableEntryLayout {
- kOTEByteOffset,
- kOTECallPosition,
- kOTENumberConvPosition,
- kOTESize
-};
-
-Handle<ByteArray> GetDecodedAsmJsOffsetTable(
- Handle<WasmCompiledModule> compiled_module, Isolate* isolate) {
- DCHECK(compiled_module->is_asm_js());
- Handle<ByteArray> offset_table(
- compiled_module->shared()->asm_js_offset_table(), isolate);
-
- // The last byte in the asm_js_offset_tables ByteArray tells whether it is
- // still encoded (0) or decoded (1).
- enum AsmJsTableType : int { Encoded = 0, Decoded = 1 };
- int table_type = offset_table->get(offset_table->length() - 1);
- DCHECK(table_type == Encoded || table_type == Decoded);
- if (table_type == Decoded) return offset_table;
-
- wasm::AsmJsOffsetsResult asm_offsets;
- {
- DisallowHeapAllocation no_gc;
- const byte* bytes_start = offset_table->GetDataStartAddress();
- const byte* bytes_end = bytes_start + offset_table->length() - 1;
- asm_offsets = wasm::DecodeAsmJsOffsets(bytes_start, bytes_end);
- }
- // Wasm bytes must be valid and must contain asm.js offset table.
- DCHECK(asm_offsets.ok());
- DCHECK_GE(kMaxInt, asm_offsets.val.size());
- int num_functions = static_cast<int>(asm_offsets.val.size());
- int num_imported_functions =
- static_cast<int>(compiled_module->module()->num_imported_functions);
- DCHECK_EQ(compiled_module->module()->functions.size(),
- static_cast<size_t>(num_functions) + num_imported_functions);
- int num_entries = 0;
- for (int func = 0; func < num_functions; ++func) {
- size_t new_size = asm_offsets.val[func].size();
- DCHECK_LE(new_size, static_cast<size_t>(kMaxInt) - num_entries);
- num_entries += static_cast<int>(new_size);
- }
- // One byte to encode that this is a decoded table.
- DCHECK_GE(kMaxInt,
- 1 + static_cast<uint64_t>(num_entries) * kOTESize * kIntSize);
- int total_size = 1 + num_entries * kOTESize * kIntSize;
- Handle<ByteArray> decoded_table =
- isolate->factory()->NewByteArray(total_size, TENURED);
- decoded_table->set(total_size - 1, AsmJsTableType::Decoded);
- compiled_module->shared()->set_asm_js_offset_table(*decoded_table);
-
- int idx = 0;
- std::vector<WasmFunction>& wasm_funs = compiled_module->module()->functions;
- for (int func = 0; func < num_functions; ++func) {
- std::vector<wasm::AsmJsOffsetEntry>& func_asm_offsets =
- asm_offsets.val[func];
- if (func_asm_offsets.empty()) continue;
- int func_offset = wasm_funs[num_imported_functions + func].code.offset();
- for (wasm::AsmJsOffsetEntry& e : func_asm_offsets) {
- // Byte offsets must be strictly monotonously increasing:
- DCHECK_IMPLIES(idx > 0, func_offset + e.byte_offset >
- decoded_table->get_int(idx - kOTESize));
- decoded_table->set_int(idx + kOTEByteOffset, func_offset + e.byte_offset);
- decoded_table->set_int(idx + kOTECallPosition, e.source_position_call);
- decoded_table->set_int(idx + kOTENumberConvPosition,
- e.source_position_number_conversion);
- idx += kOTESize;
- }
- }
- DCHECK_EQ(total_size, idx * kIntSize + 1);
- return decoded_table;
-}
-
-} // namespace
-
-int WasmCompiledModule::GetSourcePosition(
- Handle<WasmCompiledModule> compiled_module, uint32_t func_index,
- uint32_t byte_offset, bool is_at_number_conversion) {
- Isolate* isolate = compiled_module->GetIsolate();
- const WasmModule* module = compiled_module->module();
-
- if (!module->is_asm_js()) {
- // for non-asm.js modules, we just add the function's start offset
- // to make a module-relative position.
- return byte_offset + compiled_module->GetFunctionOffset(func_index);
- }
-
- // asm.js modules have an additional offset table that must be searched.
- Handle<ByteArray> offset_table =
- GetDecodedAsmJsOffsetTable(compiled_module, isolate);
-
- DCHECK_LT(func_index, module->functions.size());
- uint32_t func_code_offset = module->functions[func_index].code.offset();
- uint32_t total_offset = func_code_offset + byte_offset;
-
- // Binary search for the total byte offset.
- int left = 0; // inclusive
- int right = offset_table->length() / kIntSize / kOTESize; // exclusive
- DCHECK_LT(left, right);
- while (right - left > 1) {
- int mid = left + (right - left) / 2;
- int mid_entry = offset_table->get_int(kOTESize * mid);
- DCHECK_GE(kMaxInt, mid_entry);
- if (static_cast<uint32_t>(mid_entry) <= total_offset) {
- left = mid;
- } else {
- right = mid;
- }
- }
- // There should be an entry for each position that could show up on the stack
- // trace:
- DCHECK_EQ(total_offset, offset_table->get_int(kOTESize * left));
- int idx = is_at_number_conversion ? kOTENumberConvPosition : kOTECallPosition;
- return offset_table->get_int(kOTESize * left + idx);
-}
-
-v8::debug::WasmDisassembly WasmCompiledModule::DisassembleFunction(
- int func_index) {
- DisallowHeapAllocation no_gc;
-
- if (func_index < 0 ||
- static_cast<uint32_t>(func_index) >= module()->functions.size())
- return {};
-
- SeqOneByteString* module_bytes_str = module_bytes();
- Vector<const byte> module_bytes(module_bytes_str->GetChars(),
- module_bytes_str->length());
-
- std::ostringstream disassembly_os;
- v8::debug::WasmDisassembly::OffsetTable offset_table;
-
- PrintWasmText(module(), module_bytes, static_cast<uint32_t>(func_index),
- disassembly_os, &offset_table);
-
- return {disassembly_os.str(), std::move(offset_table)};
-}
-
-bool WasmCompiledModule::GetPossibleBreakpoints(
- const v8::debug::Location& start, const v8::debug::Location& end,
- std::vector<v8::debug::BreakLocation>* locations) {
- DisallowHeapAllocation no_gc;
-
- std::vector<WasmFunction>& functions = module()->functions;
- if (start.GetLineNumber() < 0 || start.GetColumnNumber() < 0 ||
- (!end.IsEmpty() &&
- (end.GetLineNumber() < 0 || end.GetColumnNumber() < 0)))
- return false;
-
- // start_func_index, start_offset and end_func_index is inclusive.
- // end_offset is exclusive.
- // start_offset and end_offset are module-relative byte offsets.
- uint32_t start_func_index = start.GetLineNumber();
- if (start_func_index >= functions.size()) return false;
- int start_func_len = functions[start_func_index].code.length();
- if (start.GetColumnNumber() > start_func_len) return false;
- uint32_t start_offset =
- functions[start_func_index].code.offset() + start.GetColumnNumber();
- uint32_t end_func_index;
- uint32_t end_offset;
- if (end.IsEmpty()) {
- // Default: everything till the end of the Script.
- end_func_index = static_cast<uint32_t>(functions.size() - 1);
- end_offset = functions[end_func_index].code.end_offset();
- } else {
- // If end is specified: Use it and check for valid input.
- end_func_index = static_cast<uint32_t>(end.GetLineNumber());
-
- // Special case: Stop before the start of the next function. Change to: Stop
- // at the end of the function before, such that we don't disassemble the
- // next function also.
- if (end.GetColumnNumber() == 0 && end_func_index > 0) {
- --end_func_index;
- end_offset = functions[end_func_index].code.end_offset();
- } else {
- if (end_func_index >= functions.size()) return false;
- end_offset =
- functions[end_func_index].code.offset() + end.GetColumnNumber();
- if (end_offset > functions[end_func_index].code.end_offset())
- return false;
- }
- }
-
- AccountingAllocator alloc;
- Zone tmp(&alloc, ZONE_NAME);
- const byte* module_start = module_bytes()->GetChars();
-
- for (uint32_t func_idx = start_func_index; func_idx <= end_func_index;
- ++func_idx) {
- WasmFunction& func = functions[func_idx];
- if (func.code.length() == 0) continue;
-
- wasm::BodyLocalDecls locals(&tmp);
- wasm::BytecodeIterator iterator(module_start + func.code.offset(),
- module_start + func.code.end_offset(),
- &locals);
- DCHECK_LT(0u, locals.encoded_size);
- for (uint32_t offset : iterator.offsets()) {
- uint32_t total_offset = func.code.offset() + offset;
- if (total_offset >= end_offset) {
- DCHECK_EQ(end_func_index, func_idx);
- break;
- }
- if (total_offset < start_offset) continue;
- locations->emplace_back(func_idx, offset, debug::kCommonBreakLocation);
- }
- }
- return true;
-}
bool WasmCompiledModule::SetBreakPoint(
Handle<WasmCompiledModule> compiled_module, int* position,
Handle<Object> break_point_object) {
Isolate* isolate = compiled_module->GetIsolate();
+ Handle<WasmSharedModuleData> shared(compiled_module->shared(), isolate);
// Find the function for this breakpoint.
- int func_index = compiled_module->GetContainingFunction(*position);
+ int func_index = shared->GetContainingFunction(*position);
if (func_index < 0) return false;
- WasmFunction& func = compiled_module->module()->functions[func_index];
+ WasmFunction& func = shared->module()->functions[func_index];
int offset_in_func = *position - func.code.offset();
// According to the current design, we should only be called with valid
// breakable positions.
- DCHECK(IsBreakablePosition(compiled_module, func_index, offset_in_func));
+ DCHECK(IsBreakablePosition(*shared, func_index, offset_in_func));
// Insert new break point into break_positions of shared module data.
- WasmSharedModuleData::AddBreakpoint(compiled_module->shared(), *position,
- break_point_object);
+ WasmSharedModuleData::AddBreakpoint(shared, *position, break_point_object);
// Iterate over all instances of this module and tell them to set this new
// breakpoint.
@@ -1792,27 +1793,6 @@ bool WasmCompiledModule::SetBreakPoint(
return true;
}
-MaybeHandle<FixedArray> WasmCompiledModule::CheckBreakPoints(int position) {
- Isolate* isolate = GetIsolate();
- if (!shared()->has_breakpoint_infos()) return {};
-
- Handle<FixedArray> breakpoint_infos(shared()->breakpoint_infos(), isolate);
- int insert_pos =
- FindBreakpointInfoInsertPos(isolate, breakpoint_infos, position);
- if (insert_pos >= breakpoint_infos->length()) return {};
-
- Handle<Object> maybe_breakpoint_info(breakpoint_infos->get(insert_pos),
- isolate);
- if (maybe_breakpoint_info->IsUndefined(isolate)) return {};
- Handle<BreakPointInfo> breakpoint_info =
- Handle<BreakPointInfo>::cast(maybe_breakpoint_info);
- if (breakpoint_info->source_position() != position) return {};
-
- Handle<Object> breakpoint_objects(breakpoint_info->break_point_objects(),
- isolate);
- return isolate->debug()->GetHitBreakPointObjects(breakpoint_objects);
-}
-
void AttachWasmFunctionInfo(Isolate* isolate, Handle<Code> code,
MaybeHandle<WeakCell> weak_instance,
int func_index) {
diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h
index ec53b8ac2a..cecc11f83f 100644
--- a/deps/v8/src/wasm/wasm-objects.h
+++ b/deps/v8/src/wasm/wasm-objects.h
@@ -12,6 +12,7 @@
#include "src/objects.h"
#include "src/objects/script.h"
#include "src/wasm/decoder.h"
+#include "src/wasm/wasm-interpreter.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
@@ -26,7 +27,6 @@ namespace wasm {
class InterpretedFrame;
class NativeModule;
class WasmCode;
-class WasmInterpreter;
struct WasmModule;
class SignatureMap;
typedef Address GlobalHandleAddress;
@@ -69,9 +69,7 @@ struct WasmContext {
byte* globals_start = nullptr;
inline void SetRawMemory(void* mem_start, size_t mem_size) {
- DCHECK_LE(mem_size, std::min(wasm::kV8MaxWasmMemoryPages,
- wasm::kSpecMaxWasmMemoryPages) *
- wasm::WasmModule::kPageSize);
+ DCHECK_LE(mem_size, wasm::kV8MaxWasmMemoryPages * wasm::kWasmPageSize);
this->mem_start = static_cast<byte*>(mem_start);
this->mem_size = static_cast<uint32_t>(mem_size);
this->mem_mask = base::bits::RoundUpToPowerOfTwo32(this->mem_size) - 1;
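
For the mask computed above: rounding the memory size up to a power of two and subtracting one yields an AND-mask. A standalone sketch of the arithmetic; the helper reimplements the usual bit-smearing contract of base::bits::RoundUpToPowerOfTwo32 for non-zero inputs and is not V8 code:

    #include <cassert>
    #include <cstdint>

    // Classic bit-smearing round-up; valid for 0 < v <= 2^31.
    uint32_t RoundUpToPowerOfTwo32(uint32_t v) {
      v--;
      v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16;
      return v + 1;
    }

    int main() {
      uint32_t mem_size = 3 * 65536;  // three wasm pages
      uint32_t mem_mask = RoundUpToPowerOfTwo32(mem_size) - 1;  // 0x3FFFF
      // Masking keeps an index below the rounded-up size (0x40000); an
      // explicit bounds check against mem_size (0x30000) is still needed
      // for offsets that land in the gap between the two.
      assert((0x31234u & mem_mask) == 0x31234u);  // in the gap: the mask
      return 0;                                   // alone does not catch it
    }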
@@ -131,13 +129,17 @@ class WasmTableObject : public JSObject {
static Handle<WasmTableObject> New(Isolate* isolate, uint32_t initial,
int64_t maximum,
Handle<FixedArray>* js_functions);
- static Handle<FixedArray> AddDispatchTable(
- Isolate* isolate, Handle<WasmTableObject> table,
- Handle<WasmInstanceObject> instance, int table_index,
- Handle<FixedArray> function_table, Handle<FixedArray> signature_table);
+ static void AddDispatchTable(Isolate* isolate, Handle<WasmTableObject> table,
+ Handle<WasmInstanceObject> instance,
+ int table_index,
+ Handle<FixedArray> function_table);
static void Set(Isolate* isolate, Handle<WasmTableObject> table,
int32_t index, Handle<JSFunction> function);
+
+ static void UpdateDispatchTables(Handle<WasmTableObject> table, int index,
+ wasm::FunctionSig* sig,
+ Handle<Object> code_or_foreign);
};
// Representation of a WebAssembly.Memory JavaScript-level object.
@@ -177,8 +179,6 @@ class WasmMemoryObject : public JSObject {
Isolate* isolate, MaybeHandle<JSArrayBuffer> buffer, int32_t maximum);
static int32_t Grow(Isolate*, Handle<WasmMemoryObject>, uint32_t pages);
- static void SetupNewBufferWithSameBackingStore(
- Isolate* isolate, Handle<WasmMemoryObject> memory_object, uint32_t size);
};
// A WebAssembly.Instance JavaScript-level object.
@@ -194,7 +194,6 @@ class WasmInstanceObject : public JSObject {
DECL_OPTIONAL_ACCESSORS(debug_info, WasmDebugInfo)
DECL_OPTIONAL_ACCESSORS(table_object, WasmTableObject)
DECL_OPTIONAL_ACCESSORS(function_tables, FixedArray)
- DECL_OPTIONAL_ACCESSORS(signature_tables, FixedArray)
// FixedArray of all instances whose code was imported
DECL_OPTIONAL_ACCESSORS(directly_called_instances, FixedArray)
@@ -209,7 +208,6 @@ class WasmInstanceObject : public JSObject {
kDebugInfoIndex,
kTableObjectIndex,
kFunctionTablesIndex,
- kSignatureTablesIndex,
kDirectlyCalledInstancesIndex,
kJsImportsTableIndex,
kFieldCount
@@ -224,7 +222,6 @@ class WasmInstanceObject : public JSObject {
DEF_OFFSET(DebugInfo)
DEF_OFFSET(TableObject)
DEF_OFFSET(FunctionTables)
- DEF_OFFSET(SignatureTables)
DEF_OFFSET(DirectlyCalledInstances)
DEF_OFFSET(JsImportsTable)
@@ -237,13 +234,9 @@ class WasmInstanceObject : public JSObject {
static Handle<WasmInstanceObject> New(Isolate*, Handle<WasmCompiledModule>);
- int32_t GetMemorySize();
-
static int32_t GrowMemory(Isolate*, Handle<WasmInstanceObject>,
uint32_t pages);
- uint32_t GetMaxMemoryPages();
-
// Assumed to be called with a code object associated to a wasm module
// instance. Intended to be called from runtime functions. Returns nullptr on
// failing to get owning instance.
@@ -325,6 +318,77 @@ class WasmSharedModuleData : public FixedArray {
Handle<SeqOneByteString> module_bytes, Handle<Script> script,
Handle<ByteArray> asm_js_offset_table);
+ // Get the module name, if set. Returns an empty handle otherwise.
+ static MaybeHandle<String> GetModuleNameOrNull(Isolate*,
+ Handle<WasmSharedModuleData>);
+
+ // Get the function name of the function identified by the given index.
+ // Returns a null handle if the function is unnamed or the name is not a valid
+ // UTF-8 string.
+ static MaybeHandle<String> GetFunctionNameOrNull(Isolate*,
+ Handle<WasmSharedModuleData>,
+ uint32_t func_index);
+
+ // Get the function name of the function identified by the given index.
+ // Returns "<WASM UNNAMED>" if the function is unnamed or the name is not a
+ // valid UTF-8 string.
+ static Handle<String> GetFunctionName(Isolate*, Handle<WasmSharedModuleData>,
+ uint32_t func_index);
+
+ // Get the raw bytes of the function name of the function identified by the
+ // given index.
+ // Meant to be used for debugging or frame printing.
+ // Does not allocate, hence gc-safe.
+ Vector<const uint8_t> GetRawFunctionName(uint32_t func_index);
+
+ // Return the byte offset of the function identified by the given index.
+ // The offset will be relative to the start of the module bytes.
+ // Returns -1 if the function index is invalid.
+ int GetFunctionOffset(uint32_t func_index);
+
+ // Returns the function containing the given byte offset.
+ // Returns -1 if the byte offset is not contained in any function of this
+ // module.
+ int GetContainingFunction(uint32_t byte_offset);
+
+ // Translate from byte offset in the module to function number and byte offset
+ // within that function, encoded as line and column in the position info.
+ // Returns true if the position is valid inside this module, false otherwise.
+ bool GetPositionInfo(uint32_t position, Script::PositionInfo* info);
+
+ // Get the source position from a given function index and byte offset,
+ // for either asm.js or pure WASM modules.
+ static int GetSourcePosition(Handle<WasmSharedModuleData>,
+ uint32_t func_index, uint32_t byte_offset,
+ bool is_at_number_conversion);
+
+ // Compute the disassembly of a wasm function.
+ // Returns the disassembly string and a list of <byte_offset, line, column>
+ // entries, mapping wasm byte offsets to line and column in the disassembly.
+ // The list is guaranteed to be ordered by the byte_offset.
+ // Returns an empty string and empty vector if the function index is invalid.
+ debug::WasmDisassembly DisassembleFunction(int func_index);
+
+ // Extract a portion of the wire bytes as UTF-8 string.
+ // Returns a null handle if the respective bytes do not form a valid UTF-8
+ // string.
+ static MaybeHandle<String> ExtractUtf8StringFromModuleBytes(
+ Isolate* isolate, Handle<WasmSharedModuleData>, wasm::WireBytesRef ref);
+ static MaybeHandle<String> ExtractUtf8StringFromModuleBytes(
+ Isolate* isolate, Handle<SeqOneByteString> module_bytes,
+ wasm::WireBytesRef ref);
+
+ // Get a list of all possible breakpoints within a given range of this module.
+ bool GetPossibleBreakpoints(const debug::Location& start,
+ const debug::Location& end,
+ std::vector<debug::BreakLocation>* locations);
+
+ // Return an empty handle if no breakpoint is hit at that location, or a
+ // FixedArray with all hit breakpoint objects.
+ static MaybeHandle<FixedArray> CheckBreakPoints(Isolate*,
+ Handle<WasmSharedModuleData>,
+ int position);
+
DECL_OPTIONAL_ACCESSORS(lazy_compilation_orchestrator, Foreign)
};
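
With the name-lookup API moved onto WasmSharedModuleData, callers first fetch the shared data from the compiled module, mirroring the SetBreakPoint change earlier in this patch. A usage sketch, assuming a live isolate and compiled_module (not taken verbatim from this patch):

    Handle<WasmSharedModuleData> shared(compiled_module->shared(), isolate);
    Handle<String> name =
        WasmSharedModuleData::GetFunctionName(isolate, shared, func_index);
    // Falls back to "<WASM UNNAMED>" when the function has no valid name.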
@@ -360,16 +424,13 @@ class WasmCompiledModule : public FixedArray {
#define WCM_OBJECT_OR_WEAK(TYPE, NAME, ID, TYPE_CHECK, SETTER_MODIFIER) \
public: \
- inline Handle<TYPE> NAME() const; \
- inline MaybeHandle<TYPE> maybe_##NAME() const; \
- inline TYPE* maybe_ptr_to_##NAME() const; \
- inline TYPE* ptr_to_##NAME() const; \
+ inline TYPE* maybe_##NAME() const; \
+ inline TYPE* NAME() const; \
inline bool has_##NAME() const; \
inline void reset_##NAME(); \
\
SETTER_MODIFIER: \
- inline void set_##NAME(Handle<TYPE> value); \
- inline void set_ptr_to_##NAME(TYPE* value);
+ inline void set_##NAME(TYPE* value);
#define WCM_OBJECT(TYPE, NAME) \
WCM_OBJECT_OR_WEAK(TYPE, NAME, kID_##NAME, obj->Is##TYPE(), public)
@@ -392,7 +453,7 @@ class WasmCompiledModule : public FixedArray {
public) \
\
public: \
- inline Handle<TYPE> NAME() const;
+ inline TYPE* NAME() const;
// Add values here if they are required for creating new instances or
// for deserialization, and if they are serializable.
@@ -400,7 +461,7 @@ class WasmCompiledModule : public FixedArray {
// we embed the generated code with a value, then we track that value here.
#define CORE_WCM_PROPERTY_TABLE(MACRO) \
MACRO(WASM_OBJECT, WasmSharedModuleData, shared) \
- MACRO(OBJECT, Context, native_context) \
+ MACRO(WEAK_LINK, Context, native_context) \
MACRO(CONST_OBJECT, FixedArray, export_wrappers) \
MACRO(OBJECT, FixedArray, weak_exported_functions) \
MACRO(WASM_OBJECT, WasmCompiledModule, next_instance) \
@@ -410,7 +471,8 @@ class WasmCompiledModule : public FixedArray {
MACRO(OBJECT, FixedArray, handler_table) \
MACRO(OBJECT, FixedArray, source_positions) \
MACRO(OBJECT, Foreign, native_module) \
- MACRO(OBJECT, FixedArray, lazy_compile_data)
+ MACRO(OBJECT, FixedArray, lazy_compile_data) \
+ MACRO(SMALL_CONST_NUMBER, bool, use_trap_handler)
#define GC_WCM_PROPERTY_TABLE(MACRO) \
MACRO(SMALL_CONST_NUMBER, uint32_t, num_imported_functions) \
@@ -418,8 +480,7 @@ class WasmCompiledModule : public FixedArray {
MACRO(OBJECT, FixedArray, function_tables) \
MACRO(OBJECT, FixedArray, signature_tables) \
MACRO(CONST_OBJECT, FixedArray, empty_function_tables) \
- MACRO(CONST_OBJECT, FixedArray, empty_signature_tables) \
- MACRO(SMALL_CONST_NUMBER, uint32_t, initial_pages)
+ MACRO(CONST_OBJECT, FixedArray, empty_signature_tables)
// TODO(mtrofin): this is unnecessary when we stop needing
// FLAG_wasm_jit_to_native, because we have instance_id on NativeModule.
@@ -449,7 +510,7 @@ class WasmCompiledModule : public FixedArray {
Isolate* isolate, wasm::WasmModule* module, Handle<FixedArray> code_table,
Handle<FixedArray> export_wrappers,
const std::vector<wasm::GlobalHandleAddress>& function_tables,
- const std::vector<wasm::GlobalHandleAddress>& signature_tables);
+      bool use_trap_handler);
static Handle<WasmCompiledModule> Clone(Isolate* isolate,
Handle<WasmCompiledModule> module);
@@ -458,8 +519,6 @@ class WasmCompiledModule : public FixedArray {
// TODO(mtrofin): delete this when we don't need FLAG_wasm_jit_to_native
static void ResetGCModel(Isolate* isolate, WasmCompiledModule* module);
- uint32_t default_mem_size() const;
-
wasm::NativeModule* GetNativeModule() const;
void InsertInChain(WasmModuleObject*);
void RemoveFromChain();
@@ -470,14 +529,6 @@ class WasmCompiledModule : public FixedArray {
#undef DECLARATION
public:
-// Allow to call method on WasmSharedModuleData also on this object.
-#define FORWARD_SHARED(type, name) inline type name();
- FORWARD_SHARED(SeqOneByteString*, module_bytes)
- FORWARD_SHARED(wasm::WasmModule*, module)
- FORWARD_SHARED(Script*, script)
- FORWARD_SHARED(bool, is_asm_js)
-#undef FORWARD_SHARED
-
static bool IsWasmCompiledModule(Object* obj);
void PrintInstancesChain();
@@ -485,73 +536,6 @@ class WasmCompiledModule : public FixedArray {
static void ReinitializeAfterDeserialization(Isolate*,
Handle<WasmCompiledModule>);
- // Get the module name, if set. Returns an empty handle otherwise.
- static MaybeHandle<String> GetModuleNameOrNull(
- Isolate* isolate, Handle<WasmCompiledModule> compiled_module);
-
- // Get the function name of the function identified by the given index.
- // Returns a null handle if the function is unnamed or the name is not a valid
- // UTF-8 string.
- static MaybeHandle<String> GetFunctionNameOrNull(
- Isolate* isolate, Handle<WasmCompiledModule> compiled_module,
- uint32_t func_index);
-
- // Get the function name of the function identified by the given index.
- // Returns "<WASM UNNAMED>" if the function is unnamed or the name is not a
- // valid UTF-8 string.
- static Handle<String> GetFunctionName(
- Isolate* isolate, Handle<WasmCompiledModule> compiled_module,
- uint32_t func_index);
-
- // Get the raw bytes of the function name of the function identified by the
- // given index.
- // Meant to be used for debugging or frame printing.
- // Does not allocate, hence gc-safe.
- Vector<const uint8_t> GetRawFunctionName(uint32_t func_index);
-
- // Return the byte offset of the function identified by the given index.
- // The offset will be relative to the start of the module bytes.
- // Returns -1 if the function index is invalid.
- int GetFunctionOffset(uint32_t func_index);
-
- // Returns the function containing the given byte offset.
- // Returns -1 if the byte offset is not contained in any function of this
- // module.
- int GetContainingFunction(uint32_t byte_offset);
-
- // Translate from byte offset in the module to function number and byte offset
- // within that function, encoded as line and column in the position info.
- // Returns true if the position is valid inside this module, false otherwise.
- bool GetPositionInfo(uint32_t position, Script::PositionInfo* info);
-
- // Get the source position from a given function index and byte offset,
- // for either asm.js or pure WASM modules.
- static int GetSourcePosition(Handle<WasmCompiledModule> compiled_module,
- uint32_t func_index, uint32_t byte_offset,
- bool is_at_number_conversion);
-
- // Compute the disassembly of a wasm function.
- // Returns the disassembly string and a list of <byte_offset, line, column>
- // entries, mapping wasm byte offsets to line and column in the disassembly.
- // The list is guaranteed to be ordered by the byte_offset.
- // Returns an empty string and empty vector if the function index is invalid.
- debug::WasmDisassembly DisassembleFunction(int func_index);
-
- // Extract a portion of the wire bytes as UTF-8 string.
- // Returns a null handle if the respective bytes do not form a valid UTF-8
- // string.
- static MaybeHandle<String> ExtractUtf8StringFromModuleBytes(
- Isolate* isolate, Handle<WasmCompiledModule> compiled_module,
- wasm::WireBytesRef ref);
- static MaybeHandle<String> ExtractUtf8StringFromModuleBytes(
- Isolate* isolate, Handle<SeqOneByteString> module_bytes,
- wasm::WireBytesRef ref);
-
- // Get a list of all possible breakpoints within a given range of this module.
- bool GetPossibleBreakpoints(const debug::Location& start,
- const debug::Location& end,
- std::vector<debug::BreakLocation>* locations);
-
// Set a breakpoint on the given byte position inside the given module.
// This will affect all live and future instances of the module.
// The passed position might be modified to point to the next breakable
@@ -561,10 +545,6 @@ class WasmCompiledModule : public FixedArray {
static bool SetBreakPoint(Handle<WasmCompiledModule>, int* position,
Handle<Object> break_point_object);
- // Return an empty handle if no breakpoint is hit at that location, or a
- // FixedArray with all hit breakpoint objects.
- MaybeHandle<FixedArray> CheckBreakPoints(int position);
-
inline void ReplaceCodeTableForTesting(
std::vector<wasm::WasmCode*>&& testing_table);
@@ -645,8 +625,8 @@ class WasmDebugInfo : public FixedArray {
std::vector<std::pair<uint32_t, int>> GetInterpretedStack(
Address frame_pointer);
- std::unique_ptr<wasm::InterpretedFrame> GetInterpretedFrame(
- Address frame_pointer, int frame_index);
+ wasm::WasmInterpreter::FramePtr GetInterpretedFrame(Address frame_pointer,
+ int frame_index);
// Unwind the interpreted stack belonging to the passed interpreter entry
// frame.
diff --git a/deps/v8/src/wasm/wasm-opcodes.cc b/deps/v8/src/wasm/wasm-opcodes.cc
index 5188d7801e..b503aa1a5e 100644
--- a/deps/v8/src/wasm/wasm-opcodes.cc
+++ b/deps/v8/src/wasm/wasm-opcodes.cc
@@ -49,6 +49,9 @@ namespace wasm {
#define CASE_CONVERT_OP(name, RES, SRC, src_suffix, str) \
CASE_##RES##_OP(U##name##SRC, str "_u/" src_suffix) \
CASE_##RES##_OP(S##name##SRC, str "_s/" src_suffix)
+#define CASE_CONVERT_SAT_OP(name, RES, SRC, src_suffix, str) \
+ CASE_##RES##_OP(U##name##Sat##SRC, str "_u:sat/" src_suffix) \
+ CASE_##RES##_OP(S##name##Sat##SRC, str "_s:sat/" src_suffix)
#define CASE_L32_OP(name, str) \
CASE_SIGN_OP(I32, name##8, str "8") \
CASE_SIGN_OP(I32, name##16, str "16") \
@@ -98,6 +101,10 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_I32_OP(ConvertI64, "wrap/i64")
CASE_CONVERT_OP(Convert, INT, F32, "f32", "trunc")
CASE_CONVERT_OP(Convert, INT, F64, "f64", "trunc")
+ // TODO(kschimpf): Add I64 versions of saturating conversions.
+ CASE_CONVERT_SAT_OP(Convert, I32, F32, "f32", "trunc")
+ CASE_CONVERT_SAT_OP(Convert, I32, F64, "f64", "trunc")
+
CASE_CONVERT_OP(Convert, I64, I32, "i32", "extend")
CASE_CONVERT_OP(Convert, F32, I32, "i32", "convert")
CASE_CONVERT_OP(Convert, F32, I64, "i64", "convert")
@@ -275,6 +282,7 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
#undef CASE_UNSIGNED_OP
#undef CASE_ALL_SIGN_OP
#undef CASE_CONVERT_OP
+#undef CASE_CONVERT_SAT_OP
#undef CASE_L32_OP
#undef CASE_U32_OP
@@ -340,11 +348,12 @@ enum WasmOpcodeSig : byte {
FOREACH_SIGNATURE(DECLARE_SIG_ENUM)
};
#undef DECLARE_SIG_ENUM
-
-#define DECLARE_SIG(name, ...) \
- constexpr ValueType kTypes_##name[] = {__VA_ARGS__}; \
- constexpr FunctionSig kSig_##name( \
- 1, static_cast<int>(arraysize(kTypes_##name)) - 1, kTypes_##name);
+#define DECLARE_SIG(name, ...) \
+ constexpr ValueType kTypes_##name[] = {__VA_ARGS__}; \
+ constexpr int kReturnsCount_##name = kTypes_##name[0] == kWasmStmt ? 0 : 1; \
+ constexpr FunctionSig kSig_##name( \
+ kReturnsCount_##name, static_cast<int>(arraysize(kTypes_##name)) - 1, \
+ kTypes_##name + (1 - kReturnsCount_##name));
FOREACH_SIGNATURE(DECLARE_SIG)
#undef DECLARE_SIG
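
The new DECLARE_SIG derives the return count from whether the first type is the kWasmStmt placeholder, so void signatures (the v_* entries) reuse the same flat type array with the placeholder skipped. A self-contained expansion sketch for v_ii, using minimal stand-ins for the V8 types (FunctionSig's real constructor shape is an assumption):

    #include <cstddef>

    enum ValueType { kWasmStmt, kWasmI32 };
    struct FunctionSig {
      constexpr FunctionSig(size_t rets, size_t params, const ValueType* reps)
          : return_count(rets), parameter_count(params), reps(reps) {}
      size_t return_count, parameter_count;
      const ValueType* reps;  // returns first, then parameters
    };

    // What DECLARE_SIG(v_ii, kWasmStmt, kWasmI32, kWasmI32) expands to:
    constexpr ValueType kTypes_v_ii[] = {kWasmStmt, kWasmI32, kWasmI32};
    constexpr int kReturnsCount_v_ii = kTypes_v_ii[0] == kWasmStmt ? 0 : 1;
    constexpr FunctionSig kSig_v_ii(
        kReturnsCount_v_ii, 3 - 1,                // 0 returns, 2 parameters
        kTypes_v_ii + (1 - kReturnsCount_v_ii));  // skip the placeholder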
@@ -376,7 +385,7 @@ struct GetAsmJsOpcodeSigIndex {
struct GetSimdOpcodeSigIndex {
constexpr WasmOpcodeSig operator()(byte opcode) const {
-#define CASE(name, opc, sig) opcode == (opc & 0xff) ? kSigEnum_##sig:
+#define CASE(name, opc, sig) opcode == (opc & 0xFF) ? kSigEnum_##sig:
return FOREACH_SIMD_0_OPERAND_OPCODE(CASE) kSigEnum_None;
#undef CASE
}
@@ -384,12 +393,20 @@ struct GetSimdOpcodeSigIndex {
struct GetAtomicOpcodeSigIndex {
constexpr WasmOpcodeSig operator()(byte opcode) const {
-#define CASE(name, opc, sig) opcode == (opc & 0xff) ? kSigEnum_##sig:
+#define CASE(name, opc, sig) opcode == (opc & 0xFF) ? kSigEnum_##sig:
return FOREACH_ATOMIC_OPCODE(CASE) kSigEnum_None;
#undef CASE
}
};
+struct GetNumericOpcodeSigIndex {
+ constexpr WasmOpcodeSig operator()(byte opcode) const {
+#define CASE(name, opc, sig) opcode == (opc & 0xFF) ? kSigEnum_##sig:
+ return FOREACH_NUMERIC_OPCODE(CASE) kSigEnum_None;
+#undef CASE
+ }
+};
+
constexpr std::array<WasmOpcodeSig, 256> kSimpleExprSigTable =
base::make_array<256>(GetOpcodeSigIndex{});
constexpr std::array<WasmOpcodeSig, 256> kSimpleAsmjsExprSigTable =
@@ -398,20 +415,26 @@ constexpr std::array<WasmOpcodeSig, 256> kSimdExprSigTable =
base::make_array<256>(GetSimdOpcodeSigIndex{});
constexpr std::array<WasmOpcodeSig, 256> kAtomicExprSigTable =
base::make_array<256>(GetAtomicOpcodeSigIndex{});
+constexpr std::array<WasmOpcodeSig, 256> kNumericExprSigTable =
+ base::make_array<256>(GetNumericOpcodeSigIndex{});
} // namespace
FunctionSig* WasmOpcodes::Signature(WasmOpcode opcode) {
- if (opcode >> 8 == kSimdPrefix) {
- return const_cast<FunctionSig*>(
- kSimpleExprSigs[kSimdExprSigTable[opcode & 0xff]]);
- } else if (opcode >> 8 == kAtomicPrefix) {
- return const_cast<FunctionSig*>(
- kSimpleExprSigs[kAtomicExprSigTable[opcode & 0xff]]);
- } else {
- DCHECK_GT(kSimpleExprSigTable.size(), opcode);
- return const_cast<FunctionSig*>(
- kSimpleExprSigs[kSimpleExprSigTable[opcode]]);
+ switch (opcode >> 8) {
+ case kSimdPrefix:
+ return const_cast<FunctionSig*>(
+ kSimpleExprSigs[kSimdExprSigTable[opcode & 0xFF]]);
+ case kAtomicPrefix:
+ return const_cast<FunctionSig*>(
+ kSimpleExprSigs[kAtomicExprSigTable[opcode & 0xFF]]);
+ case kNumericPrefix:
+ return const_cast<FunctionSig*>(
+ kSimpleExprSigs[kNumericExprSigTable[opcode & 0xFF]]);
+ default:
+ DCHECK_GT(kSimpleExprSigTable.size(), opcode);
+ return const_cast<FunctionSig*>(
+ kSimpleExprSigs[kSimpleExprSigTable[opcode]]);
}
}
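
Dispatch is on the prefix byte: two-byte opcodes carry their prefix in bits 8-15 and the sub-opcode in the low byte. A usage sketch for the new numeric prefix (kExprI32SConvertSatF32 is the enum name the opcode macros generate; assumed here rather than shown in this hunk):

    // 0xfc00: prefix 0xfc (kNumericPrefix), sub-opcode 0x00.
    FunctionSig* sig = WasmOpcodes::Signature(kExprI32SConvertSatF32);
    // Routes through kNumericExprSigTable[0x00] and yields the i_f
    // signature: one i32 result from one f32 parameter.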
@@ -421,6 +444,14 @@ FunctionSig* WasmOpcodes::AsmjsSignature(WasmOpcode opcode) {
kSimpleExprSigs[kSimpleAsmjsExprSigTable[opcode]]);
}
+// Define constexpr arrays.
+constexpr uint8_t LoadType::kLoadSizeLog2[];
+constexpr ValueType LoadType::kValueType[];
+constexpr MachineType LoadType::kMemType[];
+constexpr uint8_t StoreType::kStoreSizeLog2[];
+constexpr ValueType StoreType::kValueType[];
+constexpr MachineRepresentation StoreType::kMemRep[];
+
int WasmOpcodes::TrapReasonToMessageId(TrapReason reason) {
switch (reason) {
#define TRAPREASON_TO_MESSAGE(name) \
diff --git a/deps/v8/src/wasm/wasm-opcodes.h b/deps/v8/src/wasm/wasm-opcodes.h
index e8cb348b53..9f8232c902 100644
--- a/deps/v8/src/wasm/wasm-opcodes.h
+++ b/deps/v8/src/wasm/wasm-opcodes.h
@@ -9,25 +9,12 @@
#include "src/machine-type.h"
#include "src/runtime/runtime.h"
#include "src/signature.h"
+#include "src/wasm/wasm-constants.h"
namespace v8 {
namespace internal {
namespace wasm {
-// Binary encoding of the module header.
-const uint32_t kWasmMagic = 0x6d736100;
-const uint32_t kWasmVersion = 0x01;
-
-// Binary encoding of local types.
-enum ValueTypeCode : uint8_t {
- kLocalVoid = 0x40,
- kLocalI32 = 0x7f,
- kLocalI64 = 0x7e,
- kLocalF32 = 0x7d,
- kLocalF64 = 0x7c,
- kLocalS128 = 0x7b
-};
-
// We reuse the internal machine type to represent WebAssembly types.
// A typedef improves readability without adding a whole new type system.
using ValueType = MachineRepresentation;
@@ -45,9 +32,6 @@ bool IsJSCompatibleSignature(const FunctionSig* sig);
using WasmName = Vector<const char>;
-using WasmCodePosition = int;
-constexpr WasmCodePosition kNoCodePosition = -1;
-
// Control expressions and blocks.
#define FOREACH_CONTROL_OPCODE(V) \
V(Unreachable, 0x00, _) \
@@ -102,15 +86,15 @@ constexpr WasmCodePosition kNoCodePosition = -1;
// Store memory expressions.
#define FOREACH_STORE_MEM_OPCODE(V) \
- V(I32StoreMem, 0x36, i_ii) \
- V(I64StoreMem, 0x37, l_il) \
- V(F32StoreMem, 0x38, f_if) \
- V(F64StoreMem, 0x39, d_id) \
- V(I32StoreMem8, 0x3a, i_ii) \
- V(I32StoreMem16, 0x3b, i_ii) \
- V(I64StoreMem8, 0x3c, l_il) \
- V(I64StoreMem16, 0x3d, l_il) \
- V(I64StoreMem32, 0x3e, l_il)
+ V(I32StoreMem, 0x36, v_ii) \
+ V(I64StoreMem, 0x37, v_il) \
+ V(F32StoreMem, 0x38, v_if) \
+ V(F64StoreMem, 0x39, v_id) \
+ V(I32StoreMem8, 0x3a, v_ii) \
+ V(I32StoreMem16, 0x3b, v_ii) \
+ V(I64StoreMem8, 0x3c, v_il) \
+ V(I64StoreMem16, 0x3d, v_il) \
+ V(I64StoreMem32, 0x3e, v_il)
// Miscellaneous memory expressions
#define FOREACH_MISC_MEM_OPCODE(V) \
@@ -413,15 +397,22 @@ constexpr WasmCodePosition kNoCodePosition = -1;
#define FOREACH_SIMD_MEM_OPCODE(V) \
V(S128LoadMem, 0xfd80, s_i) \
- V(S128StoreMem, 0xfd81, s_is)
+ V(S128StoreMem, 0xfd81, v_is)
+
+#define FOREACH_NUMERIC_OPCODE(V) \
+ V(I32SConvertSatF32, 0xfc00, i_f) \
+ V(I32UConvertSatF32, 0xfc01, i_f) \
+ V(I32SConvertSatF64, 0xfc02, i_d) \
+ V(I32UConvertSatF64, 0xfc03, i_d)
+// TODO(kschimpf): Add remaining i64 numeric opcodes.
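
These saturating conversions differ from the trapping trunc opcodes in their edge cases. A reference sketch of what i32.trunc_sat_f32_s computes, per the nontrapping float-to-int proposal (an assumption about the intended semantics, not code from this patch):

    #include <cmath>
    #include <cstdint>
    #include <limits>

    int32_t I32SConvertSatF32(float x) {
      if (std::isnan(x)) return 0;  // NaN saturates to zero, never traps
      if (x <= static_cast<float>(std::numeric_limits<int32_t>::min()))
        return std::numeric_limits<int32_t>::min();   // clamp low
      if (x >= static_cast<float>(std::numeric_limits<int32_t>::max()))
        return std::numeric_limits<int32_t>::max();   // clamp high
      return static_cast<int32_t>(std::truncf(x));    // in-range: truncate
    }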
#define FOREACH_ATOMIC_OPCODE(V) \
V(I32AtomicLoad, 0xfe10, i_i) \
V(I32AtomicLoad8U, 0xfe12, i_i) \
V(I32AtomicLoad16U, 0xfe13, i_i) \
- V(I32AtomicStore, 0xfe17, i_ii) \
- V(I32AtomicStore8U, 0xfe19, i_ii) \
- V(I32AtomicStore16U, 0xfe1a, i_ii) \
+ V(I32AtomicStore, 0xfe17, v_ii) \
+ V(I32AtomicStore8U, 0xfe19, v_ii) \
+ V(I32AtomicStore16U, 0xfe1a, v_ii) \
V(I32AtomicAdd, 0xfe1e, i_ii) \
V(I32AtomicAdd8U, 0xfe20, i_ii) \
V(I32AtomicAdd16U, 0xfe21, i_ii) \
@@ -457,38 +448,42 @@ constexpr WasmCodePosition kNoCodePosition = -1;
FOREACH_SIMD_1_OPERAND_OPCODE(V) \
FOREACH_SIMD_MASK_OPERAND_OPCODE(V) \
FOREACH_SIMD_MEM_OPCODE(V) \
- FOREACH_ATOMIC_OPCODE(V)
+ FOREACH_ATOMIC_OPCODE(V) \
+ FOREACH_NUMERIC_OPCODE(V)
// All signatures.
-#define FOREACH_SIGNATURE(V) \
- FOREACH_SIMD_SIGNATURE(V) \
- V(i_ii, kWasmI32, kWasmI32, kWasmI32) \
- V(i_i, kWasmI32, kWasmI32) \
- V(i_v, kWasmI32) \
- V(i_ff, kWasmI32, kWasmF32, kWasmF32) \
- V(i_f, kWasmI32, kWasmF32) \
- V(i_dd, kWasmI32, kWasmF64, kWasmF64) \
- V(i_d, kWasmI32, kWasmF64) \
- V(i_l, kWasmI32, kWasmI64) \
- V(l_ll, kWasmI64, kWasmI64, kWasmI64) \
- V(i_ll, kWasmI32, kWasmI64, kWasmI64) \
- V(l_l, kWasmI64, kWasmI64) \
- V(l_i, kWasmI64, kWasmI32) \
- V(l_f, kWasmI64, kWasmF32) \
- V(l_d, kWasmI64, kWasmF64) \
- V(f_ff, kWasmF32, kWasmF32, kWasmF32) \
- V(f_f, kWasmF32, kWasmF32) \
- V(f_d, kWasmF32, kWasmF64) \
- V(f_i, kWasmF32, kWasmI32) \
- V(f_l, kWasmF32, kWasmI64) \
- V(d_dd, kWasmF64, kWasmF64, kWasmF64) \
- V(d_d, kWasmF64, kWasmF64) \
- V(d_f, kWasmF64, kWasmF32) \
- V(d_i, kWasmF64, kWasmI32) \
- V(d_l, kWasmF64, kWasmI64) \
- V(d_id, kWasmF64, kWasmI32, kWasmF64) \
- V(f_if, kWasmF32, kWasmI32, kWasmF32) \
- V(l_il, kWasmI64, kWasmI32, kWasmI64) \
+#define FOREACH_SIGNATURE(V) \
+ FOREACH_SIMD_SIGNATURE(V) \
+ V(i_ii, kWasmI32, kWasmI32, kWasmI32) \
+ V(i_i, kWasmI32, kWasmI32) \
+ V(i_v, kWasmI32) \
+ V(i_ff, kWasmI32, kWasmF32, kWasmF32) \
+ V(i_f, kWasmI32, kWasmF32) \
+ V(i_dd, kWasmI32, kWasmF64, kWasmF64) \
+ V(i_d, kWasmI32, kWasmF64) \
+ V(i_l, kWasmI32, kWasmI64) \
+ V(l_ll, kWasmI64, kWasmI64, kWasmI64) \
+ V(i_ll, kWasmI32, kWasmI64, kWasmI64) \
+ V(l_l, kWasmI64, kWasmI64) \
+ V(l_i, kWasmI64, kWasmI32) \
+ V(l_f, kWasmI64, kWasmF32) \
+ V(l_d, kWasmI64, kWasmF64) \
+ V(f_ff, kWasmF32, kWasmF32, kWasmF32) \
+ V(f_f, kWasmF32, kWasmF32) \
+ V(f_d, kWasmF32, kWasmF64) \
+ V(f_i, kWasmF32, kWasmI32) \
+ V(f_l, kWasmF32, kWasmI64) \
+ V(d_dd, kWasmF64, kWasmF64, kWasmF64) \
+ V(d_d, kWasmF64, kWasmF64) \
+ V(d_f, kWasmF64, kWasmF32) \
+ V(d_i, kWasmF64, kWasmI32) \
+ V(d_l, kWasmF64, kWasmI64) \
+ V(v_ii, kWasmStmt, kWasmI32, kWasmI32) \
+ V(v_id, kWasmStmt, kWasmI32, kWasmF64) \
+ V(d_id, kWasmF64, kWasmI32, kWasmF64) \
+ V(v_if, kWasmStmt, kWasmI32, kWasmF32) \
+ V(f_if, kWasmF32, kWasmI32, kWasmF32) \
+  V(v_il, kWasmStmt, kWasmI32, kWasmI64)           \
+  V(l_il, kWasmI64, kWasmI32, kWasmI64)            \
V(i_iii, kWasmI32, kWasmI32, kWasmI32, kWasmI32)
#define FOREACH_SIMD_SIGNATURE(V) \
@@ -501,6 +496,7 @@ constexpr WasmCodePosition kNoCodePosition = -1;
V(s_sss, kWasmS128, kWasmS128, kWasmS128, kWasmS128)
#define FOREACH_PREFIX(V) \
+ V(Numeric, 0xfc) \
V(Simd, 0xfd) \
V(Atomic, 0xfe)
@@ -532,6 +528,117 @@ enum TrapReason {
#undef DECLARE_ENUM
};
+// TODO(clemensh): Compute memtype and size from ValueType once we have c++14
+// constexpr support.
+#define FOREACH_LOAD_TYPE(V) \
+ V(I32, , Int32, 2) \
+ V(I32, 8S, Int8, 0) \
+ V(I32, 8U, Uint8, 0) \
+ V(I32, 16S, Int16, 1) \
+ V(I32, 16U, Uint16, 1) \
+ V(I64, , Int64, 3) \
+ V(I64, 8S, Int8, 0) \
+ V(I64, 8U, Uint8, 0) \
+ V(I64, 16S, Int16, 1) \
+ V(I64, 16U, Uint16, 1) \
+ V(I64, 32S, Int32, 2) \
+ V(I64, 32U, Uint32, 2) \
+ V(F32, , Float32, 2) \
+ V(F64, , Float64, 3) \
+ V(S128, , Simd128, 4)
+
+class LoadType {
+ public:
+ enum LoadTypeValue : uint8_t {
+#define DEF_ENUM(type, suffix, ...) k##type##Load##suffix,
+ FOREACH_LOAD_TYPE(DEF_ENUM)
+#undef DEF_ENUM
+ };
+
+  // Allow implicit conversion of the enum value to this wrapper.
+ constexpr LoadType(LoadTypeValue val) // NOLINT(runtime/explicit)
+ : val_(val) {}
+
+ constexpr LoadTypeValue value() const { return val_; }
+ constexpr unsigned size_log_2() const { return kLoadSizeLog2[val_]; }
+ constexpr unsigned size() const { return 1 << size_log_2(); }
+ constexpr ValueType value_type() const { return kValueType[val_]; }
+ constexpr MachineType mem_type() const { return kMemType[val_]; }
+
+ private:
+ const LoadTypeValue val_;
+
+ static constexpr uint8_t kLoadSizeLog2[] = {
+#define LOAD_SIZE(_, __, ___, size) size,
+ FOREACH_LOAD_TYPE(LOAD_SIZE)
+#undef LOAD_SIZE
+ };
+
+ static constexpr ValueType kValueType[] = {
+#define VALUE_TYPE(type, ...) kWasm##type,
+ FOREACH_LOAD_TYPE(VALUE_TYPE)
+#undef VALUE_TYPE
+ };
+
+ static constexpr MachineType kMemType[] = {
+#define MEMTYPE(_, __, memtype, ___) MachineType::memtype(),
+ FOREACH_LOAD_TYPE(MEMTYPE)
+#undef MEMTYPE
+ };
+};
+
+#define FOREACH_STORE_TYPE(V) \
+ V(I32, , Word32, 2) \
+ V(I32, 8, Word8, 0) \
+ V(I32, 16, Word16, 1) \
+ V(I64, , Word64, 3) \
+ V(I64, 8, Word8, 0) \
+ V(I64, 16, Word16, 1) \
+ V(I64, 32, Word32, 2) \
+ V(F32, , Float32, 2) \
+ V(F64, , Float64, 3) \
+ V(S128, , Simd128, 4)
+
+class StoreType {
+ public:
+ enum StoreTypeValue : uint8_t {
+#define DEF_ENUM(type, suffix, ...) k##type##Store##suffix,
+ FOREACH_STORE_TYPE(DEF_ENUM)
+#undef DEF_ENUM
+ };
+
+  // Allow implicit conversion of the enum value to this wrapper.
+ constexpr StoreType(StoreTypeValue val) // NOLINT(runtime/explicit)
+ : val_(val) {}
+
+ constexpr StoreTypeValue value() const { return val_; }
+ constexpr unsigned size_log_2() const { return kStoreSizeLog2[val_]; }
+ constexpr unsigned size() const { return 1 << size_log_2(); }
+ constexpr ValueType value_type() const { return kValueType[val_]; }
+  constexpr MachineRepresentation mem_rep() const { return kMemRep[val_]; }
+
+ private:
+ const StoreTypeValue val_;
+
+ static constexpr uint8_t kStoreSizeLog2[] = {
+#define STORE_SIZE(_, __, ___, size) size,
+ FOREACH_STORE_TYPE(STORE_SIZE)
+#undef STORE_SIZE
+ };
+
+ static constexpr ValueType kValueType[] = {
+#define VALUE_TYPE(type, ...) kWasm##type,
+ FOREACH_STORE_TYPE(VALUE_TYPE)
+#undef VALUE_TYPE
+ };
+
+ static constexpr MachineRepresentation kMemRep[] = {
+#define MEMREP(_, __, memrep, ___) MachineRepresentation::k##memrep,
+ FOREACH_STORE_TYPE(MEMREP)
+#undef MEMREP
+ };
+};
+
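
Both wrappers are constexpr throughout (the out-of-line array definitions land in wasm-opcodes.cc above), so the table lookups fold at compile time. A usage sketch, only meaningful inside V8 where MachineType and the kWasm* constants are visible:

    constexpr LoadType lt = LoadType::kI64Load16S;  // implicit conversion
    static_assert(lt.size_log_2() == 1, "two-byte access");
    static_assert(lt.size() == 2, "1 << size_log_2()");
    // lt.value_type() == kWasmI64, lt.mem_type() == MachineType::Int16()

    constexpr StoreType st = StoreType::kI32Store8;
    static_assert(st.size() == 1, "single-byte store");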
// A collection of opcode-related static methods.
class V8_EXPORT_PRIVATE WasmOpcodes {
public:
@@ -548,7 +655,7 @@ class V8_EXPORT_PRIVATE WasmOpcodes {
static const char* TrapReasonMessage(TrapReason reason);
static byte MemSize(MachineType type) {
- return 1 << ElementSizeLog2Of(type.representation());
+ return MemSize(type.representation());
}
static byte MemSize(ValueType type) { return 1 << ElementSizeLog2Of(type); }
diff --git a/deps/v8/src/wasm/wasm-serialization.cc b/deps/v8/src/wasm/wasm-serialization.cc
index 337692b595..4466672f37 100644
--- a/deps/v8/src/wasm/wasm-serialization.cc
+++ b/deps/v8/src/wasm/wasm-serialization.cc
@@ -9,10 +9,12 @@
#include "src/external-reference-table.h"
#include "src/objects-inl.h"
#include "src/objects.h"
+#include "src/snapshot/code-serializer.h"
#include "src/snapshot/serializer-common.h"
#include "src/version.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-objects.h"
@@ -21,20 +23,7 @@
namespace v8 {
namespace internal {
namespace wasm {
-
namespace {
-void SetRawTargetData(RelocInfo* rinfo, uint32_t value) {
- if (rinfo->target_address_size() == sizeof(uint32_t)) {
- *(reinterpret_cast<uint32_t*>(rinfo->target_address_address())) = value;
- return;
- } else {
- DCHECK_EQ(rinfo->target_address_size(), sizeof(intptr_t));
- DCHECK_EQ(rinfo->target_address_size(), 8);
- *(reinterpret_cast<intptr_t*>(rinfo->target_address_address())) =
- static_cast<intptr_t>(value);
- return;
- }
-}
class Writer {
public:
@@ -108,31 +97,91 @@ class Reader {
Vector<const byte> buffer_;
};
-} // namespace
+constexpr size_t kVersionSize = 4 * sizeof(uint32_t);
-size_t WasmSerializedFormatVersion::GetVersionSize() { return kVersionSize; }
-
-bool WasmSerializedFormatVersion::WriteVersion(Isolate* isolate,
- Vector<byte> buffer) {
- if (buffer.size() < GetVersionSize()) return false;
+void WriteVersion(Isolate* isolate, Vector<byte> buffer) {
+ DCHECK_GE(buffer.size(), kVersionSize);
Writer writer(buffer);
writer.Write(SerializedData::ComputeMagicNumber(
ExternalReferenceTable::instance(isolate)));
writer.Write(Version::Hash());
writer.Write(static_cast<uint32_t>(CpuFeatures::SupportedFeatures()));
writer.Write(FlagList::Hash());
- return true;
}
-bool WasmSerializedFormatVersion::IsSupportedVersion(
- Isolate* isolate, const Vector<const byte> buffer) {
+bool IsSupportedVersion(Isolate* isolate, const Vector<const byte> buffer) {
if (buffer.size() < kVersionSize) return false;
byte version[kVersionSize];
- CHECK(WriteVersion(isolate, {version, kVersionSize}));
+ WriteVersion(isolate, {version, kVersionSize});
if (memcmp(buffer.start(), version, kVersionSize) == 0) return true;
return false;
}
+} // namespace
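
The four uint32_t fields written by WriteVersion form a 16-byte fingerprint of the producing configuration; IsSupportedVersion rebuilds it locally and memcmps. A layout sketch (SerializedVersionHeader is a hypothetical illustration, not a type in this patch):

    #include <cstdint>

    struct SerializedVersionHeader {  // hypothetical, for illustration only
      uint32_t magic_number;   // SerializedData::ComputeMagicNumber(...)
      uint32_t version_hash;   // Version::Hash()
      uint32_t cpu_features;   // CpuFeatures::SupportedFeatures()
      uint32_t flag_hash;      // FlagList::Hash()
    };
    static_assert(sizeof(SerializedVersionHeader) == 4 * sizeof(uint32_t),
                  "matches kVersionSize");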
+
+enum SerializationSection { Init, Metadata, Stubs, CodeSection, Done };
+
+class V8_EXPORT_PRIVATE NativeModuleSerializer {
+ public:
+ explicit NativeModuleSerializer(Isolate*, const NativeModule*);
+ size_t Measure() const;
+ size_t Write(Vector<byte>);
+ bool IsDone() const { return state_ == Done; }
+
+ private:
+ size_t MeasureHeader() const;
+ static size_t GetCodeHeaderSize();
+ size_t MeasureCode(const WasmCode*) const;
+ size_t MeasureCopiedStubs() const;
+ FixedArray* GetHandlerTable(const WasmCode*) const;
+ ByteArray* GetSourcePositions(const WasmCode*) const;
+
+ void BufferHeader();
+ // we buffer all the stubs because they are small
+ void BufferCopiedStubs();
+ void BufferCodeInAllocatedScratch(const WasmCode*);
+ void BufferCurrentWasmCode();
+ size_t DrainBuffer(Vector<byte> dest);
+ uint32_t EncodeBuiltinOrStub(Address);
+
+ Isolate* const isolate_ = nullptr;
+ const NativeModule* const native_module_ = nullptr;
+ SerializationSection state_ = Init;
+ uint32_t index_ = 0;
+ std::vector<byte> scratch_;
+ Vector<byte> remaining_;
+ // wasm and copied stubs reverse lookup
+ std::map<Address, uint32_t> wasm_targets_lookup_;
+ // immovable builtins and runtime entries lookup
+ std::map<Address, uint32_t> reference_table_lookup_;
+ std::map<Address, uint32_t> stub_lookup_;
+ std::map<Address, uint32_t> builtin_lookup_;
+};
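
A hypothetical driver for the incremental interface above: measure once, then drain Write into the buffer until the Done state is reached (the Vector offset-advance mirrors the dest = dest + DrainBuffer(dest) pattern used later in this file):

    NativeModuleSerializer serializer(isolate, native_module);
    std::vector<byte> out(serializer.Measure());
    Vector<byte> dest(out.data(), out.size());
    while (!serializer.IsDone()) {
      dest = dest + serializer.Write(dest);  // advance past written bytes
    }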
+
+class V8_EXPORT_PRIVATE NativeModuleDeserializer {
+ public:
+ explicit NativeModuleDeserializer(Isolate*, NativeModule*);
+  // Currently, we don't support streamed reading, even though the
+ // API suggests that.
+ bool Read(Vector<const byte>);
+
+ private:
+ void ExpectHeader();
+ void Expect(size_t size);
+ bool ReadHeader();
+ bool ReadCode();
+ bool ReadStubs();
+ Address GetTrampolineOrStubFromTag(uint32_t);
+
+ Isolate* const isolate_ = nullptr;
+ NativeModule* const native_module_ = nullptr;
+ std::vector<byte> scratch_;
+ std::vector<Address> stubs_;
+ Vector<const byte> unread_;
+ size_t current_expectation_ = 0;
+ uint32_t index_ = 0;
+};
+
NativeModuleSerializer::NativeModuleSerializer(Isolate* isolate,
const NativeModule* module)
: isolate_(isolate), native_module_(module) {
@@ -164,8 +213,8 @@ size_t NativeModuleSerializer::MeasureHeader() const {
sizeof(
uint32_t) + // imported fcts - i.e. index of first wasm function
sizeof(uint32_t) + // table count
- native_module_->specialization_data_.function_tables.size() *
- 2 // 2 same-sized tables, containing pointers
+ native_module_->specialization_data_.function_tables.size()
+ // function table, containing pointers
* sizeof(GlobalHandleAddress);
}
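
Spelled out, the header is now two counters plus one pointer-sized slot per table, since the paired signature tables are gone. Arithmetic sketch for a hypothetical module with three function tables on a 64-bit target:

    size_t header_size = sizeof(uint32_t)    // index of first wasm function
                       + sizeof(uint32_t)    // table count
                       + 3 * sizeof(void*);  // one GlobalHandleAddress each
    // == 4 + 4 + 24 == 32 bytes (vs. 56 with the old paired tables)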
@@ -182,7 +231,6 @@ void NativeModuleSerializer::BufferHeader() {
e = native_module_->specialization_data_.function_tables.size();
i < e; ++i) {
writer.Write(native_module_->specialization_data_.function_tables[i]);
- writer.Write(native_module_->specialization_data_.signature_tables[i]);
}
}
@@ -279,7 +327,7 @@ void NativeModuleSerializer::BufferCopiedStubs() {
FixedArray* NativeModuleSerializer::GetHandlerTable(
const WasmCode* code) const {
- if (code->kind() != WasmCode::Function) return nullptr;
+ if (code->kind() != WasmCode::kFunction) return nullptr;
uint32_t index = code->index();
// We write the address, the size, and then copy the code as-is, followed
// by reloc info, followed by handler table and source positions.
@@ -294,7 +342,7 @@ FixedArray* NativeModuleSerializer::GetHandlerTable(
ByteArray* NativeModuleSerializer::GetSourcePositions(
const WasmCode* code) const {
- if (code->kind() != WasmCode::Function) return nullptr;
+ if (code->kind() != WasmCode::kFunction) return nullptr;
uint32_t index = code->index();
Object* source_positions_entry =
native_module_->compiled_module()->source_positions()->get(
@@ -372,17 +420,17 @@ void NativeModuleSerializer::BufferCodeInAllocatedScratch(
case RelocInfo::CODE_TARGET: {
Address orig_target = orig_iter.rinfo()->target_address();
uint32_t tag = EncodeBuiltinOrStub(orig_target);
- SetRawTargetData(iter.rinfo(), tag);
+ SetWasmCalleeTag(iter.rinfo(), tag);
} break;
case RelocInfo::WASM_CALL: {
Address orig_target = orig_iter.rinfo()->wasm_call_address();
uint32_t tag = wasm_targets_lookup_[orig_target];
- SetRawTargetData(iter.rinfo(), tag);
+ SetWasmCalleeTag(iter.rinfo(), tag);
} break;
case RelocInfo::RUNTIME_ENTRY: {
Address orig_target = orig_iter.rinfo()->target_address();
uint32_t tag = reference_table_lookup_[orig_target];
- SetRawTargetData(iter.rinfo(), tag);
+ SetWasmCalleeTag(iter.rinfo(), tag);
} break;
default:
UNREACHABLE();
@@ -402,7 +450,7 @@ uint32_t NativeModuleSerializer::EncodeBuiltinOrStub(Address address) {
DCHECK(stub_iter != stub_lookup_.end());
uint32_t id = stub_iter->second;
DCHECK_LT(id, std::numeric_limits<uint16_t>::max());
- tag = id & 0x0000ffff;
+ tag = id & 0x0000FFFF;
}
return tag;
}
@@ -423,15 +471,20 @@ size_t NativeModuleSerializer::Write(Vector<byte> dest) {
dest = dest + DrainBuffer(dest);
if (remaining_.size() == 0) {
index_ = native_module_->num_imported_functions();
- BufferCurrentWasmCode();
- state_ = CodeSection;
+ if (index_ < native_module_->FunctionCount()) {
+ BufferCurrentWasmCode();
+ state_ = CodeSection;
+ } else {
+ state_ = Done;
+ }
}
break;
}
case CodeSection: {
dest = dest + DrainBuffer(dest);
if (remaining_.size() == 0) {
- if (++index_ < native_module_->FunctionCount()) {
+ ++index_; // Move to next code object.
+ if (index_ < native_module_->FunctionCount()) {
BufferCurrentWasmCode();
} else {
state_ = Done;
@@ -448,18 +501,22 @@ size_t NativeModuleSerializer::Write(Vector<byte> dest) {
}
// static
-std::pair<std::unique_ptr<byte[]>, size_t>
-NativeModuleSerializer::SerializeWholeModule(
+std::pair<std::unique_ptr<const byte[]>, size_t> SerializeNativeModule(
Isolate* isolate, Handle<WasmCompiledModule> compiled_module) {
+ if (!FLAG_wasm_jit_to_native) {
+ std::unique_ptr<ScriptData> script_data =
+ WasmCompiledModuleSerializer::SerializeWasmModule(isolate,
+ compiled_module);
+ script_data->ReleaseDataOwnership();
+ size_t size = static_cast<size_t>(script_data->length());
+ return {std::unique_ptr<const byte[]>(script_data->data()), size};
+ }
NativeModule* native_module = compiled_module->GetNativeModule();
NativeModuleSerializer serializer(isolate, native_module);
- size_t version_size = WasmSerializedFormatVersion::GetVersionSize();
+ size_t version_size = kVersionSize;
size_t buff_size = serializer.Measure() + version_size;
std::unique_ptr<byte[]> ret(new byte[buff_size]);
- if (!WasmSerializedFormatVersion::WriteVersion(isolate,
- {ret.get(), buff_size})) {
- return {};
- }
+ WriteVersion(isolate, {ret.get(), buff_size});
size_t written =
serializer.Write({ret.get() + version_size, buff_size - version_size});
@@ -500,18 +557,14 @@ bool NativeModuleDeserializer::ReadHeader() {
if (!ok) return false;
size_t table_count = reader.Read<uint32_t>();
- std::vector<GlobalHandleAddress> sigs(table_count);
std::vector<GlobalHandleAddress> funcs(table_count);
for (size_t i = 0; i < table_count; ++i) {
funcs[i] = reader.Read<GlobalHandleAddress>();
- sigs[i] = reader.Read<GlobalHandleAddress>();
}
- native_module_->signature_tables() = sigs;
native_module_->function_tables() = funcs;
// resize, so that from here on the native module can be
// asked about num_function_tables().
native_module_->empty_function_tables().resize(table_count);
- native_module_->empty_signature_tables().resize(table_count);
unread_ = unread_ + (start_size - reader.current_buffer().size());
return true;
@@ -558,7 +611,7 @@ bool NativeModuleDeserializer::ReadCode() {
}
WasmCode* ret = native_module_->AddOwnedCode(
code_buffer, std::move(reloc_info), reloc_size, Just(index_),
- WasmCode::Function, constant_pool_offset, stack_slot_count,
+ WasmCode::kFunction, constant_pool_offset, stack_slot_count,
safepoint_table_offset, protected_instructions, is_liftoff);
if (ret == nullptr) return false;
native_module_->SetCodeTable(index_, ret);
@@ -576,14 +629,15 @@ bool NativeModuleDeserializer::ReadCode() {
// We only expect {undefined}. We check for that when we add code.
iter.rinfo()->set_target_object(isolate_->heap()->undefined_value(),
SKIP_WRITE_BARRIER);
+ break;
}
case RelocInfo::CODE_TARGET: {
- uint32_t tag = *(reinterpret_cast<uint32_t*>(
- iter.rinfo()->target_address_address()));
+ uint32_t tag = GetWasmCalleeTag(iter.rinfo());
Address target = GetTrampolineOrStubFromTag(tag);
iter.rinfo()->set_target_address(nullptr, target, SKIP_WRITE_BARRIER,
SKIP_ICACHE_FLUSH);
- } break;
+ break;
+ }
case RelocInfo::RUNTIME_ENTRY: {
uint32_t orig_target = static_cast<uint32_t>(
reinterpret_cast<intptr_t>(iter.rinfo()->target_address()));
@@ -591,7 +645,8 @@ bool NativeModuleDeserializer::ReadCode() {
ExternalReferenceTable::instance(isolate_)->address(orig_target);
iter.rinfo()->set_target_runtime_entry(
nullptr, address, SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
- } break;
+ break;
+ }
default:
break;
}
@@ -624,25 +679,35 @@ bool NativeModuleDeserializer::ReadCode() {
}
Address NativeModuleDeserializer::GetTrampolineOrStubFromTag(uint32_t tag) {
- if ((tag & 0x0000ffff) == 0) {
+ if ((tag & 0x0000FFFF) == 0) {
int builtin_id = static_cast<int>(tag >> 16);
v8::internal::Code* builtin = isolate_->builtins()->builtin(builtin_id);
return native_module_->GetLocalAddressFor(handle(builtin));
} else {
- DCHECK_EQ(tag & 0xffff0000, 0);
+ DCHECK_EQ(tag & 0xFFFF0000, 0);
return stubs_[tag];
}
}
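
A worked example of the tag scheme this decodes (the encoding side is EncodeBuiltinOrStub in this file): builtin ids occupy the high 16 bits with a zero low half, while stub indices occupy the low 16 bits with a zero high half.

    uint32_t builtin_tag = 7u << 16;  // builtin id 7: (tag & 0x0000FFFF) == 0
    uint32_t stub_tag = 42u;          // stub index 42: (tag & 0xFFFF0000) == 0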
-MaybeHandle<WasmCompiledModule> NativeModuleDeserializer::DeserializeFullBuffer(
+MaybeHandle<WasmCompiledModule> DeserializeNativeModule(
Isolate* isolate, Vector<const byte> data, Vector<const byte> wire_bytes) {
+ if (!FLAG_wasm_jit_to_native) {
+ ScriptData script_data(data.start(), data.length());
+ Handle<FixedArray> compiled_module;
+ if (!WasmCompiledModuleSerializer::DeserializeWasmModule(
+ isolate, &script_data, wire_bytes)
+ .ToHandle(&compiled_module)) {
+ return {};
+ }
+ return Handle<WasmCompiledModule>::cast(compiled_module);
+ }
if (!IsWasmCodegenAllowed(isolate, isolate->native_context())) {
return {};
}
- if (!WasmSerializedFormatVersion::IsSupportedVersion(isolate, data)) {
+ if (!IsSupportedVersion(isolate, data)) {
return {};
}
- data = data + WasmSerializedFormatVersion::GetVersionSize();
+ data = data + kVersionSize;
ModuleResult decode_result =
SyncDecodeWasmModule(isolate, wire_bytes.start(), wire_bytes.end(), false,
i::wasm::kWasmOrigin);
@@ -669,9 +734,11 @@ MaybeHandle<WasmCompiledModule> NativeModuleDeserializer::DeserializeFullBuffer(
static_cast<int>(export_wrappers_size), TENURED);
Handle<WasmCompiledModule> compiled_module = WasmCompiledModule::New(
- isolate, shared->module(), isolate->factory()->NewFixedArray(0, TENURED),
- export_wrappers, {}, {});
+ isolate, shared->module(), isolate->factory()->empty_fixed_array(),
+ export_wrappers, std::vector<wasm::GlobalHandleAddress>(),
+ trap_handler::IsTrapHandlerEnabled());
compiled_module->OnWasmModuleDecodingComplete(shared);
+ script->set_wasm_compiled_module(*compiled_module);
NativeModuleDeserializer deserializer(isolate,
compiled_module->GetNativeModule());
if (!deserializer.Read(data)) return {};
diff --git a/deps/v8/src/wasm/wasm-serialization.h b/deps/v8/src/wasm/wasm-serialization.h
index 40025c23cf..9c0e9ce10a 100644
--- a/deps/v8/src/wasm/wasm-serialization.h
+++ b/deps/v8/src/wasm/wasm-serialization.h
@@ -5,89 +5,17 @@
#ifndef V8_WASM_SERIALIZATION_H_
#define V8_WASM_SERIALIZATION_H_
-#include "src/wasm/wasm-heap.h"
#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
namespace wasm {
-class WasmSerializedFormatVersion {
- public:
- static size_t GetVersionSize();
- static bool WriteVersion(Isolate* isolate, Vector<byte>);
- static bool IsSupportedVersion(Isolate* isolate, const Vector<const byte>);
+std::pair<std::unique_ptr<const byte[]>, size_t> SerializeNativeModule(
+ Isolate* isolate, Handle<WasmCompiledModule> compiled_module);
- private:
- static constexpr size_t kVersionSize = 4 * sizeof(uint32_t);
-};
-
-enum SerializationSection { Init, Metadata, Stubs, CodeSection, Done };
-
-class V8_EXPORT_PRIVATE NativeModuleSerializer {
- public:
- explicit NativeModuleSerializer(Isolate*, const NativeModule*);
- size_t Measure() const;
- size_t Write(Vector<byte>);
- bool IsDone() const { return state_ == Done; }
- static std::pair<std::unique_ptr<byte[]>, size_t> SerializeWholeModule(
- Isolate*, Handle<WasmCompiledModule>);
-
- private:
- size_t MeasureHeader() const;
- static size_t GetCodeHeaderSize();
- size_t MeasureCode(const WasmCode*) const;
- size_t MeasureCopiedStubs() const;
- FixedArray* GetHandlerTable(const WasmCode*) const;
- ByteArray* GetSourcePositions(const WasmCode*) const;
-
- void BufferHeader();
- // we buffer all the stubs because they are small
- void BufferCopiedStubs();
- void BufferCodeInAllocatedScratch(const WasmCode*);
- void BufferCurrentWasmCode();
- size_t DrainBuffer(Vector<byte> dest);
- uint32_t EncodeBuiltinOrStub(Address);
-
- Isolate* const isolate_ = nullptr;
- const NativeModule* const native_module_ = nullptr;
- SerializationSection state_ = Init;
- uint32_t index_ = 0;
- std::vector<byte> scratch_;
- Vector<byte> remaining_;
- // wasm and copied stubs reverse lookup
- std::map<Address, uint32_t> wasm_targets_lookup_;
- // immovable builtins and runtime entries lookup
- std::map<Address, uint32_t> reference_table_lookup_;
- std::map<Address, uint32_t> stub_lookup_;
- std::map<Address, uint32_t> builtin_lookup_;
-};
-
-class V8_EXPORT_PRIVATE NativeModuleDeserializer {
- public:
- explicit NativeModuleDeserializer(Isolate*, NativeModule*);
- // Currently, we don't support streamed reading, yet albeit the
- // API suggests that.
- bool Read(Vector<const byte>);
- static MaybeHandle<WasmCompiledModule> DeserializeFullBuffer(
- Isolate*, Vector<const byte> data, Vector<const byte> wire_bytes);
-
- private:
- void ExpectHeader();
- void Expect(size_t size);
- bool ReadHeader();
- bool ReadCode();
- bool ReadStubs();
- Address GetTrampolineOrStubFromTag(uint32_t);
-
- Isolate* const isolate_ = nullptr;
- NativeModule* const native_module_ = nullptr;
- std::vector<byte> scratch_;
- std::vector<Address> stubs_;
- Vector<const byte> unread_;
- size_t current_expectation_ = 0;
- uint32_t index_ = 0;
-};
+MaybeHandle<WasmCompiledModule> DeserializeNativeModule(
+ Isolate* isolate, Vector<const byte> data, Vector<const byte> wire_bytes);
} // namespace wasm
} // namespace internal
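
The header now exposes just these two free functions in place of the serializer and deserializer classes. A hedged round-trip sketch of the new entry points, assuming isolate, compiled_module, and the module's wire_bytes are already in scope:

  std::pair<std::unique_ptr<const byte[]>, size_t> buffer =
      SerializeNativeModule(isolate, compiled_module);
  Vector<const byte> data(buffer.first.get(), buffer.second);
  MaybeHandle<WasmCompiledModule> restored =
      DeserializeNativeModule(isolate, data, wire_bytes);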
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index 8b12b0867e..a75a8ddd74 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -279,18 +279,6 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
}
}
-Address Assembler::target_address_at(Address pc, Code* code) {
- Address constant_pool = code ? code->constant_pool() : nullptr;
- return target_address_at(pc, constant_pool);
-}
-
-void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
- Address target,
- ICacheFlushMode icache_flush_mode) {
- Address constant_pool = code ? code->constant_pool() : nullptr;
- set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
-}
-
void Assembler::deserialization_set_target_internal_reference_at(
Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
Memory::Address_at(pc) = target;
@@ -303,7 +291,8 @@ Address Assembler::target_address_from_return_address(Address pc) {
void Assembler::deserialization_set_special_target_at(
Isolate* isolate, Address instruction_payload, Code* code, Address target) {
- set_target_address_at(isolate, instruction_payload, code, target);
+ set_target_address_at(isolate, instruction_payload,
+ code ? code->constant_pool() : nullptr, target);
}
Handle<Code> Assembler::code_target_object_handle_at(Address pc) {
@@ -330,7 +319,7 @@ void RelocInfo::apply(intptr_t delta) {
Address RelocInfo::target_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
- return Assembler::target_address_at(pc_, host_);
+ return Assembler::target_address_at(pc_, constant_pool_);
}
Address RelocInfo::target_address_address() {
@@ -421,7 +410,7 @@ void RelocInfo::WipeOut(Isolate* isolate) {
Memory::Address_at(pc_) = nullptr;
} else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
// Effectively write zero into the relocation.
- Assembler::set_target_address_at(isolate, pc_, host_,
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_,
pc_ + sizeof(int32_t));
} else {
UNREACHABLE();
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index 5f62e2af66..38cbfc78d9 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -38,7 +38,7 @@ V8_INLINE uint64_t _xgetbv(unsigned int xcr) {
// directly because older assemblers do not include support for xgetbv and
// there is no easy way to conditionally compile based on the assembler
// used.
- __asm__ volatile(".byte 0x0f, 0x01, 0xd0" : "=a"(eax), "=d"(edx) : "c"(xcr));
+ __asm__ volatile(".byte 0x0F, 0x01, 0xD0" : "=a"(eax), "=d"(edx) : "c"(xcr));
return static_cast<uint64_t>(eax) | (static_cast<uint64_t>(edx) << 32);
}
@@ -243,17 +243,17 @@ Operand::Operand(const Operand& operand, int32_t offset) {
rex_ = operand.rex_;
if (!is_int8(disp_value) || is_baseless) {
// Need 32 bits of displacement, mode 2 or mode 1 with register rbp/r13.
- buf_[0] = (modrm & 0x3f) | (is_baseless ? 0x00 : 0x80);
+ buf_[0] = (modrm & 0x3F) | (is_baseless ? 0x00 : 0x80);
len_ = disp_offset + 4;
Memory::int32_at(&buf_[disp_offset]) = disp_value;
} else if (disp_value != 0 || (base_reg == 0x05)) {
// Need 8 bits of displacement.
- buf_[0] = (modrm & 0x3f) | 0x40; // Mode 1.
+ buf_[0] = (modrm & 0x3F) | 0x40; // Mode 1.
len_ = disp_offset + 1;
buf_[disp_offset] = static_cast<byte>(disp_value);
} else {
// Need no displacement.
- buf_[0] = (modrm & 0x3f); // Mode 0.
+ buf_[0] = (modrm & 0x3F); // Mode 0.
len_ = disp_offset;
}
if (has_sib) {
@@ -385,7 +385,7 @@ bool Assembler::IsNop(Address addr) {
Address a = addr;
while (*a == 0x66) a++;
if (*a == 0x90) return true;
- if (a[0] == 0xf && a[1] == 0x1f) return true;
+ if (a[0] == 0xF && a[1] == 0x1F) return true;
return false;
}
@@ -1044,7 +1044,7 @@ void Assembler::cmovq(Condition cc, Register dst, Register src) {
EnsureSpace ensure_space(this);
// Opcode: REX.W 0f 40 + cc /r.
emit_rex_64(dst, src);
- emit(0x0f);
+ emit(0x0F);
emit(0x40 + cc);
emit_modrm(dst, src);
}
@@ -1060,7 +1060,7 @@ void Assembler::cmovq(Condition cc, Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
// Opcode: REX.W 0f 40 + cc /r.
emit_rex_64(dst, src);
- emit(0x0f);
+ emit(0x0F);
emit(0x40 + cc);
emit_operand(dst, src);
}
@@ -1076,7 +1076,7 @@ void Assembler::cmovl(Condition cc, Register dst, Register src) {
EnsureSpace ensure_space(this);
// Opcode: 0f 40 + cc /r.
emit_optional_rex_32(dst, src);
- emit(0x0f);
+ emit(0x0F);
emit(0x40 + cc);
emit_modrm(dst, src);
}
@@ -1092,7 +1092,7 @@ void Assembler::cmovl(Condition cc, Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
// Opcode: 0f 40 + cc /r.
emit_optional_rex_32(dst, src);
- emit(0x0f);
+ emit(0x0F);
emit(0x40 + cc);
emit_operand(dst, src);
}
@@ -1101,13 +1101,13 @@ void Assembler::cmovl(Condition cc, Register dst, const Operand& src) {
void Assembler::cmpb_al(Immediate imm8) {
DCHECK(is_int8(imm8.value_) || is_uint8(imm8.value_));
EnsureSpace ensure_space(this);
- emit(0x3c);
+ emit(0x3C);
emit(imm8.value_);
}
void Assembler::lock() {
EnsureSpace ensure_space(this);
- emit(0xf0);
+ emit(0xF0);
}
void Assembler::cmpxchgb(const Operand& dst, Register src) {
@@ -1118,8 +1118,8 @@ void Assembler::cmpxchgb(const Operand& dst, Register src) {
} else {
emit_optional_rex_32(src, dst);
}
- emit(0x0f);
- emit(0xb0);
+ emit(0x0F);
+ emit(0xB0);
emit_operand(src, dst);
}
@@ -1127,19 +1127,26 @@ void Assembler::cmpxchgw(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(src, dst);
- emit(0x0f);
- emit(0xb1);
+ emit(0x0F);
+ emit(0xB1);
emit_operand(src, dst);
}
void Assembler::emit_cmpxchg(const Operand& dst, Register src, int size) {
EnsureSpace ensure_space(this);
emit_rex(src, dst, size);
- emit(0x0f);
- emit(0xb1);
+ emit(0x0F);
+ emit(0xB1);
emit_operand(src, dst);
}
+void Assembler::lfence() {
+ EnsureSpace ensure_space(this);
+ emit(0x0F);
+ emit(0xAE);
+ emit(0xE8);
+}
+
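
The three bytes emitted here (0F AE E8) are the x64 LFENCE encoding. LFENCE serializes load execution, which is what makes it usable as a speculation barrier alongside the retpoline support added elsewhere in this patch. A hedged usage sketch (labels and registers are illustrative, not from this patch):

  // Keep loads after a bounds check from issuing speculatively.
  __ cmpq(index, limit);
  __ j(above_equal, &out_of_bounds);
  __ lfence();  // later loads wait until the check above retires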
void Assembler::cpuid() {
EnsureSpace ensure_space(this);
emit(0x0F);
@@ -1593,7 +1600,7 @@ void Assembler::movw(const Operand& dst, Immediate imm) {
emit_optional_rex_32(dst);
emit(0xC7);
emit_operand(0x0, dst);
- emit(static_cast<byte>(imm.value_ & 0xff));
+ emit(static_cast<byte>(imm.value_ & 0xFF));
emit(static_cast<byte>(imm.value_ >> 8));
}
@@ -1950,28 +1957,28 @@ void Assembler::Nop(int n) {
emit(0x90);
return;
case 3:
- emit(0x0f);
- emit(0x1f);
+ emit(0x0F);
+ emit(0x1F);
emit(0x00);
return;
case 4:
- emit(0x0f);
- emit(0x1f);
+ emit(0x0F);
+ emit(0x1F);
emit(0x40);
emit(0x00);
return;
case 6:
emit(0x66);
case 5:
- emit(0x0f);
- emit(0x1f);
+ emit(0x0F);
+ emit(0x1F);
emit(0x44);
emit(0x00);
emit(0x00);
return;
case 7:
- emit(0x0f);
- emit(0x1f);
+ emit(0x0F);
+ emit(0x1F);
emit(0x80);
emit(0x00);
emit(0x00);
@@ -1989,8 +1996,8 @@ void Assembler::Nop(int n) {
emit(0x66);
n--;
case 8:
- emit(0x0f);
- emit(0x1f);
+ emit(0x0F);
+ emit(0x1F);
emit(0x84);
emit(0x00);
emit(0x00);
@@ -2898,10 +2905,10 @@ void Assembler::movq(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
if (dst.low_bits() == 4) {
// Avoid unnecessary SIB byte.
- emit(0xf3);
+ emit(0xF3);
emit_optional_rex_32(dst, src);
emit(0x0F);
- emit(0x7e);
+ emit(0x7E);
emit_sse_operand(dst, src);
} else {
emit(0x66);
@@ -3370,8 +3377,8 @@ void Assembler::ucomiss(XMMRegister dst, XMMRegister src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
- emit(0x0f);
- emit(0x2e);
+ emit(0x0F);
+ emit(0x2E);
emit_sse_operand(dst, src);
}
@@ -3380,8 +3387,8 @@ void Assembler::ucomiss(XMMRegister dst, const Operand& src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
- emit(0x0f);
- emit(0x2e);
+ emit(0x0F);
+ emit(0x2E);
emit_sse_operand(dst, src);
}
@@ -3984,14 +3991,31 @@ void Assembler::sqrtsd(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
+void Assembler::haddps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x7C);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::haddps(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x7C);
+ emit_sse_operand(dst, src);
+}
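
haddps is the SSE3 horizontal add, encoded F2 0F 7C as emitted above. The destination receives the pairwise sums of each operand, low half from dst and high half from src. A worked example:

  // haddps dst, src with dst = {1, 2, 3, 4} and src = {5, 6, 7, 8}:
  //   dst[0] = 1 + 2 = 3      dst[1] = 3 + 4 = 7
  //   dst[2] = 5 + 6 = 11     dst[3] = 7 + 8 = 15
  // so dst becomes {3, 7, 11, 15}.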
void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
- emit(0x0f);
- emit(0x2e);
+ emit(0x0F);
+ emit(0x2E);
emit_sse_operand(dst, src);
}
@@ -4001,8 +4025,8 @@ void Assembler::ucomisd(XMMRegister dst, const Operand& src) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
- emit(0x0f);
- emit(0x2e);
+ emit(0x0F);
+ emit(0x2E);
emit_sse_operand(dst, src);
}
@@ -4024,9 +4048,9 @@ void Assembler::roundss(XMMRegister dst, XMMRegister src, RoundingMode mode) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
- emit(0x0f);
- emit(0x3a);
- emit(0x0a);
+ emit(0x0F);
+ emit(0x3A);
+ emit(0x0A);
emit_sse_operand(dst, src);
// Mask precision exception.
emit(static_cast<byte>(mode) | 0x8);
@@ -4039,9 +4063,9 @@ void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
- emit(0x0f);
- emit(0x3a);
- emit(0x0b);
+ emit(0x0F);
+ emit(0x3A);
+ emit(0x0B);
emit_sse_operand(dst, src);
// Mask precision exception.
emit(static_cast<byte>(mode) | 0x8);
@@ -4052,7 +4076,7 @@ void Assembler::movmskpd(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
- emit(0x0f);
+ emit(0x0F);
emit(0x50);
emit_sse_operand(dst, src);
}
@@ -4061,7 +4085,7 @@ void Assembler::movmskpd(Register dst, XMMRegister src) {
void Assembler::movmskps(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
- emit(0x0f);
+ emit(0x0F);
emit(0x50);
emit_sse_operand(dst, src);
}
@@ -4141,7 +4165,7 @@ void Assembler::vmovd(XMMRegister dst, Register src) {
EnsureSpace ensure_space(this);
XMMRegister isrc = XMMRegister::from_code(src.code());
emit_vex_prefix(dst, xmm0, isrc, kL128, k66, k0F, kW0);
- emit(0x6e);
+ emit(0x6E);
emit_sse_operand(dst, src);
}
@@ -4150,7 +4174,7 @@ void Assembler::vmovd(XMMRegister dst, const Operand& src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_vex_prefix(dst, xmm0, src, kL128, k66, k0F, kW0);
- emit(0x6e);
+ emit(0x6E);
emit_sse_operand(dst, src);
}
@@ -4160,7 +4184,7 @@ void Assembler::vmovd(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
XMMRegister idst = XMMRegister::from_code(dst.code());
emit_vex_prefix(src, xmm0, idst, kL128, k66, k0F, kW0);
- emit(0x7e);
+ emit(0x7E);
emit_sse_operand(src, dst);
}
@@ -4170,7 +4194,7 @@ void Assembler::vmovq(XMMRegister dst, Register src) {
EnsureSpace ensure_space(this);
XMMRegister isrc = XMMRegister::from_code(src.code());
emit_vex_prefix(dst, xmm0, isrc, kL128, k66, k0F, kW1);
- emit(0x6e);
+ emit(0x6E);
emit_sse_operand(dst, src);
}
@@ -4179,7 +4203,7 @@ void Assembler::vmovq(XMMRegister dst, const Operand& src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_vex_prefix(dst, xmm0, src, kL128, k66, k0F, kW1);
- emit(0x6e);
+ emit(0x6E);
emit_sse_operand(dst, src);
}
@@ -4189,7 +4213,7 @@ void Assembler::vmovq(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
XMMRegister idst = XMMRegister::from_code(dst.code());
emit_vex_prefix(src, xmm0, idst, kL128, k66, k0F, kW1);
- emit(0x7e);
+ emit(0x7E);
emit_sse_operand(src, dst);
}
@@ -4258,7 +4282,7 @@ void Assembler::vucomiss(XMMRegister dst, XMMRegister src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_vex_prefix(dst, xmm0, src, kLIG, kNone, k0F, kWIG);
- emit(0x2e);
+ emit(0x2E);
emit_sse_operand(dst, src);
}
@@ -4267,7 +4291,7 @@ void Assembler::vucomiss(XMMRegister dst, const Operand& src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_vex_prefix(dst, xmm0, src, kLIG, kNone, k0F, kWIG);
- emit(0x2e);
+ emit(0x2E);
emit_sse_operand(dst, src);
}
@@ -4547,6 +4571,11 @@ void Assembler::rorxl(Register dst, const Operand& src, byte imm8) {
emit(imm8);
}
+void Assembler::pause() {
+ emit(0xF3);
+ emit(0x90);
+}
+
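
pause() emits F3 90, the PAUSE spin-wait hint. Its canonical home is a spin loop, and that is exactly how the retpoline sequences added to macro-assembler-x64.cc in this same patch use it:

  Label spin;
  __ bind(&spin);
  __ pause();    // de-pipelines the spin and keeps it cheap
  __ jmp(&spin);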
void Assembler::minps(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index e5711101bd..1c838b964b 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -182,6 +182,7 @@ constexpr Register arg_reg_4 = rcx;
V(xmm13) \
V(xmm14)
+constexpr bool kPadArguments = false;
constexpr bool kSimpleFPAliasing = true;
constexpr bool kSimdMaskRegisters = false;
@@ -477,10 +478,6 @@ class Assembler : public AssemblerBase {
static inline void set_target_address_at(
Isolate* isolate, Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
- static inline Address target_address_at(Address pc, Code* code);
- static inline void set_target_address_at(
- Isolate* isolate, Address pc, Code* code, Address target,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
@@ -1212,6 +1209,9 @@ class Assembler : public AssemblerBase {
void sqrtsd(XMMRegister dst, XMMRegister src);
void sqrtsd(XMMRegister dst, const Operand& src);
+ void haddps(XMMRegister dst, XMMRegister src);
+ void haddps(XMMRegister dst, const Operand& src);
+
void ucomisd(XMMRegister dst, XMMRegister src);
void ucomisd(XMMRegister dst, const Operand& src);
void cmpltsd(XMMRegister dst, XMMRegister src);
@@ -1905,6 +1905,9 @@ class Assembler : public AssemblerBase {
void rorxl(Register dst, Register src, byte imm8);
void rorxl(Register dst, const Operand& src, byte imm8);
+ void lfence();
+ void pause();
+
// Check the code size generated from label to here.
int SizeOfCodeGeneratedSince(Label* label) {
return pc_offset() - label->pos();
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index 60d04fcbe6..27061c1e2b 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -523,12 +523,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// external reference instead of inlining the call target address directly
// in the code, because the builtin stubs may not have been generated yet
// at the time this code is generated.
- if (type() == StackFrame::CONSTRUCT_ENTRY) {
- __ Call(BUILTIN_CODE(isolate(), JSConstructEntryTrampoline),
- RelocInfo::CODE_TARGET);
- } else {
- __ Call(BUILTIN_CODE(isolate(), JSEntryTrampoline), RelocInfo::CODE_TARGET);
- }
+ __ Call(EntryTrampoline(), RelocInfo::CODE_TARGET);
// Unlink this frame from the handler chain.
__ PopStackHandler();
@@ -651,7 +646,7 @@ static void CreateArrayDispatch(MacroAssembler* masm,
}
// If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
} else {
UNREACHABLE();
}
@@ -696,7 +691,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
Handle<Map> allocation_site_map =
masm->isolate()->factory()->allocation_site_map();
__ Cmp(FieldOperand(rbx, 0), allocation_site_map);
- __ Assert(equal, kExpectedAllocationSite);
+ __ Assert(equal, AbortReason::kExpectedAllocationSite);
}
// Save the resulting elements kind in type info. We can't just store r3
@@ -721,7 +716,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
}
// If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
} else {
UNREACHABLE();
}
@@ -796,9 +791,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Will both indicate a nullptr and a Smi.
STATIC_ASSERT(kSmiTag == 0);
Condition not_smi = NegateCondition(masm->CheckSmi(rcx));
- __ Check(not_smi, kUnexpectedInitialMapForArrayFunction);
+ __ Check(not_smi, AbortReason::kUnexpectedInitialMapForArrayFunction);
__ CmpObjectType(rcx, MAP_TYPE, rcx);
- __ Check(equal, kUnexpectedInitialMapForArrayFunction);
+ __ Check(equal, AbortReason::kUnexpectedInitialMapForArrayFunction);
// We should either have undefined in rbx or a valid AllocationSite
__ AssertUndefinedOrAllocationSite(rbx);
@@ -895,9 +890,9 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// Will both indicate a nullptr and a Smi.
STATIC_ASSERT(kSmiTag == 0);
Condition not_smi = NegateCondition(masm->CheckSmi(rcx));
- __ Check(not_smi, kUnexpectedInitialMapForArrayFunction);
+ __ Check(not_smi, AbortReason::kUnexpectedInitialMapForArrayFunction);
__ CmpObjectType(rcx, MAP_TYPE, rcx);
- __ Check(equal, kUnexpectedInitialMapForArrayFunction);
+ __ Check(equal, AbortReason::kUnexpectedInitialMapForArrayFunction);
}
// Figure out the right elements kind
@@ -914,8 +909,9 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
__ cmpl(rcx, Immediate(PACKED_ELEMENTS));
__ j(equal, &done);
__ cmpl(rcx, Immediate(HOLEY_ELEMENTS));
- __ Assert(equal,
- kInvalidElementsKindForInternalArrayOrInternalPackedArray);
+ __ Assert(
+ equal,
+ AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray);
__ bind(&done);
}
@@ -1076,7 +1072,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ CompareRoot(return_value, Heap::kNullValueRootIndex);
__ j(equal, &ok, Label::kNear);
- __ Abort(kAPICallReturnedInvalidObject);
+ __ Abort(AbortReason::kAPICallReturnedInvalidObject);
__ bind(&ok);
#endif
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index 8c22e07b12..25a74b98fc 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -15,8 +15,7 @@ namespace internal {
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
size_t allocated = 0;
- byte* buffer =
- AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
+ byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@@ -32,8 +31,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, allocated);
- CHECK(base::OS::SetPermissions(buffer, allocated,
- base::OS::MemoryPermission::kReadExecute));
+ CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
}
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index adc1b7874e..8c4d6a20e8 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -12,12 +12,14 @@
namespace v8 {
namespace internal {
-
-const int Deoptimizer::table_entry_size_ = 10;
+const int Deoptimizer::table_entry_size_ = 5;
#define __ masm()->
void Deoptimizer::TableEntryGenerator::Generate() {
+ Label deopt_table_entry;
+ __ bind(&deopt_table_entry);
+
GeneratePrologue();
// Save all general purpose registers before messing with them.
@@ -63,7 +65,22 @@ void Deoptimizer::TableEntryGenerator::Generate() {
Register arg5 = r11;
// Get the bailout id from the stack.
- __ movp(arg_reg_3, Operand(rsp, kSavedRegistersAreaSize));
+ __ movp(rax, Operand(rsp, kSavedRegistersAreaSize));
+
+ // Address of the deoptimization table.
+ __ leap(rdx, Operand(&deopt_table_entry));
+
+ // rax = deopt_entry - deopt_table_entry - 5
+ __ subp(rax, rdx);
+ __ subl(rax, Immediate(5));
+
+ // rax /= 5
+ __ movl(rbx, Immediate(0xCCCCCCCD));
+ __ imulq(rax, rbx);
+ __ shrq(rax, Immediate(0x22));
+
+ // Bailout id.
+ __ movl(arg_reg_3, rax);
// Get the address of the location in the code object
// and compute the fp-to-sp delta in register arg5.
@@ -231,8 +248,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
for (int i = 0; i < count(); i++) {
int start = masm()->pc_offset();
USE(start);
- __ pushq_imm32(i);
- __ jmp(&done);
+ __ call(&done);
DCHECK(masm()->pc_offset() - start == table_entry_size_);
}
__ bind(&done);
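
Two coordinated changes in the deoptimizer hunks above: each table entry shrinks from a 10-byte push-immediate-plus-jump to a single 5-byte call (hence table_entry_size_ = 5), and the bailout id is recomputed from the return address that call leaves on the stack. Entry i starts at deopt_table_entry + 5 * i and its call pushes deopt_table_entry + 5 * (i + 1), so the id is (ret - table - 5) / 5. The magic constant is the standard reciprocal-division trick: 0xCCCCCCCD == ceil(2^34 / 5), so a multiply followed by a right shift of 0x22 (= 34) divides by 5. A plain C++ check of the arithmetic:

  #include <cstdint>
  // (x * 0xCCCCCCCD) >> 34 == x / 5 for every 32-bit x.
  uint32_t div5(uint32_t x) {
    return static_cast<uint32_t>((uint64_t{x} * 0xCCCCCCCDu) >> 34);
  }
  // Entry i: ret == table + 5 * (i + 1), so div5(ret - table - 5) == i.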
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index 23f502ab47..247f5e889e 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -327,7 +327,7 @@ class DisassemblerX64 {
byte rex_;
byte operand_size_; // 0x66 or (if no group 3 prefix is present) 0x0.
byte group_1_prefix_; // 0xF2, 0xF3, or (if no group 1 prefix is present) 0.
- byte vex_byte0_; // 0xc4 or 0xc5
+ byte vex_byte0_; // 0xC4 or 0xC5
byte vex_byte1_;
byte vex_byte2_; // only for 3 bytes vex prefix
// Byte size operand override.
@@ -405,7 +405,7 @@ class DisassemblerX64 {
int vex_vreg() {
DCHECK(vex_byte0_ == VEX3_PREFIX || vex_byte0_ == VEX2_PREFIX);
byte checked = vex_byte0_ == VEX3_PREFIX ? vex_byte2_ : vex_byte1_;
- return ~(checked >> 3) & 0xf;
+ return ~(checked >> 3) & 0xF;
}
OperandSize operand_size() {
@@ -484,7 +484,7 @@ class DisassemblerX64 {
void UnimplementedInstruction() {
if (abort_on_unimplemented_) {
- CHECK(false);
+ FATAL("'Unimplemented Instruction'");
} else {
AppendToBuffer("'Unimplemented Instruction'");
}
@@ -616,8 +616,6 @@ int DisassemblerX64::PrintImmediate(byte* data, OperandSize size) {
break;
default:
UNREACHABLE();
- value = 0; // Initialize variables on all paths to satisfy the compiler.
- count = 0;
}
AppendToBuffer("%" PRIx64, value);
return count;
@@ -890,62 +888,62 @@ int DisassemblerX64::AVXInstruction(byte* data) {
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0xa9:
+ case 0xA9:
AppendToBuffer("vfmadd213s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0xb9:
+ case 0xB9:
AppendToBuffer("vfmadd231s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x9b:
+ case 0x9B:
AppendToBuffer("vfmsub132s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0xab:
+ case 0xAB:
AppendToBuffer("vfmsub213s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0xbb:
+ case 0xBB:
AppendToBuffer("vfmsub231s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x9d:
+ case 0x9D:
AppendToBuffer("vfnmadd132s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0xad:
+ case 0xAD:
AppendToBuffer("vfnmadd213s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0xbd:
+ case 0xBD:
AppendToBuffer("vfnmadd231s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x9f:
+ case 0x9F:
AppendToBuffer("vfnmsub132s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0xaf:
+ case 0xAF:
AppendToBuffer("vfnmsub213s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0xbf:
+ case 0xBF:
AppendToBuffer("vfnmsub231s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0xf7:
+ case 0xF7:
AppendToBuffer("shlx%c %s,", operand_size_code(),
NameOfCPURegister(regop));
current += PrintRightOperand(current);
@@ -970,13 +968,13 @@ int DisassemblerX64::AVXInstruction(byte* data) {
int mod, regop, rm, vvvv = vex_vreg();
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
- case 0x0a:
+ case 0x0A:
AppendToBuffer("vroundss %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
- case 0x0b:
+ case 0x0B:
AppendToBuffer("vroundsd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
@@ -1031,12 +1029,12 @@ int DisassemblerX64::AVXInstruction(byte* data) {
}
AppendToBuffer(",%s", NameOfXMMRegister(regop));
break;
- case 0x2a:
+ case 0x2A:
AppendToBuffer("%s %s,%s,", vex_w() ? "vcvtqsi2ss" : "vcvtlsi2ss",
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightOperand(current);
break;
- case 0x2c:
+ case 0x2C:
AppendToBuffer("vcvttss2si%s %s,", vex_w() ? "q" : "",
NameOfCPURegister(regop));
current += PrintRightXMMOperand(current);
@@ -1051,27 +1049,27 @@ int DisassemblerX64::AVXInstruction(byte* data) {
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x5a:
+ case 0x5A:
AppendToBuffer("vcvtss2sd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x5c:
+ case 0x5C:
AppendToBuffer("vsubss %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x5d:
+ case 0x5D:
AppendToBuffer("vminss %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x5e:
+ case 0x5E:
AppendToBuffer("vdivss %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x5f:
+ case 0x5F:
AppendToBuffer("vmaxss %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
@@ -1098,17 +1096,17 @@ int DisassemblerX64::AVXInstruction(byte* data) {
}
AppendToBuffer(",%s", NameOfXMMRegister(regop));
break;
- case 0x2a:
+ case 0x2A:
AppendToBuffer("%s %s,%s,", vex_w() ? "vcvtqsi2sd" : "vcvtlsi2sd",
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightOperand(current);
break;
- case 0x2c:
+ case 0x2C:
AppendToBuffer("vcvttsd2si%s %s,", vex_w() ? "q" : "",
NameOfCPURegister(regop));
current += PrintRightXMMOperand(current);
break;
- case 0x2d:
+ case 0x2D:
AppendToBuffer("vcvtsd2si%s %s,", vex_w() ? "q" : "",
NameOfCPURegister(regop));
current += PrintRightXMMOperand(current);
@@ -1128,32 +1126,32 @@ int DisassemblerX64::AVXInstruction(byte* data) {
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x5a:
+ case 0x5A:
AppendToBuffer("vcvtsd2ss %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x5c:
+ case 0x5C:
AppendToBuffer("vsubsd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x5d:
+ case 0x5D:
AppendToBuffer("vminsd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x5e:
+ case 0x5E:
AppendToBuffer("vdivsd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x5f:
+ case 0x5F:
AppendToBuffer("vmaxsd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0xf0:
+ case 0xF0:
AppendToBuffer("vlddqu %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
break;
@@ -1165,24 +1163,24 @@ int DisassemblerX64::AVXInstruction(byte* data) {
get_modrm(*current, &mod, &regop, &rm);
const char* mnem = "?";
switch (opcode) {
- case 0xf2:
+ case 0xF2:
AppendToBuffer("andn%c %s,%s,", operand_size_code(),
NameOfCPURegister(regop), NameOfCPURegister(vvvv));
current += PrintRightOperand(current);
break;
- case 0xf5:
+ case 0xF5:
AppendToBuffer("bzhi%c %s,", operand_size_code(),
NameOfCPURegister(regop));
current += PrintRightOperand(current);
AppendToBuffer(",%s", NameOfCPURegister(vvvv));
break;
- case 0xf7:
+ case 0xF7:
AppendToBuffer("bextr%c %s,", operand_size_code(),
NameOfCPURegister(regop));
current += PrintRightOperand(current);
AppendToBuffer(",%s", NameOfCPURegister(vvvv));
break;
- case 0xf3:
+ case 0xF3:
switch (regop) {
case 1:
mnem = "blsr";
@@ -1208,17 +1206,17 @@ int DisassemblerX64::AVXInstruction(byte* data) {
int mod, regop, rm, vvvv = vex_vreg();
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
- case 0xf5:
+ case 0xF5:
AppendToBuffer("pdep%c %s,%s,", operand_size_code(),
NameOfCPURegister(regop), NameOfCPURegister(vvvv));
current += PrintRightOperand(current);
break;
- case 0xf6:
+ case 0xF6:
AppendToBuffer("mulx%c %s,%s,", operand_size_code(),
NameOfCPURegister(regop), NameOfCPURegister(vvvv));
current += PrintRightOperand(current);
break;
- case 0xf7:
+ case 0xF7:
AppendToBuffer("shrx%c %s,", operand_size_code(),
NameOfCPURegister(regop));
current += PrintRightOperand(current);
@@ -1231,12 +1229,12 @@ int DisassemblerX64::AVXInstruction(byte* data) {
int mod, regop, rm, vvvv = vex_vreg();
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
- case 0xf5:
+ case 0xF5:
AppendToBuffer("pext%c %s,%s,", operand_size_code(),
NameOfCPURegister(regop), NameOfCPURegister(vvvv));
current += PrintRightOperand(current);
break;
- case 0xf7:
+ case 0xF7:
AppendToBuffer("sarx%c %s,", operand_size_code(),
NameOfCPURegister(regop));
current += PrintRightOperand(current);
@@ -1249,16 +1247,16 @@ int DisassemblerX64::AVXInstruction(byte* data) {
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
- case 0xf0:
+ case 0xF0:
AppendToBuffer("rorx%c %s,", operand_size_code(),
NameOfCPURegister(regop));
current += PrintRightOperand(current);
switch (operand_size()) {
case OPERAND_DOUBLEWORD_SIZE:
- AppendToBuffer(",%d", *current & 0x1f);
+ AppendToBuffer(",%d", *current & 0x1F);
break;
case OPERAND_QUADWORD_SIZE:
- AppendToBuffer(",%d", *current & 0x3f);
+ AppendToBuffer(",%d", *current & 0x3F);
break;
default:
UnimplementedInstruction();
@@ -1290,7 +1288,7 @@ int DisassemblerX64::AVXInstruction(byte* data) {
current += PrintRightXMMOperand(current);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
break;
- case 0x2e:
+ case 0x2E:
AppendToBuffer("vucomiss %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
break;
@@ -1343,7 +1341,7 @@ int DisassemblerX64::AVXInstruction(byte* data) {
current += PrintRightXMMOperand(current);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
break;
- case 0x2e:
+ case 0x2E:
AppendToBuffer("vucomisd %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
break;
@@ -1366,7 +1364,7 @@ int DisassemblerX64::AVXInstruction(byte* data) {
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x6e:
+ case 0x6E:
AppendToBuffer("vmov%c %s,", vex_w() ? 'q' : 'd',
NameOfXMMRegister(regop));
current += PrintRightOperand(current);
@@ -1394,7 +1392,7 @@ int DisassemblerX64::AVXInstruction(byte* data) {
current += PrintRightXMMOperand(current);
AppendToBuffer(",%u", *current++);
break;
- case 0x7e:
+ case 0x7E:
AppendToBuffer("vmov%c ", vex_w() ? 'q' : 'd');
current += PrintRightOperand(current);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
@@ -1409,13 +1407,13 @@ int DisassemblerX64::AVXInstruction(byte* data) {
current += 1;
break;
}
- case 0xc4:
+ case 0xC4:
AppendToBuffer("vpinsrw %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
- case 0xc5:
+ case 0xC5:
AppendToBuffer("vpextrw %s,", NameOfCPURegister(regop));
current += PrintRightXMMOperand(current);
AppendToBuffer(",0x%x", *current++);
@@ -1659,13 +1657,13 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
current += PrintRightOperand(current);
AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), (*current) & 3);
current += 1;
- } else if (third_byte == 0x0a) {
+ } else if (third_byte == 0x0A) {
get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("roundss %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
AppendToBuffer(",0x%x", (*current) & 3);
current += 1;
- } else if (third_byte == 0x0b) {
+ } else if (third_byte == 0x0B) {
get_modrm(*current, &mod, &regop, &rm);
// roundsd xmm, xmm/m64, imm8
AppendToBuffer("roundsd %s,", NameOfXMMRegister(regop));
@@ -1716,7 +1714,7 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
}
} else {
get_modrm(*current, &mod, &regop, &rm);
- if (opcode == 0x1f) {
+ if (opcode == 0x1F) {
current++;
if (rm == 4) { // SIB byte present.
current++;
@@ -1774,17 +1772,17 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
} else if (opcode == 0x71) {
current += 1;
AppendToBuffer("ps%sw %s,%d", sf_str[regop / 2], NameOfXMMRegister(rm),
- *current & 0x7f);
+ *current & 0x7F);
current += 1;
} else if (opcode == 0x72) {
current += 1;
AppendToBuffer("ps%sd %s,%d", sf_str[regop / 2], NameOfXMMRegister(rm),
- *current & 0x7f);
+ *current & 0x7F);
current += 1;
} else if (opcode == 0x73) {
current += 1;
AppendToBuffer("ps%sq %s,%d", sf_str[regop / 2], NameOfXMMRegister(rm),
- *current & 0x7f);
+ *current & 0x7F);
current += 1;
} else if (opcode == 0xB1) {
current += PrintOperands("cmpxchg", OPER_REG_OP_ORDER, current);
@@ -1973,6 +1971,11 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("lddqu %s,", NameOfXMMRegister(regop));
current += PrintRightOperand(current);
+ } else if (opcode == 0x7C) {
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("haddps %s,", NameOfXMMRegister(regop));
+ current += PrintRightOperand(current);
} else {
UnimplementedInstruction();
}
@@ -2011,11 +2014,22 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
current += PrintRightXMMOperand(current);
AppendToBuffer(", %d", (*current) & 7);
current += 1;
+ } else if (opcode == 0x6F) {
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("movdqu %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
} else if (opcode == 0x7E) {
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("movq %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
+ } else if (opcode == 0x7F) {
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("movdqu ");
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else if ((opcode & 0xF8) == 0x58 || opcode == 0x51) {
// XMM arithmetic. Mnemonic was retrieved at the start of this function.
int mod, regop, rm;
@@ -2096,7 +2110,7 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
current += PrintRightXMMOperand(current);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else if (opcode == 0x2e) {
+ } else if (opcode == 0x2E) {
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("ucomiss %s,", NameOfXMMRegister(regop));
@@ -2194,6 +2208,9 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
byte_size_operand_ = true;
}
current += PrintOperands(mnemonic, OPER_REG_OP_ORDER, current);
+ } else if (opcode == 0xAE && (*(data + 2) & 0xF8) == 0xE8) {
+ AppendToBuffer("lfence");
+ current = data + 3;
} else {
UnimplementedInstruction();
}
@@ -2373,10 +2390,15 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
}
case SHORT_IMMEDIATE_INSTR: {
- byte* addr =
- reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data + 1));
- AppendToBuffer("%s rax,%s", idesc.mnem, NameOfAddress(addr));
- data += 5;
+ int32_t imm;
+ if (operand_size() == OPERAND_WORD_SIZE) {
+ imm = *reinterpret_cast<int16_t*>(data + 1);
+ data += 3;
+ } else {
+ imm = *reinterpret_cast<int32_t*>(data + 1);
+ data += 5;
+ }
+ AppendToBuffer("%s rax,0x%x", idesc.mnem, imm);
break;
}
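
The rewritten SHORT_IMMEDIATE_INSTR case honors the 0x66 operand-size prefix (16-bit immediate, two fewer bytes consumed) and prints the immediate as a value instead of symbolizing it as an address. A worked example, assuming opcode 0x3D (the cmp-with-rax short form) is routed through this case:

  3D 78 56 34 12  ->  "cmp rax,0x12345678"  (opcode + imm32, data += 5)
  66 3D 34 12     ->  "cmp rax,0x1234"      (prefix handled earlier; opcode + imm16, data += 3)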
diff --git a/deps/v8/src/x64/interface-descriptors-x64.cc b/deps/v8/src/x64/interface-descriptors-x64.cc
index 59c9532cb3..22bad696d2 100644
--- a/deps/v8/src/x64/interface-descriptors-x64.cc
+++ b/deps/v8/src/x64/interface-descriptors-x64.cc
@@ -44,8 +44,6 @@ const Register LoadDescriptor::SlotRegister() { return rax; }
const Register LoadWithVectorDescriptor::VectorRegister() { return rbx; }
-const Register LoadICProtoArrayDescriptor::HandlerRegister() { return rdi; }
-
const Register StoreDescriptor::ReceiverRegister() { return rdx; }
const Register StoreDescriptor::NameRegister() { return rcx; }
const Register StoreDescriptor::ValueRegister() { return rax; }
@@ -205,6 +203,11 @@ void TransitionElementsKindDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void AbortJSDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {rdx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index e305aaa1a5..5019be3727 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -186,7 +186,8 @@ void MacroAssembler::PushAddress(ExternalReference source) {
int64_t address = reinterpret_cast<int64_t>(source.address());
if (is_int32(address) && !serializer_enabled()) {
if (emit_debug_code()) {
- Move(kScratchRegister, kZapValue, Assembler::RelocInfoNone());
+ Move(kScratchRegister, reinterpret_cast<Address>(kZapValue),
+ Assembler::RelocInfoNone());
}
Push(Immediate(static_cast<int32_t>(address)));
return;
@@ -255,8 +256,9 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- Move(value, kZapValue, Assembler::RelocInfoNone());
- Move(dst, kZapValue, Assembler::RelocInfoNone());
+ Move(value, reinterpret_cast<Address>(kZapValue),
+ Assembler::RelocInfoNone());
+ Move(dst, reinterpret_cast<Address>(kZapValue), Assembler::RelocInfoNone());
}
}
@@ -298,15 +300,35 @@ void TurboAssembler::CallRecordWriteStub(
Register fp_mode_parameter(callable.descriptor().GetRegisterParameter(
RecordWriteDescriptor::kFPMode));
- pushq(object);
- pushq(address);
-
- popq(slot_parameter);
- popq(object_parameter);
+ // Prepare the argument registers for calling RecordWrite:
+ //   slot_parameter   <- address
+ //   object_parameter <- object
+ if (slot_parameter != object) {
+ // Normal case
+ Move(slot_parameter, address);
+ Move(object_parameter, object);
+ } else if (object_parameter != address) {
+ // Only slot_parameter and object are the same register, so read
+ // object before it is overwritten:
+ //   object_parameter <- object
+ //   slot_parameter   <- address
+ Move(object_parameter, object);
+ Move(slot_parameter, address);
+ } else {
+ // slot_parameter == object and object_parameter == address:
+ // the two moves form a cycle, so swap the registers.
+ xchgq(slot_parameter, object_parameter);
+ }
LoadAddress(isolate_parameter, ExternalReference::isolate_address(isolate()));
- Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
- Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
+
+ Smi* smi_rsa = Smi::FromEnum(remembered_set_action);
+ Smi* smi_fm = Smi::FromEnum(fp_mode);
+ Move(remembered_set_parameter, smi_rsa);
+ if (smi_rsa != smi_fm) {
+ Move(fp_mode_parameter, smi_fm);
+ } else {
+ movq(fp_mode_parameter, remembered_set_parameter);
+ }
Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreRegisters(registers);
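
The push/pop pairs this replaces cost four stack operations; the new code performs the same two-register parallel move entirely in registers. The case split generalizes: order the moves so no destination overwrites a still-needed source, and swap when the two assignments form a cycle. A minimal sketch of the idea, using only the calls that appear above (the helper itself is illustrative):

  // d0 <- s0 and d1 <- s1 without a scratch register.
  void ParallelMove2(TurboAssembler* tasm, Register d0, Register s0,
                     Register d1, Register s1) {
    if (d0 != s1) {            // writing d0 cannot clobber s1
      tasm->Move(d0, s0);
      tasm->Move(d1, s1);
    } else if (d1 != s0) {     // writing d1 cannot clobber s0
      tasm->Move(d1, s1);
      tasm->Move(d0, s0);
    } else {                   // d0 == s1 and d1 == s0: a cycle
      tasm->xchgq(d0, d1);
    }
  }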
@@ -366,20 +388,22 @@ void MacroAssembler::RecordWrite(Register object, Register address,
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- Move(address, kZapValue, Assembler::RelocInfoNone());
- Move(value, kZapValue, Assembler::RelocInfoNone());
+ Move(address, reinterpret_cast<Address>(kZapValue),
+ Assembler::RelocInfoNone());
+ Move(value, reinterpret_cast<Address>(kZapValue),
+ Assembler::RelocInfoNone());
}
}
-void TurboAssembler::Assert(Condition cc, BailoutReason reason) {
+void TurboAssembler::Assert(Condition cc, AbortReason reason) {
if (emit_debug_code()) Check(cc, reason);
}
-void TurboAssembler::AssertUnreachable(BailoutReason reason) {
+void TurboAssembler::AssertUnreachable(AbortReason reason) {
if (emit_debug_code()) Abort(reason);
}
-void TurboAssembler::Check(Condition cc, BailoutReason reason) {
+void TurboAssembler::Check(Condition cc, AbortReason reason) {
Label L;
j(cc, &L, Label::kNear);
Abort(reason);
@@ -401,9 +425,9 @@ void TurboAssembler::CheckStackAlignment() {
}
}
-void TurboAssembler::Abort(BailoutReason reason) {
+void TurboAssembler::Abort(AbortReason reason) {
#ifdef DEBUG
- const char* msg = GetBailoutReason(reason);
+ const char* msg = GetAbortReason(reason);
if (msg != nullptr) {
RecordComment("Abort message: ");
RecordComment(msg);
@@ -1622,9 +1646,7 @@ void TurboAssembler::Call(ExternalReference ext) {
#endif
LoadAddress(kScratchRegister, ext);
call(kScratchRegister);
-#ifdef DEBUG
DCHECK_EQ(end_position, pc_offset());
-#endif
}
void TurboAssembler::Call(const Operand& op) {
@@ -1642,9 +1664,7 @@ void TurboAssembler::Call(Address destination, RelocInfo::Mode rmode) {
#endif
Move(kScratchRegister, destination, rmode);
call(kScratchRegister);
-#ifdef DEBUG
DCHECK_EQ(pc_offset(), end_position);
-#endif
}
void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
@@ -1653,9 +1673,52 @@ void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
#endif
DCHECK(RelocInfo::IsCodeTarget(rmode));
call(code_object, rmode);
-#ifdef DEBUG
DCHECK_EQ(end_position, pc_offset());
+}
+
+void TurboAssembler::RetpolineCall(Register reg) {
+ Label setup_return, setup_target, inner_indirect_branch, capture_spec;
+
+ jmp(&setup_return); // Jump past the entire retpoline below.
+
+ bind(&inner_indirect_branch);
+ call(&setup_target);
+
+ bind(&capture_spec);
+ pause();
+ jmp(&capture_spec);
+
+ bind(&setup_target);
+ movq(Operand(rsp, 0), reg);
+ ret(0);
+
+ bind(&setup_return);
+ call(&inner_indirect_branch); // Callee will return after this instruction.
+}
+
+void TurboAssembler::RetpolineCall(Address destination, RelocInfo::Mode rmode) {
+#ifdef DEBUG
+// TODO(titzer): CallSize() is wrong for RetpolineCalls
+// int end_position = pc_offset() + CallSize(destination);
#endif
+ Move(kScratchRegister, destination, rmode);
+ RetpolineCall(kScratchRegister);
+ // TODO(titzer): CallSize() is wrong for RetpolineCalls
+ // DCHECK_EQ(pc_offset(), end_position);
+}
+
+void TurboAssembler::RetpolineJump(Register reg) {
+ Label setup_target, capture_spec;
+
+ call(&setup_target);
+
+ bind(&capture_spec);
+ pause();
+ jmp(&capture_spec);
+
+ bind(&setup_target);
+ movq(Operand(rsp, 0), reg);
+ ret(0);
}
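
RetpolineCall and RetpolineJump implement the "retpoline" mitigation for branch-target injection (Spectre v2): the indirect branch is replaced by a call/ret pair whose return address is overwritten with the real target, so prediction goes through the return-stack buffer instead of the indirect branch predictor, and any misprediction is parked in the pause loop. A sketch of the instruction stream RetpolineJump(reg) lays down:

    call setup_target       ; pushes the address of capture_spec
  capture_spec:
    pause                   ; speculation of the ret lands here...
    jmp capture_spec        ; ...and spins until it is squashed
  setup_target:
    mov [rsp], reg          ; overwrite the return address with the target
    ret                     ; architecturally jumps to reg

RetpolineCall wraps the same construction in an outer call so that the callee's eventual return lands after the retpoline.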
void TurboAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
@@ -1919,9 +1982,11 @@ MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
void MacroAssembler::PushStackHandler() {
// Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ Push(Immediate(0)); // Padding.
+
// Link the current handler as the next handler.
ExternalReference handler_address(IsolateAddressId::kHandlerAddress,
isolate());
@@ -1999,7 +2064,7 @@ void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
void MacroAssembler::AssertNotSmi(Register object) {
if (emit_debug_code()) {
Condition is_smi = CheckSmi(object);
- Check(NegateCondition(is_smi), kOperandIsASmi);
+ Check(NegateCondition(is_smi), AbortReason::kOperandIsASmi);
}
}
@@ -2007,7 +2072,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
void MacroAssembler::AssertSmi(Register object) {
if (emit_debug_code()) {
Condition is_smi = CheckSmi(object);
- Check(is_smi, kOperandIsNotASmi);
+ Check(is_smi, AbortReason::kOperandIsNotASmi);
}
}
@@ -2015,27 +2080,27 @@ void MacroAssembler::AssertSmi(Register object) {
void MacroAssembler::AssertSmi(const Operand& object) {
if (emit_debug_code()) {
Condition is_smi = CheckSmi(object);
- Check(is_smi, kOperandIsNotASmi);
+ Check(is_smi, AbortReason::kOperandIsNotASmi);
}
}
void MacroAssembler::AssertFixedArray(Register object) {
if (emit_debug_code()) {
testb(object, Immediate(kSmiTagMask));
- Check(not_equal, kOperandIsASmiAndNotAFixedArray);
+ Check(not_equal, AbortReason::kOperandIsASmiAndNotAFixedArray);
Push(object);
CmpObjectType(object, FIXED_ARRAY_TYPE, object);
Pop(object);
- Check(equal, kOperandIsNotAFixedArray);
+ Check(equal, AbortReason::kOperandIsNotAFixedArray);
}
}
void TurboAssembler::AssertZeroExtended(Register int32_register) {
if (emit_debug_code()) {
DCHECK_NE(int32_register, kScratchRegister);
- movq(kScratchRegister, V8_INT64_C(0x0000000100000000));
+ movq(kScratchRegister, int64_t{0x0000000100000000});
cmpq(kScratchRegister, int32_register);
- Check(above_equal, k32BitValueInRegisterIsNotZeroExtended);
+ Check(above_equal, AbortReason::k32BitValueInRegisterIsNotZeroExtended);
}
}
@@ -2043,11 +2108,11 @@ void TurboAssembler::AssertZeroExtended(Register int32_register) {
void MacroAssembler::AssertFunction(Register object) {
if (emit_debug_code()) {
testb(object, Immediate(kSmiTagMask));
- Check(not_equal, kOperandIsASmiAndNotAFunction);
+ Check(not_equal, AbortReason::kOperandIsASmiAndNotAFunction);
Push(object);
CmpObjectType(object, JS_FUNCTION_TYPE, object);
Pop(object);
- Check(equal, kOperandIsNotAFunction);
+ Check(equal, AbortReason::kOperandIsNotAFunction);
}
}
@@ -2055,18 +2120,18 @@ void MacroAssembler::AssertFunction(Register object) {
void MacroAssembler::AssertBoundFunction(Register object) {
if (emit_debug_code()) {
testb(object, Immediate(kSmiTagMask));
- Check(not_equal, kOperandIsASmiAndNotABoundFunction);
+ Check(not_equal, AbortReason::kOperandIsASmiAndNotABoundFunction);
Push(object);
CmpObjectType(object, JS_BOUND_FUNCTION_TYPE, object);
Pop(object);
- Check(equal, kOperandIsNotABoundFunction);
+ Check(equal, AbortReason::kOperandIsNotABoundFunction);
}
}
void MacroAssembler::AssertGeneratorObject(Register object) {
if (!emit_debug_code()) return;
testb(object, Immediate(kSmiTagMask));
- Check(not_equal, kOperandIsASmiAndNotAGeneratorObject);
+ Check(not_equal, AbortReason::kOperandIsASmiAndNotAGeneratorObject);
// Load map
Register map = object;
@@ -2084,7 +2149,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
bind(&do_check);
// Restore generator object to register and perform assertion
Pop(object);
- Check(equal, kOperandIsNotAGeneratorObject);
+ Check(equal, AbortReason::kOperandIsNotAGeneratorObject);
}
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
@@ -2094,7 +2159,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
Cmp(object, isolate()->factory()->undefined_value());
j(equal, &done_checking);
Cmp(FieldOperand(object, 0), isolate()->factory()->allocation_site_map());
- Assert(equal, kExpectedUndefinedOrCell);
+ Assert(equal, AbortReason::kExpectedUndefinedOrCell);
bind(&done_checking);
}
}
@@ -2161,7 +2226,7 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
if (FLAG_debug_code) {
cmpp(rsp, new_sp_reg);
- Check(below, kStackAccessBelowStackPointer);
+ Check(below, AbortReason::kStackAccessBelowStackPointer);
}
// Copy return address from caller's frame to current frame's return address
@@ -2387,7 +2452,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
isolate()->factory()->undefined_value(),
RelocInfo::EMBEDDED_OBJECT);
cmpp(Operand(rsp, 0), kScratchRegister);
- Check(not_equal, kCodeObjectNotProperlyPatched);
+ Check(not_equal, AbortReason::kCodeObjectNotProperlyPatched);
}
}
@@ -2395,7 +2460,7 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) {
if (emit_debug_code()) {
cmpp(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
Immediate(StackFrame::TypeToMarker(type)));
- Check(equal, kStackFrameTypesMustMatch);
+ Check(equal, AbortReason::kStackFrameTypesMustMatch);
}
movp(rsp, rbp);
popq(rbp);
@@ -2548,7 +2613,7 @@ void MacroAssembler::LeaveExitFrameEpilogue() {
Operand context_operand = ExternalOperand(context_address);
movp(rsi, context_operand);
#ifdef DEBUG
- movp(context_operand, Immediate(0));
+ movp(context_operand, Immediate(Context::kInvalidContext));
#endif
// Clear the top frame.
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 73650f36e5..4ceab2cf9c 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -345,6 +345,11 @@ class TurboAssembler : public Assembler {
void Call(ExternalReference ext);
void Call(Label* target) { call(target); }
+ void RetpolineCall(Register reg);
+ void RetpolineCall(Address destination, RelocInfo::Mode rmode);
+
+ void RetpolineJump(Register reg);
+
void CallForDeoptimization(Address target, RelocInfo::Mode rmode) {
call(target, rmode);
}
@@ -383,21 +388,21 @@ class TurboAssembler : public Assembler {
// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
- void Assert(Condition cc, BailoutReason reason);
+ void Assert(Condition cc, AbortReason reason);
// Like Assert(), but without condition.
// Use --debug_code to enable.
- void AssertUnreachable(BailoutReason reason);
+ void AssertUnreachable(AbortReason reason);
// Abort execution if a 64 bit register containing a 32 bit payload does not
// have zeros in the top 32 bits, enabled via --debug-code.
void AssertZeroExtended(Register reg);
// Like Assert(), but always enabled.
- void Check(Condition cc, BailoutReason reason);
+ void Check(Condition cc, AbortReason reason);
// Print a message to stdout and abort execution.
- void Abort(BailoutReason msg);
+ void Abort(AbortReason msg);
// Check that the stack is aligned.
void CheckStackAlignment();
diff --git a/deps/v8/src/x64/simulator-x64.cc b/deps/v8/src/x64/simulator-x64.cc
index 701842eab3..4797ae91bb 100644
--- a/deps/v8/src/x64/simulator-x64.cc
+++ b/deps/v8/src/x64/simulator-x64.cc
@@ -3,3 +3,5 @@
// found in the LICENSE file.
#include "src/x64/simulator-x64.h"
+
+// Since there is no simulator for the x64 architecture, this file is empty.
diff --git a/deps/v8/src/x64/simulator-x64.h b/deps/v8/src/x64/simulator-x64.h
index f1351c88cf..ce9f3592dc 100644
--- a/deps/v8/src/x64/simulator-x64.h
+++ b/deps/v8/src/x64/simulator-x64.h
@@ -5,46 +5,6 @@
#ifndef V8_X64_SIMULATOR_X64_H_
#define V8_X64_SIMULATOR_X64_H_
-#include "src/allocation.h"
-
-namespace v8 {
-namespace internal {
-
-// Since there is no simulator for the x64 architecture the only thing we can
-// do is to call the entry directly.
-// TODO(X64): Don't pass p0, since it isn't used?
-#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
- (entry(p0, p1, p2, p3, p4))
-
-typedef int (*regexp_matcher)(String*, int, const byte*,
- const byte*, int*, int, Address, int, Isolate*);
-
-// Call the generated regexp code directly. The code at the entry address should
-// expect eight int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
- p7, p8) \
- (FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, p8))
-
-// The stack limit beyond which we will throw stack overflow errors in
-// generated code. Because generated code on x64 uses the C stack, we
-// just use the C stack limit.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
- uintptr_t c_limit) {
- return c_limit;
- }
-
- static inline uintptr_t RegisterCTryCatch(Isolate* isolate,
- uintptr_t try_catch_address) {
- USE(isolate);
- return try_catch_address;
- }
-
- static inline void UnregisterCTryCatch(Isolate* isolate) { USE(isolate); }
-};
-
-} // namespace internal
-} // namespace v8
+// Since there is no simulator for the x64 architecture, this file is empty.
#endif // V8_X64_SIMULATOR_X64_H_
diff --git a/deps/v8/src/zone/accounting-allocator.cc b/deps/v8/src/zone/accounting-allocator.cc
index ee841fb4af..8ef141b4c1 100644
--- a/deps/v8/src/zone/accounting-allocator.cc
+++ b/deps/v8/src/zone/accounting-allocator.cc
@@ -10,6 +10,8 @@
#include <malloc.h> // NOLINT
#endif
+#include "src/allocation.h"
+
namespace v8 {
namespace internal {
@@ -82,11 +84,7 @@ Segment* AccountingAllocator::GetSegment(size_t bytes) {
}
Segment* AccountingAllocator::AllocateSegment(size_t bytes) {
- void* memory = malloc(bytes);
- if (memory == nullptr) {
- V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
- memory = malloc(bytes);
- }
+ void* memory = AllocWithRetry(bytes);
if (memory != nullptr) {
base::AtomicWord current =
base::Relaxed_AtomicIncrement(&current_memory_usage_, bytes);
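This hunk factors the inline "malloc, signal memory pressure, retry" dance into AllocWithRetry from src/allocation.h. A sketch of what the helper plausibly does, reconstructed from the lines removed here (the real definition lives in src/allocation.cc, which this patch also touches but which is not shown in this excerpt):

// Try once; on failure, give the embedder a chance to free memory, then retry.
void* AllocWithRetry(size_t size) {
  void* result = malloc(size);
  if (result == nullptr) {
    V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
    result = malloc(size);
  }
  return result;
}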
diff --git a/deps/v8/src/zone/zone-containers.h b/deps/v8/src/zone/zone-containers.h
index 78d25cc644..5e9fd0440a 100644
--- a/deps/v8/src/zone/zone-containers.h
+++ b/deps/v8/src/zone/zone-containers.h
@@ -41,6 +41,11 @@ class ZoneVector : public std::vector<T, ZoneAllocator<T>> {
ZoneVector(size_t size, T def, Zone* zone)
: std::vector<T, ZoneAllocator<T>>(size, def, ZoneAllocator<T>(zone)) {}
+ // Constructs a new vector and fills it with the contents of the given
+ // initializer list.
+ ZoneVector(std::initializer_list<T> list, Zone* zone)
+ : std::vector<T, ZoneAllocator<T>>(list, ZoneAllocator<T>(zone)) {}
+
// Constructs a new vector and fills it with the contents of the range
// [first, last).
template <class InputIt>
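The new constructor lets callers build small vectors from literals in one step. A usage sketch, assuming a live Zone* named zone:

ZoneVector<int> powers({1, 2, 4, 8}, zone);  // list contents copied into zone memory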
diff --git a/deps/v8/src/zone/zone.cc b/deps/v8/src/zone/zone.cc
index de8146de05..470f4c4177 100644
--- a/deps/v8/src/zone/zone.cc
+++ b/deps/v8/src/zone/zone.cc
@@ -42,7 +42,8 @@ const size_t kASanRedzoneBytes = 0;
} // namespace
-Zone::Zone(AccountingAllocator* allocator, const char* name)
+Zone::Zone(AccountingAllocator* allocator, const char* name,
+ SegmentSize segment_size)
: allocation_size_(0),
segment_bytes_allocated_(0),
position_(0),
@@ -50,7 +51,8 @@ Zone::Zone(AccountingAllocator* allocator, const char* name)
allocator_(allocator),
segment_head_(nullptr),
name_(name),
- sealed_(false) {
+ sealed_(false),
+ segment_size_(segment_size) {
allocator_->ZoneCreation(this);
}
@@ -148,6 +150,9 @@ Address Zone::NewExpand(size_t size) {
V8::FatalProcessOutOfMemory("Zone");
return nullptr;
}
+ if (segment_size_ == SegmentSize::kLarge) {
+ new_size = kMaximumSegmentSize;
+ }
if (new_size < kMinimumSegmentSize) {
new_size = kMinimumSegmentSize;
} else if (new_size > kMaximumSegmentSize) {
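The order matters in the hunk above: the kLarge override runs before the clamps, so it pins new_size at the maximum rather than merely raising the floor. A simplified restatement of the sizing policy, as a sketch:

size_t PickSegmentSize(size_t computed, SegmentSize policy) {
  if (policy == SegmentSize::kLarge) return kMaximumSegmentSize;
  if (computed < kMinimumSegmentSize) return kMinimumSegmentSize;
  if (computed > kMaximumSegmentSize) return kMaximumSegmentSize;
  return computed;
}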
diff --git a/deps/v8/src/zone/zone.h b/deps/v8/src/zone/zone.h
index c8c1fe3515..e15e3d116e 100644
--- a/deps/v8/src/zone/zone.h
+++ b/deps/v8/src/zone/zone.h
@@ -34,9 +34,13 @@ namespace internal {
//
// Note: The implementation is inherently not thread safe. Do not use
// from multi-threaded code.
+
+enum class SegmentSize { kLarge, kDefault };
+
class V8_EXPORT_PRIVATE Zone final {
public:
- Zone(AccountingAllocator* allocator, const char* name);
+ Zone(AccountingAllocator* allocator, const char* name,
+ SegmentSize segment_size = SegmentSize::kDefault);
~Zone();
// Allocate 'size' bytes of memory in the Zone; expands the Zone by
@@ -109,6 +113,7 @@ class V8_EXPORT_PRIVATE Zone final {
Segment* segment_head_;
const char* name_;
bool sealed_;
+ SegmentSize segment_size_;
};
// ZoneObject is an abstraction that helps define classes of objects
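Because the new parameter is defaulted, existing Zone call sites compile unchanged; a caller that expects heavy allocation can opt in explicitly. A usage sketch, assuming an Isolate* named isolate:

Zone zone(isolate->allocator(), ZONE_NAME, SegmentSize::kLarge);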
diff --git a/deps/v8/test/BUILD.gn b/deps/v8/test/BUILD.gn
index 14fa27bffd..532a3ddaa2 100644
--- a/deps/v8/test/BUILD.gn
+++ b/deps/v8/test/BUILD.gn
@@ -28,6 +28,7 @@ group("gn_all") {
":benchmarks_run",
":bot_default_run",
":default_run",
+ ":d8_default_run",
":mozilla_run",
"test262:test262_run",
]
@@ -44,6 +45,7 @@ group("default_tests") {
if (v8_test_isolation_mode != "noop") {
deps = [
":cctest_run",
+ ":debugger_run",
":fuzzer_run",
":inspector-test_run",
":intl_run",
@@ -74,6 +76,19 @@ v8_isolate_run("default") {
isolate = "default.isolate"
}
+v8_isolate_run("d8_default") {
+ deps = [
+ ":debugger_run",
+ ":intl_run",
+ ":message_run",
+ ":mjsunit_run",
+ ":preparser_run",
+ ":webkit_run",
+ ]
+
+ isolate = "d8_default.isolate"
+}
+
v8_isolate_run("optimize_for_size") {
deps = [
":cctest_run",
@@ -123,6 +138,14 @@ v8_isolate_run("cctest_exe") {
isolate = "cctest/cctest_exe.isolate"
}
+v8_isolate_run("debugger") {
+ deps = [
+ "..:d8_run",
+ ]
+
+ isolate = "debugger/debugger.isolate"
+}
+
v8_isolate_run("fuzzer") {
deps = [
"..:v8_simple_json_fuzzer",
diff --git a/deps/v8/test/benchmarks/testcfg.py b/deps/v8/test/benchmarks/testcfg.py
index 63c0f9f1f8..fd956aa765 100644
--- a/deps/v8/test/benchmarks/testcfg.py
+++ b/deps/v8/test/benchmarks/testcfg.py
@@ -34,29 +34,18 @@ from testrunner.local import testsuite
from testrunner.objects import testcase
-class BenchmarksVariantGenerator(testsuite.VariantGenerator):
- # Both --noopt and --stressopt are very slow. Add TF but without
- # always opt to match the way the benchmarks are run for performance
- # testing.
- def FilterVariantsByTest(self, testcase):
- outcomes = self.suite.GetStatusFileOutcomes(testcase)
- if statusfile.OnlyStandardVariant(outcomes):
- return self.standard_variant
- return self.fast_variants
+class VariantsGenerator(testsuite.VariantsGenerator):
+ def _get_variants(self, test):
+ return self._standard_variant
- def GetFlagSets(self, testcase, variant):
- return testsuite.FAST_VARIANT_FLAGS[variant]
-
-
-class BenchmarksTestSuite(testsuite.TestSuite):
+class TestSuite(testsuite.TestSuite):
def __init__(self, name, root):
- super(BenchmarksTestSuite, self).__init__(name, root)
+ super(TestSuite, self).__init__(name, root)
self.testroot = os.path.join(root, "data")
def ListTests(self, context):
- tests = []
- for test in [
+ tests = map(self._create_test, [
"kraken/ai-astar",
"kraken/audio-beat-detection",
"kraken/audio-dft",
@@ -113,40 +102,47 @@ class BenchmarksTestSuite(testsuite.TestSuite):
"sunspider/string-fasta",
"sunspider/string-tagcloud",
"sunspider/string-unpack-code",
- "sunspider/string-validate-input"]:
- tests.append(testcase.TestCase(self, test))
+ "sunspider/string-validate-input",
+ ])
return tests
- def GetParametersForTestCase(self, testcase, context):
+ def _test_class(self):
+ return TestCase
+
+ def _variants_gen_class(self):
+ return VariantsGenerator
+
+ def _LegacyVariantsGeneratorFactory(self):
+ return testsuite.StandardLegacyVariantsGenerator
+
+
+class TestCase(testcase.TestCase):
+ def _get_files_params(self, ctx):
+ path = self.path
+ testroot = self.suite.testroot
files = []
- if testcase.path.startswith("kraken"):
- files.append(os.path.join(self.testroot, "%s-data.js" % testcase.path))
- files.append(os.path.join(self.testroot, "%s.js" % testcase.path))
- elif testcase.path.startswith("octane"):
- files.append(os.path.join(self.testroot, "octane/base.js"))
- files.append(os.path.join(self.testroot, "%s.js" % testcase.path))
- if testcase.path.startswith("octane/gbemu"):
- files.append(os.path.join(self.testroot, "octane/gbemu-part2.js"))
- elif testcase.path.startswith("octane/typescript"):
- files.append(os.path.join(self.testroot,
+ if path.startswith("kraken"):
+ files.append(os.path.join(testroot, "%s-data.js" % path))
+ files.append(os.path.join(testroot, "%s.js" % path))
+ elif path.startswith("octane"):
+ files.append(os.path.join(testroot, "octane/base.js"))
+ files.append(os.path.join(testroot, "%s.js" % path))
+ if path.startswith("octane/gbemu"):
+ files.append(os.path.join(testroot, "octane/gbemu-part2.js"))
+ elif path.startswith("octane/typescript"):
+ files.append(os.path.join(testroot,
"octane/typescript-compiler.js"))
- files.append(os.path.join(self.testroot, "octane/typescript-input.js"))
- elif testcase.path.startswith("octane/zlib"):
- files.append(os.path.join(self.testroot, "octane/zlib-data.js"))
+ files.append(os.path.join(testroot, "octane/typescript-input.js"))
+ elif path.startswith("octane/zlib"):
+ files.append(os.path.join(testroot, "octane/zlib-data.js"))
files += ["-e", "BenchmarkSuite.RunSuites({});"]
- elif testcase.path.startswith("sunspider"):
- files.append(os.path.join(self.testroot, "%s.js" % testcase.path))
-
- return files, testcase.flags + context.mode_flags, {}
-
- def GetSourceForTest(self, testcase):
- filename = os.path.join(self.testroot, testcase.path + ".js")
- with open(filename) as f:
- return f.read()
+ elif path.startswith("sunspider"):
+ files.append(os.path.join(testroot, "%s.js" % path))
+ return files
- def _VariantGeneratorFactory(self):
- return BenchmarksVariantGenerator
+ def _get_source_path(self):
+ return os.path.join(self.suite.testroot, self.path + self._get_suffix())
def GetSuite(name, root):
- return BenchmarksTestSuite(name, root)
+ return TestSuite(name, root)
diff --git a/deps/v8/test/bot_default.gyp b/deps/v8/test/bot_default.gyp
index 13c77e2d03..11223e068f 100644
--- a/deps/v8/test/bot_default.gyp
+++ b/deps/v8/test/bot_default.gyp
@@ -11,6 +11,7 @@
'type': 'none',
'dependencies': [
'cctest/cctest.gyp:cctest_run',
+ 'debugger/debugger.gyp:debugger_run',
'fuzzer/fuzzer.gyp:fuzzer_run',
'inspector/inspector.gyp:inspector-test_run',
'intl/intl.gyp:intl_run',
diff --git a/deps/v8/test/cctest/BUILD.gn b/deps/v8/test/cctest/BUILD.gn
index ca81ef1f0d..b6c9109d75 100644
--- a/deps/v8/test/cctest/BUILD.gn
+++ b/deps/v8/test/cctest/BUILD.gn
@@ -82,8 +82,10 @@ v8_source_set("cctest_sources") {
"compiler/test-run-load-store.cc",
"compiler/test-run-machops.cc",
"compiler/test-run-native-calls.cc",
+ "compiler/test-run-retpoline.cc",
"compiler/test-run-stackcheck.cc",
"compiler/test-run-stubs.cc",
+ "compiler/test-run-tail-calls.cc",
"compiler/test-run-unwinding-info.cc",
"compiler/test-run-variables.cc",
"compiler/test-run-wasm-machops.cc",
@@ -103,6 +105,7 @@ v8_source_set("cctest_sources") {
"heap/test-array-buffer-tracker.cc",
"heap/test-compaction.cc",
"heap/test-concurrent-marking.cc",
+ "heap/test-embedder-tracing.cc",
"heap/test-heap.cc",
"heap/test-incremental-marking.cc",
"heap/test-invalidated-slots.cc",
diff --git a/deps/v8/test/cctest/assembler-helper-arm.cc b/deps/v8/test/cctest/assembler-helper-arm.cc
index 73079ed701..3ba5f6018e 100644
--- a/deps/v8/test/cctest/assembler-helper-arm.cc
+++ b/deps/v8/test/cctest/assembler-helper-arm.cc
@@ -4,15 +4,15 @@
#include "test/cctest/assembler-helper-arm.h"
+#include "src/assembler-inl.h"
+#include "src/isolate-inl.h"
#include "src/v8.h"
#include "test/cctest/cctest.h"
-#include "src/isolate-inl.h"
-
namespace v8 {
namespace internal {
-Address AssembleCode(std::function<void(Assembler&)> assemble) {
+Handle<Code> AssembleCodeImpl(std::function<void(Assembler&)> assemble) {
Isolate* isolate = CcTest::i_isolate();
Assembler assm(isolate, nullptr, 0);
@@ -26,7 +26,7 @@ Address AssembleCode(std::function<void(Assembler&)> assemble) {
if (FLAG_print_code) {
code->Print();
}
- return code->entry();
+ return code;
}
} // namespace internal
diff --git a/deps/v8/test/cctest/assembler-helper-arm.h b/deps/v8/test/cctest/assembler-helper-arm.h
index dd24087bda..c0b0cf8255 100644
--- a/deps/v8/test/cctest/assembler-helper-arm.h
+++ b/deps/v8/test/cctest/assembler-helper-arm.h
@@ -7,20 +7,27 @@
#include <functional>
-#include "src/macro-assembler.h"
+#include "src/handles.h"
+#include "src/simulator.h"
namespace v8 {
namespace internal {
-// These function prototypes have 5 arguments since they are used with the
-// CALL_GENERATED_CODE macro.
-typedef Object* (*F_iiiii)(int x, int p1, int p2, int p3, int p4);
-typedef Object* (*F_piiii)(void* p0, int p1, int p2, int p3, int p4);
-typedef Object* (*F_ppiii)(void* p0, void* p1, int p2, int p3, int p4);
-typedef Object* (*F_pppii)(void* p0, void* p1, void* p2, int p3, int p4);
-typedef Object* (*F_ippii)(int p0, void* p1, void* p2, int p3, int p4);
-
-Address AssembleCode(std::function<void(Assembler&)> assemble);
+// TODO(arm): Refine these signatures per test case; they can have arbitrary
+// return and argument types and an arbitrary number of arguments.
+using F_iiiii = Object*(int x, int p1, int p2, int p3, int p4);
+using F_piiii = Object*(void* p0, int p1, int p2, int p3, int p4);
+using F_ppiii = Object*(void* p0, void* p1, int p2, int p3, int p4);
+using F_pppii = Object*(void* p0, void* p1, void* p2, int p3, int p4);
+using F_ippii = Object*(int p0, void* p1, void* p2, int p3, int p4);
+
+Handle<Code> AssembleCodeImpl(std::function<void(Assembler&)> assemble);
+
+template <typename Signature>
+GeneratedCode<Signature> AssembleCode(
+ std::function<void(Assembler&)> assemble) {
+ return GeneratedCode<Signature>::FromCode(*AssembleCodeImpl(assemble));
+}
} // namespace internal
} // namespace v8
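A sketch of how a test would use the new template wrapper, reusing the F_iiiii signature declared above (the emitted instructions are a placeholder):

auto f = AssembleCode<F_iiiii>([](Assembler& assm) {
  // ... emit ARM instructions through assm ...
});
Object* result = f.Call(1, 2, 3, 4, 5);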
diff --git a/deps/v8/test/cctest/cctest.cc b/deps/v8/test/cctest/cctest.cc
index 8d33884b5b..5daef0425b 100644
--- a/deps/v8/test/cctest/cctest.cc
+++ b/deps/v8/test/cctest/cctest.cc
@@ -269,7 +269,7 @@ int main(int argc, char* argv[]) {
v8::V8::Initialize();
v8::V8::InitializeExternalStartupData(argv[0]);
- if (i::trap_handler::UseTrapHandler()) {
+ if (i::trap_handler::IsTrapHandlerEnabled()) {
v8::V8::RegisterDefaultSignalHandler();
}
diff --git a/deps/v8/test/cctest/cctest.gyp b/deps/v8/test/cctest/cctest.gyp
index 92c64b8c87..5124495ee8 100644
--- a/deps/v8/test/cctest/cctest.gyp
+++ b/deps/v8/test/cctest/cctest.gyp
@@ -71,8 +71,10 @@
'compiler/test-run-load-store.cc',
'compiler/test-run-machops.cc',
'compiler/test-run-native-calls.cc',
+ 'compiler/test-run-retpoline.cc',
'compiler/test-run-stackcheck.cc',
'compiler/test-run-stubs.cc',
+ 'compiler/test-run-tail-calls.cc',
'compiler/test-run-variables.cc',
'compiler/test-run-wasm-machops.cc',
'compiler/value-helper.cc',
@@ -93,6 +95,7 @@
'heap/test-array-buffer-tracker.cc',
'heap/test-compaction.cc',
'heap/test-concurrent-marking.cc',
+ 'heap/test-embedder-tracing.cc',
'heap/test-heap.cc',
'heap/test-incremental-marking.cc',
'heap/test-invalidated-slots.cc',
diff --git a/deps/v8/test/cctest/cctest.h b/deps/v8/test/cctest/cctest.h
index 8a7b6d1462..37c4c0cfac 100644
--- a/deps/v8/test/cctest/cctest.h
+++ b/deps/v8/test/cctest/cctest.h
@@ -571,16 +571,22 @@ static inline void CheckDoubleEquals(double expected, double actual) {
static inline uint8_t* AllocateAssemblerBuffer(
size_t* allocated,
size_t requested = v8::internal::AssemblerBase::kMinimalBufferSize) {
- size_t page_size = v8::base::OS::AllocatePageSize();
+ size_t page_size = v8::internal::AllocatePageSize();
size_t alloc_size = RoundUp(requested, page_size);
- void* result =
- v8::base::OS::Allocate(nullptr, alloc_size, page_size,
- v8::base::OS::MemoryPermission::kReadWriteExecute);
+ void* result = v8::internal::AllocatePages(
+ nullptr, alloc_size, page_size, v8::PageAllocator::kReadWriteExecute);
CHECK(result);
*allocated = alloc_size;
return static_cast<uint8_t*>(result);
}
+static inline void MakeAssemblerBufferExecutable(uint8_t* buffer,
+ size_t allocated) {
+ bool result = v8::internal::SetPermissions(buffer, allocated,
+ v8::PageAllocator::kReadExecute);
+ CHECK(result);
+}
+
static v8::debug::DebugDelegate dummy_delegate;
static inline void EnableDebugger(v8::Isolate* isolate) {
@@ -674,10 +680,18 @@ class ManualGCScope {
class TestPlatform : public v8::Platform {
public:
// v8::Platform implementation.
+ v8::PageAllocator* GetPageAllocator() override {
+ return old_platform_->GetPageAllocator();
+ }
+
void OnCriticalMemoryPressure() override {
old_platform_->OnCriticalMemoryPressure();
}
+ bool OnCriticalMemoryPressure(size_t length) override {
+ return old_platform_->OnCriticalMemoryPressure(length);
+ }
+
std::shared_ptr<v8::TaskRunner> GetForegroundTaskRunner(
v8::Isolate* isolate) override {
return old_platform_->GetForegroundTaskRunner(isolate);
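The two buffer helpers above are meant to be used as a pair. A sketch of the intended flow, assuming an Isolate* named isolate and some emitted code in between:

size_t allocated;
uint8_t* buffer = AllocateAssemblerBuffer(&allocated);   // read-write-execute pages
Assembler assm(isolate, buffer, static_cast<int>(allocated));
// ... emit code into the buffer ...
MakeAssemblerBufferExecutable(buffer, allocated);        // tighten to read-execute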
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index d3c8a8d393..2840bccf8c 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -71,17 +71,25 @@
# BUG(5193). The cpu profiler tests are notoriously flaky.
'test-cpu-profiler/CollectCpuProfile': [SKIP],
+ 'test-cpu-profiler/CollectCpuProfileSamples': [SKIP],
'test-cpu-profiler/CollectDeoptEvents': [SKIP],
'test-cpu-profiler/CpuProfileDeepStack': [SKIP],
+ 'test-cpu-profiler/DeoptAtFirstLevelInlinedSource': [SKIP],
+ 'test-cpu-profiler/DeoptAtSecondLevelInlinedSource': [SKIP],
+ 'test-cpu-profiler/DeoptUntrackedFunction': [SKIP],
'test-cpu-profiler/FunctionApplySample': [SKIP],
'test-cpu-profiler/HotDeoptNoFrameEntry': [SKIP],
'test-cpu-profiler/JsNative1JsNative2JsSample': [SKIP],
'test-cpu-profiler/JsNativeJsRuntimeJsSample': [SKIP],
'test-cpu-profiler/JsNativeJsRuntimeJsSampleMultiple': [SKIP],
'test-cpu-profiler/JsNativeJsSample': [SKIP],
+ 'test-cpu-profiler/NativeAccessorUninitializedIC': [SKIP],
'test-cpu-profiler/SampleWhenFrameIsNotSetup': [SKIP],
'test-sampler/LibSamplerCollectSample': [SKIP],
+ # BUG(7202). The test is flaky.
+ 'test-cpu-profiler/NativeFrameStackTrace': [SKIP],
+
# BUG(7054)
'test-cpu-profiler/StaticCollectSampleAPI': [SKIP],
@@ -104,6 +112,7 @@
# Test that serialization with unknown external reference fails.
'test-serialize/SnapshotCreatorUnknownExternalReferences': [FAIL],
+ 'test-serialize/SnapshotCreatorUnknownHandles': [FAIL],
'test-serialize/SnapshotCreatorNoExternalReferencesCustomFail1': [FAIL],
'test-serialize/SnapshotCreatorNoExternalReferencesCustomFail2': [FAIL],
@@ -113,6 +122,7 @@
'test-strings/StringOOM*': [PASS, ['mode == debug', SKIP]],
'test-serialize/CustomSnapshotDataBlobImmortalImmovableRoots': [PASS, ['mode == debug', SKIP]],
'test-parsing/ObjectRestNegativeTestSlow': [PASS, ['mode == debug', SKIP]],
+
}], # ALWAYS
##############################################################################
@@ -158,6 +168,15 @@
}], # 'arch == arm64 and mode == debug and simulator_run'
##############################################################################
+# TODO(ahaas): Port multiple return values to ARM, MIPS, S390 and PPC
+['arch == s390 or arch == s390x or arch == ppc or arch == ppc64', {
+ 'test-multiple-return/*': [SKIP],
+}],
+# TODO(ahaas): Port multiple return values to ARM, MIPS, S390 and PPC
+['arch == mips or arch == mips64 or arch == mipsel or arch == mips64el or arch == s390 or arch == s390x or arch == ppc or arch == ppc64', {
+ 'test-multiple-return/ReturnLastValue*': [SKIP],
+}],
+##############################################################################
['asan == True', {
# Skip tests not suitable for ASAN.
'test-assembler-x64/AssemblerX64XchglOperations': [SKIP],
@@ -178,6 +197,9 @@
'test-cpu-profiler/TickEvents': [SKIP],
# BUG(v8:6924). The test allocates a lot of memory.
'test-api/NewStringRangeError': [PASS, NO_VARIANTS],
+
+ # BUG(chromium:794911).
+ 'test-cpu-profiler/TracingCpuProfiler': [SKIP],
}], # 'tsan == True'
##############################################################################
@@ -385,10 +407,4 @@
'test-dtoa/*': [SKIP],
}], # variant == wasm_traps
-##############################################################################
-# BUG(v8:7138).
-['arch == arm and not simulator_run and variant == wasm_traps', {
- '*': [SKIP],
-}], # arch == arm and not simulator_run and variant == wasm_traps
-
]
diff --git a/deps/v8/test/cctest/compiler/c-signature.h b/deps/v8/test/cctest/compiler/c-signature.h
index 1c2f9638f4..0aea6e938b 100644
--- a/deps/v8/test/cctest/compiler/c-signature.h
+++ b/deps/v8/test/cctest/compiler/c-signature.h
@@ -28,18 +28,16 @@ namespace compiler {
V(int*, MachineType::Pointer())
template <typename T>
-inline MachineType MachineTypeForC() {
- while (false) {
- // All other types T must be assignable to Object*
- *(static_cast<Object* volatile*>(0)) = static_cast<T>(0);
- }
+inline constexpr MachineType MachineTypeForC() {
+ static_assert(std::is_convertible<T, Object*>::value,
+ "all non-specialized types must be convertible to Object*");
return MachineType::AnyTagged();
}
-#define DECLARE_TEMPLATE_SPECIALIZATION(ctype, mtype) \
- template <> \
- inline MachineType MachineTypeForC<ctype>() { \
- return mtype; \
+#define DECLARE_TEMPLATE_SPECIALIZATION(ctype, mtype) \
+ template <> \
+ inline MachineType constexpr MachineTypeForC<ctype>() { \
+ return mtype; \
}
FOREACH_CTYPE_MACHINE_TYPE_MAPPING(DECLARE_TEMPLATE_SPECIALIZATION)
#undef DECLARE_TEMPLATE_SPECIALIZATION
@@ -51,21 +49,13 @@ class CSignature : public MachineSignature {
: MachineSignature(return_count, parameter_count, reps) {}
public:
- template <typename P1 = void, typename P2 = void, typename P3 = void,
- typename P4 = void, typename P5 = void>
+ template <typename... Params>
static void VerifyParams(MachineSignature* sig) {
- // Verifies the C signature against the machine types. Maximum {5} params.
- CHECK_LT(sig->parameter_count(), 6u);
- const int kMax = 5;
- MachineType params[] = {MachineTypeForC<P1>(), MachineTypeForC<P2>(),
- MachineTypeForC<P3>(), MachineTypeForC<P4>(),
- MachineTypeForC<P5>()};
- for (int p = kMax - 1; p >= 0; p--) {
- if (p < static_cast<int>(sig->parameter_count())) {
- CHECK_EQ(sig->GetParam(p), params[p]);
- } else {
- CHECK_EQ(MachineType::None(), params[p]);
- }
+ // Verifies the C signature against the machine types.
+ std::array<MachineType, sizeof...(Params)> params{
+ {MachineTypeForC<Params>()...}};
+ for (size_t p = 0; p < params.size(); ++p) {
+ CHECK_EQ(sig->GetParam(p), params[p]);
}
}
@@ -73,96 +63,59 @@ class CSignature : public MachineSignature {
return reinterpret_cast<CSignature*>(msig);
}
+ template <typename... ParamMachineTypes>
static CSignature* New(Zone* zone, MachineType ret,
- MachineType p1 = MachineType::None(),
- MachineType p2 = MachineType::None(),
- MachineType p3 = MachineType::None(),
- MachineType p4 = MachineType::None(),
- MachineType p5 = MachineType::None()) {
- MachineType* buffer = zone->NewArray<MachineType>(6);
- int pos = 0;
+ ParamMachineTypes... params) {
+ constexpr size_t param_count = sizeof...(params);
+ std::array<MachineType, param_count> param_arr{{params...}};
+ const size_t buffer_size =
+ param_count + (ret == MachineType::None() ? 0 : 1);
+ MachineType* buffer = zone->NewArray<MachineType>(buffer_size);
+ size_t pos = 0;
size_t return_count = 0;
if (ret != MachineType::None()) {
buffer[pos++] = ret;
return_count++;
}
- buffer[pos++] = p1;
- buffer[pos++] = p2;
- buffer[pos++] = p3;
- buffer[pos++] = p4;
- buffer[pos++] = p5;
- size_t param_count = 5;
- if (p5 == MachineType::None()) param_count--;
- if (p4 == MachineType::None()) param_count--;
- if (p3 == MachineType::None()) param_count--;
- if (p2 == MachineType::None()) param_count--;
- if (p1 == MachineType::None()) param_count--;
- for (size_t i = 0; i < param_count; i++) {
- // Check that there are no MachineType::None()'s in the middle of
- // parameters.
- CHECK_NE(MachineType::None(), buffer[return_count + i]);
+ for (MachineType p : param_arr) {
+ // Check that there are no MachineType::None()'s in the parameters.
+ CHECK_NE(MachineType::None(), p);
+ buffer[pos++] = p;
}
+ DCHECK_EQ(buffer_size, pos);
return new (zone) CSignature(return_count, param_count, buffer);
}
};
-
-template <typename Ret, uint16_t kParamCount>
-class CSignatureOf : public CSignature {
- protected:
- MachineType storage_[1 + kParamCount];
-
- CSignatureOf()
- : CSignature(MachineTypeForC<Ret>() != MachineType::None() ? 1 : 0,
- kParamCount, reinterpret_cast<MachineType*>(&storage_)) {
- if (return_count_ == 1) storage_[0] = MachineTypeForC<Ret>();
- }
- void Set(int index, MachineType type) {
- CHECK_LE(0, index);
- CHECK_LT(index, kParamCount);
- reps_[return_count_ + index] = type;
- }
-};
-
// Helper classes for instantiating Signature objects to be callable from C.
-template <typename Ret>
-class CSignature0 : public CSignatureOf<Ret, 0> {
- public:
- CSignature0() : CSignatureOf<Ret, 0>() {}
-};
-
-template <typename Ret, typename P1>
-class CSignature1 : public CSignatureOf<Ret, 1> {
+template <typename Ret, typename... Params>
+class CSignatureOf : public CSignature {
public:
- CSignature1() : CSignatureOf<Ret, 1>() {
- this->Set(0, MachineTypeForC<P1>());
+ CSignatureOf() : CSignature(kReturnCount, kParamCount, storage_) {
+ constexpr std::array<MachineType, kParamCount> param_types{
+ MachineTypeForC<Params>()...};
+ if (kReturnCount == 1) storage_[0] = MachineTypeForC<Ret>();
+ static_assert(
+ std::is_same<decltype(*reps_), decltype(*param_types.data())>::value,
+ "type mismatch, cannot memcpy");
+ memcpy(storage_ + kReturnCount, param_types.data(),
+ sizeof(*storage_) * kParamCount);
}
-};
-template <typename Ret, typename P1, typename P2>
-class CSignature2 : public CSignatureOf<Ret, 2> {
- public:
- CSignature2() : CSignatureOf<Ret, 2>() {
- this->Set(0, MachineTypeForC<P1>());
- this->Set(1, MachineTypeForC<P2>());
- }
-};
+ private:
+ static constexpr size_t kReturnCount =
+ MachineTypeForC<Ret>() == MachineType::None() ? 0 : 1;
+ static constexpr size_t kParamCount = sizeof...(Params);
-template <typename Ret, typename P1, typename P2, typename P3>
-class CSignature3 : public CSignatureOf<Ret, 3> {
- public:
- CSignature3() : CSignatureOf<Ret, 3>() {
- this->Set(0, MachineTypeForC<P1>());
- this->Set(1, MachineTypeForC<P2>());
- this->Set(2, MachineTypeForC<P3>());
- }
+ MachineType storage_[kReturnCount + kParamCount];
};
-typedef CSignature2<int32_t, int32_t, int32_t> CSignature_i_ii;
-typedef CSignature2<uint32_t, uint32_t, uint32_t> CSignature_u_uu;
-typedef CSignature2<float, float, float> CSignature_f_ff;
-typedef CSignature2<double, double, double> CSignature_d_dd;
-typedef CSignature2<Object*, Object*, Object*> CSignature_o_oo;
+typedef CSignatureOf<int32_t, int32_t, int32_t> CSignature_i_ii;
+typedef CSignatureOf<uint32_t, uint32_t, uint32_t> CSignature_u_uu;
+typedef CSignatureOf<float, float, float> CSignature_f_ff;
+typedef CSignatureOf<double, double, double> CSignature_d_dd;
+typedef CSignatureOf<Object*, Object*, Object*> CSignature_o_oo;
+
} // namespace compiler
} // namespace internal
} // namespace v8
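With the variadic rewrite, every fixed-arity CSignatureN helper collapses into one template, and the typedefs above keep the old spellings working. A usage sketch:

CSignatureOf<int32_t, int32_t, int32_t> sig;       // the old CSignature2 shape
CSignature::VerifyParams<int32_t, int32_t>(&sig);  // checks both parameter types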
diff --git a/deps/v8/test/cctest/compiler/call-tester.h b/deps/v8/test/cctest/compiler/call-tester.h
index 20dc3c8b2d..2907d4d246 100644
--- a/deps/v8/test/cctest/compiler/call-tester.h
+++ b/deps/v8/test/cctest/compiler/call-tester.h
@@ -6,119 +6,15 @@
#define V8_CCTEST_COMPILER_CALL_TESTER_H_
#include "src/handles.h"
+#include "src/objects/code.h"
#include "src/simulator.h"
#include "test/cctest/compiler/c-signature.h"
-#if V8_TARGET_ARCH_IA32
-#if __GNUC__
-#define V8_CDECL __attribute__((cdecl))
-#else
-#define V8_CDECL __cdecl
-#endif
-#else
-#define V8_CDECL
-#endif
-
namespace v8 {
namespace internal {
namespace compiler {
template <typename R>
-inline R CastReturnValue(uintptr_t r) {
- return reinterpret_cast<R>(r);
-}
-
-template <>
-inline void CastReturnValue(uintptr_t r) {}
-
-template <>
-inline bool CastReturnValue(uintptr_t r) {
- return static_cast<bool>(r);
-}
-
-template <>
-inline int32_t CastReturnValue(uintptr_t r) {
- return static_cast<int32_t>(r);
-}
-
-template <>
-inline uint32_t CastReturnValue(uintptr_t r) {
- return static_cast<uint32_t>(r);
-}
-
-template <>
-inline int64_t CastReturnValue(uintptr_t r) {
- return static_cast<int64_t>(r);
-}
-
-template <>
-inline uint64_t CastReturnValue(uintptr_t r) {
- return static_cast<uint64_t>(r);
-}
-
-template <>
-inline int16_t CastReturnValue(uintptr_t r) {
- return static_cast<int16_t>(r);
-}
-
-template <>
-inline uint16_t CastReturnValue(uintptr_t r) {
- return static_cast<uint16_t>(r);
-}
-
-template <>
-inline int8_t CastReturnValue(uintptr_t r) {
- return static_cast<int8_t>(r);
-}
-
-template <>
-inline uint8_t CastReturnValue(uintptr_t r) {
- return static_cast<uint8_t>(r);
-}
-
-template <>
-inline double CastReturnValue(uintptr_t r) {
- UNREACHABLE();
-}
-
-template <typename R>
-struct ParameterTraits {
- static uintptr_t Cast(R r) { return static_cast<uintptr_t>(r); }
-};
-
-template <>
-struct ParameterTraits<int*> {
- static uintptr_t Cast(int* r) { return reinterpret_cast<uintptr_t>(r); }
-};
-
-template <typename T>
-struct ParameterTraits<T*> {
- static uintptr_t Cast(void* r) { return reinterpret_cast<uintptr_t>(r); }
-};
-
-
-#if !V8_TARGET_ARCH_32_BIT
-
-// Additional template specialization required for mips64 to sign-extend
-// parameters defined by calling convention.
-template <>
-struct ParameterTraits<int32_t> {
- static int64_t Cast(int32_t r) { return static_cast<int64_t>(r); }
-};
-
-#if !V8_TARGET_ARCH_PPC64
-template <>
-struct ParameterTraits<uint32_t> {
- static int64_t Cast(uint32_t r) {
- return static_cast<int64_t>(static_cast<int32_t>(r));
- }
-};
-#endif
-
-#endif // !V8_TARGET_ARCH_64_BIT
-
-
-template <typename R>
class CallHelper {
public:
explicit CallHelper(Isolate* isolate, MachineSignature* csig)
@@ -129,9 +25,10 @@ class CallHelper {
template <typename... Params>
R Call(Params... args) {
- using FType = R(V8_CDECL*)(Params...);
CSignature::VerifyParams<Params...>(csig_);
- return DoCall(FUNCTION_CAST<FType>(Generate()), args...);
+ byte* entry = Generate();
+ auto fn = GeneratedCode<R, Params...>::FromAddress(isolate_, entry);
+ return fn.Call(args...);
}
protected:
@@ -140,51 +37,6 @@ class CallHelper {
virtual byte* Generate() = 0;
private:
-#if USE_SIMULATOR && V8_TARGET_ARCH_ARM64
- uintptr_t CallSimulator(byte* f, Simulator::CallArgument* args) {
- Simulator* simulator = Simulator::current(isolate_);
- return static_cast<uintptr_t>(simulator->CallInt64(f, args));
- }
-
- template <typename F, typename... Params>
- R DoCall(F* f, Params... args) {
- Simulator::CallArgument args_arr[] = {Simulator::CallArgument(args)...,
- Simulator::CallArgument::End()};
- return CastReturnValue<R>(CallSimulator(FUNCTION_ADDR(f), args_arr));
- }
-#elif USE_SIMULATOR && \
- (V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_S390X)
- uintptr_t CallSimulator(byte* f, int64_t p1 = 0, int64_t p2 = 0,
- int64_t p3 = 0, int64_t p4 = 0, int64_t p5 = 0) {
- Simulator* simulator = Simulator::current(isolate_);
- return static_cast<uintptr_t>(simulator->Call(f, 5, p1, p2, p3, p4, p5));
- }
-
- template <typename F, typename... Params>
- R DoCall(F* f, Params... args) {
- return CastReturnValue<R>(CallSimulator(
- FUNCTION_ADDR(f), ParameterTraits<Params>::Cast(args)...));
- }
-#elif USE_SIMULATOR && (V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS || \
- V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390)
- uintptr_t CallSimulator(byte* f, int32_t p1 = 0, int32_t p2 = 0,
- int32_t p3 = 0, int32_t p4 = 0, int32_t p5 = 0) {
- Simulator* simulator = Simulator::current(isolate_);
- return static_cast<uintptr_t>(simulator->Call(f, 5, p1, p2, p3, p4, p5));
- }
-
- template <typename F, typename... Params>
- R DoCall(F* f, Params... args) {
- return CastReturnValue<R>(CallSimulator(
- FUNCTION_ADDR(f), ParameterTraits<Params>::Cast(args)...));
- }
-#else
- template <typename F, typename... Params>
- R DoCall(F* f, Params... args) {
- return f(args...);
- }
-#endif
-
Isolate* isolate_;
};
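All of the deleted per-architecture DoCall overloads reduce to the same shape, since GeneratedCode internally decides between a native call and a simulator dispatch. A sketch of that pattern (not code from this patch):

template <typename R, typename... Params>
R InvokeGenerated(Isolate* isolate, byte* entry, Params... args) {
  auto fn = GeneratedCode<R, Params...>::FromAddress(isolate, entry);
  return fn.Call(args...);
}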
diff --git a/deps/v8/test/cctest/compiler/code-assembler-tester.h b/deps/v8/test/cctest/compiler/code-assembler-tester.h
index 7a75441ad0..20029809b4 100644
--- a/deps/v8/test/cctest/compiler/code-assembler-tester.h
+++ b/deps/v8/test/cctest/compiler/code-assembler-tester.h
@@ -36,10 +36,11 @@ class CodeAssemblerTester {
scope_(isolate),
state_(isolate, &zone_, 0, kind, "test") {}
- CodeAssemblerTester(Isolate* isolate, CallDescriptor* call_descriptor)
+ CodeAssemblerTester(Isolate* isolate, CallDescriptor* call_descriptor,
+ const char* name = "test")
: zone_(isolate->allocator(), ZONE_NAME),
scope_(isolate),
- state_(isolate, &zone_, call_descriptor, Code::STUB, "test", 0, -1) {}
+ state_(isolate, &zone_, call_descriptor, Code::STUB, name, 0, -1) {}
CodeAssemblerState* state() { return &state_; }
diff --git a/deps/v8/test/cctest/compiler/codegen-tester.cc b/deps/v8/test/cctest/compiler/codegen-tester.cc
index a3548fe8d0..f66385a92e 100644
--- a/deps/v8/test/cctest/compiler/codegen-tester.cc
+++ b/deps/v8/test/cctest/compiler/codegen-tester.cc
@@ -48,92 +48,92 @@ TEST(CompareWrapper) {
CHECK_EQ(true, wWord32Equal.Int32Compare(257, 257));
CHECK_EQ(true, wWord32Equal.Int32Compare(65539, 65539));
CHECK_EQ(true, wWord32Equal.Int32Compare(-1, -1));
- CHECK_EQ(true, wWord32Equal.Int32Compare(0xffffffff, 0xffffffff));
+ CHECK_EQ(true, wWord32Equal.Int32Compare(0xFFFFFFFF, 0xFFFFFFFF));
CHECK_EQ(false, wWord32Equal.Int32Compare(0, 1));
CHECK_EQ(false, wWord32Equal.Int32Compare(257, 256));
CHECK_EQ(false, wWord32Equal.Int32Compare(65539, 65537));
CHECK_EQ(false, wWord32Equal.Int32Compare(-1, -2));
- CHECK_EQ(false, wWord32Equal.Int32Compare(0xffffffff, 0xfffffffe));
+ CHECK_EQ(false, wWord32Equal.Int32Compare(0xFFFFFFFF, 0xFFFFFFFE));
CHECK_EQ(false, wInt32LessThan.Int32Compare(0, 0));
CHECK_EQ(false, wInt32LessThan.Int32Compare(357, 357));
CHECK_EQ(false, wInt32LessThan.Int32Compare(75539, 75539));
CHECK_EQ(false, wInt32LessThan.Int32Compare(-1, -1));
- CHECK_EQ(false, wInt32LessThan.Int32Compare(0xffffffff, 0xffffffff));
+ CHECK_EQ(false, wInt32LessThan.Int32Compare(0xFFFFFFFF, 0xFFFFFFFF));
CHECK_EQ(true, wInt32LessThan.Int32Compare(0, 1));
CHECK_EQ(true, wInt32LessThan.Int32Compare(456, 457));
CHECK_EQ(true, wInt32LessThan.Int32Compare(85537, 85539));
CHECK_EQ(true, wInt32LessThan.Int32Compare(-2, -1));
- CHECK_EQ(true, wInt32LessThan.Int32Compare(0xfffffffe, 0xffffffff));
+ CHECK_EQ(true, wInt32LessThan.Int32Compare(0xFFFFFFFE, 0xFFFFFFFF));
CHECK_EQ(false, wInt32LessThan.Int32Compare(1, 0));
CHECK_EQ(false, wInt32LessThan.Int32Compare(457, 456));
CHECK_EQ(false, wInt32LessThan.Int32Compare(85539, 85537));
CHECK_EQ(false, wInt32LessThan.Int32Compare(-1, -2));
- CHECK_EQ(false, wInt32LessThan.Int32Compare(0xffffffff, 0xfffffffe));
+ CHECK_EQ(false, wInt32LessThan.Int32Compare(0xFFFFFFFF, 0xFFFFFFFE));
CHECK_EQ(true, wInt32LessThanOrEqual.Int32Compare(0, 0));
CHECK_EQ(true, wInt32LessThanOrEqual.Int32Compare(357, 357));
CHECK_EQ(true, wInt32LessThanOrEqual.Int32Compare(75539, 75539));
CHECK_EQ(true, wInt32LessThanOrEqual.Int32Compare(-1, -1));
- CHECK_EQ(true, wInt32LessThanOrEqual.Int32Compare(0xffffffff, 0xffffffff));
+ CHECK_EQ(true, wInt32LessThanOrEqual.Int32Compare(0xFFFFFFFF, 0xFFFFFFFF));
CHECK_EQ(true, wInt32LessThanOrEqual.Int32Compare(0, 1));
CHECK_EQ(true, wInt32LessThanOrEqual.Int32Compare(456, 457));
CHECK_EQ(true, wInt32LessThanOrEqual.Int32Compare(85537, 85539));
CHECK_EQ(true, wInt32LessThanOrEqual.Int32Compare(-2, -1));
- CHECK_EQ(true, wInt32LessThanOrEqual.Int32Compare(0xfffffffe, 0xffffffff));
+ CHECK_EQ(true, wInt32LessThanOrEqual.Int32Compare(0xFFFFFFFE, 0xFFFFFFFF));
CHECK_EQ(false, wInt32LessThanOrEqual.Int32Compare(1, 0));
CHECK_EQ(false, wInt32LessThanOrEqual.Int32Compare(457, 456));
CHECK_EQ(false, wInt32LessThanOrEqual.Int32Compare(85539, 85537));
CHECK_EQ(false, wInt32LessThanOrEqual.Int32Compare(-1, -2));
- CHECK_EQ(false, wInt32LessThanOrEqual.Int32Compare(0xffffffff, 0xfffffffe));
+ CHECK_EQ(false, wInt32LessThanOrEqual.Int32Compare(0xFFFFFFFF, 0xFFFFFFFE));
// Unsigned comparisons.
CHECK_EQ(false, wUint32LessThan.Int32Compare(0, 0));
CHECK_EQ(false, wUint32LessThan.Int32Compare(357, 357));
CHECK_EQ(false, wUint32LessThan.Int32Compare(75539, 75539));
CHECK_EQ(false, wUint32LessThan.Int32Compare(-1, -1));
- CHECK_EQ(false, wUint32LessThan.Int32Compare(0xffffffff, 0xffffffff));
- CHECK_EQ(false, wUint32LessThan.Int32Compare(0xffffffff, 0));
+ CHECK_EQ(false, wUint32LessThan.Int32Compare(0xFFFFFFFF, 0xFFFFFFFF));
+ CHECK_EQ(false, wUint32LessThan.Int32Compare(0xFFFFFFFF, 0));
CHECK_EQ(false, wUint32LessThan.Int32Compare(-2999, 0));
CHECK_EQ(true, wUint32LessThan.Int32Compare(0, 1));
CHECK_EQ(true, wUint32LessThan.Int32Compare(456, 457));
CHECK_EQ(true, wUint32LessThan.Int32Compare(85537, 85539));
CHECK_EQ(true, wUint32LessThan.Int32Compare(-11, -10));
- CHECK_EQ(true, wUint32LessThan.Int32Compare(0xfffffffe, 0xffffffff));
- CHECK_EQ(true, wUint32LessThan.Int32Compare(0, 0xffffffff));
+ CHECK_EQ(true, wUint32LessThan.Int32Compare(0xFFFFFFFE, 0xFFFFFFFF));
+ CHECK_EQ(true, wUint32LessThan.Int32Compare(0, 0xFFFFFFFF));
CHECK_EQ(true, wUint32LessThan.Int32Compare(0, -2996));
CHECK_EQ(false, wUint32LessThan.Int32Compare(1, 0));
CHECK_EQ(false, wUint32LessThan.Int32Compare(457, 456));
CHECK_EQ(false, wUint32LessThan.Int32Compare(85539, 85537));
CHECK_EQ(false, wUint32LessThan.Int32Compare(-10, -21));
- CHECK_EQ(false, wUint32LessThan.Int32Compare(0xffffffff, 0xfffffffe));
+ CHECK_EQ(false, wUint32LessThan.Int32Compare(0xFFFFFFFF, 0xFFFFFFFE));
CHECK_EQ(true, wUint32LessThanOrEqual.Int32Compare(0, 0));
CHECK_EQ(true, wUint32LessThanOrEqual.Int32Compare(357, 357));
CHECK_EQ(true, wUint32LessThanOrEqual.Int32Compare(75539, 75539));
CHECK_EQ(true, wUint32LessThanOrEqual.Int32Compare(-1, -1));
- CHECK_EQ(true, wUint32LessThanOrEqual.Int32Compare(0xffffffff, 0xffffffff));
+ CHECK_EQ(true, wUint32LessThanOrEqual.Int32Compare(0xFFFFFFFF, 0xFFFFFFFF));
CHECK_EQ(true, wUint32LessThanOrEqual.Int32Compare(0, 1));
CHECK_EQ(true, wUint32LessThanOrEqual.Int32Compare(456, 457));
CHECK_EQ(true, wUint32LessThanOrEqual.Int32Compare(85537, 85539));
CHECK_EQ(true, wUint32LessThanOrEqual.Int32Compare(-300, -299));
CHECK_EQ(true, wUint32LessThanOrEqual.Int32Compare(-300, -300));
- CHECK_EQ(true, wUint32LessThanOrEqual.Int32Compare(0xfffffffe, 0xffffffff));
+ CHECK_EQ(true, wUint32LessThanOrEqual.Int32Compare(0xFFFFFFFE, 0xFFFFFFFF));
CHECK_EQ(true, wUint32LessThanOrEqual.Int32Compare(0, -2995));
CHECK_EQ(false, wUint32LessThanOrEqual.Int32Compare(1, 0));
CHECK_EQ(false, wUint32LessThanOrEqual.Int32Compare(457, 456));
CHECK_EQ(false, wUint32LessThanOrEqual.Int32Compare(85539, 85537));
CHECK_EQ(false, wUint32LessThanOrEqual.Int32Compare(-130, -170));
- CHECK_EQ(false, wUint32LessThanOrEqual.Int32Compare(0xffffffff, 0xfffffffe));
+ CHECK_EQ(false, wUint32LessThanOrEqual.Int32Compare(0xFFFFFFFF, 0xFFFFFFFE));
CHECK_EQ(false, wUint32LessThanOrEqual.Int32Compare(-2997, 0));
CompareWrapper wFloat64Equal(IrOpcode::kFloat64Equal);
@@ -319,7 +319,6 @@ void Int32BinopInputShapeTester::TestAllInputShapes() {
gen->gen(&m, n0, n1);
- if (false) printf("Int32BinopInputShapeTester i=%d, j=%d\n", i, j);
if (i >= 0) {
input_a = inputs[i];
RunRight(&m);
@@ -340,7 +339,6 @@ void Int32BinopInputShapeTester::Run(RawMachineAssemblerTester<int32_t>* m) {
input_a = *pl;
input_b = *pr;
int32_t expect = gen->expected(input_a, input_b);
- if (false) printf(" cmp(a=%d, b=%d) ?== %d\n", input_a, input_b, expect);
CHECK_EQ(expect, m->Call(input_a, input_b));
}
}
@@ -352,7 +350,6 @@ void Int32BinopInputShapeTester::RunLeft(
FOR_UINT32_INPUTS(i) {
input_a = *i;
int32_t expect = gen->expected(input_a, input_b);
- if (false) printf(" cmp(a=%d, b=%d) ?== %d\n", input_a, input_b, expect);
CHECK_EQ(expect, m->Call(input_a, input_b));
}
}
@@ -363,7 +360,6 @@ void Int32BinopInputShapeTester::RunRight(
FOR_UINT32_INPUTS(i) {
input_b = *i;
int32_t expect = gen->expected(input_a, input_b);
- if (false) printf(" cmp(a=%d, b=%d) ?== %d\n", input_a, input_b, expect);
CHECK_EQ(expect, m->Call(input_a, input_b));
}
}
diff --git a/deps/v8/test/cctest/compiler/codegen-tester.h b/deps/v8/test/cctest/compiler/codegen-tester.h
index c33e7d1ca9..7e7e4be0b1 100644
--- a/deps/v8/test/cctest/compiler/codegen-tester.h
+++ b/deps/v8/test/cctest/compiler/codegen-tester.h
@@ -22,22 +22,18 @@ class RawMachineAssemblerTester : public HandleAndZoneScope,
public CallHelper<ReturnType>,
public RawMachineAssembler {
public:
- RawMachineAssemblerTester(MachineType p0 = MachineType::None(),
- MachineType p1 = MachineType::None(),
- MachineType p2 = MachineType::None(),
- MachineType p3 = MachineType::None(),
- MachineType p4 = MachineType::None())
+ template <typename... ParamMachTypes>
+ explicit RawMachineAssemblerTester(ParamMachTypes... p)
: HandleAndZoneScope(),
CallHelper<ReturnType>(
main_isolate(),
- CSignature::New(main_zone(), MachineTypeForC<ReturnType>(), p0, p1,
- p2, p3, p4)),
+ CSignature::New(main_zone(), MachineTypeForC<ReturnType>(), p...)),
RawMachineAssembler(
main_isolate(), new (main_zone()) Graph(main_zone()),
Linkage::GetSimplifiedCDescriptor(
main_zone(),
- CSignature::New(main_zone(), MachineTypeForC<ReturnType>(), p0,
- p1, p2, p3, p4),
+ CSignature::New(main_zone(), MachineTypeForC<ReturnType>(),
+ p...),
true),
MachineType::PointerRepresentation(),
InstructionSelector::SupportedMachineOperatorFlags(),
@@ -79,17 +75,24 @@ class RawMachineAssemblerTester : public HandleAndZoneScope,
MaybeHandle<Code> code_;
};
-
template <typename ReturnType>
class BufferedRawMachineAssemblerTester
: public RawMachineAssemblerTester<int32_t> {
public:
- BufferedRawMachineAssemblerTester(MachineType p0 = MachineType::None(),
- MachineType p1 = MachineType::None(),
- MachineType p2 = MachineType::None(),
- MachineType p3 = MachineType::None())
- : BufferedRawMachineAssemblerTester(ComputeParameterCount(p0, p1, p2, p3),
- p0, p1, p2, p3) {}
+ template <typename... ParamMachTypes>
+ explicit BufferedRawMachineAssemblerTester(ParamMachTypes... p)
+ : RawMachineAssemblerTester<int32_t>(
+ MachineType::Pointer(), ((void)p, MachineType::Pointer())...),
+ test_graph_signature_(
+ CSignature::New(this->main_zone(), MachineType::Int32(), p...)),
+ return_parameter_index_(sizeof...(p)) {
+ static_assert(sizeof...(p) <= arraysize(parameter_nodes_),
+ "increase parameter_nodes_ array");
+ std::array<MachineType, sizeof...(p)> p_arr{{p...}};
+ for (size_t i = 0; i < p_arr.size(); ++i) {
+ parameter_nodes_[i] = Load(p_arr[i], RawMachineAssembler::Parameter(i));
+ }
+ }
virtual byte* Generate() { return RawMachineAssemblerTester::Generate(); }
@@ -99,7 +102,7 @@ class BufferedRawMachineAssemblerTester
// parameters from memory. Thereby it is possible to pass 64 bit parameters
// to the IR graph.
Node* Parameter(size_t index) {
- CHECK_GT(4, index);
+ CHECK_GT(arraysize(parameter_nodes_), index);
return parameter_nodes_[index];
}
@@ -114,141 +117,37 @@ class BufferedRawMachineAssemblerTester
RawMachineAssembler::Return(Int32Constant(1234));
}
- ReturnType Call() {
- ReturnType return_value;
- CSignature::VerifyParams(test_graph_signature_);
- CallHelper<int32_t>::Call(reinterpret_cast<void*>(&return_value));
- return return_value;
- }
-
- template <typename P0>
- ReturnType Call(P0 p0) {
+ template <typename... Params>
+ ReturnType Call(Params... p) {
ReturnType return_value;
- CSignature::VerifyParams<P0>(test_graph_signature_);
- CallHelper<int32_t>::Call(reinterpret_cast<void*>(&p0),
+ CSignature::VerifyParams<Params...>(test_graph_signature_);
+ CallHelper<int32_t>::Call(reinterpret_cast<void*>(&p)...,
reinterpret_cast<void*>(&return_value));
return return_value;
}
- template <typename P0, typename P1>
- ReturnType Call(P0 p0, P1 p1) {
- ReturnType return_value;
- CSignature::VerifyParams<P0, P1>(test_graph_signature_);
- CallHelper<int32_t>::Call(reinterpret_cast<void*>(&p0),
- reinterpret_cast<void*>(&p1),
- reinterpret_cast<void*>(&return_value));
- return return_value;
- }
-
- template <typename P0, typename P1, typename P2>
- ReturnType Call(P0 p0, P1 p1, P2 p2) {
- ReturnType return_value;
- CSignature::VerifyParams<P0, P1, P2>(test_graph_signature_);
- CallHelper<int32_t>::Call(
- reinterpret_cast<void*>(&p0), reinterpret_cast<void*>(&p1),
- reinterpret_cast<void*>(&p2), reinterpret_cast<void*>(&return_value));
- return return_value;
- }
-
- template <typename P0, typename P1, typename P2, typename P3>
- ReturnType Call(P0 p0, P1 p1, P2 p2, P3 p3) {
- ReturnType return_value;
- CSignature::VerifyParams<P0, P1, P2, P3>(test_graph_signature_);
- CallHelper<int32_t>::Call(
- reinterpret_cast<void*>(&p0), reinterpret_cast<void*>(&p1),
- reinterpret_cast<void*>(&p2), reinterpret_cast<void*>(&p3),
- reinterpret_cast<void*>(&return_value));
- return return_value;
- }
-
private:
- BufferedRawMachineAssemblerTester(uint32_t return_parameter_index,
- MachineType p0, MachineType p1,
- MachineType p2, MachineType p3)
- : RawMachineAssemblerTester<int32_t>(
- MachineType::Pointer(),
- p0 == MachineType::None() ? MachineType::None()
- : MachineType::Pointer(),
- p1 == MachineType::None() ? MachineType::None()
- : MachineType::Pointer(),
- p2 == MachineType::None() ? MachineType::None()
- : MachineType::Pointer(),
- p3 == MachineType::None() ? MachineType::None()
- : MachineType::Pointer()),
- test_graph_signature_(
- CSignature::New(main_zone(), MachineType::Int32(), p0, p1, p2, p3)),
- return_parameter_index_(return_parameter_index) {
- parameter_nodes_[0] = p0 == MachineType::None()
- ? nullptr
- : Load(p0, RawMachineAssembler::Parameter(0));
- parameter_nodes_[1] = p1 == MachineType::None()
- ? nullptr
- : Load(p1, RawMachineAssembler::Parameter(1));
- parameter_nodes_[2] = p2 == MachineType::None()
- ? nullptr
- : Load(p2, RawMachineAssembler::Parameter(2));
- parameter_nodes_[3] = p3 == MachineType::None()
- ? nullptr
- : Load(p3, RawMachineAssembler::Parameter(3));
- }
-
-
- static uint32_t ComputeParameterCount(MachineType p0, MachineType p1,
- MachineType p2, MachineType p3) {
- if (p0 == MachineType::None()) {
- return 0;
- }
- if (p1 == MachineType::None()) {
- return 1;
- }
- if (p2 == MachineType::None()) {
- return 2;
- }
- if (p3 == MachineType::None()) {
- return 3;
- }
- return 4;
- }
-
-
CSignature* test_graph_signature_;
Node* parameter_nodes_[4];
uint32_t return_parameter_index_;
};
-
template <>
class BufferedRawMachineAssemblerTester<void>
: public RawMachineAssemblerTester<void> {
public:
- BufferedRawMachineAssemblerTester(MachineType p0 = MachineType::None(),
- MachineType p1 = MachineType::None(),
- MachineType p2 = MachineType::None(),
- MachineType p3 = MachineType::None())
- : RawMachineAssemblerTester<void>(
- p0 == MachineType::None() ? MachineType::None()
- : MachineType::Pointer(),
- p1 == MachineType::None() ? MachineType::None()
- : MachineType::Pointer(),
- p2 == MachineType::None() ? MachineType::None()
- : MachineType::Pointer(),
- p3 == MachineType::None() ? MachineType::None()
- : MachineType::Pointer()),
+ template <typename... ParamMachTypes>
+ explicit BufferedRawMachineAssemblerTester(ParamMachTypes... p)
+ : RawMachineAssemblerTester<void>(((void)p, MachineType::Pointer())...),
test_graph_signature_(
CSignature::New(RawMachineAssemblerTester<void>::main_zone(),
- MachineType::None(), p0, p1, p2, p3)) {
- parameter_nodes_[0] = p0 == MachineType::None()
- ? nullptr
- : Load(p0, RawMachineAssembler::Parameter(0));
- parameter_nodes_[1] = p1 == MachineType::None()
- ? nullptr
- : Load(p1, RawMachineAssembler::Parameter(1));
- parameter_nodes_[2] = p2 == MachineType::None()
- ? nullptr
- : Load(p2, RawMachineAssembler::Parameter(2));
- parameter_nodes_[3] = p3 == MachineType::None()
- ? nullptr
- : Load(p3, RawMachineAssembler::Parameter(3));
+ MachineType::None(), p...)) {
+ static_assert(sizeof...(p) <= arraysize(parameter_nodes_),
+ "increase parameter_nodes_ array");
+ std::array<MachineType, sizeof...(p)> p_arr{{p...}};
+ for (size_t i = 0; i < p_arr.size(); ++i) {
+ parameter_nodes_[i] = Load(p_arr[i], RawMachineAssembler::Parameter(i));
+ }
}
virtual byte* Generate() { return RawMachineAssemblerTester::Generate(); }
@@ -259,49 +158,21 @@ class BufferedRawMachineAssemblerTester<void>
// parameters from memory. Thereby it is possible to pass 64 bit parameters
// to the IR graph.
Node* Parameter(size_t index) {
- CHECK_GT(4, index);
+ CHECK_GT(arraysize(parameter_nodes_), index);
return parameter_nodes_[index];
}
-
- void Call() {
- CSignature::VerifyParams(test_graph_signature_);
- CallHelper<void>::Call();
- }
-
- template <typename P0>
- void Call(P0 p0) {
- CSignature::VerifyParams<P0>(test_graph_signature_);
- CallHelper<void>::Call(reinterpret_cast<void*>(&p0));
- }
-
- template <typename P0, typename P1>
- void Call(P0 p0, P1 p1) {
- CSignature::VerifyParams<P0, P1>(test_graph_signature_);
- CallHelper<void>::Call(reinterpret_cast<void*>(&p0),
- reinterpret_cast<void*>(&p1));
- }
-
- template <typename P0, typename P1, typename P2>
- void Call(P0 p0, P1 p1, P2 p2) {
- CSignature::VerifyParams<P0, P1, P2>(test_graph_signature_);
- CallHelper<void>::Call(reinterpret_cast<void*>(&p0),
- reinterpret_cast<void*>(&p1),
- reinterpret_cast<void*>(&p2));
- }
-
- template <typename P0, typename P1, typename P2, typename P3>
- void Call(P0 p0, P1 p1, P2 p2, P3 p3) {
- CSignature::VerifyParams<P0, P1, P2, P3>(test_graph_signature_);
- CallHelper<void>::Call(
- reinterpret_cast<void*>(&p0), reinterpret_cast<void*>(&p1),
- reinterpret_cast<void*>(&p2), reinterpret_cast<void*>(&p3));
+ template <typename... Params>
+ void Call(Params... p) {
+ CSignature::VerifyParams<Params...>(test_graph_signature_);
+ CallHelper<void>::Call(reinterpret_cast<void*>(&p)...);
}
private:
CSignature* test_graph_signature_;
Node* parameter_nodes_[4];
};
+
static const bool USE_RESULT_BUFFER = true;
static const bool USE_RETURN_REGISTER = false;
static const int32_t CHECK_VALUE = 0x99BEEDCE;
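With the variadic constructors, a 64-bit binop test reads identically on 32- and 64-bit targets because parameters and the result travel through memory. A sketch in the style of existing cctest usage (Int64Add stands in for any machine operator):

BufferedRawMachineAssemblerTester<int64_t> m(MachineType::Int64(),
                                             MachineType::Int64());
m.Return(m.Int64Add(m.Parameter(0), m.Parameter(1)));
CHECK_EQ(int64_t{3}, m.Call(int64_t{1}, int64_t{2}));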
diff --git a/deps/v8/test/cctest/compiler/graph-builder-tester.h b/deps/v8/test/cctest/compiler/graph-builder-tester.h
index 1bf1d40587..24c2877938 100644
--- a/deps/v8/test/cctest/compiler/graph-builder-tester.h
+++ b/deps/v8/test/cctest/compiler/graph-builder-tester.h
@@ -50,16 +50,12 @@ class GraphBuilderTester : public HandleAndZoneScope,
public GraphAndBuilders,
public CallHelper<ReturnType> {
public:
- explicit GraphBuilderTester(MachineType p0 = MachineType::None(),
- MachineType p1 = MachineType::None(),
- MachineType p2 = MachineType::None(),
- MachineType p3 = MachineType::None(),
- MachineType p4 = MachineType::None())
+ template <typename... ParamMachTypes>
+ explicit GraphBuilderTester(ParamMachTypes... p)
: GraphAndBuilders(main_zone()),
CallHelper<ReturnType>(
main_isolate(),
- CSignature::New(main_zone(), MachineTypeForC<ReturnType>(), p0, p1,
- p2, p3, p4)),
+ CSignature::New(main_zone(), MachineTypeForC<ReturnType>(), p...)),
effect_(nullptr),
return_(nullptr),
parameters_(main_zone()->template NewArray<Node*>(parameter_count())) {
@@ -192,37 +188,10 @@ class GraphBuilderTester : public HandleAndZoneScope,
return NewNode(simplified()->StoreElement(access), object, index, value);
}
- Node* NewNode(const Operator* op) {
- return MakeNode(op, 0, static_cast<Node**>(nullptr));
- }
-
- Node* NewNode(const Operator* op, Node* n1) { return MakeNode(op, 1, &n1); }
-
- Node* NewNode(const Operator* op, Node* n1, Node* n2) {
- Node* buffer[] = {n1, n2};
- return MakeNode(op, arraysize(buffer), buffer);
- }
-
- Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3) {
- Node* buffer[] = {n1, n2, n3};
- return MakeNode(op, arraysize(buffer), buffer);
- }
-
- Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4) {
- Node* buffer[] = {n1, n2, n3, n4};
- return MakeNode(op, arraysize(buffer), buffer);
- }
-
- Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
- Node* n5) {
- Node* buffer[] = {n1, n2, n3, n4, n5};
- return MakeNode(op, arraysize(buffer), buffer);
- }
-
- Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
- Node* n5, Node* n6) {
- Node* nodes[] = {n1, n2, n3, n4, n5, n6};
- return MakeNode(op, arraysize(nodes), nodes);
+ template <typename... NodePtrs>
+ Node* NewNode(const Operator* op, NodePtrs... n) {
+ std::array<Node*, sizeof...(n)> inputs{{n...}};
+ return MakeNode(op, inputs.size(), inputs.data());
}
Node* NewNode(const Operator* op, int value_input_count,
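The pack-expansion NewNode above accepts any arity without further overloads. A call-site sketch, with a hypothetical Operator* op and input nodes a, b, c on a tester t:

Node* n = t.NewNode(op, a, b, c);  // expands into a 3-element Node* input array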
diff --git a/deps/v8/test/cctest/compiler/test-code-generator.cc b/deps/v8/test/cctest/compiler/test-code-generator.cc
index a131d861f7..10158c2741 100644
--- a/deps/v8/test/cctest/compiler/test-code-generator.cc
+++ b/deps/v8/test/cctest/compiler/test-code-generator.cc
@@ -49,8 +49,10 @@ Handle<Code> BuildTeardownFunction(Isolate* isolate, CallDescriptor* descriptor,
// arguments:
// ~~~
// FixedArray setup(CodeObject* test, FixedArray state_in) {
+// FixedArray state_out = AllocateFixedArray(state_in.length());
// // `test` will tail-call to its first parameter which will be `teardown`.
-// return test(teardown, state_in[0], state_in[1], state_in[2], ...);
+// return test(teardown, state_out, state_in[0], state_in[1],
+// state_in[2], ...);
// }
// ~~~
//
@@ -58,11 +60,14 @@ Handle<Code> BuildTeardownFunction(Isolate* isolate, CallDescriptor* descriptor,
// values to pass to the `test` function. The array will have been created using
// `GenerateInitialState()` and needs to be converted in the following way:
//
-// | Parameter type | FixedArray element | Conversion |
-// |----------------+--------------------+------------------------------------|
-// | kTagged | Smi | None. |
-// | kFloat32 | HeapNumber | Load value and convert to Float32. |
-// | kFloat64 | HeapNumber | Load value. |
+// | Parameter type | FixedArray element | Conversion |
+// |----------------+---------------------+------------------------------------|
+// | kTagged | Smi | None. |
+// | kFloat32 | HeapNumber | Load value and convert to Float32. |
+// | kFloat64 | HeapNumber | Load value. |
+// | kSimd128 | FixedArray<Smi>[4] | Untag each Smi and write the |
+// | | | results into lanes of a new |
+// | | | 128-bit vector. |
//
Handle<Code> BuildSetupFunction(Isolate* isolate, CallDescriptor* descriptor,
std::vector<AllocatedOperand> parameters) {
@@ -73,6 +78,32 @@ Handle<Code> BuildSetupFunction(Isolate* isolate, CallDescriptor* descriptor,
params.push_back(__ Parameter(0));
params.push_back(
__ HeapConstant(BuildTeardownFunction(isolate, descriptor, parameters)));
+ // First allocate the FixedArray that will hold the final results. This
+ // function takes care of all allocations, meaning it allocates the
+ // HeapNumbers and the FixedArrays representing Simd128 values.
+ Node* state_out = __ AllocateFixedArray(PACKED_ELEMENTS,
+ __ IntPtrConstant(parameters.size()));
+ for (int i = 0; i < static_cast<int>(parameters.size()); i++) {
+ switch (parameters[i].representation()) {
+ case MachineRepresentation::kTagged:
+ break;
+ case MachineRepresentation::kFloat32:
+ case MachineRepresentation::kFloat64:
+ __ StoreFixedArrayElement(state_out, i, __ AllocateHeapNumber());
+ break;
+ case MachineRepresentation::kSimd128: {
+ __ StoreFixedArrayElement(
+ state_out, i,
+ __ AllocateFixedArray(PACKED_SMI_ELEMENTS, __ IntPtrConstant(4)));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+ params.push_back(state_out);
+ // Then take each element of the initial state and pass it as an argument.
Node* state_in = __ Parameter(1);
for (int i = 0; i < static_cast<int>(parameters.size()); i++) {
Node* element = __ LoadFixedArrayElement(state_in, __ IntPtrConstant(i));
@@ -87,6 +118,21 @@ Handle<Code> BuildSetupFunction(Isolate* isolate, CallDescriptor* descriptor,
case MachineRepresentation::kFloat64:
element = __ LoadHeapNumberValue(element);
break;
+ case MachineRepresentation::kSimd128: {
+ Node* vector = tester.raw_assembler_for_testing()->AddNode(
+ tester.raw_assembler_for_testing()->machine()->I32x4Splat(),
+ __ Int32Constant(0));
+ for (int lane = 0; lane < 4; lane++) {
+ Node* lane_value = __ SmiToWord32(
+ __ LoadFixedArrayElement(element, __ IntPtrConstant(lane)));
+ vector = tester.raw_assembler_for_testing()->AddNode(
+ tester.raw_assembler_for_testing()->machine()->I32x4ReplaceLane(
+ lane),
+ vector, lane_value);
+ }
+ element = vector;
+ break;
+ }
default:
UNREACHABLE();
break;
@@ -99,45 +145,59 @@ Handle<Code> BuildSetupFunction(Isolate* isolate, CallDescriptor* descriptor,
return tester.GenerateCodeCloseAndEscape();
}
-// Build the `teardown` function. It allocates and fills a FixedArray with all
-// its parameters. The parameters need to be consistent with `parameters`.
+// Build the `teardown` function. It takes a FixedArray as argument, fills it
+// with the rest of its parameters and returns it. The parameters need to be
+// consistent with `parameters`.
// ~~~
-// FixedArray teardown(CodeObject* /* unused */,
+// FixedArray teardown(CodeObject* /* unused */, FixedArray result,
// // Tagged registers.
// Object* r0, Object* r1, ...,
// // FP registers.
// Float32 s0, Float64 d1, ...,
// // Mixed stack slots.
// Float64 mem0, Object* mem1, Float32 mem2, ...) {
-// return new FixedArray(r0, r1, ..., s0, d1, ..., mem0, mem1, mem2, ...);
+// result[0] = r0;
+// result[1] = r1;
+// ...
+// result[..] = s0;
+// ...
+// result[..] = mem0;
+// ...
+// return result;
// }
// ~~~
//
// This function needs to convert its parameters into values fit for a
// FixedArray, essentially reverting what the `setup` function did:
//
-// | Parameter type | Parameter value | Conversion |
-// |----------------+-------------------+----------------------------|
-// | kTagged | Smi or HeapNumber | None. |
-// | kFloat32 | Raw Float32 | Convert to Float64 and |
-// | | | allocate a new HeapNumber. |
-// | kFloat64 | Raw Float64 | Allocate a new HeapNumber. |
+// | Parameter type | Parameter value | Conversion |
+// |----------------+-------------------+--------------------------------------|
+// | kTagged | Smi or HeapNumber | None. |
+// | kFloat32 | Raw Float32 | Convert to Float64. |
+// | kFloat64 | Raw Float64 | None. |
+// | kSimd128 | Raw Simd128 | Split into 4 Word32 values and tag |
+// | | | them. |
//
// Note that it is possible for a `kTagged` value to go from a Smi to a
// HeapNumber. This is because `AssembleMove` will allocate a new HeapNumber if
// it is asked to move a FP constant to a tagged register or slot.
//
+// Finally, it is important that this function does not call `RecordWrite`,
+// which is why "setup" is in charge of all allocations and we use
+// SKIP_WRITE_BARRIER. The reason is that `RecordWrite` may clobber the top
+// 64 bits of Simd128 registers; this is the case on x64, ia32 and Arm64, for
+// example.
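+//
+// As an illustration, a kSimd128 value makes the following round trip: it
+// enters `setup` as a FixedArray of four Smis, is rebuilt lane by lane with
+// I32x4Splat and I32x4ReplaceLane, and `teardown` reverses this with
+// I32x4ExtractLane and SmiFromWord32, storing each lane back into the
+// pre-allocated FixedArray with SKIP_WRITE_BARRIER.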
Handle<Code> BuildTeardownFunction(Isolate* isolate, CallDescriptor* descriptor,
std::vector<AllocatedOperand> parameters) {
CodeAssemblerTester tester(isolate, descriptor);
CodeStubAssembler assembler(tester.state());
- Node* result_array = __ AllocateFixedArray(
- PACKED_ELEMENTS, __ IntPtrConstant(parameters.size()));
+ Node* result_array = __ Parameter(1);
for (int i = 0; i < static_cast<int>(parameters.size()); i++) {
- // The first argument is not used.
- Node* param = __ Parameter(i + 1);
+ // The first argument is not used and the second is "result_array".
+ Node* param = __ Parameter(i + 2);
switch (parameters[i].representation()) {
case MachineRepresentation::kTagged:
+ __ StoreFixedArrayElement(result_array, i, param, SKIP_WRITE_BARRIER);
break;
// Box FP values into HeapNumbers.
case MachineRepresentation::kFloat32:
@@ -145,13 +205,28 @@ Handle<Code> BuildTeardownFunction(Isolate* isolate, CallDescriptor* descriptor,
tester.raw_assembler_for_testing()->ChangeFloat32ToFloat64(param);
// Fallthrough
case MachineRepresentation::kFloat64:
- param = __ AllocateHeapNumberWithValue(param);
+ __ StoreObjectFieldNoWriteBarrier(
+ __ LoadFixedArrayElement(result_array, i), HeapNumber::kValueOffset,
+ param, MachineRepresentation::kFloat64);
break;
+ case MachineRepresentation::kSimd128: {
+ Node* vector = __ LoadFixedArrayElement(result_array, i);
+ for (int lane = 0; lane < 4; lane++) {
+ Node* lane_value =
+ __ SmiFromWord32(tester.raw_assembler_for_testing()->AddNode(
+ tester.raw_assembler_for_testing()
+ ->machine()
+ ->I32x4ExtractLane(lane),
+ param));
+ __ StoreFixedArrayElement(vector, lane, lane_value,
+ SKIP_WRITE_BARRIER);
+ }
+ break;
+ }
default:
UNREACHABLE();
break;
}
- __ StoreFixedArrayElement(result_array, i, param);
}
__ Return(result_array);
return tester.GenerateCodeCloseAndEscape();
@@ -159,7 +234,7 @@ Handle<Code> BuildTeardownFunction(Isolate* isolate, CallDescriptor* descriptor,
// Print the content of `value`, representing the register or stack slot
// described by `operand`.
-void PrintStateValue(std::ostream& os, Handle<Object> value,
+void PrintStateValue(std::ostream& os, Isolate* isolate, Handle<Object> value,
AllocatedOperand operand) {
switch (operand.representation()) {
case MachineRepresentation::kTagged:
@@ -173,6 +248,18 @@ void PrintStateValue(std::ostream& os, Handle<Object> value,
case MachineRepresentation::kFloat64:
os << value->Number();
break;
+ case MachineRepresentation::kSimd128: {
+ FixedArray* vector = FixedArray::cast(*value);
+ os << "[";
+ for (int lane = 0; lane < 4; lane++) {
+ os << Smi::cast(*vector->GetValueChecked<Smi>(isolate, lane))->value();
+ if (lane < 3) {
+ os << ", ";
+ }
+ }
+ os << "]";
+ break;
+ }
default:
UNREACHABLE();
break;
@@ -187,6 +274,16 @@ void PrintStateValue(std::ostream& os, Handle<Object> value,
os << ")";
}
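+// Whether AssembleMove and AssembleSwap of kSimd128 operands can be tested on
+// this target.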
+bool TestSimd128Moves() {
+#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64)
+ // TODO(mips): Implement support for the kSimd128 representation in
+ // AssembleMove and AssembleSwap on MIPS.
+ return false;
+#else
+ return CpuFeatures::SupportsWasmSimd128();
+#endif
+}
+
} // namespace
#undef __
@@ -196,11 +293,11 @@ void PrintStateValue(std::ostream& os, Handle<Object> value,
// with. It has the ability to randomly generate lists of moves and run the code
// generated by the CodeGeneratorTester.
//
-// At the moment, only the following representations are tested:
+// The following representations are tested:
// - kTagged
// - kFloat32
// - kFloat64
-// - TODO(planglois): Add support for kSimd128.
+// - kSimd128 (if supported)
// There is no need to test using Word32 or Word64 as they are the same as
// Tagged as far as the code generator is concerned.
//
@@ -215,43 +312,49 @@ void PrintStateValue(std::ostream& os, Handle<Object> value,
//
// - The `setup` function receives a FixedArray as the initial state. It
// unpacks it and passes each element as arguments to the generated code
-// `test`. We also pass the `teardown` function as a first argument. Thanks
-// to the custom CallDescriptor, registers and stack slots get initialised
-// according to the content of the FixedArray.
+//   `test`. We also pass the `teardown` function as the first argument, and a
+//   newly allocated FixedArray as the second argument, which will hold the
+//   final results. Thanks to the custom CallDescriptor, registers and stack
+//   slots get initialised according to the content of the initial FixedArray.
//
// - The `test` function performs the list of moves on its parameters and
// eventually tail-calls to its first parameter, which is the `teardown`
// function.
//
-// - The `teardown` function allocates a new FixedArray and fills it with all
-// its parameters. Thanks to the tail-call, this is as if the `setup`
-// function called `teardown` directly, except now moves were performed!
+//   - The `teardown` function receives the final results as a FixedArray, fills
+//     it with the rest of its arguments and returns it. Thanks to the
+//     tail-call, this is as if the `setup` function had called `teardown`
+//     directly, except that the moves have now been performed!
//
// .----------------setup--------------------------.
// | Take a FixedArray as parameters with |
// | all the initial values of registers |
// | and stack slots. | <- CodeStubAssembler
// | |
-// | Call test(teardown, state[0], state[1], ...); |
+// | Allocate a new FixedArray `result` with |
+// | initial values. |
+// | |
+// | Call test(teardown, result, state[0], |
+// | state[1], state[2], ...); |
// '-----------------------------------------------'
// |
// V
-// .----------------test-----------------------------.
-// | - Move(param3, param42); |
-// | - Swap(param64, param1); |
-// | - Move(param2, param6); | <- CodeGeneratorTester
-// | ... |
-// | |
-// | // "teardown" is the first parameter as well as |
-// | // the callee. |
-// | TailCall param0(param0, param1, param2, ...); |
-// '-------------------------------------------------'
+// .----------------test-------------------------------.
+// | - Move(param3, param42); |
+// | - Swap(param64, param4); |
+// | - Move(param2, param6); | <- CodeGeneratorTester
+// | ... |
+// | |
+// | // "teardown" is the first parameter as well as |
+// | // the callee. |
+// | TailCall teardown(teardown, result, param2, ...); |
+// '---------------------------------------------------'
// |
// V
-// .----------------teardown--------------.
-// | Create a FixedArray with all |
-// | parameters and return it. | <- CodeStubAssembler
-// '--------------------------------------'
+// .----------------teardown---------------------------.
+// | Fill in the incoming `result` FixedArray with all |
+// | parameters and return it. | <- CodeStubAssembler
+// '---------------------------------------------------'
class TestEnvironment : public HandleAndZoneScope {
public:
@@ -263,8 +366,7 @@ class TestEnvironment : public HandleAndZoneScope {
static constexpr int kTaggedSlotCount = 64;
static constexpr int kFloat32SlotCount = 64;
static constexpr int kFloat64SlotCount = 64;
- static constexpr int kStackParameterCount =
- kTaggedSlotCount + kFloat32SlotCount + kFloat64SlotCount;
+ static constexpr int kSimd128SlotCount = 16;
// TODO(all): Test all types of constants (e.g. ExternalReference and
// HeapObject).
@@ -273,79 +375,138 @@ class TestEnvironment : public HandleAndZoneScope {
static constexpr int kDoubleConstantCount = 4;
TestEnvironment()
- : blocks_(main_zone()),
+ : blocks_(1, main_zone()),
code_(main_isolate(), main_zone(), &blocks_),
rng_(CcTest::random_number_generator()),
- // TODO(planglois): Support kSimd128.
supported_reps_({MachineRepresentation::kTagged,
MachineRepresentation::kFloat32,
MachineRepresentation::kFloat64}) {
+ // Create and initialize a single empty block in blocks_.
+ InstructionBlock* block = new (main_zone()) InstructionBlock(
+ main_zone(), RpoNumber::FromInt(0), RpoNumber::Invalid(),
+ RpoNumber::Invalid(), false, false);
+ block->set_ao_number(RpoNumber::FromInt(0));
+ blocks_[0] = block;
+
+ int stack_slot_count =
+ kTaggedSlotCount + kFloat32SlotCount + kFloat64SlotCount;
+ if (TestSimd128Moves()) {
+ stack_slot_count += kSimd128SlotCount;
+ supported_reps_.push_back(MachineRepresentation::kSimd128);
+ }
// The "teardown" and "test" functions share the same descriptor with the
// following signature:
// ~~~
- // FixedArray f(CodeObject* teardown,
+ // FixedArray f(CodeObject* teardown, FixedArray preallocated_result,
// // Tagged registers.
// Object*, Object*, ...,
// // FP registers.
- // Float32, Float64, ...,
+ // Float32, Float64, Simd128, ...,
// // Mixed stack slots.
- // Float64, Object*, Float32, ...);
+ // Float64, Object*, Float32, Simd128, ...);
// ~~~
- LocationSignature::Builder test_signature(main_zone(), 1,
- 1 + kGeneralRegisterCount +
- kDoubleRegisterCount +
- kStackParameterCount);
+ LocationSignature::Builder test_signature(
+ main_zone(), 1,
+ 2 + kGeneralRegisterCount + kDoubleRegisterCount + stack_slot_count);
// The first parameter will be the code object of the "teardown"
// function. This way, the "test" function can tail-call to it.
test_signature.AddParam(LinkageLocation::ForRegister(
kReturnRegister0.code(), MachineType::AnyTagged()));
+  // The second parameter will be a pre-allocated FixedArray that the
+  // "teardown" function will fill with results and then return. We place this
+  // parameter in the first stack argument slot, which is always -1, so the
+  // slots we perform moves on start at -2.
+ test_signature.AddParam(
+ LinkageLocation::ForCallerFrameSlot(-1, MachineType::AnyTagged()));
+ int slot_parameter_n = -2;
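+  // One extra stack parameter accounts for the result FixedArray at slot -1.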
+ const int kTotalStackParameterCount = stack_slot_count + 1;
+
// Initialise registers.
+ // Make sure that the target has enough general purpose registers to
+ // generate a call to a CodeObject using this descriptor. We have reserved
+ // kReturnRegister0 as the first parameter, and the call will need a
+ // register to hold the CodeObject address. So the maximum number of
+ // registers left to test with is the number of available registers minus 2.
+ DCHECK_LE(
+ kGeneralRegisterCount,
+ RegisterConfiguration::Default()->num_allocatable_general_registers() -
+ 2);
+
int32_t general_mask =
RegisterConfiguration::Default()->allocatable_general_codes_mask();
// kReturnRegister0 is used to hold the "teardown" code object, do not
// generate moves using it.
std::unique_ptr<const RegisterConfiguration> registers(
RegisterConfiguration::RestrictGeneralRegisters(
- general_mask & ~(1 << kReturnRegister0.code())));
+ general_mask & ~kReturnRegister0.bit()));
for (int i = 0; i < kGeneralRegisterCount; i++) {
int code = registers->GetAllocatableGeneralCode(i);
AddRegister(&test_signature, MachineRepresentation::kTagged, code);
}
- // We assume that Double and Float registers alias, depending on
- // kSimpleFPAliasing. For this reason, we allocate a Float and a Double in
- // pairs.
- static_assert((kDoubleRegisterCount % 2) == 0,
- "kDoubleRegisterCount should be a multiple of two.");
+  // We assume that Double, Float and Simd128 registers alias, depending on
+  // kSimpleFPAliasing. For this reason, we allocate a Float, a Double and a
+  // Simd128 together, which is why `kDoubleRegisterCount` must be a multiple
+  // of 3, and of 2 in case Simd128 is not supported.
+ static_assert(
+ ((kDoubleRegisterCount % 2) == 0) && ((kDoubleRegisterCount % 3) == 0),
+ "kDoubleRegisterCount should be a multiple of two and three.");
for (int i = 0; i < kDoubleRegisterCount; i += 2) {
- // Make sure we do not allocate FP registers which alias. We double the
- // index for Float registers if the aliasing is not "Simple":
- // Simple -> s0, d1, s2, d3, s4, d5, ...
- // Arm32-style -> s0, d1, s4, d3, s8, d5, ...
- // This isn't space-efficient at all but suits our need.
- static_assert(kDoubleRegisterCount < 16,
- "Arm has a d16 register but no overlapping s32 register.");
- int float_code =
- registers->GetAllocatableFloatCode(kSimpleFPAliasing ? i : i * 2);
- int double_code = registers->GetAllocatableDoubleCode(i + 1);
- AddRegister(&test_signature, MachineRepresentation::kFloat32, float_code);
- AddRegister(&test_signature, MachineRepresentation::kFloat64,
- double_code);
+ if (kSimpleFPAliasing) {
+ // Allocate three registers at once if kSimd128 is supported, else
+ // allocate in pairs.
+ AddRegister(&test_signature, MachineRepresentation::kFloat32,
+ registers->GetAllocatableFloatCode(i));
+ AddRegister(&test_signature, MachineRepresentation::kFloat64,
+ registers->GetAllocatableDoubleCode(i + 1));
+ if (TestSimd128Moves()) {
+ AddRegister(&test_signature, MachineRepresentation::kSimd128,
+ registers->GetAllocatableSimd128Code(i + 2));
+ i++;
+ }
+ } else {
+ // Make sure we do not allocate FP registers which alias. To do this, we
+ // allocate three 128-bit registers and then convert two of them to a
+ // float and a double. With this aliasing scheme, a Simd128 register
+ // aliases two Double registers and four Float registers, so we need to
+ // scale indexes accordingly:
+ //
+ // Simd128 register: q0, q1, q2, q3, q4, q5
+ // | | | |
+ // V V V V
+ // Aliases: s0, d2, q2, s12, d8, q5
+ //
+      // This isn't space-efficient at all, but it suits our needs.
+ static_assert(
+ kDoubleRegisterCount < 8,
+ "Arm has a q8 and a d16 register but no overlapping s32 register.");
+ int first_simd128 = registers->GetAllocatableSimd128Code(i);
+ int second_simd128 = registers->GetAllocatableSimd128Code(i + 1);
+ AddRegister(&test_signature, MachineRepresentation::kFloat32,
+ first_simd128 * 4);
+ AddRegister(&test_signature, MachineRepresentation::kFloat64,
+ second_simd128 * 2);
+ if (TestSimd128Moves()) {
+ int third_simd128 = registers->GetAllocatableSimd128Code(i + 2);
+ AddRegister(&test_signature, MachineRepresentation::kSimd128,
+ third_simd128);
+ i++;
+ }
+ }
}
// Initialise stack slots.
- // Stack parameters start at -1.
- int slot_parameter_n = -1;
-
- // TODO(planglois): Support kSimd128 stack slots.
std::map<MachineRepresentation, int> slots = {
{MachineRepresentation::kTagged, kTaggedSlotCount},
{MachineRepresentation::kFloat32, kFloat32SlotCount},
{MachineRepresentation::kFloat64, kFloat64SlotCount}};
+ if (TestSimd128Moves()) {
+ slots.emplace(MachineRepresentation::kSimd128, kSimd128SlotCount);
+ }
// Allocate new slots until we run out of them.
while (std::any_of(slots.cbegin(), slots.cend(),
@@ -408,7 +569,7 @@ class TestEnvironment : public HandleAndZoneScope {
LinkageLocation::ForAnyRegister(
MachineType::AnyTagged()), // target location
test_signature.Build(), // location_sig
- kStackParameterCount, // stack_parameter_count
+ kTotalStackParameterCount, // stack_parameter_count
Operator::kNoProperties, // properties
kNoCalleeSaved, // callee-saved registers
kNoCalleeSaved, // callee-saved fp
@@ -489,6 +650,15 @@ class TestEnvironment : public HandleAndZoneScope {
state->set(
i, *main_isolate()->factory()->NewHeapNumber(rng_->NextDouble()));
break;
+ case MachineRepresentation::kSimd128: {
+ Handle<FixedArray> vector =
+ main_isolate()->factory()->NewFixedArray(4);
+ for (int lane = 0; lane < 4; lane++) {
+ vector->set(lane, Smi::FromInt(rng_->NextInt(Smi::kMaxValue)));
+ }
+ state->set(i, *vector);
+ break;
+ }
default:
UNREACHABLE();
break;
@@ -611,17 +781,45 @@ class TestEnvironment : public HandleAndZoneScope {
actual->GetValueChecked<Object>(main_isolate(), i);
Handle<Object> expected_value =
expected->GetValueChecked<Object>(main_isolate(), i);
- if (!actual_value->StrictEquals(*expected_value)) {
+ if (!CompareValues(actual_value, expected_value,
+ layout_[i].representation())) {
std::ostringstream expected_str;
- PrintStateValue(expected_str, expected_value, layout_[i]);
+ PrintStateValue(expected_str, main_isolate(), expected_value,
+ layout_[i]);
std::ostringstream actual_str;
- PrintStateValue(actual_str, actual_value, layout_[i]);
+ PrintStateValue(actual_str, main_isolate(), actual_value, layout_[i]);
V8_Fatal(__FILE__, __LINE__, "Expected: '%s' but got '%s'",
expected_str.str().c_str(), actual_str.str().c_str());
}
}
}
+ bool CompareValues(Handle<Object> actual, Handle<Object> expected,
+ MachineRepresentation rep) {
+ switch (rep) {
+ case MachineRepresentation::kTagged:
+ case MachineRepresentation::kFloat32:
+ case MachineRepresentation::kFloat64:
+ return actual->StrictEquals(*expected);
+ case MachineRepresentation::kSimd128:
+ for (int lane = 0; lane < 4; lane++) {
+ Handle<Smi> actual_lane =
+ FixedArray::cast(*actual)->GetValueChecked<Smi>(main_isolate(),
+ lane);
+ Handle<Smi> expected_lane =
+ FixedArray::cast(*expected)->GetValueChecked<Smi>(main_isolate(),
+ lane);
+ if (!actual_lane->StrictEquals(*expected_lane)) {
+ return false;
+ }
+ }
+ return true;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+
enum OperandConstraint {
kNone,
// Restrict operands to non-constants. This is useful when generating a
@@ -741,7 +939,7 @@ constexpr int TestEnvironment::kDoubleRegisterCount;
constexpr int TestEnvironment::kTaggedSlotCount;
constexpr int TestEnvironment::kFloat32SlotCount;
constexpr int TestEnvironment::kFloat64SlotCount;
-constexpr int TestEnvironment::kStackParameterCount;
+constexpr int TestEnvironment::kSimd128SlotCount;
constexpr int TestEnvironment::kSmiConstantCount;
constexpr int TestEnvironment::kFloatConstantCount;
constexpr int TestEnvironment::kDoubleConstantCount;
@@ -772,6 +970,16 @@ class CodeGeneratorTester {
// frame is too big.
}
+ Instruction* CreateTailCall(int stack_slot_delta) {
+ int optional_padding_slot = stack_slot_delta;
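+    // kArchTailCallCodeObject consumes two immediates: an optional padding
+    // slot and the stack slot delta (cf. first_unused_stack_slot in
+    // FinalizeForExecuting below).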
+ InstructionOperand callee[] = {
+ ImmediateOperand(ImmediateOperand::INLINE, optional_padding_slot),
+ ImmediateOperand(ImmediateOperand::INLINE, stack_slot_delta)};
+ Instruction* tail_call = Instruction::New(zone_, kArchTailCallCodeObject, 0,
+ nullptr, 2, callee, 0, nullptr);
+ return tail_call;
+ }
+
enum PushTypeFlag {
kRegisterPush = CodeGenerator::kRegisterPush,
kStackSlotPush = CodeGenerator::kStackSlotPush,
@@ -819,32 +1027,39 @@ class CodeGeneratorTester {
}
Handle<Code> Finalize() {
+ generator_.FinishCode();
+ generator_.safepoints()->Emit(generator_.tasm(),
+ frame_.GetTotalFrameSlotCount());
+ return generator_.FinalizeCode();
+ }
+
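+  // Like Finalize(), but first emits the epilogue the test environment
+  // expects: a block containing kArchPrepareTailCall followed by a tail call
+  // to the "teardown" code object held in kReturnRegister0.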
+ Handle<Code> FinalizeForExecuting() {
+ InstructionSequence* sequence = generator_.code();
+
+ sequence->StartBlock(RpoNumber::FromInt(0));
// The environment expects this code to tail-call to its first parameter
// placed in `kReturnRegister0`.
- generator_.AssembleArchInstruction(
- Instruction::New(zone_, kArchPrepareTailCall));
+ sequence->AddInstruction(Instruction::New(zone_, kArchPrepareTailCall));
+    // We use either zero or one slot.
+ int first_unused_stack_slot =
+ V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK ? 1 : 0;
+ int optional_padding_slot = first_unused_stack_slot;
InstructionOperand callee[] = {
AllocatedOperand(LocationOperand::REGISTER,
MachineRepresentation::kTagged,
kReturnRegister0.code()),
- ImmediateOperand(
- ImmediateOperand::INLINE,
- V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK ? 1 : 0)};
+ ImmediateOperand(ImmediateOperand::INLINE, optional_padding_slot),
+ ImmediateOperand(ImmediateOperand::INLINE, first_unused_stack_slot)};
Instruction* tail_call = Instruction::New(zone_, kArchTailCallCodeObject, 0,
- nullptr, 2, callee, 0, nullptr);
- int first_unused_stack_slot;
- if (generator_.GetSlotAboveSPBeforeTailCall(tail_call,
- &first_unused_stack_slot)) {
- generator_.AssembleTailCallBeforeGap(tail_call, first_unused_stack_slot);
- generator_.AssembleTailCallAfterGap(tail_call, first_unused_stack_slot);
- }
- generator_.AssembleArchInstruction(tail_call);
+ nullptr, 3, callee, 0, nullptr);
+ sequence->AddInstruction(tail_call);
+ sequence->EndBlock(RpoNumber::FromInt(0));
- generator_.FinishCode();
- generator_.safepoints()->Emit(generator_.tasm(),
- frame_.GetTotalFrameSlotCount());
- return generator_.FinalizeCode();
+ generator_.AssembleBlock(
+ sequence->InstructionBlockAt(RpoNumber::FromInt(0)));
+
+ return Finalize();
}
private:
@@ -890,7 +1105,7 @@ TEST(FuzzAssembleMove) {
c.CheckAssembleMove(&m->source(), &m->destination());
}
- Handle<Code> test = c.Finalize();
+ Handle<Code> test = c.FinalizeForExecuting();
if (FLAG_print_code) {
test->Print();
}
@@ -911,7 +1126,7 @@ TEST(FuzzAssembleSwap) {
c.CheckAssembleSwap(&s->source(), &s->destination());
}
- Handle<Code> test = c.Finalize();
+ Handle<Code> test = c.FinalizeForExecuting();
if (FLAG_print_code) {
test->Print();
}
@@ -943,7 +1158,7 @@ TEST(FuzzAssembleMoveAndSwap) {
}
}
- Handle<Code> test = c.Finalize();
+ Handle<Code> test = c.FinalizeForExecuting();
if (FLAG_print_code) {
test->Print();
}
@@ -1003,7 +1218,7 @@ TEST(AssembleTailCallGap) {
{
// Generate a series of register pushes only.
CodeGeneratorTester c(&env);
- Instruction* instr = Instruction::New(env.main_zone(), kArchNop);
+ Instruction* instr = c.CreateTailCall(first_slot + 4);
instr
->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
env.main_zone())
@@ -1032,7 +1247,7 @@ TEST(AssembleTailCallGap) {
{
// Generate a series of stack pushes only.
CodeGeneratorTester c(&env);
- Instruction* instr = Instruction::New(env.main_zone(), kArchNop);
+ Instruction* instr = c.CreateTailCall(first_slot + 4);
instr
->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
env.main_zone())
@@ -1061,7 +1276,7 @@ TEST(AssembleTailCallGap) {
{
// Generate a mix of stack and register pushes.
CodeGeneratorTester c(&env);
- Instruction* instr = Instruction::New(env.main_zone(), kArchNop);
+ Instruction* instr = c.CreateTailCall(first_slot + 4);
instr
->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
env.main_zone())
diff --git a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
index 9d9d634e33..3ae652e869 100644
--- a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
+++ b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
@@ -520,7 +520,7 @@ TEST(JSToString1) {
{ // ToString(number)
Node* r = R.ReduceUnop(op, Type::Number());
- CHECK_EQ(IrOpcode::kJSToString, r->opcode());
+ CHECK_EQ(IrOpcode::kNumberToString, r->opcode());
}
{ // ToString(string)
diff --git a/deps/v8/test/cctest/compiler/test-loop-analysis.cc b/deps/v8/test/cctest/compiler/test-loop-analysis.cc
index 29e0c2f444..734fa4bea3 100644
--- a/deps/v8/test/cctest/compiler/test-loop-analysis.cc
+++ b/deps/v8/test/cctest/compiler/test-loop-analysis.cc
@@ -45,7 +45,7 @@ class LoopFinderTester : HandleAndZoneScope {
zero(jsgraph.Int32Constant(0)),
one(jsgraph.OneConstant()),
half(jsgraph.Constant(0.5)),
- self(graph.NewNode(common.Int32Constant(0xaabbccdd))),
+ self(graph.NewNode(common.Int32Constant(0xAABBCCDD))),
dead(graph.NewNode(common.Dead())),
loop_tree(nullptr) {
graph.SetEnd(end);
diff --git a/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc b/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc
index b41fe5184b..df18062acf 100644
--- a/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc
+++ b/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc
@@ -447,12 +447,12 @@ static void CheckJsShift(ReducerTester* R) {
Node* x = R->Parameter(0);
Node* y = R->Parameter(1);
- Node* thirty_one = R->Constant<int32_t>(0x1f);
+ Node* thirty_one = R->Constant<int32_t>(0x1F);
Node* y_and_thirty_one =
R->graph.NewNode(R->machine.Word32And(), y, thirty_one);
// If the underlying machine shift instructions 'and' their right operand
- // with 0x1f then: x << (y & 0x1f) => x << y
+ // with 0x1F then: x << (y & 0x1F) => x << y
R->CheckFoldBinop(x, y, x, y_and_thirty_one);
}
diff --git a/deps/v8/test/cctest/compiler/test-multiple-return.cc b/deps/v8/test/cctest/compiler/test-multiple-return.cc
index 6be7814756..1f46e87077 100644
--- a/deps/v8/test/cctest/compiler/test-multiple-return.cc
+++ b/deps/v8/test/cctest/compiler/test-multiple-return.cc
@@ -5,13 +5,14 @@
#include <cmath>
#include <functional>
#include <limits>
+#include <memory>
#include "src/assembler.h"
#include "src/base/bits.h"
-#include "src/base/utils/random-number-generator.h"
#include "src/codegen.h"
#include "src/compiler.h"
#include "src/compiler/linkage.h"
+#include "src/machine-type.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
#include "test/cctest/cctest.h"
@@ -24,92 +25,360 @@ namespace compiler {
namespace {
-CallDescriptor* GetCallDescriptor(Zone* zone, int return_count,
- int param_count) {
- LocationSignature::Builder locations(zone, return_count, param_count);
+int size(MachineType type) {
+ return 1 << ElementSizeLog2Of(type.representation());
+}
+
+int num_registers(MachineType type) {
const RegisterConfiguration* config = RegisterConfiguration::Default();
+ switch (type.representation()) {
+ case MachineRepresentation::kWord32:
+ case MachineRepresentation::kWord64:
+ return config->num_allocatable_general_registers();
+ case MachineRepresentation::kFloat32:
+ return config->num_allocatable_float_registers();
+ case MachineRepresentation::kFloat64:
+ return config->num_allocatable_double_registers();
+ default:
+ UNREACHABLE();
+ }
+}
- // Add return location(s).
- CHECK(return_count <= config->num_allocatable_general_registers());
- for (int i = 0; i < return_count; i++) {
- locations.AddReturn(LinkageLocation::ForRegister(
- config->allocatable_general_codes()[i], MachineType::AnyTagged()));
+const int* codes(MachineType type) {
+ const RegisterConfiguration* config = RegisterConfiguration::Default();
+ switch (type.representation()) {
+ case MachineRepresentation::kWord32:
+ case MachineRepresentation::kWord64:
+ return config->allocatable_general_codes();
+ case MachineRepresentation::kFloat32:
+ return config->allocatable_float_codes();
+ case MachineRepresentation::kFloat64:
+ return config->allocatable_double_codes();
+ default:
+ UNREACHABLE();
}
+}
- // Add register and/or stack parameter(s).
- CHECK(param_count <= config->num_allocatable_general_registers());
+CallDescriptor* CreateMonoCallDescriptor(Zone* zone, int return_count,
+ int param_count, MachineType type) {
+ LocationSignature::Builder locations(zone, return_count, param_count);
+
+ int span = std::max(1, size(type) / kPointerSize);
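+  // For example, a Float64 parameter on a 32-bit target spans two pointer
+  // slots (span == 2); on 64-bit targets every tested type has a span of 1.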
+ int stack_params = 0;
for (int i = 0; i < param_count; i++) {
- locations.AddParam(LinkageLocation::ForRegister(
- config->allocatable_general_codes()[i], MachineType::AnyTagged()));
+ LinkageLocation location = LinkageLocation::ForAnyRegister();
+ if (i < num_registers(type)) {
+ location = LinkageLocation::ForRegister(codes(type)[i], type);
+ } else {
+ int slot = span * (i - param_count);
+ location = LinkageLocation::ForCallerFrameSlot(slot, type);
+ stack_params += span;
+ }
+ locations.AddParam(location);
+ }
+
+ int stack_returns = 0;
+ for (int i = 0; i < return_count; i++) {
+ LinkageLocation location = LinkageLocation::ForAnyRegister();
+ if (i < num_registers(type)) {
+ location = LinkageLocation::ForRegister(codes(type)[i], type);
+ } else {
+ int slot = span * (num_registers(type) - i) - stack_params - 1;
+ location = LinkageLocation::ForCallerFrameSlot(slot, type);
+ stack_returns += span;
+ }
+ locations.AddReturn(location);
}
const RegList kCalleeSaveRegisters = 0;
const RegList kCalleeSaveFPRegisters = 0;
- // The target for wasm calls is always a code object.
MachineType target_type = MachineType::AnyTagged();
- LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
+ LinkageLocation target_loc = LinkageLocation::ForAnyRegister(target_type);
return new (zone) CallDescriptor( // --
CallDescriptor::kCallCodeObject, // kind
target_type, // target MachineType
target_loc, // target location
locations.Build(), // location_sig
- 0, // js_parameter_count
+ stack_params, // on-stack parameter count
compiler::Operator::kNoProperties, // properties
kCalleeSaveRegisters, // callee-saved registers
kCalleeSaveFPRegisters, // callee-saved fp regs
CallDescriptor::kNoFlags, // flags
- "c-call");
+ "c-call", // debug name
+ 0, // allocatable registers
+ stack_returns); // on-stack return count
}
+
} // namespace
+Node* Constant(RawMachineAssembler& m, MachineType type, int value) {
+ switch (type.representation()) {
+ case MachineRepresentation::kWord32:
+ return m.Int32Constant(static_cast<int32_t>(value));
+ case MachineRepresentation::kWord64:
+ return m.Int64Constant(static_cast<int64_t>(value));
+ case MachineRepresentation::kFloat32:
+ return m.Float32Constant(static_cast<float>(value));
+ case MachineRepresentation::kFloat64:
+ return m.Float64Constant(static_cast<double>(value));
+ default:
+ UNREACHABLE();
+ }
+}
-TEST(ReturnThreeValues) {
- v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator, ZONE_NAME);
- CallDescriptor* desc = GetCallDescriptor(&zone, 3, 2);
- HandleAndZoneScope handles;
- RawMachineAssembler m(handles.main_isolate(),
- new (handles.main_zone()) Graph(handles.main_zone()),
- desc, MachineType::PointerRepresentation(),
- InstructionSelector::SupportedMachineOperatorFlags());
-
- Node* p0 = m.Parameter(0);
- Node* p1 = m.Parameter(1);
- Node* add = m.Int32Add(p0, p1);
- Node* sub = m.Int32Sub(p0, p1);
- Node* mul = m.Int32Mul(p0, p1);
- m.Return(add, sub, mul);
-
- CompilationInfo info(ArrayVector("testing"), handles.main_zone(), Code::STUB);
- Handle<Code> code = Pipeline::GenerateCodeForTesting(
- &info, handles.main_isolate(), desc, m.graph(), m.Export());
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_code) {
- OFStream os(stdout);
- code->Disassemble("three_value", os);
+Node* Add(RawMachineAssembler& m, MachineType type, Node* a, Node* b) {
+ switch (type.representation()) {
+ case MachineRepresentation::kWord32:
+ return m.Int32Add(a, b);
+ case MachineRepresentation::kWord64:
+ return m.Int64Add(a, b);
+ case MachineRepresentation::kFloat32:
+ return m.Float32Add(a, b);
+ case MachineRepresentation::kFloat64:
+ return m.Float64Add(a, b);
+ default:
+ UNREACHABLE();
}
+}
+
+Node* Sub(RawMachineAssembler& m, MachineType type, Node* a, Node* b) {
+ switch (type.representation()) {
+ case MachineRepresentation::kWord32:
+ return m.Int32Sub(a, b);
+ case MachineRepresentation::kWord64:
+ return m.Int64Sub(a, b);
+ case MachineRepresentation::kFloat32:
+ return m.Float32Sub(a, b);
+ case MachineRepresentation::kFloat64:
+ return m.Float64Sub(a, b);
+ default:
+ UNREACHABLE();
+ }
+}
+
+Node* Mul(RawMachineAssembler& m, MachineType type, Node* a, Node* b) {
+ switch (type.representation()) {
+ case MachineRepresentation::kWord32:
+ return m.Int32Mul(a, b);
+ case MachineRepresentation::kWord64:
+ return m.Int64Mul(a, b);
+ case MachineRepresentation::kFloat32:
+ return m.Float32Mul(a, b);
+ case MachineRepresentation::kFloat64:
+ return m.Float64Mul(a, b);
+ default:
+ UNREACHABLE();
+ }
+}
+
+Node* ToInt32(RawMachineAssembler& m, MachineType type, Node* a) {
+ switch (type.representation()) {
+ case MachineRepresentation::kWord32:
+ return a;
+ case MachineRepresentation::kWord64:
+ return m.TruncateInt64ToInt32(a);
+ case MachineRepresentation::kFloat32:
+ return m.TruncateFloat32ToInt32(a);
+ case MachineRepresentation::kFloat64:
+ return m.RoundFloat64ToInt32(a);
+ default:
+ UNREACHABLE();
+ }
+}
+
+void TestReturnMultipleValues(MachineType type) {
+ const int kMaxCount = 20;
+ for (int count = 0; count < kMaxCount; ++count) {
+ printf("\n==== type = %s, count = %d ====\n\n\n",
+ MachineReprToString(type.representation()), count);
+ v8::internal::AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+ CallDescriptor* desc = CreateMonoCallDescriptor(&zone, count, 2, type);
+ HandleAndZoneScope handles;
+ RawMachineAssembler m(handles.main_isolate(),
+ new (handles.main_zone()) Graph(handles.main_zone()),
+ desc, MachineType::PointerRepresentation(),
+ InstructionSelector::SupportedMachineOperatorFlags());
+
+ Node* p0 = m.Parameter(0);
+ Node* p1 = m.Parameter(1);
+ typedef Node* Node_ptr;
+ std::unique_ptr<Node_ptr[]> returns(new Node_ptr[count]);
+ for (int i = 0; i < count; ++i) {
+ if (i % 3 == 0) returns[i] = Add(m, type, p0, p1);
+ if (i % 3 == 1) returns[i] = Sub(m, type, p0, p1);
+ if (i % 3 == 2) returns[i] = Mul(m, type, p0, p1);
+ }
+ m.Return(count, returns.get());
+
+ CompilationInfo info(ArrayVector("testing"), handles.main_zone(),
+ Code::STUB);
+ Handle<Code> code = Pipeline::GenerateCodeForTesting(
+ &info, handles.main_isolate(), desc, m.graph(), m.Export());
+#ifdef ENABLE_DISASSEMBLER
+ if (FLAG_print_code) {
+ OFStream os(stdout);
+ code->Disassemble("multi_value", os);
+ }
#endif
- RawMachineAssemblerTester<int32_t> mt;
- Node* a = mt.Int32Constant(123);
- Node* b = mt.Int32Constant(456);
- Node* ret3 = mt.AddNode(mt.common()->Call(desc), mt.HeapConstant(code), a, b);
- Node* x = mt.AddNode(mt.common()->Projection(0), ret3);
- Node* y = mt.AddNode(mt.common()->Projection(1), ret3);
- Node* z = mt.AddNode(mt.common()->Projection(2), ret3);
- Node* ret = mt.Int32Add(mt.Int32Add(x, y), z);
- mt.Return(ret);
+ const int a = 47, b = 12;
+ int expect = 0;
+ for (int i = 0, sign = +1; i < count; ++i) {
+ if (i % 3 == 0) expect += sign * (a + b);
+ if (i % 3 == 1) expect += sign * (a - b);
+ if (i % 3 == 2) expect += sign * (a * b);
+ if (i % 4 == 0) sign = -sign;
+ }
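+    // This mirrors the verification graph built below: returns cycle through
+    // add/sub/mul of (a, b) and the accumulated sum flips sign on every
+    // fourth projection.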
+
+ RawMachineAssemblerTester<int32_t> mt;
+ Node* na = Constant(mt, type, a);
+ Node* nb = Constant(mt, type, b);
+ Node* ret_multi =
+ mt.AddNode(mt.common()->Call(desc), mt.HeapConstant(code), na, nb);
+ Node* ret = Constant(mt, type, 0);
+ bool sign = false;
+ for (int i = 0; i < count; ++i) {
+ Node* x = (count == 1)
+ ? ret_multi
+ : mt.AddNode(mt.common()->Projection(i), ret_multi);
+ ret = sign ? Sub(mt, type, ret, x) : Add(mt, type, ret, x);
+ if (i % 4 == 0) sign = !sign;
+ }
+ mt.Return(ToInt32(mt, type, ret));
#ifdef ENABLE_DISASSEMBLER
- Handle<Code> code2 = mt.GetCode();
- if (FLAG_print_code) {
- OFStream os(stdout);
- code2->Disassemble("three_value_call", os);
+ Handle<Code> code2 = mt.GetCode();
+ if (FLAG_print_code) {
+ OFStream os(stdout);
+ code2->Disassemble("multi_value_call", os);
+ }
+#endif
+ CHECK_EQ(expect, mt.Call());
+ }
+}
+
+#define TEST_MULTI(Type, type) \
+ TEST(ReturnMultiple##Type) { TestReturnMultipleValues(type); }
+
+TEST_MULTI(Int32, MachineType::Int32())
+#if (!V8_TARGET_ARCH_32_BIT)
+TEST_MULTI(Int64, MachineType::Int64())
+#endif
+TEST_MULTI(Float32, MachineType::Float32())
+TEST_MULTI(Float64, MachineType::Float64())
+
+#undef TEST_MULTI
+
+void ReturnLastValue(MachineType type) {
+ int slot_counts[] = {1, 2, 3, 600};
+ for (auto slot_count : slot_counts) {
+ v8::internal::AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+ const int return_count = num_registers(type) + slot_count;
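+    // Request more returns than there are allocatable registers of this type
+    // so that the last `slot_count` returns are forced onto the stack.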
+
+ CallDescriptor* desc =
+ CreateMonoCallDescriptor(&zone, return_count, 0, type);
+
+ HandleAndZoneScope handles;
+ RawMachineAssembler m(handles.main_isolate(),
+ new (handles.main_zone()) Graph(handles.main_zone()),
+ desc, MachineType::PointerRepresentation(),
+ InstructionSelector::SupportedMachineOperatorFlags());
+
+ std::unique_ptr<Node* []> returns(new Node*[return_count]);
+
+ for (int i = 0; i < return_count; ++i) {
+ returns[i] = Constant(m, type, i);
+ }
+
+ m.Return(return_count, returns.get());
+
+ CompilationInfo info(ArrayVector("testing"), handles.main_zone(),
+ Code::STUB);
+ Handle<Code> code = Pipeline::GenerateCodeForTesting(
+ &info, handles.main_isolate(), desc, m.graph(), m.Export());
+
+ // Generate caller.
+ int expect = return_count - 1;
+ RawMachineAssemblerTester<int32_t> mt;
+ Node* code_node = mt.HeapConstant(code);
+
+ Node* call = mt.AddNode(mt.common()->Call(desc), 1, &code_node);
+
+ mt.Return(ToInt32(
+ mt, type, mt.AddNode(mt.common()->Projection(return_count - 1), call)));
+
+ CHECK_EQ(expect, mt.Call());
}
+}
+
+TEST(ReturnLastValueInt32) { ReturnLastValue(MachineType::Int32()); }
+#if (!V8_TARGET_ARCH_32_BIT)
+TEST(ReturnLastValueInt64) { ReturnLastValue(MachineType::Int64()); }
#endif
- CHECK_EQ((123 + 456) + (123 - 456) + (123 * 456), mt.Call());
+TEST(ReturnLastValueFloat32) { ReturnLastValue(MachineType::Float32()); }
+TEST(ReturnLastValueFloat64) { ReturnLastValue(MachineType::Float64()); }
+
+void ReturnSumOfReturns(MachineType type) {
+ for (int unused_stack_slots = 0; unused_stack_slots <= 2;
+ ++unused_stack_slots) {
+ v8::internal::AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+ // Let {unused_stack_slots + 1} returns be on the stack.
+ const int return_count = num_registers(type) + unused_stack_slots + 1;
+
+ CallDescriptor* desc =
+ CreateMonoCallDescriptor(&zone, return_count, 0, type);
+
+ HandleAndZoneScope handles;
+ RawMachineAssembler m(handles.main_isolate(),
+ new (handles.main_zone()) Graph(handles.main_zone()),
+ desc, MachineType::PointerRepresentation(),
+ InstructionSelector::SupportedMachineOperatorFlags());
+
+ std::unique_ptr<Node* []> returns(new Node*[return_count]);
+
+ for (int i = 0; i < return_count; ++i) {
+ returns[i] = Constant(m, type, i);
+ }
+
+ m.Return(return_count, returns.get());
+
+ CompilationInfo info(ArrayVector("testing"), handles.main_zone(),
+ Code::STUB);
+ Handle<Code> code = Pipeline::GenerateCodeForTesting(
+ &info, handles.main_isolate(), desc, m.graph(), m.Export());
+
+ // Generate caller.
+ RawMachineAssemblerTester<int32_t> mt;
+ Node* code_node = mt.HeapConstant(code);
+
+ Node* call = mt.AddNode(mt.common()->Call(desc), 1, &code_node);
+
+ uint32_t expect = 0;
+ Node* result = mt.Int32Constant(0);
+
+ for (int i = 0; i < return_count; ++i) {
+ expect += i;
+ result = mt.Int32Add(
+ result,
+ ToInt32(mt, type, mt.AddNode(mt.common()->Projection(i), call)));
+ }
+
+ mt.Return(result);
+
+ CHECK_EQ(expect, mt.Call());
+ }
}
+TEST(ReturnSumOfReturnsInt32) { ReturnSumOfReturns(MachineType::Int32()); }
+#if (!V8_TARGET_ARCH_32_BIT)
+TEST(ReturnSumOfReturnsInt64) { ReturnSumOfReturns(MachineType::Int64()); }
+#endif
+TEST(ReturnSumOfReturnsFloat32) { ReturnSumOfReturns(MachineType::Float32()); }
+TEST(ReturnSumOfReturnsFloat64) { ReturnSumOfReturns(MachineType::Float64()); }
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/compiler/test-operator.cc b/deps/v8/test/cctest/compiler/test-operator.cc
index 10dad490b8..7428359223 100644
--- a/deps/v8/test/cctest/compiler/test-operator.cc
+++ b/deps/v8/test/cctest/compiler/test-operator.cc
@@ -261,7 +261,7 @@ TEST(TestOpParameter_Operator1float) {
TEST(TestOpParameter_Operator1int) {
- int values[] = {7777, -66, 0, 11, 1, 0x666aff};
+ int values[] = {7777, -66, 0, 11, 1, 0x666AFF};
for (size_t i = 0; i < arraysize(values); i++) {
Operator1<int> op(33, NONE, "Scurvy", 0, 0, 0, 0, 0, 0, values[i]);
diff --git a/deps/v8/test/cctest/compiler/test-representation-change.cc b/deps/v8/test/cctest/compiler/test-representation-change.cc
index e61713f836..08cd73f4ce 100644
--- a/deps/v8/test/cctest/compiler/test-representation-change.cc
+++ b/deps/v8/test/cctest/compiler/test-representation-change.cc
@@ -285,7 +285,7 @@ static void CheckChange(IrOpcode::Value expected, MachineRepresentation from,
from_type->Maybe(Type::MinusZero())
? use_info.minus_zero_check()
: CheckForMinusZeroMode::kDontCheckForMinusZero;
- CHECK_EQ(mode, CheckMinusZeroModeOf(c->op()));
+ CHECK_EQ(mode, CheckMinusZeroParametersOf(c->op()).mode());
}
}
@@ -444,11 +444,13 @@ TEST(SignednessInWord32) {
static void TestMinusZeroCheck(IrOpcode::Value expected, Type* from_type) {
RepresentationChangerTester r;
- CheckChange(expected, MachineRepresentation::kFloat64, from_type,
- UseInfo::CheckedSignedSmallAsWord32(kDistinguishZeros));
+ CheckChange(
+ expected, MachineRepresentation::kFloat64, from_type,
+ UseInfo::CheckedSignedSmallAsWord32(kDistinguishZeros, VectorSlotPair()));
- CheckChange(expected, MachineRepresentation::kFloat64, from_type,
- UseInfo::CheckedSignedSmallAsWord32(kIdentifyZeros));
+ CheckChange(
+ expected, MachineRepresentation::kFloat64, from_type,
+ UseInfo::CheckedSignedSmallAsWord32(kIdentifyZeros, VectorSlotPair()));
CheckChange(expected, MachineRepresentation::kFloat64, from_type,
UseInfo::CheckedSigned32AsWord32(kDistinguishZeros));
diff --git a/deps/v8/test/cctest/compiler/test-run-jsops.cc b/deps/v8/test/cctest/compiler/test-run-jsops.cc
index 49033f7995..dfa2299cff 100644
--- a/deps/v8/test/cctest/compiler/test-run-jsops.cc
+++ b/deps/v8/test/cctest/compiler/test-run-jsops.cc
@@ -104,7 +104,7 @@ TEST(BinopShiftRightLogical) {
FunctionTester T("(function(a,b) { return a >>> b; })");
T.CheckCall(4, 8, 1);
- T.CheckCall(0x7ffffffc, -8, 1);
+ T.CheckCall(0x7FFFFFFC, -8, 1);
T.CheckCall(T.Val(4), T.Val("8"), T.Val(1));
T.CheckCall(T.Val(4), T.Val(8), T.Val("1"));
}
diff --git a/deps/v8/test/cctest/compiler/test-run-load-store.cc b/deps/v8/test/cctest/compiler/test-run-load-store.cc
index 2774bbe814..8014d5bffc 100644
--- a/deps/v8/test/cctest/compiler/test-run-load-store.cc
+++ b/deps/v8/test/cctest/compiler/test-run-load-store.cc
@@ -16,33 +16,6 @@
#include "test/cctest/compiler/value-helper.h"
-namespace {
-template <typename Type>
-void CheckOobValue(Type val) {
- UNREACHABLE();
-}
-
-template <>
-void CheckOobValue(int32_t val) {
- CHECK_EQ(0, val);
-}
-
-template <>
-void CheckOobValue(int64_t val) {
- CHECK_EQ(0, val);
-}
-
-template <>
-void CheckOobValue(float val) {
- CHECK(std::isnan(val));
-}
-
-template <>
-void CheckOobValue(double val) {
- CHECK(std::isnan(val));
-}
-} // namespace
-
namespace v8 {
namespace internal {
namespace compiler {
@@ -80,7 +53,7 @@ void RunLoadInt32Offset(TestAlignment t) {
int32_t p1 = 0; // loads directly from this location.
int32_t offsets[] = {-2000000, -100, -101, 1, 3,
- 7, 120, 2000, 2000000000, 0xff};
+ 7, 120, 2000, 2000000000, 0xFF};
for (size_t i = 0; i < arraysize(offsets); i++) {
RawMachineAssemblerTester<int32_t> m;
@@ -109,7 +82,7 @@ void RunLoadStoreFloat32Offset(TestAlignment t) {
float p2 = 0.0f; // and stores directly into this location.
FOR_INT32_INPUTS(i) {
- int32_t magic = 0x2342aabb + *i * 3;
+ int32_t magic = 0x2342AABB + *i * 3;
RawMachineAssemblerTester<int32_t> m;
int32_t offset = *i;
byte* from = reinterpret_cast<byte*>(&p1) - offset;
@@ -146,7 +119,7 @@ void RunLoadStoreFloat64Offset(TestAlignment t) {
double p2 = 0; // and stores directly into this location.
FOR_INT32_INPUTS(i) {
- int32_t magic = 0x2342aabb + *i * 3;
+ int32_t magic = 0x2342AABB + *i * 3;
RawMachineAssemblerTester<int32_t> m;
int32_t offset = *i;
byte* from = reinterpret_cast<byte*>(&p1) - offset;
@@ -418,9 +391,9 @@ void RunLoadStoreSignExtend32(TestAlignment t) {
FOR_INT32_INPUTS(i) {
buffer[0] = *i;
- CHECK_EQ(static_cast<int8_t>(*i & 0xff), m.Call());
- CHECK_EQ(static_cast<int8_t>(*i & 0xff), buffer[1]);
- CHECK_EQ(static_cast<int16_t>(*i & 0xffff), buffer[2]);
+ CHECK_EQ(static_cast<int8_t>(*i & 0xFF), m.Call());
+ CHECK_EQ(static_cast<int8_t>(*i & 0xFF), buffer[1]);
+ CHECK_EQ(static_cast<int16_t>(*i & 0xFFFF), buffer[2]);
CHECK_EQ(*i, buffer[3]);
}
}
@@ -451,9 +424,9 @@ void RunLoadStoreZeroExtend32(TestAlignment t) {
FOR_UINT32_INPUTS(i) {
buffer[0] = *i;
- CHECK_EQ((*i & 0xff), m.Call());
- CHECK_EQ((*i & 0xff), buffer[1]);
- CHECK_EQ((*i & 0xffff), buffer[2]);
+ CHECK_EQ((*i & 0xFF), m.Call());
+ CHECK_EQ((*i & 0xFF), buffer[1]);
+ CHECK_EQ((*i & 0xFFFF), buffer[2]);
CHECK_EQ(*i, buffer[3]);
}
}
@@ -479,7 +452,7 @@ TEST(RunUnalignedLoadStoreZeroExtend32) {
namespace {
void RunLoadStoreSignExtend64(TestAlignment t) {
- if (true) return; // TODO(titzer): sign extension of loads to 64-bit.
+ if ((true)) return; // TODO(titzer): sign extension of loads to 64-bit.
int64_t buffer[5];
RawMachineAssemblerTester<int64_t> m;
Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Int8());
@@ -512,10 +485,10 @@ void RunLoadStoreSignExtend64(TestAlignment t) {
FOR_INT64_INPUTS(i) {
buffer[0] = *i;
- CHECK_EQ(static_cast<int8_t>(*i & 0xff), m.Call());
- CHECK_EQ(static_cast<int8_t>(*i & 0xff), buffer[1]);
- CHECK_EQ(static_cast<int16_t>(*i & 0xffff), buffer[2]);
- CHECK_EQ(static_cast<int32_t>(*i & 0xffffffff), buffer[3]);
+ CHECK_EQ(static_cast<int8_t>(*i & 0xFF), m.Call());
+ CHECK_EQ(static_cast<int8_t>(*i & 0xFF), buffer[1]);
+ CHECK_EQ(static_cast<int16_t>(*i & 0xFFFF), buffer[2]);
+ CHECK_EQ(static_cast<int32_t>(*i & 0xFFFFFFFF), buffer[3]);
CHECK_EQ(*i, buffer[4]);
}
}
@@ -555,31 +528,16 @@ void RunLoadStoreZeroExtend64(TestAlignment t) {
FOR_UINT64_INPUTS(i) {
buffer[0] = *i;
- CHECK_EQ((*i & 0xff), m.Call());
- CHECK_EQ((*i & 0xff), buffer[1]);
- CHECK_EQ((*i & 0xffff), buffer[2]);
- CHECK_EQ((*i & 0xffffffff), buffer[3]);
+ CHECK_EQ((*i & 0xFF), m.Call());
+ CHECK_EQ((*i & 0xFF), buffer[1]);
+ CHECK_EQ((*i & 0xFFFF), buffer[2]);
+ CHECK_EQ((*i & 0xFFFFFFFF), buffer[3]);
CHECK_EQ(*i, buffer[4]);
}
}
} // namespace
-TEST(RunCheckedLoadInt64) {
- int64_t buffer[] = {0x66bbccddeeff0011LL, 0x1122334455667788LL};
- RawMachineAssemblerTester<int64_t> m(MachineType::Int32());
- Node* base = m.PointerConstant(buffer);
- Node* index = m.Parameter(0);
- Node* length = m.Int32Constant(16);
- Node* load = m.AddNode(m.machine()->CheckedLoad(MachineType::Int64()), base,
- index, length);
- m.Return(load);
-
- CHECK_EQ(buffer[0], m.Call(0));
- CHECK_EQ(buffer[1], m.Call(8));
- CheckOobValue(m.Call(16));
-}
-
TEST(RunLoadStoreSignExtend64) {
RunLoadStoreSignExtend64(TestAlignment::kAligned);
}
@@ -596,33 +554,6 @@ TEST(RunUnalignedLoadStoreZeroExtend64) {
RunLoadStoreZeroExtend64(TestAlignment::kUnaligned);
}
-TEST(RunCheckedStoreInt64) {
- const int64_t write = 0x5566778899aabbLL;
- const int64_t before = 0x33bbccddeeff0011LL;
- int64_t buffer[] = {before, before};
- RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
- Node* base = m.PointerConstant(buffer);
- Node* index = m.Parameter(0);
- Node* length = m.Int32Constant(16);
- Node* value = m.Int64Constant(write);
- Node* store =
- m.AddNode(m.machine()->CheckedStore(MachineRepresentation::kWord64), base,
- index, length, value);
- USE(store);
- m.Return(m.Int32Constant(11));
-
- CHECK_EQ(11, m.Call(16));
- CHECK_EQ(before, buffer[0]);
- CHECK_EQ(before, buffer[1]);
-
- CHECK_EQ(11, m.Call(0));
- CHECK_EQ(write, buffer[0]);
- CHECK_EQ(before, buffer[1]);
-
- CHECK_EQ(11, m.Call(8));
- CHECK_EQ(write, buffer[0]);
- CHECK_EQ(write, buffer[1]);
-}
#endif
namespace {
@@ -677,515 +608,6 @@ TEST(RunUnalignedLoadStoreTruncation) {
LoadStoreTruncation<int16_t>(MachineType::Int16(), TestAlignment::kUnaligned);
}
-void TestRunOobCheckedLoad(bool length_is_immediate) {
- USE(CheckOobValue<int32_t>);
- USE(CheckOobValue<int64_t>);
- USE(CheckOobValue<float>);
- USE(CheckOobValue<double>);
-
- RawMachineAssemblerTester<int32_t> m(MachineType::Int32(),
- MachineType::Int32());
- MachineOperatorBuilder machine(m.zone());
- const int32_t kNumElems = 27;
- const int32_t kLength = kNumElems * 4;
-
- int32_t buffer[kNumElems];
- Node* base = m.PointerConstant(buffer);
- Node* offset = m.Parameter(0);
- Node* len = length_is_immediate ? m.Int32Constant(kLength) : m.Parameter(1);
- Node* node =
- m.AddNode(machine.CheckedLoad(MachineType::Int32()), base, offset, len);
- m.Return(node);
-
- {
- // randomize memory.
- v8::base::RandomNumberGenerator rng;
- rng.SetSeed(100);
- rng.NextBytes(&buffer[0], sizeof(buffer));
- }
-
- // in-bounds accesses.
- for (int32_t i = 0; i < kNumElems; i++) {
- int32_t offset = static_cast<int32_t>(i * sizeof(int32_t));
- int32_t expected = buffer[i];
- CHECK_EQ(expected, m.Call(offset, kLength));
- }
-
- // slightly out-of-bounds accesses.
- for (int32_t i = kLength; i < kNumElems + 30; i++) {
- int32_t offset = static_cast<int32_t>(i * sizeof(int32_t));
- CheckOobValue(m.Call(offset, kLength));
- }
-
- // way out-of-bounds accesses.
- for (int32_t offset = -2000000000; offset <= 2000000000;
- offset += 100000000) {
- if (offset == 0) continue;
- CheckOobValue(m.Call(offset, kLength));
- }
-}
-
-TEST(RunOobCheckedLoad) { TestRunOobCheckedLoad(false); }
-
-TEST(RunOobCheckedLoadImm) { TestRunOobCheckedLoad(true); }
-
-void TestRunOobCheckedStore(bool length_is_immediate) {
- RawMachineAssemblerTester<int32_t> m(MachineType::Int32(),
- MachineType::Int32());
- MachineOperatorBuilder machine(m.zone());
- const int32_t kNumElems = 29;
- const int32_t kValue = -78227234;
- const int32_t kLength = kNumElems * 4;
-
- int32_t buffer[kNumElems + kNumElems];
- Node* base = m.PointerConstant(buffer);
- Node* offset = m.Parameter(0);
- Node* len = length_is_immediate ? m.Int32Constant(kLength) : m.Parameter(1);
- Node* val = m.Int32Constant(kValue);
- m.AddNode(machine.CheckedStore(MachineRepresentation::kWord32), base, offset,
- len, val);
- m.Return(val);
-
- // in-bounds accesses.
- for (int32_t i = 0; i < kNumElems; i++) {
- memset(buffer, 0, sizeof(buffer));
- int32_t offset = static_cast<int32_t>(i * sizeof(int32_t));
- CHECK_EQ(kValue, m.Call(offset, kLength));
- for (int32_t j = 0; j < kNumElems + kNumElems; j++) {
- if (i == j) {
- CHECK_EQ(kValue, buffer[j]);
- } else {
- CHECK_EQ(0, buffer[j]);
- }
- }
- }
-
- memset(buffer, 0, sizeof(buffer));
-
- // slightly out-of-bounds accesses.
- for (int32_t i = kLength; i < kNumElems + 30; i++) {
- int32_t offset = static_cast<int32_t>(i * sizeof(int32_t));
- CHECK_EQ(kValue, m.Call(offset, kLength));
- for (int32_t j = 0; j < kNumElems + kNumElems; j++) {
- CHECK_EQ(0, buffer[j]);
- }
- }
-
- // way out-of-bounds accesses.
- for (int32_t offset = -2000000000; offset <= 2000000000;
- offset += 100000000) {
- if (offset == 0) continue;
- CHECK_EQ(kValue, m.Call(offset, kLength));
- for (int32_t j = 0; j < kNumElems + kNumElems; j++) {
- CHECK_EQ(0, buffer[j]);
- }
- }
-}
-
-TEST(RunOobCheckedStore) { TestRunOobCheckedStore(false); }
-
-TEST(RunOobCheckedStoreImm) { TestRunOobCheckedStore(true); }
-
-// TODO(titzer): CheckedLoad/CheckedStore don't support 64-bit offsets.
-#define ALLOW_64_BIT_OFFSETS 0
-
-#if V8_TARGET_ARCH_64_BIT && ALLOW_64_BIT_OFFSETS
-
-void TestRunOobCheckedLoad64(uint32_t pseudo_base, bool length_is_immediate) {
- RawMachineAssemblerTester<int32_t> m(MachineType::Uint64(),
- MachineType::Uint64());
- MachineOperatorBuilder machine(m.zone());
- const uint32_t kNumElems = 25;
- const uint32_t kLength = kNumElems * 4;
- int32_t real_buffer[kNumElems];
-
- // Simulate the end of a large buffer.
- int32_t* buffer = real_buffer - (pseudo_base / 4);
- uint64_t length = kLength + pseudo_base;
-
- Node* base = m.PointerConstant(buffer);
- Node* offset = m.Parameter(0);
- Node* len = length_is_immediate ? m.Int64Constant(length) : m.Parameter(1);
- Node* node =
- m.AddNode(machine.CheckedLoad(MachineType::Int32()), base, offset, len);
- m.Return(node);
-
- {
- // randomize memory.
- v8::base::RandomNumberGenerator rng;
- rng.SetSeed(100);
- rng.NextBytes(&real_buffer[0], sizeof(real_buffer));
- }
-
- // in-bounds accesses.
- for (uint32_t i = 0; i < kNumElems; i++) {
- uint64_t offset = pseudo_base + i * 4;
- int32_t expected = real_buffer[i];
- CHECK_EQ(expected, m.Call(offset, length));
- }
-
- // in-bounds accesses w.r.t lower 32-bits, but upper bits set.
- for (uint64_t i = 0x100000000ULL; i != 0; i <<= 1) {
- uint64_t offset = pseudo_base + i;
- CheckOobValue(m.Call(offset, length));
- }
-
- // slightly out-of-bounds accesses.
- for (uint32_t i = kLength; i < kNumElems + 30; i++) {
- uint64_t offset = pseudo_base + i * 4;
- CheckOobValue(0, m.Call(offset, length));
- }
-
- // way out-of-bounds accesses.
- for (uint64_t offset = length; offset < 100 * A_BILLION; offset += A_GIG) {
- if (offset < length) continue;
- CheckOobValue(0, m.Call(offset, length));
- }
-}
-
-TEST(RunOobCheckedLoad64_0) {
- TestRunOobCheckedLoad64(0, false);
- TestRunOobCheckedLoad64(0, true);
-}
-
-TEST(RunOobCheckedLoad64_1) {
- TestRunOobCheckedLoad64(1 * A_BILLION, false);
- TestRunOobCheckedLoad64(1 * A_BILLION, true);
-}
-
-TEST(RunOobCheckedLoad64_2) {
- TestRunOobCheckedLoad64(2 * A_BILLION, false);
- TestRunOobCheckedLoad64(2 * A_BILLION, true);
-}
-
-TEST(RunOobCheckedLoad64_3) {
- TestRunOobCheckedLoad64(3 * A_BILLION, false);
- TestRunOobCheckedLoad64(3 * A_BILLION, true);
-}
-
-TEST(RunOobCheckedLoad64_4) {
- TestRunOobCheckedLoad64(4 * A_BILLION, false);
- TestRunOobCheckedLoad64(4 * A_BILLION, true);
-}
-
-void TestRunOobCheckedStore64(uint32_t pseudo_base, bool length_is_immediate) {
- RawMachineAssemblerTester<int32_t> m(MachineType::Uint64(),
- MachineType::Uint64());
- MachineOperatorBuilder machine(m.zone());
- const uint32_t kNumElems = 21;
- const uint32_t kLength = kNumElems * 4;
- const uint32_t kValue = 897234987;
- int32_t real_buffer[kNumElems + kNumElems];
-
- // Simulate the end of a large buffer.
- int32_t* buffer = real_buffer - (pseudo_base / 4);
- uint64_t length = kLength + pseudo_base;
-
- Node* base = m.PointerConstant(buffer);
- Node* offset = m.Parameter(0);
- Node* len = length_is_immediate ? m.Int64Constant(length) : m.Parameter(1);
- Node* val = m.Int32Constant(kValue);
- m.AddNode(machine.CheckedStore(MachineRepresentation::kWord32), base, offset,
- len, val);
- m.Return(val);
-
- // in-bounds accesses.
- for (uint32_t i = 0; i < kNumElems; i++) {
- memset(real_buffer, 0, sizeof(real_buffer));
- uint64_t offset = pseudo_base + i * 4;
- CHECK_EQ(kValue, m.Call(offset, length));
- for (uint32_t j = 0; j < kNumElems + kNumElems; j++) {
- if (i == j) {
- CHECK_EQ(kValue, real_buffer[j]);
- } else {
- CHECK_EQ(0, real_buffer[j]);
- }
- }
- }
-
- memset(real_buffer, 0, sizeof(real_buffer));
-
- // in-bounds accesses w.r.t lower 32-bits, but upper bits set.
- for (uint64_t i = 0x100000000ULL; i != 0; i <<= 1) {
- uint64_t offset = pseudo_base + i;
- CHECK_EQ(kValue, m.Call(offset, length));
- for (int32_t j = 0; j < kNumElems + kNumElems; j++) {
- CHECK_EQ(0, real_buffer[j]);
- }
- }
-
- // slightly out-of-bounds accesses.
- for (uint32_t i = kLength; i < kNumElems + 30; i++) {
- uint64_t offset = pseudo_base + i * 4;
- CHECK_EQ(kValue, m.Call(offset, length));
- for (int32_t j = 0; j < kNumElems + kNumElems; j++) {
- CHECK_EQ(0, real_buffer[j]);
- }
- }
-
- // way out-of-bounds accesses.
- for (uint64_t offset = length; offset < 100 * A_BILLION; offset += A_GIG) {
- if (offset < length) continue;
- CHECK_EQ(kValue, m.Call(offset, length));
- for (int32_t j = 0; j < kNumElems + kNumElems; j++) {
- CHECK_EQ(0, real_buffer[j]);
- }
- }
-}
-
-TEST(RunOobCheckedStore64_0) {
- TestRunOobCheckedStore64(0, false);
- TestRunOobCheckedStore64(0, true);
-}
-
-TEST(RunOobCheckedStore64_1) {
- TestRunOobCheckedStore64(1 * A_BILLION, false);
- TestRunOobCheckedStore64(1 * A_BILLION, true);
-}
-
-TEST(RunOobCheckedStore64_2) {
- TestRunOobCheckedStore64(2 * A_BILLION, false);
- TestRunOobCheckedStore64(2 * A_BILLION, true);
-}
-
-TEST(RunOobCheckedStore64_3) {
- TestRunOobCheckedStore64(3 * A_BILLION, false);
- TestRunOobCheckedStore64(3 * A_BILLION, true);
-}
-
-TEST(RunOobCheckedStore64_4) {
- TestRunOobCheckedStore64(4 * A_BILLION, false);
- TestRunOobCheckedStore64(4 * A_BILLION, true);
-}
-
-#endif
-
-void TestRunOobCheckedLoad_pseudo(uint64_t x, bool length_is_immediate) {
- RawMachineAssemblerTester<int32_t> m(MachineType::Uint32(),
- MachineType::Uint32());
-
- uint32_t pseudo_base = static_cast<uint32_t>(x);
- MachineOperatorBuilder machine(m.zone());
- const uint32_t kNumElems = 29;
- const uint32_t kLength = pseudo_base + kNumElems * 4;
-
- int32_t buffer[kNumElems];
- Node* base = m.PointerConstant(reinterpret_cast<byte*>(buffer) - pseudo_base);
- Node* offset = m.Parameter(0);
- Node* len = length_is_immediate ? m.Int32Constant(kLength) : m.Parameter(1);
- Node* node =
- m.AddNode(machine.CheckedLoad(MachineType::Int32()), base, offset, len);
- m.Return(node);
-
- {
- // randomize memory.
- v8::base::RandomNumberGenerator rng;
- rng.SetSeed(100);
- rng.NextBytes(&buffer[0], sizeof(buffer));
- }
-
- // in-bounds accesses.
- for (uint32_t i = 0; i < kNumElems; i++) {
- uint32_t offset = static_cast<uint32_t>(i * sizeof(int32_t));
- uint32_t expected = buffer[i];
- CHECK_EQ(expected,
- static_cast<uint32_t>(m.Call(offset + pseudo_base, kLength)));
- }
-
- // slightly out-of-bounds accesses.
- for (uint32_t i = kNumElems; i < kNumElems + 30; i++) {
- uint32_t offset = i * sizeof(int32_t);
- CheckOobValue(m.Call(offset + pseudo_base, kLength));
- }
-
- // way out-of-bounds accesses.
- for (uint64_t i = pseudo_base + sizeof(buffer); i < 0xFFFFFFFF;
- i += A_BILLION) {
- uint32_t offset = static_cast<uint32_t>(i);
- CheckOobValue(m.Call(offset, kLength));
- }
-}
-
-TEST(RunOobCheckedLoad_pseudo0) {
- TestRunOobCheckedLoad_pseudo(0, false);
- TestRunOobCheckedLoad_pseudo(0, true);
-}
-
-TEST(RunOobCheckedLoad_pseudo1) {
- TestRunOobCheckedLoad_pseudo(100000, false);
- TestRunOobCheckedLoad_pseudo(100000, true);
-}
-
-TEST(RunOobCheckedLoad_pseudo2) {
- TestRunOobCheckedLoad_pseudo(A_BILLION, false);
- TestRunOobCheckedLoad_pseudo(A_BILLION, true);
-}
-
-TEST(RunOobCheckedLoad_pseudo3) {
- TestRunOobCheckedLoad_pseudo(A_GIG, false);
- TestRunOobCheckedLoad_pseudo(A_GIG, true);
-}
-
-TEST(RunOobCheckedLoad_pseudo4) {
- TestRunOobCheckedLoad_pseudo(2 * A_BILLION, false);
- TestRunOobCheckedLoad_pseudo(2 * A_BILLION, true);
-}
-
-TEST(RunOobCheckedLoad_pseudo5) {
- TestRunOobCheckedLoad_pseudo(2 * A_GIG, false);
- TestRunOobCheckedLoad_pseudo(2 * A_GIG, true);
-}
-
-TEST(RunOobCheckedLoad_pseudo6) {
- TestRunOobCheckedLoad_pseudo(3 * A_BILLION, false);
- TestRunOobCheckedLoad_pseudo(3 * A_BILLION, true);
-}
-
-TEST(RunOobCheckedLoad_pseudo7) {
- TestRunOobCheckedLoad_pseudo(3 * A_GIG, false);
- TestRunOobCheckedLoad_pseudo(3 * A_GIG, true);
-}
-
-TEST(RunOobCheckedLoad_pseudo8) {
- TestRunOobCheckedLoad_pseudo(4 * A_BILLION, false);
- TestRunOobCheckedLoad_pseudo(4 * A_BILLION, true);
-}
-
-template <typename MemType>
-void TestRunOobCheckedLoadT_pseudo(uint64_t x, bool length_is_immediate) {
- const int32_t kReturn = 11999;
- const uint32_t kNumElems = 29;
- MemType buffer[kNumElems];
- uint32_t pseudo_base = static_cast<uint32_t>(x);
- const uint32_t kLength = static_cast<uint32_t>(pseudo_base + sizeof(buffer));
-
- MemType result;
-
- RawMachineAssemblerTester<int32_t> m(MachineType::Uint32(),
- MachineType::Uint32());
- MachineOperatorBuilder machine(m.zone());
- Node* base = m.PointerConstant(reinterpret_cast<byte*>(buffer) - pseudo_base);
- Node* offset = m.Parameter(0);
- Node* len = length_is_immediate ? m.Int32Constant(kLength) : m.Parameter(1);
- Node* node = m.AddNode(machine.CheckedLoad(MachineTypeForC<MemType>()), base,
- offset, len);
- Node* store = m.StoreToPointer(
- &result, MachineTypeForC<MemType>().representation(), node);
- USE(store);
- m.Return(m.Int32Constant(kReturn));
-
- {
- // randomize memory.
- v8::base::RandomNumberGenerator rng;
- rng.SetSeed(103);
- rng.NextBytes(&buffer[0], sizeof(buffer));
- }
-
- // in-bounds accesses.
- for (uint32_t i = 0; i < kNumElems; i++) {
- uint32_t offset = static_cast<uint32_t>(i * sizeof(MemType));
- MemType expected = buffer[i];
- CHECK_EQ(kReturn, m.Call(offset + pseudo_base, kLength));
- CHECK_EQ(expected, result);
- }
-
- // slightly out-of-bounds accesses.
- for (uint32_t i = kNumElems; i < kNumElems + 30; i++) {
- uint32_t offset = static_cast<uint32_t>(i * sizeof(MemType));
- CHECK_EQ(kReturn, m.Call(offset + pseudo_base, kLength));
- CheckOobValue(result);
- }
-
- // way out-of-bounds accesses.
- for (uint64_t i = pseudo_base + sizeof(buffer); i < 0xFFFFFFFF;
- i += A_BILLION) {
- uint32_t offset = static_cast<uint32_t>(i);
- CHECK_EQ(kReturn, m.Call(offset, kLength));
- CheckOobValue(result);
- }
-}
-
-TEST(RunOobCheckedLoadT_pseudo0) {
- TestRunOobCheckedLoadT_pseudo<int32_t>(0, false);
- TestRunOobCheckedLoadT_pseudo<int32_t>(0, true);
- TestRunOobCheckedLoadT_pseudo<float>(0, false);
- TestRunOobCheckedLoadT_pseudo<float>(0, true);
- TestRunOobCheckedLoadT_pseudo<double>(0, false);
- TestRunOobCheckedLoadT_pseudo<double>(0, true);
-}
-
-TEST(RunOobCheckedLoadT_pseudo1) {
- TestRunOobCheckedLoadT_pseudo<int32_t>(100000, false);
- TestRunOobCheckedLoadT_pseudo<int32_t>(100000, true);
- TestRunOobCheckedLoadT_pseudo<float>(100000, false);
- TestRunOobCheckedLoadT_pseudo<float>(100000, true);
- TestRunOobCheckedLoadT_pseudo<double>(100000, false);
- TestRunOobCheckedLoadT_pseudo<double>(100000, true);
-}
-
-TEST(RunOobCheckedLoadT_pseudo2) {
- TestRunOobCheckedLoadT_pseudo<int32_t>(A_BILLION, false);
- TestRunOobCheckedLoadT_pseudo<int32_t>(A_BILLION, true);
- TestRunOobCheckedLoadT_pseudo<float>(A_BILLION, false);
- TestRunOobCheckedLoadT_pseudo<float>(A_BILLION, true);
- TestRunOobCheckedLoadT_pseudo<double>(A_BILLION, false);
- TestRunOobCheckedLoadT_pseudo<double>(A_BILLION, true);
-}
-
-TEST(RunOobCheckedLoadT_pseudo3) {
- TestRunOobCheckedLoadT_pseudo<int32_t>(A_GIG, false);
- TestRunOobCheckedLoadT_pseudo<int32_t>(A_GIG, true);
- TestRunOobCheckedLoadT_pseudo<float>(A_GIG, false);
- TestRunOobCheckedLoadT_pseudo<float>(A_GIG, true);
- TestRunOobCheckedLoadT_pseudo<double>(A_GIG, false);
- TestRunOobCheckedLoadT_pseudo<double>(A_GIG, true);
-}
-
-TEST(RunOobCheckedLoadT_pseudo4) {
- TestRunOobCheckedLoadT_pseudo<int32_t>(2 * A_BILLION, false);
- TestRunOobCheckedLoadT_pseudo<int32_t>(2 * A_BILLION, true);
- TestRunOobCheckedLoadT_pseudo<float>(2 * A_BILLION, false);
- TestRunOobCheckedLoadT_pseudo<float>(2 * A_BILLION, true);
- TestRunOobCheckedLoadT_pseudo<double>(2 * A_BILLION, false);
- TestRunOobCheckedLoadT_pseudo<double>(2 * A_BILLION, true);
-}
-
-TEST(RunOobCheckedLoadT_pseudo5) {
- TestRunOobCheckedLoadT_pseudo<int32_t>(2 * A_GIG, false);
- TestRunOobCheckedLoadT_pseudo<int32_t>(2 * A_GIG, true);
- TestRunOobCheckedLoadT_pseudo<float>(2 * A_GIG, false);
- TestRunOobCheckedLoadT_pseudo<float>(2 * A_GIG, true);
- TestRunOobCheckedLoadT_pseudo<double>(2 * A_GIG, false);
- TestRunOobCheckedLoadT_pseudo<double>(2 * A_GIG, true);
-}
-
-TEST(RunOobCheckedLoadT_pseudo6) {
- TestRunOobCheckedLoadT_pseudo<int32_t>(3 * A_BILLION, false);
- TestRunOobCheckedLoadT_pseudo<int32_t>(3 * A_BILLION, true);
- TestRunOobCheckedLoadT_pseudo<float>(3 * A_BILLION, false);
- TestRunOobCheckedLoadT_pseudo<float>(3 * A_BILLION, true);
- TestRunOobCheckedLoadT_pseudo<double>(3 * A_BILLION, false);
- TestRunOobCheckedLoadT_pseudo<double>(3 * A_BILLION, true);
-}
-
-TEST(RunOobCheckedLoadT_pseudo7) {
- TestRunOobCheckedLoadT_pseudo<int32_t>(3 * A_GIG, false);
- TestRunOobCheckedLoadT_pseudo<int32_t>(3 * A_GIG, true);
- TestRunOobCheckedLoadT_pseudo<float>(3 * A_GIG, false);
- TestRunOobCheckedLoadT_pseudo<float>(3 * A_GIG, true);
- TestRunOobCheckedLoadT_pseudo<double>(3 * A_GIG, false);
- TestRunOobCheckedLoadT_pseudo<double>(3 * A_GIG, true);
-}
-
-TEST(RunOobCheckedLoadT_pseudo8) {
- TestRunOobCheckedLoadT_pseudo<int32_t>(4 * A_BILLION, false);
- TestRunOobCheckedLoadT_pseudo<int32_t>(4 * A_BILLION, true);
- TestRunOobCheckedLoadT_pseudo<float>(4 * A_BILLION, false);
- TestRunOobCheckedLoadT_pseudo<float>(4 * A_BILLION, true);
- TestRunOobCheckedLoadT_pseudo<double>(4 * A_BILLION, false);
- TestRunOobCheckedLoadT_pseudo<double>(4 * A_BILLION, true);
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
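The block removed above is the tail end of the out-of-bounds CheckedLoad/CheckedStore tests, deleted together with the operators they exercised. The contract the loops assert is that an access whose offset falls outside [0, length) never touches the buffer: a checked store is dropped, and a checked load yields a default value (what CheckOobValue verifies). A minimal host-side sketch of that contract, assuming the integer default is 0; this is an illustration, not V8's lowering, and it mirrors the tests' element-aligned accesses rather than also guarding offset + size against wrap-around:

#include <cstdint>
#include <cstring>

// Sketch of CheckedLoad semantics for int32: an out-of-bounds offset never
// dereferences the buffer and produces the default value instead.
int32_t CheckedLoadInt32(const uint8_t* base, uint32_t offset,
                         uint32_t length) {
  if (offset >= length) return 0;  // assumed OOB default for integer loads
  int32_t value;
  std::memcpy(&value, base + offset, sizeof(value));
  return value;
}

// Sketch of CheckedStore semantics: out-of-bounds stores are silently
// dropped, which the deleted tests assert by checking the buffer stays 0.
void CheckedStoreInt32(uint8_t* base, uint32_t offset, uint32_t length,
                       int32_t value) {
  if (offset >= length) return;
  std::memcpy(base + offset, &value, sizeof(value));
}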
diff --git a/deps/v8/test/cctest/compiler/test-run-machops.cc b/deps/v8/test/cctest/compiler/test-run-machops.cc
index ff8c83536b..7f752b8872 100644
--- a/deps/v8/test/cctest/compiler/test-run-machops.cc
+++ b/deps/v8/test/cctest/compiler/test-run-machops.cc
@@ -74,13 +74,13 @@ TEST(RunWord32ReverseBits) {
m.Return(m.AddNode(m.machine()->Word32ReverseBits().op(), m.Parameter(0)));
CHECK_EQ(uint32_t(0x00000000), m.Call(uint32_t(0x00000000)));
- CHECK_EQ(uint32_t(0x12345678), m.Call(uint32_t(0x1e6a2c48)));
- CHECK_EQ(uint32_t(0xfedcba09), m.Call(uint32_t(0x905d3b7f)));
+ CHECK_EQ(uint32_t(0x12345678), m.Call(uint32_t(0x1E6A2C48)));
+ CHECK_EQ(uint32_t(0xFEDCBA09), m.Call(uint32_t(0x905D3B7F)));
CHECK_EQ(uint32_t(0x01010101), m.Call(uint32_t(0x80808080)));
CHECK_EQ(uint32_t(0x01020408), m.Call(uint32_t(0x10204080)));
- CHECK_EQ(uint32_t(0xf0703010), m.Call(uint32_t(0x080c0e0f)));
- CHECK_EQ(uint32_t(0x1f8d0a3a), m.Call(uint32_t(0x5c50b1f8)));
- CHECK_EQ(uint32_t(0xffffffff), m.Call(uint32_t(0xffffffff)));
+ CHECK_EQ(uint32_t(0xF0703010), m.Call(uint32_t(0x080C0E0F)));
+ CHECK_EQ(uint32_t(0x1F8D0A3A), m.Call(uint32_t(0x5C50B1F8)));
+ CHECK_EQ(uint32_t(0xFFFFFFFF), m.Call(uint32_t(0xFFFFFFFF)));
}
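The expectations in this hunk are unchanged apart from hex-literal casing; they follow the standard bitwise reversal, e.g. 0x1E6A2C48 and 0x12345678 are mirror images of each other. A self-contained reference implementation, for comparison only:

#include <cstdint>

// Classic divide-and-conquer bit reversal: swap adjacent bits, then bit
// pairs, nibbles, bytes, and finally the two halves.
uint32_t ReverseBits32(uint32_t v) {
  v = ((v >> 1) & 0x55555555u) | ((v & 0x55555555u) << 1);
  v = ((v >> 2) & 0x33333333u) | ((v & 0x33333333u) << 2);
  v = ((v >> 4) & 0x0F0F0F0Fu) | ((v & 0x0F0F0F0Fu) << 4);
  v = ((v >> 8) & 0x00FF00FFu) | ((v & 0x00FF00FFu) << 8);
  return (v >> 16) | (v << 16);
}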
TEST(RunWord32ReverseBytes) {
@@ -93,12 +93,12 @@ TEST(RunWord32ReverseBytes) {
CHECK_EQ(uint32_t(0x00000000), m.Call(uint32_t(0x00000000)));
CHECK_EQ(uint32_t(0x12345678), m.Call(uint32_t(0x78563412)));
- CHECK_EQ(uint32_t(0xfedcba09), m.Call(uint32_t(0x09badcfe)));
+ CHECK_EQ(uint32_t(0xFEDCBA09), m.Call(uint32_t(0x09BADCFE)));
CHECK_EQ(uint32_t(0x01010101), m.Call(uint32_t(0x01010101)));
CHECK_EQ(uint32_t(0x01020408), m.Call(uint32_t(0x08040201)));
- CHECK_EQ(uint32_t(0xf0703010), m.Call(uint32_t(0x103070f0)));
- CHECK_EQ(uint32_t(0x1f8d0a3a), m.Call(uint32_t(0x3a0a8d1f)));
- CHECK_EQ(uint32_t(0xffffffff), m.Call(uint32_t(0xffffffff)));
+ CHECK_EQ(uint32_t(0xF0703010), m.Call(uint32_t(0x103070F0)));
+ CHECK_EQ(uint32_t(0x1F8D0A3A), m.Call(uint32_t(0x3A0A8D1F)));
+ CHECK_EQ(uint32_t(0xFFFFFFFF), m.Call(uint32_t(0xFFFFFFFF)));
}
TEST(RunWord32Ctz) {
@@ -114,34 +114,34 @@ TEST(RunWord32Ctz) {
CHECK_EQ(30, m.Call(uint32_t(0x40000000)));
CHECK_EQ(29, m.Call(uint32_t(0x20000000)));
CHECK_EQ(28, m.Call(uint32_t(0x10000000)));
- CHECK_EQ(27, m.Call(uint32_t(0xa8000000)));
- CHECK_EQ(26, m.Call(uint32_t(0xf4000000)));
+ CHECK_EQ(27, m.Call(uint32_t(0xA8000000)));
+ CHECK_EQ(26, m.Call(uint32_t(0xF4000000)));
CHECK_EQ(25, m.Call(uint32_t(0x62000000)));
CHECK_EQ(24, m.Call(uint32_t(0x91000000)));
- CHECK_EQ(23, m.Call(uint32_t(0xcd800000)));
+ CHECK_EQ(23, m.Call(uint32_t(0xCD800000)));
CHECK_EQ(22, m.Call(uint32_t(0x09400000)));
- CHECK_EQ(21, m.Call(uint32_t(0xaf200000)));
- CHECK_EQ(20, m.Call(uint32_t(0xac100000)));
- CHECK_EQ(19, m.Call(uint32_t(0xe0b80000)));
- CHECK_EQ(18, m.Call(uint32_t(0x9ce40000)));
- CHECK_EQ(17, m.Call(uint32_t(0xc7920000)));
- CHECK_EQ(16, m.Call(uint32_t(0xb8f10000)));
- CHECK_EQ(15, m.Call(uint32_t(0x3b9f8000)));
- CHECK_EQ(14, m.Call(uint32_t(0xdb4c4000)));
- CHECK_EQ(13, m.Call(uint32_t(0xe9a32000)));
- CHECK_EQ(12, m.Call(uint32_t(0xfca61000)));
- CHECK_EQ(11, m.Call(uint32_t(0x6c8a7800)));
- CHECK_EQ(10, m.Call(uint32_t(0x8ce5a400)));
- CHECK_EQ(9, m.Call(uint32_t(0xcb7d0200)));
- CHECK_EQ(8, m.Call(uint32_t(0xcb4dc100)));
- CHECK_EQ(7, m.Call(uint32_t(0xdfbec580)));
- CHECK_EQ(6, m.Call(uint32_t(0x27a9db40)));
- CHECK_EQ(5, m.Call(uint32_t(0xde3bcb20)));
- CHECK_EQ(4, m.Call(uint32_t(0xd7e8a610)));
- CHECK_EQ(3, m.Call(uint32_t(0x9afdbc88)));
- CHECK_EQ(2, m.Call(uint32_t(0x9afdbc84)));
- CHECK_EQ(1, m.Call(uint32_t(0x9afdbc82)));
- CHECK_EQ(0, m.Call(uint32_t(0x9afdbc81)));
+ CHECK_EQ(21, m.Call(uint32_t(0xAF200000)));
+ CHECK_EQ(20, m.Call(uint32_t(0xAC100000)));
+ CHECK_EQ(19, m.Call(uint32_t(0xE0B80000)));
+ CHECK_EQ(18, m.Call(uint32_t(0x9CE40000)));
+ CHECK_EQ(17, m.Call(uint32_t(0xC7920000)));
+ CHECK_EQ(16, m.Call(uint32_t(0xB8F10000)));
+ CHECK_EQ(15, m.Call(uint32_t(0x3B9F8000)));
+ CHECK_EQ(14, m.Call(uint32_t(0xDB4C4000)));
+ CHECK_EQ(13, m.Call(uint32_t(0xE9A32000)));
+ CHECK_EQ(12, m.Call(uint32_t(0xFCA61000)));
+ CHECK_EQ(11, m.Call(uint32_t(0x6C8A7800)));
+ CHECK_EQ(10, m.Call(uint32_t(0x8CE5A400)));
+ CHECK_EQ(9, m.Call(uint32_t(0xCB7D0200)));
+ CHECK_EQ(8, m.Call(uint32_t(0xCB4DC100)));
+ CHECK_EQ(7, m.Call(uint32_t(0xDFBEC580)));
+ CHECK_EQ(6, m.Call(uint32_t(0x27A9DB40)));
+ CHECK_EQ(5, m.Call(uint32_t(0xDE3BCB20)));
+ CHECK_EQ(4, m.Call(uint32_t(0xD7E8A610)));
+ CHECK_EQ(3, m.Call(uint32_t(0x9AFDBC88)));
+ CHECK_EQ(2, m.Call(uint32_t(0x9AFDBC84)));
+ CHECK_EQ(1, m.Call(uint32_t(0x9AFDBC82)));
+ CHECK_EQ(0, m.Call(uint32_t(0x9AFDBC81)));
}
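Each input in the Ctz table encodes exactly one property, the position of its lowest set bit: 0xA8000000 has bits 31, 29, and 27 set, so its count of trailing zeros is 27. A naive reference for nonzero inputs (the operator additionally defines ctz(0) = 32):

#include <cstdint>

// Count trailing zeros by shifting out low zero bits; loops forever on 0,
// so it is only defined for nonzero input.
int Ctz32(uint32_t v) {
  int n = 0;
  while ((v & 1u) == 0) {
    v >>= 1;
    ++n;
  }
  return n;
}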
TEST(RunWord32Clz) {
@@ -155,11 +155,11 @@ TEST(RunWord32Clz) {
CHECK_EQ(4, m.Call(uint32_t(0x08050000)));
CHECK_EQ(5, m.Call(uint32_t(0x04006000)));
CHECK_EQ(6, m.Call(uint32_t(0x02000000)));
- CHECK_EQ(7, m.Call(uint32_t(0x010000a0)));
- CHECK_EQ(8, m.Call(uint32_t(0x00800c00)));
+ CHECK_EQ(7, m.Call(uint32_t(0x010000A0)));
+ CHECK_EQ(8, m.Call(uint32_t(0x00800C00)));
CHECK_EQ(9, m.Call(uint32_t(0x00400000)));
- CHECK_EQ(10, m.Call(uint32_t(0x0020000d)));
- CHECK_EQ(11, m.Call(uint32_t(0x00100f00)));
+ CHECK_EQ(10, m.Call(uint32_t(0x0020000D)));
+ CHECK_EQ(11, m.Call(uint32_t(0x00100F00)));
CHECK_EQ(12, m.Call(uint32_t(0x00080000)));
CHECK_EQ(13, m.Call(uint32_t(0x00041000)));
CHECK_EQ(14, m.Call(uint32_t(0x00020020)));
@@ -195,11 +195,11 @@ TEST(RunWord32Popcnt) {
CHECK_EQ(0, m.Call(uint32_t(0x00000000)));
CHECK_EQ(1, m.Call(uint32_t(0x00000001)));
CHECK_EQ(1, m.Call(uint32_t(0x80000000)));
- CHECK_EQ(32, m.Call(uint32_t(0xffffffff)));
- CHECK_EQ(6, m.Call(uint32_t(0x000dc100)));
- CHECK_EQ(9, m.Call(uint32_t(0xe00dc100)));
- CHECK_EQ(11, m.Call(uint32_t(0xe00dc103)));
- CHECK_EQ(9, m.Call(uint32_t(0x000dc107)));
+ CHECK_EQ(32, m.Call(uint32_t(0xFFFFFFFF)));
+ CHECK_EQ(6, m.Call(uint32_t(0x000DC100)));
+ CHECK_EQ(9, m.Call(uint32_t(0xE00DC100)));
+ CHECK_EQ(11, m.Call(uint32_t(0xE00DC103)));
+ CHECK_EQ(9, m.Call(uint32_t(0x000DC107)));
}
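The popcount expectations can be cross-checked with Kernighan's bit-clearing loop; for instance 0x000DC100 sets bits {8, 14, 15, 16, 18, 19}, six in total. A sketch:

#include <cstdint>

// Population count via Kernighan's trick: each iteration clears the lowest
// set bit, so the loop body runs once per set bit.
int Popcnt32(uint32_t v) {
  int n = 0;
  for (; v != 0; v &= v - 1) ++n;
  return n;
}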
@@ -213,13 +213,13 @@ TEST(RunWord64ReverseBits) {
m.Return(m.AddNode(m.machine()->Word64ReverseBits().op(), m.Parameter(0)));
CHECK_EQ(uint64_t(0x0000000000000000), m.Call(uint64_t(0x0000000000000000)));
- CHECK_EQ(uint64_t(0x1234567890abcdef), m.Call(uint64_t(0xf7b3d5091e6a2c48)));
- CHECK_EQ(uint64_t(0xfedcba0987654321), m.Call(uint64_t(0x84c2a6e1905d3b7f)));
+ CHECK_EQ(uint64_t(0x1234567890ABCDEF), m.Call(uint64_t(0xF7B3D5091E6A2C48)));
+ CHECK_EQ(uint64_t(0xFEDCBA0987654321), m.Call(uint64_t(0x84C2A6E1905D3B7F)));
CHECK_EQ(uint64_t(0x0101010101010101), m.Call(uint64_t(0x8080808080808080)));
- CHECK_EQ(uint64_t(0x0102040803060c01), m.Call(uint64_t(0x803060c010204080)));
- CHECK_EQ(uint64_t(0xf0703010e060200f), m.Call(uint64_t(0xf0040607080c0e0f)));
- CHECK_EQ(uint64_t(0x2f8a6df01c21fa3b), m.Call(uint64_t(0xdc5f84380fb651f4)));
- CHECK_EQ(uint64_t(0xffffffffffffffff), m.Call(uint64_t(0xffffffffffffffff)));
+ CHECK_EQ(uint64_t(0x0102040803060C01), m.Call(uint64_t(0x803060C010204080)));
+ CHECK_EQ(uint64_t(0xF0703010E060200F), m.Call(uint64_t(0xF0040607080C0E0F)));
+ CHECK_EQ(uint64_t(0x2F8A6DF01C21FA3B), m.Call(uint64_t(0xDC5F84380FB651F4)));
+ CHECK_EQ(uint64_t(0xFFFFFFFFFFFFFFFF), m.Call(uint64_t(0xFFFFFFFFFFFFFFFF)));
}
TEST(RunWord64ReverseBytes) {
@@ -231,13 +231,13 @@ TEST(RunWord64ReverseBytes) {
m.Return(m.AddNode(m.machine()->Word64ReverseBytes().op(), m.Parameter(0)));
CHECK_EQ(uint64_t(0x0000000000000000), m.Call(uint64_t(0x0000000000000000)));
- CHECK_EQ(uint64_t(0x1234567890abcdef), m.Call(uint64_t(0xefcdab9078563412)));
- CHECK_EQ(uint64_t(0xfedcba0987654321), m.Call(uint64_t(0x2143658709badcfe)));
+ CHECK_EQ(uint64_t(0x1234567890ABCDEF), m.Call(uint64_t(0xEFCDAB9078563412)));
+ CHECK_EQ(uint64_t(0xFEDCBA0987654321), m.Call(uint64_t(0x2143658709BADCFE)));
CHECK_EQ(uint64_t(0x0101010101010101), m.Call(uint64_t(0x0101010101010101)));
- CHECK_EQ(uint64_t(0x0102040803060c01), m.Call(uint64_t(0x010c060308040201)));
- CHECK_EQ(uint64_t(0xf0703010e060200f), m.Call(uint64_t(0x0f2060e0103070f0)));
- CHECK_EQ(uint64_t(0x2f8a6df01c21fa3b), m.Call(uint64_t(0x3bfa211cf06d8a2f)));
- CHECK_EQ(uint64_t(0xffffffffffffffff), m.Call(uint64_t(0xffffffffffffffff)));
+ CHECK_EQ(uint64_t(0x0102040803060C01), m.Call(uint64_t(0x010C060308040201)));
+ CHECK_EQ(uint64_t(0xF0703010E060200F), m.Call(uint64_t(0x0F2060E0103070F0)));
+ CHECK_EQ(uint64_t(0x2F8A6DF01C21FA3B), m.Call(uint64_t(0x3BFA211CF06D8A2F)));
+ CHECK_EQ(uint64_t(0xFFFFFFFFFFFFFFFF), m.Call(uint64_t(0xFFFFFFFFFFFFFFFF)));
}
TEST(RunWord64Clz) {
@@ -251,11 +251,11 @@ TEST(RunWord64Clz) {
CHECK_EQ(4, m.Call(uint64_t(0x0805000000000000)));
CHECK_EQ(5, m.Call(uint64_t(0x0400600000000000)));
CHECK_EQ(6, m.Call(uint64_t(0x0200000000000000)));
- CHECK_EQ(7, m.Call(uint64_t(0x010000a000000000)));
- CHECK_EQ(8, m.Call(uint64_t(0x00800c0000000000)));
+ CHECK_EQ(7, m.Call(uint64_t(0x010000A000000000)));
+ CHECK_EQ(8, m.Call(uint64_t(0x00800C0000000000)));
CHECK_EQ(9, m.Call(uint64_t(0x0040000000000000)));
- CHECK_EQ(10, m.Call(uint64_t(0x0020000d00000000)));
- CHECK_EQ(11, m.Call(uint64_t(0x00100f0000000000)));
+ CHECK_EQ(10, m.Call(uint64_t(0x0020000D00000000)));
+ CHECK_EQ(11, m.Call(uint64_t(0x00100F0000000000)));
CHECK_EQ(12, m.Call(uint64_t(0x0008000000000000)));
CHECK_EQ(13, m.Call(uint64_t(0x0004100000000000)));
CHECK_EQ(14, m.Call(uint64_t(0x0002002000000000)));
@@ -283,11 +283,11 @@ TEST(RunWord64Clz) {
CHECK_EQ(36, m.Call(uint64_t(0x0000000008050000)));
CHECK_EQ(37, m.Call(uint64_t(0x0000000004006000)));
CHECK_EQ(38, m.Call(uint64_t(0x0000000002000000)));
- CHECK_EQ(39, m.Call(uint64_t(0x00000000010000a0)));
- CHECK_EQ(40, m.Call(uint64_t(0x0000000000800c00)));
+ CHECK_EQ(39, m.Call(uint64_t(0x00000000010000A0)));
+ CHECK_EQ(40, m.Call(uint64_t(0x0000000000800C00)));
CHECK_EQ(41, m.Call(uint64_t(0x0000000000400000)));
- CHECK_EQ(42, m.Call(uint64_t(0x000000000020000d)));
- CHECK_EQ(43, m.Call(uint64_t(0x0000000000100f00)));
+ CHECK_EQ(42, m.Call(uint64_t(0x000000000020000D)));
+ CHECK_EQ(43, m.Call(uint64_t(0x0000000000100F00)));
CHECK_EQ(44, m.Call(uint64_t(0x0000000000080000)));
CHECK_EQ(45, m.Call(uint64_t(0x0000000000041000)));
CHECK_EQ(46, m.Call(uint64_t(0x0000000000020020)));
@@ -325,66 +325,66 @@ TEST(RunWord64Ctz) {
CHECK_EQ(62, m.Call(uint64_t(0x4000000000000000)));
CHECK_EQ(61, m.Call(uint64_t(0x2000000000000000)));
CHECK_EQ(60, m.Call(uint64_t(0x1000000000000000)));
- CHECK_EQ(59, m.Call(uint64_t(0xa800000000000000)));
- CHECK_EQ(58, m.Call(uint64_t(0xf400000000000000)));
+ CHECK_EQ(59, m.Call(uint64_t(0xA800000000000000)));
+ CHECK_EQ(58, m.Call(uint64_t(0xF400000000000000)));
CHECK_EQ(57, m.Call(uint64_t(0x6200000000000000)));
CHECK_EQ(56, m.Call(uint64_t(0x9100000000000000)));
- CHECK_EQ(55, m.Call(uint64_t(0xcd80000000000000)));
+ CHECK_EQ(55, m.Call(uint64_t(0xCD80000000000000)));
CHECK_EQ(54, m.Call(uint64_t(0x0940000000000000)));
- CHECK_EQ(53, m.Call(uint64_t(0xaf20000000000000)));
- CHECK_EQ(52, m.Call(uint64_t(0xac10000000000000)));
- CHECK_EQ(51, m.Call(uint64_t(0xe0b8000000000000)));
- CHECK_EQ(50, m.Call(uint64_t(0x9ce4000000000000)));
- CHECK_EQ(49, m.Call(uint64_t(0xc792000000000000)));
- CHECK_EQ(48, m.Call(uint64_t(0xb8f1000000000000)));
- CHECK_EQ(47, m.Call(uint64_t(0x3b9f800000000000)));
- CHECK_EQ(46, m.Call(uint64_t(0xdb4c400000000000)));
- CHECK_EQ(45, m.Call(uint64_t(0xe9a3200000000000)));
- CHECK_EQ(44, m.Call(uint64_t(0xfca6100000000000)));
- CHECK_EQ(43, m.Call(uint64_t(0x6c8a780000000000)));
- CHECK_EQ(42, m.Call(uint64_t(0x8ce5a40000000000)));
- CHECK_EQ(41, m.Call(uint64_t(0xcb7d020000000000)));
- CHECK_EQ(40, m.Call(uint64_t(0xcb4dc10000000000)));
- CHECK_EQ(39, m.Call(uint64_t(0xdfbec58000000000)));
- CHECK_EQ(38, m.Call(uint64_t(0x27a9db4000000000)));
- CHECK_EQ(37, m.Call(uint64_t(0xde3bcb2000000000)));
- CHECK_EQ(36, m.Call(uint64_t(0xd7e8a61000000000)));
- CHECK_EQ(35, m.Call(uint64_t(0x9afdbc8800000000)));
- CHECK_EQ(34, m.Call(uint64_t(0x9afdbc8400000000)));
- CHECK_EQ(33, m.Call(uint64_t(0x9afdbc8200000000)));
- CHECK_EQ(32, m.Call(uint64_t(0x9afdbc8100000000)));
+ CHECK_EQ(53, m.Call(uint64_t(0xAF20000000000000)));
+ CHECK_EQ(52, m.Call(uint64_t(0xAC10000000000000)));
+ CHECK_EQ(51, m.Call(uint64_t(0xE0B8000000000000)));
+ CHECK_EQ(50, m.Call(uint64_t(0x9CE4000000000000)));
+ CHECK_EQ(49, m.Call(uint64_t(0xC792000000000000)));
+ CHECK_EQ(48, m.Call(uint64_t(0xB8F1000000000000)));
+ CHECK_EQ(47, m.Call(uint64_t(0x3B9F800000000000)));
+ CHECK_EQ(46, m.Call(uint64_t(0xDB4C400000000000)));
+ CHECK_EQ(45, m.Call(uint64_t(0xE9A3200000000000)));
+ CHECK_EQ(44, m.Call(uint64_t(0xFCA6100000000000)));
+ CHECK_EQ(43, m.Call(uint64_t(0x6C8A780000000000)));
+ CHECK_EQ(42, m.Call(uint64_t(0x8CE5A40000000000)));
+ CHECK_EQ(41, m.Call(uint64_t(0xCB7D020000000000)));
+ CHECK_EQ(40, m.Call(uint64_t(0xCB4DC10000000000)));
+ CHECK_EQ(39, m.Call(uint64_t(0xDFBEC58000000000)));
+ CHECK_EQ(38, m.Call(uint64_t(0x27A9DB4000000000)));
+ CHECK_EQ(37, m.Call(uint64_t(0xDE3BCB2000000000)));
+ CHECK_EQ(36, m.Call(uint64_t(0xD7E8A61000000000)));
+ CHECK_EQ(35, m.Call(uint64_t(0x9AFDBC8800000000)));
+ CHECK_EQ(34, m.Call(uint64_t(0x9AFDBC8400000000)));
+ CHECK_EQ(33, m.Call(uint64_t(0x9AFDBC8200000000)));
+ CHECK_EQ(32, m.Call(uint64_t(0x9AFDBC8100000000)));
CHECK_EQ(31, m.Call(uint64_t(0x0000000080000000)));
CHECK_EQ(30, m.Call(uint64_t(0x0000000040000000)));
CHECK_EQ(29, m.Call(uint64_t(0x0000000020000000)));
CHECK_EQ(28, m.Call(uint64_t(0x0000000010000000)));
- CHECK_EQ(27, m.Call(uint64_t(0x00000000a8000000)));
- CHECK_EQ(26, m.Call(uint64_t(0x00000000f4000000)));
+ CHECK_EQ(27, m.Call(uint64_t(0x00000000A8000000)));
+ CHECK_EQ(26, m.Call(uint64_t(0x00000000F4000000)));
CHECK_EQ(25, m.Call(uint64_t(0x0000000062000000)));
CHECK_EQ(24, m.Call(uint64_t(0x0000000091000000)));
- CHECK_EQ(23, m.Call(uint64_t(0x00000000cd800000)));
+ CHECK_EQ(23, m.Call(uint64_t(0x00000000CD800000)));
CHECK_EQ(22, m.Call(uint64_t(0x0000000009400000)));
- CHECK_EQ(21, m.Call(uint64_t(0x00000000af200000)));
- CHECK_EQ(20, m.Call(uint64_t(0x00000000ac100000)));
- CHECK_EQ(19, m.Call(uint64_t(0x00000000e0b80000)));
- CHECK_EQ(18, m.Call(uint64_t(0x000000009ce40000)));
- CHECK_EQ(17, m.Call(uint64_t(0x00000000c7920000)));
- CHECK_EQ(16, m.Call(uint64_t(0x00000000b8f10000)));
- CHECK_EQ(15, m.Call(uint64_t(0x000000003b9f8000)));
- CHECK_EQ(14, m.Call(uint64_t(0x00000000db4c4000)));
- CHECK_EQ(13, m.Call(uint64_t(0x00000000e9a32000)));
- CHECK_EQ(12, m.Call(uint64_t(0x00000000fca61000)));
- CHECK_EQ(11, m.Call(uint64_t(0x000000006c8a7800)));
- CHECK_EQ(10, m.Call(uint64_t(0x000000008ce5a400)));
- CHECK_EQ(9, m.Call(uint64_t(0x00000000cb7d0200)));
- CHECK_EQ(8, m.Call(uint64_t(0x00000000cb4dc100)));
- CHECK_EQ(7, m.Call(uint64_t(0x00000000dfbec580)));
- CHECK_EQ(6, m.Call(uint64_t(0x0000000027a9db40)));
- CHECK_EQ(5, m.Call(uint64_t(0x00000000de3bcb20)));
- CHECK_EQ(4, m.Call(uint64_t(0x00000000d7e8a610)));
- CHECK_EQ(3, m.Call(uint64_t(0x000000009afdbc88)));
- CHECK_EQ(2, m.Call(uint64_t(0x000000009afdbc84)));
- CHECK_EQ(1, m.Call(uint64_t(0x000000009afdbc82)));
- CHECK_EQ(0, m.Call(uint64_t(0x000000009afdbc81)));
+ CHECK_EQ(21, m.Call(uint64_t(0x00000000AF200000)));
+ CHECK_EQ(20, m.Call(uint64_t(0x00000000AC100000)));
+ CHECK_EQ(19, m.Call(uint64_t(0x00000000E0B80000)));
+ CHECK_EQ(18, m.Call(uint64_t(0x000000009CE40000)));
+ CHECK_EQ(17, m.Call(uint64_t(0x00000000C7920000)));
+ CHECK_EQ(16, m.Call(uint64_t(0x00000000B8F10000)));
+ CHECK_EQ(15, m.Call(uint64_t(0x000000003B9F8000)));
+ CHECK_EQ(14, m.Call(uint64_t(0x00000000DB4C4000)));
+ CHECK_EQ(13, m.Call(uint64_t(0x00000000E9A32000)));
+ CHECK_EQ(12, m.Call(uint64_t(0x00000000FCA61000)));
+ CHECK_EQ(11, m.Call(uint64_t(0x000000006C8A7800)));
+ CHECK_EQ(10, m.Call(uint64_t(0x000000008CE5A400)));
+ CHECK_EQ(9, m.Call(uint64_t(0x00000000CB7D0200)));
+ CHECK_EQ(8, m.Call(uint64_t(0x00000000CB4DC100)));
+ CHECK_EQ(7, m.Call(uint64_t(0x00000000DFBEC580)));
+ CHECK_EQ(6, m.Call(uint64_t(0x0000000027A9DB40)));
+ CHECK_EQ(5, m.Call(uint64_t(0x00000000DE3BCB20)));
+ CHECK_EQ(4, m.Call(uint64_t(0x00000000D7E8A610)));
+ CHECK_EQ(3, m.Call(uint64_t(0x000000009AFDBC88)));
+ CHECK_EQ(2, m.Call(uint64_t(0x000000009AFDBC84)));
+ CHECK_EQ(1, m.Call(uint64_t(0x000000009AFDBC82)));
+ CHECK_EQ(0, m.Call(uint64_t(0x000000009AFDBC81)));
}
@@ -399,11 +399,11 @@ TEST(RunWord64Popcnt) {
CHECK_EQ(0, m.Call(uint64_t(0x0000000000000000)));
CHECK_EQ(1, m.Call(uint64_t(0x0000000000000001)));
CHECK_EQ(1, m.Call(uint64_t(0x8000000000000000)));
- CHECK_EQ(64, m.Call(uint64_t(0xffffffffffffffff)));
- CHECK_EQ(12, m.Call(uint64_t(0x000dc100000dc100)));
- CHECK_EQ(18, m.Call(uint64_t(0xe00dc100e00dc100)));
- CHECK_EQ(22, m.Call(uint64_t(0xe00dc103e00dc103)));
- CHECK_EQ(18, m.Call(uint64_t(0x000dc107000dc107)));
+ CHECK_EQ(64, m.Call(uint64_t(0xFFFFFFFFFFFFFFFF)));
+ CHECK_EQ(12, m.Call(uint64_t(0x000DC100000DC100)));
+ CHECK_EQ(18, m.Call(uint64_t(0xE00DC100E00DC100)));
+ CHECK_EQ(22, m.Call(uint64_t(0xE00DC103E00DC103)));
+ CHECK_EQ(18, m.Call(uint64_t(0x000DC107000DC107)));
}
#endif // V8_TARGET_ARCH_64_BIT
@@ -421,7 +421,7 @@ static Node* Int32Input(RawMachineAssemblerTester<int32_t>* m, int index) {
case 4:
return m->Int32Constant(-1);
case 5:
- return m->Int32Constant(0xff);
+ return m->Int32Constant(0xFF);
case 6:
return m->Int32Constant(0x01234567);
case 7:
@@ -483,9 +483,9 @@ static Node* Int64Input(RawMachineAssemblerTester<int64_t>* m, int index) {
case 4:
return m->Int64Constant(-1);
case 5:
- return m->Int64Constant(0xff);
+ return m->Int64Constant(0xFF);
case 6:
- return m->Int64Constant(0x0123456789abcdefLL);
+ return m->Int64Constant(0x0123456789ABCDEFLL);
case 7:
return m->Load(MachineType::Int64(), m->PointerConstant(nullptr));
default:
@@ -886,8 +886,8 @@ TEST(RunDiamondPhiParam) {
MachineType::Int32(), MachineType::Int32(), MachineType::Int32());
BuildDiamondPhi(&m, m.Parameter(0), MachineRepresentation::kWord32,
m.Parameter(1), m.Parameter(2));
- int32_t c1 = 0x260cb75a;
- int32_t c2 = 0xcd3e9c8b;
+ int32_t c1 = 0x260CB75A;
+ int32_t c2 = 0xCD3E9C8B;
int result = m.Call(0, c1, c2);
CHECK_EQ(c2, result);
result = m.Call(1, c1, c2);
@@ -941,8 +941,8 @@ TEST(RunLoopPhiParam) {
m.Bind(&end);
m.Return(phi);
- int32_t c1 = 0xa81903b4;
- int32_t c2 = 0x5a1207da;
+ int32_t c1 = 0xA81903B4;
+ int32_t c2 = 0x5A1207DA;
int result = m.Call(0, c1, c2);
CHECK_EQ(c1, result);
result = m.Call(1, c1, c2);
@@ -2422,10 +2422,10 @@ TEST(RunWord32AndAndWord32ShlP) {
RawMachineAssemblerTester<int32_t> m;
Uint32BinopTester bt(&m);
bt.AddReturn(
- m.Word32Shl(bt.param0, m.Word32And(bt.param1, m.Int32Constant(0x1f))));
+ m.Word32Shl(bt.param0, m.Word32And(bt.param1, m.Int32Constant(0x1F))));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- uint32_t expected = *i << (*j & 0x1f);
+ uint32_t expected = *i << (*j & 0x1F);
CHECK_EQ(expected, bt.call(*i, *j));
}
}
@@ -2434,10 +2434,10 @@ TEST(RunWord32AndAndWord32ShlP) {
RawMachineAssemblerTester<int32_t> m;
Uint32BinopTester bt(&m);
bt.AddReturn(
- m.Word32Shl(bt.param0, m.Word32And(m.Int32Constant(0x1f), bt.param1)));
+ m.Word32Shl(bt.param0, m.Word32And(m.Int32Constant(0x1F), bt.param1)));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- uint32_t expected = *i << (0x1f & *j);
+ uint32_t expected = *i << (0x1F & *j);
CHECK_EQ(expected, bt.call(*i, *j));
}
}
@@ -2450,10 +2450,10 @@ TEST(RunWord32AndAndWord32ShrP) {
RawMachineAssemblerTester<int32_t> m;
Uint32BinopTester bt(&m);
bt.AddReturn(
- m.Word32Shr(bt.param0, m.Word32And(bt.param1, m.Int32Constant(0x1f))));
+ m.Word32Shr(bt.param0, m.Word32And(bt.param1, m.Int32Constant(0x1F))));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- uint32_t expected = *i >> (*j & 0x1f);
+ uint32_t expected = *i >> (*j & 0x1F);
CHECK_EQ(expected, bt.call(*i, *j));
}
}
@@ -2462,10 +2462,10 @@ TEST(RunWord32AndAndWord32ShrP) {
RawMachineAssemblerTester<int32_t> m;
Uint32BinopTester bt(&m);
bt.AddReturn(
- m.Word32Shr(bt.param0, m.Word32And(m.Int32Constant(0x1f), bt.param1)));
+ m.Word32Shr(bt.param0, m.Word32And(m.Int32Constant(0x1F), bt.param1)));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- uint32_t expected = *i >> (0x1f & *j);
+ uint32_t expected = *i >> (0x1F & *j);
CHECK_EQ(expected, bt.call(*i, *j));
}
}
@@ -2478,10 +2478,10 @@ TEST(RunWord32AndAndWord32SarP) {
RawMachineAssemblerTester<int32_t> m;
Int32BinopTester bt(&m);
bt.AddReturn(
- m.Word32Sar(bt.param0, m.Word32And(bt.param1, m.Int32Constant(0x1f))));
+ m.Word32Sar(bt.param0, m.Word32And(bt.param1, m.Int32Constant(0x1F))));
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
- int32_t expected = *i >> (*j & 0x1f);
+ int32_t expected = *i >> (*j & 0x1F);
CHECK_EQ(expected, bt.call(*i, *j));
}
}
@@ -2490,10 +2490,10 @@ TEST(RunWord32AndAndWord32SarP) {
RawMachineAssemblerTester<int32_t> m;
Int32BinopTester bt(&m);
bt.AddReturn(
- m.Word32Sar(bt.param0, m.Word32And(m.Int32Constant(0x1f), bt.param1)));
+ m.Word32Sar(bt.param0, m.Word32And(m.Int32Constant(0x1F), bt.param1)));
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
- int32_t expected = *i >> (0x1f & *j);
+ int32_t expected = *i >> (0x1F & *j);
CHECK_EQ(expected, bt.call(*i, *j));
}
}
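These four Shl/Shr/Sar testers all encode the same invariant: a 32-bit machine shift consumes only the low five bits of its count, so wrapping the count in Word32And(count, 0x1F) is semantically a no-op that the backend is free to fold into the shift instruction. The host-side oracles mirror that masking, which also keeps the C++ shifts well-defined:

#include <cstdint>

// The identity the tests rely on: for 32-bit machine shifts the count is
// taken mod 32, so masking with 0x1F changes nothing.
uint32_t ShlMasked(uint32_t x, uint32_t count) { return x << (count & 0x1F); }
uint32_t ShrMasked(uint32_t x, uint32_t count) { return x >> (count & 0x1F); }
// >> on a negative int is arithmetic on the platforms V8 targets.
int32_t SarMasked(int32_t x, uint32_t count) { return x >> (count & 0x1F); }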
@@ -3524,8 +3524,10 @@ TEST(RunWord32EqualAndWord32ShrP) {
TEST(RunDeadNodes) {
for (int i = 0; true; i++) {
- RawMachineAssemblerTester<int32_t> m(i == 5 ? MachineType::Int32()
- : MachineType::None());
+ RawMachineAssemblerTester<int32_t> m_v;
+ RawMachineAssemblerTester<int32_t> m_i(MachineType::Int32());
+ RawMachineAssemblerTester<int32_t>& m = i == 5 ? m_i : m_v;
+
int constant = 0x55 + i;
switch (i) {
case 0:
@@ -4181,9 +4183,9 @@ TEST(RunInt32PairAdd) {
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) {
- m.Call(static_cast<uint32_t>(*i & 0xffffffff),
+ m.Call(static_cast<uint32_t>(*i & 0xFFFFFFFF),
static_cast<uint32_t>(*i >> 32),
- static_cast<uint32_t>(*j & 0xffffffff),
+ static_cast<uint32_t>(*j & 0xFFFFFFFF),
static_cast<uint32_t>(*j >> 32));
CHECK_EQ(*i + *j, ToInt64(low, high));
}
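ToInt64(low, high) reassembles the two output words; the operation under test is 64-bit addition performed on (low, high) 32-bit pairs with an explicit carry. A host-side model:

#include <cstdint>

// Model of Int32PairAdd: add two 64-bit values given as (low, high) word
// pairs, propagating the carry from the low words into the high words.
void Int32PairAddModel(uint32_t a_lo, uint32_t a_hi, uint32_t b_lo,
                       uint32_t b_hi, uint32_t* lo, uint32_t* hi) {
  uint64_t low = static_cast<uint64_t>(a_lo) + b_lo;
  *lo = static_cast<uint32_t>(low);
  *hi = a_hi + b_hi + static_cast<uint32_t>(low >> 32);  // carry is 0 or 1
}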
@@ -4202,9 +4204,9 @@ TEST(RunInt32PairAddUseOnlyHighWord) {
FOR_UINT64_INPUTS(j) {
CHECK_EQ(
static_cast<uint32_t>((*i + *j) >> 32),
- static_cast<uint32_t>(m.Call(static_cast<uint32_t>(*i & 0xffffffff),
+ static_cast<uint32_t>(m.Call(static_cast<uint32_t>(*i & 0xFFFFFFFF),
static_cast<uint32_t>(*i >> 32),
- static_cast<uint32_t>(*j & 0xffffffff),
+ static_cast<uint32_t>(*j & 0xFFFFFFFF),
static_cast<uint32_t>(*j >> 32))));
}
}
@@ -4264,9 +4266,9 @@ TEST(RunInt32PairSub) {
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) {
- m.Call(static_cast<uint32_t>(*i & 0xffffffff),
+ m.Call(static_cast<uint32_t>(*i & 0xFFFFFFFF),
static_cast<uint32_t>(*i >> 32),
- static_cast<uint32_t>(*j & 0xffffffff),
+ static_cast<uint32_t>(*j & 0xFFFFFFFF),
static_cast<uint32_t>(*j >> 32));
CHECK_EQ(*i - *j, ToInt64(low, high));
}
@@ -4285,9 +4287,9 @@ TEST(RunInt32PairSubUseOnlyHighWord) {
FOR_UINT64_INPUTS(j) {
CHECK_EQ(
static_cast<uint32_t>((*i - *j) >> 32),
- static_cast<uint32_t>(m.Call(static_cast<uint32_t>(*i & 0xffffffff),
+ static_cast<uint32_t>(m.Call(static_cast<uint32_t>(*i & 0xFFFFFFFF),
static_cast<uint32_t>(*i >> 32),
- static_cast<uint32_t>(*j & 0xffffffff),
+ static_cast<uint32_t>(*j & 0xFFFFFFFF),
static_cast<uint32_t>(*j >> 32))));
}
}
@@ -4347,9 +4349,9 @@ TEST(RunInt32PairMul) {
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) {
- m.Call(static_cast<uint32_t>(*i & 0xffffffff),
+ m.Call(static_cast<uint32_t>(*i & 0xFFFFFFFF),
static_cast<uint32_t>(*i >> 32),
- static_cast<uint32_t>(*j & 0xffffffff),
+ static_cast<uint32_t>(*j & 0xFFFFFFFF),
static_cast<uint32_t>(*j >> 32));
CHECK_EQ(*i * *j, ToInt64(low, high));
}
@@ -4368,9 +4370,9 @@ TEST(RunInt32PairMulUseOnlyHighWord) {
FOR_UINT64_INPUTS(j) {
CHECK_EQ(
static_cast<uint32_t>((*i * *j) >> 32),
- static_cast<uint32_t>(m.Call(static_cast<uint32_t>(*i & 0xffffffff),
+ static_cast<uint32_t>(m.Call(static_cast<uint32_t>(*i & 0xFFFFFFFF),
static_cast<uint32_t>(*i >> 32),
- static_cast<uint32_t>(*j & 0xffffffff),
+ static_cast<uint32_t>(*j & 0xFFFFFFFF),
static_cast<uint32_t>(*j >> 32))));
}
}
@@ -4430,7 +4432,7 @@ TEST(RunWord32PairShl) {
FOR_UINT64_INPUTS(i) {
for (uint32_t j = 0; j < 64; j++) {
- m.Call(static_cast<uint32_t>(*i & 0xffffffff),
+ m.Call(static_cast<uint32_t>(*i & 0xFFFFFFFF),
static_cast<uint32_t>(*i >> 32), j);
CHECK_EQ(*i << j, ToInt64(low, high));
}
@@ -4448,7 +4450,7 @@ TEST(RunWord32PairShlUseOnlyHighWord) {
for (uint32_t j = 0; j < 64; j++) {
CHECK_EQ(
static_cast<uint32_t>((*i << j) >> 32),
- static_cast<uint32_t>(m.Call(static_cast<uint32_t>(*i & 0xffffffff),
+ static_cast<uint32_t>(m.Call(static_cast<uint32_t>(*i & 0xFFFFFFFF),
static_cast<uint32_t>(*i >> 32), j)));
}
}
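The pair-shift tests reconstruct a 64-bit value from (low, high) words, shift it, and split it again, with shift counts restricted to [0, 64). A model of the operation under that restriction:

#include <cstdint>

// Model of Word32PairShl for j in [0, 64): widen the (low, high) pair to
// 64 bits, shift, and split the result back into two words.
void Word32PairShlModel(uint32_t lo, uint32_t hi, uint32_t j,
                        uint32_t* out_lo, uint32_t* out_hi) {
  uint64_t v = (static_cast<uint64_t>(hi) << 32) | lo;
  v <<= j;  // well-defined here because j < 64
  *out_lo = static_cast<uint32_t>(v);
  *out_hi = static_cast<uint32_t>(v >> 32);
}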
@@ -4504,7 +4506,7 @@ TEST(RunWord32PairShr) {
FOR_UINT64_INPUTS(i) {
for (uint32_t j = 0; j < 64; j++) {
- m.Call(static_cast<uint32_t>(*i & 0xffffffff),
+ m.Call(static_cast<uint32_t>(*i & 0xFFFFFFFF),
static_cast<uint32_t>(*i >> 32), j);
CHECK_EQ(*i >> j, ToInt64(low, high));
}
@@ -4522,7 +4524,7 @@ TEST(RunWord32PairShrUseOnlyHighWord) {
for (uint32_t j = 0; j < 64; j++) {
CHECK_EQ(
static_cast<uint32_t>((*i >> j) >> 32),
- static_cast<uint32_t>(m.Call(static_cast<uint32_t>(*i & 0xffffffff),
+ static_cast<uint32_t>(m.Call(static_cast<uint32_t>(*i & 0xFFFFFFFF),
static_cast<uint32_t>(*i >> 32), j)));
}
}
@@ -4546,7 +4548,7 @@ TEST(RunWord32PairSar) {
FOR_INT64_INPUTS(i) {
for (uint32_t j = 0; j < 64; j++) {
- m.Call(static_cast<uint32_t>(*i & 0xffffffff),
+ m.Call(static_cast<uint32_t>(*i & 0xFFFFFFFF),
static_cast<uint32_t>(*i >> 32), j);
CHECK_EQ(*i >> j, static_cast<int64_t>(ToInt64(low, high)));
}
@@ -4564,7 +4566,7 @@ TEST(RunWord32PairSarUseOnlyHighWord) {
for (uint32_t j = 0; j < 64; j++) {
CHECK_EQ(
static_cast<uint32_t>((*i >> j) >> 32),
- static_cast<uint32_t>(m.Call(static_cast<uint32_t>(*i & 0xffffffff),
+ static_cast<uint32_t>(m.Call(static_cast<uint32_t>(*i & 0xFFFFFFFF),
static_cast<uint32_t>(*i >> 32), j)));
}
}
@@ -4573,7 +4575,7 @@ TEST(RunWord32PairSarUseOnlyHighWord) {
TEST(RunDeadChangeFloat64ToInt32) {
RawMachineAssemblerTester<int32_t> m;
- const int magic = 0x88abcda4;
+ const int magic = 0x88ABCDA4;
m.ChangeFloat64ToInt32(m.Float64Constant(999.78));
m.Return(m.Int32Constant(magic));
CHECK_EQ(magic, m.Call());
@@ -4582,7 +4584,7 @@ TEST(RunDeadChangeFloat64ToInt32) {
TEST(RunDeadChangeInt32ToFloat64) {
RawMachineAssemblerTester<int32_t> m;
- const int magic = 0x8834abcd;
+ const int magic = 0x8834ABCD;
m.ChangeInt32ToFloat64(m.Int32Constant(magic - 6888));
m.Return(m.Int32Constant(magic));
CHECK_EQ(magic, m.Call());
@@ -5538,7 +5540,7 @@ TEST(RunInt32MulWithOverflowInBranchP) {
TEST(RunWord64EqualInBranchP) {
int64_t input;
RawMachineLabel blocka, blockb;
- RawMachineAssemblerTester<int64_t> m;
+ RawMachineAssemblerTester<int32_t> m;
if (!m.machine()->Is64()) return;
Node* value = m.LoadFromPointer(&input, MachineType::Int64());
m.Branch(m.Word64Equal(value, m.Int64Constant(0)), &blocka, &blockb);
@@ -5546,11 +5548,11 @@ TEST(RunWord64EqualInBranchP) {
m.Return(m.Int32Constant(1));
m.Bind(&blockb);
m.Return(m.Int32Constant(2));
- input = V8_INT64_C(0);
+ input = int64_t{0};
CHECK_EQ(1, m.Call());
- input = V8_INT64_C(1);
+ input = int64_t{1};
CHECK_EQ(2, m.Call());
- input = V8_INT64_C(0x100000000);
+ input = int64_t{0x100000000};
CHECK_EQ(2, m.Call());
}
@@ -5722,9 +5724,9 @@ TEST(RunFloat64InsertLowWord32) {
m.Return(m.Float64InsertLowWord32(m.Parameter(0), m.Parameter(1)));
FOR_FLOAT64_INPUTS(i) {
FOR_INT32_INPUTS(j) {
- double expected = bit_cast<double>(
- (bit_cast<uint64_t>(*i) & ~(V8_UINT64_C(0xFFFFFFFF))) |
- (static_cast<uint64_t>(bit_cast<uint32_t>(*j))));
+ double expected =
+ bit_cast<double>((bit_cast<uint64_t>(*i) & ~(uint64_t{0xFFFFFFFF})) |
+ (static_cast<uint64_t>(bit_cast<uint32_t>(*j))));
CHECK_DOUBLE_EQ(expected, m.Call(*i, *j));
}
}
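The rewritten expectation is only reformatted; the computation is unchanged: take the double's 64-bit pattern, clear the low word, and OR in the new one. Spelled with memcpy instead of V8's bit_cast:

#include <cstdint>
#include <cstring>

// Model of Float64InsertLowWord32: replace the low 32 bits of a double's
// bit pattern with the given word.
double InsertLowWord32(double x, uint32_t low) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof(bits));
  bits = (bits & ~uint64_t{0xFFFFFFFF}) | low;
  std::memcpy(&x, &bits, sizeof(x));
  return x;
}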
@@ -6187,8 +6189,7 @@ TEST(RunFloat64RoundTiesAway) {
namespace {
-int32_t const kMagicFoo0 = 0xdeadbeef;
-
+int32_t const kMagicFoo0 = 0xDEADBEEF;
int32_t foo0() { return kMagicFoo0; }
@@ -6483,80 +6484,80 @@ TEST(RunRoundUint64ToFloat64) {
uint64_t input;
uint64_t expected;
} values[] = {{0x0, 0x0},
- {0x1, 0x3ff0000000000000},
- {0xffffffff, 0x41efffffffe00000},
- {0x1b09788b, 0x41bb09788b000000},
- {0x4c5fce8, 0x419317f3a0000000},
- {0xcc0de5bf, 0x41e981bcb7e00000},
+ {0x1, 0x3FF0000000000000},
+ {0xFFFFFFFF, 0x41EFFFFFFFE00000},
+ {0x1B09788B, 0x41BB09788B000000},
+ {0x4C5FCE8, 0x419317F3A0000000},
+ {0xCC0DE5BF, 0x41E981BCB7E00000},
{0x2, 0x4000000000000000},
{0x3, 0x4008000000000000},
{0x4, 0x4010000000000000},
{0x5, 0x4014000000000000},
{0x8, 0x4020000000000000},
{0x9, 0x4022000000000000},
- {0xffffffffffffffff, 0x43f0000000000000},
- {0xfffffffffffffffe, 0x43f0000000000000},
- {0xfffffffffffffffd, 0x43f0000000000000},
- {0x100000000, 0x41f0000000000000},
- {0xffffffff00000000, 0x43efffffffe00000},
- {0x1b09788b00000000, 0x43bb09788b000000},
- {0x4c5fce800000000, 0x439317f3a0000000},
- {0xcc0de5bf00000000, 0x43e981bcb7e00000},
+ {0xFFFFFFFFFFFFFFFF, 0x43F0000000000000},
+ {0xFFFFFFFFFFFFFFFE, 0x43F0000000000000},
+ {0xFFFFFFFFFFFFFFFD, 0x43F0000000000000},
+ {0x100000000, 0x41F0000000000000},
+ {0xFFFFFFFF00000000, 0x43EFFFFFFFE00000},
+ {0x1B09788B00000000, 0x43BB09788B000000},
+ {0x4C5FCE800000000, 0x439317F3A0000000},
+ {0xCC0DE5BF00000000, 0x43E981BCB7E00000},
{0x200000000, 0x4200000000000000},
{0x300000000, 0x4208000000000000},
{0x400000000, 0x4210000000000000},
{0x500000000, 0x4214000000000000},
{0x800000000, 0x4220000000000000},
{0x900000000, 0x4222000000000000},
- {0x273a798e187937a3, 0x43c39d3cc70c3c9c},
- {0xece3af835495a16b, 0x43ed9c75f06a92b4},
- {0xb668ecc11223344, 0x43a6cd1d98224467},
- {0x9e, 0x4063c00000000000},
- {0x43, 0x4050c00000000000},
- {0xaf73, 0x40e5ee6000000000},
- {0x116b, 0x40b16b0000000000},
- {0x658ecc, 0x415963b300000000},
- {0x2b3b4c, 0x41459da600000000},
- {0x88776655, 0x41e10eeccaa00000},
- {0x70000000, 0x41dc000000000000},
- {0x7200000, 0x419c800000000000},
- {0x7fffffff, 0x41dfffffffc00000},
- {0x56123761, 0x41d5848dd8400000},
- {0x7fffff00, 0x41dfffffc0000000},
- {0x761c4761eeeeeeee, 0x43dd8711d87bbbbc},
- {0x80000000eeeeeeee, 0x43e00000001dddde},
- {0x88888888dddddddd, 0x43e11111111bbbbc},
- {0xa0000000dddddddd, 0x43e40000001bbbbc},
- {0xddddddddaaaaaaaa, 0x43ebbbbbbbb55555},
- {0xe0000000aaaaaaaa, 0x43ec000000155555},
- {0xeeeeeeeeeeeeeeee, 0x43edddddddddddde},
- {0xfffffffdeeeeeeee, 0x43efffffffbdddde},
- {0xf0000000dddddddd, 0x43ee0000001bbbbc},
- {0x7fffffdddddddd, 0x435ffffff7777777},
- {0x3fffffaaaaaaaa, 0x434fffffd5555555},
- {0x1fffffaaaaaaaa, 0x433fffffaaaaaaaa},
- {0xfffff, 0x412ffffe00000000},
- {0x7ffff, 0x411ffffc00000000},
- {0x3ffff, 0x410ffff800000000},
- {0x1ffff, 0x40fffff000000000},
- {0xffff, 0x40efffe000000000},
- {0x7fff, 0x40dfffc000000000},
- {0x3fff, 0x40cfff8000000000},
- {0x1fff, 0x40bfff0000000000},
- {0xfff, 0x40affe0000000000},
- {0x7ff, 0x409ffc0000000000},
- {0x3ff, 0x408ff80000000000},
- {0x1ff, 0x407ff00000000000},
- {0x3fffffffffff, 0x42cfffffffffff80},
- {0x1fffffffffff, 0x42bfffffffffff00},
- {0xfffffffffff, 0x42affffffffffe00},
- {0x7ffffffffff, 0x429ffffffffffc00},
- {0x3ffffffffff, 0x428ffffffffff800},
- {0x1ffffffffff, 0x427ffffffffff000},
- {0x8000008000000000, 0x43e0000010000000},
- {0x8000008000000001, 0x43e0000010000000},
- {0x8000000000000400, 0x43e0000000000000},
- {0x8000000000000401, 0x43e0000000000001}};
+ {0x273A798E187937A3, 0x43C39D3CC70C3C9C},
+ {0xECE3AF835495A16B, 0x43ED9C75F06A92B4},
+ {0xB668ECC11223344, 0x43A6CD1D98224467},
+ {0x9E, 0x4063C00000000000},
+ {0x43, 0x4050C00000000000},
+ {0xAF73, 0x40E5EE6000000000},
+ {0x116B, 0x40B16B0000000000},
+ {0x658ECC, 0x415963B300000000},
+ {0x2B3B4C, 0x41459DA600000000},
+ {0x88776655, 0x41E10EECCAA00000},
+ {0x70000000, 0x41DC000000000000},
+ {0x7200000, 0x419C800000000000},
+ {0x7FFFFFFF, 0x41DFFFFFFFC00000},
+ {0x56123761, 0x41D5848DD8400000},
+ {0x7FFFFF00, 0x41DFFFFFC0000000},
+ {0x761C4761EEEEEEEE, 0x43DD8711D87BBBBC},
+ {0x80000000EEEEEEEE, 0x43E00000001DDDDE},
+ {0x88888888DDDDDDDD, 0x43E11111111BBBBC},
+ {0xA0000000DDDDDDDD, 0x43E40000001BBBBC},
+ {0xDDDDDDDDAAAAAAAA, 0x43EBBBBBBBB55555},
+ {0xE0000000AAAAAAAA, 0x43EC000000155555},
+ {0xEEEEEEEEEEEEEEEE, 0x43EDDDDDDDDDDDDE},
+ {0xFFFFFFFDEEEEEEEE, 0x43EFFFFFFFBDDDDE},
+ {0xF0000000DDDDDDDD, 0x43EE0000001BBBBC},
+ {0x7FFFFFDDDDDDDD, 0x435FFFFFF7777777},
+ {0x3FFFFFAAAAAAAA, 0x434FFFFFD5555555},
+ {0x1FFFFFAAAAAAAA, 0x433FFFFFAAAAAAAA},
+ {0xFFFFF, 0x412FFFFE00000000},
+ {0x7FFFF, 0x411FFFFC00000000},
+ {0x3FFFF, 0x410FFFF800000000},
+ {0x1FFFF, 0x40FFFFF000000000},
+ {0xFFFF, 0x40EFFFE000000000},
+ {0x7FFF, 0x40DFFFC000000000},
+ {0x3FFF, 0x40CFFF8000000000},
+ {0x1FFF, 0x40BFFF0000000000},
+ {0xFFF, 0x40AFFE0000000000},
+ {0x7FF, 0x409FFC0000000000},
+ {0x3FF, 0x408FF80000000000},
+ {0x1FF, 0x407FF00000000000},
+ {0x3FFFFFFFFFFF, 0x42CFFFFFFFFFFF80},
+ {0x1FFFFFFFFFFF, 0x42BFFFFFFFFFFF00},
+ {0xFFFFFFFFFFF, 0x42AFFFFFFFFFFE00},
+ {0x7FFFFFFFFFF, 0x429FFFFFFFFFFC00},
+ {0x3FFFFFFFFFF, 0x428FFFFFFFFFF800},
+ {0x1FFFFFFFFFF, 0x427FFFFFFFFFF000},
+ {0x8000008000000000, 0x43E0000010000000},
+ {0x8000008000000001, 0x43E0000010000000},
+ {0x8000000000000400, 0x43E0000000000000},
+ {0x8000000000000401, 0x43E0000000000001}};
BufferedRawMachineAssemblerTester<double> m(MachineType::Uint64());
m.Return(m.RoundUint64ToFloat64(m.Parameter(0)));
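Each `expected` entry is the raw IEEE-754 bit pattern of the conversion result, so the checks are exact: 0x1 maps to 0x3FF0000000000000, the encoding of 1.0, and 0xFFFFFFFFFFFFFFFF rounds up to 2^64, encoded as 0x43F0000000000000. On a host with IEEE-754 doubles and round-to-nearest-even, the table can be reproduced with a sketch like this:

#include <cstdint>
#include <cstring>

// Produce the bit pattern of the uint64 -> double conversion, matching the
// `expected` column above on hosts with default IEEE rounding.
uint64_t RoundUint64ToFloat64Bits(uint64_t input) {
  double d = static_cast<double>(input);
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  return bits;
}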
@@ -6572,81 +6573,81 @@ TEST(RunRoundUint64ToFloat32) {
uint64_t input;
uint32_t expected;
} values[] = {{0x0, 0x0},
- {0x1, 0x3f800000},
- {0xffffffff, 0x4f800000},
- {0x1b09788b, 0x4dd84bc4},
- {0x4c5fce8, 0x4c98bf9d},
- {0xcc0de5bf, 0x4f4c0de6},
+ {0x1, 0x3F800000},
+ {0xFFFFFFFF, 0x4F800000},
+ {0x1B09788B, 0x4DD84BC4},
+ {0x4C5FCE8, 0x4C98BF9D},
+ {0xCC0DE5BF, 0x4F4C0DE6},
{0x2, 0x40000000},
{0x3, 0x40400000},
{0x4, 0x40800000},
- {0x5, 0x40a00000},
+ {0x5, 0x40A00000},
{0x8, 0x41000000},
{0x9, 0x41100000},
- {0xffffffffffffffff, 0x5f800000},
- {0xfffffffffffffffe, 0x5f800000},
- {0xfffffffffffffffd, 0x5f800000},
+ {0xFFFFFFFFFFFFFFFF, 0x5F800000},
+ {0xFFFFFFFFFFFFFFFE, 0x5F800000},
+ {0xFFFFFFFFFFFFFFFD, 0x5F800000},
{0x0, 0x0},
- {0x100000000, 0x4f800000},
- {0xffffffff00000000, 0x5f800000},
- {0x1b09788b00000000, 0x5dd84bc4},
- {0x4c5fce800000000, 0x5c98bf9d},
- {0xcc0de5bf00000000, 0x5f4c0de6},
+ {0x100000000, 0x4F800000},
+ {0xFFFFFFFF00000000, 0x5F800000},
+ {0x1B09788B00000000, 0x5DD84BC4},
+ {0x4C5FCE800000000, 0x5C98BF9D},
+ {0xCC0DE5BF00000000, 0x5F4C0DE6},
{0x200000000, 0x50000000},
{0x300000000, 0x50400000},
{0x400000000, 0x50800000},
- {0x500000000, 0x50a00000},
+ {0x500000000, 0x50A00000},
{0x800000000, 0x51000000},
{0x900000000, 0x51100000},
- {0x273a798e187937a3, 0x5e1ce9e6},
- {0xece3af835495a16b, 0x5f6ce3b0},
- {0xb668ecc11223344, 0x5d3668ed},
- {0x9e, 0x431e0000},
+ {0x273A798E187937A3, 0x5E1CE9E6},
+ {0xECE3AF835495A16B, 0x5F6CE3B0},
+ {0xB668ECC11223344, 0x5D3668ED},
+ {0x9E, 0x431E0000},
{0x43, 0x42860000},
- {0xaf73, 0x472f7300},
- {0x116b, 0x458b5800},
- {0x658ecc, 0x4acb1d98},
- {0x2b3b4c, 0x4a2ced30},
- {0x88776655, 0x4f087766},
- {0x70000000, 0x4ee00000},
- {0x7200000, 0x4ce40000},
- {0x7fffffff, 0x4f000000},
- {0x56123761, 0x4eac246f},
- {0x7fffff00, 0x4efffffe},
- {0x761c4761eeeeeeee, 0x5eec388f},
- {0x80000000eeeeeeee, 0x5f000000},
- {0x88888888dddddddd, 0x5f088889},
- {0xa0000000dddddddd, 0x5f200000},
- {0xddddddddaaaaaaaa, 0x5f5dddde},
- {0xe0000000aaaaaaaa, 0x5f600000},
- {0xeeeeeeeeeeeeeeee, 0x5f6eeeef},
- {0xfffffffdeeeeeeee, 0x5f800000},
- {0xf0000000dddddddd, 0x5f700000},
- {0x7fffffdddddddd, 0x5b000000},
- {0x3fffffaaaaaaaa, 0x5a7fffff},
- {0x1fffffaaaaaaaa, 0x59fffffd},
- {0xfffff, 0x497ffff0},
- {0x7ffff, 0x48ffffe0},
- {0x3ffff, 0x487fffc0},
- {0x1ffff, 0x47ffff80},
- {0xffff, 0x477fff00},
- {0x7fff, 0x46fffe00},
- {0x3fff, 0x467ffc00},
- {0x1fff, 0x45fff800},
- {0xfff, 0x457ff000},
- {0x7ff, 0x44ffe000},
- {0x3ff, 0x447fc000},
- {0x1ff, 0x43ff8000},
- {0x3fffffffffff, 0x56800000},
- {0x1fffffffffff, 0x56000000},
- {0xfffffffffff, 0x55800000},
- {0x7ffffffffff, 0x55000000},
- {0x3ffffffffff, 0x54800000},
- {0x1ffffffffff, 0x54000000},
- {0x8000008000000000, 0x5f000000},
- {0x8000008000000001, 0x5f000001},
- {0x8000000000000400, 0x5f000000},
- {0x8000000000000401, 0x5f000000}};
+ {0xAF73, 0x472F7300},
+ {0x116B, 0x458B5800},
+ {0x658ECC, 0x4ACB1D98},
+ {0x2B3B4C, 0x4A2CED30},
+ {0x88776655, 0x4F087766},
+ {0x70000000, 0x4EE00000},
+ {0x7200000, 0x4CE40000},
+ {0x7FFFFFFF, 0x4F000000},
+ {0x56123761, 0x4EAC246F},
+ {0x7FFFFF00, 0x4EFFFFFE},
+ {0x761C4761EEEEEEEE, 0x5EEC388F},
+ {0x80000000EEEEEEEE, 0x5F000000},
+ {0x88888888DDDDDDDD, 0x5F088889},
+ {0xA0000000DDDDDDDD, 0x5F200000},
+ {0xDDDDDDDDAAAAAAAA, 0x5F5DDDDE},
+ {0xE0000000AAAAAAAA, 0x5F600000},
+ {0xEEEEEEEEEEEEEEEE, 0x5F6EEEEF},
+ {0xFFFFFFFDEEEEEEEE, 0x5F800000},
+ {0xF0000000DDDDDDDD, 0x5F700000},
+ {0x7FFFFFDDDDDDDD, 0x5B000000},
+ {0x3FFFFFAAAAAAAA, 0x5A7FFFFF},
+ {0x1FFFFFAAAAAAAA, 0x59FFFFFD},
+ {0xFFFFF, 0x497FFFF0},
+ {0x7FFFF, 0x48FFFFE0},
+ {0x3FFFF, 0x487FFFC0},
+ {0x1FFFF, 0x47FFFF80},
+ {0xFFFF, 0x477FFF00},
+ {0x7FFF, 0x46FFFE00},
+ {0x3FFF, 0x467FFC00},
+ {0x1FFF, 0x45FFF800},
+ {0xFFF, 0x457FF000},
+ {0x7FF, 0x44FFE000},
+ {0x3FF, 0x447FC000},
+ {0x1FF, 0x43FF8000},
+ {0x3FFFFFFFFFFF, 0x56800000},
+ {0x1FFFFFFFFFFF, 0x56000000},
+ {0xFFFFFFFFFFF, 0x55800000},
+ {0x7FFFFFFFFFF, 0x55000000},
+ {0x3FFFFFFFFFF, 0x54800000},
+ {0x1FFFFFFFFFF, 0x54000000},
+ {0x8000008000000000, 0x5F000000},
+ {0x8000008000000001, 0x5F000001},
+ {0x8000000000000400, 0x5F000000},
+ {0x8000000000000401, 0x5F000000}};
BufferedRawMachineAssemblerTester<float> m(MachineType::Uint64());
m.Return(m.RoundUint64ToFloat32(m.Parameter(0)));
@@ -6737,7 +6738,7 @@ TEST(RunComputedCodeObject) {
// TODO(titzer): all this descriptor hackery is just to call the above
// functions as code objects instead of direct addresses.
- CSignature0<int32_t> sig;
+ CSignatureOf<int32_t> sig;
CallDescriptor* c = Linkage::GetSimplifiedCDescriptor(r.zone(), &sig);
LinkageLocation ret[] = {c->GetReturnLocation(0)};
Signature<LinkageLocation> loc(1, 0, ret);
@@ -6832,7 +6833,7 @@ TEST(Regression5923) {
TEST(Regression5951) {
BufferedRawMachineAssemblerTester<int64_t> m(MachineType::Int64());
m.Return(m.Word64And(m.Word64Shr(m.Parameter(0), m.Int64Constant(0)),
- m.Int64Constant(0xffffffffffffffffl)));
+ m.Int64Constant(0xFFFFFFFFFFFFFFFFl)));
int64_t input = 1234;
CHECK_EQ(input, m.Call(input));
}
@@ -6879,7 +6880,7 @@ TEST(Regression6028) {
TEST(Regression5951_32bit) {
BufferedRawMachineAssemblerTester<int32_t> m(MachineType::Int32());
m.Return(m.Word32And(m.Word32Shr(m.Parameter(0), m.Int32Constant(0)),
- m.Int32Constant(0xffffffff)));
+ m.Int32Constant(0xFFFFFFFF)));
int32_t input = 1234;
CHECK_EQ(input, m.Call(input));
}
diff --git a/deps/v8/test/cctest/compiler/test-run-native-calls.cc b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
index 8472c1e70e..42196e6454 100644
--- a/deps/v8/test/cctest/compiler/test-run-native-calls.cc
+++ b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
@@ -207,7 +207,7 @@ class RegisterConfig {
compiler::Operator::kNoProperties, // properties
kCalleeSaveRegisters, // callee-saved registers
kCalleeSaveFPRegisters, // callee-saved fp regs
- CallDescriptor::kUseNativeStack, // flags
+ CallDescriptor::kNoFlags, // flags
"c-call");
}
@@ -446,7 +446,7 @@ class Computer {
inner = CompileGraph("Compute", desc, &graph, raw.Export());
}
- CSignature0<int32_t> csig;
+ CSignatureOf<int32_t> csig;
ArgsBuffer<CType> io(num_params, seed);
{
@@ -583,7 +583,7 @@ static void CopyTwentyInt32(CallDescriptor* desc) {
inner = CompileGraph("CopyTwentyInt32", desc, &graph, raw.Export());
}
- CSignature0<int32_t> csig;
+ CSignatureOf<int32_t> csig;
Handle<Code> wrapper = Handle<Code>::null();
{
// Loads parameters from the input buffer and calls the above code.
@@ -1076,7 +1076,7 @@ void MixedParamTest(int start) {
char bytes[kDoubleSize];
V8_ALIGNED(8) char output[kDoubleSize];
int expected_size = 0;
- CSignature0<int32_t> csig;
+ CSignatureOf<int32_t> csig;
{
// Wrap the select code with a callable function that passes constants.
Zone zone(&allocator, ZONE_NAME);
@@ -1211,7 +1211,7 @@ TEST(RunStackSlotInt32) {
#if !V8_TARGET_ARCH_32_BIT
TEST(RunStackSlotInt64) {
- int64_t magic = 0x123456789abcdef0;
+ int64_t magic = 0x123456789ABCDEF0;
TestStackSlot(MachineType::Int64(), magic);
}
#endif
diff --git a/deps/v8/test/cctest/compiler/test-run-retpoline.cc b/deps/v8/test/cctest/compiler/test-run-retpoline.cc
new file mode 100644
index 0000000000..152ed448ef
--- /dev/null
+++ b/deps/v8/test/cctest/compiler/test-run-retpoline.cc
@@ -0,0 +1,208 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/assembler-inl.h"
+#include "src/code-stub-assembler.h"
+
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/code-assembler-tester.h"
+#include "test/cctest/compiler/function-tester.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+namespace test_run_retpoline {
+
+#define __ assembler.
+
+namespace {
+
+// Function that takes a number of pointer-sized integer arguments, calculates a
+// weighted sum of them and returns it.
+Handle<Code> BuildCallee(Isolate* isolate, CallDescriptor* descriptor) {
+ CodeAssemblerTester tester(isolate, descriptor, "callee");
+ CodeStubAssembler assembler(tester.state());
+ int param_count = static_cast<int>(descriptor->StackParameterCount());
+ Node* sum = __ IntPtrConstant(0);
+ for (int i = 0; i < param_count; ++i) {
+ Node* product = __ IntPtrMul(__ Parameter(i), __ IntPtrConstant(i + 1));
+ sum = __ IntPtrAdd(sum, product);
+ }
+ __ Return(sum);
+ return tester.GenerateCodeCloseAndEscape();
+}
+
+// Function that calls or tail-calls (depending on |tail|) another function
+// with a number of pointer-sized integer arguments.
+Handle<Code> BuildCaller(Isolate* isolate, CallDescriptor* descriptor,
+ CallDescriptor* callee_descriptor, bool tail) {
+ CodeAssemblerTester tester(isolate, descriptor, "caller");
+ CodeStubAssembler assembler(tester.state());
+ std::vector<Node*> params;
+ // The first parameter is always the callee.
+ Handle<Code> callee = BuildCallee(isolate, callee_descriptor);
+ // Defeat the instruction selector: route the callee through a phi so the
+ // call target is not a known constant and the call stays indirect.
+ CodeStubAssembler::Variable target_var(&assembler,
+ MachineRepresentation::kTagged);
+ CodeStubAssembler::Label t(&assembler), f(&assembler),
+ end(&assembler, &target_var);
+ __ Branch(__ Int32Constant(0), &t, &f);
+ __ BIND(&t);
+ target_var.Bind(__ HeapConstant(callee));
+ __ Goto(&end);
+ __ BIND(&f);
+ target_var.Bind(__ HeapConstant(callee));
+ __ Goto(&end);
+ __ BIND(&end);
+ params.push_back(target_var.value());
+
+ int param_count = static_cast<int>(callee_descriptor->StackParameterCount());
+ for (int i = 0; i < param_count; ++i) {
+ params.push_back(__ IntPtrConstant(i));
+ }
+ DCHECK_EQ(param_count + 1, params.size());
+ if (tail) {
+ tester.raw_assembler_for_testing()->TailCallN(
+ callee_descriptor, param_count + 1, params.data());
+ } else {
+ Node* result = tester.raw_assembler_for_testing()->CallN(
+ callee_descriptor, param_count + 1, params.data());
+ __ Return(result);
+ }
+ return tester.GenerateCodeCloseAndEscape();
+}
+
+// Setup function, which calls "caller".
+Handle<Code> BuildSetupFunction(Isolate* isolate,
+ CallDescriptor* caller_descriptor,
+ CallDescriptor* callee_descriptor, bool tail) {
+ CodeAssemblerTester tester(isolate, 0);
+ CodeStubAssembler assembler(tester.state());
+ std::vector<Node*> params;
+ // The first parameter is always the callee.
+ params.push_back(__ HeapConstant(
+ BuildCaller(isolate, caller_descriptor, callee_descriptor, tail)));
+ // Set up arguments for "Caller".
+ int param_count = static_cast<int>(caller_descriptor->StackParameterCount());
+ for (int i = 0; i < param_count; ++i) {
+ // Use values that are different from the ones we will pass to this
+ // function's callee later.
+ params.push_back(__ IntPtrConstant(i + 42));
+ }
+ DCHECK_EQ(param_count + 1, params.size());
+ Node* raw_result = tester.raw_assembler_for_testing()->CallN(
+ caller_descriptor, param_count + 1, params.data());
+ __ Return(__ SmiTag(raw_result));
+ return tester.GenerateCodeCloseAndEscape();
+}
+
+CallDescriptor* CreateDescriptorForStackArguments(Zone* zone,
+ int stack_param_count) {
+ LocationSignature::Builder locations(zone, 1,
+ static_cast<size_t>(stack_param_count));
+
+ locations.AddReturn(LinkageLocation::ForRegister(kReturnRegister0.code(),
+ MachineType::IntPtr()));
+
+ for (int i = 0; i < stack_param_count; ++i) {
+ locations.AddParam(LinkageLocation::ForCallerFrameSlot(
+ i - stack_param_count, MachineType::IntPtr()));
+ }
+
+ return new (zone)
+ CallDescriptor(CallDescriptor::kCallCodeObject, // kind
+ MachineType::AnyTagged(), // target MachineType
+ LinkageLocation::ForAnyRegister(
+ MachineType::AnyTagged()), // target location
+ locations.Build(), // location_sig
+ stack_param_count, // stack_parameter_count
+ Operator::kNoProperties, // properties
+ kNoCalleeSaved, // callee-saved registers
+ kNoCalleeSaved, // callee-saved fp
+ CallDescriptor::kRetpoline); // flags
+}
+
+// Test a call or tail call (depending on |tail|) from a caller with n
+// parameters to a callee with m parameters. All parameters are pointer-sized.
+void TestHelper(int n, int m, bool tail) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ CallDescriptor* caller_descriptor =
+ CreateDescriptorForStackArguments(zone, n);
+ CallDescriptor* callee_descriptor =
+ CreateDescriptorForStackArguments(zone, m);
+ Handle<Code> setup =
+ BuildSetupFunction(isolate, caller_descriptor, callee_descriptor, tail);
+ FunctionTester ft(setup, 0);
+ Handle<Object> result = ft.Call().ToHandleChecked();
+ int expected = 0;
+ for (int i = 0; i < m; ++i) expected += (i + 1) * i;
+ CHECK_EQ(expected, Handle<Smi>::cast(result)->value());
+}
+
+} // namespace
+
+#undef __
+
+TEST(RetpolineOddEven) {
+ TestHelper(1, 0, false);
+ TestHelper(1, 2, false);
+ TestHelper(3, 2, false);
+ TestHelper(3, 4, false);
+}
+
+TEST(RetpolineOddEvenTail) {
+ TestHelper(1, 0, true);
+ TestHelper(1, 2, true);
+ TestHelper(3, 2, true);
+ TestHelper(3, 4, true);
+}
+
+TEST(RetpolineOddOdd) {
+ TestHelper(1, 1, false);
+ TestHelper(1, 3, false);
+ TestHelper(3, 1, false);
+ TestHelper(3, 3, false);
+}
+
+TEST(RetpolineOddOddTail) {
+ TestHelper(1, 1, true);
+ TestHelper(1, 3, true);
+ TestHelper(3, 1, true);
+ TestHelper(3, 3, true);
+}
+
+TEST(RetpolineEvenEven) {
+ TestHelper(0, 0, false);
+ TestHelper(0, 2, false);
+ TestHelper(2, 0, false);
+ TestHelper(2, 2, false);
+}
+
+TEST(RetpolineEvenEvenTail) {
+ TestHelper(0, 0, true);
+ TestHelper(0, 2, true);
+ TestHelper(2, 0, true);
+ TestHelper(2, 2, true);
+}
+
+TEST(RetpolineEvenOdd) {
+ TestHelper(0, 1, false);
+ TestHelper(0, 3, false);
+ TestHelper(2, 1, false);
+ TestHelper(2, 3, false);
+}
+
+TEST(RetpolineEvenOddTail) {
+ TestHelper(0, 1, true);
+ TestHelper(0, 3, true);
+ TestHelper(2, 1, true);
+ TestHelper(2, 3, true);
+}
+
+} // namespace test_run_retpoline
+} // namespace compiler
+} // namespace internal
+} // namespace v8
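For reference, TestHelper above runs each caller/callee shape both as a regular call and as a tail call through a descriptor carrying CallDescriptor::kRetpoline, and its `expected` value is the callee's weighted sum over its m parameters 0..m-1, which has the closed form (m - 1) * m * (m + 1) / 3. A standalone version of that check, illustrative only:

// Weighted sum the callee computes: sum over i in [0, m) of (i + 1) * i.
// Algebraically this equals (m - 1) * m * (m + 1) / 3.
int ExpectedWeightedSum(int m) {
  int expected = 0;
  for (int i = 0; i < m; ++i) expected += (i + 1) * i;
  return expected;
}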
diff --git a/deps/v8/test/cctest/compiler/test-run-tail-calls.cc b/deps/v8/test/cctest/compiler/test-run-tail-calls.cc
new file mode 100644
index 0000000000..2fbc90d46d
--- /dev/null
+++ b/deps/v8/test/cctest/compiler/test-run-tail-calls.cc
@@ -0,0 +1,171 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/assembler-inl.h"
+#include "src/base/utils/random-number-generator.h"
+#include "src/code-stub-assembler.h"
+
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/code-assembler-tester.h"
+#include "test/cctest/compiler/function-tester.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+namespace test_run_tail_calls {
+
+#define __ assembler.
+
+namespace {
+
+// Function that takes a number of pointer-sized integer arguments, calculates a
+// weighted sum of them and returns it.
+Handle<Code> BuildCallee(Isolate* isolate, CallDescriptor* descriptor) {
+ CodeAssemblerTester tester(isolate, descriptor, "callee");
+ CodeStubAssembler assembler(tester.state());
+ int param_count = static_cast<int>(descriptor->StackParameterCount());
+ Node* sum = __ IntPtrConstant(0);
+ for (int i = 0; i < param_count; ++i) {
+ Node* product = __ IntPtrMul(__ Parameter(i), __ IntPtrConstant(i + 1));
+ sum = __ IntPtrAdd(sum, product);
+ }
+ __ Return(sum);
+ return tester.GenerateCodeCloseAndEscape();
+}
+
+// Function that tail-calls another function with a number of pointer-sized
+// integer arguments.
+Handle<Code> BuildCaller(Isolate* isolate, CallDescriptor* descriptor,
+ CallDescriptor* callee_descriptor) {
+ CodeAssemblerTester tester(isolate, descriptor, "caller");
+ CodeStubAssembler assembler(tester.state());
+ std::vector<Node*> params;
+ // The first parameter is always the callee.
+ params.push_back(__ HeapConstant(BuildCallee(isolate, callee_descriptor)));
+ int param_count = static_cast<int>(callee_descriptor->StackParameterCount());
+ for (int i = 0; i < param_count; ++i) {
+ params.push_back(__ IntPtrConstant(i));
+ }
+ DCHECK_EQ(param_count + 1, params.size());
+ tester.raw_assembler_for_testing()->TailCallN(callee_descriptor,
+ param_count + 1, params.data());
+ return tester.GenerateCodeCloseAndEscape();
+}
+
+// Setup function, which calls "caller".
+Handle<Code> BuildSetupFunction(Isolate* isolate,
+ CallDescriptor* caller_descriptor,
+ CallDescriptor* callee_descriptor) {
+ CodeAssemblerTester tester(isolate, 0);
+ CodeStubAssembler assembler(tester.state());
+ std::vector<Node*> params;
+ // The first parameter is always the callee.
+ params.push_back(__ HeapConstant(
+ BuildCaller(isolate, caller_descriptor, callee_descriptor)));
+ // Set up arguments for "Caller".
+ int param_count = static_cast<int>(caller_descriptor->StackParameterCount());
+ for (int i = 0; i < param_count; ++i) {
+ // Use values that are different from the ones we will pass to this
+ // function's callee later.
+ params.push_back(__ IntPtrConstant(i + 42));
+ }
+ DCHECK_EQ(param_count + 1, params.size());
+ Node* raw_result = tester.raw_assembler_for_testing()->CallN(
+ caller_descriptor, param_count + 1, params.data());
+ __ Return(__ SmiTag(raw_result));
+ return tester.GenerateCodeCloseAndEscape();
+}
+
+CallDescriptor* CreateDescriptorForStackArguments(Zone* zone,
+ int stack_param_count) {
+ LocationSignature::Builder locations(zone, 1,
+ static_cast<size_t>(stack_param_count));
+
+ locations.AddReturn(LinkageLocation::ForRegister(kReturnRegister0.code(),
+ MachineType::IntPtr()));
+
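+ // All parameters live on the stack; the caller frame slot indices run
+ // from -stack_param_count up to -1.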
+ for (int i = 0; i < stack_param_count; ++i) {
+ locations.AddParam(LinkageLocation::ForCallerFrameSlot(
+ i - stack_param_count, MachineType::IntPtr()));
+ }
+
+ return new (zone)
+ CallDescriptor(CallDescriptor::kCallCodeObject, // kind
+ MachineType::AnyTagged(), // target MachineType
+ LinkageLocation::ForAnyRegister(
+ MachineType::AnyTagged()), // target location
+ locations.Build(), // location_sig
+ stack_param_count, // stack_parameter_count
+ Operator::kNoProperties, // properties
+ kNoCalleeSaved, // callee-saved registers
+ kNoCalleeSaved, // callee-saved fp
+ CallDescriptor::kNoFlags); // flags
+}
+
+// Test a tail call from a caller with n parameters to a callee with m
+// parameters. All parameters are pointer-sized.
+void TestHelper(int n, int m) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ CallDescriptor* caller_descriptor =
+ CreateDescriptorForStackArguments(zone, n);
+ CallDescriptor* callee_descriptor =
+ CreateDescriptorForStackArguments(zone, m);
+ Handle<Code> setup =
+ BuildSetupFunction(isolate, caller_descriptor, callee_descriptor);
+ FunctionTester ft(setup, 0);
+ Handle<Object> result = ft.Call().ToHandleChecked();
+ int expected = 0;
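+ // The callee receives the arguments 0, 1, ..., m - 1, so the weighted sum
+ // is sum((i + 1) * i); e.g. for m == 3 this is 0 + 2 + 6 == 8.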
+ for (int i = 0; i < m; ++i) expected += (i + 1) * i;
+ CHECK_EQ(expected, Handle<Smi>::cast(result)->value());
+}
+
+} // namespace
+
+#undef __
+
+TEST(CallerOddCalleeEven) {
+ TestHelper(1, 0);
+ TestHelper(1, 2);
+ TestHelper(3, 2);
+ TestHelper(3, 4);
+}
+
+TEST(CallerOddCalleeOdd) {
+ TestHelper(1, 1);
+ TestHelper(1, 3);
+ TestHelper(3, 1);
+ TestHelper(3, 3);
+}
+
+TEST(CallerEvenCalleeEven) {
+ TestHelper(0, 0);
+ TestHelper(0, 2);
+ TestHelper(2, 0);
+ TestHelper(2, 2);
+}
+
+TEST(CallerEvenCalleeOdd) {
+ TestHelper(0, 1);
+ TestHelper(0, 3);
+ TestHelper(2, 1);
+ TestHelper(2, 3);
+}
+
+TEST(FuzzStackParamCount) {
+ const int kNumTests = 20;
+ const int kMaxSlots = 30;
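+ // Try random caller/callee stack parameter counts in [0, kMaxSlots).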
+ base::RandomNumberGenerator* const rng = CcTest::random_number_generator();
+ for (int i = 0; i < kNumTests; ++i) {
+ int n = rng->NextInt(kMaxSlots);
+ int m = rng->NextInt(kMaxSlots);
+ TestHelper(n, m);
+ }
+}
+
+} // namespace test_run_tail_calls
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/compiler/test-run-wasm-machops.cc b/deps/v8/test/cctest/compiler/test-run-wasm-machops.cc
index b451b73e0d..2f1536433f 100644
--- a/deps/v8/test/cctest/compiler/test-run-wasm-machops.cc
+++ b/deps/v8/test/cctest/compiler/test-run-wasm-machops.cc
@@ -164,14 +164,14 @@ TEST(Uint32LessThanMemoryRelocation) {
Node* cond = m.AddNode(m.machine()->Uint32LessThan(), index, limit);
m.Branch(cond, &within_bounds, &out_of_bounds);
m.Bind(&within_bounds);
- m.Return(m.Int32Constant(0xaced));
+ m.Return(m.Int32Constant(0xACED));
m.Bind(&out_of_bounds);
- m.Return(m.Int32Constant(0xdeadbeef));
+ m.Return(m.Int32Constant(0xDEADBEEF));
// Check that index is out of bounds with current size
- CHECK_EQ(0xdeadbeef, m.Call());
+ CHECK_EQ(0xDEADBEEF, m.Call());
wasm_context.SetRawMemory(wasm_context.mem_start, 0x400);
// Check that after limit is increased, index is within bounds.
- CHECK_EQ(0xacedu, m.Call());
+ CHECK_EQ(0xACEDu, m.Call());
}
TEST(Uint32LessThanFunctionTableRelocation) {
@@ -183,17 +183,17 @@ TEST(Uint32LessThanFunctionTableRelocation) {
Node* cond = m.AddNode(m.machine()->Uint32LessThan(), index, limit);
m.Branch(cond, &within_bounds, &out_of_bounds);
m.Bind(&within_bounds);
- m.Return(m.Int32Constant(0xaced));
+ m.Return(m.Int32Constant(0xACED));
m.Bind(&out_of_bounds);
- m.Return(m.Int32Constant(0xdeadbeef));
+ m.Return(m.Int32Constant(0xDEADBEEF));
// Check that index is out of bounds with current size
- CHECK_EQ(0xdeadbeef, m.Call());
+ CHECK_EQ(0xDEADBEEF, m.Call());
m.GenerateCode();
Handle<Code> code = m.GetCode();
UpdateFunctionTableSizeReferences(code, 0x200, 0x400);
// Check that after limit is increased, index is within bounds.
- CHECK_EQ(0xaced, m.Call());
+ CHECK_EQ(0xACED, m.Call());
}
} // namespace compiler
diff --git a/deps/v8/test/cctest/compiler/value-helper.h b/deps/v8/test/cctest/compiler/value-helper.h
index 28e64c703e..a69a371e36 100644
--- a/deps/v8/test/cctest/compiler/value-helper.h
+++ b/deps/v8/test/cctest/compiler/value-helper.h
@@ -239,19 +239,19 @@ class ValueHelper {
}
static constexpr uint32_t uint32_array[] = {
- 0x00000000, 0x00000001, 0xffffffff, 0x1b09788b, 0x04c5fce8, 0xcc0de5bf,
+ 0x00000000, 0x00000001, 0xFFFFFFFF, 0x1B09788B, 0x04C5FCE8, 0xCC0DE5BF,
// This row is useful for testing lea optimizations on intel.
0x00000002, 0x00000003, 0x00000004, 0x00000005, 0x00000008, 0x00000009,
- 0x273a798e, 0x187937a3, 0xece3af83, 0x5495a16b, 0x0b668ecc, 0x11223344,
- 0x0000009e, 0x00000043, 0x0000af73, 0x0000116b, 0x00658ecc, 0x002b3b4c,
- 0x88776655, 0x70000000, 0x07200000, 0x7fffffff, 0x56123761, 0x7fffff00,
- 0x761c4761, 0x80000000, 0x88888888, 0xa0000000, 0xdddddddd, 0xe0000000,
- 0xeeeeeeee, 0xfffffffd, 0xf0000000, 0x007fffff, 0x003fffff, 0x001fffff,
- 0x000fffff, 0x0007ffff, 0x0003ffff, 0x0001ffff, 0x0000ffff, 0x00007fff,
- 0x00003fff, 0x00001fff, 0x00000fff, 0x000007ff, 0x000003ff, 0x000001ff,
+ 0x273A798E, 0x187937A3, 0xECE3AF83, 0x5495A16B, 0x0B668ECC, 0x11223344,
+ 0x0000009E, 0x00000043, 0x0000AF73, 0x0000116B, 0x00658ECC, 0x002B3B4C,
+ 0x88776655, 0x70000000, 0x07200000, 0x7FFFFFFF, 0x56123761, 0x7FFFFF00,
+ 0x761C4761, 0x80000000, 0x88888888, 0xA0000000, 0xDDDDDDDD, 0xE0000000,
+ 0xEEEEEEEE, 0xFFFFFFFD, 0xF0000000, 0x007FFFFF, 0x003FFFFF, 0x001FFFFF,
+ 0x000FFFFF, 0x0007FFFF, 0x0003FFFF, 0x0001FFFF, 0x0000FFFF, 0x00007FFF,
+ 0x00003FFF, 0x00001FFF, 0x00000FFF, 0x000007FF, 0x000003FF, 0x000001FF,
// Bit pattern of a quiet NaN and signaling NaN, with or without
// additional payload.
- 0x7fc00000, 0x7f800000, 0x7fffffff, 0x7f876543};
+ 0x7FC00000, 0x7F800000, 0x7FFFFFFF, 0x7F876543};
static constexpr Vector<const uint32_t> uint32_vector() {
return ArrayVector(uint32_array);
@@ -262,30 +262,30 @@ class ValueHelper {
}
static constexpr uint64_t uint64_array[] = {
- 0x00000000, 0x00000001, 0xffffffff, 0x1b09788b, 0x04c5fce8, 0xcc0de5bf,
+ 0x00000000, 0x00000001, 0xFFFFFFFF, 0x1B09788B, 0x04C5FCE8, 0xCC0DE5BF,
0x00000002, 0x00000003, 0x00000004, 0x00000005, 0x00000008, 0x00000009,
- 0xffffffffffffffff, 0xfffffffffffffffe, 0xfffffffffffffffd,
- 0x0000000000000000, 0x0000000100000000, 0xffffffff00000000,
- 0x1b09788b00000000, 0x04c5fce800000000, 0xcc0de5bf00000000,
+ 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFE, 0xFFFFFFFFFFFFFFFD,
+ 0x0000000000000000, 0x0000000100000000, 0xFFFFFFFF00000000,
+ 0x1B09788B00000000, 0x04C5FCE800000000, 0xCC0DE5BF00000000,
0x0000000200000000, 0x0000000300000000, 0x0000000400000000,
0x0000000500000000, 0x0000000800000000, 0x0000000900000000,
- 0x273a798e187937a3, 0xece3af835495a16b, 0x0b668ecc11223344, 0x0000009e,
- 0x00000043, 0x0000af73, 0x0000116b, 0x00658ecc, 0x002b3b4c, 0x88776655,
- 0x70000000, 0x07200000, 0x7fffffff, 0x56123761, 0x7fffff00,
- 0x761c4761eeeeeeee, 0x80000000eeeeeeee, 0x88888888dddddddd,
- 0xa0000000dddddddd, 0xddddddddaaaaaaaa, 0xe0000000aaaaaaaa,
- 0xeeeeeeeeeeeeeeee, 0xfffffffdeeeeeeee, 0xf0000000dddddddd,
- 0x007fffffdddddddd, 0x003fffffaaaaaaaa, 0x001fffffaaaaaaaa, 0x000fffff,
- 0x0007ffff, 0x0003ffff, 0x0001ffff, 0x0000ffff, 0x00007fff, 0x00003fff,
- 0x00001fff, 0x00000fff, 0x000007ff, 0x000003ff, 0x000001ff,
- 0x00003fffffffffff, 0x00001fffffffffff, 0x00000fffffffffff,
- 0x000007ffffffffff, 0x000003ffffffffff, 0x000001ffffffffff,
+ 0x273A798E187937A3, 0xECE3AF835495A16B, 0x0B668ECC11223344, 0x0000009E,
+ 0x00000043, 0x0000AF73, 0x0000116B, 0x00658ECC, 0x002B3B4C, 0x88776655,
+ 0x70000000, 0x07200000, 0x7FFFFFFF, 0x56123761, 0x7FFFFF00,
+ 0x761C4761EEEEEEEE, 0x80000000EEEEEEEE, 0x88888888DDDDDDDD,
+ 0xA0000000DDDDDDDD, 0xDDDDDDDDAAAAAAAA, 0xE0000000AAAAAAAA,
+ 0xEEEEEEEEEEEEEEEE, 0xFFFFFFFDEEEEEEEE, 0xF0000000DDDDDDDD,
+ 0x007FFFFFDDDDDDDD, 0x003FFFFFAAAAAAAA, 0x001FFFFFAAAAAAAA, 0x000FFFFF,
+ 0x0007FFFF, 0x0003FFFF, 0x0001FFFF, 0x0000FFFF, 0x00007FFF, 0x00003FFF,
+ 0x00001FFF, 0x00000FFF, 0x000007FF, 0x000003FF, 0x000001FF,
+ 0x00003FFFFFFFFFFF, 0x00001FFFFFFFFFFF, 0x00000FFFFFFFFFFF,
+ 0x000007FFFFFFFFFF, 0x000003FFFFFFFFFF, 0x000001FFFFFFFFFF,
0x8000008000000000, 0x8000008000000001, 0x8000000000000400,
0x8000000000000401, 0x0000000000000020,
// Bit pattern of a quiet NaN and signaling NaN, with or without
// additional payload.
- 0x7ff8000000000000, 0x7ff0000000000000, 0x7ff8123456789abc,
- 0x7ff7654321fedcba};
+ 0x7FF8000000000000, 0x7FF0000000000000, 0x7FF8123456789ABC,
+ 0x7FF7654321FEDCBA};
static constexpr Vector<const uint64_t> uint64_vector() {
return ArrayVector(uint64_array);
diff --git a/deps/v8/test/cctest/heap/heap-tester.h b/deps/v8/test/cctest/heap/heap-tester.h
index 692514d854..b7cab9e16f 100644
--- a/deps/v8/test/cctest/heap/heap-tester.h
+++ b/deps/v8/test/cctest/heap/heap-tester.h
@@ -41,6 +41,7 @@
V(Regress670675) \
V(Regress5831) \
V(Regress777177) \
+ V(Regress791582) \
V(Regress779503) \
V(RegressMissingWriteBarrierInAllocate) \
V(WriteBarriersInCopyJSObject)
diff --git a/deps/v8/test/cctest/heap/heap-utils.cc b/deps/v8/test/cctest/heap/heap-utils.cc
index 30bbde2c76..433097e359 100644
--- a/deps/v8/test/cctest/heap/heap-utils.cc
+++ b/deps/v8/test/cctest/heap/heap-utils.cc
@@ -20,7 +20,7 @@ void SealCurrentObjects(Heap* heap) {
heap->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask,
GarbageCollectionReason::kTesting);
heap->mark_compact_collector()->EnsureSweepingCompleted();
- heap->old_space()->EmptyAllocationInfo();
+ heap->old_space()->FreeLinearAllocationArea();
for (Page* page : *heap->old_space()) {
page->MarkNeverAllocateForTesting();
}
@@ -68,11 +68,10 @@ std::vector<Handle<FixedArray>> CreatePadding(Heap* heap, int padding_size,
int length;
int free_memory = padding_size;
if (tenure == i::TENURED) {
- heap->old_space()->EmptyAllocationInfo();
+ heap->old_space()->FreeLinearAllocationArea();
int overall_free_memory = static_cast<int>(heap->old_space()->Available());
CHECK(padding_size <= overall_free_memory || overall_free_memory == 0);
} else {
- heap->new_space()->DisableInlineAllocationSteps();
int overall_free_memory =
static_cast<int>(*heap->new_space()->allocation_limit_address() -
*heap->new_space()->allocation_top_address());
@@ -105,7 +104,7 @@ std::vector<Handle<FixedArray>> CreatePadding(Heap* heap, int padding_size,
void AllocateAllButNBytes(v8::internal::NewSpace* space, int extra_bytes,
std::vector<Handle<FixedArray>>* out_handles) {
- space->DisableInlineAllocationSteps();
+ PauseAllocationObserversScope pause_observers(space->heap());
int space_remaining = static_cast<int>(*space->allocation_limit_address() -
*space->allocation_top_address());
CHECK(space_remaining >= extra_bytes);
@@ -124,7 +123,7 @@ void FillCurrentPage(v8::internal::NewSpace* space,
bool FillUpOnePage(v8::internal::NewSpace* space,
std::vector<Handle<FixedArray>>* out_handles) {
- space->DisableInlineAllocationSteps();
+ PauseAllocationObserversScope pause_observers(space->heap());
int space_remaining = static_cast<int>(*space->allocation_limit_address() -
*space->allocation_top_address());
if (space_remaining == 0) return false;
@@ -176,12 +175,12 @@ void SimulateFullSpace(v8::internal::PagedSpace* space) {
if (collector->sweeping_in_progress()) {
collector->EnsureSweepingCompleted();
}
- space->EmptyAllocationInfo();
+ space->FreeLinearAllocationArea();
space->ResetFreeList();
}
void AbandonCurrentlyFreeMemory(PagedSpace* space) {
- space->EmptyAllocationInfo();
+ space->FreeLinearAllocationArea();
for (Page* page : *space) {
page->MarkNeverAllocateForTesting();
}
@@ -205,7 +204,7 @@ void ForceEvacuationCandidate(Page* page) {
int remaining = static_cast<int>(limit - top);
space->heap()->CreateFillerObjectAt(top, remaining,
ClearRecordedSlots::kNo);
- space->EmptyAllocationInfo();
+ space->FreeLinearAllocationArea();
}
}
diff --git a/deps/v8/test/cctest/heap/test-embedder-tracing.cc b/deps/v8/test/cctest/heap/test-embedder-tracing.cc
new file mode 100644
index 0000000000..82e4e3ddf8
--- /dev/null
+++ b/deps/v8/test/cctest/heap/test-embedder-tracing.cc
@@ -0,0 +1,197 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/v8.h"
+#include "src/api.h"
+#include "src/objects-inl.h"
+#include "src/objects/module.h"
+#include "src/objects/script.h"
+#include "src/objects/shared-function-info.h"
+#include "test/cctest/cctest.h"
+
+namespace v8 {
+namespace internal {
+namespace heap {
+
+namespace {
+
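+// Constructs a JS API object with two aligned-pointer internal fields, the
+// wrapper layout that V8 reports to the embedder heap tracer.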
+v8::Local<v8::Object> ConstructTraceableJSApiObject(
+ v8::Local<v8::Context> context, void* first_field, void* second_field) {
+ v8::EscapableHandleScope scope(context->GetIsolate());
+ v8::Local<v8::FunctionTemplate> function_t =
+ v8::FunctionTemplate::New(context->GetIsolate());
+ v8::Local<v8::ObjectTemplate> instance_t = function_t->InstanceTemplate();
+ instance_t->SetInternalFieldCount(2);
+ v8::Local<v8::Function> function =
+ function_t->GetFunction(context).ToLocalChecked();
+ v8::Local<v8::Object> instance =
+ function->NewInstance(context).ToLocalChecked();
+ instance->SetAlignedPointerInInternalField(0, first_field);
+ instance->SetAlignedPointerInInternalField(1, second_field);
+ CHECK(!instance.IsEmpty());
+ i::Handle<i::JSReceiver> js_obj = v8::Utils::OpenHandle(*instance);
+ CHECK_EQ(i::JS_API_OBJECT_TYPE, js_obj->map()->instance_type());
+ return scope.Escape(instance);
+}
+
+class TestEmbedderHeapTracer final : public v8::EmbedderHeapTracer {
+ public:
+ explicit TestEmbedderHeapTracer(v8::Isolate* isolate) : isolate_(isolate) {}
+
+ void RegisterV8References(
+ const std::vector<std::pair<void*, void*>>& embedder_fields) final {
+ registered_from_v8_.insert(registered_from_v8_.end(),
+ embedder_fields.begin(), embedder_fields.end());
+ }
+
+ void AddReferenceForTracing(v8::Persistent<v8::Object>* persistent) {
+ to_register_with_v8_.push_back(persistent);
+ }
+
+ bool AdvanceTracing(double deadline_in_ms,
+ AdvanceTracingActions actions) final {
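+ // Report the deferred persistent handles to V8 so the GC considers them
+ // live for this tracing round.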
+ for (auto persistent : to_register_with_v8_) {
+ persistent->RegisterExternalReference(isolate_);
+ }
+ to_register_with_v8_.clear();
+ return false;
+ }
+
+ void TracePrologue() final {}
+ void TraceEpilogue() final {}
+ void AbortTracing() final {}
+ void EnterFinalPause() final {}
+
+ bool IsRegisteredFromV8(void* first_field) const {
+ for (auto pair : registered_from_v8_) {
+ if (pair.first == first_field) return true;
+ }
+ return false;
+ }
+
+ private:
+ v8::Isolate* const isolate_;
+ std::vector<std::pair<void*, void*>> registered_from_v8_;
+ std::vector<v8::Persistent<v8::Object>*> to_register_with_v8_;
+};
+
+} // namespace
+
+TEST(V8RegisteringEmbedderReference) {
+ // Tests that wrappers are properly registered with the embedder heap
+ // tracer.
+ ManualGCScope manual_gc;
+ CcTest::InitializeVM();
+ v8::Isolate* isolate = CcTest::isolate();
+ TestEmbedderHeapTracer tracer(isolate);
+ isolate->SetEmbedderHeapTracer(&tracer);
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ v8::Context::Scope context_scope(context);
+
+ void* first_field = reinterpret_cast<void*>(0x2);
+ v8::Local<v8::Object> api_object =
+ ConstructTraceableJSApiObject(context, first_field, nullptr);
+ CHECK(!api_object.IsEmpty());
+ CcTest::CollectGarbage(i::OLD_SPACE);
+ CHECK(tracer.IsRegisteredFromV8(first_field));
+}
+
+TEST(EmbedderRegisteringV8Reference) {
+ // Tests that references that are registered by the embedder heap tracer are
+ // considered live by V8.
+ ManualGCScope manual_gc;
+ CcTest::InitializeVM();
+ v8::Isolate* isolate = CcTest::isolate();
+ TestEmbedderHeapTracer tracer(isolate);
+ isolate->SetEmbedderHeapTracer(&tracer);
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ v8::Context::Scope context_scope(context);
+
+ v8::Persistent<v8::Object> g;
+ {
+ v8::HandleScope inner_scope(isolate);
+ v8::Local<v8::Object> o =
+ v8::Local<v8::Object>::New(isolate, v8::Object::New(isolate));
+ g.Reset(isolate, o);
+ g.SetWeak();
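+ // The handle is weak, so only the reference registered with the tracer
+ // below keeps the object alive across the GC.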
+ }
+ tracer.AddReferenceForTracing(&g);
+ CcTest::CollectGarbage(i::OLD_SPACE);
+ CHECK(!g.IsEmpty());
+}
+
+namespace {
+
+void ResurrectingFinalizer(
+ const v8::WeakCallbackInfo<v8::Global<v8::Object>>& data) {
+ data.GetParameter()->ClearWeak();
+}
+
+} // namespace
+
+TEST(TracingInRevivedSubgraph) {
+  // Tests that wrappers are traced when they are contained within a subgraph
+ // that is revived by a finalizer.
+ ManualGCScope manual_gc;
+ CcTest::InitializeVM();
+ v8::Isolate* isolate = CcTest::isolate();
+ TestEmbedderHeapTracer tracer(isolate);
+ isolate->SetEmbedderHeapTracer(&tracer);
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ v8::Context::Scope context_scope(context);
+
+ v8::Global<v8::Object> g;
+ void* first_field = reinterpret_cast<void*>(0x4);
+ {
+ v8::HandleScope inner_scope(isolate);
+ v8::Local<v8::Object> api_object =
+ ConstructTraceableJSApiObject(context, first_field, nullptr);
+ CHECK(!api_object.IsEmpty());
+ v8::Local<v8::Object> o =
+ v8::Local<v8::Object>::New(isolate, v8::Object::New(isolate));
+ o->Set(context, v8_str("link"), api_object).FromJust();
+ g.Reset(isolate, o);
+ g.SetWeak(&g, ResurrectingFinalizer, v8::WeakCallbackType::kFinalizer);
+ }
+ CcTest::CollectGarbage(i::OLD_SPACE);
+ CHECK(tracer.IsRegisteredFromV8(first_field));
+}
+
+TEST(TracingInEphemerons) {
+ // Tests that wrappers that are part of ephemerons are traced.
+ ManualGCScope manual_gc;
+ CcTest::InitializeVM();
+ v8::Isolate* isolate = CcTest::isolate();
+ TestEmbedderHeapTracer tracer(isolate);
+ isolate->SetEmbedderHeapTracer(&tracer);
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ v8::Context::Scope context_scope(context);
+
+ v8::Local<v8::Object> key =
+ v8::Local<v8::Object>::New(isolate, v8::Object::New(isolate));
+ void* first_field = reinterpret_cast<void*>(0x8);
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ Handle<JSWeakMap> weak_map = i_isolate->factory()->NewJSWeakMap();
+ {
+ v8::HandleScope inner_scope(isolate);
+ v8::Local<v8::Object> api_object =
+ ConstructTraceableJSApiObject(context, first_field, nullptr);
+ CHECK(!api_object.IsEmpty());
+ Handle<JSObject> js_key =
+ handle(JSObject::cast(*v8::Utils::OpenHandle(*key)));
+ Handle<JSReceiver> js_api_object = v8::Utils::OpenHandle(*api_object);
+ int32_t hash = js_key->GetOrCreateHash(i_isolate)->value();
+ JSWeakCollection::Set(weak_map, js_key, js_api_object, hash);
+ }
+ CcTest::CollectGarbage(i::OLD_SPACE);
+ CHECK(tracer.IsRegisteredFromV8(first_field));
+}
+
+} // namespace heap
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/heap/test-heap.cc b/deps/v8/test/cctest/heap/test-heap.cc
index d70c1e502c..d14d39b9ee 100644
--- a/deps/v8/test/cctest/heap/test-heap.cc
+++ b/deps/v8/test/cctest/heap/test-heap.cc
@@ -751,7 +751,7 @@ TEST(DeleteWeakGlobalHandle) {
TEST(BytecodeArray) {
if (FLAG_never_compact) return;
- static const uint8_t kRawBytes[] = {0xc3, 0x7e, 0xa5, 0x5a};
+ static const uint8_t kRawBytes[] = {0xC3, 0x7E, 0xA5, 0x5A};
static const int kRawBytesSize = sizeof(kRawBytes);
static const int kFrameSize = 32;
static const int kParameterCount = 2;
@@ -810,7 +810,7 @@ TEST(BytecodeArray) {
}
TEST(BytecodeArrayAging) {
- static const uint8_t kRawBytes[] = {0xc3, 0x7e, 0xa5, 0x5a};
+ static const uint8_t kRawBytes[] = {0xC3, 0x7E, 0xA5, 0x5A};
static const int kRawBytesSize = sizeof(kRawBytes);
static const int kFrameSize = 32;
static const int kParameterCount = 2;
@@ -1176,7 +1176,7 @@ TEST(StringAllocation) {
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- const unsigned char chars[] = { 0xe5, 0xa4, 0xa7 };
+ const unsigned char chars[] = {0xE5, 0xA4, 0xA7};
for (int length = 0; length < 100; length++) {
v8::HandleScope scope(CcTest::isolate());
char* non_one_byte = NewArray<char>(3 * length + 1);
@@ -1704,7 +1704,7 @@ static Address AlignOldSpace(AllocationAlignment alignment, int offset) {
}
Address top = *top_addr;
// Now force the remaining allocation onto the free list.
- CcTest::heap()->old_space()->EmptyAllocationInfo();
+ CcTest::heap()->old_space()->FreeLinearAllocationArea();
return top;
}
@@ -3943,7 +3943,8 @@ static Handle<Code> DummyOptimizedCode(Isolate* isolate) {
v8::internal::CodeObjectRequired::kYes);
CodeDesc desc;
masm.Push(isolate->factory()->undefined_value());
- masm.Drop(1);
+ masm.Push(isolate->factory()->undefined_value());
+ masm.Drop(2);
masm.GetCode(isolate, &desc);
Handle<Object> undefined(isolate->heap()->undefined_value(), isolate);
Handle<Code> code =
@@ -5175,7 +5176,7 @@ HEAP_TEST(Regress589413) {
// Make sure the byte arrays will be promoted on the next GC.
CcTest::CollectGarbage(NEW_SPACE);
// This number is close to large free list category threshold.
- const int N = 0x3eee;
+ const int N = 0x3EEE;
{
std::vector<FixedArray*> arrays;
std::set<Page*> pages;
@@ -5676,9 +5677,8 @@ TEST(UncommitUnusedLargeObjectMemory) {
CcTest::CollectAllGarbage();
CHECK(chunk->CommittedPhysicalMemory() < committed_memory_before);
- size_t shrinked_size =
- RoundUp((array->address() - chunk->address()) + array->Size(),
- base::OS::CommitPageSize());
+ size_t shrinked_size = RoundUp(
+ (array->address() - chunk->address()) + array->Size(), CommitPageSize());
CHECK_EQ(shrinked_size, chunk->CommittedPhysicalMemory());
}
diff --git a/deps/v8/test/cctest/heap/test-incremental-marking.cc b/deps/v8/test/cctest/heap/test-incremental-marking.cc
index a28f9a1ad7..0548ba4ba4 100644
--- a/deps/v8/test/cctest/heap/test-incremental-marking.cc
+++ b/deps/v8/test/cctest/heap/test-incremental-marking.cc
@@ -33,11 +33,14 @@ namespace heap {
class MockPlatform : public TestPlatform {
public:
- MockPlatform() : task_(nullptr) {
+ MockPlatform() : task_(nullptr), old_platform_(i::V8::GetCurrentPlatform()) {
// Now that it's completely constructed, make this the current platform.
i::V8::SetPlatformForTesting(this);
}
- virtual ~MockPlatform() { delete task_; }
+ virtual ~MockPlatform() {
+ delete task_;
+ i::V8::SetPlatformForTesting(old_platform_);
+ }
void CallOnForegroundThread(v8::Isolate* isolate, Task* task) override {
task_ = task;
@@ -56,6 +59,7 @@ class MockPlatform : public TestPlatform {
private:
Task* task_;
+ v8::Platform* old_platform_;
};
TEST(IncrementalMarkingUsingTasks) {
diff --git a/deps/v8/test/cctest/heap/test-mark-compact.cc b/deps/v8/test/cctest/heap/test-mark-compact.cc
index e7f3e93160..2eb5c567c6 100644
--- a/deps/v8/test/cctest/heap/test-mark-compact.cc
+++ b/deps/v8/test/cctest/heap/test-mark-compact.cc
@@ -331,7 +331,7 @@ TEST(Regress5829) {
array->set_length(9);
heap->CreateFillerObjectAt(old_end - kPointerSize, kPointerSize,
ClearRecordedSlots::kNo);
- heap->old_space()->EmptyAllocationInfo();
+ heap->old_space()->FreeLinearAllocationArea();
Page* page = Page::FromAddress(array->address());
IncrementalMarking::MarkingState* marking_state = marking->marking_state();
for (auto object_and_size :
diff --git a/deps/v8/test/cctest/heap/test-spaces.cc b/deps/v8/test/cctest/heap/test-spaces.cc
index d9deb10475..bec9f978e8 100644
--- a/deps/v8/test/cctest/heap/test-spaces.cc
+++ b/deps/v8/test/cctest/heap/test-spaces.cc
@@ -102,13 +102,12 @@ static void VerifyMemoryChunk(Isolate* isolate,
reserve_area_size, commit_area_size, executable, nullptr);
size_t alignment = code_range != nullptr && code_range->valid()
? MemoryChunk::kAlignment
- : base::OS::CommitPageSize();
+ : CommitPageSize();
size_t reserved_size =
((executable == EXECUTABLE))
? RoundUp(header_size + guard_size + reserve_area_size + guard_size,
alignment)
- : RoundUp(header_size + reserve_area_size,
- base::OS::CommitPageSize());
+ : RoundUp(header_size + reserve_area_size, CommitPageSize());
CHECK(memory_chunk->size() == reserved_size);
CHECK(memory_chunk->area_start() <
memory_chunk->address() + memory_chunk->size());
@@ -231,7 +230,6 @@ TEST(MemoryAllocator) {
NOT_EXECUTABLE);
first_page->InsertAfter(faked_space.anchor()->prev_page());
- CHECK(Page::IsValid(first_page));
CHECK(first_page->next_page() == faked_space.anchor());
total_pages++;
@@ -243,7 +241,6 @@ TEST(MemoryAllocator) {
Page* other = memory_allocator->AllocatePage(
faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
NOT_EXECUTABLE);
- CHECK(Page::IsValid(other));
total_pages++;
other->InsertAfter(first_page);
int page_count = 0;
@@ -254,7 +251,7 @@ TEST(MemoryAllocator) {
CHECK(total_pages == page_count);
Page* second_page = first_page->next_page();
- CHECK(Page::IsValid(second_page));
+ CHECK_NOT_NULL(second_page);
// OldSpace's destructor will tear down the space and free up all pages.
}
@@ -442,7 +439,7 @@ class Observer : public AllocationObserver {
explicit Observer(intptr_t step_size)
: AllocationObserver(step_size), count_(0) {}
- void Step(int bytes_allocated, Address, size_t) override { count_++; }
+ void Step(int bytes_allocated, Address addr, size_t) override { count_++; }
int count() const { return count_; }
@@ -621,6 +618,47 @@ HEAP_TEST(Regress777177) {
old_space->RemoveAllocationObserver(&observer);
}
+HEAP_TEST(Regress791582) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+ HandleScope scope(isolate);
+ NewSpace* new_space = heap->new_space();
+ if (new_space->TotalCapacity() < new_space->MaximumCapacity()) {
+ new_space->Grow();
+ }
+
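+ // Number of bytes left in the current linear allocation area (top..limit).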
+ int until_page_end = static_cast<int>(new_space->limit() - new_space->top());
+
+ if (until_page_end % kPointerSize != 0) {
+    // The test only works if the size of the allocation area is a multiple
+    // of the pointer size. This is usually the case unless some allocation
+    // observer is already active (e.g. the incremental marking observer).
+ return;
+ }
+
+ Observer observer(128);
+ new_space->AddAllocationObserver(&observer);
+
+ {
+ AllocationResult result =
+ new_space->AllocateRaw(until_page_end, kWordAligned);
+ HeapObject* obj = result.ToObjectChecked();
+ heap->CreateFillerObjectAt(obj->address(), until_page_end,
+ ClearRecordedSlots::kNo);
+ // Simulate allocation folding moving the top pointer back.
+ *new_space->allocation_top_address() = obj->address();
+ }
+
+ {
+    // This triggers the assert from crbug.com/791582.
+ AllocationResult result = new_space->AllocateRaw(256, kWordAligned);
+ HeapObject* obj = result.ToObjectChecked();
+ heap->CreateFillerObjectAt(obj->address(), 256, ClearRecordedSlots::kNo);
+ }
+ new_space->RemoveAllocationObserver(&observer);
+}
+
TEST(ShrinkPageToHighWaterMarkFreeSpaceEnd) {
FLAG_stress_incremental_marking = false;
CcTest::InitializeVM();
@@ -636,8 +674,8 @@ TEST(ShrinkPageToHighWaterMarkFreeSpaceEnd) {
// Reset space so high water mark is consistent.
PagedSpace* old_space = CcTest::heap()->old_space();
+ old_space->FreeLinearAllocationArea();
old_space->ResetFreeList();
- old_space->EmptyAllocationInfo();
HeapObject* filler =
HeapObject::FromAddress(array->address() + array->Size());
@@ -645,7 +683,7 @@ TEST(ShrinkPageToHighWaterMarkFreeSpaceEnd) {
size_t shrunk = old_space->ShrinkPageToHighWaterMark(page);
size_t should_have_shrunk =
RoundDown(static_cast<size_t>(Page::kAllocatableMemory - array->Size()),
- base::OS::CommitPageSize());
+ CommitPageSize());
CHECK_EQ(should_have_shrunk, shrunk);
}
@@ -665,7 +703,7 @@ TEST(ShrinkPageToHighWaterMarkNoFiller) {
// Reset space so high water mark and fillers are consistent.
PagedSpace* old_space = CcTest::heap()->old_space();
old_space->ResetFreeList();
- old_space->EmptyAllocationInfo();
+ old_space->FreeLinearAllocationArea();
size_t shrunk = old_space->ShrinkPageToHighWaterMark(page);
CHECK_EQ(0u, shrunk);
@@ -687,8 +725,8 @@ TEST(ShrinkPageToHighWaterMarkOneWordFiller) {
// Reset space so high water mark and fillers are consistent.
PagedSpace* old_space = CcTest::heap()->old_space();
+ old_space->FreeLinearAllocationArea();
old_space->ResetFreeList();
- old_space->EmptyAllocationInfo();
HeapObject* filler =
HeapObject::FromAddress(array->address() + array->Size());
@@ -714,8 +752,8 @@ TEST(ShrinkPageToHighWaterMarkTwoWordFiller) {
// Reset space so high water mark and fillers are consistent.
PagedSpace* old_space = CcTest::heap()->old_space();
+ old_space->FreeLinearAllocationArea();
old_space->ResetFreeList();
- old_space->EmptyAllocationInfo();
HeapObject* filler =
HeapObject::FromAddress(array->address() + array->Size());
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
index 312316c9b7..50d084fbc1 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
@@ -14,7 +14,7 @@ snippet: "
"
frame size: 9
parameter count: 1
-bytecode array length: 230
+bytecode array length: 216
bytecodes: [
B(Ldar), R(0),
B(JumpIfUndefined), U8(18),
@@ -23,7 +23,7 @@ bytecodes: [
B(RestoreGeneratorState), R(0),
B(Star), R(1),
B(SwitchOnSmiNoFeedback), U8(0), U8(2), I8(0),
- B(Abort), U8(42),
+ B(Abort), U8(15),
B(LdaSmi), I8(-2),
B(Star), R(1),
B(Mov), R(closure), R(2),
@@ -36,10 +36,7 @@ bytecodes: [
B(Ldar), R(0),
/* 17 E> */ B(SuspendGenerator), R(0), R(0), U8(6), U8(0),
/* 22 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(0), R(0), U8(6),
- B(LdaSmi), I8(-2),
- B(Star), R(1),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(0), U8(1),
+ B(ResumeGenerator), R(0), R(1), R(0), U8(6),
B(Star), R(6),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
@@ -48,17 +45,14 @@ bytecodes: [
B(LdaZero),
B(Star), R(2),
B(Mov), R(6), R(3),
- B(Jump), U8(107),
+ B(Jump), U8(100),
B(LdaUndefined),
B(Star), R(7),
B(Mov), R(0), R(6),
B(CallJSRuntime), U8(%async_generator_await_uncaught), R(6), U8(2),
B(SuspendGenerator), R(0), R(0), U8(6), U8(1),
/* 22 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(0), R(0), U8(6),
- B(LdaSmi), I8(-2),
- B(Star), R(1),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(0), U8(1),
+ B(ResumeGenerator), R(0), R(1), R(0), U8(6),
B(Star), R(6),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(Star), R(7),
@@ -120,7 +114,7 @@ bytecodes: [
]
constant pool: [
Smi [37],
- Smi [86],
+ Smi [79],
Smi [15],
Smi [7],
ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
@@ -130,8 +124,8 @@ constant pool: [
Smi [23],
]
handlers: [
- [40, 175, 183],
- [43, 136, 138],
+ [40, 161, 169],
+ [43, 122, 124],
]
---
@@ -141,7 +135,7 @@ snippet: "
"
frame size: 9
parameter count: 1
-bytecode array length: 283
+bytecode array length: 262
bytecodes: [
B(Ldar), R(0),
B(JumpIfUndefined), U8(18),
@@ -150,7 +144,7 @@ bytecodes: [
B(RestoreGeneratorState), R(0),
B(Star), R(1),
B(SwitchOnSmiNoFeedback), U8(0), U8(3), I8(0),
- B(Abort), U8(42),
+ B(Abort), U8(15),
B(LdaSmi), I8(-2),
B(Star), R(1),
B(Mov), R(closure), R(2),
@@ -163,10 +157,7 @@ bytecodes: [
B(Ldar), R(0),
/* 17 E> */ B(SuspendGenerator), R(0), R(0), U8(6), U8(0),
/* 31 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(0), R(0), U8(6),
- B(LdaSmi), I8(-2),
- B(Star), R(1),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(0), U8(1),
+ B(ResumeGenerator), R(0), R(1), R(0), U8(6),
B(Star), R(6),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(3), U8(2), I8(0),
@@ -175,7 +166,7 @@ bytecodes: [
B(LdaZero),
B(Star), R(2),
B(Mov), R(6), R(3),
- B(Jump), U8(160),
+ B(Jump), U8(146),
/* 22 S> */ B(LdaSmi), I8(42),
B(Star), R(7),
B(LdaFalse),
@@ -184,10 +175,7 @@ bytecodes: [
B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorYield), R(6), U8(3),
B(SuspendGenerator), R(0), R(0), U8(6), U8(1),
/* 31 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(0), R(0), U8(6),
- B(LdaSmi), I8(-2),
- B(Star), R(1),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(0), U8(1),
+ B(ResumeGenerator), R(0), R(1), R(0), U8(6),
B(Star), R(6),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(5), U8(2), I8(0),
@@ -196,17 +184,14 @@ bytecodes: [
B(LdaZero),
B(Star), R(2),
B(Mov), R(6), R(3),
- B(Jump), U8(107),
+ B(Jump), U8(100),
B(LdaUndefined),
B(Star), R(7),
B(Mov), R(0), R(6),
B(CallJSRuntime), U8(%async_generator_await_uncaught), R(6), U8(2),
B(SuspendGenerator), R(0), R(0), U8(6), U8(2),
/* 31 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(0), R(0), U8(6),
- B(LdaSmi), I8(-2),
- B(Star), R(1),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(0), U8(1),
+ B(ResumeGenerator), R(0), R(1), R(0), U8(6),
B(Star), R(6),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(Star), R(7),
@@ -268,8 +253,8 @@ bytecodes: [
]
constant pool: [
Smi [37],
- Smi [90],
- Smi [139],
+ Smi [83],
+ Smi [125],
Smi [15],
Smi [7],
Smi [15],
@@ -281,8 +266,8 @@ constant pool: [
Smi [23],
]
handlers: [
- [40, 228, 236],
- [43, 189, 191],
+ [40, 207, 215],
+ [43, 168, 170],
]
---
@@ -290,263 +275,248 @@ snippet: "
async function* f() { for (let x of [42]) yield x }
f();
"
-frame size: 22
+frame size: 23
parameter count: 1
-bytecode array length: 571
+bytecode array length: 536
bytecodes: [
B(Ldar), R(2),
B(JumpIfUndefined), U8(18),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(2), U8(1),
- B(PushContext), R(11),
+ B(PushContext), R(12),
B(RestoreGeneratorState), R(2),
- B(Star), R(10),
+ B(Star), R(11),
B(SwitchOnSmiNoFeedback), U8(0), U8(3), I8(0),
- B(Abort), U8(42),
+ B(Abort), U8(15),
B(LdaSmi), I8(-2),
- B(Star), R(10),
- B(Mov), R(closure), R(11),
- B(Mov), R(this), R(12),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(11), U8(2),
+ B(Star), R(11),
+ B(Mov), R(closure), R(12),
+ B(Mov), R(this), R(13),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(12), U8(2),
B(Star), R(2),
/* 17 E> */ B(StackCheck),
- B(Mov), R(context), R(13),
B(Mov), R(context), R(14),
+ B(Mov), R(context), R(15),
B(Ldar), R(2),
- /* 17 E> */ B(SuspendGenerator), R(2), R(0), U8(15), U8(0),
+ /* 17 E> */ B(SuspendGenerator), R(2), R(0), U8(16), U8(0),
/* 50 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(2), R(0), U8(15),
- B(LdaSmi), I8(-2),
- B(Star), R(10),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(2), U8(1),
- B(Star), R(15),
+ B(ResumeGenerator), R(2), R(11), R(0), U8(16),
+ B(Star), R(16),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
B(SwitchOnSmiNoFeedback), U8(3), U8(2), I8(0),
- B(Ldar), R(15),
+ B(Ldar), R(16),
/* 17 E> */ B(Throw),
B(LdaZero),
- B(Star), R(11),
- B(Mov), R(15), R(12),
- B(JumpConstant), U8(21),
+ B(Star), R(12),
+ B(Mov), R(16), R(13),
+ B(JumpConstant), U8(20),
B(LdaZero),
- B(Star), R(6),
- B(Mov), R(context), R(17),
+ B(Star), R(7),
B(Mov), R(context), R(18),
+ B(Mov), R(context), R(19),
/* 36 S> */ B(CreateArrayLiteral), U8(5), U8(0), U8(37),
- B(Star), R(19),
- B(LdaNamedProperty), R(19), U8(6), U8(1),
B(Star), R(20),
- B(CallProperty0), R(20), R(19), U8(3),
+ B(LdaNamedProperty), R(20), U8(6), U8(1),
+ B(Star), R(21),
+ B(CallProperty0), R(21), R(20), U8(3),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star), R(4),
- B(Ldar), R(10),
- B(SwitchOnSmiNoFeedback), U8(7), U8(1), I8(1),
+ /* 36 E> */ B(LdaNamedProperty), R(4), U8(7), U8(5),
+ B(Star), R(5),
+ B(Ldar), R(11),
+ B(SwitchOnSmiNoFeedback), U8(8), U8(1), I8(1),
B(LdaSmi), I8(-2),
- /* 36 E> */ B(TestEqualStrictNoFeedback), R(10),
+ B(TestEqualStrictNoFeedback), R(11),
B(JumpIfTrue), U8(4),
- B(Abort), U8(42),
- /* 31 S> */ B(LdaNamedProperty), R(4), U8(8), U8(5),
- B(Star), R(19),
- B(CallProperty0), R(19), R(4), U8(7),
- B(Star), R(5),
- /* 31 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(5), U8(1),
+ B(Abort), U8(15),
+ /* 31 S> */ B(CallProperty0), R(5), R(4), U8(7),
+ B(Star), R(6),
+ /* 31 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(6), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(5), U8(1),
- B(LdaNamedProperty), R(5), U8(9), U8(9),
- B(JumpIfToBooleanTrue), U8(77),
- B(LdaNamedProperty), R(5), U8(10), U8(11),
- B(Star), R(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
+ B(LdaNamedProperty), R(6), U8(9), U8(9),
+ B(JumpIfToBooleanTrue), U8(70),
+ B(LdaNamedProperty), R(6), U8(10), U8(11),
+ B(Star), R(8),
B(LdaSmi), I8(2),
- B(Star), R(6),
- B(Mov), R(7), R(3),
+ B(Star), R(7),
+ B(Mov), R(8), R(3),
/* 22 E> */ B(StackCheck),
B(Mov), R(3), R(0),
/* 42 S> */ B(LdaFalse),
- B(Star), R(21),
- B(Mov), R(2), R(19),
- B(Mov), R(0), R(20),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorYield), R(19), U8(3),
- B(SuspendGenerator), R(2), R(0), U8(19), U8(1),
+ B(Star), R(22),
+ B(Mov), R(2), R(20),
+ B(Mov), R(0), R(21),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorYield), R(20), U8(3),
+ B(SuspendGenerator), R(2), R(0), U8(20), U8(1),
/* 50 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(2), R(0), U8(19),
- B(LdaSmi), I8(-2),
- B(Star), R(10),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(2), U8(1),
- B(Star), R(19),
+ B(ResumeGenerator), R(2), R(11), R(0), U8(20),
+ B(Star), R(20),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
B(SwitchOnSmiNoFeedback), U8(11), U8(2), I8(0),
- B(Ldar), R(19),
+ B(Ldar), R(20),
/* 42 E> */ B(Throw),
B(LdaZero),
- B(Star), R(15),
- B(Mov), R(19), R(16),
+ B(Star), R(16),
+ B(Mov), R(20), R(17),
B(Jump), U8(62),
B(LdaZero),
- B(Star), R(6),
- B(JumpLoop), U8(116), I8(0),
+ B(Star), R(7),
+ B(JumpLoop), U8(103), I8(0),
B(Jump), U8(40),
- B(Star), R(19),
+ B(Star), R(20),
B(Ldar), R(closure),
- B(CreateCatchContext), R(19), U8(13), U8(14),
- B(Star), R(18),
+ B(CreateCatchContext), R(20), U8(13), U8(14),
+ B(Star), R(19),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(18),
- B(PushContext), R(19),
+ B(Ldar), R(19),
+ B(PushContext), R(20),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(6), U8(13),
+ B(TestEqualStrict), R(7), U8(13),
B(JumpIfFalse), U8(6),
B(LdaSmi), I8(1),
- B(Star), R(6),
+ B(Star), R(7),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(20),
- B(CallRuntime), U16(Runtime::kReThrow), R(20), U8(1),
- B(PopContext), R(19),
+ B(Star), R(21),
+ B(CallRuntime), U16(Runtime::kReThrow), R(21), U8(1),
+ B(PopContext), R(20),
B(LdaSmi), I8(-1),
+ B(Star), R(17),
B(Star), R(16),
- B(Star), R(15),
B(Jump), U8(8),
- B(Star), R(16),
+ B(Star), R(17),
B(LdaSmi), I8(1),
- B(Star), R(15),
+ B(Star), R(16),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(17),
+ B(Star), R(18),
B(LdaZero),
- B(TestEqualStrict), R(6), U8(14),
- B(JumpIfTrue), U8(104),
+ B(TestEqualStrict), R(7), U8(14),
+ B(JumpIfTrue), U8(90),
B(LdaNamedProperty), R(4), U8(15), U8(15),
- B(Star), R(8),
+ B(Star), R(9),
B(TestUndetectable),
B(JumpIfFalse), U8(4),
- B(Jump), U8(93),
+ B(Jump), U8(79),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(6), U8(17),
- B(JumpIfFalse), U8(61),
- B(Ldar), R(8),
+ B(TestEqualStrict), R(7), U8(17),
+ B(JumpIfFalse), U8(47),
+ B(Ldar), R(9),
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(143),
- B(Star), R(18),
- B(LdaConstant), U8(16),
+ B(Wide), B(LdaSmi), I16(144),
B(Star), R(19),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(18), U8(2),
+ B(LdaConstant), U8(16),
+ B(Star), R(20),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(19), U8(2),
B(Throw),
- B(Mov), R(context), R(18),
- B(Mov), R(8), R(19),
- B(Mov), R(4), R(20),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(19), U8(2),
- B(Jump), U8(20),
- B(Star), R(19),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(19), U8(13), U8(17),
- B(Star), R(18),
+ B(Mov), R(context), R(19),
+ B(Mov), R(9), R(20),
+ B(Mov), R(4), R(21),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(20), U8(2),
+ B(Jump), U8(6),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(18),
- B(PushContext), R(19),
- B(PopContext), R(19),
+ B(Ldar), R(19),
B(Jump), U8(27),
- B(Mov), R(8), R(18),
- B(Mov), R(4), R(19),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(18), U8(2),
- B(Star), R(9),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(9), U8(1),
+ B(Mov), R(9), R(19),
+ B(Mov), R(4), R(20),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(19), U8(2),
+ B(Star), R(10),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(10), U8(1),
B(JumpIfToBooleanFalse), U8(4),
B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(9), U8(1),
- B(Ldar), R(17),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
+ B(Ldar), R(18),
B(SetPendingMessage),
- B(Ldar), R(15),
- B(SwitchOnSmiNoFeedback), U8(18), U8(2), I8(0),
+ B(Ldar), R(16),
+ B(SwitchOnSmiNoFeedback), U8(17), U8(2), I8(0),
B(Jump), U8(13),
B(LdaZero),
- B(Star), R(11),
- B(Mov), R(16), R(12),
- B(Jump), U8(110),
- B(Ldar), R(16),
+ B(Star), R(12),
+ B(Mov), R(17), R(13),
+ B(Jump), U8(103),
+ B(Ldar), R(17),
B(ReThrow),
B(LdaUndefined),
- B(Star), R(16),
- B(Mov), R(2), R(15),
- B(CallJSRuntime), U8(%async_generator_await_uncaught), R(15), U8(2),
- B(SuspendGenerator), R(2), R(0), U8(15), U8(2),
+ B(Star), R(17),
+ B(Mov), R(2), R(16),
+ B(CallJSRuntime), U8(%async_generator_await_uncaught), R(16), U8(2),
+ B(SuspendGenerator), R(2), R(0), U8(16), U8(2),
/* 50 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(2), R(0), U8(15),
- B(LdaSmi), I8(-2),
- B(Star), R(10),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(2), U8(1),
- B(Star), R(15),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(ResumeGenerator), R(2), R(11), R(0), U8(16),
B(Star), R(16),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(Star), R(17),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(16),
+ B(TestEqualStrictNoFeedback), R(17),
B(JumpIfTrue), U8(5),
- B(Ldar), R(15),
+ B(Ldar), R(16),
B(ReThrow),
B(LdaZero),
- B(Star), R(11),
- B(Mov), R(15), R(12),
+ B(Star), R(12),
+ B(Mov), R(16), R(13),
B(Jump), U8(55),
B(Jump), U8(39),
- B(Star), R(15),
+ B(Star), R(16),
B(Ldar), R(closure),
- B(CreateCatchContext), R(15), U8(13), U8(20),
- B(Star), R(14),
+ B(CreateCatchContext), R(16), U8(13), U8(19),
+ B(Star), R(15),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(14),
- B(PushContext), R(15),
+ B(Ldar), R(15),
+ B(PushContext), R(16),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(17),
- B(Mov), R(2), R(16),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorReject), R(16), U8(2),
- B(PopContext), R(15),
- B(Star), R(12),
+ B(Star), R(18),
+ B(Mov), R(2), R(17),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorReject), R(17), U8(2),
+ B(PopContext), R(16),
+ B(Star), R(13),
B(LdaSmi), I8(1),
- B(Star), R(11),
+ B(Star), R(12),
B(Jump), U8(16),
B(LdaSmi), I8(-1),
+ B(Star), R(13),
B(Star), R(12),
- B(Star), R(11),
B(Jump), U8(8),
- B(Star), R(12),
+ B(Star), R(13),
B(LdaSmi), I8(2),
- B(Star), R(11),
+ B(Star), R(12),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(13),
+ B(Star), R(14),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorClose), R(2), U8(1),
- B(Ldar), R(13),
+ B(Ldar), R(14),
B(SetPendingMessage),
- B(Ldar), R(11),
- B(SwitchOnSmiNoFeedback), U8(22), U8(3), I8(0),
+ B(Ldar), R(12),
+ B(SwitchOnSmiNoFeedback), U8(21), U8(3), I8(0),
B(Jump), U8(22),
B(LdaTrue),
- B(Star), R(16),
- B(Mov), R(2), R(14),
- B(Mov), R(12), R(15),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorResolve), R(14), U8(3),
+ B(Star), R(17),
+ B(Mov), R(2), R(15),
+ B(Mov), R(13), R(16),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorResolve), R(15), U8(3),
/* 50 S> */ B(Return),
- B(Ldar), R(12),
+ B(Ldar), R(13),
/* 50 S> */ B(Return),
- B(Ldar), R(12),
+ B(Ldar), R(13),
B(ReThrow),
B(LdaUndefined),
/* 50 S> */ B(Return),
]
constant pool: [
Smi [37],
- Smi [104],
- Smi [427],
+ Smi [103],
+ Smi [399],
Smi [15],
Smi [7],
TUPLE2_TYPE,
SYMBOL_TYPE,
- Smi [78],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
+ Smi [72],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
Smi [15],
@@ -555,21 +525,20 @@ constant pool: [
FIXED_ARRAY_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
- FIXED_ARRAY_TYPE,
Smi [6],
Smi [14],
FIXED_ARRAY_TYPE,
- Smi [448],
+ Smi [420],
Smi [6],
Smi [20],
Smi [23],
]
handlers: [
- [40, 516, 524],
- [43, 477, 479],
- [90, 277, 285],
- [93, 237, 239],
- [346, 356, 358],
+ [40, 481, 489],
+ [43, 442, 444],
+ [83, 263, 271],
+ [86, 223, 225],
+ [332, 342, 344],
]
---
@@ -578,9 +547,9 @@ snippet: "
async function* f() { yield* g() }
f();
"
-frame size: 17
+frame size: 18
parameter count: 1
-bytecode array length: 560
+bytecode array length: 526
bytecodes: [
B(Ldar), R(0),
B(JumpIfUndefined), U8(18),
@@ -589,7 +558,7 @@ bytecodes: [
B(RestoreGeneratorState), R(0),
B(Star), R(1),
B(SwitchOnSmiNoFeedback), U8(0), U8(5), I8(0),
- B(Abort), U8(42),
+ B(Abort), U8(15),
B(LdaSmi), I8(-2),
B(Star), R(1),
B(Mov), R(closure), R(2),
@@ -602,10 +571,7 @@ bytecodes: [
B(Ldar), R(0),
/* 44 E> */ B(SuspendGenerator), R(0), R(0), U8(6), U8(0),
/* 60 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(0), R(0), U8(6),
- B(LdaSmi), I8(-2),
- B(Star), R(1),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(0), U8(1),
+ B(ResumeGenerator), R(0), R(1), R(0), U8(6),
B(Star), R(6),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(5), U8(2), I8(0),
@@ -616,44 +582,44 @@ bytecodes: [
B(Mov), R(6), R(3),
B(JumpConstant), U8(22),
/* 49 S> */ B(LdaGlobal), U8(7), U8(0),
- B(Star), R(12),
- /* 56 E> */ B(CallUndefinedReceiver0), R(12), U8(2),
- B(Star), R(10),
- B(LdaNamedProperty), R(10), U8(8), U8(4),
+ B(Star), R(13),
+ /* 56 E> */ B(CallUndefinedReceiver0), R(13), U8(2),
+ B(Star), R(11),
+ B(LdaNamedProperty), R(11), U8(8), U8(4),
B(JumpIfUndefined), U8(17),
B(JumpIfNull), U8(15),
- B(Star), R(11),
- B(CallProperty0), R(11), R(10), U8(6),
+ B(Star), R(12),
+ B(CallProperty0), R(12), R(11), U8(6),
B(JumpIfJSReceiver), U8(23),
B(CallRuntime), U16(Runtime::kThrowSymbolAsyncIteratorInvalid), R(0), U8(0),
- B(LdaNamedProperty), R(10), U8(9), U8(8),
- B(Star), R(11),
- B(CallProperty0), R(11), R(10), U8(10),
- B(Star), R(11),
- B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(11), U8(1),
+ B(LdaNamedProperty), R(11), U8(9), U8(8),
+ B(Star), R(12),
+ B(CallProperty0), R(12), R(11), U8(10),
+ B(Star), R(12),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(12), U8(1),
B(Star), R(8),
+ B(LdaNamedProperty), R(8), U8(10), U8(12),
+ B(Star), R(10),
B(LdaUndefined),
B(Star), R(9),
B(LdaZero),
B(Star), R(7),
B(Ldar), R(1),
- B(SwitchOnSmiNoFeedback), U8(10), U8(3), I8(1),
+ B(SwitchOnSmiNoFeedback), U8(11), U8(3), I8(1),
B(LdaSmi), I8(-2),
B(TestEqualStrictNoFeedback), R(1),
B(JumpIfTrue), U8(4),
- B(Abort), U8(42),
+ B(Abort), U8(15),
B(Ldar), R(7),
- B(SwitchOnSmiNoFeedback), U8(13), U8(2), I8(1),
- B(LdaNamedProperty), R(8), U8(15), U8(12),
- B(Star), R(12),
- B(CallProperty1), R(12), R(8), R(9), U8(14),
- B(Jump), U8(118),
+ B(SwitchOnSmiNoFeedback), U8(14), U8(2), I8(1),
+ B(CallProperty1), R(10), R(8), R(9), U8(14),
+ B(Jump), U8(112),
B(LdaNamedProperty), R(8), U8(16), U8(16),
B(JumpIfUndefined), U8(13),
B(JumpIfNull), U8(11),
- B(Star), R(12),
- B(CallProperty1), R(12), R(8), R(9), U8(18),
- B(Jump), U8(101),
+ B(Star), R(13),
+ B(CallProperty1), R(13), R(8), R(9), U8(18),
+ B(Jump), U8(95),
B(LdaZero),
B(Star), R(2),
B(Mov), R(9), R(3),
@@ -661,23 +627,21 @@ bytecodes: [
B(LdaNamedProperty), R(8), U8(17), U8(20),
B(JumpIfUndefined), U8(13),
B(JumpIfNull), U8(11),
- B(Star), R(12),
- B(CallProperty1), R(12), R(8), R(9), U8(22),
- B(Jump), U8(76),
+ B(Star), R(13),
+ B(CallProperty1), R(13), R(8), R(9), U8(22),
+ B(Jump), U8(70),
B(LdaNamedProperty), R(8), U8(16), U8(24),
- B(Star), R(12),
- B(JumpIfUndefined), U8(63),
- B(JumpIfNull), U8(61),
- B(CallProperty0), R(12), R(8), U8(26),
+ B(JumpIfUndefined), U8(59),
+ B(JumpIfNull), U8(57),
+ B(Star), R(13),
+ B(CallProperty0), R(13), R(8), U8(26),
+ B(Jump), U8(2),
B(Star), R(14),
B(Mov), R(0), R(13),
B(CallJSRuntime), U8(%async_generator_await_uncaught), R(13), U8(2),
B(SuspendGenerator), R(0), R(0), U8(13), U8(2),
/* 60 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(0), R(0), U8(13),
- B(LdaSmi), I8(-2),
- B(Star), R(1),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(0), U8(1),
+ B(ResumeGenerator), R(0), R(1), R(0), U8(13),
B(Star), R(13),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(Star), R(14),
@@ -687,49 +651,43 @@ bytecodes: [
B(Ldar), R(13),
B(ReThrow),
B(Ldar), R(13),
- B(Mov), R(13), R(6),
- B(JumpIfJSReceiver), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
+ B(JumpIfJSReceiver), U8(9),
+ B(Star), R(15),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(15), U8(1),
B(CallRuntime), U16(Runtime::kThrowThrowMethodMissing), R(0), U8(0),
- B(Star), R(13),
- B(Mov), R(0), R(12),
- B(CallJSRuntime), U8(%async_generator_await_uncaught), R(12), U8(2),
- B(SuspendGenerator), R(0), R(0), U8(12), U8(3),
+ B(Star), R(14),
+ B(Mov), R(0), R(13),
+ B(CallJSRuntime), U8(%async_generator_await_uncaught), R(13), U8(2),
+ B(SuspendGenerator), R(0), R(0), U8(13), U8(3),
/* 60 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(0), R(0), U8(12),
- B(LdaSmi), I8(-2),
- B(Star), R(1),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(0), U8(1),
- B(Star), R(12),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
+ B(ResumeGenerator), R(0), R(1), R(0), U8(13),
B(Star), R(13),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
+ B(Star), R(14),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(13),
+ B(TestEqualStrictNoFeedback), R(14),
B(JumpIfTrue), U8(5),
- B(Ldar), R(12),
+ B(Ldar), R(13),
B(ReThrow),
- B(Ldar), R(12),
- B(Mov), R(12), R(6),
+ B(Ldar), R(13),
+ B(Mov), R(13), R(6),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
B(LdaNamedProperty), R(6), U8(18), U8(28),
- B(JumpIfToBooleanTrue), U8(47),
+ B(JumpIfToBooleanTrue), U8(40),
B(LdaNamedProperty), R(6), U8(19), U8(30),
- B(Star), R(15),
- B(LdaFalse),
B(Star), R(16),
- B(Mov), R(0), R(14),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorYield), R(14), U8(3),
- B(SuspendGenerator), R(0), R(0), U8(14), U8(1),
+ B(LdaFalse),
+ B(Star), R(17),
+ B(Mov), R(0), R(15),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorYield), R(15), U8(3),
+ B(SuspendGenerator), R(0), R(0), U8(15), U8(1),
/* 60 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(0), R(0), U8(14),
- B(LdaSmi), I8(-2),
- B(Star), R(1),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(0), U8(1),
+ B(ResumeGenerator), R(0), R(1), R(0), U8(15),
B(Star), R(9),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(Star), R(7),
- B(JumpLoop), U8(252), I8(0),
+ B(JumpLoop), U8(226), I8(0),
B(LdaNamedProperty), R(6), U8(19), U8(32),
B(Star), R(8),
B(LdaSmi), I8(1),
@@ -738,17 +696,14 @@ bytecodes: [
B(LdaZero),
B(Star), R(2),
B(Mov), R(8), R(3),
- B(Jump), U8(107),
+ B(Jump), U8(100),
B(LdaUndefined),
B(Star), R(7),
B(Mov), R(0), R(6),
B(CallJSRuntime), U8(%async_generator_await_uncaught), R(6), U8(2),
B(SuspendGenerator), R(0), R(0), U8(6), U8(4),
/* 60 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(0), R(0), U8(6),
- B(LdaSmi), I8(-2),
- B(Star), R(1),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(0), U8(1),
+ B(ResumeGenerator), R(0), R(1), R(0), U8(6),
B(Star), R(6),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(Star), R(7),
@@ -810,35 +765,35 @@ bytecodes: [
]
constant pool: [
Smi [37],
- Smi [125],
- Smi [125],
- Smi [125],
- Smi [416],
+ Smi [124],
+ Smi [124],
+ Smi [124],
+ Smi [389],
Smi [15],
Smi [7],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["g"],
SYMBOL_TYPE,
SYMBOL_TYPE,
- Smi [230],
- Smi [102],
- Smi [162],
- Smi [17],
- Smi [42],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
+ Smi [211],
+ Smi [98],
+ Smi [150],
+ Smi [11],
+ Smi [36],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["throw"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
FIXED_ARRAY_TYPE,
- Smi [437],
- Smi [324],
+ Smi [410],
+ Smi [297],
Smi [6],
Smi [20],
Smi [23],
]
handlers: [
- [40, 505, 513],
- [43, 466, 468],
+ [40, 471, 479],
+ [43, 432, 434],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassFields.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassFields.golden
index afb3d3e8de..b8b77107a7 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassFields.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassFields.golden
@@ -12,23 +12,19 @@ snippet: "
class A {
a;
['b'];
- static c;
- static ['d'];
}
class B {
a = 1;
['b'] = this.a;
- static c = 3;
- static ['d'] = this.c;
}
new A;
new B;
}
"
-frame size: 11
+frame size: 10
parameter count: 1
-bytecode array length: 193
+bytecode array length: 129
bytecodes: [
/* 30 E> */ B(StackCheck),
B(Ldar), R(closure),
@@ -37,8 +33,6 @@ bytecodes: [
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
B(LdaTheHole),
- B(StaCurrentContextSlot), U8(5),
- B(LdaTheHole),
B(Star), R(8),
B(CreateClosure), U8(2), U8(0), U8(2),
B(Star), R(5),
@@ -47,84 +41,56 @@ bytecodes: [
B(LdaConstant), U8(3),
B(StaCurrentContextSlot), U8(4),
B(Star), R(9),
- B(LdaConstant), U8(4),
- B(Star), R(10),
- B(LdaConstant), U8(5),
- B(TestEqualStrictNoFeedback), R(10),
B(Mov), R(5), R(7),
- B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowStaticPrototypeError), R(0), U8(0),
- B(Ldar), R(10),
- B(StaCurrentContextSlot), U8(5),
- B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(5),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(4),
B(Star), R(6),
- B(Mov), R(5), R(1),
- B(CreateClosure), U8(6), U8(1), U8(2),
+ B(Mov), R(7), R(1),
+ B(CreateClosure), U8(4), U8(1), U8(2),
B(Star), R(7),
- B(StaNamedProperty), R(5), U8(7), U8(2),
- B(CreateClosure), U8(8), U8(4), U8(2),
- B(Star), R(9),
- B(CallProperty0), R(9), R(1), U8(5),
+ B(StaNamedProperty), R(5), U8(5), U8(2),
B(PopContext), R(4),
B(Mov), R(1), R(2),
B(Ldar), R(closure),
- /* 38 E> */ B(CreateBlockContext), U8(9),
+ /* 38 E> */ B(CreateBlockContext), U8(6),
B(PushContext), R(4),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
B(LdaTheHole),
- B(StaCurrentContextSlot), U8(5),
- B(LdaTheHole),
B(Star), R(8),
- B(CreateClosure), U8(11), U8(7), U8(2),
+ B(CreateClosure), U8(8), U8(4), U8(2),
B(Star), R(5),
- B(LdaConstant), U8(10),
+ B(LdaConstant), U8(7),
B(Star), R(6),
B(LdaConstant), U8(3),
B(StaCurrentContextSlot), U8(4),
B(Star), R(9),
- B(LdaConstant), U8(4),
- B(Star), R(10),
- B(LdaConstant), U8(5),
- B(TestEqualStrictNoFeedback), R(10),
B(Mov), R(5), R(7),
- B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowStaticPrototypeError), R(0), U8(0),
- B(Ldar), R(10),
- B(StaCurrentContextSlot), U8(5),
- B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(5),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(4),
B(Star), R(6),
- B(Mov), R(5), R(0),
- B(CreateClosure), U8(12), U8(8), U8(2),
+ B(Mov), R(7), R(0),
+ B(CreateClosure), U8(9), U8(5), U8(2),
B(Star), R(7),
- B(StaNamedProperty), R(5), U8(7), U8(9),
- B(CreateClosure), U8(13), U8(11), U8(2),
- B(Star), R(9),
- B(CallProperty0), R(9), R(0), U8(12),
+ B(StaNamedProperty), R(5), U8(5), U8(6),
B(PopContext), R(4),
B(Mov), R(0), R(3),
- /* 197 S> */ B(Ldar), R(2),
- /* 197 E> */ B(Construct), R(2), R(0), U8(0), U8(14),
- /* 206 S> */ B(Ldar), R(0),
- /* 206 E> */ B(Construct), R(0), R(0), U8(0), U8(16),
+ /* 120 S> */ B(Ldar), R(1),
+ /* 120 E> */ B(Construct), R(1), R(0), U8(0), U8(8),
+ /* 129 S> */ B(Ldar), R(0),
+ /* 129 E> */ B(Construct), R(0), R(0), U8(0), U8(10),
B(LdaUndefined),
- /* 215 S> */ B(Return),
+ /* 138 S> */ B(Return),
]
constant pool: [
FIXED_ARRAY_TYPE,
FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["b"],
- ONE_BYTE_INTERNALIZED_STRING_TYPE ["d"],
- ONE_BYTE_INTERNALIZED_STRING_TYPE ["prototype"],
SHARED_FUNCTION_INFO_TYPE,
SYMBOL_TYPE,
- SHARED_FUNCTION_INFO_TYPE,
FIXED_ARRAY_TYPE,
FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
- SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
]
@@ -135,15 +101,11 @@ snippet: "
class A extends class {} {
a;
['b'];
- static c;
- static ['d'];
}
class B extends class {} {
a = 1;
['b'] = this.a;
- static c = 3;
- static ['d'] = this.c;
foo() { return 1; }
constructor() {
super();
@@ -153,8 +115,6 @@ snippet: "
class C extends B {
a = 1;
['b'] = this.a;
- static c = 3;
- static ['d'] = super.foo();
constructor() {
(() => super())();
}
@@ -167,7 +127,7 @@ snippet: "
"
frame size: 15
parameter count: 1
-bytecode array length: 346
+bytecode array length: 244
bytecodes: [
/* 30 E> */ B(StackCheck),
B(Ldar), R(closure),
@@ -176,8 +136,6 @@ bytecodes: [
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
B(LdaTheHole),
- B(StaCurrentContextSlot), U8(5),
- B(LdaTheHole),
B(Star), R(14),
B(CreateClosure), U8(3), U8(0), U8(2),
B(Star), R(11),
@@ -193,118 +151,79 @@ bytecodes: [
B(LdaConstant), U8(5),
B(StaCurrentContextSlot), U8(4),
B(Star), R(11),
- B(LdaConstant), U8(6),
- B(Star), R(12),
- B(LdaConstant), U8(7),
- B(TestEqualStrictNoFeedback), R(12),
- B(Mov), R(13), R(10),
B(Mov), R(7), R(9),
- B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowStaticPrototypeError), R(0), U8(0),
- B(Ldar), R(12),
- B(StaCurrentContextSlot), U8(5),
- B(CallRuntime), U16(Runtime::kDefineClass), R(8), U8(5),
+ B(Mov), R(13), R(10),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(8), U8(4),
B(Star), R(8),
- B(Mov), R(7), R(2),
- B(CreateClosure), U8(8), U8(2), U8(2),
+ B(Mov), R(9), R(2),
+ B(CreateClosure), U8(6), U8(2), U8(2),
B(Star), R(9),
- B(StaNamedProperty), R(7), U8(9), U8(3),
- B(CreateClosure), U8(10), U8(5), U8(2),
- B(Star), R(11),
- B(CallProperty0), R(11), R(2), U8(6),
+ B(StaNamedProperty), R(7), U8(7), U8(3),
B(PopContext), R(6),
B(Mov), R(2), R(3),
B(Ldar), R(closure),
- /* 38 E> */ B(CreateBlockContext), U8(11),
+ /* 38 E> */ B(CreateBlockContext), U8(8),
B(PushContext), R(6),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
B(LdaTheHole),
- B(StaCurrentContextSlot), U8(5),
- B(LdaTheHole),
B(Star), R(14),
- B(CreateClosure), U8(14), U8(8), U8(2),
+ B(CreateClosure), U8(11), U8(5), U8(2),
B(Star), R(11),
- B(LdaConstant), U8(13),
+ B(LdaConstant), U8(10),
B(Star), R(12),
B(Mov), R(11), R(13),
B(CallRuntime), U16(Runtime::kDefineClass), R(12), U8(3),
B(Star), R(12),
- B(CreateClosure), U8(15), U8(9), U8(2),
+ B(CreateClosure), U8(12), U8(6), U8(2),
B(Star), R(7),
- B(LdaConstant), U8(12),
+ B(LdaConstant), U8(9),
B(Star), R(8),
B(LdaConstant), U8(5),
B(StaCurrentContextSlot), U8(4),
B(Star), R(11),
- B(LdaConstant), U8(6),
+ B(CreateClosure), U8(13), U8(7), U8(2),
B(Star), R(12),
- B(LdaConstant), U8(7),
- B(TestEqualStrictNoFeedback), R(12),
B(Mov), R(7), R(9),
B(Mov), R(13), R(10),
- B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowStaticPrototypeError), R(0), U8(0),
- B(Ldar), R(12),
- B(StaCurrentContextSlot), U8(5),
- B(CreateClosure), U8(16), U8(10), U8(2),
- B(Star), R(13),
- B(CallRuntime), U16(Runtime::kDefineClass), R(8), U8(6),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(8), U8(5),
B(Star), R(8),
- B(Mov), R(7), R(1),
- B(CreateClosure), U8(17), U8(11), U8(2),
+ B(Mov), R(9), R(1),
+ B(CreateClosure), U8(14), U8(8), U8(2),
B(Star), R(9),
- B(StaNamedProperty), R(7), U8(9), U8(12),
- B(CreateClosure), U8(18), U8(14), U8(2),
- B(Star), R(11),
- B(CallProperty0), R(11), R(1), U8(15),
+ B(StaNamedProperty), R(7), U8(7), U8(9),
B(PopContext), R(6),
B(Mov), R(1), R(4),
B(Ldar), R(closure),
- /* 122 E> */ B(CreateBlockContext), U8(19),
+ /* 90 E> */ B(CreateBlockContext), U8(15),
B(PushContext), R(6),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
- B(LdaTheHole),
- B(StaCurrentContextSlot), U8(5),
- /* 313 E> */ B(CreateClosure), U8(21), U8(17), U8(2),
+ /* 236 E> */ B(CreateClosure), U8(17), U8(11), U8(2),
B(Star), R(7),
- B(LdaConstant), U8(20),
+ B(LdaConstant), U8(16),
B(Star), R(8),
B(LdaConstant), U8(5),
B(StaCurrentContextSlot), U8(4),
B(Star), R(11),
- B(LdaConstant), U8(6),
- B(Star), R(12),
- B(LdaConstant), U8(7),
- B(TestEqualStrictNoFeedback), R(12),
- B(Mov), R(1), R(10),
B(Mov), R(7), R(9),
- B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowStaticPrototypeError), R(0), U8(0),
- B(Ldar), R(12),
- B(StaCurrentContextSlot), U8(5),
- B(CallRuntime), U16(Runtime::kDefineClass), R(8), U8(5),
+ B(Mov), R(1), R(10),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(8), U8(4),
B(Star), R(8),
- B(Mov), R(7), R(0),
- B(CreateClosure), U8(22), U8(18), U8(2),
+ B(Mov), R(9), R(0),
+ B(CreateClosure), U8(18), U8(12), U8(2),
B(Star), R(9),
- B(StaNamedProperty), R(7), U8(9), U8(19),
- B(CreateClosure), U8(23), U8(21), U8(2),
- B(Star), R(11),
- B(Ldar), R(0),
- B(StaNamedProperty), R(11), U8(24), U8(22),
- B(CallProperty0), R(11), R(0), U8(24),
+ B(StaNamedProperty), R(7), U8(7), U8(13),
B(PopContext), R(6),
B(Mov), R(0), R(5),
- /* 456 S> */ B(Ldar), R(3),
- /* 456 E> */ B(Construct), R(3), R(0), U8(0), U8(26),
- /* 465 S> */ B(Ldar), R(4),
- /* 465 E> */ B(Construct), R(4), R(0), U8(0), U8(28),
- /* 474 S> */ B(Ldar), R(0),
- /* 474 E> */ B(Construct), R(0), R(0), U8(0), U8(30),
+ /* 329 S> */ B(Ldar), R(2),
+ /* 329 E> */ B(Construct), R(2), R(0), U8(0), U8(15),
+ /* 338 S> */ B(Ldar), R(1),
+ /* 338 E> */ B(Construct), R(1), R(0), U8(0), U8(17),
+ /* 347 S> */ B(Ldar), R(0),
+ /* 347 E> */ B(Construct), R(0), R(0), U8(0), U8(19),
B(LdaUndefined),
- /* 483 S> */ B(Return),
+ /* 356 S> */ B(Return),
]
constant pool: [
FIXED_ARRAY_TYPE,
@@ -313,11 +232,8 @@ constant pool: [
SHARED_FUNCTION_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["b"],
- ONE_BYTE_INTERNALIZED_STRING_TYPE ["d"],
- ONE_BYTE_INTERNALIZED_STRING_TYPE ["prototype"],
SHARED_FUNCTION_INFO_TYPE,
SYMBOL_TYPE,
- SHARED_FUNCTION_INFO_TYPE,
FIXED_ARRAY_TYPE,
FIXED_ARRAY_TYPE,
FIXED_ARRAY_TYPE,
@@ -325,13 +241,10 @@ constant pool: [
SHARED_FUNCTION_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
- SHARED_FUNCTION_INFO_TYPE,
FIXED_ARRAY_TYPE,
FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
- SHARED_FUNCTION_INFO_TYPE,
- SYMBOL_TYPE,
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/DeclareGlobals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/DeclareGlobals.golden
index cec1c48f32..0d1edb6424 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/DeclareGlobals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/DeclareGlobals.golden
@@ -22,7 +22,7 @@ bytecodes: [
B(CallRuntime), U16(Runtime::kDeclareGlobalsForInterpreter), R(1), U8(3),
/* 0 E> */ B(StackCheck),
/* 8 S> */ B(LdaSmi), I8(1),
- /* 8 E> */ B(StaGlobalSloppy), U8(1), U8(2),
+ /* 8 E> */ B(StaGlobal), U8(1), U8(2),
B(LdaUndefined),
/* 10 S> */ B(Return),
]
@@ -74,9 +74,9 @@ bytecodes: [
B(CallRuntime), U16(Runtime::kDeclareGlobalsForInterpreter), R(1), U8(3),
/* 0 E> */ B(StackCheck),
/* 8 S> */ B(LdaSmi), I8(1),
- /* 8 E> */ B(StaGlobalSloppy), U8(1), U8(2),
+ /* 8 E> */ B(StaGlobal), U8(1), U8(2),
/* 11 S> */ B(LdaSmi), I8(2),
- /* 12 E> */ B(StaGlobalSloppy), U8(1), U8(4),
+ /* 12 E> */ B(StaGlobal), U8(1), U8(4),
B(Star), R(0),
/* 15 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
index 16e36dd707..fd83a29fd9 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
@@ -14,155 +14,173 @@ snippet: "
}
f();
"
-frame size: 23
+frame size: 24
parameter count: 1
-bytecode array length: 589
+bytecode array length: 554
bytecodes: [
B(Ldar), R(2),
B(JumpIfUndefined), U8(18),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(2), U8(1),
- B(PushContext), R(12),
+ B(PushContext), R(13),
B(RestoreGeneratorState), R(2),
- B(Star), R(11),
+ B(Star), R(12),
B(SwitchOnSmiNoFeedback), U8(0), U8(3), I8(0),
- B(Abort), U8(42),
+ B(Abort), U8(15),
B(LdaSmi), I8(-2),
- B(Star), R(11),
- B(Mov), R(closure), R(12),
- B(Mov), R(this), R(13),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(12), U8(2),
+ B(Star), R(12),
+ B(Mov), R(closure), R(13),
+ B(Mov), R(this), R(14),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(13), U8(2),
B(Star), R(2),
/* 16 E> */ B(StackCheck),
B(CallJSRuntime), U8(%async_function_promise_create), R(0), U8(0),
- B(Star), R(10),
- B(Mov), R(context), R(14),
+ B(Star), R(11),
B(Mov), R(context), R(15),
+ B(Mov), R(context), R(16),
B(LdaZero),
- B(Star), R(6),
- B(Mov), R(context), R(18),
+ B(Star), R(7),
B(Mov), R(context), R(19),
+ B(Mov), R(context), R(20),
/* 43 S> */ B(CreateArrayLiteral), U8(3), U8(0), U8(37),
- B(Star), R(20),
- B(LdaNamedProperty), R(20), U8(4), U8(1),
+ B(Star), R(21),
+ B(LdaNamedProperty), R(21), U8(4), U8(1),
B(JumpIfUndefined), U8(17),
B(JumpIfNull), U8(15),
- B(Star), R(21),
- B(CallProperty0), R(21), R(20), U8(3),
+ B(Star), R(22),
+ B(CallProperty0), R(22), R(21), U8(3),
B(JumpIfJSReceiver), U8(23),
B(CallRuntime), U16(Runtime::kThrowSymbolAsyncIteratorInvalid), R(0), U8(0),
- B(LdaNamedProperty), R(20), U8(5), U8(5),
- B(Star), R(21),
- B(CallProperty0), R(21), R(20), U8(7),
- B(Star), R(21),
- B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(21), U8(1),
+ B(LdaNamedProperty), R(21), U8(5), U8(5),
+ B(Star), R(22),
+ B(CallProperty0), R(22), R(21), U8(7),
+ B(Star), R(22),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(22), U8(1),
B(Star), R(4),
- B(Ldar), R(11),
- B(SwitchOnSmiNoFeedback), U8(6), U8(1), I8(0),
+ /* 43 E> */ B(LdaNamedProperty), R(4), U8(6), U8(9),
+ B(Star), R(5),
+ B(Ldar), R(12),
+ B(SwitchOnSmiNoFeedback), U8(7), U8(1), I8(0),
B(LdaSmi), I8(-2),
- /* 43 E> */ B(TestEqualStrictNoFeedback), R(11),
+ B(TestEqualStrictNoFeedback), R(12),
B(JumpIfTrue), U8(4),
- B(Abort), U8(42),
- /* 40 S> */ B(LdaNamedProperty), R(4), U8(7), U8(9),
- B(Star), R(20),
- B(CallProperty0), R(20), R(4), U8(11),
- B(Star), R(21),
- B(Mov), R(2), R(20),
- B(Mov), R(10), R(22),
- B(CallJSRuntime), U8(%async_function_await_uncaught), R(20), U8(3),
- B(SuspendGenerator), R(2), R(0), U8(20), U8(0),
+ B(Abort), U8(15),
+ /* 40 S> */ B(CallProperty0), R(5), R(4), U8(11),
+ B(Star), R(22),
+ B(Mov), R(2), R(21),
+ B(Mov), R(11), R(23),
+ B(CallJSRuntime), U8(%async_function_await_uncaught), R(21), U8(3),
+ B(SuspendGenerator), R(2), R(0), U8(21), U8(0),
/* 57 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(2), R(0), U8(20),
- B(LdaSmi), I8(-2),
- B(Star), R(11),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(2), U8(1),
- B(Star), R(20),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(ResumeGenerator), R(2), R(12), R(0), U8(21),
B(Star), R(21),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(Star), R(22),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(21),
+ B(TestEqualStrictNoFeedback), R(22),
B(JumpIfTrue), U8(5),
- B(Ldar), R(20),
+ B(Ldar), R(21),
B(ReThrow),
- B(Mov), R(20), R(5),
- /* 40 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(20), U8(1),
+ B(Mov), R(21), R(6),
+ /* 40 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(21), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(5), U8(1),
- B(LdaNamedProperty), R(5), U8(8), U8(13),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
+ B(LdaNamedProperty), R(6), U8(8), U8(13),
B(JumpIfToBooleanTrue), U8(25),
- B(LdaNamedProperty), R(5), U8(9), U8(15),
- B(Star), R(7),
+ B(LdaNamedProperty), R(6), U8(9), U8(15),
+ B(Star), R(8),
B(LdaSmi), I8(2),
- B(Star), R(6),
- B(Mov), R(7), R(3),
+ B(Star), R(7),
+ B(Mov), R(8), R(3),
/* 23 E> */ B(StackCheck),
B(Mov), R(3), R(0),
B(LdaZero),
- B(Star), R(6),
- B(JumpLoop), U8(111), I8(0),
+ B(Star), R(7),
+ B(JumpLoop), U8(98), I8(0),
B(Jump), U8(40),
- B(Star), R(20),
+ B(Star), R(21),
B(Ldar), R(closure),
- B(CreateCatchContext), R(20), U8(10), U8(11),
- B(Star), R(19),
+ B(CreateCatchContext), R(21), U8(10), U8(11),
+ B(Star), R(20),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(19),
- B(PushContext), R(20),
+ B(Ldar), R(20),
+ B(PushContext), R(21),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(6), U8(17),
+ B(TestEqualStrict), R(7), U8(17),
B(JumpIfFalse), U8(6),
B(LdaSmi), I8(1),
- B(Star), R(6),
+ B(Star), R(7),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(21),
- B(CallRuntime), U16(Runtime::kReThrow), R(21), U8(1),
- B(PopContext), R(20),
+ B(Star), R(22),
+ B(CallRuntime), U16(Runtime::kReThrow), R(22), U8(1),
+ B(PopContext), R(21),
B(LdaSmi), I8(-1),
+ B(Star), R(18),
B(Star), R(17),
- B(Star), R(16),
B(Jump), U8(7),
- B(Star), R(17),
+ B(Star), R(18),
B(LdaZero),
- B(Star), R(16),
+ B(Star), R(17),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(18),
+ B(Star), R(19),
B(LdaZero),
- B(TestEqualStrict), R(6), U8(18),
- B(JumpIfTrue), U8(199),
+ B(TestEqualStrict), R(7), U8(18),
+ B(JumpIfTrue), U8(171),
B(LdaNamedProperty), R(4), U8(12), U8(19),
- B(Star), R(8),
+ B(Star), R(9),
B(TestUndetectable),
B(JumpIfFalse), U8(4),
- B(Jump), U8(188),
+ B(Jump), U8(160),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(6), U8(21),
- B(JumpIfFalse), U8(109),
- B(Ldar), R(8),
+ B(TestEqualStrict), R(7), U8(21),
+ B(JumpIfFalse), U8(88),
+ B(Ldar), R(9),
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(143),
- B(Star), R(19),
- B(LdaConstant), U8(13),
+ B(Wide), B(LdaSmi), I16(144),
B(Star), R(20),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(19), U8(2),
+ B(LdaConstant), U8(13),
+ B(Star), R(21),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(20), U8(2),
B(Throw),
- B(Mov), R(context), R(19),
- B(Mov), R(8), R(20),
+ B(Mov), R(context), R(20),
+ B(Mov), R(9), R(21),
+ B(Mov), R(4), R(22),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(21), U8(2),
+ B(Star), R(22),
+ B(Mov), R(2), R(21),
+ B(Mov), R(11), R(23),
+ B(CallJSRuntime), U8(%async_function_await_caught), R(21), U8(3),
+ B(SuspendGenerator), R(2), R(0), U8(21), U8(1),
+ /* 57 S> */ B(Return),
+ B(ResumeGenerator), R(2), R(12), R(0), U8(21),
+ B(Star), R(21),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(Star), R(22),
+ B(LdaZero),
+ B(TestEqualStrictNoFeedback), R(22),
+ B(JumpIfTrue), U8(5),
+ B(Ldar), R(21),
+ B(ReThrow),
+ B(Ldar), R(21),
+ B(Jump), U8(6),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Ldar), R(20),
+ B(Jump), U8(67),
+ B(Mov), R(9), R(20),
B(Mov), R(4), R(21),
B(InvokeIntrinsic), U8(Runtime::k_Call), R(20), U8(2),
B(Star), R(21),
B(Mov), R(2), R(20),
- B(Mov), R(10), R(22),
- B(CallJSRuntime), U8(%async_function_await_caught), R(20), U8(3),
- B(SuspendGenerator), R(2), R(0), U8(20), U8(1),
+ B(Mov), R(11), R(22),
+ B(CallJSRuntime), U8(%async_function_await_uncaught), R(20), U8(3),
+ B(SuspendGenerator), R(2), R(0), U8(20), U8(2),
/* 57 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(2), R(0), U8(20),
- B(LdaSmi), I8(-2),
- B(Star), R(11),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(2), U8(1),
+ B(ResumeGenerator), R(2), R(12), R(0), U8(20),
B(Star), R(20),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
B(Star), R(21),
@@ -171,111 +189,78 @@ bytecodes: [
B(JumpIfTrue), U8(5),
B(Ldar), R(20),
B(ReThrow),
- B(Ldar), R(20),
- B(Jump), U8(20),
- B(Star), R(20),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(20), U8(10), U8(14),
- B(Star), R(19),
- B(LdaTheHole),
- B(SetPendingMessage),
- B(Ldar), R(19),
- B(PushContext), R(20),
- B(PopContext), R(20),
- B(Jump), U8(74),
- B(Mov), R(8), R(19),
- B(Mov), R(4), R(20),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(19), U8(2),
- B(Star), R(20),
- B(Mov), R(2), R(19),
- B(Mov), R(10), R(21),
- B(CallJSRuntime), U8(%async_function_await_uncaught), R(19), U8(3),
- B(SuspendGenerator), R(2), R(0), U8(19), U8(2),
- /* 57 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(2), R(0), U8(19),
- B(LdaSmi), I8(-2),
- B(Star), R(11),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(2), U8(1),
- B(Star), R(19),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
- B(Star), R(20),
- B(LdaZero),
- B(TestEqualStrictNoFeedback), R(20),
- B(JumpIfTrue), U8(5),
- B(Ldar), R(19),
- B(ReThrow),
- B(Mov), R(19), R(9),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(19), U8(1),
+ B(Mov), R(20), R(10),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(20), U8(1),
B(JumpIfToBooleanFalse), U8(4),
B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(9), U8(1),
- B(Ldar), R(18),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
+ B(Ldar), R(19),
B(SetPendingMessage),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(16),
+ B(TestEqualStrictNoFeedback), R(17),
B(JumpIfFalse), U8(5),
- B(Ldar), R(17),
+ B(Ldar), R(18),
B(ReThrow),
B(LdaUndefined),
- B(Star), R(17),
- B(Mov), R(10), R(16),
- B(CallJSRuntime), U8(%promise_resolve), R(16), U8(2),
+ B(Star), R(18),
+ B(Mov), R(11), R(17),
+ B(CallJSRuntime), U8(%promise_resolve), R(17), U8(2),
B(LdaZero),
- B(Star), R(12),
- B(Mov), R(10), R(13),
+ B(Star), R(13),
+ B(Mov), R(11), R(14),
B(Jump), U8(58),
B(Jump), U8(42),
- B(Star), R(16),
+ B(Star), R(17),
B(Ldar), R(closure),
- B(CreateCatchContext), R(16), U8(10), U8(15),
- B(Star), R(15),
+ B(CreateCatchContext), R(17), U8(10), U8(14),
+ B(Star), R(16),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(15),
- B(PushContext), R(16),
+ B(Ldar), R(16),
+ B(PushContext), R(17),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(18),
- B(LdaFalse),
B(Star), R(19),
- B(Mov), R(10), R(17),
- B(CallJSRuntime), U8(%promise_internal_reject), R(17), U8(3),
- B(PopContext), R(16),
+ B(LdaFalse),
+ B(Star), R(20),
+ B(Mov), R(11), R(18),
+ B(CallJSRuntime), U8(%promise_internal_reject), R(18), U8(3),
+ B(PopContext), R(17),
B(LdaZero),
- B(Star), R(12),
- B(Mov), R(10), R(13),
+ B(Star), R(13),
+ B(Mov), R(11), R(14),
B(Jump), U8(16),
B(LdaSmi), I8(-1),
+ B(Star), R(14),
B(Star), R(13),
- B(Star), R(12),
B(Jump), U8(8),
- B(Star), R(13),
+ B(Star), R(14),
B(LdaSmi), I8(1),
- B(Star), R(12),
+ B(Star), R(13),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(14),
- B(CallJSRuntime), U8(%async_function_promise_release), R(10), U8(1),
- B(Ldar), R(14),
+ B(Star), R(15),
+ B(CallJSRuntime), U8(%async_function_promise_release), R(11), U8(1),
+ B(Ldar), R(15),
B(SetPendingMessage),
- B(Ldar), R(12),
- B(SwitchOnSmiNoFeedback), U8(16), U8(2), I8(0),
- B(Jump), U8(8),
B(Ldar), R(13),
+ B(SwitchOnSmiNoFeedback), U8(15), U8(2), I8(0),
+ B(Jump), U8(8),
+ B(Ldar), R(14),
/* 57 S> */ B(Return),
- B(Ldar), R(13),
+ B(Ldar), R(14),
B(ReThrow),
B(LdaUndefined),
/* 57 S> */ B(Return),
]
constant pool: [
- Smi [89],
- Smi [339],
- Smi [419],
+ Smi [95],
+ Smi [332],
+ Smi [391],
TUPLE2_TYPE,
SYMBOL_TYPE,
SYMBOL_TYPE,
- Smi [40],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
+ Smi [34],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
@@ -283,16 +268,15 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
FIXED_ARRAY_TYPE,
- FIXED_ARRAY_TYPE,
Smi [6],
Smi [9],
]
handlers: [
- [46, 548, 556],
- [49, 506, 508],
- [55, 257, 265],
- [58, 217, 219],
- [325, 383, 385],
+ [46, 513, 521],
+ [49, 471, 473],
+ [55, 250, 258],
+ [58, 210, 212],
+ [318, 369, 371],
]
---
@@ -302,156 +286,174 @@ snippet: "
}
f();
"
-frame size: 23
+frame size: 24
parameter count: 1
-bytecode array length: 618
+bytecode array length: 583
bytecodes: [
B(Ldar), R(2),
B(JumpIfUndefined), U8(18),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(2), U8(1),
- B(PushContext), R(12),
+ B(PushContext), R(13),
B(RestoreGeneratorState), R(2),
- B(Star), R(11),
+ B(Star), R(12),
B(SwitchOnSmiNoFeedback), U8(0), U8(3), I8(0),
- B(Abort), U8(42),
+ B(Abort), U8(15),
B(LdaSmi), I8(-2),
- B(Star), R(11),
- B(Mov), R(closure), R(12),
- B(Mov), R(this), R(13),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(12), U8(2),
+ B(Star), R(12),
+ B(Mov), R(closure), R(13),
+ B(Mov), R(this), R(14),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(13), U8(2),
B(Star), R(2),
/* 16 E> */ B(StackCheck),
B(CallJSRuntime), U8(%async_function_promise_create), R(0), U8(0),
- B(Star), R(10),
- B(Mov), R(context), R(14),
+ B(Star), R(11),
B(Mov), R(context), R(15),
+ B(Mov), R(context), R(16),
B(LdaZero),
- B(Star), R(6),
- B(Mov), R(context), R(18),
+ B(Star), R(7),
B(Mov), R(context), R(19),
+ B(Mov), R(context), R(20),
/* 43 S> */ B(CreateArrayLiteral), U8(3), U8(0), U8(37),
- B(Star), R(20),
- B(LdaNamedProperty), R(20), U8(4), U8(1),
+ B(Star), R(21),
+ B(LdaNamedProperty), R(21), U8(4), U8(1),
B(JumpIfUndefined), U8(17),
B(JumpIfNull), U8(15),
- B(Star), R(21),
- B(CallProperty0), R(21), R(20), U8(3),
+ B(Star), R(22),
+ B(CallProperty0), R(22), R(21), U8(3),
B(JumpIfJSReceiver), U8(23),
B(CallRuntime), U16(Runtime::kThrowSymbolAsyncIteratorInvalid), R(0), U8(0),
- B(LdaNamedProperty), R(20), U8(5), U8(5),
- B(Star), R(21),
- B(CallProperty0), R(21), R(20), U8(7),
- B(Star), R(21),
- B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(21), U8(1),
+ B(LdaNamedProperty), R(21), U8(5), U8(5),
+ B(Star), R(22),
+ B(CallProperty0), R(22), R(21), U8(7),
+ B(Star), R(22),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(22), U8(1),
B(Star), R(4),
- B(Ldar), R(11),
- B(SwitchOnSmiNoFeedback), U8(6), U8(1), I8(0),
+ /* 43 E> */ B(LdaNamedProperty), R(4), U8(6), U8(9),
+ B(Star), R(5),
+ B(Ldar), R(12),
+ B(SwitchOnSmiNoFeedback), U8(7), U8(1), I8(0),
B(LdaSmi), I8(-2),
- /* 43 E> */ B(TestEqualStrictNoFeedback), R(11),
+ B(TestEqualStrictNoFeedback), R(12),
B(JumpIfTrue), U8(4),
- B(Abort), U8(42),
- /* 40 S> */ B(LdaNamedProperty), R(4), U8(7), U8(9),
- B(Star), R(20),
- B(CallProperty0), R(20), R(4), U8(11),
- B(Star), R(21),
- B(Mov), R(2), R(20),
- B(Mov), R(10), R(22),
- B(CallJSRuntime), U8(%async_function_await_uncaught), R(20), U8(3),
- B(SuspendGenerator), R(2), R(0), U8(20), U8(0),
+ B(Abort), U8(15),
+ /* 40 S> */ B(CallProperty0), R(5), R(4), U8(11),
+ B(Star), R(22),
+ B(Mov), R(2), R(21),
+ B(Mov), R(11), R(23),
+ B(CallJSRuntime), U8(%async_function_await_uncaught), R(21), U8(3),
+ B(SuspendGenerator), R(2), R(0), U8(21), U8(0),
/* 68 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(2), R(0), U8(20),
- B(LdaSmi), I8(-2),
- B(Star), R(11),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(2), U8(1),
- B(Star), R(20),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(ResumeGenerator), R(2), R(12), R(0), U8(21),
B(Star), R(21),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(Star), R(22),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(21),
+ B(TestEqualStrictNoFeedback), R(22),
B(JumpIfTrue), U8(5),
- B(Ldar), R(20),
+ B(Ldar), R(21),
B(ReThrow),
- B(Mov), R(20), R(5),
- /* 40 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(20), U8(1),
+ B(Mov), R(21), R(6),
+ /* 40 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(21), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(5), U8(1),
- B(LdaNamedProperty), R(5), U8(8), U8(13),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
+ B(LdaNamedProperty), R(6), U8(8), U8(13),
B(JumpIfToBooleanTrue), U8(27),
- B(LdaNamedProperty), R(5), U8(9), U8(15),
- B(Star), R(7),
+ B(LdaNamedProperty), R(6), U8(9), U8(15),
+ B(Star), R(8),
B(LdaSmi), I8(2),
- B(Star), R(6),
- B(Mov), R(7), R(3),
+ B(Star), R(7),
+ B(Mov), R(8), R(3),
/* 23 E> */ B(StackCheck),
B(Mov), R(3), R(0),
/* 56 S> */ B(LdaZero),
- B(Star), R(16),
- B(Mov), R(7), R(17),
+ B(Star), R(17),
+ B(Mov), R(8), R(18),
B(Jump), U8(56),
B(Jump), U8(40),
- B(Star), R(20),
+ B(Star), R(21),
B(Ldar), R(closure),
- B(CreateCatchContext), R(20), U8(10), U8(11),
- B(Star), R(19),
+ B(CreateCatchContext), R(21), U8(10), U8(11),
+ B(Star), R(20),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(19),
- B(PushContext), R(20),
+ B(Ldar), R(20),
+ B(PushContext), R(21),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(6), U8(17),
+ B(TestEqualStrict), R(7), U8(17),
B(JumpIfFalse), U8(6),
B(LdaSmi), I8(1),
- B(Star), R(6),
+ B(Star), R(7),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(21),
- B(CallRuntime), U16(Runtime::kReThrow), R(21), U8(1),
- B(PopContext), R(20),
+ B(Star), R(22),
+ B(CallRuntime), U16(Runtime::kReThrow), R(22), U8(1),
+ B(PopContext), R(21),
B(LdaSmi), I8(-1),
+ B(Star), R(18),
B(Star), R(17),
- B(Star), R(16),
B(Jump), U8(8),
- B(Star), R(17),
+ B(Star), R(18),
B(LdaSmi), I8(1),
- B(Star), R(16),
+ B(Star), R(17),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(18),
+ B(Star), R(19),
B(LdaZero),
- B(TestEqualStrict), R(6), U8(18),
- B(JumpIfTrue), U8(199),
+ B(TestEqualStrict), R(7), U8(18),
+ B(JumpIfTrue), U8(171),
B(LdaNamedProperty), R(4), U8(12), U8(19),
- B(Star), R(8),
+ B(Star), R(9),
B(TestUndetectable),
B(JumpIfFalse), U8(4),
- B(Jump), U8(188),
+ B(Jump), U8(160),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(6), U8(21),
- B(JumpIfFalse), U8(109),
- B(Ldar), R(8),
+ B(TestEqualStrict), R(7), U8(21),
+ B(JumpIfFalse), U8(88),
+ B(Ldar), R(9),
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(143),
- B(Star), R(19),
- B(LdaConstant), U8(13),
+ B(Wide), B(LdaSmi), I16(144),
B(Star), R(20),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(19), U8(2),
+ B(LdaConstant), U8(13),
+ B(Star), R(21),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(20), U8(2),
B(Throw),
- B(Mov), R(context), R(19),
- B(Mov), R(8), R(20),
+ B(Mov), R(context), R(20),
+ B(Mov), R(9), R(21),
+ B(Mov), R(4), R(22),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(21), U8(2),
+ B(Star), R(22),
+ B(Mov), R(2), R(21),
+ B(Mov), R(11), R(23),
+ B(CallJSRuntime), U8(%async_function_await_caught), R(21), U8(3),
+ B(SuspendGenerator), R(2), R(0), U8(21), U8(1),
+ /* 68 S> */ B(Return),
+ B(ResumeGenerator), R(2), R(12), R(0), U8(21),
+ B(Star), R(21),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(Star), R(22),
+ B(LdaZero),
+ B(TestEqualStrictNoFeedback), R(22),
+ B(JumpIfTrue), U8(5),
+ B(Ldar), R(21),
+ B(ReThrow),
+ B(Ldar), R(21),
+ B(Jump), U8(6),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Ldar), R(20),
+ B(Jump), U8(67),
+ B(Mov), R(9), R(20),
B(Mov), R(4), R(21),
B(InvokeIntrinsic), U8(Runtime::k_Call), R(20), U8(2),
B(Star), R(21),
B(Mov), R(2), R(20),
- B(Mov), R(10), R(22),
- B(CallJSRuntime), U8(%async_function_await_caught), R(20), U8(3),
- B(SuspendGenerator), R(2), R(0), U8(20), U8(1),
+ B(Mov), R(11), R(22),
+ B(CallJSRuntime), U8(%async_function_await_uncaught), R(20), U8(3),
+ B(SuspendGenerator), R(2), R(0), U8(20), U8(2),
/* 68 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(2), R(0), U8(20),
- B(LdaSmi), I8(-2),
- B(Star), R(11),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(2), U8(1),
+ B(ResumeGenerator), R(2), R(12), R(0), U8(20),
B(Star), R(20),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
B(Star), R(21),
@@ -460,127 +462,93 @@ bytecodes: [
B(JumpIfTrue), U8(5),
B(Ldar), R(20),
B(ReThrow),
- B(Ldar), R(20),
- B(Jump), U8(20),
- B(Star), R(20),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(20), U8(10), U8(14),
- B(Star), R(19),
- B(LdaTheHole),
- B(SetPendingMessage),
- B(Ldar), R(19),
- B(PushContext), R(20),
- B(PopContext), R(20),
- B(Jump), U8(74),
- B(Mov), R(8), R(19),
- B(Mov), R(4), R(20),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(19), U8(2),
- B(Star), R(20),
- B(Mov), R(2), R(19),
- B(Mov), R(10), R(21),
- B(CallJSRuntime), U8(%async_function_await_uncaught), R(19), U8(3),
- B(SuspendGenerator), R(2), R(0), U8(19), U8(2),
- /* 68 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(2), R(0), U8(19),
- B(LdaSmi), I8(-2),
- B(Star), R(11),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(2), U8(1),
- B(Star), R(19),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
- B(Star), R(20),
- B(LdaZero),
- B(TestEqualStrictNoFeedback), R(20),
- B(JumpIfTrue), U8(5),
- B(Ldar), R(19),
- B(ReThrow),
- B(Mov), R(19), R(9),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(19), U8(1),
+ B(Mov), R(20), R(10),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(20), U8(1),
B(JumpIfToBooleanFalse), U8(4),
B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(9), U8(1),
- B(Ldar), R(18),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
+ B(Ldar), R(19),
B(SetPendingMessage),
- B(Ldar), R(16),
- B(SwitchOnSmiNoFeedback), U8(15), U8(2), I8(0),
+ B(Ldar), R(17),
+ B(SwitchOnSmiNoFeedback), U8(14), U8(2), I8(0),
B(Jump), U8(13),
B(LdaZero),
- B(Star), R(12),
- B(Mov), R(17), R(13),
+ B(Star), R(13),
+ B(Mov), R(18), R(14),
B(Jump), U8(81),
- B(Ldar), R(17),
+ B(Ldar), R(18),
B(ReThrow),
B(LdaUndefined),
- B(Star), R(17),
- B(Mov), R(10), R(16),
- B(CallJSRuntime), U8(%promise_resolve), R(16), U8(2),
+ B(Star), R(18),
+ B(Mov), R(11), R(17),
+ B(CallJSRuntime), U8(%promise_resolve), R(17), U8(2),
B(LdaSmi), I8(1),
- B(Star), R(12),
- B(Mov), R(10), R(13),
+ B(Star), R(13),
+ B(Mov), R(11), R(14),
B(Jump), U8(59),
B(Jump), U8(43),
- B(Star), R(16),
+ B(Star), R(17),
B(Ldar), R(closure),
- B(CreateCatchContext), R(16), U8(10), U8(17),
- B(Star), R(15),
+ B(CreateCatchContext), R(17), U8(10), U8(16),
+ B(Star), R(16),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(15),
- B(PushContext), R(16),
+ B(Ldar), R(16),
+ B(PushContext), R(17),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(18),
- B(LdaFalse),
B(Star), R(19),
- B(Mov), R(10), R(17),
- B(CallJSRuntime), U8(%promise_internal_reject), R(17), U8(3),
- B(PopContext), R(16),
+ B(LdaFalse),
+ B(Star), R(20),
+ B(Mov), R(11), R(18),
+ B(CallJSRuntime), U8(%promise_internal_reject), R(18), U8(3),
+ B(PopContext), R(17),
B(LdaSmi), I8(1),
- B(Star), R(12),
- B(Mov), R(10), R(13),
+ B(Star), R(13),
+ B(Mov), R(11), R(14),
B(Jump), U8(16),
B(LdaSmi), I8(-1),
+ B(Star), R(14),
B(Star), R(13),
- B(Star), R(12),
B(Jump), U8(8),
- B(Star), R(13),
+ B(Star), R(14),
B(LdaSmi), I8(2),
- B(Star), R(12),
+ B(Star), R(13),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(14),
- B(CallJSRuntime), U8(%async_function_promise_release), R(10), U8(1),
- B(Ldar), R(14),
+ B(Star), R(15),
+ B(CallJSRuntime), U8(%async_function_promise_release), R(11), U8(1),
+ B(Ldar), R(15),
B(SetPendingMessage),
- B(Ldar), R(12),
- B(SwitchOnSmiNoFeedback), U8(18), U8(3), I8(0),
+ B(Ldar), R(13),
+ B(SwitchOnSmiNoFeedback), U8(17), U8(3), I8(0),
B(Jump), U8(21),
- B(Mov), R(10), R(15),
- B(Mov), R(13), R(16),
- B(CallJSRuntime), U8(%promise_resolve), R(15), U8(2),
- B(Ldar), R(10),
+ B(Mov), R(11), R(16),
+ B(Mov), R(14), R(17),
+ B(CallJSRuntime), U8(%promise_resolve), R(16), U8(2),
+ B(Ldar), R(11),
/* 68 S> */ B(Return),
- B(Ldar), R(13),
+ B(Ldar), R(14),
/* 68 S> */ B(Return),
- B(Ldar), R(13),
+ B(Ldar), R(14),
B(ReThrow),
B(LdaUndefined),
/* 68 S> */ B(Return),
]
constant pool: [
- Smi [89],
- Smi [342],
- Smi [422],
+ Smi [95],
+ Smi [335],
+ Smi [394],
TUPLE2_TYPE,
SYMBOL_TYPE,
SYMBOL_TYPE,
- Smi [40],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
+ Smi [34],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
FIXED_ARRAY_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
- FIXED_ARRAY_TYPE,
Smi [6],
Smi [14],
FIXED_ARRAY_TYPE,
@@ -589,11 +557,11 @@ constant pool: [
Smi [22],
]
handlers: [
- [46, 564, 572],
- [49, 521, 523],
- [55, 259, 267],
- [58, 219, 221],
- [328, 386, 388],
+ [46, 529, 537],
+ [49, 486, 488],
+ [55, 252, 260],
+ [58, 212, 214],
+ [321, 372, 374],
]
---
@@ -606,87 +574,84 @@ snippet: "
}
f();
"
-frame size: 23
+frame size: 24
parameter count: 1
-bytecode array length: 607
+bytecode array length: 572
bytecodes: [
B(Ldar), R(2),
B(JumpIfUndefined), U8(18),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(2), U8(1),
- B(PushContext), R(12),
+ B(PushContext), R(13),
B(RestoreGeneratorState), R(2),
- B(Star), R(11),
+ B(Star), R(12),
B(SwitchOnSmiNoFeedback), U8(0), U8(3), I8(0),
- B(Abort), U8(42),
+ B(Abort), U8(15),
B(LdaSmi), I8(-2),
- B(Star), R(11),
- B(Mov), R(closure), R(12),
- B(Mov), R(this), R(13),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(12), U8(2),
+ B(Star), R(12),
+ B(Mov), R(closure), R(13),
+ B(Mov), R(this), R(14),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(13), U8(2),
B(Star), R(2),
/* 16 E> */ B(StackCheck),
B(CallJSRuntime), U8(%async_function_promise_create), R(0), U8(0),
- B(Star), R(10),
- B(Mov), R(context), R(14),
+ B(Star), R(11),
B(Mov), R(context), R(15),
+ B(Mov), R(context), R(16),
B(LdaZero),
- B(Star), R(6),
- B(Mov), R(context), R(18),
+ B(Star), R(7),
B(Mov), R(context), R(19),
+ B(Mov), R(context), R(20),
/* 43 S> */ B(CreateArrayLiteral), U8(3), U8(0), U8(37),
- B(Star), R(20),
- B(LdaNamedProperty), R(20), U8(4), U8(1),
+ B(Star), R(21),
+ B(LdaNamedProperty), R(21), U8(4), U8(1),
B(JumpIfUndefined), U8(17),
B(JumpIfNull), U8(15),
- B(Star), R(21),
- B(CallProperty0), R(21), R(20), U8(3),
+ B(Star), R(22),
+ B(CallProperty0), R(22), R(21), U8(3),
B(JumpIfJSReceiver), U8(23),
B(CallRuntime), U16(Runtime::kThrowSymbolAsyncIteratorInvalid), R(0), U8(0),
- B(LdaNamedProperty), R(20), U8(5), U8(5),
- B(Star), R(21),
- B(CallProperty0), R(21), R(20), U8(7),
- B(Star), R(21),
- B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(21), U8(1),
+ B(LdaNamedProperty), R(21), U8(5), U8(5),
+ B(Star), R(22),
+ B(CallProperty0), R(22), R(21), U8(7),
+ B(Star), R(22),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(22), U8(1),
B(Star), R(4),
- B(Ldar), R(11),
- B(SwitchOnSmiNoFeedback), U8(6), U8(1), I8(0),
+ /* 43 E> */ B(LdaNamedProperty), R(4), U8(6), U8(9),
+ B(Star), R(5),
+ B(Ldar), R(12),
+ B(SwitchOnSmiNoFeedback), U8(7), U8(1), I8(0),
B(LdaSmi), I8(-2),
- /* 43 E> */ B(TestEqualStrictNoFeedback), R(11),
+ B(TestEqualStrictNoFeedback), R(12),
B(JumpIfTrue), U8(4),
- B(Abort), U8(42),
- /* 40 S> */ B(LdaNamedProperty), R(4), U8(7), U8(9),
- B(Star), R(20),
- B(CallProperty0), R(20), R(4), U8(11),
- B(Star), R(21),
- B(Mov), R(2), R(20),
- B(Mov), R(10), R(22),
- B(CallJSRuntime), U8(%async_function_await_uncaught), R(20), U8(3),
- B(SuspendGenerator), R(2), R(0), U8(20), U8(0),
+ B(Abort), U8(15),
+ /* 40 S> */ B(CallProperty0), R(5), R(4), U8(11),
+ B(Star), R(22),
+ B(Mov), R(2), R(21),
+ B(Mov), R(11), R(23),
+ B(CallJSRuntime), U8(%async_function_await_uncaught), R(21), U8(3),
+ B(SuspendGenerator), R(2), R(0), U8(21), U8(0),
/* 114 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(2), R(0), U8(20),
- B(LdaSmi), I8(-2),
- B(Star), R(11),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(2), U8(1),
- B(Star), R(20),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(ResumeGenerator), R(2), R(12), R(0), U8(21),
B(Star), R(21),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(Star), R(22),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(21),
+ B(TestEqualStrictNoFeedback), R(22),
B(JumpIfTrue), U8(5),
- B(Ldar), R(20),
+ B(Ldar), R(21),
B(ReThrow),
- B(Mov), R(20), R(5),
- /* 40 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(20), U8(1),
+ B(Mov), R(21), R(6),
+ /* 40 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(21), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(5), U8(1),
- B(LdaNamedProperty), R(5), U8(8), U8(13),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
+ B(LdaNamedProperty), R(6), U8(8), U8(13),
B(JumpIfToBooleanTrue), U8(43),
- B(LdaNamedProperty), R(5), U8(9), U8(15),
- B(Star), R(7),
+ B(LdaNamedProperty), R(6), U8(9), U8(15),
+ B(Star), R(8),
B(LdaSmi), I8(2),
- B(Star), R(6),
- B(Mov), R(7), R(3),
+ B(Star), R(7),
+ B(Mov), R(8), R(3),
/* 23 E> */ B(StackCheck),
B(Mov), R(3), R(0),
/* 63 S> */ B(LdaSmi), I8(10),
@@ -698,71 +663,92 @@ bytecodes: [
B(JumpIfFalse), U8(4),
/* 103 S> */ B(Jump), U8(8),
B(LdaZero),
- B(Star), R(6),
- B(JumpLoop), U8(129), I8(0),
+ B(Star), R(7),
+ B(JumpLoop), U8(116), I8(0),
B(Jump), U8(40),
- B(Star), R(20),
+ B(Star), R(21),
B(Ldar), R(closure),
- B(CreateCatchContext), R(20), U8(10), U8(11),
- B(Star), R(19),
+ B(CreateCatchContext), R(21), U8(10), U8(11),
+ B(Star), R(20),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(19),
- B(PushContext), R(20),
+ B(Ldar), R(20),
+ B(PushContext), R(21),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(6), U8(19),
+ B(TestEqualStrict), R(7), U8(19),
B(JumpIfFalse), U8(6),
B(LdaSmi), I8(1),
- B(Star), R(6),
+ B(Star), R(7),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(21),
- B(CallRuntime), U16(Runtime::kReThrow), R(21), U8(1),
- B(PopContext), R(20),
+ B(Star), R(22),
+ B(CallRuntime), U16(Runtime::kReThrow), R(22), U8(1),
+ B(PopContext), R(21),
B(LdaSmi), I8(-1),
+ B(Star), R(18),
B(Star), R(17),
- B(Star), R(16),
B(Jump), U8(7),
- B(Star), R(17),
+ B(Star), R(18),
B(LdaZero),
- B(Star), R(16),
+ B(Star), R(17),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(18),
+ B(Star), R(19),
B(LdaZero),
- B(TestEqualStrict), R(6), U8(20),
- B(JumpIfTrue), U8(199),
+ B(TestEqualStrict), R(7), U8(20),
+ B(JumpIfTrue), U8(171),
B(LdaNamedProperty), R(4), U8(12), U8(21),
- B(Star), R(8),
+ B(Star), R(9),
B(TestUndetectable),
B(JumpIfFalse), U8(4),
- B(Jump), U8(188),
+ B(Jump), U8(160),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(6), U8(23),
- B(JumpIfFalse), U8(109),
- B(Ldar), R(8),
+ B(TestEqualStrict), R(7), U8(23),
+ B(JumpIfFalse), U8(88),
+ B(Ldar), R(9),
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(143),
- B(Star), R(19),
- B(LdaConstant), U8(13),
+ B(Wide), B(LdaSmi), I16(144),
B(Star), R(20),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(19), U8(2),
+ B(LdaConstant), U8(13),
+ B(Star), R(21),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(20), U8(2),
B(Throw),
- B(Mov), R(context), R(19),
- B(Mov), R(8), R(20),
+ B(Mov), R(context), R(20),
+ B(Mov), R(9), R(21),
+ B(Mov), R(4), R(22),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(21), U8(2),
+ B(Star), R(22),
+ B(Mov), R(2), R(21),
+ B(Mov), R(11), R(23),
+ B(CallJSRuntime), U8(%async_function_await_caught), R(21), U8(3),
+ B(SuspendGenerator), R(2), R(0), U8(21), U8(1),
+ /* 114 S> */ B(Return),
+ B(ResumeGenerator), R(2), R(12), R(0), U8(21),
+ B(Star), R(21),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(Star), R(22),
+ B(LdaZero),
+ B(TestEqualStrictNoFeedback), R(22),
+ B(JumpIfTrue), U8(5),
+ B(Ldar), R(21),
+ B(ReThrow),
+ B(Ldar), R(21),
+ B(Jump), U8(6),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Ldar), R(20),
+ B(Jump), U8(67),
+ B(Mov), R(9), R(20),
B(Mov), R(4), R(21),
B(InvokeIntrinsic), U8(Runtime::k_Call), R(20), U8(2),
B(Star), R(21),
B(Mov), R(2), R(20),
- B(Mov), R(10), R(22),
- B(CallJSRuntime), U8(%async_function_await_caught), R(20), U8(3),
- B(SuspendGenerator), R(2), R(0), U8(20), U8(1),
+ B(Mov), R(11), R(22),
+ B(CallJSRuntime), U8(%async_function_await_uncaught), R(20), U8(3),
+ B(SuspendGenerator), R(2), R(0), U8(20), U8(2),
/* 114 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(2), R(0), U8(20),
- B(LdaSmi), I8(-2),
- B(Star), R(11),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(2), U8(1),
+ B(ResumeGenerator), R(2), R(12), R(0), U8(20),
B(Star), R(20),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
B(Star), R(21),
@@ -771,111 +757,78 @@ bytecodes: [
B(JumpIfTrue), U8(5),
B(Ldar), R(20),
B(ReThrow),
- B(Ldar), R(20),
- B(Jump), U8(20),
- B(Star), R(20),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(20), U8(10), U8(14),
- B(Star), R(19),
- B(LdaTheHole),
- B(SetPendingMessage),
- B(Ldar), R(19),
- B(PushContext), R(20),
- B(PopContext), R(20),
- B(Jump), U8(74),
- B(Mov), R(8), R(19),
- B(Mov), R(4), R(20),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(19), U8(2),
- B(Star), R(20),
- B(Mov), R(2), R(19),
- B(Mov), R(10), R(21),
- B(CallJSRuntime), U8(%async_function_await_uncaught), R(19), U8(3),
- B(SuspendGenerator), R(2), R(0), U8(19), U8(2),
- /* 114 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(2), R(0), U8(19),
- B(LdaSmi), I8(-2),
- B(Star), R(11),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(2), U8(1),
- B(Star), R(19),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
- B(Star), R(20),
- B(LdaZero),
- B(TestEqualStrictNoFeedback), R(20),
- B(JumpIfTrue), U8(5),
- B(Ldar), R(19),
- B(ReThrow),
- B(Mov), R(19), R(9),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(19), U8(1),
+ B(Mov), R(20), R(10),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(20), U8(1),
B(JumpIfToBooleanFalse), U8(4),
B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(9), U8(1),
- B(Ldar), R(18),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
+ B(Ldar), R(19),
B(SetPendingMessage),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(16),
+ B(TestEqualStrictNoFeedback), R(17),
B(JumpIfFalse), U8(5),
- B(Ldar), R(17),
+ B(Ldar), R(18),
B(ReThrow),
B(LdaUndefined),
- B(Star), R(17),
- B(Mov), R(10), R(16),
- B(CallJSRuntime), U8(%promise_resolve), R(16), U8(2),
+ B(Star), R(18),
+ B(Mov), R(11), R(17),
+ B(CallJSRuntime), U8(%promise_resolve), R(17), U8(2),
B(LdaZero),
- B(Star), R(12),
- B(Mov), R(10), R(13),
+ B(Star), R(13),
+ B(Mov), R(11), R(14),
B(Jump), U8(58),
B(Jump), U8(42),
- B(Star), R(16),
+ B(Star), R(17),
B(Ldar), R(closure),
- B(CreateCatchContext), R(16), U8(10), U8(15),
- B(Star), R(15),
+ B(CreateCatchContext), R(17), U8(10), U8(14),
+ B(Star), R(16),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(15),
- B(PushContext), R(16),
+ B(Ldar), R(16),
+ B(PushContext), R(17),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(18),
- B(LdaFalse),
B(Star), R(19),
- B(Mov), R(10), R(17),
- B(CallJSRuntime), U8(%promise_internal_reject), R(17), U8(3),
- B(PopContext), R(16),
+ B(LdaFalse),
+ B(Star), R(20),
+ B(Mov), R(11), R(18),
+ B(CallJSRuntime), U8(%promise_internal_reject), R(18), U8(3),
+ B(PopContext), R(17),
B(LdaZero),
- B(Star), R(12),
- B(Mov), R(10), R(13),
+ B(Star), R(13),
+ B(Mov), R(11), R(14),
B(Jump), U8(16),
B(LdaSmi), I8(-1),
+ B(Star), R(14),
B(Star), R(13),
- B(Star), R(12),
B(Jump), U8(8),
- B(Star), R(13),
+ B(Star), R(14),
B(LdaSmi), I8(1),
- B(Star), R(12),
+ B(Star), R(13),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(14),
- B(CallJSRuntime), U8(%async_function_promise_release), R(10), U8(1),
- B(Ldar), R(14),
+ B(Star), R(15),
+ B(CallJSRuntime), U8(%async_function_promise_release), R(11), U8(1),
+ B(Ldar), R(15),
B(SetPendingMessage),
- B(Ldar), R(12),
- B(SwitchOnSmiNoFeedback), U8(16), U8(2), I8(0),
- B(Jump), U8(8),
B(Ldar), R(13),
+ B(SwitchOnSmiNoFeedback), U8(15), U8(2), I8(0),
+ B(Jump), U8(8),
+ B(Ldar), R(14),
/* 114 S> */ B(Return),
- B(Ldar), R(13),
+ B(Ldar), R(14),
B(ReThrow),
B(LdaUndefined),
/* 114 S> */ B(Return),
]
constant pool: [
- Smi [89],
- Smi [357],
- Smi [437],
+ Smi [95],
+ Smi [350],
+ Smi [409],
TUPLE2_TYPE,
SYMBOL_TYPE,
SYMBOL_TYPE,
- Smi [40],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
+ Smi [34],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
@@ -883,16 +836,15 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
FIXED_ARRAY_TYPE,
- FIXED_ARRAY_TYPE,
Smi [6],
Smi [9],
]
handlers: [
- [46, 566, 574],
- [49, 524, 526],
- [55, 275, 283],
- [58, 235, 237],
- [343, 401, 403],
+ [46, 531, 539],
+ [49, 489, 491],
+ [55, 268, 276],
+ [58, 228, 230],
+ [336, 387, 389],
]
---
@@ -903,186 +855,180 @@ snippet: "
}
f();
"
-frame size: 19
+frame size: 20
parameter count: 1
-bytecode array length: 417
+bytecode array length: 403
bytecodes: [
/* 16 E> */ B(StackCheck),
B(CallJSRuntime), U8(%async_function_promise_create), R(0), U8(0),
- B(Star), R(8),
- B(Mov), R(context), R(11),
+ B(Star), R(9),
B(Mov), R(context), R(12),
- /* 31 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(13),
- B(Mov), R(13), R(1),
+ B(Mov), R(context), R(13),
+ /* 31 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(14),
+ B(Mov), R(14), R(1),
B(LdaZero),
- B(Star), R(4),
- B(Mov), R(context), R(15),
+ B(Star), R(5),
B(Mov), R(context), R(16),
+ B(Mov), R(context), R(17),
/* 68 S> */ B(CreateArrayLiteral), U8(1), U8(1), U8(37),
- B(Star), R(17),
- B(LdaNamedProperty), R(17), U8(2), U8(2),
B(Star), R(18),
- B(CallProperty0), R(18), R(17), U8(4),
+ B(LdaNamedProperty), R(18), U8(2), U8(2),
+ B(Star), R(19),
+ B(CallProperty0), R(19), R(18), U8(4),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star), R(2),
- /* 59 S> */ B(LdaNamedProperty), R(2), U8(3), U8(6),
- B(Star), R(17),
- B(CallProperty0), R(17), R(2), U8(8),
+ /* 68 E> */ B(LdaNamedProperty), R(2), U8(3), U8(6),
B(Star), R(3),
- /* 59 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(3), U8(1),
+ /* 59 S> */ B(CallProperty0), R(3), R(2), U8(8),
+ B(Star), R(4),
+ /* 59 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(4), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(3), U8(1),
- B(LdaNamedProperty), R(3), U8(4), U8(10),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(4), U8(1),
+ B(LdaNamedProperty), R(4), U8(4), U8(10),
B(JumpIfToBooleanTrue), U8(30),
- /* 58 E> */ B(LdaNamedProperty), R(3), U8(5), U8(12),
- B(Star), R(5),
+ /* 58 E> */ B(LdaNamedProperty), R(4), U8(5), U8(12),
+ B(Star), R(6),
B(LdaSmi), I8(2),
- B(Star), R(4),
- B(Ldar), R(5),
+ B(Star), R(5),
+ B(Ldar), R(6),
B(StaNamedProperty), R(1), U8(6), U8(14),
/* 53 E> */ B(StackCheck),
/* 87 S> */ B(LdaNamedProperty), R(1), U8(6), U8(16),
- B(Star), R(14),
+ B(Star), R(15),
B(LdaZero),
- B(Star), R(13),
+ B(Star), R(14),
B(Jump), U8(56),
B(Jump), U8(40),
- B(Star), R(17),
+ B(Star), R(18),
B(Ldar), R(closure),
- B(CreateCatchContext), R(17), U8(7), U8(8),
- B(Star), R(16),
+ B(CreateCatchContext), R(18), U8(7), U8(8),
+ B(Star), R(17),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(16),
- B(PushContext), R(17),
+ B(Ldar), R(17),
+ B(PushContext), R(18),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(4), U8(18),
+ B(TestEqualStrict), R(5), U8(18),
B(JumpIfFalse), U8(6),
B(LdaSmi), I8(1),
- B(Star), R(4),
+ B(Star), R(5),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(18),
- B(CallRuntime), U16(Runtime::kReThrow), R(18), U8(1),
- B(PopContext), R(17),
+ B(Star), R(19),
+ B(CallRuntime), U16(Runtime::kReThrow), R(19), U8(1),
+ B(PopContext), R(18),
B(LdaSmi), I8(-1),
+ B(Star), R(15),
B(Star), R(14),
- B(Star), R(13),
B(Jump), U8(8),
- B(Star), R(14),
+ B(Star), R(15),
B(LdaSmi), I8(1),
- B(Star), R(13),
+ B(Star), R(14),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(15),
+ B(Star), R(16),
B(LdaZero),
- B(TestEqualStrict), R(4), U8(19),
- B(JumpIfTrue), U8(104),
+ B(TestEqualStrict), R(5), U8(19),
+ B(JumpIfTrue), U8(90),
B(LdaNamedProperty), R(2), U8(9), U8(20),
- B(Star), R(6),
+ B(Star), R(7),
B(TestUndetectable),
B(JumpIfFalse), U8(4),
- B(Jump), U8(93),
+ B(Jump), U8(79),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(4), U8(22),
- B(JumpIfFalse), U8(61),
- B(Ldar), R(6),
+ B(TestEqualStrict), R(5), U8(22),
+ B(JumpIfFalse), U8(47),
+ B(Ldar), R(7),
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(143),
- B(Star), R(16),
- B(LdaConstant), U8(10),
+ B(Wide), B(LdaSmi), I16(144),
B(Star), R(17),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(16), U8(2),
+ B(LdaConstant), U8(10),
+ B(Star), R(18),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(17), U8(2),
B(Throw),
- B(Mov), R(context), R(16),
- B(Mov), R(6), R(17),
- B(Mov), R(2), R(18),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(17), U8(2),
- B(Jump), U8(20),
- B(Star), R(17),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(17), U8(7), U8(11),
- B(Star), R(16),
+ B(Mov), R(context), R(17),
+ B(Mov), R(7), R(18),
+ B(Mov), R(2), R(19),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(18), U8(2),
+ B(Jump), U8(6),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(16),
- B(PushContext), R(17),
- B(PopContext), R(17),
+ B(Ldar), R(17),
B(Jump), U8(27),
- B(Mov), R(6), R(16),
- B(Mov), R(2), R(17),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(16), U8(2),
- B(Star), R(7),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(7), U8(1),
+ B(Mov), R(7), R(17),
+ B(Mov), R(2), R(18),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(17), U8(2),
+ B(Star), R(8),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(8), U8(1),
B(JumpIfToBooleanFalse), U8(4),
B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(7), U8(1),
- B(Ldar), R(15),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(8), U8(1),
+ B(Ldar), R(16),
B(SetPendingMessage),
- B(Ldar), R(13),
- B(SwitchOnSmiNoFeedback), U8(12), U8(2), I8(0),
+ B(Ldar), R(14),
+ B(SwitchOnSmiNoFeedback), U8(11), U8(2), I8(0),
B(Jump), U8(13),
B(LdaZero),
- B(Star), R(9),
- B(Mov), R(14), R(10),
+ B(Star), R(10),
+ B(Mov), R(15), R(11),
B(Jump), U8(81),
- B(Ldar), R(14),
+ B(Ldar), R(15),
B(ReThrow),
B(LdaUndefined),
- B(Star), R(14),
- B(Mov), R(8), R(13),
- B(CallJSRuntime), U8(%promise_resolve), R(13), U8(2),
+ B(Star), R(15),
+ B(Mov), R(9), R(14),
+ B(CallJSRuntime), U8(%promise_resolve), R(14), U8(2),
B(LdaSmi), I8(1),
- B(Star), R(9),
- B(Mov), R(8), R(10),
+ B(Star), R(10),
+ B(Mov), R(9), R(11),
B(Jump), U8(59),
B(Jump), U8(43),
- B(Star), R(13),
+ B(Star), R(14),
B(Ldar), R(closure),
- B(CreateCatchContext), R(13), U8(7), U8(14),
- B(Star), R(12),
+ B(CreateCatchContext), R(14), U8(7), U8(13),
+ B(Star), R(13),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(12),
- B(PushContext), R(13),
+ B(Ldar), R(13),
+ B(PushContext), R(14),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(15),
- B(LdaFalse),
B(Star), R(16),
- B(Mov), R(8), R(14),
- B(CallJSRuntime), U8(%promise_internal_reject), R(14), U8(3),
- B(PopContext), R(13),
+ B(LdaFalse),
+ B(Star), R(17),
+ B(Mov), R(9), R(15),
+ B(CallJSRuntime), U8(%promise_internal_reject), R(15), U8(3),
+ B(PopContext), R(14),
B(LdaSmi), I8(1),
- B(Star), R(9),
- B(Mov), R(8), R(10),
+ B(Star), R(10),
+ B(Mov), R(9), R(11),
B(Jump), U8(16),
B(LdaSmi), I8(-1),
+ B(Star), R(11),
B(Star), R(10),
- B(Star), R(9),
B(Jump), U8(8),
- B(Star), R(10),
+ B(Star), R(11),
B(LdaSmi), I8(2),
- B(Star), R(9),
+ B(Star), R(10),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(11),
- B(CallJSRuntime), U8(%async_function_promise_release), R(8), U8(1),
- B(Ldar), R(11),
+ B(Star), R(12),
+ B(CallJSRuntime), U8(%async_function_promise_release), R(9), U8(1),
+ B(Ldar), R(12),
B(SetPendingMessage),
- B(Ldar), R(9),
- B(SwitchOnSmiNoFeedback), U8(15), U8(3), I8(0),
+ B(Ldar), R(10),
+ B(SwitchOnSmiNoFeedback), U8(14), U8(3), I8(0),
B(Jump), U8(21),
- B(Mov), R(8), R(12),
- B(Mov), R(10), R(13),
- B(CallJSRuntime), U8(%promise_resolve), R(12), U8(2),
- B(Ldar), R(8),
+ B(Mov), R(9), R(13),
+ B(Mov), R(11), R(14),
+ B(CallJSRuntime), U8(%promise_resolve), R(13), U8(2),
+ B(Ldar), R(9),
/* 96 S> */ B(Return),
- B(Ldar), R(10),
+ B(Ldar), R(11),
/* 96 S> */ B(Return),
- B(Ldar), R(10),
+ B(Ldar), R(11),
B(ReThrow),
B(LdaUndefined),
/* 96 S> */ B(Return),
@@ -1099,7 +1045,6 @@ constant pool: [
FIXED_ARRAY_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
- FIXED_ARRAY_TYPE,
Smi [6],
Smi [14],
FIXED_ARRAY_TYPE,
@@ -1108,8 +1053,8 @@ constant pool: [
Smi [22],
]
handlers: [
- [10, 363, 371],
- [13, 320, 322],
+ [10, 349, 357],
+ [13, 306, 308],
[27, 153, 161],
[30, 113, 115],
[222, 232, 234],
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
index caf3e26cf7..46e62ed891 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
@@ -9,118 +9,112 @@ wrap: yes
snippet: "
for (var p of [0, 1, 2]) {}
"
-frame size: 14
+frame size: 15
parameter count: 1
-bytecode array length: 262
+bytecode array length: 248
bytecodes: [
/* 30 E> */ B(StackCheck),
B(LdaZero),
- B(Star), R(4),
- B(Mov), R(context), R(10),
+ B(Star), R(5),
B(Mov), R(context), R(11),
+ B(Mov), R(context), R(12),
/* 48 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
- B(Star), R(12),
- B(LdaNamedProperty), R(12), U8(1), U8(1),
B(Star), R(13),
- B(CallProperty0), R(13), R(12), U8(3),
+ B(LdaNamedProperty), R(13), U8(1), U8(1),
+ B(Star), R(14),
+ B(CallProperty0), R(14), R(13), U8(3),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star), R(2),
- /* 43 S> */ B(LdaNamedProperty), R(2), U8(2), U8(5),
- B(Star), R(12),
- B(CallProperty0), R(12), R(2), U8(7),
+ /* 48 E> */ B(LdaNamedProperty), R(2), U8(2), U8(5),
B(Star), R(3),
- /* 43 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(3), U8(1),
+ /* 43 S> */ B(CallProperty0), R(3), R(2), U8(7),
+ B(Star), R(4),
+ /* 43 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(4), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(3), U8(1),
- B(LdaNamedProperty), R(3), U8(3), U8(9),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(4), U8(1),
+ B(LdaNamedProperty), R(4), U8(3), U8(9),
B(JumpIfToBooleanTrue), U8(25),
- B(LdaNamedProperty), R(3), U8(4), U8(11),
- B(Star), R(5),
+ B(LdaNamedProperty), R(4), U8(4), U8(11),
+ B(Star), R(6),
B(LdaSmi), I8(2),
- B(Star), R(4),
- B(Mov), R(5), R(0),
+ B(Star), R(5),
+ B(Mov), R(6), R(0),
/* 34 E> */ B(StackCheck),
B(Mov), R(0), R(1),
B(LdaZero),
- B(Star), R(4),
- B(JumpLoop), U8(50), I8(0),
+ B(Star), R(5),
+ B(JumpLoop), U8(44), I8(0),
B(Jump), U8(36),
- B(Star), R(12),
+ B(Star), R(13),
B(Ldar), R(closure),
- B(CreateCatchContext), R(12), U8(5), U8(6),
- B(PushContext), R(12),
- B(Star), R(11),
+ B(CreateCatchContext), R(13), U8(5), U8(6),
+ B(PushContext), R(13),
+ B(Star), R(12),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(4), U8(13),
+ B(TestEqualStrict), R(5), U8(13),
B(JumpIfFalse), U8(6),
B(LdaSmi), I8(1),
- B(Star), R(4),
+ B(Star), R(5),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(13),
- B(CallRuntime), U16(Runtime::kReThrow), R(13), U8(1),
- B(PopContext), R(12),
+ B(Star), R(14),
+ B(CallRuntime), U16(Runtime::kReThrow), R(14), U8(1),
+ B(PopContext), R(13),
B(LdaSmi), I8(-1),
+ B(Star), R(10),
B(Star), R(9),
- B(Star), R(8),
B(Jump), U8(7),
- B(Star), R(9),
+ B(Star), R(10),
B(LdaZero),
- B(Star), R(8),
+ B(Star), R(9),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(10),
+ B(Star), R(11),
B(LdaZero),
- B(TestEqualStrict), R(4), U8(14),
- B(JumpIfTrue), U8(104),
+ B(TestEqualStrict), R(5), U8(14),
+ B(JumpIfTrue), U8(90),
B(LdaNamedProperty), R(2), U8(7), U8(15),
- B(Star), R(6),
+ B(Star), R(7),
B(TestUndetectable),
B(JumpIfFalse), U8(4),
- B(Jump), U8(93),
+ B(Jump), U8(79),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(4), U8(17),
- B(JumpIfFalse), U8(61),
- B(Ldar), R(6),
+ B(TestEqualStrict), R(5), U8(17),
+ B(JumpIfFalse), U8(47),
+ B(Ldar), R(7),
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(143),
- B(Star), R(11),
- B(LdaConstant), U8(8),
+ B(Wide), B(LdaSmi), I16(144),
B(Star), R(12),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(11), U8(2),
+ B(LdaConstant), U8(8),
+ B(Star), R(13),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(12), U8(2),
B(Throw),
- B(Mov), R(context), R(11),
- B(Mov), R(6), R(12),
- B(Mov), R(2), R(13),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(12), U8(2),
- B(Jump), U8(20),
- B(Star), R(12),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(12), U8(5), U8(9),
- B(Star), R(11),
+ B(Mov), R(context), R(12),
+ B(Mov), R(7), R(13),
+ B(Mov), R(2), R(14),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(13), U8(2),
+ B(Jump), U8(6),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(11),
- B(PushContext), R(12),
- B(PopContext), R(12),
+ B(Ldar), R(12),
B(Jump), U8(27),
- B(Mov), R(6), R(11),
- B(Mov), R(2), R(12),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(11), U8(2),
- B(Star), R(7),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(7), U8(1),
+ B(Mov), R(7), R(12),
+ B(Mov), R(2), R(13),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(12), U8(2),
+ B(Star), R(8),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(8), U8(1),
B(JumpIfToBooleanFalse), U8(4),
B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(7), U8(1),
- B(Ldar), R(10),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(8), U8(1),
+ B(Ldar), R(11),
B(SetPendingMessage),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(8),
+ B(TestEqualStrictNoFeedback), R(9),
B(JumpIfFalse), U8(5),
- B(Ldar), R(9),
+ B(Ldar), R(10),
B(ReThrow),
B(LdaUndefined),
/* 62 S> */ B(Return),
@@ -135,7 +129,6 @@ constant pool: [
FIXED_ARRAY_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
- FIXED_ARRAY_TYPE,
]
handlers: [
[7, 124, 132],
@@ -148,122 +141,116 @@ snippet: "
var x = 'potatoes';
for (var p of x) { return p; }
"
-frame size: 15
+frame size: 16
parameter count: 1
-bytecode array length: 272
+bytecode array length: 258
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaConstant), U8(0),
B(Star), R(0),
B(LdaZero),
- B(Star), R(5),
- B(Mov), R(context), R(11),
+ B(Star), R(6),
B(Mov), R(context), R(12),
+ B(Mov), R(context), R(13),
/* 68 S> */ B(LdaNamedProperty), R(0), U8(1), U8(0),
- B(Star), R(14),
- B(CallProperty0), R(14), R(0), U8(2),
- B(Mov), R(0), R(13),
+ B(Star), R(15),
+ B(CallProperty0), R(15), R(0), U8(2),
+ B(Mov), R(0), R(14),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star), R(3),
- /* 63 S> */ B(LdaNamedProperty), R(3), U8(2), U8(4),
- B(Star), R(13),
- B(CallProperty0), R(13), R(3), U8(6),
+ /* 68 E> */ B(LdaNamedProperty), R(3), U8(2), U8(4),
B(Star), R(4),
- /* 63 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(4), U8(1),
+ /* 63 S> */ B(CallProperty0), R(4), R(3), U8(6),
+ B(Star), R(5),
+ /* 63 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(5), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(4), U8(1),
- B(LdaNamedProperty), R(4), U8(3), U8(8),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(5), U8(1),
+ B(LdaNamedProperty), R(5), U8(3), U8(8),
B(JumpIfToBooleanTrue), U8(27),
- B(LdaNamedProperty), R(4), U8(4), U8(10),
- B(Star), R(6),
+ B(LdaNamedProperty), R(5), U8(4), U8(10),
+ B(Star), R(7),
B(LdaSmi), I8(2),
- B(Star), R(5),
- B(Mov), R(6), R(1),
+ B(Star), R(6),
+ B(Mov), R(7), R(1),
/* 54 E> */ B(StackCheck),
B(Mov), R(1), R(2),
/* 73 S> */ B(LdaZero),
- B(Star), R(9),
- B(Mov), R(6), R(10),
+ B(Star), R(10),
+ B(Mov), R(7), R(11),
B(Jump), U8(52),
B(Jump), U8(36),
- B(Star), R(13),
+ B(Star), R(14),
B(Ldar), R(closure),
- B(CreateCatchContext), R(13), U8(5), U8(6),
- B(PushContext), R(13),
- B(Star), R(12),
+ B(CreateCatchContext), R(14), U8(5), U8(6),
+ B(PushContext), R(14),
+ B(Star), R(13),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(5), U8(12),
+ B(TestEqualStrict), R(6), U8(12),
B(JumpIfFalse), U8(6),
B(LdaSmi), I8(1),
- B(Star), R(5),
+ B(Star), R(6),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(14),
- B(CallRuntime), U16(Runtime::kReThrow), R(14), U8(1),
- B(PopContext), R(13),
+ B(Star), R(15),
+ B(CallRuntime), U16(Runtime::kReThrow), R(15), U8(1),
+ B(PopContext), R(14),
B(LdaSmi), I8(-1),
+ B(Star), R(11),
B(Star), R(10),
- B(Star), R(9),
B(Jump), U8(8),
- B(Star), R(10),
+ B(Star), R(11),
B(LdaSmi), I8(1),
- B(Star), R(9),
+ B(Star), R(10),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(11),
+ B(Star), R(12),
B(LdaZero),
- B(TestEqualStrict), R(5), U8(13),
- B(JumpIfTrue), U8(104),
+ B(TestEqualStrict), R(6), U8(13),
+ B(JumpIfTrue), U8(90),
B(LdaNamedProperty), R(3), U8(7), U8(14),
- B(Star), R(7),
+ B(Star), R(8),
B(TestUndetectable),
B(JumpIfFalse), U8(4),
- B(Jump), U8(93),
+ B(Jump), U8(79),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(5), U8(16),
- B(JumpIfFalse), U8(61),
- B(Ldar), R(7),
+ B(TestEqualStrict), R(6), U8(16),
+ B(JumpIfFalse), U8(47),
+ B(Ldar), R(8),
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(143),
- B(Star), R(12),
- B(LdaConstant), U8(8),
+ B(Wide), B(LdaSmi), I16(144),
B(Star), R(13),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(12), U8(2),
+ B(LdaConstant), U8(8),
+ B(Star), R(14),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(13), U8(2),
B(Throw),
- B(Mov), R(context), R(12),
- B(Mov), R(7), R(13),
- B(Mov), R(3), R(14),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(13), U8(2),
- B(Jump), U8(20),
- B(Star), R(13),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(13), U8(5), U8(9),
- B(Star), R(12),
+ B(Mov), R(context), R(13),
+ B(Mov), R(8), R(14),
+ B(Mov), R(3), R(15),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(14), U8(2),
+ B(Jump), U8(6),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(12),
- B(PushContext), R(13),
- B(PopContext), R(13),
+ B(Ldar), R(13),
B(Jump), U8(27),
- B(Mov), R(7), R(12),
- B(Mov), R(3), R(13),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(12), U8(2),
- B(Star), R(8),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(8), U8(1),
+ B(Mov), R(8), R(13),
+ B(Mov), R(3), R(14),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(13), U8(2),
+ B(Star), R(9),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(9), U8(1),
B(JumpIfToBooleanFalse), U8(4),
B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(8), U8(1),
- B(Ldar), R(11),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(9), U8(1),
+ B(Ldar), R(12),
B(SetPendingMessage),
- B(Ldar), R(9),
- B(SwitchOnSmiNoFeedback), U8(10), U8(2), I8(0),
- B(Jump), U8(8),
B(Ldar), R(10),
+ B(SwitchOnSmiNoFeedback), U8(9), U8(2), I8(0),
+ B(Jump), U8(8),
+ B(Ldar), R(11),
/* 85 S> */ B(Return),
- B(Ldar), R(10),
+ B(Ldar), R(11),
B(ReThrow),
B(LdaUndefined),
/* 85 S> */ B(Return),
@@ -278,7 +265,6 @@ constant pool: [
FIXED_ARRAY_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
- FIXED_ARRAY_TYPE,
Smi [6],
Smi [9],
]
@@ -295,38 +281,38 @@ snippet: "
if (x == 20) break;
}
"
-frame size: 14
+frame size: 15
parameter count: 1
-bytecode array length: 280
+bytecode array length: 266
bytecodes: [
/* 30 E> */ B(StackCheck),
B(LdaZero),
- B(Star), R(4),
- B(Mov), R(context), R(10),
+ B(Star), R(5),
B(Mov), R(context), R(11),
+ B(Mov), R(context), R(12),
/* 48 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
- B(Star), R(12),
- B(LdaNamedProperty), R(12), U8(1), U8(1),
B(Star), R(13),
- B(CallProperty0), R(13), R(12), U8(3),
+ B(LdaNamedProperty), R(13), U8(1), U8(1),
+ B(Star), R(14),
+ B(CallProperty0), R(14), R(13), U8(3),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star), R(2),
- /* 43 S> */ B(LdaNamedProperty), R(2), U8(2), U8(5),
- B(Star), R(12),
- B(CallProperty0), R(12), R(2), U8(7),
+ /* 48 E> */ B(LdaNamedProperty), R(2), U8(2), U8(5),
B(Star), R(3),
- /* 43 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(3), U8(1),
+ /* 43 S> */ B(CallProperty0), R(3), R(2), U8(7),
+ B(Star), R(4),
+ /* 43 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(4), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(3), U8(1),
- B(LdaNamedProperty), R(3), U8(3), U8(9),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(4), U8(1),
+ B(LdaNamedProperty), R(4), U8(3), U8(9),
B(JumpIfToBooleanTrue), U8(43),
- B(LdaNamedProperty), R(3), U8(4), U8(11),
- B(Star), R(5),
+ B(LdaNamedProperty), R(4), U8(4), U8(11),
+ B(Star), R(6),
B(LdaSmi), I8(2),
- B(Star), R(4),
- B(Mov), R(5), R(0),
+ B(Star), R(5),
+ B(Mov), R(6), R(0),
/* 34 E> */ B(StackCheck),
B(Mov), R(0), R(1),
/* 66 S> */ B(LdaSmi), I8(10),
@@ -338,83 +324,77 @@ bytecodes: [
B(JumpIfFalse), U8(4),
/* 104 S> */ B(Jump), U8(8),
B(LdaZero),
- B(Star), R(4),
- B(JumpLoop), U8(68), I8(0),
+ B(Star), R(5),
+ B(JumpLoop), U8(62), I8(0),
B(Jump), U8(36),
- B(Star), R(12),
+ B(Star), R(13),
B(Ldar), R(closure),
- B(CreateCatchContext), R(12), U8(5), U8(6),
- B(PushContext), R(12),
- B(Star), R(11),
+ B(CreateCatchContext), R(13), U8(5), U8(6),
+ B(PushContext), R(13),
+ B(Star), R(12),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(4), U8(15),
+ B(TestEqualStrict), R(5), U8(15),
B(JumpIfFalse), U8(6),
B(LdaSmi), I8(1),
- B(Star), R(4),
+ B(Star), R(5),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(13),
- B(CallRuntime), U16(Runtime::kReThrow), R(13), U8(1),
- B(PopContext), R(12),
+ B(Star), R(14),
+ B(CallRuntime), U16(Runtime::kReThrow), R(14), U8(1),
+ B(PopContext), R(13),
B(LdaSmi), I8(-1),
+ B(Star), R(10),
B(Star), R(9),
- B(Star), R(8),
B(Jump), U8(7),
- B(Star), R(9),
+ B(Star), R(10),
B(LdaZero),
- B(Star), R(8),
+ B(Star), R(9),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(10),
+ B(Star), R(11),
B(LdaZero),
- B(TestEqualStrict), R(4), U8(16),
- B(JumpIfTrue), U8(104),
+ B(TestEqualStrict), R(5), U8(16),
+ B(JumpIfTrue), U8(90),
B(LdaNamedProperty), R(2), U8(7), U8(17),
- B(Star), R(6),
+ B(Star), R(7),
B(TestUndetectable),
B(JumpIfFalse), U8(4),
- B(Jump), U8(93),
+ B(Jump), U8(79),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(4), U8(19),
- B(JumpIfFalse), U8(61),
- B(Ldar), R(6),
+ B(TestEqualStrict), R(5), U8(19),
+ B(JumpIfFalse), U8(47),
+ B(Ldar), R(7),
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(143),
- B(Star), R(11),
- B(LdaConstant), U8(8),
+ B(Wide), B(LdaSmi), I16(144),
B(Star), R(12),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(11), U8(2),
+ B(LdaConstant), U8(8),
+ B(Star), R(13),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(12), U8(2),
B(Throw),
- B(Mov), R(context), R(11),
- B(Mov), R(6), R(12),
- B(Mov), R(2), R(13),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(12), U8(2),
- B(Jump), U8(20),
- B(Star), R(12),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(12), U8(5), U8(9),
- B(Star), R(11),
+ B(Mov), R(context), R(12),
+ B(Mov), R(7), R(13),
+ B(Mov), R(2), R(14),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(13), U8(2),
+ B(Jump), U8(6),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(11),
- B(PushContext), R(12),
- B(PopContext), R(12),
+ B(Ldar), R(12),
B(Jump), U8(27),
- B(Mov), R(6), R(11),
- B(Mov), R(2), R(12),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(11), U8(2),
- B(Star), R(7),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(7), U8(1),
+ B(Mov), R(7), R(12),
+ B(Mov), R(2), R(13),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(12), U8(2),
+ B(Star), R(8),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(8), U8(1),
B(JumpIfToBooleanFalse), U8(4),
B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(7), U8(1),
- B(Ldar), R(10),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(8), U8(1),
+ B(Ldar), R(11),
B(SetPendingMessage),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(8),
+ B(TestEqualStrictNoFeedback), R(9),
B(JumpIfFalse), U8(5),
- B(Ldar), R(9),
+ B(Ldar), R(10),
B(ReThrow),
B(LdaUndefined),
/* 113 S> */ B(Return),
@@ -429,7 +409,6 @@ constant pool: [
FIXED_ARRAY_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
- FIXED_ARRAY_TYPE,
]
handlers: [
[7, 142, 150],
@@ -442,124 +421,118 @@ snippet: "
var x = { 'a': 1, 'b': 2 };
for (x['a'] of [1,2,3]) { return x['a']; }
"
-frame size: 13
+frame size: 14
parameter count: 1
-bytecode array length: 282
+bytecode array length: 268
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(7),
- B(Mov), R(7), R(0),
+ /* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(8),
+ B(Mov), R(8), R(0),
B(LdaZero),
- B(Star), R(3),
- B(Mov), R(context), R(9),
+ B(Star), R(4),
B(Mov), R(context), R(10),
+ B(Mov), R(context), R(11),
/* 77 S> */ B(CreateArrayLiteral), U8(1), U8(1), U8(37),
- B(Star), R(11),
- B(LdaNamedProperty), R(11), U8(2), U8(2),
B(Star), R(12),
- B(CallProperty0), R(12), R(11), U8(4),
+ B(LdaNamedProperty), R(12), U8(2), U8(2),
+ B(Star), R(13),
+ B(CallProperty0), R(13), R(12), U8(4),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star), R(1),
- /* 68 S> */ B(LdaNamedProperty), R(1), U8(3), U8(6),
- B(Star), R(11),
- B(CallProperty0), R(11), R(1), U8(8),
+ /* 77 E> */ B(LdaNamedProperty), R(1), U8(3), U8(6),
B(Star), R(2),
- /* 68 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(2), U8(1),
+ /* 68 S> */ B(CallProperty0), R(2), R(1), U8(8),
+ B(Star), R(3),
+ /* 68 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(3), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(2), U8(1),
- B(LdaNamedProperty), R(2), U8(4), U8(10),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(3), U8(1),
+ B(LdaNamedProperty), R(3), U8(4), U8(10),
B(JumpIfToBooleanTrue), U8(30),
- /* 67 E> */ B(LdaNamedProperty), R(2), U8(5), U8(12),
- B(Star), R(4),
+ /* 67 E> */ B(LdaNamedProperty), R(3), U8(5), U8(12),
+ B(Star), R(5),
B(LdaSmi), I8(2),
- B(Star), R(3),
- B(Ldar), R(4),
+ B(Star), R(4),
+ B(Ldar), R(5),
B(StaNamedProperty), R(0), U8(6), U8(14),
/* 62 E> */ B(StackCheck),
/* 96 S> */ B(LdaNamedProperty), R(0), U8(6), U8(16),
- B(Star), R(8),
+ B(Star), R(9),
B(LdaZero),
- B(Star), R(7),
+ B(Star), R(8),
B(Jump), U8(52),
B(Jump), U8(36),
- B(Star), R(11),
+ B(Star), R(12),
B(Ldar), R(closure),
- B(CreateCatchContext), R(11), U8(7), U8(8),
- B(PushContext), R(11),
- B(Star), R(10),
+ B(CreateCatchContext), R(12), U8(7), U8(8),
+ B(PushContext), R(12),
+ B(Star), R(11),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(3), U8(18),
+ B(TestEqualStrict), R(4), U8(18),
B(JumpIfFalse), U8(6),
B(LdaSmi), I8(1),
- B(Star), R(3),
+ B(Star), R(4),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(12),
- B(CallRuntime), U16(Runtime::kReThrow), R(12), U8(1),
- B(PopContext), R(11),
+ B(Star), R(13),
+ B(CallRuntime), U16(Runtime::kReThrow), R(13), U8(1),
+ B(PopContext), R(12),
B(LdaSmi), I8(-1),
+ B(Star), R(9),
B(Star), R(8),
- B(Star), R(7),
B(Jump), U8(8),
- B(Star), R(8),
+ B(Star), R(9),
B(LdaSmi), I8(1),
- B(Star), R(7),
+ B(Star), R(8),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(9),
+ B(Star), R(10),
B(LdaZero),
- B(TestEqualStrict), R(3), U8(19),
- B(JumpIfTrue), U8(104),
+ B(TestEqualStrict), R(4), U8(19),
+ B(JumpIfTrue), U8(90),
B(LdaNamedProperty), R(1), U8(9), U8(20),
- B(Star), R(5),
+ B(Star), R(6),
B(TestUndetectable),
B(JumpIfFalse), U8(4),
- B(Jump), U8(93),
+ B(Jump), U8(79),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(3), U8(22),
- B(JumpIfFalse), U8(61),
- B(Ldar), R(5),
+ B(TestEqualStrict), R(4), U8(22),
+ B(JumpIfFalse), U8(47),
+ B(Ldar), R(6),
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(143),
- B(Star), R(10),
- B(LdaConstant), U8(10),
+ B(Wide), B(LdaSmi), I16(144),
B(Star), R(11),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(10), U8(2),
+ B(LdaConstant), U8(10),
+ B(Star), R(12),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(11), U8(2),
B(Throw),
- B(Mov), R(context), R(10),
- B(Mov), R(5), R(11),
- B(Mov), R(1), R(12),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(11), U8(2),
- B(Jump), U8(20),
- B(Star), R(11),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(11), U8(7), U8(11),
- B(Star), R(10),
+ B(Mov), R(context), R(11),
+ B(Mov), R(6), R(12),
+ B(Mov), R(1), R(13),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(12), U8(2),
+ B(Jump), U8(6),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(10),
- B(PushContext), R(11),
- B(PopContext), R(11),
+ B(Ldar), R(11),
B(Jump), U8(27),
- B(Mov), R(5), R(10),
- B(Mov), R(1), R(11),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(10), U8(2),
- B(Star), R(6),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(6), U8(1),
+ B(Mov), R(6), R(11),
+ B(Mov), R(1), R(12),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(11), U8(2),
+ B(Star), R(7),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(7), U8(1),
B(JumpIfToBooleanFalse), U8(4),
B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
- B(Ldar), R(9),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(7), U8(1),
+ B(Ldar), R(10),
B(SetPendingMessage),
- B(Ldar), R(7),
- B(SwitchOnSmiNoFeedback), U8(12), U8(2), I8(0),
- B(Jump), U8(8),
B(Ldar), R(8),
+ B(SwitchOnSmiNoFeedback), U8(11), U8(2), I8(0),
+ B(Jump), U8(8),
+ B(Ldar), R(9),
/* 105 S> */ B(Return),
- B(Ldar), R(8),
+ B(Ldar), R(9),
B(ReThrow),
B(LdaUndefined),
/* 105 S> */ B(Return),
@@ -576,7 +549,6 @@ constant pool: [
FIXED_ARRAY_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
- FIXED_ARRAY_TYPE,
Smi [6],
Smi [9],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
index 57b2b27ea1..1ea568ac21 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
@@ -13,118 +13,112 @@ snippet: "
}
f([1, 2, 3]);
"
-frame size: 16
+frame size: 17
parameter count: 2
-bytecode array length: 262
+bytecode array length: 248
bytecodes: [
/* 10 E> */ B(StackCheck),
B(LdaZero),
- B(Star), R(6),
- B(Mov), R(context), R(12),
+ B(Star), R(7),
B(Mov), R(context), R(13),
+ B(Mov), R(context), R(14),
/* 34 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(0),
- B(Star), R(15),
- B(CallProperty0), R(15), R(arg0), U8(2),
- B(Mov), R(arg0), R(14),
+ B(Star), R(16),
+ B(CallProperty0), R(16), R(arg0), U8(2),
+ B(Mov), R(arg0), R(15),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star), R(4),
- /* 29 S> */ B(LdaNamedProperty), R(4), U8(1), U8(4),
- B(Star), R(14),
- B(CallProperty0), R(14), R(4), U8(6),
+ /* 34 E> */ B(LdaNamedProperty), R(4), U8(1), U8(4),
B(Star), R(5),
- /* 29 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(5), U8(1),
+ /* 29 S> */ B(CallProperty0), R(5), R(4), U8(6),
+ B(Star), R(6),
+ /* 29 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(6), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(5), U8(1),
- B(LdaNamedProperty), R(5), U8(2), U8(8),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
+ B(LdaNamedProperty), R(6), U8(2), U8(8),
B(JumpIfToBooleanTrue), U8(28),
- B(LdaNamedProperty), R(5), U8(3), U8(10),
- B(Star), R(7),
+ B(LdaNamedProperty), R(6), U8(3), U8(10),
+ B(Star), R(8),
B(LdaSmi), I8(2),
- B(Star), R(6),
- B(Mov), R(7), R(3),
+ B(Star), R(7),
+ B(Mov), R(8), R(3),
/* 20 E> */ B(StackCheck),
B(Mov), R(3), R(1),
/* 49 S> */ B(Mov), R(1), R(0),
B(LdaZero),
- B(Star), R(6),
- B(JumpLoop), U8(53), I8(0),
+ B(Star), R(7),
+ B(JumpLoop), U8(47), I8(0),
B(Jump), U8(36),
- B(Star), R(14),
+ B(Star), R(15),
B(Ldar), R(closure),
- /* 49 E> */ B(CreateCatchContext), R(14), U8(4), U8(5),
- B(PushContext), R(14),
- B(Star), R(13),
+ /* 49 E> */ B(CreateCatchContext), R(15), U8(4), U8(5),
+ B(PushContext), R(15),
+ B(Star), R(14),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(6), U8(12),
+ B(TestEqualStrict), R(7), U8(12),
B(JumpIfFalse), U8(6),
B(LdaSmi), I8(1),
- B(Star), R(6),
+ B(Star), R(7),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(15),
- B(CallRuntime), U16(Runtime::kReThrow), R(15), U8(1),
- B(PopContext), R(14),
+ B(Star), R(16),
+ B(CallRuntime), U16(Runtime::kReThrow), R(16), U8(1),
+ B(PopContext), R(15),
B(LdaSmi), I8(-1),
+ B(Star), R(12),
B(Star), R(11),
- B(Star), R(10),
B(Jump), U8(7),
- B(Star), R(11),
+ B(Star), R(12),
B(LdaZero),
- B(Star), R(10),
+ B(Star), R(11),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(12),
+ B(Star), R(13),
B(LdaZero),
- B(TestEqualStrict), R(6), U8(13),
- B(JumpIfTrue), U8(104),
+ B(TestEqualStrict), R(7), U8(13),
+ B(JumpIfTrue), U8(90),
B(LdaNamedProperty), R(4), U8(6), U8(14),
- B(Star), R(8),
+ B(Star), R(9),
B(TestUndetectable),
B(JumpIfFalse), U8(4),
- B(Jump), U8(93),
+ B(Jump), U8(79),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(6), U8(16),
- B(JumpIfFalse), U8(61),
- B(Ldar), R(8),
+ B(TestEqualStrict), R(7), U8(16),
+ B(JumpIfFalse), U8(47),
+ B(Ldar), R(9),
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(143),
- B(Star), R(13),
- B(LdaConstant), U8(7),
+ B(Wide), B(LdaSmi), I16(144),
B(Star), R(14),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(13), U8(2),
+ B(LdaConstant), U8(7),
+ B(Star), R(15),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(14), U8(2),
B(Throw),
- B(Mov), R(context), R(13),
- B(Mov), R(8), R(14),
- B(Mov), R(4), R(15),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(14), U8(2),
- B(Jump), U8(20),
- B(Star), R(14),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(14), U8(4), U8(8),
- B(Star), R(13),
+ B(Mov), R(context), R(14),
+ B(Mov), R(9), R(15),
+ B(Mov), R(4), R(16),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(15), U8(2),
+ B(Jump), U8(6),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(13),
- B(PushContext), R(14),
- B(PopContext), R(14),
+ B(Ldar), R(14),
B(Jump), U8(27),
- B(Mov), R(8), R(13),
- B(Mov), R(4), R(14),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(13), U8(2),
- B(Star), R(9),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(9), U8(1),
+ B(Mov), R(9), R(14),
+ B(Mov), R(4), R(15),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(14), U8(2),
+ B(Star), R(10),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(10), U8(1),
B(JumpIfToBooleanFalse), U8(4),
B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(9), U8(1),
- B(Ldar), R(12),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
+ B(Ldar), R(13),
B(SetPendingMessage),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(10),
+ B(TestEqualStrictNoFeedback), R(11),
B(JumpIfFalse), U8(5),
- B(Ldar), R(11),
+ B(Ldar), R(12),
B(ReThrow),
B(LdaUndefined),
/* 54 S> */ B(Return),
@@ -138,7 +132,6 @@ constant pool: [
FIXED_ARRAY_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
- FIXED_ARRAY_TYPE,
]
handlers: [
[7, 124, 132],
@@ -153,158 +146,152 @@ snippet: "
}
f([1, 2, 3]);
"
-frame size: 23
+frame size: 24
parameter count: 2
-bytecode array length: 345
+bytecode array length: 331
bytecodes: [
B(CreateFunctionContext), U8(4),
- B(PushContext), R(8),
+ B(PushContext), R(9),
B(Ldar), R(this),
B(StaCurrentContextSlot), U8(5),
B(Ldar), R(arg0),
B(StaCurrentContextSlot), U8(4),
B(CreateMappedArguments),
B(StaCurrentContextSlot), U8(7),
- B(Ldar), R(7),
+ B(Ldar), R(8),
B(StaCurrentContextSlot), U8(6),
/* 10 E> */ B(StackCheck),
B(Ldar), R(closure),
B(CreateBlockContext), U8(0),
- B(PushContext), R(9),
+ B(PushContext), R(10),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
B(LdaZero),
- B(Star), R(3),
- B(Mov), R(context), R(12),
+ B(Star), R(4),
B(Mov), R(context), R(13),
- /* 34 S> */ B(LdaContextSlot), R(9), U8(4), U8(0),
- B(Star), R(14),
- B(LdaNamedProperty), R(14), U8(1), U8(0),
+ B(Mov), R(context), R(14),
+ /* 34 S> */ B(LdaContextSlot), R(10), U8(4), U8(0),
B(Star), R(15),
- B(CallProperty0), R(15), R(14), U8(2),
+ B(LdaNamedProperty), R(15), U8(1), U8(0),
+ B(Star), R(16),
+ B(CallProperty0), R(16), R(15), U8(2),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star), R(1),
- /* 29 S> */ B(LdaNamedProperty), R(1), U8(2), U8(4),
- B(Star), R(14),
- B(CallProperty0), R(14), R(1), U8(6),
+ /* 34 E> */ B(LdaNamedProperty), R(1), U8(2), U8(4),
B(Star), R(2),
- /* 29 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(2), U8(1),
+ /* 29 S> */ B(CallProperty0), R(2), R(1), U8(6),
+ B(Star), R(3),
+ /* 29 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(3), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(2), U8(1),
- B(LdaNamedProperty), R(2), U8(3), U8(8),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(3), U8(1),
+ B(LdaNamedProperty), R(3), U8(3), U8(8),
B(JumpIfToBooleanTrue), U8(78),
- B(LdaNamedProperty), R(2), U8(4), U8(10),
- B(Star), R(4),
+ B(LdaNamedProperty), R(3), U8(4), U8(10),
+ B(Star), R(5),
B(LdaSmi), I8(2),
- B(Star), R(3),
- B(Mov), R(4), R(0),
+ B(Star), R(4),
+ B(Mov), R(5), R(0),
/* 20 E> */ B(StackCheck),
B(Ldar), R(closure),
B(CreateBlockContext), U8(5),
- B(PushContext), R(14),
+ B(PushContext), R(15),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
- B(Ldar), R(4),
+ B(Ldar), R(5),
B(StaCurrentContextSlot), U8(4),
/* 41 S> */ B(LdaLookupGlobalSlot), U8(6), U8(12), U8(3),
- B(Star), R(15),
- B(LdaConstant), U8(7),
B(Star), R(16),
+ B(LdaConstant), U8(7),
+ B(Star), R(17),
B(LdaZero),
- B(Star), R(20),
- B(LdaSmi), I8(37),
B(Star), R(21),
- B(LdaSmi), I8(41),
+ B(LdaSmi), I8(37),
B(Star), R(22),
- B(Mov), R(15), R(17),
+ B(LdaSmi), I8(41),
+ B(Star), R(23),
B(Mov), R(16), R(18),
- B(Mov), R(closure), R(19),
- B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(17), U8(6),
- B(Star), R(15),
- /* 41 E> */ B(CallUndefinedReceiver1), R(15), R(16), U8(14),
- B(PopContext), R(14),
+ B(Mov), R(17), R(19),
+ B(Mov), R(closure), R(20),
+ B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(18), U8(6),
+ B(Star), R(16),
+ /* 41 E> */ B(CallUndefinedReceiver1), R(16), R(17), U8(14),
+ B(PopContext), R(15),
B(LdaZero),
- B(Star), R(3),
- B(JumpLoop), U8(103), I8(0),
+ B(Star), R(4),
+ B(JumpLoop), U8(97), I8(0),
B(Jump), U8(36),
- B(Star), R(14),
+ B(Star), R(15),
B(Ldar), R(closure),
- B(CreateCatchContext), R(14), U8(8), U8(9),
- B(PushContext), R(14),
- B(Star), R(13),
+ B(CreateCatchContext), R(15), U8(8), U8(9),
+ B(PushContext), R(15),
+ B(Star), R(14),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(3), U8(16),
+ B(TestEqualStrict), R(4), U8(16),
B(JumpIfFalse), U8(6),
B(LdaSmi), I8(1),
- B(Star), R(3),
+ B(Star), R(4),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(15),
- B(CallRuntime), U16(Runtime::kReThrow), R(15), U8(1),
- B(PopContext), R(14),
+ B(Star), R(16),
+ B(CallRuntime), U16(Runtime::kReThrow), R(16), U8(1),
+ B(PopContext), R(15),
B(LdaSmi), I8(-1),
+ B(Star), R(12),
B(Star), R(11),
- B(Star), R(10),
B(Jump), U8(7),
- B(Star), R(11),
+ B(Star), R(12),
B(LdaZero),
- B(Star), R(10),
+ B(Star), R(11),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(12),
+ B(Star), R(13),
B(LdaZero),
- B(TestEqualStrict), R(3), U8(17),
- B(JumpIfTrue), U8(104),
+ B(TestEqualStrict), R(4), U8(17),
+ B(JumpIfTrue), U8(90),
B(LdaNamedProperty), R(1), U8(10), U8(18),
- B(Star), R(5),
+ B(Star), R(6),
B(TestUndetectable),
B(JumpIfFalse), U8(4),
- B(Jump), U8(93),
+ B(Jump), U8(79),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(3), U8(20),
- B(JumpIfFalse), U8(61),
- B(Ldar), R(5),
+ B(TestEqualStrict), R(4), U8(20),
+ B(JumpIfFalse), U8(47),
+ B(Ldar), R(6),
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(143),
- B(Star), R(13),
- B(LdaConstant), U8(11),
+ B(Wide), B(LdaSmi), I16(144),
B(Star), R(14),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(13), U8(2),
+ B(LdaConstant), U8(11),
+ B(Star), R(15),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(14), U8(2),
B(Throw),
- B(Mov), R(context), R(13),
- B(Mov), R(5), R(14),
- B(Mov), R(1), R(15),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(14), U8(2),
- B(Jump), U8(20),
- B(Star), R(14),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(14), U8(8), U8(12),
- B(Star), R(13),
+ B(Mov), R(context), R(14),
+ B(Mov), R(6), R(15),
+ B(Mov), R(1), R(16),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(15), U8(2),
+ B(Jump), U8(6),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(13),
- B(PushContext), R(14),
- B(PopContext), R(14),
+ B(Ldar), R(14),
B(Jump), U8(27),
- B(Mov), R(5), R(13),
- B(Mov), R(1), R(14),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(13), U8(2),
- B(Star), R(6),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(6), U8(1),
+ B(Mov), R(6), R(14),
+ B(Mov), R(1), R(15),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(14), U8(2),
+ B(Star), R(7),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(7), U8(1),
B(JumpIfToBooleanFalse), U8(4),
B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
- B(Ldar), R(12),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(7), U8(1),
+ B(Ldar), R(13),
B(SetPendingMessage),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(10),
+ B(TestEqualStrictNoFeedback), R(11),
B(JumpIfFalse), U8(5),
- B(Ldar), R(11),
+ B(Ldar), R(12),
B(ReThrow),
- B(PopContext), R(9),
+ B(PopContext), R(10),
B(LdaUndefined),
/* 54 S> */ B(Return),
]
@@ -321,7 +308,6 @@ constant pool: [
FIXED_ARRAY_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
- FIXED_ARRAY_TYPE,
]
handlers: [
[35, 205, 213],
@@ -336,127 +322,121 @@ snippet: "
}
f([1, 2, 3]);
"
-frame size: 14
+frame size: 15
parameter count: 2
-bytecode array length: 280
+bytecode array length: 266
bytecodes: [
/* 10 E> */ B(StackCheck),
B(LdaZero),
- B(Star), R(4),
- B(Mov), R(context), R(10),
+ B(Star), R(5),
B(Mov), R(context), R(11),
+ B(Mov), R(context), R(12),
/* 34 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(0),
- B(Star), R(13),
- B(CallProperty0), R(13), R(arg0), U8(2),
- B(Mov), R(arg0), R(12),
+ B(Star), R(14),
+ B(CallProperty0), R(14), R(arg0), U8(2),
+ B(Mov), R(arg0), R(13),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star), R(2),
- /* 29 S> */ B(LdaNamedProperty), R(2), U8(1), U8(4),
- B(Star), R(12),
- B(CallProperty0), R(12), R(2), U8(6),
+ /* 34 E> */ B(LdaNamedProperty), R(2), U8(1), U8(4),
B(Star), R(3),
- /* 29 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(3), U8(1),
+ /* 29 S> */ B(CallProperty0), R(3), R(2), U8(6),
+ B(Star), R(4),
+ /* 29 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(4), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(3), U8(1),
- B(LdaNamedProperty), R(3), U8(2), U8(8),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(4), U8(1),
+ B(LdaNamedProperty), R(4), U8(2), U8(8),
B(JumpIfToBooleanTrue), U8(46),
- B(LdaNamedProperty), R(3), U8(3), U8(10),
- B(Star), R(5),
+ B(LdaNamedProperty), R(4), U8(3), U8(10),
+ B(Star), R(6),
B(LdaSmi), I8(2),
- B(Star), R(4),
- B(Mov), R(5), R(1),
+ B(Star), R(5),
+ B(Mov), R(6), R(1),
/* 20 E> */ B(StackCheck),
B(Ldar), R(closure),
B(CreateBlockContext), U8(4),
- B(PushContext), R(12),
+ B(PushContext), R(13),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
- B(Ldar), R(5),
+ B(Ldar), R(6),
B(StaCurrentContextSlot), U8(4),
/* 41 S> */ B(CreateClosure), U8(5), U8(12), U8(2),
- B(Star), R(13),
- /* 67 E> */ B(CallUndefinedReceiver0), R(13), U8(13),
- B(PopContext), R(12),
+ B(Star), R(14),
+ /* 67 E> */ B(CallUndefinedReceiver0), R(14), U8(13),
+ B(PopContext), R(13),
B(LdaZero),
- B(Star), R(4),
- B(JumpLoop), U8(71), I8(0),
+ B(Star), R(5),
+ B(JumpLoop), U8(65), I8(0),
B(Jump), U8(36),
- B(Star), R(12),
+ B(Star), R(13),
B(Ldar), R(closure),
- B(CreateCatchContext), R(12), U8(6), U8(7),
- B(PushContext), R(12),
- B(Star), R(11),
+ B(CreateCatchContext), R(13), U8(6), U8(7),
+ B(PushContext), R(13),
+ B(Star), R(12),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(4), U8(15),
+ B(TestEqualStrict), R(5), U8(15),
B(JumpIfFalse), U8(6),
B(LdaSmi), I8(1),
- B(Star), R(4),
+ B(Star), R(5),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(13),
- B(CallRuntime), U16(Runtime::kReThrow), R(13), U8(1),
- B(PopContext), R(12),
+ B(Star), R(14),
+ B(CallRuntime), U16(Runtime::kReThrow), R(14), U8(1),
+ B(PopContext), R(13),
B(LdaSmi), I8(-1),
+ B(Star), R(10),
B(Star), R(9),
- B(Star), R(8),
B(Jump), U8(7),
- B(Star), R(9),
+ B(Star), R(10),
B(LdaZero),
- B(Star), R(8),
+ B(Star), R(9),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(10),
+ B(Star), R(11),
B(LdaZero),
- B(TestEqualStrict), R(4), U8(16),
- B(JumpIfTrue), U8(104),
+ B(TestEqualStrict), R(5), U8(16),
+ B(JumpIfTrue), U8(90),
B(LdaNamedProperty), R(2), U8(8), U8(17),
- B(Star), R(6),
+ B(Star), R(7),
B(TestUndetectable),
B(JumpIfFalse), U8(4),
- B(Jump), U8(93),
+ B(Jump), U8(79),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(4), U8(19),
- B(JumpIfFalse), U8(61),
- B(Ldar), R(6),
+ B(TestEqualStrict), R(5), U8(19),
+ B(JumpIfFalse), U8(47),
+ B(Ldar), R(7),
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(143),
- B(Star), R(11),
- B(LdaConstant), U8(9),
+ B(Wide), B(LdaSmi), I16(144),
B(Star), R(12),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(11), U8(2),
+ B(LdaConstant), U8(9),
+ B(Star), R(13),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(12), U8(2),
B(Throw),
- B(Mov), R(context), R(11),
- B(Mov), R(6), R(12),
- B(Mov), R(2), R(13),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(12), U8(2),
- B(Jump), U8(20),
- B(Star), R(12),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(12), U8(6), U8(10),
- B(Star), R(11),
+ B(Mov), R(context), R(12),
+ B(Mov), R(7), R(13),
+ B(Mov), R(2), R(14),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(13), U8(2),
+ B(Jump), U8(6),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(11),
- B(PushContext), R(12),
- B(PopContext), R(12),
+ B(Ldar), R(12),
B(Jump), U8(27),
- B(Mov), R(6), R(11),
- B(Mov), R(2), R(12),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(11), U8(2),
- B(Star), R(7),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(7), U8(1),
+ B(Mov), R(7), R(12),
+ B(Mov), R(2), R(13),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(12), U8(2),
+ B(Star), R(8),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(8), U8(1),
B(JumpIfToBooleanFalse), U8(4),
B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(7), U8(1),
- B(Ldar), R(10),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(8), U8(1),
+ B(Ldar), R(11),
B(SetPendingMessage),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(8),
+ B(TestEqualStrictNoFeedback), R(9),
B(JumpIfFalse), U8(5),
- B(Ldar), R(9),
+ B(Ldar), R(10),
B(ReThrow),
B(LdaUndefined),
/* 73 S> */ B(Return),
@@ -472,7 +452,6 @@ constant pool: [
FIXED_ARRAY_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
- FIXED_ARRAY_TYPE,
]
handlers: [
[7, 142, 150],
@@ -487,48 +466,48 @@ snippet: "
}
f([{ x: 0, y: 3 }, { x: 1, y: 9 }, { x: -12, y: 17 }]);
"
-frame size: 19
+frame size: 20
parameter count: 2
-bytecode array length: 300
+bytecode array length: 286
bytecodes: [
/* 10 E> */ B(StackCheck),
B(LdaZero),
- B(Star), R(9),
- B(Mov), R(context), R(15),
+ B(Star), R(10),
B(Mov), R(context), R(16),
+ B(Mov), R(context), R(17),
/* 41 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(0),
- B(Star), R(18),
- B(CallProperty0), R(18), R(arg0), U8(2),
- B(Mov), R(arg0), R(17),
+ B(Star), R(19),
+ B(CallProperty0), R(19), R(arg0), U8(2),
+ B(Mov), R(arg0), R(18),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star), R(7),
- /* 36 S> */ B(LdaNamedProperty), R(7), U8(1), U8(4),
- B(Star), R(17),
- B(CallProperty0), R(17), R(7), U8(6),
+ /* 41 E> */ B(LdaNamedProperty), R(7), U8(1), U8(4),
B(Star), R(8),
- /* 36 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(8), U8(1),
+ /* 36 S> */ B(CallProperty0), R(8), R(7), U8(6),
+ B(Star), R(9),
+ /* 36 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(9), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(8), U8(1),
- B(LdaNamedProperty), R(8), U8(2), U8(8),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(9), U8(1),
+ B(LdaNamedProperty), R(9), U8(2), U8(8),
B(JumpIfToBooleanTrue), U8(66),
- B(LdaNamedProperty), R(8), U8(3), U8(10),
- B(Star), R(10),
+ B(LdaNamedProperty), R(9), U8(3), U8(10),
+ B(Star), R(11),
B(LdaSmi), I8(2),
- B(Star), R(9),
- B(Mov), R(10), R(5),
+ B(Star), R(10),
+ B(Mov), R(11), R(5),
/* 20 E> */ B(StackCheck),
B(Mov), R(5), R(6),
B(Ldar), R(6),
B(JumpIfUndefined), U8(6),
B(Ldar), R(6),
B(JumpIfNotNull), U8(16),
- B(LdaSmi), I8(73),
- B(Star), R(17),
- B(LdaConstant), U8(4),
+ B(LdaSmi), I8(74),
B(Star), R(18),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(17), U8(2),
+ B(LdaConstant), U8(4),
+ B(Star), R(19),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(18), U8(2),
/* 31 E> */ B(Throw),
/* 31 S> */ B(LdaNamedProperty), R(6), U8(4), U8(12),
B(Star), R(1),
@@ -538,83 +517,77 @@ bytecodes: [
/* 58 E> */ B(Add), R(1), U8(16),
B(Star), R(0),
B(LdaZero),
- B(Star), R(9),
- B(JumpLoop), U8(91), I8(0),
+ B(Star), R(10),
+ B(JumpLoop), U8(85), I8(0),
B(Jump), U8(36),
- B(Star), R(17),
+ B(Star), R(18),
B(Ldar), R(closure),
- /* 56 E> */ B(CreateCatchContext), R(17), U8(6), U8(7),
- B(PushContext), R(17),
- B(Star), R(16),
+ /* 56 E> */ B(CreateCatchContext), R(18), U8(6), U8(7),
+ B(PushContext), R(18),
+ B(Star), R(17),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(9), U8(17),
+ B(TestEqualStrict), R(10), U8(17),
B(JumpIfFalse), U8(6),
B(LdaSmi), I8(1),
- B(Star), R(9),
+ B(Star), R(10),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(18),
- B(CallRuntime), U16(Runtime::kReThrow), R(18), U8(1),
- B(PopContext), R(17),
+ B(Star), R(19),
+ B(CallRuntime), U16(Runtime::kReThrow), R(19), U8(1),
+ B(PopContext), R(18),
B(LdaSmi), I8(-1),
+ B(Star), R(15),
B(Star), R(14),
- B(Star), R(13),
B(Jump), U8(7),
- B(Star), R(14),
+ B(Star), R(15),
B(LdaZero),
- B(Star), R(13),
+ B(Star), R(14),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(15),
+ B(Star), R(16),
B(LdaZero),
- B(TestEqualStrict), R(9), U8(18),
- B(JumpIfTrue), U8(104),
+ B(TestEqualStrict), R(10), U8(18),
+ B(JumpIfTrue), U8(90),
B(LdaNamedProperty), R(7), U8(8), U8(19),
- B(Star), R(11),
+ B(Star), R(12),
B(TestUndetectable),
B(JumpIfFalse), U8(4),
- B(Jump), U8(93),
+ B(Jump), U8(79),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(9), U8(21),
- B(JumpIfFalse), U8(61),
- B(Ldar), R(11),
+ B(TestEqualStrict), R(10), U8(21),
+ B(JumpIfFalse), U8(47),
+ B(Ldar), R(12),
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(143),
- B(Star), R(16),
- B(LdaConstant), U8(9),
+ B(Wide), B(LdaSmi), I16(144),
B(Star), R(17),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(16), U8(2),
+ B(LdaConstant), U8(9),
+ B(Star), R(18),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(17), U8(2),
B(Throw),
- B(Mov), R(context), R(16),
- B(Mov), R(11), R(17),
- B(Mov), R(7), R(18),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(17), U8(2),
- B(Jump), U8(20),
- B(Star), R(17),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(17), U8(6), U8(10),
- B(Star), R(16),
+ B(Mov), R(context), R(17),
+ B(Mov), R(12), R(18),
+ B(Mov), R(7), R(19),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(18), U8(2),
+ B(Jump), U8(6),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(16),
- B(PushContext), R(17),
- B(PopContext), R(17),
+ B(Ldar), R(17),
B(Jump), U8(27),
- B(Mov), R(11), R(16),
- B(Mov), R(7), R(17),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(16), U8(2),
- B(Star), R(12),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(12), U8(1),
+ B(Mov), R(12), R(17),
+ B(Mov), R(7), R(18),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(17), U8(2),
+ B(Star), R(13),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(13), U8(1),
B(JumpIfToBooleanFalse), U8(4),
B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
- B(Ldar), R(15),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(13), U8(1),
+ B(Ldar), R(16),
B(SetPendingMessage),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(13),
+ B(TestEqualStrictNoFeedback), R(14),
B(JumpIfFalse), U8(5),
- B(Ldar), R(14),
+ B(Ldar), R(15),
B(ReThrow),
B(LdaUndefined),
/* 65 S> */ B(Return),
@@ -630,7 +603,6 @@ constant pool: [
FIXED_ARRAY_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
- FIXED_ARRAY_TYPE,
]
handlers: [
[7, 162, 170],
@@ -645,150 +617,141 @@ snippet: "
}
f([1, 2, 3]);
"
-frame size: 19
+frame size: 20
parameter count: 2
-bytecode array length: 341
+bytecode array length: 320
bytecodes: [
B(Ldar), R(3),
B(JumpIfUndefined), U8(18),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(3), U8(1),
- B(PushContext), R(12),
+ B(PushContext), R(13),
B(RestoreGeneratorState), R(3),
- B(Star), R(11),
+ B(Star), R(12),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(42),
+ B(Abort), U8(15),
B(LdaSmi), I8(-2),
- B(Star), R(11),
+ B(Star), R(12),
B(CreateFunctionContext), U8(1),
- B(PushContext), R(12),
+ B(PushContext), R(13),
B(Ldar), R(arg0),
B(StaCurrentContextSlot), U8(4),
- B(Mov), R(closure), R(13),
- B(Mov), R(this), R(14),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(13), U8(2),
+ B(Mov), R(closure), R(14),
+ B(Mov), R(this), R(15),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(14), U8(2),
B(Star), R(3),
/* 11 E> */ B(StackCheck),
- /* 11 E> */ B(SuspendGenerator), R(3), R(0), U8(13), U8(0),
+ /* 11 E> */ B(SuspendGenerator), R(3), R(0), U8(14), U8(0),
/* 55 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(3), R(0), U8(13),
- B(LdaSmi), I8(-2),
- B(Star), R(11),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(3), U8(1),
- B(Star), R(13),
+ B(ResumeGenerator), R(3), R(12), R(0), U8(14),
+ B(Star), R(14),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(3), U8(1),
B(SwitchOnSmiNoFeedback), U8(1), U8(2), I8(0),
- B(Ldar), R(13),
+ B(Ldar), R(14),
/* 11 E> */ B(Throw),
- B(Ldar), R(13),
+ B(Ldar), R(14),
/* 55 S> */ B(Return),
B(LdaZero),
- B(Star), R(7),
- B(Mov), R(context), R(15),
+ B(Star), R(8),
B(Mov), R(context), R(16),
+ B(Mov), R(context), R(17),
/* 35 S> */ B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(17),
- B(LdaNamedProperty), R(17), U8(3), U8(0),
B(Star), R(18),
- B(CallProperty0), R(18), R(17), U8(2),
+ B(LdaNamedProperty), R(18), U8(3), U8(0),
+ B(Star), R(19),
+ B(CallProperty0), R(19), R(18), U8(2),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star), R(5),
- /* 30 S> */ B(LdaNamedProperty), R(5), U8(4), U8(4),
- B(Star), R(17),
- B(CallProperty0), R(17), R(5), U8(6),
+ /* 35 E> */ B(LdaNamedProperty), R(5), U8(4), U8(4),
B(Star), R(6),
- /* 30 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(6), U8(1),
+ /* 30 S> */ B(CallProperty0), R(6), R(5), U8(6),
+ B(Star), R(7),
+ /* 30 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(7), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
- B(LdaNamedProperty), R(6), U8(5), U8(8),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(7), U8(1),
+ B(LdaNamedProperty), R(7), U8(5), U8(8),
B(JumpIfToBooleanTrue), U8(28),
- B(LdaNamedProperty), R(6), U8(6), U8(10),
- B(Star), R(8),
+ B(LdaNamedProperty), R(7), U8(6), U8(10),
+ B(Star), R(9),
B(LdaSmi), I8(2),
- B(Star), R(7),
- B(Mov), R(8), R(4),
+ B(Star), R(8),
+ B(Mov), R(9), R(4),
/* 21 E> */ B(StackCheck),
B(Mov), R(4), R(1),
/* 50 S> */ B(Mov), R(1), R(0),
B(LdaZero),
- B(Star), R(7),
- B(JumpLoop), U8(53), I8(0),
+ B(Star), R(8),
+ B(JumpLoop), U8(47), I8(0),
B(Jump), U8(36),
- B(Star), R(17),
+ B(Star), R(18),
B(Ldar), R(closure),
- /* 50 E> */ B(CreateCatchContext), R(17), U8(7), U8(8),
- B(PushContext), R(17),
- B(Star), R(16),
+ /* 50 E> */ B(CreateCatchContext), R(18), U8(7), U8(8),
+ B(PushContext), R(18),
+ B(Star), R(17),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(7), U8(12),
+ B(TestEqualStrict), R(8), U8(12),
B(JumpIfFalse), U8(6),
B(LdaSmi), I8(1),
- B(Star), R(7),
+ B(Star), R(8),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(18),
- B(CallRuntime), U16(Runtime::kReThrow), R(18), U8(1),
- B(PopContext), R(17),
+ B(Star), R(19),
+ B(CallRuntime), U16(Runtime::kReThrow), R(19), U8(1),
+ B(PopContext), R(18),
B(LdaSmi), I8(-1),
+ B(Star), R(15),
B(Star), R(14),
- B(Star), R(13),
B(Jump), U8(7),
- B(Star), R(14),
+ B(Star), R(15),
B(LdaZero),
- B(Star), R(13),
+ B(Star), R(14),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(15),
+ B(Star), R(16),
B(LdaZero),
- B(TestEqualStrict), R(7), U8(13),
- B(JumpIfTrue), U8(104),
+ B(TestEqualStrict), R(8), U8(13),
+ B(JumpIfTrue), U8(90),
B(LdaNamedProperty), R(5), U8(9), U8(14),
- B(Star), R(9),
+ B(Star), R(10),
B(TestUndetectable),
B(JumpIfFalse), U8(4),
- B(Jump), U8(93),
+ B(Jump), U8(79),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(7), U8(16),
- B(JumpIfFalse), U8(61),
- B(Ldar), R(9),
+ B(TestEqualStrict), R(8), U8(16),
+ B(JumpIfFalse), U8(47),
+ B(Ldar), R(10),
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(143),
- B(Star), R(16),
- B(LdaConstant), U8(10),
+ B(Wide), B(LdaSmi), I16(144),
B(Star), R(17),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(16), U8(2),
+ B(LdaConstant), U8(10),
+ B(Star), R(18),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(17), U8(2),
B(Throw),
- B(Mov), R(context), R(16),
- B(Mov), R(9), R(17),
- B(Mov), R(5), R(18),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(17), U8(2),
- B(Jump), U8(20),
- B(Star), R(17),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(17), U8(7), U8(11),
- B(Star), R(16),
+ B(Mov), R(context), R(17),
+ B(Mov), R(10), R(18),
+ B(Mov), R(5), R(19),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(18), U8(2),
+ B(Jump), U8(6),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(16),
- B(PushContext), R(17),
- B(PopContext), R(17),
+ B(Ldar), R(17),
B(Jump), U8(27),
- B(Mov), R(9), R(16),
- B(Mov), R(5), R(17),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(16), U8(2),
- B(Star), R(10),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(10), U8(1),
+ B(Mov), R(10), R(17),
+ B(Mov), R(5), R(18),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(17), U8(2),
+ B(Star), R(11),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(11), U8(1),
B(JumpIfToBooleanFalse), U8(4),
B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
- B(Ldar), R(15),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
+ B(Ldar), R(16),
B(SetPendingMessage),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(13),
+ B(TestEqualStrictNoFeedback), R(14),
B(JumpIfFalse), U8(5),
- B(Ldar), R(14),
+ B(Ldar), R(15),
B(ReThrow),
B(LdaUndefined),
/* 55 S> */ B(Return),
@@ -805,12 +768,11 @@ constant pool: [
FIXED_ARRAY_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
- FIXED_ARRAY_TYPE,
]
handlers: [
- [85, 203, 211],
- [88, 167, 169],
- [271, 281, 283],
+ [78, 196, 204],
+ [81, 160, 162],
+ [264, 274, 276],
]
---
@@ -820,188 +782,176 @@ snippet: "
}
f([1, 2, 3]);
"
-frame size: 18
+frame size: 19
parameter count: 2
-bytecode array length: 408
+bytecode array length: 380
bytecodes: [
B(Ldar), R(2),
B(JumpIfUndefined), U8(18),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(2), U8(1),
- B(PushContext), R(11),
+ B(PushContext), R(12),
B(RestoreGeneratorState), R(2),
- B(Star), R(10),
+ B(Star), R(11),
B(SwitchOnSmiNoFeedback), U8(0), U8(2), I8(0),
- B(Abort), U8(42),
+ B(Abort), U8(15),
B(LdaSmi), I8(-2),
- B(Star), R(10),
+ B(Star), R(11),
B(CreateFunctionContext), U8(1),
- B(PushContext), R(11),
+ B(PushContext), R(12),
B(Ldar), R(arg0),
B(StaCurrentContextSlot), U8(4),
- B(Mov), R(closure), R(12),
- B(Mov), R(this), R(13),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(12), U8(2),
+ B(Mov), R(closure), R(13),
+ B(Mov), R(this), R(14),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(13), U8(2),
B(Star), R(2),
/* 11 E> */ B(StackCheck),
- /* 11 E> */ B(SuspendGenerator), R(2), R(0), U8(12), U8(0),
+ /* 11 E> */ B(SuspendGenerator), R(2), R(0), U8(13), U8(0),
/* 49 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(2), R(0), U8(12),
- B(LdaSmi), I8(-2),
- B(Star), R(10),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(2), U8(1),
- B(Star), R(12),
+ B(ResumeGenerator), R(2), R(11), R(0), U8(13),
+ B(Star), R(13),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
- B(Ldar), R(12),
+ B(Ldar), R(13),
/* 11 E> */ B(Throw),
- B(Ldar), R(12),
+ B(Ldar), R(13),
/* 49 S> */ B(Return),
B(LdaZero),
- B(Star), R(6),
- B(Mov), R(context), R(14),
+ B(Star), R(7),
B(Mov), R(context), R(15),
+ B(Mov), R(context), R(16),
/* 35 S> */ B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(16),
- B(LdaNamedProperty), R(16), U8(4), U8(0),
B(Star), R(17),
- B(CallProperty0), R(17), R(16), U8(2),
+ B(LdaNamedProperty), R(17), U8(4), U8(0),
+ B(Star), R(18),
+ B(CallProperty0), R(18), R(17), U8(2),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star), R(4),
- B(Ldar), R(10),
- B(SwitchOnSmiNoFeedback), U8(5), U8(1), I8(1),
+ /* 35 E> */ B(LdaNamedProperty), R(4), U8(5), U8(4),
+ B(Star), R(5),
+ B(Ldar), R(11),
+ B(SwitchOnSmiNoFeedback), U8(6), U8(1), I8(1),
B(LdaSmi), I8(-2),
- /* 35 E> */ B(TestEqualStrictNoFeedback), R(10),
+ B(TestEqualStrictNoFeedback), R(11),
B(JumpIfTrue), U8(4),
- B(Abort), U8(42),
- /* 30 S> */ B(LdaNamedProperty), R(4), U8(6), U8(4),
- B(Star), R(16),
- B(CallProperty0), R(16), R(4), U8(6),
- B(Star), R(5),
- /* 30 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(5), U8(1),
+ B(Abort), U8(15),
+ /* 30 S> */ B(CallProperty0), R(5), R(4), U8(6),
+ B(Star), R(6),
+ /* 30 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(6), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(5), U8(1),
- B(LdaNamedProperty), R(5), U8(7), U8(8),
- B(JumpIfToBooleanTrue), U8(74),
- B(LdaNamedProperty), R(5), U8(8), U8(10),
- B(Star), R(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
+ B(LdaNamedProperty), R(6), U8(7), U8(8),
+ B(JumpIfToBooleanTrue), U8(67),
+ B(LdaNamedProperty), R(6), U8(8), U8(10),
+ B(Star), R(8),
B(LdaSmi), I8(2),
- B(Star), R(6),
- B(Mov), R(7), R(3),
+ B(Star), R(7),
+ B(Mov), R(8), R(3),
/* 21 E> */ B(StackCheck),
B(Mov), R(3), R(0),
/* 40 S> */ B(LdaFalse),
- B(Star), R(17),
- B(Mov), R(0), R(16),
- B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(16), U8(2),
- B(SuspendGenerator), R(2), R(0), U8(16), U8(1),
+ B(Star), R(18),
+ B(Mov), R(0), R(17),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(17), U8(2),
+ B(SuspendGenerator), R(2), R(0), U8(17), U8(1),
/* 49 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(2), R(0), U8(16),
- B(LdaSmi), I8(-2),
- B(Star), R(10),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(2), U8(1),
- B(Star), R(16),
+ B(ResumeGenerator), R(2), R(11), R(0), U8(17),
+ B(Star), R(17),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
B(SwitchOnSmiNoFeedback), U8(9), U8(2), I8(0),
- B(Ldar), R(16),
+ B(Ldar), R(17),
/* 40 E> */ B(Throw),
B(LdaZero),
- B(Star), R(12),
- B(Mov), R(16), R(13),
+ B(Star), R(13),
+ B(Mov), R(17), R(14),
B(Jump), U8(58),
B(LdaZero),
- B(Star), R(6),
- B(JumpLoop), U8(113), I8(0),
+ B(Star), R(7),
+ B(JumpLoop), U8(100), I8(0),
B(Jump), U8(36),
- B(Star), R(16),
+ B(Star), R(17),
B(Ldar), R(closure),
- B(CreateCatchContext), R(16), U8(11), U8(12),
- B(PushContext), R(16),
- B(Star), R(15),
+ B(CreateCatchContext), R(17), U8(11), U8(12),
+ B(PushContext), R(17),
+ B(Star), R(16),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(6), U8(12),
+ B(TestEqualStrict), R(7), U8(12),
B(JumpIfFalse), U8(6),
B(LdaSmi), I8(1),
- B(Star), R(6),
+ B(Star), R(7),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(17),
- B(CallRuntime), U16(Runtime::kReThrow), R(17), U8(1),
- B(PopContext), R(16),
+ B(Star), R(18),
+ B(CallRuntime), U16(Runtime::kReThrow), R(18), U8(1),
+ B(PopContext), R(17),
B(LdaSmi), I8(-1),
+ B(Star), R(14),
B(Star), R(13),
- B(Star), R(12),
B(Jump), U8(8),
- B(Star), R(13),
+ B(Star), R(14),
B(LdaSmi), I8(1),
- B(Star), R(12),
+ B(Star), R(13),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(14),
+ B(Star), R(15),
B(LdaZero),
- B(TestEqualStrict), R(6), U8(13),
- B(JumpIfTrue), U8(104),
+ B(TestEqualStrict), R(7), U8(13),
+ B(JumpIfTrue), U8(90),
B(LdaNamedProperty), R(4), U8(13), U8(14),
- B(Star), R(8),
+ B(Star), R(9),
B(TestUndetectable),
B(JumpIfFalse), U8(4),
- B(Jump), U8(93),
+ B(Jump), U8(79),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(6), U8(16),
- B(JumpIfFalse), U8(61),
- B(Ldar), R(8),
+ B(TestEqualStrict), R(7), U8(16),
+ B(JumpIfFalse), U8(47),
+ B(Ldar), R(9),
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(143),
- B(Star), R(15),
- B(LdaConstant), U8(14),
+ B(Wide), B(LdaSmi), I16(144),
B(Star), R(16),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(15), U8(2),
+ B(LdaConstant), U8(14),
+ B(Star), R(17),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(16), U8(2),
B(Throw),
- B(Mov), R(context), R(15),
- B(Mov), R(8), R(16),
- B(Mov), R(4), R(17),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(16), U8(2),
- B(Jump), U8(20),
- B(Star), R(16),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(16), U8(11), U8(15),
- B(Star), R(15),
+ B(Mov), R(context), R(16),
+ B(Mov), R(9), R(17),
+ B(Mov), R(4), R(18),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(17), U8(2),
+ B(Jump), U8(6),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(15),
- B(PushContext), R(16),
- B(PopContext), R(16),
+ B(Ldar), R(16),
B(Jump), U8(27),
- B(Mov), R(8), R(15),
- B(Mov), R(4), R(16),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(15), U8(2),
- B(Star), R(9),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(9), U8(1),
+ B(Mov), R(9), R(16),
+ B(Mov), R(4), R(17),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(16), U8(2),
+ B(Star), R(10),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(10), U8(1),
B(JumpIfToBooleanFalse), U8(4),
B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(9), U8(1),
- B(Ldar), R(14),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
+ B(Ldar), R(15),
B(SetPendingMessage),
- B(Ldar), R(12),
- B(SwitchOnSmiNoFeedback), U8(16), U8(2), I8(0),
- B(Jump), U8(8),
B(Ldar), R(13),
+ B(SwitchOnSmiNoFeedback), U8(15), U8(2), I8(0),
+ B(Jump), U8(8),
+ B(Ldar), R(14),
/* 49 S> */ B(Return),
- B(Ldar), R(13),
+ B(Ldar), R(14),
B(ReThrow),
B(LdaUndefined),
/* 49 S> */ B(Return),
]
constant pool: [
Smi [37],
- Smi [97],
+ Smi [96],
Smi [10],
Smi [7],
SYMBOL_TYPE,
- Smi [75],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
+ Smi [69],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
Smi [15],
@@ -1010,14 +960,13 @@ constant pool: [
FIXED_ARRAY_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
- FIXED_ARRAY_TYPE,
Smi [6],
Smi [9],
]
handlers: [
- [85, 263, 271],
- [88, 227, 229],
- [332, 342, 344],
+ [78, 249, 257],
+ [81, 213, 215],
+ [318, 328, 330],
]
---
@@ -1027,178 +976,172 @@ snippet: "
}
f([1, 2, 3]);
"
-frame size: 23
+frame size: 24
parameter count: 2
-bytecode array length: 386
+bytecode array length: 372
bytecodes: [
B(CreateFunctionContext), U8(1),
- B(PushContext), R(12),
+ B(PushContext), R(13),
B(Ldar), R(arg0),
B(StaCurrentContextSlot), U8(4),
/* 16 E> */ B(StackCheck),
B(CallJSRuntime), U8(%async_function_promise_create), R(0), U8(0),
- B(Star), R(11),
- B(Mov), R(context), R(15),
+ B(Star), R(12),
B(Mov), R(context), R(16),
+ B(Mov), R(context), R(17),
B(LdaZero),
- B(Star), R(7),
- B(Mov), R(context), R(19),
+ B(Star), R(8),
B(Mov), R(context), R(20),
+ B(Mov), R(context), R(21),
/* 40 S> */ B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(21),
- B(LdaNamedProperty), R(21), U8(0), U8(0),
B(Star), R(22),
- B(CallProperty0), R(22), R(21), U8(2),
+ B(LdaNamedProperty), R(22), U8(0), U8(0),
+ B(Star), R(23),
+ B(CallProperty0), R(23), R(22), U8(2),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star), R(5),
- /* 35 S> */ B(LdaNamedProperty), R(5), U8(1), U8(4),
- B(Star), R(21),
- B(CallProperty0), R(21), R(5), U8(6),
+ /* 40 E> */ B(LdaNamedProperty), R(5), U8(1), U8(4),
B(Star), R(6),
- /* 35 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(6), U8(1),
+ /* 35 S> */ B(CallProperty0), R(6), R(5), U8(6),
+ B(Star), R(7),
+ /* 35 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(7), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
- B(LdaNamedProperty), R(6), U8(2), U8(8),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(7), U8(1),
+ B(LdaNamedProperty), R(7), U8(2), U8(8),
B(JumpIfToBooleanTrue), U8(28),
- B(LdaNamedProperty), R(6), U8(3), U8(10),
- B(Star), R(8),
+ B(LdaNamedProperty), R(7), U8(3), U8(10),
+ B(Star), R(9),
B(LdaSmi), I8(2),
- B(Star), R(7),
- B(Mov), R(8), R(4),
+ B(Star), R(8),
+ B(Mov), R(9), R(4),
/* 26 E> */ B(StackCheck),
B(Mov), R(4), R(1),
/* 55 S> */ B(Mov), R(1), R(0),
B(LdaZero),
- B(Star), R(7),
- B(JumpLoop), U8(53), I8(0),
+ B(Star), R(8),
+ B(JumpLoop), U8(47), I8(0),
B(Jump), U8(40),
- B(Star), R(21),
+ B(Star), R(22),
B(Ldar), R(closure),
- /* 55 E> */ B(CreateCatchContext), R(21), U8(4), U8(5),
- B(Star), R(20),
+ /* 55 E> */ B(CreateCatchContext), R(22), U8(4), U8(5),
+ B(Star), R(21),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(20),
- B(PushContext), R(21),
+ B(Ldar), R(21),
+ B(PushContext), R(22),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(7), U8(12),
+ B(TestEqualStrict), R(8), U8(12),
B(JumpIfFalse), U8(6),
B(LdaSmi), I8(1),
- B(Star), R(7),
+ B(Star), R(8),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(22),
- B(CallRuntime), U16(Runtime::kReThrow), R(22), U8(1),
- B(PopContext), R(21),
+ B(Star), R(23),
+ B(CallRuntime), U16(Runtime::kReThrow), R(23), U8(1),
+ B(PopContext), R(22),
B(LdaSmi), I8(-1),
+ B(Star), R(19),
B(Star), R(18),
- B(Star), R(17),
B(Jump), U8(7),
- B(Star), R(18),
+ B(Star), R(19),
B(LdaZero),
- B(Star), R(17),
+ B(Star), R(18),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(19),
+ B(Star), R(20),
B(LdaZero),
- B(TestEqualStrict), R(7), U8(13),
- B(JumpIfTrue), U8(104),
+ B(TestEqualStrict), R(8), U8(13),
+ B(JumpIfTrue), U8(90),
B(LdaNamedProperty), R(5), U8(6), U8(14),
- B(Star), R(9),
+ B(Star), R(10),
B(TestUndetectable),
B(JumpIfFalse), U8(4),
- B(Jump), U8(93),
+ B(Jump), U8(79),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(7), U8(16),
- B(JumpIfFalse), U8(61),
- B(Ldar), R(9),
+ B(TestEqualStrict), R(8), U8(16),
+ B(JumpIfFalse), U8(47),
+ B(Ldar), R(10),
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(143),
- B(Star), R(20),
- B(LdaConstant), U8(7),
+ B(Wide), B(LdaSmi), I16(144),
B(Star), R(21),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(20), U8(2),
+ B(LdaConstant), U8(7),
+ B(Star), R(22),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(21), U8(2),
B(Throw),
- B(Mov), R(context), R(20),
- B(Mov), R(9), R(21),
- B(Mov), R(5), R(22),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(21), U8(2),
- B(Jump), U8(20),
- B(Star), R(21),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(21), U8(4), U8(8),
- B(Star), R(20),
+ B(Mov), R(context), R(21),
+ B(Mov), R(10), R(22),
+ B(Mov), R(5), R(23),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(22), U8(2),
+ B(Jump), U8(6),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(20),
- B(PushContext), R(21),
- B(PopContext), R(21),
+ B(Ldar), R(21),
B(Jump), U8(27),
- B(Mov), R(9), R(20),
- B(Mov), R(5), R(21),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(20), U8(2),
- B(Star), R(10),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(10), U8(1),
+ B(Mov), R(10), R(21),
+ B(Mov), R(5), R(22),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(21), U8(2),
+ B(Star), R(11),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(11), U8(1),
B(JumpIfToBooleanFalse), U8(4),
B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
- B(Ldar), R(19),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
+ B(Ldar), R(20),
B(SetPendingMessage),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(17),
+ B(TestEqualStrictNoFeedback), R(18),
B(JumpIfFalse), U8(5),
- B(Ldar), R(18),
+ B(Ldar), R(19),
B(ReThrow),
B(LdaUndefined),
- B(Star), R(18),
- B(Mov), R(11), R(17),
- B(CallJSRuntime), U8(%promise_resolve), R(17), U8(2),
+ B(Star), R(19),
+ B(Mov), R(12), R(18),
+ B(CallJSRuntime), U8(%promise_resolve), R(18), U8(2),
B(LdaZero),
- B(Star), R(13),
- B(Mov), R(11), R(14),
+ B(Star), R(14),
+ B(Mov), R(12), R(15),
B(Jump), U8(58),
B(Jump), U8(42),
- B(Star), R(17),
+ B(Star), R(18),
B(Ldar), R(closure),
- B(CreateCatchContext), R(17), U8(4), U8(9),
- B(Star), R(16),
+ B(CreateCatchContext), R(18), U8(4), U8(8),
+ B(Star), R(17),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(16),
- B(PushContext), R(17),
+ B(Ldar), R(17),
+ B(PushContext), R(18),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(19),
- B(LdaFalse),
B(Star), R(20),
- B(Mov), R(11), R(18),
- B(CallJSRuntime), U8(%promise_internal_reject), R(18), U8(3),
- B(PopContext), R(17),
+ B(LdaFalse),
+ B(Star), R(21),
+ B(Mov), R(12), R(19),
+ B(CallJSRuntime), U8(%promise_internal_reject), R(19), U8(3),
+ B(PopContext), R(18),
B(LdaZero),
- B(Star), R(13),
- B(Mov), R(11), R(14),
+ B(Star), R(14),
+ B(Mov), R(12), R(15),
B(Jump), U8(16),
B(LdaSmi), I8(-1),
+ B(Star), R(15),
B(Star), R(14),
- B(Star), R(13),
B(Jump), U8(8),
- B(Star), R(14),
+ B(Star), R(15),
B(LdaSmi), I8(1),
- B(Star), R(13),
+ B(Star), R(14),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(15),
- B(CallJSRuntime), U8(%async_function_promise_release), R(11), U8(1),
- B(Ldar), R(15),
+ B(Star), R(16),
+ B(CallJSRuntime), U8(%async_function_promise_release), R(12), U8(1),
+ B(Ldar), R(16),
B(SetPendingMessage),
- B(Ldar), R(13),
- B(SwitchOnSmiNoFeedback), U8(10), U8(2), I8(0),
- B(Jump), U8(8),
B(Ldar), R(14),
+ B(SwitchOnSmiNoFeedback), U8(9), U8(2), I8(0),
+ B(Jump), U8(8),
+ B(Ldar), R(15),
/* 60 S> */ B(Return),
- B(Ldar), R(14),
+ B(Ldar), R(15),
B(ReThrow),
B(LdaUndefined),
/* 60 S> */ B(Return),
@@ -1213,13 +1156,12 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
FIXED_ARRAY_TYPE,
- FIXED_ARRAY_TYPE,
Smi [6],
Smi [9],
]
handlers: [
- [18, 345, 353],
- [21, 303, 305],
+ [18, 331, 339],
+ [21, 289, 291],
[27, 149, 157],
[30, 109, 111],
[217, 227, 229],
@@ -1232,224 +1174,215 @@ snippet: "
}
f([1, 2, 3]);
"
-frame size: 24
+frame size: 25
parameter count: 2
-bytecode array length: 480
+bytecode array length: 459
bytecodes: [
B(Ldar), R(2),
B(JumpIfUndefined), U8(18),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(2), U8(1),
- B(PushContext), R(12),
+ B(PushContext), R(13),
B(RestoreGeneratorState), R(2),
- B(Star), R(11),
+ B(Star), R(12),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(42),
+ B(Abort), U8(15),
B(LdaSmi), I8(-2),
- B(Star), R(11),
+ B(Star), R(12),
B(CreateFunctionContext), U8(1),
- B(PushContext), R(12),
+ B(PushContext), R(13),
B(Ldar), R(arg0),
B(StaCurrentContextSlot), U8(4),
- B(Mov), R(closure), R(13),
- B(Mov), R(this), R(14),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(13), U8(2),
+ B(Mov), R(closure), R(14),
+ B(Mov), R(this), R(15),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(14), U8(2),
B(Star), R(2),
/* 16 E> */ B(StackCheck),
B(CallJSRuntime), U8(%async_function_promise_create), R(0), U8(0),
- B(Star), R(10),
- B(Mov), R(context), R(15),
+ B(Star), R(11),
B(Mov), R(context), R(16),
+ B(Mov), R(context), R(17),
B(LdaZero),
- B(Star), R(6),
- B(Mov), R(context), R(19),
+ B(Star), R(7),
B(Mov), R(context), R(20),
+ B(Mov), R(context), R(21),
/* 40 S> */ B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(21),
- B(LdaNamedProperty), R(21), U8(1), U8(0),
B(Star), R(22),
- B(CallProperty0), R(22), R(21), U8(2),
+ B(LdaNamedProperty), R(22), U8(1), U8(0),
+ B(Star), R(23),
+ B(CallProperty0), R(23), R(22), U8(2),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star), R(4),
- B(Ldar), R(11),
- B(SwitchOnSmiNoFeedback), U8(2), U8(1), I8(0),
+ /* 40 E> */ B(LdaNamedProperty), R(4), U8(2), U8(4),
+ B(Star), R(5),
+ B(Ldar), R(12),
+ B(SwitchOnSmiNoFeedback), U8(3), U8(1), I8(0),
B(LdaSmi), I8(-2),
- /* 40 E> */ B(TestEqualStrictNoFeedback), R(11),
+ B(TestEqualStrictNoFeedback), R(12),
B(JumpIfTrue), U8(4),
- B(Abort), U8(42),
- /* 35 S> */ B(LdaNamedProperty), R(4), U8(3), U8(4),
- B(Star), R(21),
- B(CallProperty0), R(21), R(4), U8(6),
- B(Star), R(5),
- /* 35 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(5), U8(1),
+ B(Abort), U8(15),
+ /* 35 S> */ B(CallProperty0), R(5), R(4), U8(6),
+ B(Star), R(6),
+ /* 35 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(6), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(5), U8(1),
- B(LdaNamedProperty), R(5), U8(4), U8(8),
- B(JumpIfToBooleanTrue), U8(72),
- B(LdaNamedProperty), R(5), U8(5), U8(10),
- B(Star), R(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
+ B(LdaNamedProperty), R(6), U8(4), U8(8),
+ B(JumpIfToBooleanTrue), U8(65),
+ B(LdaNamedProperty), R(6), U8(5), U8(10),
+ B(Star), R(8),
B(LdaSmi), I8(2),
- B(Star), R(6),
- B(Mov), R(7), R(3),
+ B(Star), R(7),
+ B(Mov), R(8), R(3),
/* 26 E> */ B(StackCheck),
B(Mov), R(3), R(0),
- /* 45 S> */ B(Mov), R(2), R(21),
- B(Mov), R(0), R(22),
- B(Mov), R(10), R(23),
- B(CallJSRuntime), U8(%async_function_await_uncaught), R(21), U8(3),
- B(SuspendGenerator), R(2), R(0), U8(21), U8(0),
+ /* 45 S> */ B(Mov), R(2), R(22),
+ B(Mov), R(0), R(23),
+ B(Mov), R(11), R(24),
+ B(CallJSRuntime), U8(%async_function_await_uncaught), R(22), U8(3),
+ B(SuspendGenerator), R(2), R(0), U8(22), U8(0),
/* 54 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(2), R(0), U8(21),
- B(LdaSmi), I8(-2),
- B(Star), R(11),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(2), U8(1),
- B(Star), R(21),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(ResumeGenerator), R(2), R(12), R(0), U8(22),
B(Star), R(22),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(Star), R(23),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(22),
+ B(TestEqualStrictNoFeedback), R(23),
B(JumpIfTrue), U8(5),
- B(Ldar), R(21),
+ B(Ldar), R(22),
B(ReThrow),
B(LdaZero),
- B(Star), R(6),
- B(JumpLoop), U8(111), I8(0),
+ B(Star), R(7),
+ B(JumpLoop), U8(98), I8(0),
B(Jump), U8(40),
- B(Star), R(21),
+ B(Star), R(22),
B(Ldar), R(closure),
- B(CreateCatchContext), R(21), U8(6), U8(7),
- B(Star), R(20),
+ B(CreateCatchContext), R(22), U8(6), U8(7),
+ B(Star), R(21),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(20),
- B(PushContext), R(21),
+ B(Ldar), R(21),
+ B(PushContext), R(22),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(6), U8(12),
+ B(TestEqualStrict), R(7), U8(12),
B(JumpIfFalse), U8(6),
B(LdaSmi), I8(1),
- B(Star), R(6),
+ B(Star), R(7),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(22),
- B(CallRuntime), U16(Runtime::kReThrow), R(22), U8(1),
- B(PopContext), R(21),
+ B(Star), R(23),
+ B(CallRuntime), U16(Runtime::kReThrow), R(23), U8(1),
+ B(PopContext), R(22),
B(LdaSmi), I8(-1),
+ B(Star), R(19),
B(Star), R(18),
- B(Star), R(17),
B(Jump), U8(7),
- B(Star), R(18),
+ B(Star), R(19),
B(LdaZero),
- B(Star), R(17),
+ B(Star), R(18),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(19),
+ B(Star), R(20),
B(LdaZero),
- B(TestEqualStrict), R(6), U8(13),
- B(JumpIfTrue), U8(104),
+ B(TestEqualStrict), R(7), U8(13),
+ B(JumpIfTrue), U8(90),
B(LdaNamedProperty), R(4), U8(8), U8(14),
- B(Star), R(8),
+ B(Star), R(9),
B(TestUndetectable),
B(JumpIfFalse), U8(4),
- B(Jump), U8(93),
+ B(Jump), U8(79),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(6), U8(16),
- B(JumpIfFalse), U8(61),
- B(Ldar), R(8),
+ B(TestEqualStrict), R(7), U8(16),
+ B(JumpIfFalse), U8(47),
+ B(Ldar), R(9),
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(143),
- B(Star), R(20),
- B(LdaConstant), U8(9),
+ B(Wide), B(LdaSmi), I16(144),
B(Star), R(21),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(20), U8(2),
+ B(LdaConstant), U8(9),
+ B(Star), R(22),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(21), U8(2),
B(Throw),
- B(Mov), R(context), R(20),
- B(Mov), R(8), R(21),
- B(Mov), R(4), R(22),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(21), U8(2),
- B(Jump), U8(20),
- B(Star), R(21),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(21), U8(6), U8(10),
- B(Star), R(20),
+ B(Mov), R(context), R(21),
+ B(Mov), R(9), R(22),
+ B(Mov), R(4), R(23),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(22), U8(2),
+ B(Jump), U8(6),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(20),
- B(PushContext), R(21),
- B(PopContext), R(21),
+ B(Ldar), R(21),
B(Jump), U8(27),
- B(Mov), R(8), R(20),
- B(Mov), R(4), R(21),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(20), U8(2),
- B(Star), R(9),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(9), U8(1),
+ B(Mov), R(9), R(21),
+ B(Mov), R(4), R(22),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(21), U8(2),
+ B(Star), R(10),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(10), U8(1),
B(JumpIfToBooleanFalse), U8(4),
B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(9), U8(1),
- B(Ldar), R(19),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
+ B(Ldar), R(20),
B(SetPendingMessage),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(17),
+ B(TestEqualStrictNoFeedback), R(18),
B(JumpIfFalse), U8(5),
- B(Ldar), R(18),
+ B(Ldar), R(19),
B(ReThrow),
B(LdaUndefined),
- B(Star), R(18),
- B(Mov), R(10), R(17),
- B(CallJSRuntime), U8(%promise_resolve), R(17), U8(2),
+ B(Star), R(19),
+ B(Mov), R(11), R(18),
+ B(CallJSRuntime), U8(%promise_resolve), R(18), U8(2),
B(LdaZero),
- B(Star), R(13),
- B(Mov), R(10), R(14),
+ B(Star), R(14),
+ B(Mov), R(11), R(15),
B(Jump), U8(58),
B(Jump), U8(42),
- B(Star), R(17),
+ B(Star), R(18),
B(Ldar), R(closure),
- B(CreateCatchContext), R(17), U8(6), U8(11),
- B(Star), R(16),
+ B(CreateCatchContext), R(18), U8(6), U8(10),
+ B(Star), R(17),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(16),
- B(PushContext), R(17),
+ B(Ldar), R(17),
+ B(PushContext), R(18),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(19),
- B(LdaFalse),
B(Star), R(20),
- B(Mov), R(10), R(18),
- B(CallJSRuntime), U8(%promise_internal_reject), R(18), U8(3),
- B(PopContext), R(17),
+ B(LdaFalse),
+ B(Star), R(21),
+ B(Mov), R(11), R(19),
+ B(CallJSRuntime), U8(%promise_internal_reject), R(19), U8(3),
+ B(PopContext), R(18),
B(LdaZero),
- B(Star), R(13),
- B(Mov), R(10), R(14),
+ B(Star), R(14),
+ B(Mov), R(11), R(15),
B(Jump), U8(16),
B(LdaSmi), I8(-1),
+ B(Star), R(15),
B(Star), R(14),
- B(Star), R(13),
B(Jump), U8(8),
- B(Star), R(14),
+ B(Star), R(15),
B(LdaSmi), I8(1),
- B(Star), R(13),
+ B(Star), R(14),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(15),
- B(CallJSRuntime), U8(%async_function_promise_release), R(10), U8(1),
- B(Ldar), R(15),
+ B(Star), R(16),
+ B(CallJSRuntime), U8(%async_function_promise_release), R(11), U8(1),
+ B(Ldar), R(16),
B(SetPendingMessage),
- B(Ldar), R(13),
- B(SwitchOnSmiNoFeedback), U8(12), U8(2), I8(0),
- B(Jump), U8(8),
B(Ldar), R(14),
+ B(SwitchOnSmiNoFeedback), U8(11), U8(2), I8(0),
+ B(Jump), U8(8),
+ B(Ldar), R(15),
/* 54 S> */ B(Return),
- B(Ldar), R(14),
+ B(Ldar), R(15),
B(ReThrow),
B(LdaUndefined),
/* 54 S> */ B(Return),
]
constant pool: [
- Smi [75],
+ Smi [81],
SYMBOL_TYPE,
- Smi [78],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
+ Smi [72],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
@@ -1457,15 +1390,14 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
FIXED_ARRAY_TYPE,
- FIXED_ARRAY_TYPE,
Smi [6],
Smi [9],
]
handlers: [
- [54, 439, 447],
- [57, 397, 399],
- [63, 243, 251],
- [66, 203, 205],
- [311, 321, 323],
+ [54, 418, 426],
+ [57, 376, 378],
+ [63, 236, 244],
+ [66, 196, 198],
+ [304, 314, 316],
]
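Two renumberings here are mechanical fallout from enum changes rather than codegen changes: Abort's reason operand moves from U8(42) to U8(15), and the Wide LdaSmi TypeError template index from I16(143) to I16(144), presumably because the corresponding enums were reordered elsewhere in this update. The handler triples, by contrast, follow directly from the shrunken bytecode arrays: each entry is [start, end, handler] in byte offsets, so it shifts by exactly the bytes saved before it. Checking the listing above, assuming the triples are plain offsets:

  // bytecode array length: 480 -> 459, i.e. 21 bytes saved in total
  // outermost try region:  [54, 439, 447] -> [54, 418, 426]   (both ends - 21)
  // an inner region ends before most of the savings accrue:
  //                        [63, 243, 251] -> [63, 236, 244]   (only - 7 there)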
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
index 8068bc1fe8..f09cd9fd00 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
@@ -13,7 +13,7 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 73
+bytecode array length: 66
bytecodes: [
B(Ldar), R(0),
B(JumpIfUndefined), U8(18),
@@ -22,7 +22,7 @@ bytecodes: [
B(RestoreGeneratorState), R(0),
B(Star), R(1),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(42),
+ B(Abort), U8(15),
B(LdaSmi), I8(-2),
B(Star), R(1),
B(Mov), R(closure), R(2),
@@ -32,10 +32,7 @@ bytecodes: [
/* 11 E> */ B(StackCheck),
/* 11 E> */ B(SuspendGenerator), R(0), R(0), U8(2), U8(0),
/* 16 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(0), R(0), U8(2),
- B(LdaSmi), I8(-2),
- B(Star), R(1),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(0), U8(1),
+ B(ResumeGenerator), R(0), R(1), R(0), U8(2),
B(Star), R(2),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(1), U8(2), I8(0),
@@ -61,7 +58,7 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 118
+bytecode array length: 104
bytecodes: [
B(Ldar), R(0),
B(JumpIfUndefined), U8(18),
@@ -70,7 +67,7 @@ bytecodes: [
B(RestoreGeneratorState), R(0),
B(Star), R(1),
B(SwitchOnSmiNoFeedback), U8(0), U8(2), I8(0),
- B(Abort), U8(42),
+ B(Abort), U8(15),
B(LdaSmi), I8(-2),
B(Star), R(1),
B(Mov), R(closure), R(2),
@@ -80,10 +77,7 @@ bytecodes: [
/* 11 E> */ B(StackCheck),
/* 11 E> */ B(SuspendGenerator), R(0), R(0), U8(2), U8(0),
/* 25 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(0), R(0), U8(2),
- B(LdaSmi), I8(-2),
- B(Star), R(1),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(0), U8(1),
+ B(ResumeGenerator), R(0), R(1), R(0), U8(2),
B(Star), R(2),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
@@ -98,10 +92,7 @@ bytecodes: [
B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(2), U8(2),
B(SuspendGenerator), R(0), R(0), U8(2), U8(1),
/* 25 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(0), R(0), U8(2),
- B(LdaSmi), I8(-2),
- B(Star), R(1),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(0), U8(1),
+ B(ResumeGenerator), R(0), R(1), R(0), U8(2),
B(Star), R(2),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(4), U8(2), I8(0),
@@ -114,7 +105,7 @@ bytecodes: [
]
constant pool: [
Smi [29],
- Smi [74],
+ Smi [67],
Smi [10],
Smi [7],
Smi [10],
@@ -128,185 +119,173 @@ snippet: "
function* f() { for (let x of [42]) yield x }
f();
"
-frame size: 17
+frame size: 18
parameter count: 1
-bytecode array length: 402
+bytecode array length: 374
bytecodes: [
B(Ldar), R(2),
B(JumpIfUndefined), U8(18),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(2), U8(1),
- B(PushContext), R(11),
+ B(PushContext), R(12),
B(RestoreGeneratorState), R(2),
- B(Star), R(10),
+ B(Star), R(11),
B(SwitchOnSmiNoFeedback), U8(0), U8(2), I8(0),
- B(Abort), U8(42),
+ B(Abort), U8(15),
B(LdaSmi), I8(-2),
- B(Star), R(10),
- B(Mov), R(closure), R(11),
- B(Mov), R(this), R(12),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(11), U8(2),
+ B(Star), R(11),
+ B(Mov), R(closure), R(12),
+ B(Mov), R(this), R(13),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(12), U8(2),
B(Star), R(2),
/* 11 E> */ B(StackCheck),
- /* 11 E> */ B(SuspendGenerator), R(2), R(0), U8(11), U8(0),
+ /* 11 E> */ B(SuspendGenerator), R(2), R(0), U8(12), U8(0),
/* 44 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(2), R(0), U8(11),
- B(LdaSmi), I8(-2),
- B(Star), R(10),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(2), U8(1),
- B(Star), R(11),
+ B(ResumeGenerator), R(2), R(11), R(0), U8(12),
+ B(Star), R(12),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
- B(Ldar), R(11),
+ B(Ldar), R(12),
/* 11 E> */ B(Throw),
- B(Ldar), R(11),
+ B(Ldar), R(12),
/* 44 S> */ B(Return),
B(LdaZero),
- B(Star), R(6),
- B(Mov), R(context), R(13),
+ B(Star), R(7),
B(Mov), R(context), R(14),
+ B(Mov), R(context), R(15),
/* 30 S> */ B(CreateArrayLiteral), U8(4), U8(0), U8(37),
- B(Star), R(15),
- B(LdaNamedProperty), R(15), U8(5), U8(1),
B(Star), R(16),
- B(CallProperty0), R(16), R(15), U8(3),
+ B(LdaNamedProperty), R(16), U8(5), U8(1),
+ B(Star), R(17),
+ B(CallProperty0), R(17), R(16), U8(3),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star), R(4),
- B(Ldar), R(10),
- B(SwitchOnSmiNoFeedback), U8(6), U8(1), I8(1),
+ /* 30 E> */ B(LdaNamedProperty), R(4), U8(6), U8(5),
+ B(Star), R(5),
+ B(Ldar), R(11),
+ B(SwitchOnSmiNoFeedback), U8(7), U8(1), I8(1),
B(LdaSmi), I8(-2),
- /* 30 E> */ B(TestEqualStrictNoFeedback), R(10),
+ B(TestEqualStrictNoFeedback), R(11),
B(JumpIfTrue), U8(4),
- B(Abort), U8(42),
- /* 25 S> */ B(LdaNamedProperty), R(4), U8(7), U8(5),
- B(Star), R(15),
- B(CallProperty0), R(15), R(4), U8(7),
- B(Star), R(5),
- /* 25 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(5), U8(1),
+ B(Abort), U8(15),
+ /* 25 S> */ B(CallProperty0), R(5), R(4), U8(7),
+ B(Star), R(6),
+ /* 25 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(6), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(5), U8(1),
- B(LdaNamedProperty), R(5), U8(8), U8(9),
- B(JumpIfToBooleanTrue), U8(74),
- B(LdaNamedProperty), R(5), U8(9), U8(11),
- B(Star), R(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
+ B(LdaNamedProperty), R(6), U8(8), U8(9),
+ B(JumpIfToBooleanTrue), U8(67),
+ B(LdaNamedProperty), R(6), U8(9), U8(11),
+ B(Star), R(8),
B(LdaSmi), I8(2),
- B(Star), R(6),
- B(Mov), R(7), R(3),
+ B(Star), R(7),
+ B(Mov), R(8), R(3),
/* 16 E> */ B(StackCheck),
B(Mov), R(3), R(0),
/* 36 S> */ B(LdaFalse),
- B(Star), R(16),
- B(Mov), R(0), R(15),
- B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(15), U8(2),
- B(SuspendGenerator), R(2), R(0), U8(15), U8(1),
+ B(Star), R(17),
+ B(Mov), R(0), R(16),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(16), U8(2),
+ B(SuspendGenerator), R(2), R(0), U8(16), U8(1),
/* 44 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(2), R(0), U8(15),
- B(LdaSmi), I8(-2),
- B(Star), R(10),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(2), U8(1),
- B(Star), R(15),
+ B(ResumeGenerator), R(2), R(11), R(0), U8(16),
+ B(Star), R(16),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
B(SwitchOnSmiNoFeedback), U8(10), U8(2), I8(0),
- B(Ldar), R(15),
+ B(Ldar), R(16),
/* 36 E> */ B(Throw),
B(LdaZero),
- B(Star), R(11),
- B(Mov), R(15), R(12),
+ B(Star), R(12),
+ B(Mov), R(16), R(13),
B(Jump), U8(58),
B(LdaZero),
- B(Star), R(6),
- B(JumpLoop), U8(113), I8(0),
+ B(Star), R(7),
+ B(JumpLoop), U8(100), I8(0),
B(Jump), U8(36),
- B(Star), R(15),
+ B(Star), R(16),
B(Ldar), R(closure),
- B(CreateCatchContext), R(15), U8(12), U8(13),
- B(PushContext), R(15),
- B(Star), R(14),
+ B(CreateCatchContext), R(16), U8(12), U8(13),
+ B(PushContext), R(16),
+ B(Star), R(15),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(6), U8(13),
+ B(TestEqualStrict), R(7), U8(13),
B(JumpIfFalse), U8(6),
B(LdaSmi), I8(1),
- B(Star), R(6),
+ B(Star), R(7),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(16),
- B(CallRuntime), U16(Runtime::kReThrow), R(16), U8(1),
- B(PopContext), R(15),
+ B(Star), R(17),
+ B(CallRuntime), U16(Runtime::kReThrow), R(17), U8(1),
+ B(PopContext), R(16),
B(LdaSmi), I8(-1),
+ B(Star), R(13),
B(Star), R(12),
- B(Star), R(11),
B(Jump), U8(8),
- B(Star), R(12),
+ B(Star), R(13),
B(LdaSmi), I8(1),
- B(Star), R(11),
+ B(Star), R(12),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(13),
+ B(Star), R(14),
B(LdaZero),
- B(TestEqualStrict), R(6), U8(14),
- B(JumpIfTrue), U8(104),
+ B(TestEqualStrict), R(7), U8(14),
+ B(JumpIfTrue), U8(90),
B(LdaNamedProperty), R(4), U8(14), U8(15),
- B(Star), R(8),
+ B(Star), R(9),
B(TestUndetectable),
B(JumpIfFalse), U8(4),
- B(Jump), U8(93),
+ B(Jump), U8(79),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(6), U8(17),
- B(JumpIfFalse), U8(61),
- B(Ldar), R(8),
+ B(TestEqualStrict), R(7), U8(17),
+ B(JumpIfFalse), U8(47),
+ B(Ldar), R(9),
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(143),
- B(Star), R(14),
- B(LdaConstant), U8(15),
+ B(Wide), B(LdaSmi), I16(144),
B(Star), R(15),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(14), U8(2),
+ B(LdaConstant), U8(15),
+ B(Star), R(16),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(15), U8(2),
B(Throw),
- B(Mov), R(context), R(14),
- B(Mov), R(8), R(15),
- B(Mov), R(4), R(16),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(15), U8(2),
- B(Jump), U8(20),
- B(Star), R(15),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(15), U8(12), U8(16),
- B(Star), R(14),
+ B(Mov), R(context), R(15),
+ B(Mov), R(9), R(16),
+ B(Mov), R(4), R(17),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(16), U8(2),
+ B(Jump), U8(6),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(14),
- B(PushContext), R(15),
- B(PopContext), R(15),
+ B(Ldar), R(15),
B(Jump), U8(27),
- B(Mov), R(8), R(14),
- B(Mov), R(4), R(15),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(14), U8(2),
- B(Star), R(9),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(9), U8(1),
+ B(Mov), R(9), R(15),
+ B(Mov), R(4), R(16),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(15), U8(2),
+ B(Star), R(10),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(10), U8(1),
B(JumpIfToBooleanFalse), U8(4),
B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(9), U8(1),
- B(Ldar), R(13),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
+ B(Ldar), R(14),
B(SetPendingMessage),
- B(Ldar), R(11),
- B(SwitchOnSmiNoFeedback), U8(17), U8(2), I8(0),
- B(Jump), U8(8),
B(Ldar), R(12),
+ B(SwitchOnSmiNoFeedback), U8(16), U8(2), I8(0),
+ B(Jump), U8(8),
+ B(Ldar), R(13),
/* 44 S> */ B(Return),
- B(Ldar), R(12),
+ B(Ldar), R(13),
B(ReThrow),
B(LdaUndefined),
/* 44 S> */ B(Return),
]
constant pool: [
Smi [29],
- Smi [91],
+ Smi [90],
Smi [10],
Smi [7],
TUPLE2_TYPE,
SYMBOL_TYPE,
- Smi [75],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
+ Smi [69],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
Smi [15],
@@ -315,14 +294,13 @@ constant pool: [
FIXED_ARRAY_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
- FIXED_ARRAY_TYPE,
Smi [6],
Smi [9],
]
handlers: [
- [77, 257, 265],
- [80, 221, 223],
- [326, 336, 338],
+ [70, 243, 251],
+ [73, 207, 209],
+ [312, 322, 324],
]
---
@@ -331,9 +309,9 @@ snippet: "
function* f() { yield* g() }
f();
"
-frame size: 9
+frame size: 10
parameter count: 1
-bytecode array length: 265
+bytecode array length: 255
bytecodes: [
B(Ldar), R(0),
B(JumpIfUndefined), U8(18),
@@ -342,7 +320,7 @@ bytecodes: [
B(RestoreGeneratorState), R(0),
B(Star), R(1),
B(SwitchOnSmiNoFeedback), U8(0), U8(2), I8(0),
- B(Abort), U8(42),
+ B(Abort), U8(15),
B(LdaSmi), I8(-2),
B(Star), R(1),
B(Mov), R(closure), R(2),
@@ -352,10 +330,7 @@ bytecodes: [
/* 38 E> */ B(StackCheck),
/* 38 E> */ B(SuspendGenerator), R(0), R(0), U8(2), U8(0),
/* 54 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(0), R(0), U8(2),
- B(LdaSmi), I8(-2),
- B(Star), R(1),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(0), U8(1),
+ B(ResumeGenerator), R(0), R(1), R(0), U8(2),
B(Star), R(2),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
@@ -364,69 +339,68 @@ bytecodes: [
B(Ldar), R(2),
/* 54 S> */ B(Return),
/* 43 S> */ B(LdaGlobal), U8(4), U8(0),
- B(Star), R(8),
- /* 50 E> */ B(CallUndefinedReceiver0), R(8), U8(2),
- B(Star), R(6),
- B(LdaNamedProperty), R(6), U8(5), U8(4),
+ B(Star), R(9),
+ /* 50 E> */ B(CallUndefinedReceiver0), R(9), U8(2),
B(Star), R(7),
- B(CallProperty0), R(7), R(6), U8(6),
+ B(LdaNamedProperty), R(7), U8(5), U8(4),
+ B(Star), R(8),
+ B(CallProperty0), R(8), R(7), U8(6),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star), R(4),
+ B(LdaNamedProperty), R(4), U8(6), U8(8),
+ B(Star), R(6),
B(LdaUndefined),
B(Star), R(5),
B(LdaZero),
B(Star), R(3),
B(Ldar), R(1),
- B(SwitchOnSmiNoFeedback), U8(6), U8(1), I8(1),
+ B(SwitchOnSmiNoFeedback), U8(7), U8(1), I8(1),
B(LdaSmi), I8(-2),
B(TestEqualStrictNoFeedback), R(1),
B(JumpIfTrue), U8(4),
- B(Abort), U8(42),
+ B(Abort), U8(15),
B(Ldar), R(3),
- B(SwitchOnSmiNoFeedback), U8(7), U8(2), I8(1),
- B(LdaNamedProperty), R(4), U8(9), U8(8),
- B(Star), R(8),
- B(CallProperty1), R(8), R(4), R(5), U8(10),
- B(Jump), U8(65),
+ B(SwitchOnSmiNoFeedback), U8(8), U8(2), I8(1),
+ B(CallProperty1), R(6), R(4), R(5), U8(10),
+ B(Jump), U8(69),
B(LdaNamedProperty), R(4), U8(10), U8(12),
B(JumpIfUndefined), U8(13),
B(JumpIfNull), U8(11),
- B(Star), R(8),
- B(CallProperty1), R(8), R(4), R(5), U8(14),
- B(Jump), U8(48),
+ B(Star), R(9),
+ B(CallProperty1), R(9), R(4), R(5), U8(14),
+ B(Jump), U8(52),
B(Ldar), R(5),
/* 54 S> */ B(Return),
B(LdaNamedProperty), R(4), U8(11), U8(16),
B(JumpIfUndefined), U8(13),
B(JumpIfNull), U8(11),
- B(Star), R(8),
- B(CallProperty1), R(8), R(4), R(5), U8(18),
- B(Jump), U8(28),
+ B(Star), R(9),
+ B(CallProperty1), R(9), R(4), R(5), U8(18),
+ B(Jump), U8(32),
B(LdaNamedProperty), R(4), U8(10), U8(20),
- B(Star), R(8),
- B(JumpIfUndefined), U8(15),
- B(JumpIfNull), U8(13),
- B(CallProperty0), R(8), R(4), U8(22),
- B(JumpIfJSReceiver), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(2), U8(1),
+ B(JumpIfUndefined), U8(21),
+ B(JumpIfNull), U8(19),
+ B(Star), R(9),
+ B(CallProperty0), R(9), R(4), U8(22),
+ B(Jump), U8(2),
+ B(JumpIfJSReceiver), U8(9),
+ B(Star), R(9),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(9), U8(1),
B(CallRuntime), U16(Runtime::kThrowThrowMethodMissing), R(0), U8(0),
B(Star), R(2),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(2), U8(1),
B(LdaNamedProperty), R(2), U8(12), U8(24),
- B(JumpIfToBooleanTrue), U8(33),
+ B(JumpIfToBooleanTrue), U8(26),
B(Ldar), R(2),
- B(SuspendGenerator), R(0), R(0), U8(8), U8(1),
+ B(SuspendGenerator), R(0), R(0), U8(9), U8(1),
/* 54 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(0), R(0), U8(8),
- B(LdaSmi), I8(-2),
- B(Star), R(1),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(0), U8(1),
+ B(ResumeGenerator), R(0), R(1), R(0), U8(9),
B(Star), R(5),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(Star), R(3),
- B(JumpLoop), U8(139), I8(0),
+ B(JumpLoop), U8(130), I8(0),
B(LdaNamedProperty), R(2), U8(13), U8(26),
B(Star), R(4),
B(LdaSmi), I8(1),
@@ -439,15 +413,15 @@ bytecodes: [
]
constant pool: [
Smi [29],
- Smi [92],
+ Smi [91],
Smi [10],
Smi [7],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["g"],
SYMBOL_TYPE,
- Smi [117],
- Smi [17],
- Smi [37],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
+ Smi [115],
+ Smi [11],
+ Smi [31],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["throw"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
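Generators.golden isolates the same ResumeGenerator rewrite plus one loop optimization: the iterator's next method is now loaded once, immediately after the iterator is obtained, and cached in a register rather than re-fetched with LdaNamedProperty on every pass; the yield* loop likewise calls the cached method directly. The third snippet appears in full above, so it can serve as the example unchanged:

  function* f() { for (let x of [42]) yield x }
  f();
  // per the new expectations: the iterator for [42] lands in R(4), its next
  // method in R(5) (LdaNamedProperty R(4), U8(6) ahead of the dispatch switch),
  // and each iteration is CallProperty0 R(5), R(4) with no per-iteration load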
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCompoundExpressions.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCompoundExpressions.golden
index 53ae78acb7..b24e5d0aa1 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCompoundExpressions.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCompoundExpressions.golden
@@ -19,7 +19,7 @@ bytecodes: [
/* 26 E> */ B(StackCheck),
/* 31 S> */ B(LdaGlobal), U8(0), U8(0),
B(BitwiseAndSmi), I8(1), U8(2),
- /* 45 E> */ B(StaGlobalSloppy), U8(0), U8(3),
+ /* 45 E> */ B(StaGlobal), U8(0), U8(3),
/* 50 S> */ B(Return),
]
constant pool: [
@@ -41,7 +41,7 @@ bytecodes: [
/* 27 E> */ B(StackCheck),
/* 32 S> */ B(LdaGlobal), U8(0), U8(0),
B(AddSmi), I8(1), U8(2),
- /* 51 E> */ B(StaGlobalSloppy), U8(0), U8(3),
+ /* 51 E> */ B(StaGlobal), U8(0), U8(3),
/* 56 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden
index 302f883cfb..f0479d594d 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden
@@ -19,7 +19,7 @@ bytecodes: [
/* 26 E> */ B(StackCheck),
/* 31 S> */ B(LdaGlobal), U8(0), U8(0),
B(Inc), U8(2),
- /* 40 E> */ B(StaGlobalSloppy), U8(0), U8(3),
+ /* 40 E> */ B(StaGlobal), U8(0), U8(3),
/* 47 S> */ B(Return),
]
constant pool: [
@@ -43,7 +43,7 @@ bytecodes: [
B(ToNumeric), U8(2),
B(Star), R(0),
B(Dec), U8(2),
- /* 44 E> */ B(StaGlobalSloppy), U8(0), U8(3),
+ /* 44 E> */ B(StaGlobal), U8(0), U8(3),
B(Ldar), R(0),
/* 47 S> */ B(Return),
]
@@ -66,7 +66,7 @@ bytecodes: [
/* 27 E> */ B(StackCheck),
/* 46 S> */ B(LdaGlobal), U8(0), U8(0),
B(Dec), U8(2),
- /* 55 E> */ B(StaGlobalStrict), U8(0), U8(3),
+ /* 55 E> */ B(StaGlobal), U8(0), U8(3),
/* 67 S> */ B(Return),
]
constant pool: [
@@ -90,7 +90,7 @@ bytecodes: [
B(ToNumeric), U8(2),
B(Star), R(0),
B(Inc), U8(2),
- /* 50 E> */ B(StaGlobalSloppy), U8(0), U8(3),
+ /* 50 E> */ B(StaGlobal), U8(0), U8(3),
B(Ldar), R(0),
/* 53 S> */ B(Return),
]
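The two Global*.golden files record an independent cleanup: StaGlobalSloppy and StaGlobalStrict both become a single StaGlobal with unchanged operands, so the store bytecode no longer encodes the language mode; presumably the interpreter now derives sloppy versus strict from the function being executed instead of from the opcode. A sketch of the two source shapes that used to diverge (names hypothetical; the goldens' own snippets sit outside the diff context):

  var g = 0;
  function sloppyInc() { return ++g; }                 // formerly StaGlobalSloppy
  function strictDec() { "use strict"; return --g; }   // formerly StaGlobalStrict
  // both stores now compile to: StaGlobal, U8(0), U8(3)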
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden
index 3be8bc5158..9c876157ad 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden
@@ -11,52 +11,46 @@ top level: yes
snippet: "
import \"bar\";
"
-frame size: 5
+frame size: 6
parameter count: 2
-bytecode array length: 96
+bytecode array length: 84
bytecodes: [
- B(Ldar), R(1),
+ B(Ldar), R(0),
B(JumpIfUndefined), U8(18),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(1), U8(1),
- B(PushContext), R(2),
- B(RestoreGeneratorState), R(1),
- B(Star), R(0),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(0), U8(1),
+ B(PushContext), R(3),
+ B(RestoreGeneratorState), R(0),
+ B(Star), R(2),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(42),
+ B(Abort), U8(15),
B(LdaSmi), I8(-2),
- B(Star), R(0),
+ B(Star), R(2),
B(LdaConstant), U8(1),
- B(Star), R(4),
- B(Mov), R(arg0), R(2),
- B(Mov), R(closure), R(3),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(3),
- B(PushContext), R(2),
- B(Mov), R(this), R(4),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
- B(StaCurrentContextSlot), U8(4),
+ B(Star), R(5),
+ B(Mov), R(arg0), R(3),
+ B(Mov), R(closure), R(4),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(3), U8(3),
+ B(PushContext), R(3),
+ B(Mov), R(this), R(5),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(4), U8(2),
+ B(Star), R(0),
/* 0 E> */ B(StackCheck),
- B(Star), R(1),
- B(LdaImmutableCurrentContextSlot), U8(4),
- /* 0 E> */ B(SuspendGenerator), R(1), R(0), U8(3), U8(0),
+ /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(4), U8(0),
/* 13 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(1), R(0), U8(3),
- B(LdaSmi), I8(-2),
- B(Star), R(0),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(1), U8(1),
- B(Star), R(3),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(1), U8(1),
+ B(ResumeGenerator), R(0), R(2), R(0), U8(4),
+ B(Star), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
- B(Ldar), R(3),
+ B(Ldar), R(4),
/* 0 E> */ B(Throw),
- B(Ldar), R(3),
+ B(Ldar), R(4),
/* 13 S> */ B(Return),
- B(Ldar), R(3),
- B(StaCurrentContextSlot), U8(5),
- B(LdaCurrentContextSlot), U8(5),
+ B(Mov), R(4), R(1),
+ B(Ldar), R(1),
/* 13 S> */ B(Return),
]
constant pool: [
- Smi [47],
+ Smi [43],
FIXED_ARRAY_TYPE,
Smi [10],
Smi [7],
@@ -68,52 +62,46 @@ handlers: [
snippet: "
import {foo} from \"bar\";
"
-frame size: 5
+frame size: 6
parameter count: 2
-bytecode array length: 96
+bytecode array length: 84
bytecodes: [
- B(Ldar), R(1),
+ B(Ldar), R(0),
B(JumpIfUndefined), U8(18),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(1), U8(1),
- B(PushContext), R(2),
- B(RestoreGeneratorState), R(1),
- B(Star), R(0),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(0), U8(1),
+ B(PushContext), R(3),
+ B(RestoreGeneratorState), R(0),
+ B(Star), R(2),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(42),
+ B(Abort), U8(15),
B(LdaSmi), I8(-2),
- B(Star), R(0),
+ B(Star), R(2),
B(LdaConstant), U8(1),
- B(Star), R(4),
- B(Mov), R(arg0), R(2),
- B(Mov), R(closure), R(3),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(3),
- B(PushContext), R(2),
- B(Mov), R(this), R(4),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
- B(StaCurrentContextSlot), U8(4),
+ B(Star), R(5),
+ B(Mov), R(arg0), R(3),
+ B(Mov), R(closure), R(4),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(3), U8(3),
+ B(PushContext), R(3),
+ B(Mov), R(this), R(5),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(4), U8(2),
+ B(Star), R(0),
/* 0 E> */ B(StackCheck),
- B(Star), R(1),
- B(LdaImmutableCurrentContextSlot), U8(4),
- /* 0 E> */ B(SuspendGenerator), R(1), R(0), U8(3), U8(0),
+ /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(4), U8(0),
/* 24 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(1), R(0), U8(3),
- B(LdaSmi), I8(-2),
- B(Star), R(0),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(1), U8(1),
- B(Star), R(3),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(1), U8(1),
+ B(ResumeGenerator), R(0), R(2), R(0), U8(4),
+ B(Star), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
- B(Ldar), R(3),
+ B(Ldar), R(4),
/* 0 E> */ B(Throw),
- B(Ldar), R(3),
+ B(Ldar), R(4),
/* 24 S> */ B(Return),
- B(Ldar), R(3),
- B(StaCurrentContextSlot), U8(5),
- B(LdaCurrentContextSlot), U8(5),
+ B(Mov), R(4), R(1),
+ B(Ldar), R(1),
/* 24 S> */ B(Return),
]
constant pool: [
- Smi [47],
+ Smi [43],
FIXED_ARRAY_TYPE,
Smi [10],
Smi [7],
@@ -127,76 +115,63 @@ snippet: "
goo(42);
{ let x; { goo(42) } };
"
-frame size: 6
+frame size: 7
parameter count: 2
-bytecode array length: 140
+bytecode array length: 114
bytecodes: [
B(Ldar), R(1),
B(JumpIfUndefined), U8(18),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(1), U8(1),
- B(PushContext), R(2),
+ B(PushContext), R(4),
B(RestoreGeneratorState), R(1),
- B(Star), R(0),
+ B(Star), R(3),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(42),
+ B(Abort), U8(15),
B(LdaSmi), I8(-2),
- B(Star), R(0),
+ B(Star), R(3),
B(LdaConstant), U8(1),
- B(Star), R(4),
- B(Mov), R(arg0), R(2),
- B(Mov), R(closure), R(3),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(3),
- B(PushContext), R(2),
- B(Mov), R(this), R(4),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
- B(StaCurrentContextSlot), U8(4),
- /* 0 E> */ B(StackCheck),
+ B(Star), R(6),
+ B(Mov), R(arg0), R(4),
+ B(Mov), R(closure), R(5),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(4), U8(3),
+ B(PushContext), R(4),
+ B(Mov), R(this), R(6),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(5), U8(2),
B(Star), R(1),
- B(LdaImmutableCurrentContextSlot), U8(4),
- /* 0 E> */ B(SuspendGenerator), R(1), R(0), U8(3), U8(0),
+ /* 0 E> */ B(StackCheck),
+ /* 0 E> */ B(SuspendGenerator), R(1), R(0), U8(5), U8(0),
/* 64 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(1), R(0), U8(3),
- B(LdaSmi), I8(-2),
- B(Star), R(0),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(1), U8(1),
- B(Star), R(3),
+ B(ResumeGenerator), R(1), R(3), R(0), U8(5),
+ B(Star), R(5),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(1), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
- B(Ldar), R(3),
+ B(Ldar), R(5),
/* 0 E> */ B(Throw),
- B(Ldar), R(3),
+ B(Ldar), R(5),
/* 64 S> */ B(Return),
/* 32 S> */ B(LdaModuleVariable), I8(-1), U8(0),
B(ThrowReferenceErrorIfHole), U8(4),
- B(Star), R(3),
+ B(Star), R(5),
B(LdaSmi), I8(42),
- B(Star), R(4),
- /* 32 E> */ B(CallUndefinedReceiver1), R(3), R(4), U8(0),
- B(Ldar), R(closure),
- B(CreateBlockContext), U8(5),
- B(PushContext), R(3),
- B(LdaTheHole),
- B(StaCurrentContextSlot), U8(4),
+ B(Star), R(6),
+ /* 32 E> */ B(CallUndefinedReceiver1), R(5), R(6), U8(0),
/* 47 S> */ B(LdaUndefined),
- /* 47 E> */ B(StaCurrentContextSlot), U8(4),
- /* 52 S> */ B(LdaModuleVariable), I8(-1), U8(1),
+ B(Star), R(0),
+ /* 52 S> */ B(LdaModuleVariable), I8(-1), U8(0),
B(ThrowReferenceErrorIfHole), U8(4),
- B(Star), R(4),
- B(LdaSmi), I8(42),
B(Star), R(5),
- /* 52 E> */ B(CallUndefinedReceiver1), R(4), R(5), U8(2),
- B(StaContextSlot), R(3), U8(5), U8(0),
- B(PopContext), R(3),
- B(LdaCurrentContextSlot), U8(5),
+ B(LdaSmi), I8(42),
+ B(Star), R(6),
+ /* 52 E> */ B(CallUndefinedReceiver1), R(5), R(6), U8(2),
+ B(Star), R(2),
/* 64 S> */ B(Return),
]
constant pool: [
- Smi [47],
+ Smi [43],
FIXED_ARRAY_TYPE,
Smi [10],
Smi [7],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["goo"],
- FIXED_ARRAY_TYPE,
]
handlers: [
]
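Modules.golden reflects a register-allocation change in top-level module code, which these listings compile as a generator: the generator object and statement completion values now live in plain registers (Star R(0), Mov R(4), R(1)) instead of context slots, and the synthetic block context for the braced let declaration disappears entirely, so x becomes a register and module-variable accesses drop what appears to be their context-depth operand from U8(1) to U8(0). The visible part of the snippet, annotated against the new bytecode:

  goo(42);                 // goo is imported; the import line sits outside the hunk
  { let x; { goo(42) } };  // x is now Star R(0); no CreateBlockContext / PopContext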
@@ -207,74 +182,61 @@ snippet: "
foo++;
{ let x; { foo++ } };
"
-frame size: 5
+frame size: 7
parameter count: 2
-bytecode array length: 137
+bytecode array length: 112
bytecodes: [
B(Ldar), R(1),
B(JumpIfUndefined), U8(18),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(1), U8(1),
- B(PushContext), R(2),
+ B(PushContext), R(4),
B(RestoreGeneratorState), R(1),
- B(Star), R(0),
+ B(Star), R(3),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(42),
+ B(Abort), U8(15),
B(LdaSmi), I8(-2),
- B(Star), R(0),
+ B(Star), R(3),
B(LdaConstant), U8(1),
- B(Star), R(4),
- B(Mov), R(arg0), R(2),
- B(Mov), R(closure), R(3),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(3),
- B(PushContext), R(2),
- B(Mov), R(this), R(4),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
- B(StaCurrentContextSlot), U8(4),
- /* 0 E> */ B(StackCheck),
+ B(Star), R(6),
+ B(Mov), R(arg0), R(4),
+ B(Mov), R(closure), R(5),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(4), U8(3),
+ B(PushContext), R(4),
+ B(Mov), R(this), R(6),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(5), U8(2),
B(Star), R(1),
- B(LdaImmutableCurrentContextSlot), U8(4),
- /* 0 E> */ B(SuspendGenerator), R(1), R(0), U8(3), U8(0),
+ /* 0 E> */ B(StackCheck),
+ /* 0 E> */ B(SuspendGenerator), R(1), R(0), U8(5), U8(0),
/* 49 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(1), R(0), U8(3),
- B(LdaSmi), I8(-2),
- B(Star), R(0),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(1), U8(1),
- B(Star), R(3),
+ B(ResumeGenerator), R(1), R(3), R(0), U8(5),
+ B(Star), R(5),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(1), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
- B(Ldar), R(3),
+ B(Ldar), R(5),
/* 0 E> */ B(Throw),
- B(Ldar), R(3),
+ B(Ldar), R(5),
/* 49 S> */ B(Return),
/* 17 S> */ B(LdaSmi), I8(42),
/* 17 E> */ B(StaModuleVariable), I8(1), U8(0),
/* 21 S> */ B(LdaModuleVariable), I8(1), U8(0),
B(Inc), U8(0),
/* 24 E> */ B(StaModuleVariable), I8(1), U8(0),
- B(Ldar), R(closure),
- B(CreateBlockContext), U8(4),
- B(PushContext), R(3),
- B(LdaTheHole),
- B(StaCurrentContextSlot), U8(4),
/* 34 S> */ B(LdaUndefined),
- /* 34 E> */ B(StaCurrentContextSlot), U8(4),
- /* 39 S> */ B(LdaModuleVariable), I8(1), U8(1),
+ B(Star), R(0),
+ /* 39 S> */ B(LdaModuleVariable), I8(1), U8(0),
B(ToNumeric), U8(1),
- B(Star), R(4),
+ B(Star), R(5),
B(Inc), U8(1),
- /* 42 E> */ B(StaModuleVariable), I8(1), U8(1),
- B(Ldar), R(4),
- B(StaContextSlot), R(3), U8(5), U8(0),
- B(PopContext), R(3),
- B(LdaCurrentContextSlot), U8(5),
+ /* 42 E> */ B(StaModuleVariable), I8(1), U8(0),
+ B(Mov), R(5), R(2),
+ B(Ldar), R(2),
/* 49 S> */ B(Return),
]
constant pool: [
- Smi [47],
+ Smi [43],
FIXED_ARRAY_TYPE,
Smi [10],
Smi [7],
- FIXED_ARRAY_TYPE,
]
handlers: [
]
@@ -285,76 +247,64 @@ snippet: "
foo++;
{ let x; { foo++ } };
"
-frame size: 5
+frame size: 7
parameter count: 2
-bytecode array length: 141
+bytecode array length: 118
bytecodes: [
B(Ldar), R(1),
B(JumpIfUndefined), U8(18),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(1), U8(1),
- B(PushContext), R(2),
+ B(PushContext), R(4),
B(RestoreGeneratorState), R(1),
- B(Star), R(0),
+ B(Star), R(3),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(42),
+ B(Abort), U8(15),
B(LdaSmi), I8(-2),
- B(Star), R(0),
+ B(Star), R(3),
B(LdaConstant), U8(1),
- B(Star), R(4),
- B(Mov), R(arg0), R(2),
- B(Mov), R(closure), R(3),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(3),
- B(PushContext), R(2),
- B(Mov), R(this), R(4),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
- B(StaCurrentContextSlot), U8(4),
+ B(Star), R(6),
+ B(Mov), R(arg0), R(4),
+ B(Mov), R(closure), R(5),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(4), U8(3),
+ B(PushContext), R(4),
+ B(Mov), R(this), R(6),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(5), U8(2),
B(Star), R(1),
B(LdaTheHole),
B(StaModuleVariable), I8(1), U8(0),
/* 0 E> */ B(StackCheck),
- B(LdaImmutableCurrentContextSlot), U8(4),
- /* 0 E> */ B(SuspendGenerator), R(1), R(0), U8(3), U8(0),
+ B(Ldar), R(1),
+ /* 0 E> */ B(SuspendGenerator), R(1), R(0), U8(5), U8(0),
/* 49 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(1), R(0), U8(3),
- B(LdaSmi), I8(-2),
- B(Star), R(0),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(1), U8(1),
- B(Star), R(3),
+ B(ResumeGenerator), R(1), R(3), R(0), U8(5),
+ B(Star), R(5),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(1), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
- B(Ldar), R(3),
+ B(Ldar), R(5),
/* 0 E> */ B(Throw),
- B(Ldar), R(3),
+ B(Ldar), R(5),
/* 49 S> */ B(Return),
/* 17 S> */ B(LdaSmi), I8(42),
/* 17 E> */ B(StaModuleVariable), I8(1), U8(0),
/* 21 S> */ B(LdaModuleVariable), I8(1), U8(0),
B(Inc), U8(0),
/* 24 E> */ B(StaModuleVariable), I8(1), U8(0),
- B(Ldar), R(closure),
- B(CreateBlockContext), U8(4),
- B(PushContext), R(3),
- B(LdaTheHole),
- B(StaCurrentContextSlot), U8(4),
/* 34 S> */ B(LdaUndefined),
- /* 34 E> */ B(StaCurrentContextSlot), U8(4),
- /* 39 S> */ B(LdaModuleVariable), I8(1), U8(1),
+ B(Star), R(0),
+ /* 39 S> */ B(LdaModuleVariable), I8(1), U8(0),
B(ToNumeric), U8(1),
- B(Star), R(4),
+ B(Star), R(5),
B(Inc), U8(1),
- /* 42 E> */ B(StaModuleVariable), I8(1), U8(1),
- B(Ldar), R(4),
- B(StaContextSlot), R(3), U8(5), U8(0),
- B(PopContext), R(3),
- B(LdaCurrentContextSlot), U8(5),
+ /* 42 E> */ B(StaModuleVariable), I8(1), U8(0),
+ B(Mov), R(5), R(2),
+ B(Ldar), R(2),
/* 49 S> */ B(Return),
]
constant pool: [
- Smi [51],
+ Smi [49],
FIXED_ARRAY_TYPE,
Smi [10],
Smi [7],
- FIXED_ARRAY_TYPE,
]
handlers: [
]
@@ -365,76 +315,64 @@ snippet: "
foo++;
{ let x; { foo++ } };
"
-frame size: 5
+frame size: 7
parameter count: 2
-bytecode array length: 145
+bytecode array length: 122
bytecodes: [
B(Ldar), R(1),
B(JumpIfUndefined), U8(18),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(1), U8(1),
- B(PushContext), R(2),
+ B(PushContext), R(4),
B(RestoreGeneratorState), R(1),
- B(Star), R(0),
+ B(Star), R(3),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(42),
+ B(Abort), U8(15),
B(LdaSmi), I8(-2),
- B(Star), R(0),
+ B(Star), R(3),
B(LdaConstant), U8(1),
- B(Star), R(4),
- B(Mov), R(arg0), R(2),
- B(Mov), R(closure), R(3),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(3),
- B(PushContext), R(2),
- B(Mov), R(this), R(4),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
- B(StaCurrentContextSlot), U8(4),
+ B(Star), R(6),
+ B(Mov), R(arg0), R(4),
+ B(Mov), R(closure), R(5),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(4), U8(3),
+ B(PushContext), R(4),
+ B(Mov), R(this), R(6),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(5), U8(2),
B(Star), R(1),
B(LdaTheHole),
B(StaModuleVariable), I8(1), U8(0),
/* 0 E> */ B(StackCheck),
- B(LdaImmutableCurrentContextSlot), U8(4),
- /* 0 E> */ B(SuspendGenerator), R(1), R(0), U8(3), U8(0),
+ B(Ldar), R(1),
+ /* 0 E> */ B(SuspendGenerator), R(1), R(0), U8(5), U8(0),
/* 51 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(1), R(0), U8(3),
- B(LdaSmi), I8(-2),
- B(Star), R(0),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(1), U8(1),
- B(Star), R(3),
+ B(ResumeGenerator), R(1), R(3), R(0), U8(5),
+ B(Star), R(5),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(1), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
- B(Ldar), R(3),
+ B(Ldar), R(5),
/* 0 E> */ B(Throw),
- B(Ldar), R(3),
+ B(Ldar), R(5),
/* 51 S> */ B(Return),
/* 19 S> */ B(LdaSmi), I8(42),
/* 19 E> */ B(StaModuleVariable), I8(1), U8(0),
/* 23 S> */ B(LdaModuleVariable), I8(1), U8(0),
B(Inc), U8(0),
/* 26 E> */ B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), U8(0),
- B(Ldar), R(closure),
- B(CreateBlockContext), U8(4),
- B(PushContext), R(3),
- B(LdaTheHole),
- B(StaCurrentContextSlot), U8(4),
/* 36 S> */ B(LdaUndefined),
- /* 36 E> */ B(StaCurrentContextSlot), U8(4),
- /* 41 S> */ B(LdaModuleVariable), I8(1), U8(1),
+ B(Star), R(0),
+ /* 41 S> */ B(LdaModuleVariable), I8(1), U8(0),
B(ToNumeric), U8(1),
- B(Star), R(4),
+ B(Star), R(5),
B(Inc), U8(1),
/* 44 E> */ B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), U8(0),
- B(Ldar), R(4),
- B(StaContextSlot), R(3), U8(5), U8(0),
- B(PopContext), R(3),
- B(LdaCurrentContextSlot), U8(5),
+ B(Mov), R(5), R(2),
+ B(Ldar), R(2),
/* 51 S> */ B(Return),
]
constant pool: [
- Smi [51],
+ Smi [49],
FIXED_ARRAY_TYPE,
Smi [10],
Smi [7],
- FIXED_ARRAY_TYPE,
]
handlers: [
]
@@ -443,56 +381,51 @@ handlers: [
snippet: "
export default (function () {});
"
-frame size: 5
+frame size: 6
parameter count: 2
-bytecode array length: 107
+bytecode array length: 97
bytecodes: [
- B(Ldar), R(1),
+ B(Ldar), R(0),
B(JumpIfUndefined), U8(18),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(1), U8(1),
- B(PushContext), R(2),
- B(RestoreGeneratorState), R(1),
- B(Star), R(0),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(0), U8(1),
+ B(PushContext), R(3),
+ B(RestoreGeneratorState), R(0),
+ B(Star), R(2),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(42),
+ B(Abort), U8(15),
B(LdaSmi), I8(-2),
- B(Star), R(0),
+ B(Star), R(2),
B(LdaConstant), U8(1),
- B(Star), R(4),
- B(Mov), R(arg0), R(2),
- B(Mov), R(closure), R(3),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(3),
- B(PushContext), R(2),
- B(Mov), R(this), R(4),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
- B(StaCurrentContextSlot), U8(4),
- B(Star), R(1),
+ B(Star), R(5),
+ B(Mov), R(arg0), R(3),
+ B(Mov), R(closure), R(4),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(3), U8(3),
+ B(PushContext), R(3),
+ B(Mov), R(this), R(5),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(4), U8(2),
+ B(Star), R(0),
B(LdaTheHole),
B(StaModuleVariable), I8(1), U8(0),
/* 0 E> */ B(StackCheck),
- B(LdaImmutableCurrentContextSlot), U8(4),
- /* 0 E> */ B(SuspendGenerator), R(1), R(0), U8(3), U8(0),
+ B(Ldar), R(0),
+ /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(4), U8(0),
/* 32 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(1), R(0), U8(3),
- B(LdaSmi), I8(-2),
- B(Star), R(0),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(1), U8(1),
- B(Star), R(3),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(1), U8(1),
+ B(ResumeGenerator), R(0), R(2), R(0), U8(4),
+ B(Star), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
- B(Ldar), R(3),
+ B(Ldar), R(4),
/* 0 E> */ B(Throw),
- B(Ldar), R(3),
+ B(Ldar), R(4),
/* 32 S> */ B(Return),
- B(Ldar), R(3),
- B(StaCurrentContextSlot), U8(5),
+ B(Mov), R(4), R(1),
B(CreateClosure), U8(4), U8(0), U8(0),
B(StaModuleVariable), I8(1), U8(0),
- B(LdaCurrentContextSlot), U8(5),
+ B(Ldar), R(1),
/* 32 S> */ B(Return),
]
constant pool: [
- Smi [51],
+ Smi [49],
FIXED_ARRAY_TYPE,
Smi [10],
Smi [7],
@@ -505,65 +438,60 @@ handlers: [
snippet: "
export default (class {});
"
-frame size: 7
+frame size: 8
parameter count: 2
-bytecode array length: 128
+bytecode array length: 118
bytecodes: [
- B(Ldar), R(1),
+ B(Ldar), R(0),
B(JumpIfUndefined), U8(18),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(1), U8(1),
- B(PushContext), R(2),
- B(RestoreGeneratorState), R(1),
- B(Star), R(0),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(0), U8(1),
+ B(PushContext), R(3),
+ B(RestoreGeneratorState), R(0),
+ B(Star), R(2),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(42),
+ B(Abort), U8(15),
B(LdaSmi), I8(-2),
- B(Star), R(0),
+ B(Star), R(2),
B(LdaConstant), U8(1),
- B(Star), R(4),
- B(Mov), R(arg0), R(2),
- B(Mov), R(closure), R(3),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(3),
- B(PushContext), R(2),
- B(Mov), R(this), R(4),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
- B(StaCurrentContextSlot), U8(4),
- B(Star), R(1),
+ B(Star), R(5),
+ B(Mov), R(arg0), R(3),
+ B(Mov), R(closure), R(4),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(3), U8(3),
+ B(PushContext), R(3),
+ B(Mov), R(this), R(5),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(4), U8(2),
+ B(Star), R(0),
B(LdaTheHole),
B(StaModuleVariable), I8(1), U8(0),
/* 0 E> */ B(StackCheck),
- B(LdaImmutableCurrentContextSlot), U8(4),
- /* 0 E> */ B(SuspendGenerator), R(1), R(0), U8(3), U8(0),
+ B(Ldar), R(0),
+ /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(4), U8(0),
/* 26 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(1), R(0), U8(3),
- B(LdaSmi), I8(-2),
- B(Star), R(0),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(1), U8(1),
- B(Star), R(3),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(1), U8(1),
+ B(ResumeGenerator), R(0), R(2), R(0), U8(4),
+ B(Star), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
- B(Ldar), R(3),
+ B(Ldar), R(4),
/* 0 E> */ B(Throw),
- B(Ldar), R(3),
+ B(Ldar), R(4),
/* 26 S> */ B(Return),
- B(Ldar), R(3),
- B(StaCurrentContextSlot), U8(5),
+ B(Mov), R(4), R(1),
B(LdaTheHole),
- B(Star), R(6),
+ B(Star), R(7),
B(CreateClosure), U8(5), U8(0), U8(0),
- B(Star), R(3),
- B(LdaConstant), U8(4),
B(Star), R(4),
- B(Mov), R(3), R(5),
- B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(3),
- B(Star), R(4),
- B(Ldar), R(5),
+ B(LdaConstant), U8(4),
+ B(Star), R(5),
+ B(Mov), R(4), R(6),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(3),
+ B(Star), R(5),
+ B(Ldar), R(6),
B(StaModuleVariable), I8(1), U8(0),
- B(LdaCurrentContextSlot), U8(5),
+ B(Ldar), R(1),
/* 26 S> */ B(Return),
]
constant pool: [
- Smi [51],
+ Smi [49],
FIXED_ARRAY_TYPE,
Smi [10],
Smi [7],
@@ -577,52 +505,46 @@ handlers: [
snippet: "
export {foo as goo} from \"bar\"
"
-frame size: 5
+frame size: 6
parameter count: 2
-bytecode array length: 96
+bytecode array length: 84
bytecodes: [
- B(Ldar), R(1),
+ B(Ldar), R(0),
B(JumpIfUndefined), U8(18),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(1), U8(1),
- B(PushContext), R(2),
- B(RestoreGeneratorState), R(1),
- B(Star), R(0),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(0), U8(1),
+ B(PushContext), R(3),
+ B(RestoreGeneratorState), R(0),
+ B(Star), R(2),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(42),
+ B(Abort), U8(15),
B(LdaSmi), I8(-2),
- B(Star), R(0),
+ B(Star), R(2),
B(LdaConstant), U8(1),
- B(Star), R(4),
- B(Mov), R(arg0), R(2),
- B(Mov), R(closure), R(3),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(3),
- B(PushContext), R(2),
- B(Mov), R(this), R(4),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
- B(StaCurrentContextSlot), U8(4),
+ B(Star), R(5),
+ B(Mov), R(arg0), R(3),
+ B(Mov), R(closure), R(4),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(3), U8(3),
+ B(PushContext), R(3),
+ B(Mov), R(this), R(5),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(4), U8(2),
+ B(Star), R(0),
/* 0 E> */ B(StackCheck),
- B(Star), R(1),
- B(LdaImmutableCurrentContextSlot), U8(4),
- /* 0 E> */ B(SuspendGenerator), R(1), R(0), U8(3), U8(0),
+ /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(4), U8(0),
/* 30 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(1), R(0), U8(3),
- B(LdaSmi), I8(-2),
- B(Star), R(0),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(1), U8(1),
- B(Star), R(3),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(1), U8(1),
+ B(ResumeGenerator), R(0), R(2), R(0), U8(4),
+ B(Star), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
- B(Ldar), R(3),
+ B(Ldar), R(4),
/* 0 E> */ B(Throw),
- B(Ldar), R(3),
+ B(Ldar), R(4),
/* 30 S> */ B(Return),
- B(Ldar), R(3),
- B(StaCurrentContextSlot), U8(5),
- B(LdaCurrentContextSlot), U8(5),
+ B(Mov), R(4), R(1),
+ B(Ldar), R(1),
/* 30 S> */ B(Return),
]
constant pool: [
- Smi [47],
+ Smi [43],
FIXED_ARRAY_TYPE,
Smi [10],
Smi [7],
@@ -634,52 +556,46 @@ handlers: [
snippet: "
export * from \"bar\"
"
-frame size: 5
+frame size: 6
parameter count: 2
-bytecode array length: 96
+bytecode array length: 84
bytecodes: [
- B(Ldar), R(1),
+ B(Ldar), R(0),
B(JumpIfUndefined), U8(18),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(1), U8(1),
- B(PushContext), R(2),
- B(RestoreGeneratorState), R(1),
- B(Star), R(0),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(0), U8(1),
+ B(PushContext), R(3),
+ B(RestoreGeneratorState), R(0),
+ B(Star), R(2),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(42),
+ B(Abort), U8(15),
B(LdaSmi), I8(-2),
- B(Star), R(0),
+ B(Star), R(2),
B(LdaConstant), U8(1),
- B(Star), R(4),
- B(Mov), R(arg0), R(2),
- B(Mov), R(closure), R(3),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(3),
- B(PushContext), R(2),
- B(Mov), R(this), R(4),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
- B(StaCurrentContextSlot), U8(4),
+ B(Star), R(5),
+ B(Mov), R(arg0), R(3),
+ B(Mov), R(closure), R(4),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(3), U8(3),
+ B(PushContext), R(3),
+ B(Mov), R(this), R(5),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(4), U8(2),
+ B(Star), R(0),
/* 0 E> */ B(StackCheck),
- B(Star), R(1),
- B(LdaImmutableCurrentContextSlot), U8(4),
- /* 0 E> */ B(SuspendGenerator), R(1), R(0), U8(3), U8(0),
+ /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(4), U8(0),
/* 19 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(1), R(0), U8(3),
- B(LdaSmi), I8(-2),
- B(Star), R(0),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(1), U8(1),
- B(Star), R(3),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(1), U8(1),
+ B(ResumeGenerator), R(0), R(2), R(0), U8(4),
+ B(Star), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
- B(Ldar), R(3),
+ B(Ldar), R(4),
/* 0 E> */ B(Throw),
- B(Ldar), R(3),
+ B(Ldar), R(4),
/* 19 S> */ B(Return),
- B(Ldar), R(3),
- B(StaCurrentContextSlot), U8(5),
- B(LdaCurrentContextSlot), U8(5),
+ B(Mov), R(4), R(1),
+ B(Ldar), R(1),
/* 19 S> */ B(Return),
]
constant pool: [
- Smi [47],
+ Smi [43],
FIXED_ARRAY_TYPE,
Smi [10],
Smi [7],
@@ -692,66 +608,55 @@ snippet: "
import * as foo from \"bar\"
foo.f(foo, foo.x);
"
-frame size: 7
+frame size: 9
parameter count: 2
-bytecode array length: 134
+bytecode array length: 111
bytecodes: [
- B(Ldar), R(1),
+ B(Ldar), R(0),
B(JumpIfUndefined), U8(18),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(1), U8(1),
- B(PushContext), R(2),
- B(RestoreGeneratorState), R(1),
- B(Star), R(0),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(0), U8(1),
+ B(PushContext), R(4),
+ B(RestoreGeneratorState), R(0),
+ B(Star), R(3),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(42),
+ B(Abort), U8(15),
B(LdaSmi), I8(-2),
- B(Star), R(0),
+ B(Star), R(3),
B(LdaConstant), U8(1),
- B(Star), R(4),
- B(Mov), R(arg0), R(2),
- B(Mov), R(closure), R(3),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(3),
- B(PushContext), R(2),
- B(Mov), R(this), R(4),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
- B(StaCurrentContextSlot), U8(4),
- B(Star), R(1),
+ B(Star), R(6),
+ B(Mov), R(arg0), R(4),
+ B(Mov), R(closure), R(5),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(4), U8(3),
+ B(PushContext), R(4),
+ B(Mov), R(this), R(6),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(5), U8(2),
+ B(Star), R(0),
B(LdaZero),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::kGetModuleNamespace), R(3), U8(1),
- B(StaCurrentContextSlot), U8(5),
+ B(Star), R(5),
+ B(CallRuntime), U16(Runtime::kGetModuleNamespace), R(5), U8(1),
+ B(Star), R(1),
/* 0 E> */ B(StackCheck),
- B(LdaImmutableCurrentContextSlot), U8(4),
- /* 0 E> */ B(SuspendGenerator), R(1), R(0), U8(3), U8(0),
+ B(Ldar), R(0),
+ /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(5), U8(0),
/* 45 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(1), R(0), U8(3),
- B(LdaSmi), I8(-2),
- B(Star), R(0),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(1), U8(1),
- B(Star), R(3),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(1), U8(1),
+ B(ResumeGenerator), R(0), R(3), R(0), U8(5),
+ B(Star), R(5),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
- B(Ldar), R(3),
+ B(Ldar), R(5),
/* 0 E> */ B(Throw),
- B(Ldar), R(3),
+ B(Ldar), R(5),
/* 45 S> */ B(Return),
- /* 27 S> */ B(LdaImmutableCurrentContextSlot), U8(5),
- B(Star), R(4),
- /* 31 E> */ B(LdaNamedProperty), R(4), U8(4), U8(0),
- B(Star), R(3),
- B(LdaImmutableCurrentContextSlot), U8(5),
+ /* 31 S> */ B(LdaNamedProperty), R(1), U8(4), U8(0),
B(Star), R(5),
- B(LdaImmutableCurrentContextSlot), U8(5),
- B(Star), R(6),
- /* 42 E> */ B(LdaNamedProperty), R(6), U8(5), U8(2),
- B(Star), R(6),
- /* 31 E> */ B(CallProperty2), R(3), R(4), R(5), R(6), U8(4),
- B(StaCurrentContextSlot), U8(6),
- B(LdaCurrentContextSlot), U8(6),
+ /* 42 E> */ B(LdaNamedProperty), R(1), U8(5), U8(2),
+ B(Star), R(8),
+ /* 31 E> */ B(CallProperty2), R(5), R(1), R(1), R(8), U8(4),
+ B(Star), R(2),
/* 45 S> */ B(Return),
]
constant pool: [
- Smi [57],
+ Smi [55],
FIXED_ARRAY_TYPE,
Smi [10],
Smi [7],
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden
index ec2d310302..e0567143b5 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden
@@ -231,7 +231,7 @@ bytecodes: [
B(JumpIfUndefined), U8(6),
B(Ldar), R(3),
B(JumpIfNotNull), U8(16),
- B(LdaSmi), I8(73),
+ B(LdaSmi), I8(74),
B(Star), R(4),
B(LdaConstant), U8(1),
B(Star), R(5),
@@ -272,7 +272,7 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 96
+bytecode array length: 89
bytecodes: [
B(Ldar), R(2),
B(JumpIfUndefined), U8(18),
@@ -281,7 +281,7 @@ bytecodes: [
B(RestoreGeneratorState), R(2),
B(Star), R(3),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(42),
+ B(Abort), U8(15),
B(LdaSmi), I8(-2),
B(Star), R(3),
B(Mov), R(closure), R(4),
@@ -291,10 +291,7 @@ bytecodes: [
/* 11 E> */ B(StackCheck),
/* 11 E> */ B(SuspendGenerator), R(2), R(0), U8(4), U8(0),
/* 62 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(2), R(0), U8(4),
- B(LdaSmi), I8(-2),
- B(Star), R(3),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(2), U8(1),
+ B(ResumeGenerator), R(2), R(3), R(0), U8(4),
B(Star), R(4),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
B(SwitchOnSmiNoFeedback), U8(1), U8(2), I8(0),
@@ -333,7 +330,7 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 151
+bytecode array length: 137
bytecodes: [
B(Ldar), R(1),
B(JumpIfUndefined), U8(18),
@@ -342,7 +339,7 @@ bytecodes: [
B(RestoreGeneratorState), R(1),
B(Star), R(2),
B(SwitchOnSmiNoFeedback), U8(0), U8(2), I8(0),
- B(Abort), U8(42),
+ B(Abort), U8(15),
B(LdaSmi), I8(-2),
B(Star), R(2),
B(Mov), R(closure), R(3),
@@ -352,10 +349,7 @@ bytecodes: [
/* 11 E> */ B(StackCheck),
/* 11 E> */ B(SuspendGenerator), R(1), R(0), U8(3), U8(0),
/* 56 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(1), R(0), U8(3),
- B(LdaSmi), I8(-2),
- B(Star), R(2),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(1), U8(1),
+ B(ResumeGenerator), R(1), R(2), R(0), U8(3),
B(Star), R(3),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(1), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
@@ -370,10 +364,10 @@ bytecodes: [
B(LdaSmi), I8(-2),
/* 31 E> */ B(TestEqualStrictNoFeedback), R(2),
B(JumpIfTrue), U8(4),
- B(Abort), U8(42),
+ B(Abort), U8(15),
/* 36 S> */ B(LdaSmi), I8(10),
/* 36 E> */ B(TestLessThan), R(0), U8(0),
- B(JumpIfFalse), U8(56),
+ B(JumpIfFalse), U8(49),
/* 18 E> */ B(StackCheck),
/* 47 S> */ B(LdaFalse),
B(Star), R(4),
@@ -381,10 +375,7 @@ bytecodes: [
B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(3), U8(2),
B(SuspendGenerator), R(1), R(0), U8(3), U8(1),
/* 56 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(1), R(0), U8(3),
- B(LdaSmi), I8(-2),
- B(Star), R(2),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(1), U8(1),
+ B(ResumeGenerator), R(1), R(2), R(0), U8(3),
B(Star), R(3),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(1), U8(1),
B(SwitchOnSmiNoFeedback), U8(5), U8(2), I8(0),
@@ -395,13 +386,13 @@ bytecodes: [
/* 44 S> */ B(Ldar), R(0),
B(Inc), U8(1),
B(Star), R(0),
- B(JumpLoop), U8(72), I8(0),
+ B(JumpLoop), U8(65), I8(0),
B(LdaUndefined),
/* 56 S> */ B(Return),
]
constant pool: [
Smi [29],
- Smi [60],
+ Smi [53],
Smi [10],
Smi [7],
Smi [36],
@@ -509,7 +500,7 @@ snippet: "
"
frame size: 12
parameter count: 1
-bytecode array length: 231
+bytecode array length: 224
bytecodes: [
B(Ldar), R(1),
B(JumpIfUndefined), U8(18),
@@ -518,7 +509,7 @@ bytecodes: [
B(RestoreGeneratorState), R(1),
B(Star), R(3),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(42),
+ B(Abort), U8(15),
B(LdaSmi), I8(-2),
B(Star), R(3),
B(Mov), R(closure), R(4),
@@ -537,10 +528,10 @@ bytecodes: [
B(LdaSmi), I8(-2),
/* 36 E> */ B(TestEqualStrictNoFeedback), R(3),
B(JumpIfTrue), U8(4),
- B(Abort), U8(42),
+ B(Abort), U8(15),
/* 41 S> */ B(LdaSmi), I8(10),
/* 41 E> */ B(TestLessThan), R(0), U8(0),
- B(JumpIfFalse), U8(59),
+ B(JumpIfFalse), U8(52),
/* 23 E> */ B(StackCheck),
/* 52 S> */ B(Mov), R(1), R(8),
B(Mov), R(0), R(9),
@@ -548,10 +539,7 @@ bytecodes: [
B(CallJSRuntime), U8(%async_function_await_uncaught), R(8), U8(3),
B(SuspendGenerator), R(1), R(0), U8(8), U8(0),
/* 61 S> */ B(Return),
- B(RestoreGeneratorRegisters), R(1), R(0), U8(8),
- B(LdaSmi), I8(-2),
- B(Star), R(3),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(1), U8(1),
+ B(ResumeGenerator), R(1), R(3), R(0), U8(8),
B(Star), R(8),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(1), U8(1),
B(Star), R(9),
@@ -563,7 +551,7 @@ bytecodes: [
/* 49 S> */ B(Ldar), R(0),
B(Inc), U8(1),
B(Star), R(0),
- B(JumpLoop), U8(75), I8(0),
+ B(JumpLoop), U8(68), I8(0),
B(LdaUndefined),
B(Star), R(9),
B(Mov), R(2), R(8),
@@ -624,7 +612,7 @@ constant pool: [
Smi [9],
]
handlers: [
- [46, 190, 198],
- [49, 148, 150],
+ [46, 183, 191],
+ [49, 141, 143],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticClassFields.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticClassFields.golden
new file mode 100644
index 0000000000..80a2e4fd49
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticClassFields.golden
@@ -0,0 +1,339 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+wrap: yes
+public fields: yes
+static fields: yes
+
+---
+snippet: "
+ {
+ class A {
+ a;
+ ['b'];
+ static c;
+ static ['d'];
+ }
+
+ class B {
+ a = 1;
+ ['b'] = this.a;
+ static c = 3;
+ static ['d'] = this.c;
+ }
+ new A;
+ new B;
+ }
+"
+frame size: 11
+parameter count: 1
+bytecode array length: 193
+bytecodes: [
+ /* 30 E> */ B(StackCheck),
+ B(Ldar), R(closure),
+ B(CreateBlockContext), U8(0),
+ B(PushContext), R(4),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(4),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(5),
+ B(LdaTheHole),
+ B(Star), R(8),
+ B(CreateClosure), U8(2), U8(0), U8(2),
+ B(Star), R(5),
+ B(LdaConstant), U8(1),
+ B(Star), R(6),
+ B(LdaConstant), U8(3),
+ B(StaCurrentContextSlot), U8(4),
+ B(Star), R(9),
+ B(LdaConstant), U8(4),
+ B(Star), R(10),
+ B(LdaConstant), U8(5),
+ B(TestEqualStrictNoFeedback), R(10),
+ B(Mov), R(5), R(7),
+ B(JumpIfFalse), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowStaticPrototypeError), R(0), U8(0),
+ B(Ldar), R(10),
+ B(StaCurrentContextSlot), U8(5),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(5),
+ B(Star), R(6),
+ B(Mov), R(5), R(1),
+ B(CreateClosure), U8(6), U8(1), U8(2),
+ B(Star), R(7),
+ B(StaNamedProperty), R(5), U8(7), U8(2),
+ B(CreateClosure), U8(8), U8(4), U8(2),
+ B(Star), R(9),
+ B(CallProperty0), R(9), R(1), U8(5),
+ B(PopContext), R(4),
+ B(Mov), R(1), R(2),
+ B(Ldar), R(closure),
+ /* 38 E> */ B(CreateBlockContext), U8(9),
+ B(PushContext), R(4),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(4),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(5),
+ B(LdaTheHole),
+ B(Star), R(8),
+ B(CreateClosure), U8(11), U8(7), U8(2),
+ B(Star), R(5),
+ B(LdaConstant), U8(10),
+ B(Star), R(6),
+ B(LdaConstant), U8(3),
+ B(StaCurrentContextSlot), U8(4),
+ B(Star), R(9),
+ B(LdaConstant), U8(4),
+ B(Star), R(10),
+ B(LdaConstant), U8(5),
+ B(TestEqualStrictNoFeedback), R(10),
+ B(Mov), R(5), R(7),
+ B(JumpIfFalse), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowStaticPrototypeError), R(0), U8(0),
+ B(Ldar), R(10),
+ B(StaCurrentContextSlot), U8(5),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(5),
+ B(Star), R(6),
+ B(Mov), R(5), R(0),
+ B(CreateClosure), U8(12), U8(8), U8(2),
+ B(Star), R(7),
+ B(StaNamedProperty), R(5), U8(7), U8(9),
+ B(CreateClosure), U8(13), U8(11), U8(2),
+ B(Star), R(9),
+ B(CallProperty0), R(9), R(0), U8(12),
+ B(PopContext), R(4),
+ B(Mov), R(0), R(3),
+ /* 197 S> */ B(Ldar), R(2),
+ /* 197 E> */ B(Construct), R(2), R(0), U8(0), U8(14),
+ /* 206 S> */ B(Ldar), R(0),
+ /* 206 E> */ B(Construct), R(0), R(0), U8(0), U8(16),
+ B(LdaUndefined),
+ /* 215 S> */ B(Return),
+]
+constant pool: [
+ FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["b"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["d"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["prototype"],
+ SHARED_FUNCTION_INFO_TYPE,
+ SYMBOL_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ {
+ class A extends class {} {
+ a;
+ ['b'];
+ static c;
+ static ['d'];
+ }
+
+ class B extends class {} {
+ a = 1;
+ ['b'] = this.a;
+ static c = 3;
+ static ['d'] = this.c;
+ foo() { return 1; }
+ constructor() {
+ super();
+ }
+ }
+
+ class C extends B {
+ a = 1;
+ ['b'] = this.a;
+ static c = 3;
+ static ['d'] = super.foo();
+ constructor() {
+ (() => super())();
+ }
+ }
+
+ new A;
+ new B;
+ new C;
+ }
+"
+frame size: 15
+parameter count: 1
+bytecode array length: 346
+bytecodes: [
+ /* 30 E> */ B(StackCheck),
+ B(Ldar), R(closure),
+ B(CreateBlockContext), U8(0),
+ B(PushContext), R(6),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(4),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(5),
+ B(LdaTheHole),
+ B(Star), R(14),
+ B(CreateClosure), U8(3), U8(0), U8(2),
+ B(Star), R(11),
+ B(LdaConstant), U8(2),
+ B(Star), R(12),
+ B(Mov), R(11), R(13),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(12), U8(3),
+ B(Star), R(12),
+ B(CreateClosure), U8(4), U8(1), U8(2),
+ B(Star), R(7),
+ B(LdaConstant), U8(1),
+ B(Star), R(8),
+ B(LdaConstant), U8(5),
+ B(StaCurrentContextSlot), U8(4),
+ B(Star), R(11),
+ B(LdaConstant), U8(6),
+ B(Star), R(12),
+ B(LdaConstant), U8(7),
+ B(TestEqualStrictNoFeedback), R(12),
+ B(Mov), R(13), R(10),
+ B(Mov), R(7), R(9),
+ B(JumpIfFalse), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowStaticPrototypeError), R(0), U8(0),
+ B(Ldar), R(12),
+ B(StaCurrentContextSlot), U8(5),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(8), U8(5),
+ B(Star), R(8),
+ B(Mov), R(7), R(2),
+ B(CreateClosure), U8(8), U8(2), U8(2),
+ B(Star), R(9),
+ B(StaNamedProperty), R(7), U8(9), U8(3),
+ B(CreateClosure), U8(10), U8(5), U8(2),
+ B(Star), R(11),
+ B(CallProperty0), R(11), R(2), U8(6),
+ B(PopContext), R(6),
+ B(Mov), R(2), R(3),
+ B(Ldar), R(closure),
+ /* 38 E> */ B(CreateBlockContext), U8(11),
+ B(PushContext), R(6),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(4),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(5),
+ B(LdaTheHole),
+ B(Star), R(14),
+ B(CreateClosure), U8(14), U8(8), U8(2),
+ B(Star), R(11),
+ B(LdaConstant), U8(13),
+ B(Star), R(12),
+ B(Mov), R(11), R(13),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(12), U8(3),
+ B(Star), R(12),
+ B(CreateClosure), U8(15), U8(9), U8(2),
+ B(Star), R(7),
+ B(LdaConstant), U8(12),
+ B(Star), R(8),
+ B(LdaConstant), U8(5),
+ B(StaCurrentContextSlot), U8(4),
+ B(Star), R(11),
+ B(LdaConstant), U8(6),
+ B(Star), R(12),
+ B(LdaConstant), U8(7),
+ B(TestEqualStrictNoFeedback), R(12),
+ B(Mov), R(7), R(9),
+ B(Mov), R(13), R(10),
+ B(JumpIfFalse), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowStaticPrototypeError), R(0), U8(0),
+ B(Ldar), R(12),
+ B(StaCurrentContextSlot), U8(5),
+ B(CreateClosure), U8(16), U8(10), U8(2),
+ B(Star), R(13),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(8), U8(6),
+ B(Star), R(8),
+ B(Mov), R(7), R(1),
+ B(CreateClosure), U8(17), U8(11), U8(2),
+ B(Star), R(9),
+ B(StaNamedProperty), R(7), U8(9), U8(12),
+ B(CreateClosure), U8(18), U8(14), U8(2),
+ B(Star), R(11),
+ B(CallProperty0), R(11), R(1), U8(15),
+ B(PopContext), R(6),
+ B(Mov), R(1), R(4),
+ B(Ldar), R(closure),
+ /* 122 E> */ B(CreateBlockContext), U8(19),
+ B(PushContext), R(6),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(4),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(5),
+ /* 313 E> */ B(CreateClosure), U8(21), U8(17), U8(2),
+ B(Star), R(7),
+ B(LdaConstant), U8(20),
+ B(Star), R(8),
+ B(LdaConstant), U8(5),
+ B(StaCurrentContextSlot), U8(4),
+ B(Star), R(11),
+ B(LdaConstant), U8(6),
+ B(Star), R(12),
+ B(LdaConstant), U8(7),
+ B(TestEqualStrictNoFeedback), R(12),
+ B(Mov), R(1), R(10),
+ B(Mov), R(7), R(9),
+ B(JumpIfFalse), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowStaticPrototypeError), R(0), U8(0),
+ B(Ldar), R(12),
+ B(StaCurrentContextSlot), U8(5),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(8), U8(5),
+ B(Star), R(8),
+ B(Mov), R(7), R(0),
+ B(CreateClosure), U8(22), U8(18), U8(2),
+ B(Star), R(9),
+ B(StaNamedProperty), R(7), U8(9), U8(19),
+ B(CreateClosure), U8(23), U8(21), U8(2),
+ B(Star), R(11),
+ B(Ldar), R(0),
+ B(StaNamedProperty), R(11), U8(24), U8(22),
+ B(CallProperty0), R(11), R(0), U8(24),
+ B(PopContext), R(6),
+ B(Mov), R(0), R(5),
+ /* 456 S> */ B(Ldar), R(3),
+ /* 456 E> */ B(Construct), R(3), R(0), U8(0), U8(26),
+ /* 465 S> */ B(Ldar), R(4),
+ /* 465 E> */ B(Construct), R(4), R(0), U8(0), U8(28),
+ /* 474 S> */ B(Ldar), R(0),
+ /* 474 E> */ B(Construct), R(0), R(0), U8(0), U8(30),
+ B(LdaUndefined),
+ /* 483 S> */ B(Return),
+]
+constant pool: [
+ FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["b"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["d"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["prototype"],
+ SHARED_FUNCTION_INFO_TYPE,
+ SYMBOL_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ SYMBOL_TYPE,
+]
+handlers: [
+]
+
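
Note on the semantics the StaticClassFields golden above encodes: with harmony_static_fields enabled, a static field initializer runs once, at class definition time, with the class constructor itself as the receiver, which is why the bytecode invokes the initializer closure via CallProperty0 on the class register. A minimal cctest-style check of that behavior follows; the test name is hypothetical, and it assumes the cctest harness (LocalContext, CompileRun) used by the test files in this patch:

TEST(StaticClassFieldSemanticsSketch) {
  // Illustrative sketch, not part of this commit.
  i::FLAG_harmony_public_fields = true;
  i::FLAG_harmony_static_fields = true;
  LocalContext env;
  v8::HandleScope scope(env->GetIsolate());
  // Inside a static initializer, `this` is the class itself, so B.d sees B.c.
  v8::Local<v8::Value> result = CompileRun(
      "class B {"
      "  static c = 3;"
      "  static ['d'] = this.c + 1;"
      "}"
      "B.c + B.d");
  CHECK_EQ(7, result->Int32Value(env.local()).FromJust());
}
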
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StoreGlobal.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StoreGlobal.golden
index 8a24433be3..927b9f3307 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/StoreGlobal.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StoreGlobal.golden
@@ -18,7 +18,7 @@ bytecode array length: 8
bytecodes: [
/* 21 E> */ B(StackCheck),
/* 26 S> */ B(LdaSmi), I8(2),
- /* 28 E> */ B(StaGlobalSloppy), U8(0), U8(0),
+ /* 28 E> */ B(StaGlobal), U8(0), U8(0),
B(LdaUndefined),
/* 33 S> */ B(Return),
]
@@ -39,7 +39,7 @@ bytecode array length: 8
bytecodes: [
/* 26 E> */ B(StackCheck),
/* 32 S> */ B(Ldar), R(arg0),
- /* 34 E> */ B(StaGlobalSloppy), U8(0), U8(0),
+ /* 34 E> */ B(StaGlobal), U8(0), U8(0),
B(LdaUndefined),
/* 39 S> */ B(Return),
]
@@ -61,7 +61,7 @@ bytecode array length: 8
bytecodes: [
/* 35 E> */ B(StackCheck),
/* 40 S> */ B(LdaSmi), I8(2),
- /* 42 E> */ B(StaGlobalStrict), U8(0), U8(0),
+ /* 42 E> */ B(StaGlobal), U8(0), U8(0),
B(LdaUndefined),
/* 47 S> */ B(Return),
]
@@ -83,7 +83,7 @@ bytecode array length: 8
bytecodes: [
/* 17 E> */ B(StackCheck),
/* 22 S> */ B(LdaSmi), I8(2),
- /* 24 E> */ B(StaGlobalSloppy), U8(0), U8(0),
+ /* 24 E> */ B(StaGlobal), U8(0), U8(0),
B(LdaUndefined),
/* 29 S> */ B(Return),
]
@@ -363,7 +363,7 @@ bytecodes: [
/* 1287 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(252),
/* 1297 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(254),
/* 1305 S> */ B(LdaSmi), I8(2),
- /* 1307 E> */ B(Wide), B(StaGlobalSloppy), U16(1), U16(256),
+ /* 1307 E> */ B(Wide), B(StaGlobal), U16(1), U16(256),
B(LdaUndefined),
/* 1312 S> */ B(Return),
]
@@ -645,7 +645,7 @@ bytecodes: [
/* 1303 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(252),
/* 1313 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(254),
/* 1321 S> */ B(LdaSmi), I8(2),
- /* 1323 E> */ B(Wide), B(StaGlobalStrict), U16(1), U16(256),
+ /* 1323 E> */ B(Wide), B(StaGlobal), U16(1), U16(256),
B(LdaUndefined),
/* 1328 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden
index 4b56b6302b..0747228933 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden
@@ -25,7 +25,7 @@ bytecodes: [
B(CreateClosure), U8(2), U8(3), U8(0),
B(StaNamedOwnProperty), R(1), U8(3), U8(4),
B(Ldar), R(1),
- /* 8 E> */ B(StaGlobalSloppy), U8(4), U8(6),
+ /* 8 E> */ B(StaGlobal), U8(4), U8(6),
B(LdaUndefined),
/* 33 S> */ B(Return),
]
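
The StoreGlobal and TopLevelObjectLiterals hunks above collapse StaGlobalSloppy and StaGlobalStrict into a single StaGlobal bytecode; the opcode no longer needs to carry the language mode, presumably because the mode can be recovered from the surrounding metadata rather than the instruction itself. The observable difference between the two modes is only how a failed store is reported, which the following cctest-style sketch demonstrates (hypothetical test name, assumes the cctest harness):

TEST(GlobalStoreModeSketch) {
  // Illustrative sketch, not part of this commit.
  LocalContext env;
  v8::HandleScope scope(env->GetIsolate());
  CompileRun(
      "Object.defineProperty(this, 'g',"
      "                      { value: 1, writable: false });");
  // Sloppy mode: the store to the non-writable global is silently dropped.
  CompileRun("g = 2;");
  CHECK_EQ(1, CompileRun("g")->Int32Value(env.local()).FromJust());
  // Strict mode: the same store throws a TypeError.
  v8::TryCatch try_catch(env->GetIsolate());
  CompileRun("'use strict'; g = 2;");
  CHECK(try_catch.HasCaught());
}
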
diff --git a/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc b/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
index c6b1a01ff7..4064ea1d8b 100644
--- a/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
+++ b/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
@@ -45,6 +45,7 @@ class ProgramOptions final {
do_expressions_(false),
async_iteration_(false),
public_fields_(false),
+ static_fields_(false),
verbose_(false) {}
bool Validate() const;
@@ -65,6 +66,7 @@ class ProgramOptions final {
bool do_expressions() const { return do_expressions_; }
bool async_iteration() const { return async_iteration_; }
bool public_fields() const { return public_fields_; }
+ bool static_fields() const { return static_fields_; }
bool verbose() const { return verbose_; }
bool suppress_runtime_errors() const { return rebaseline_ && !verbose_; }
std::vector<std::string> input_filenames() const { return input_filenames_; }
@@ -83,6 +85,7 @@ class ProgramOptions final {
bool do_expressions_;
bool async_iteration_;
bool public_fields_;
+ bool static_fields_;
bool verbose_;
std::vector<std::string> input_filenames_;
std::string output_filename_;
@@ -174,6 +177,8 @@ ProgramOptions ProgramOptions::FromCommandLine(int argc, char** argv) {
options.async_iteration_ = true;
} else if (strcmp(argv[i], "--public-fields") == 0) {
options.public_fields_ = true;
+ } else if (strcmp(argv[i], "--static-fields") == 0) {
+ options.static_fields_ = true;
} else if (strcmp(argv[i], "--verbose") == 0) {
options.verbose_ = true;
} else if (strncmp(argv[i], "--output=", 9) == 0) {
@@ -280,6 +285,8 @@ void ProgramOptions::UpdateFromHeader(std::istream& stream) {
async_iteration_ = ParseBoolean(line.c_str() + 17);
} else if (line.compare(0, 15, "public fields: ") == 0) {
public_fields_ = ParseBoolean(line.c_str() + 15);
+ } else if (line.compare(0, 15, "static fields: ") == 0) {
+ static_fields_ = ParseBoolean(line.c_str() + 15);
} else if (line == "---") {
break;
} else if (line.empty()) {
@@ -304,6 +311,7 @@ void ProgramOptions::PrintHeader(std::ostream& stream) const { // NOLINT
if (do_expressions_) stream << "\ndo expressions: yes";
if (async_iteration_) stream << "\nasync iteration: yes";
if (public_fields_) stream << "\npublic fields: yes";
+ if (static_fields_) stream << "\nstatic fields: yes";
stream << "\n\n";
}
@@ -407,8 +415,8 @@ void GenerateExpectationsFile(std::ostream& stream, // NOLINT
}
if (options.do_expressions()) i::FLAG_harmony_do_expressions = true;
- if (options.async_iteration()) i::FLAG_harmony_async_iteration = true;
if (options.public_fields()) i::FLAG_harmony_public_fields = true;
+ if (options.static_fields()) i::FLAG_harmony_static_fields = true;
stream << "#\n# Autogenerated by generate-bytecode-expectations.\n#\n\n";
options.PrintHeader(stream);
@@ -417,8 +425,8 @@ void GenerateExpectationsFile(std::ostream& stream, // NOLINT
}
i::FLAG_harmony_do_expressions = false;
- i::FLAG_harmony_async_iteration = false;
i::FLAG_harmony_public_fields = false;
+ i::FLAG_harmony_static_fields = false;
}
bool WriteExpectationsFile(const std::vector<std::string>& snippet_list,
@@ -465,8 +473,8 @@ void PrintUsage(const char* exec_path) {
"Specify the name of the test function.\n"
" --top-level Process top level code, not the top-level function.\n"
" --do-expressions Enable harmony_do_expressions flag.\n"
- " --async-iteration Enable harmony_async_iteration flag.\n"
" --public-fields Enable harmony_public_fields flag.\n"
+ " --static-fields Enable harmony_static_fields flag.\n"
" --output=file.name\n"
" Specify the output file. If not specified, output goes to "
"stdout.\n"
diff --git a/deps/v8/test/cctest/interpreter/interpreter-tester.h b/deps/v8/test/cctest/interpreter/interpreter-tester.h
index 8bc6e67a32..93e1d930ac 100644
--- a/deps/v8/test/cctest/interpreter/interpreter-tester.h
+++ b/deps/v8/test/cctest/interpreter/interpreter-tester.h
@@ -83,6 +83,11 @@ class InterpreterTester {
static const char kFunctionName[];
+ // Expose raw RegisterList construction to tests.
+ static RegisterList NewRegisterList(int first_reg_index, int register_count) {
+ return RegisterList(first_reg_index, register_count);
+ }
+
private:
Isolate* isolate_;
const char* source_;
diff --git a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
index ed8098ddab..1db0f70ad7 100644
--- a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
+++ b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
@@ -2244,6 +2244,62 @@ TEST(ClassFields) {
" class A {\n"
" a;\n"
" ['b'];\n"
+ " }\n"
+ "\n"
+ " class B {\n"
+ " a = 1;\n"
+ " ['b'] = this.a;\n"
+ " }\n"
+ " new A;\n"
+ " new B;\n"
+ "}\n",
+
+ "{\n"
+ " class A extends class {} {\n"
+ " a;\n"
+ " ['b'];\n"
+ " }\n"
+ "\n"
+ " class B extends class {} {\n"
+ " a = 1;\n"
+ " ['b'] = this.a;\n"
+ " foo() { return 1; }\n"
+ " constructor() {\n"
+ " super();\n"
+ " }\n"
+ " }\n"
+ "\n"
+ " class C extends B {\n"
+ " a = 1;\n"
+ " ['b'] = this.a;\n"
+ " constructor() {\n"
+ " (() => super())();\n"
+ " }\n"
+ " }\n"
+ "\n"
+ " new A;\n"
+ " new B;\n"
+ " new C;\n"
+ "}\n"};
+
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("ClassFields.golden")));
+ i::FLAG_harmony_public_fields = old_flag;
+}
+
+TEST(StaticClassFields) {
+ bool old_flag = i::FLAG_harmony_public_fields;
+ bool old_static_flag = i::FLAG_harmony_static_fields;
+ i::FLAG_harmony_public_fields = true;
+ i::FLAG_harmony_static_fields = true;
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
+
+ const char* snippets[] = {
+ "{\n"
+ " class A {\n"
+ " a;\n"
+ " ['b'];\n"
" static c;\n"
" static ['d'];\n"
" }\n"
@@ -2293,8 +2349,9 @@ TEST(ClassFields) {
"}\n"};
CHECK(CompareTexts(BuildActual(printer, snippets),
- LoadGolden("ClassFields.golden")));
+ LoadGolden("StaticClassFields.golden")));
i::FLAG_harmony_public_fields = old_flag;
+ i::FLAG_harmony_static_fields = old_static_flag;
}
TEST(Generators) {
@@ -2323,8 +2380,6 @@ TEST(Generators) {
}
TEST(AsyncGenerators) {
- bool old_flag = i::FLAG_harmony_async_iteration;
- i::FLAG_harmony_async_iteration = true;
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
@@ -2347,7 +2402,6 @@ TEST(AsyncGenerators) {
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("AsyncGenerators.golden")));
- i::FLAG_harmony_async_iteration = old_flag;
}
TEST(Modules) {
@@ -2465,8 +2519,6 @@ TEST(NewAndSpread) {
}
TEST(ForAwaitOf) {
- bool old_flag = i::FLAG_harmony_async_iteration;
- i::FLAG_harmony_async_iteration = true;
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
@@ -2499,8 +2551,6 @@ TEST(ForAwaitOf) {
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("ForAwaitOf.golden")));
-
- i::FLAG_harmony_async_iteration = old_flag;
}
TEST(StandardForLoop) {
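
The hunks above repeat the manual save/set/restore of i::FLAG_harmony_* around each test body. Nothing in this patch changes that idiom, but a small RAII guard (purely illustrative, not part of the diff) shows how the same bookkeeping could be made exception-safe:

class ScopedBoolFlag {
 public:
  ScopedBoolFlag(bool* flag, bool value) : flag_(flag), old_value_(*flag) {
    *flag = value;  // set for the duration of the scope
  }
  ~ScopedBoolFlag() { *flag_ = old_value_; }  // restore on scope exit

 private:
  bool* flag_;
  bool old_value_;
};

// Usage inside a test body:
//   ScopedBoolFlag static_fields(&i::FLAG_harmony_static_fields, true);
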
diff --git a/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc b/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc
index b706b7c480..b8ebef3b28 100644
--- a/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc
+++ b/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc
@@ -27,7 +27,8 @@ class InvokeIntrinsicHelper {
Handle<Object> Invoke(A... args) {
CHECK(IntrinsicsHelper::IsSupported(function_id_));
BytecodeArrayBuilder builder(zone_, sizeof...(args), 0, 0);
- RegisterList reg_list(builder.Receiver().index(), sizeof...(args));
+ RegisterList reg_list = InterpreterTester::NewRegisterList(
+ builder.Receiver().index(), sizeof...(args));
builder.CallRuntime(function_id_, reg_list).Return();
InterpreterTester tester(isolate_, builder.ToBytecodeArray(isolate_));
auto callable = tester.GetCallable<A...>();
diff --git a/deps/v8/test/cctest/interpreter/test-interpreter.cc b/deps/v8/test/cctest/interpreter/test-interpreter.cc
index 6185925ab4..cd77fd43a1 100644
--- a/deps/v8/test/cctest/interpreter/test-interpreter.cc
+++ b/deps/v8/test/cctest/interpreter/test-interpreter.cc
@@ -387,6 +387,48 @@ TEST(InterpreterBinaryOpsHeapNumber) {
}
}
+TEST(InterpreterBinaryOpsBigInt) {
+ // This test only checks that the recorded type feedback is kBigInt.
+ AstBigInt inputs[] = {AstBigInt("1"), AstBigInt("-42"), AstBigInt("0xFFFF")};
+ for (size_t l = 0; l < arraysize(inputs); l++) {
+ for (size_t r = 0; r < arraysize(inputs); r++) {
+ for (size_t o = 0; o < arraysize(kArithmeticOperators); o++) {
+ // Skip over unsigned right shift.
+ if (kArithmeticOperators[o] == Token::Value::SHR) continue;
+
+ HandleAndZoneScope handles;
+ Isolate* isolate = handles.main_isolate();
+ Zone* zone = handles.main_zone();
+ FeedbackVectorSpec feedback_spec(zone);
+ BytecodeArrayBuilder builder(zone, 1, 1, &feedback_spec);
+
+ FeedbackSlot slot = feedback_spec.AddBinaryOpICSlot();
+ Handle<i::FeedbackMetadata> metadata =
+ NewFeedbackMetadata(isolate, &feedback_spec);
+
+ Register reg(0);
+ auto lhs = inputs[l];
+ auto rhs = inputs[r];
+ builder.LoadLiteral(lhs)
+ .StoreAccumulatorInRegister(reg)
+ .LoadLiteral(rhs)
+ .BinaryOperation(kArithmeticOperators[o], reg, GetIndex(slot))
+ .Return();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
+
+ InterpreterTester tester(isolate, bytecode_array, metadata);
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->IsBigInt());
+ Object* feedback = callable.vector()->Get(slot);
+ CHECK(feedback->IsSmi());
+ CHECK_EQ(BinaryOperationFeedback::kBigInt,
+ static_cast<Smi*>(feedback)->value());
+ }
+ }
+ }
+}
+
namespace {
struct LiteralForTest {
@@ -827,6 +869,8 @@ TEST(InterpreterUnaryOpFeedback) {
Handle<Smi> smi_max = Handle<Smi>(Smi::FromInt(Smi::kMaxValue), isolate);
Handle<Smi> smi_min = Handle<Smi>(Smi::FromInt(Smi::kMinValue), isolate);
Handle<HeapNumber> number = isolate->factory()->NewHeapNumber(2.1);
+ Handle<BigInt> bigint =
+ BigInt::FromNumber(isolate, smi_max).ToHandleChecked();
Handle<String> str = isolate->factory()->NewStringFromAsciiChecked("42");
struct TestCase {
@@ -834,19 +878,23 @@ TEST(InterpreterUnaryOpFeedback) {
Handle<Smi> smi_feedback_value;
Handle<Smi> smi_to_number_feedback_value;
Handle<HeapNumber> number_feedback_value;
+ Handle<BigInt> bigint_feedback_value;
Handle<Object> any_feedback_value;
};
TestCase const kTestCases[] = {
- {Token::Value::INC, smi_one, smi_max, number, str},
- {Token::Value::DEC, smi_one, smi_min, number, str}};
+ // Testing ADD and BIT_NOT would require generalizing the test setup.
+ {Token::Value::SUB, smi_one, smi_min, number, bigint, str},
+ {Token::Value::INC, smi_one, smi_max, number, bigint, str},
+ {Token::Value::DEC, smi_one, smi_min, number, bigint, str}};
for (TestCase const& test_case : kTestCases) {
i::FeedbackVectorSpec feedback_spec(zone);
- BytecodeArrayBuilder builder(zone, 4, 0, &feedback_spec);
+ BytecodeArrayBuilder builder(zone, 5, 0, &feedback_spec);
i::FeedbackSlot slot0 = feedback_spec.AddBinaryOpICSlot();
i::FeedbackSlot slot1 = feedback_spec.AddBinaryOpICSlot();
i::FeedbackSlot slot2 = feedback_spec.AddBinaryOpICSlot();
i::FeedbackSlot slot3 = feedback_spec.AddBinaryOpICSlot();
+ i::FeedbackSlot slot4 = feedback_spec.AddBinaryOpICSlot();
Handle<i::FeedbackMetadata> metadata =
i::NewFeedbackMetadata(isolate, &feedback_spec);
@@ -859,18 +907,21 @@ TEST(InterpreterUnaryOpFeedback) {
.UnaryOperation(test_case.op, GetIndex(slot2))
.LoadAccumulatorWithRegister(builder.Parameter(2))
.UnaryOperation(test_case.op, GetIndex(slot3))
+ .LoadAccumulatorWithRegister(builder.Parameter(3))
+ .UnaryOperation(test_case.op, GetIndex(slot4))
.Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
InterpreterTester tester(isolate, bytecode_array, metadata);
typedef Handle<Object> H;
- auto callable = tester.GetCallable<H, H, H, H>();
+ auto callable = tester.GetCallable<H, H, H, H, H>();
Handle<Object> return_val =
callable(test_case.smi_feedback_value,
test_case.smi_to_number_feedback_value,
- test_case.number_feedback_value, test_case.any_feedback_value)
+ test_case.number_feedback_value,
+ test_case.bigint_feedback_value, test_case.any_feedback_value)
.ToHandleChecked();
USE(return_val);
Object* feedback0 = callable.vector()->Get(slot0);
@@ -890,8 +941,13 @@ TEST(InterpreterUnaryOpFeedback) {
Object* feedback3 = callable.vector()->Get(slot3);
CHECK(feedback3->IsSmi());
- CHECK_EQ(BinaryOperationFeedback::kAny,
+ CHECK_EQ(BinaryOperationFeedback::kBigInt,
static_cast<Smi*>(feedback3)->value());
+
+ Object* feedback4 = callable.vector()->Get(slot4);
+ CHECK(feedback4->IsSmi());
+ CHECK_EQ(BinaryOperationFeedback::kAny,
+ static_cast<Smi*>(feedback4)->value());
}
}
@@ -1822,6 +1878,49 @@ TEST(InterpreterHeapNumberComparisons) {
}
}
+TEST(InterpreterBigIntComparisons) {
+ // This test only checks that the recorded type feedback is kBigInt.
+ AstBigInt inputs[] = {AstBigInt("0"), AstBigInt("-42"),
+ AstBigInt("0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF")};
+ for (size_t c = 0; c < arraysize(kComparisonTypes); c++) {
+ Token::Value comparison = kComparisonTypes[c];
+ for (size_t i = 0; i < arraysize(inputs); i++) {
+ for (size_t j = 0; j < arraysize(inputs); j++) {
+ HandleAndZoneScope handles;
+ Isolate* isolate = handles.main_isolate();
+ Zone* zone = handles.main_zone();
+ AstValueFactory ast_factory(zone, isolate->ast_string_constants(),
+ isolate->heap()->HashSeed());
+
+ FeedbackVectorSpec feedback_spec(zone);
+ BytecodeArrayBuilder builder(zone, 1, 1, &feedback_spec);
+
+ FeedbackSlot slot = feedback_spec.AddCompareICSlot();
+ Handle<i::FeedbackMetadata> metadata =
+ NewFeedbackMetadata(isolate, &feedback_spec);
+
+ Register r0(0);
+ builder.LoadLiteral(inputs[i])
+ .StoreAccumulatorInRegister(r0)
+ .LoadLiteral(inputs[j])
+ .CompareOperation(comparison, r0, GetIndex(slot))
+ .Return();
+
+ ast_factory.Internalize(isolate);
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
+ InterpreterTester tester(isolate, bytecode_array, metadata);
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->IsBoolean());
+ Object* feedback = callable.vector()->Get(slot);
+ CHECK(feedback->IsSmi());
+ CHECK_EQ(CompareOperationFeedback::kBigInt,
+ static_cast<Smi*>(feedback)->value());
+ }
+ }
+ }
+}
+
TEST(InterpreterStringComparisons) {
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
@@ -3314,17 +3413,17 @@ TEST(InterpreterForIn) {
{"var r = 0;\n"
"for (var a in [0,6,7,9]) { r = r + (1 << a); }\n"
"return r;\n",
- 0xf},
+ 0xF},
{"var r = 0;\n"
"for (var a in [0,6,7,9]) { r = r + (1 << a); }\n"
"var r = 0;\n"
"for (var a in [0,6,7,9]) { r = r + (1 << a); }\n"
"return r;\n",
- 0xf},
+ 0xF},
{"var r = 0;\n"
"for (var a in 'foobar') { r = r + (1 << a); }\n"
"return r;\n",
- 0x3f},
+ 0x3F},
{"var r = 0;\n"
"for (var a in {1:0, 10:1, 100:2, 1000:3}) {\n"
" r = r + Number(a);\n"
@@ -4491,7 +4590,7 @@ TEST(InterpreterWideParametersPickOne) {
std::string source = os.str();
InterpreterTester tester(isolate, source.c_str(), "*");
auto callable = tester.GetCallable<Handle<Object>>();
- Handle<Object> arg = handle(Smi::FromInt(0xaa55), isolate);
+ Handle<Object> arg = handle(Smi::FromInt(0xAA55), isolate);
Handle<Object> return_value = callable(arg).ToHandleChecked();
Handle<Smi> actual = Handle<Smi>::cast(return_value);
CHECK_EQ(actual->value(), parameter);
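
InterpreterBinaryOpsBigInt above skips Token::Value::SHR because BigInt defines no unsigned right shift: applying >>> to BigInt operands throws a TypeError instead of producing kBigInt feedback. A cctest-style sketch of that behavior (hypothetical test name; assumes the cctest harness and a build in which BigInt literals parse):

TEST(BigIntUnsignedShiftThrowsSketch) {
  // Illustrative sketch, not part of this commit.
  LocalContext env;
  v8::HandleScope scope(env->GetIsolate());
  v8::TryCatch try_catch(env->GetIsolate());
  // >>> has no BigInt semantics, so this must throw rather than shift.
  CompileRun("1n >>> 1n;");
  CHECK(try_catch.HasCaught());
}
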
diff --git a/deps/v8/test/cctest/parsing/test-preparser.cc b/deps/v8/test/cctest/parsing/test-preparser.cc
index 74630c6c7e..48aa4826c9 100644
--- a/deps/v8/test/cctest/parsing/test-preparser.cc
+++ b/deps/v8/test/cctest/parsing/test-preparser.cc
@@ -660,11 +660,23 @@ TEST(PreParserScopeAnalysis) {
[] { i::FLAG_harmony_public_fields = true; },
[] { i::FLAG_harmony_public_fields = false; }},
{"class X { static ['foo'] = 2; }; new X;",
- [] { i::FLAG_harmony_public_fields = true; },
- [] { i::FLAG_harmony_public_fields = false; }},
+ [] {
+ i::FLAG_harmony_public_fields = true;
+ i::FLAG_harmony_static_fields = true;
+ },
+ [] {
+ i::FLAG_harmony_public_fields = false;
+ i::FLAG_harmony_static_fields = false;
+ }},
{"class X { ['bar'] = 1; static ['foo'] = 2; }; new X;",
- [] { i::FLAG_harmony_public_fields = true; },
- [] { i::FLAG_harmony_public_fields = false; }},
+ [] {
+ i::FLAG_harmony_public_fields = true;
+ i::FLAG_harmony_static_fields = true;
+ },
+ [] {
+ i::FLAG_harmony_public_fields = false;
+ i::FLAG_harmony_static_fields = false;
+ }},
};
for (unsigned outer_ix = 0; outer_ix < arraysize(outers); ++outer_ix) {
diff --git a/deps/v8/test/cctest/parsing/test-scanner-streams.cc b/deps/v8/test/cctest/parsing/test-scanner-streams.cc
index 27fc086487..ab207e5813 100644
--- a/deps/v8/test/cctest/parsing/test-scanner-streams.cc
+++ b/deps/v8/test/cctest/parsing/test-scanner-streams.cc
@@ -500,7 +500,7 @@ TEST(Regress6377) {
"\xbf\0", // third chunk - end of 2-byte seq
};
const std::vector<std::vector<uint16_t>> unicode_expected = {
- {0xd800, 0xdc00, 97}, {0xfff, 97}, {0xff, 97}, {0xd800, 0xdc00, 97, 0xff},
+ {0xD800, 0xDC00, 97}, {0xFFF, 97}, {0xFF, 97}, {0xD800, 0xDC00, 97, 0xFF},
};
CHECK_EQ(unicode_expected.size(), arraysize(cases));
TestChunkStreamAgainstReference(cases, unicode_expected);
@@ -508,7 +508,7 @@ TEST(Regress6377) {
TEST(Regress6836) {
const char* cases[] = {
- // 0xc2 is a lead byte, but there's no continuation. The bug occurs when
+ // 0xC2 is a lead byte, but there's no continuation. The bug occurs when
// this happens near the chunk end.
"X\xc2Y\0",
// Last chunk ends with a 2-byte char lead.
@@ -518,7 +518,7 @@ TEST(Regress6836) {
"X\xe0\xbf\0",
};
const std::vector<std::vector<uint16_t>> unicode_expected = {
- {0x58, 0xfffd, 0x59}, {0x58, 0xfffd}, {0x58, 0xfffd},
+ {0x58, 0xFFFD, 0x59}, {0x58, 0xFFFD}, {0x58, 0xFFFD},
};
CHECK_EQ(unicode_expected.size(), arraysize(cases));
TestChunkStreamAgainstReference(cases, unicode_expected);
@@ -540,12 +540,12 @@ TEST(TestOverlongAndInvalidSequences) {
"X\xf4\x90\x80\x80Y\0",
};
const std::vector<std::vector<uint16_t>> unicode_expected = {
- {0x58, 0xfffd, 0xfffd, 0x59},
- {0x58, 0xfffd, 0xfffd, 0x59},
- {0x58, 0xfffd, 0xfffd, 0xfffd, 0x59},
- {0x58, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0x59},
- {0x58, 0xfffd, 0xfffd, 0xfffd, 0x59},
- {0x58, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0x59},
+ {0x58, 0xFFFD, 0xFFFD, 0x59},
+ {0x58, 0xFFFD, 0xFFFD, 0x59},
+ {0x58, 0xFFFD, 0xFFFD, 0xFFFD, 0x59},
+ {0x58, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0x59},
+ {0x58, 0xFFFD, 0xFFFD, 0xFFFD, 0x59},
+ {0x58, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0x59},
};
CHECK_EQ(unicode_expected.size(), arraysize(cases));
TestChunkStreamAgainstReference(cases, unicode_expected);
diff --git a/deps/v8/test/cctest/parsing/test-scanner.cc b/deps/v8/test/cctest/parsing/test-scanner.cc
index 9c18bfb1ae..ea7a8fbaa2 100644
--- a/deps/v8/test/cctest/parsing/test-scanner.cc
+++ b/deps/v8/test/cctest/parsing/test-scanner.cc
@@ -29,7 +29,6 @@ struct ScannerTestHelper {
std::unique_ptr<UnicodeCache> unicode_cache;
std::unique_ptr<Utf16CharacterStream> stream;
std::unique_ptr<Scanner> scanner;
- int use_counts[v8::Isolate::kUseCounterFeatureCount];
Scanner* operator->() const { return scanner.get(); }
Scanner* get() const { return scanner.get(); }
@@ -39,11 +38,8 @@ ScannerTestHelper make_scanner(const char* src) {
ScannerTestHelper helper;
helper.unicode_cache = std::unique_ptr<UnicodeCache>(new UnicodeCache);
helper.stream = ScannerStream::ForTesting(src);
- for (int i = 0; i < v8::Isolate::kUseCounterFeatureCount; i++) {
- helper.use_counts[i] = 0;
- }
- helper.scanner = std::unique_ptr<Scanner>(
- new Scanner(helper.unicode_cache.get(), helper.use_counts));
+ helper.scanner =
+ std::unique_ptr<Scanner>(new Scanner(helper.unicode_cache.get()));
helper.scanner->Initialize(helper.stream.get(), false);
return helper;
}
diff --git a/deps/v8/test/cctest/profiler-extension.cc b/deps/v8/test/cctest/profiler-extension.cc
index aa75a481f5..28fb146036 100644
--- a/deps/v8/test/cctest/profiler-extension.cc
+++ b/deps/v8/test/cctest/profiler-extension.cc
@@ -53,8 +53,7 @@ v8::Local<v8::FunctionTemplate> ProfilerExtension::GetNativeFunctionTemplate(
if (name->Equals(context, v8_str(isolate, "collectSample")).FromJust()) {
return v8::FunctionTemplate::New(isolate, ProfilerExtension::CollectSample);
}
- CHECK(false);
- return v8::Local<v8::FunctionTemplate>();
+ UNREACHABLE();
}
void ProfilerExtension::StartProfiling(
diff --git a/deps/v8/test/cctest/test-access-checks.cc b/deps/v8/test/cctest/test-access-checks.cc
index f260a15c6e..acea843c14 100644
--- a/deps/v8/test/cctest/test-access-checks.cc
+++ b/deps/v8/test/cctest/test-access-checks.cc
@@ -348,7 +348,7 @@ TEST(AccessCheckWithExceptionThrowingInterceptor) {
isolate->SetFailedAccessCheckCallbackFunction([](v8::Local<v8::Object> target,
v8::AccessType type,
v8::Local<v8::Value> data) {
- CHECK(false); // This should never be called.
+ UNREACHABLE(); // This should never be called.
});
v8::HandleScope scope(isolate);
diff --git a/deps/v8/test/cctest/test-accessors.cc b/deps/v8/test/cctest/test-accessors.cc
index 07da7a55a0..24c10a0f6e 100644
--- a/deps/v8/test/cctest/test-accessors.cc
+++ b/deps/v8/test/cctest/test-accessors.cc
@@ -84,36 +84,65 @@ THREADED_TEST(PropertyHandler) {
Local<Script> setter;
// check function instance accessors
getter = v8_compile("var obj = new Fun(); obj.instance_foo;");
- CHECK_EQ(900, getter->Run(env.local())
- .ToLocalChecked()
- ->Int32Value(env.local())
- .FromJust());
+ for (int i = 0; i < 4; i++) {
+ CHECK_EQ(900, getter->Run(env.local())
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ }
setter = v8_compile("obj.instance_foo = 901;");
- CHECK_EQ(901, setter->Run(env.local())
- .ToLocalChecked()
- ->Int32Value(env.local())
- .FromJust());
+ for (int i = 0; i < 4; i++) {
+ CHECK_EQ(901, setter->Run(env.local())
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ }
getter = v8_compile("obj.bar;");
- CHECK_EQ(907, getter->Run(env.local())
- .ToLocalChecked()
- ->Int32Value(env.local())
- .FromJust());
+ for (int i = 0; i < 4; i++) {
+ CHECK_EQ(907, getter->Run(env.local())
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ }
setter = v8_compile("obj.bar = 908;");
- CHECK_EQ(908, setter->Run(env.local())
- .ToLocalChecked()
- ->Int32Value(env.local())
- .FromJust());
+ for (int i = 0; i < 4; i++) {
+ CHECK_EQ(908, setter->Run(env.local())
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ }
// check function static accessors
getter = v8_compile("Fun.object_foo;");
- CHECK_EQ(902, getter->Run(env.local())
- .ToLocalChecked()
- ->Int32Value(env.local())
- .FromJust());
+ for (int i = 0; i < 4; i++) {
+ CHECK_EQ(902, getter->Run(env.local())
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ }
setter = v8_compile("Fun.object_foo = 903;");
- CHECK_EQ(903, setter->Run(env.local())
- .ToLocalChecked()
- ->Int32Value(env.local())
- .FromJust());
+ for (int i = 0; i < 4; i++) {
+ CHECK_EQ(903, setter->Run(env.local())
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ }
+
+ // And now with null prototype.
+ CompileRun(env.local(), "obj.__proto__ = null;");
+ getter = v8_compile("obj.bar;");
+ for (int i = 0; i < 4; i++) {
+ CHECK_EQ(907, getter->Run(env.local())
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ }
+ setter = v8_compile("obj.bar = 908;");
+ for (int i = 0; i < 4; i++) {
+ CHECK_EQ(908, setter->Run(env.local())
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ }
}
@@ -647,10 +676,32 @@ THREADED_TEST(GlobalObjectAccessor) {
" set : function() { set_value = this; }"
"});"
"function getter() { return x; }"
- "function setter() { x = 1; }"
- "for (var i = 0; i < 4; i++) { getter(); setter(); }");
- CHECK(v8::Utils::OpenHandle(*CompileRun("getter()"))->IsJSGlobalProxy());
- CHECK(v8::Utils::OpenHandle(*CompileRun("set_value"))->IsJSGlobalProxy());
+ "function setter() { x = 1; }");
+
+ Local<Script> check_getter = v8_compile("getter()");
+ Local<Script> check_setter = v8_compile("setter(); set_value");
+
+ // Ensure that LoadGlobalICs in getter and StoreGlobalICs setter get
+ // JSGlobalProxy as a receiver regardless of the current IC state and
+ // the order in which ICs are executed.
+ for (int i = 0; i < 10; i++) {
+ CHECK(
+ v8::Utils::OpenHandle(*check_getter->Run(env.local()).ToLocalChecked())
+ ->IsJSGlobalProxy());
+ }
+ for (int i = 0; i < 10; i++) {
+ CHECK(
+ v8::Utils::OpenHandle(*check_setter->Run(env.local()).ToLocalChecked())
+ ->IsJSGlobalProxy());
+ }
+ for (int i = 0; i < 10; i++) {
+ CHECK(
+ v8::Utils::OpenHandle(*check_getter->Run(env.local()).ToLocalChecked())
+ ->IsJSGlobalProxy());
+ CHECK(
+ v8::Utils::OpenHandle(*check_setter->Run(env.local()).ToLocalChecked())
+ ->IsJSGlobalProxy());
+ }
}
diff --git a/deps/v8/test/cctest/test-allocation.cc b/deps/v8/test/cctest/test-allocation.cc
index b1a3bef421..139829dd2b 100644
--- a/deps/v8/test/cctest/test-allocation.cc
+++ b/deps/v8/test/cctest/test-allocation.cc
@@ -4,6 +4,12 @@
#include <stdlib.h>
#include <string.h>
+#if V8_OS_POSIX
+#include <setjmp.h>
+#include <signal.h>
+#include <unistd.h> // NOLINT
+#endif
+
#include "src/v8.h"
#include "test/cctest/cctest.h"
@@ -35,6 +41,11 @@ class AllocationPlatform : public TestPlatform {
void OnCriticalMemoryPressure() override { oom_callback_called = true; }
+ bool OnCriticalMemoryPressure(size_t length) override {
+ oom_callback_called = true;
+ return true;
+ }
+
static AllocationPlatform* current_platform;
bool oom_callback_called = false;
};
@@ -54,7 +65,7 @@ size_t GetHugeMemoryAmount() {
static size_t huge_memory = 0;
if (!huge_memory) {
for (int i = 0; i < 100; i++) {
- huge_memory |= bit_cast<size_t>(v8::base::OS::GetRandomMmapAddr());
+ huge_memory |= bit_cast<size_t>(v8::internal::GetRandomMmapAddr());
}
// Make it larger than the available address space.
huge_memory *= 2;
@@ -122,7 +133,7 @@ TEST(AlignedAllocOOM) {
// On failure, this won't return, since an AlignedAlloc failure is fatal.
// In that case, behavior is checked in OnAlignedAllocOOM before exit.
void* result = v8::internal::AlignedAlloc(GetHugeMemoryAmount(),
- v8::base::OS::AllocatePageSize());
+ v8::internal::AllocatePageSize());
// On a few systems, allocation somehow succeeds.
CHECK_EQ(result == nullptr, platform.oom_callback_called);
}
@@ -143,7 +154,7 @@ TEST(AlignedAllocVirtualMemoryOOM) {
CHECK(!platform.oom_callback_called);
v8::internal::VirtualMemory result;
bool success = v8::internal::AlignedAllocVirtualMemory(
- GetHugeMemoryAmount(), v8::base::OS::AllocatePageSize(), nullptr,
+ GetHugeMemoryAmount(), v8::internal::AllocatePageSize(), nullptr,
&result);
// On a few systems, allocation somehow succeeds.
CHECK_IMPLIES(success, result.IsReserved());
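
The AllocationPlatform change above adds the new bool-returning OnCriticalMemoryPressure(size_t) overload alongside the existing void one; the return value lets a platform report whether it actually managed to release memory. A sketch of an embedder-side override, illustrative only, reusing the TestPlatform base from the same test file:

class PressureCountingPlatform : public TestPlatform {
 public:
  void OnCriticalMemoryPressure() override { pressure_events_++; }
  bool OnCriticalMemoryPressure(size_t length) override {
    pressure_events_++;
    return false;  // report that no memory could be released
  }

  int pressure_events_ = 0;
};
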
diff --git a/deps/v8/test/cctest/test-api-interceptors.cc b/deps/v8/test/cctest/test-api-interceptors.cc
index 63f3bc42fb..fd811abffe 100644
--- a/deps/v8/test/cctest/test-api-interceptors.cc
+++ b/deps/v8/test/cctest/test-api-interceptors.cc
@@ -98,10 +98,10 @@ void SymbolAccessorSetter(Local<Name> name, Local<Value> value,
SimpleAccessorSetter(Local<String>::Cast(sym->Name()), value, info);
}
-void StringInterceptorGetter(
- Local<String> name,
- const v8::PropertyCallbackInfo<v8::Value>&
- info) { // Intercept names that start with 'interceptor_'.
+void InterceptorGetter(Local<Name> generic_name,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ if (generic_name->IsSymbol()) return;
+ Local<String> name = Local<String>::Cast(generic_name);
String::Utf8Value utf8(info.GetIsolate(), name);
char* name_str = *utf8;
char prefix[] = "interceptor_";
@@ -117,9 +117,10 @@ void StringInterceptorGetter(
.ToLocalChecked());
}
-
-void StringInterceptorSetter(Local<String> name, Local<Value> value,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
+void InterceptorSetter(Local<Name> generic_name, Local<Value> value,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ if (generic_name->IsSymbol()) return;
+ Local<String> name = Local<String>::Cast(generic_name);
// Intercept accesses that set certain integer values, for which the name does
// not start with 'accessor_'.
String::Utf8Value utf8(info.GetIsolate(), name);
@@ -140,18 +141,6 @@ void StringInterceptorSetter(Local<String> name, Local<Value> value,
}
}
-void InterceptorGetter(Local<Name> generic_name,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- if (generic_name->IsSymbol()) return;
- StringInterceptorGetter(Local<String>::Cast(generic_name), info);
-}
-
-void InterceptorSetter(Local<Name> generic_name, Local<Value> value,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- if (generic_name->IsSymbol()) return;
- StringInterceptorSetter(Local<String>::Cast(generic_name), value, info);
-}
-
void GenericInterceptorGetter(Local<Name> generic_name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
Local<String> str;
@@ -198,19 +187,20 @@ void AddAccessor(Local<FunctionTemplate> templ, Local<String> name,
templ->PrototypeTemplate()->SetAccessor(name, getter, setter);
}
-void AddInterceptor(Local<FunctionTemplate> templ,
- v8::NamedPropertyGetterCallback getter,
- v8::NamedPropertySetterCallback setter) {
- templ->InstanceTemplate()->SetNamedPropertyHandler(getter, setter);
-}
-
-
void AddAccessor(Local<FunctionTemplate> templ, Local<Name> name,
v8::AccessorNameGetterCallback getter,
v8::AccessorNameSetterCallback setter) {
templ->PrototypeTemplate()->SetAccessor(name, getter, setter);
}
+void AddStringOnlyInterceptor(Local<FunctionTemplate> templ,
+ v8::GenericNamedPropertyGetterCallback getter,
+ v8::GenericNamedPropertySetterCallback setter) {
+ templ->InstanceTemplate()->SetHandler(v8::NamedPropertyHandlerConfiguration(
+ getter, setter, nullptr, nullptr, nullptr, Local<v8::Value>(),
+ v8::PropertyHandlerFlags::kOnlyInterceptStrings));
+}
+
void AddInterceptor(Local<FunctionTemplate> templ,
v8::GenericNamedPropertyGetterCallback getter,
v8::GenericNamedPropertySetterCallback setter) {
@@ -1517,7 +1507,7 @@ THREADED_TEST(LegacyInterceptorDoesNotSeeSymbols) {
child->Inherit(parent);
AddAccessor(parent, age, SymbolAccessorGetter, SymbolAccessorSetter);
- AddInterceptor(child, StringInterceptorGetter, StringInterceptorSetter);
+ AddStringOnlyInterceptor(child, InterceptorGetter, InterceptorSetter);
env->Global()
->Set(env.local(), v8_str("Child"),
@@ -4387,7 +4377,7 @@ THREADED_TEST(Regress625155) {
CompileRun(
"Number.prototype.__proto__ = new Bug;"
"var x;"
- "x = 0xdead;"
+ "x = 0xDEAD;"
"x.boom = 0;"
"x = 's';"
"x.boom = 0;"
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index a12a00da35..73dc19aa66 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -1390,10 +1390,10 @@ THREADED_TEST(ExternalWrap) {
expected_ptr = reinterpret_cast<void*>(1);
TestExternalPointerWrapping();
- expected_ptr = reinterpret_cast<void*>(0xdeadbeef);
+ expected_ptr = reinterpret_cast<void*>(0xDEADBEEF);
TestExternalPointerWrapping();
- expected_ptr = reinterpret_cast<void*>(0xdeadbeef + 1);
+ expected_ptr = reinterpret_cast<void*>(0xDEADBEEF + 1);
TestExternalPointerWrapping();
#if defined(V8_HOST_ARCH_X64)
@@ -1401,10 +1401,10 @@ THREADED_TEST(ExternalWrap) {
expected_ptr = reinterpret_cast<void*>(0x400000000);
TestExternalPointerWrapping();
- expected_ptr = reinterpret_cast<void*>(0xdeadbeefdeadbeef);
+ expected_ptr = reinterpret_cast<void*>(0xDEADBEEFDEADBEEF);
TestExternalPointerWrapping();
- expected_ptr = reinterpret_cast<void*>(0xdeadbeefdeadbeef + 1);
+ expected_ptr = reinterpret_cast<void*>(0xDEADBEEFDEADBEEF + 1);
TestExternalPointerWrapping();
#endif
}
@@ -7916,31 +7916,31 @@ THREADED_TEST(StringWrite) {
v8::HandleScope scope(context->GetIsolate());
v8::Local<String> str = v8_str("abcde");
// abc<Icelandic eth><Unicode snowman>.
- v8::Local<String> str2 = v8_str("abc\303\260\342\230\203");
+ v8::Local<String> str2 = v8_str("abc\xC3\xB0\xE2\x98\x83");
v8::Local<String> str3 =
v8::String::NewFromUtf8(context->GetIsolate(), "abc\0def",
v8::NewStringType::kNormal, 7)
.ToLocalChecked();
- // "ab" + lead surrogate + "cd" + trail surrogate + "ef"
- uint16_t orphans[8] = { 0x61, 0x62, 0xd800, 0x63, 0x64, 0xdc00, 0x65, 0x66 };
+ // "ab" + lead surrogate + "wx" + trail surrogate + "yz"
+ uint16_t orphans[8] = {0x61, 0x62, 0xD800, 0x77, 0x78, 0xDC00, 0x79, 0x7A};
v8::Local<String> orphans_str =
v8::String::NewFromTwoByte(context->GetIsolate(), orphans,
v8::NewStringType::kNormal, 8)
.ToLocalChecked();
// single lead surrogate
- uint16_t lead[1] = { 0xd800 };
+ uint16_t lead[1] = {0xD800};
v8::Local<String> lead_str =
v8::String::NewFromTwoByte(context->GetIsolate(), lead,
v8::NewStringType::kNormal, 1)
.ToLocalChecked();
// single trail surrogate
- uint16_t trail[1] = { 0xdc00 };
+ uint16_t trail[1] = {0xDC00};
v8::Local<String> trail_str =
v8::String::NewFromTwoByte(context->GetIsolate(), trail,
v8::NewStringType::kNormal, 1)
.ToLocalChecked();
// surrogate pair
- uint16_t pair[2] = { 0xd800, 0xdc00 };
+ uint16_t pair[2] = {0xD800, 0xDC00};
v8::Local<String> pair_str =
v8::String::NewFromTwoByte(context->GetIsolate(), pair,
v8::NewStringType::kNormal, 2)
@@ -7948,12 +7948,12 @@ THREADED_TEST(StringWrite) {
const int kStride = 4; // Must match stride in for loops in JS below.
CompileRun(
"var left = '';"
- "for (var i = 0; i < 0xd800; i += 4) {"
+ "for (var i = 0; i < 0xD800; i += 4) {"
" left = left + String.fromCharCode(i);"
"}");
CompileRun(
"var right = '';"
- "for (var i = 0; i < 0xd800; i += 4) {"
+ "for (var i = 0; i < 0xD800; i += 4) {"
" right = String.fromCharCode(i) + right;"
"}");
v8::Local<v8::Object> global = context->Global();
@@ -7965,11 +7965,11 @@ THREADED_TEST(StringWrite) {
.As<String>();
CHECK_EQ(5, str2->Length());
- CHECK_EQ(0xd800 / kStride, left_tree->Length());
- CHECK_EQ(0xd800 / kStride, right_tree->Length());
+ CHECK_EQ(0xD800 / kStride, left_tree->Length());
+ CHECK_EQ(0xD800 / kStride, right_tree->Length());
char buf[100];
- char utf8buf[0xd800 * 3];
+ char utf8buf[0xD800 * 3];
uint16_t wbuf[100];
int len;
int charlen;
@@ -7978,58 +7978,58 @@ THREADED_TEST(StringWrite) {
len = str2->WriteUtf8(utf8buf, sizeof(utf8buf), &charlen);
CHECK_EQ(9, len);
CHECK_EQ(5, charlen);
- CHECK_EQ(0, strcmp(utf8buf, "abc\303\260\342\230\203"));
+ CHECK_EQ(0, strcmp(utf8buf, "abc\xC3\xB0\xE2\x98\x83"));
memset(utf8buf, 0x1, 1000);
len = str2->WriteUtf8(utf8buf, 8, &charlen);
CHECK_EQ(8, len);
CHECK_EQ(5, charlen);
- CHECK_EQ(0, strncmp(utf8buf, "abc\303\260\342\230\203\1", 9));
+ CHECK_EQ(0, strncmp(utf8buf, "abc\xC3\xB0\xE2\x98\x83\x01", 9));
memset(utf8buf, 0x1, 1000);
len = str2->WriteUtf8(utf8buf, 7, &charlen);
CHECK_EQ(5, len);
CHECK_EQ(4, charlen);
- CHECK_EQ(0, strncmp(utf8buf, "abc\303\260\1", 5));
+ CHECK_EQ(0, strncmp(utf8buf, "abc\xC3\xB0\x01", 5));
memset(utf8buf, 0x1, 1000);
len = str2->WriteUtf8(utf8buf, 6, &charlen);
CHECK_EQ(5, len);
CHECK_EQ(4, charlen);
- CHECK_EQ(0, strncmp(utf8buf, "abc\303\260\1", 5));
+ CHECK_EQ(0, strncmp(utf8buf, "abc\xC3\xB0\x01", 5));
memset(utf8buf, 0x1, 1000);
len = str2->WriteUtf8(utf8buf, 5, &charlen);
CHECK_EQ(5, len);
CHECK_EQ(4, charlen);
- CHECK_EQ(0, strncmp(utf8buf, "abc\303\260\1", 5));
+ CHECK_EQ(0, strncmp(utf8buf, "abc\xC3\xB0\x01", 5));
memset(utf8buf, 0x1, 1000);
len = str2->WriteUtf8(utf8buf, 4, &charlen);
CHECK_EQ(3, len);
CHECK_EQ(3, charlen);
- CHECK_EQ(0, strncmp(utf8buf, "abc\1", 4));
+ CHECK_EQ(0, strncmp(utf8buf, "abc\x01", 4));
memset(utf8buf, 0x1, 1000);
len = str2->WriteUtf8(utf8buf, 3, &charlen);
CHECK_EQ(3, len);
CHECK_EQ(3, charlen);
- CHECK_EQ(0, strncmp(utf8buf, "abc\1", 4));
+ CHECK_EQ(0, strncmp(utf8buf, "abc\x01", 4));
memset(utf8buf, 0x1, 1000);
len = str2->WriteUtf8(utf8buf, 2, &charlen);
CHECK_EQ(2, len);
CHECK_EQ(2, charlen);
- CHECK_EQ(0, strncmp(utf8buf, "ab\1", 3));
+ CHECK_EQ(0, strncmp(utf8buf, "ab\x01", 3));
// allow orphan surrogates by default
memset(utf8buf, 0x1, 1000);
len = orphans_str->WriteUtf8(utf8buf, sizeof(utf8buf), &charlen);
CHECK_EQ(13, len);
CHECK_EQ(8, charlen);
- CHECK_EQ(0, strcmp(utf8buf, "ab\355\240\200cd\355\260\200ef"));
+ CHECK_EQ(0, strcmp(utf8buf, "ab\xED\xA0\x80wx\xED\xB0\x80yz"));
- // replace orphan surrogates with unicode replacement character
+ // replace orphan surrogates with Unicode replacement character
memset(utf8buf, 0x1, 1000);
len = orphans_str->WriteUtf8(utf8buf,
sizeof(utf8buf),
@@ -8037,9 +8037,9 @@ THREADED_TEST(StringWrite) {
String::REPLACE_INVALID_UTF8);
CHECK_EQ(13, len);
CHECK_EQ(8, charlen);
- CHECK_EQ(0, strcmp(utf8buf, "ab\357\277\275cd\357\277\275ef"));
+ CHECK_EQ(0, strcmp(utf8buf, "ab\xEF\xBF\xBDwx\xEF\xBF\xBDyz"));
- // replace single lead surrogate with unicode replacement character
+ // replace single lead surrogate with Unicode replacement character
memset(utf8buf, 0x1, 1000);
len = lead_str->WriteUtf8(utf8buf,
sizeof(utf8buf),
@@ -8047,9 +8047,9 @@ THREADED_TEST(StringWrite) {
String::REPLACE_INVALID_UTF8);
CHECK_EQ(4, len);
CHECK_EQ(1, charlen);
- CHECK_EQ(0, strcmp(utf8buf, "\357\277\275"));
+ CHECK_EQ(0, strcmp(utf8buf, "\xEF\xBF\xBD"));
- // replace single trail surrogate with unicode replacement character
+ // replace single trail surrogate with Unicode replacement character
memset(utf8buf, 0x1, 1000);
len = trail_str->WriteUtf8(utf8buf,
sizeof(utf8buf),
@@ -8057,7 +8057,7 @@ THREADED_TEST(StringWrite) {
String::REPLACE_INVALID_UTF8);
CHECK_EQ(4, len);
CHECK_EQ(1, charlen);
- CHECK_EQ(0, strcmp(utf8buf, "\357\277\275"));
+ CHECK_EQ(0, strcmp(utf8buf, "\xEF\xBF\xBD"));
// do not replace / write anything if surrogate pair does not fit the buffer
// space
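
The option exercised above: WriteUtf8 normally encodes unpaired surrogates as-is (the three-byte CESU-8 form seen earlier), and REPLACE_INVALID_UTF8 swaps each one for U+FFFD. A minimal sketch, assuming str is a v8::Local<v8::String> holding a single lone surrogate:

    char buf[16];
    int nchars = 0;
    // With REPLACE_INVALID_UTF8 the lone surrogate comes out as U+FFFD
    // (bytes EF BF BD); the return value counts bytes written, NUL included.
    int len = str->WriteUtf8(buf, sizeof(buf), &nchars,
                             v8::String::REPLACE_INVALID_UTF8);
    CHECK_EQ(4, len);     // 3 bytes of U+FFFD + terminating NUL
    CHECK_EQ(1, nchars);  // one character, as the checks above assert
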
@@ -8072,14 +8072,14 @@ THREADED_TEST(StringWrite) {
memset(utf8buf, 0x1, sizeof(utf8buf));
len = GetUtf8Length(left_tree);
int utf8_expected =
- (0x80 + (0x800 - 0x80) * 2 + (0xd800 - 0x800) * 3) / kStride;
+ (0x80 + (0x800 - 0x80) * 2 + (0xD800 - 0x800) * 3) / kStride;
CHECK_EQ(utf8_expected, len);
len = left_tree->WriteUtf8(utf8buf, utf8_expected, &charlen);
CHECK_EQ(utf8_expected, len);
- CHECK_EQ(0xd800 / kStride, charlen);
- CHECK_EQ(0xed, static_cast<unsigned char>(utf8buf[utf8_expected - 3]));
- CHECK_EQ(0x9f, static_cast<unsigned char>(utf8buf[utf8_expected - 2]));
- CHECK_EQ(0xc0 - kStride,
+ CHECK_EQ(0xD800 / kStride, charlen);
+ CHECK_EQ(0xED, static_cast<unsigned char>(utf8buf[utf8_expected - 3]));
+ CHECK_EQ(0x9F, static_cast<unsigned char>(utf8buf[utf8_expected - 2]));
+ CHECK_EQ(0xC0 - kStride,
static_cast<unsigned char>(utf8buf[utf8_expected - 1]));
CHECK_EQ(1, utf8buf[utf8_expected]);
@@ -8088,10 +8088,10 @@ THREADED_TEST(StringWrite) {
CHECK_EQ(utf8_expected, len);
len = right_tree->WriteUtf8(utf8buf, utf8_expected, &charlen);
CHECK_EQ(utf8_expected, len);
- CHECK_EQ(0xd800 / kStride, charlen);
- CHECK_EQ(0xed, static_cast<unsigned char>(utf8buf[0]));
- CHECK_EQ(0x9f, static_cast<unsigned char>(utf8buf[1]));
- CHECK_EQ(0xc0 - kStride, static_cast<unsigned char>(utf8buf[2]));
+ CHECK_EQ(0xD800 / kStride, charlen);
+ CHECK_EQ(0xED, static_cast<unsigned char>(utf8buf[0]));
+ CHECK_EQ(0x9F, static_cast<unsigned char>(utf8buf[1]));
+ CHECK_EQ(0xC0 - kStride, static_cast<unsigned char>(utf8buf[2]));
CHECK_EQ(1, utf8buf[utf8_expected]);
memset(buf, 0x1, sizeof(buf));
@@ -8110,7 +8110,7 @@ THREADED_TEST(StringWrite) {
CHECK_EQ(4, len);
len = str->Write(wbuf, 0, 4);
CHECK_EQ(4, len);
- CHECK_EQ(0, strncmp("abcd\1", buf, 5));
+ CHECK_EQ(0, strncmp("abcd\x01", buf, 5));
uint16_t answer2[] = {'a', 'b', 'c', 'd', 0x101};
CHECK_EQ(0, StrNCmp16(answer2, wbuf, 5));
@@ -8120,7 +8120,7 @@ THREADED_TEST(StringWrite) {
CHECK_EQ(5, len);
len = str->Write(wbuf, 0, 5);
CHECK_EQ(5, len);
- CHECK_EQ(0, strncmp("abcde\1", buf, 6));
+ CHECK_EQ(0, strncmp("abcde\x01", buf, 6));
uint16_t answer3[] = {'a', 'b', 'c', 'd', 'e', 0x101};
CHECK_EQ(0, StrNCmp16(answer3, wbuf, 6));
@@ -8159,7 +8159,7 @@ THREADED_TEST(StringWrite) {
CHECK_EQ(1, len);
len = str->Write(wbuf, 4, 1);
CHECK_EQ(1, len);
- CHECK_EQ(0, strncmp("e\1", buf, 2));
+ CHECK_EQ(0, strncmp("e\x01", buf, 2));
uint16_t answer6[] = {'e', 0x101};
CHECK_EQ(0, StrNCmp16(answer6, wbuf, 2));
@@ -8169,7 +8169,7 @@ THREADED_TEST(StringWrite) {
CHECK_EQ(1, len);
len = str->Write(wbuf, 3, 1);
CHECK_EQ(1, len);
- CHECK_EQ(0, strncmp("d\1", buf, 2));
+ CHECK_EQ(0, strncmp("d\x01", buf, 2));
uint16_t answer7[] = {'d', 0x101};
CHECK_EQ(0, StrNCmp16(answer7, wbuf, 2));
@@ -8205,10 +8205,10 @@ THREADED_TEST(StringWrite) {
CHECK_EQ(8, len);
CHECK_EQ('X', utf8buf[8]);
CHECK_EQ(5, charlen);
- CHECK_EQ(0, strncmp(utf8buf, "abc\303\260\342\230\203", 8));
- CHECK_NE(0, strcmp(utf8buf, "abc\303\260\342\230\203"));
+ CHECK_EQ(0, strncmp(utf8buf, "abc\xC3\xB0\xE2\x98\x83", 8));
+ CHECK_NE(0, strcmp(utf8buf, "abc\xC3\xB0\xE2\x98\x83"));
utf8buf[8] = '\0';
- CHECK_EQ(0, strcmp(utf8buf, "abc\303\260\342\230\203"));
+ CHECK_EQ(0, strcmp(utf8buf, "abc\xC3\xB0\xE2\x98\x83"));
memset(utf8buf, 0x1, sizeof(utf8buf));
utf8buf[5] = 'X';
@@ -8300,16 +8300,16 @@ THREADED_TEST(OverlongSequencesAndSurrogates) {
"X\xf4\x90\x80Y\0",
};
const std::vector<std::vector<uint16_t>> unicode_expected = {
- {0x58, 0xfffd, 0xfffd, 0x59},
- {0x58, 0xfffd, 0xfffd, 0x59},
- {0x58, 0xfffd, 0xfffd, 0xfffd, 0x59},
- {0x58, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0x59},
- {0x58, 0xfffd, 0xfffd, 0xfffd, 0x59},
- {0x58, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0x59},
- {0x58, 0xfffd, 0xfffd, 0x59},
- {0x58, 0xfffd, 0xfffd, 0xfffd, 0x59},
- {0x58, 0xfffd, 0xfffd, 0x59},
- {0x58, 0xfffd, 0xfffd, 0xfffd, 0x59},
+ {0x58, 0xFFFD, 0xFFFD, 0x59},
+ {0x58, 0xFFFD, 0xFFFD, 0x59},
+ {0x58, 0xFFFD, 0xFFFD, 0xFFFD, 0x59},
+ {0x58, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0x59},
+ {0x58, 0xFFFD, 0xFFFD, 0xFFFD, 0x59},
+ {0x58, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0x59},
+ {0x58, 0xFFFD, 0xFFFD, 0x59},
+ {0x58, 0xFFFD, 0xFFFD, 0xFFFD, 0x59},
+ {0x58, 0xFFFD, 0xFFFD, 0x59},
+ {0x58, 0xFFFD, 0xFFFD, 0xFFFD, 0x59},
};
CHECK_EQ(unicode_expected.size(), arraysize(cases));
TestUtf8DecodingAgainstReference(cases, unicode_expected);
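
Each case above pairs an invalid byte sequence with the exact U+FFFD fan-out the decoder must produce. A hypothetical standalone check in the same spirit, using the overlong two-byte encoding of '/' (C0 AF), which decodes as one replacement character per invalid byte:

    v8::Local<v8::String> s =
        v8::String::NewFromUtf8(isolate, "X\xC0\xAFY",
                                v8::NewStringType::kNormal, 4)
            .ToLocalChecked();
    CHECK_EQ(4, s->Length());  // 'X', U+FFFD, U+FFFD, 'Y'
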
@@ -8323,10 +8323,10 @@ THREADED_TEST(Utf16) {
"var p = [];"
"var plens = [20, 3, 3];"
"p.push('01234567890123456789');"
- "var lead = 0xd800;"
- "var trail = 0xdc00;"
- "p.push(String.fromCharCode(0xd800));"
- "p.push(String.fromCharCode(0xdc00));"
+ "var lead = 0xD800;"
+ "var trail = 0xDC00;"
+ "p.push(String.fromCharCode(0xD800));"
+ "p.push(String.fromCharCode(0xDC00));"
"var a = [];"
"var b = [];"
"var c = [];"
@@ -8353,8 +8353,9 @@ THREADED_TEST(Utf16) {
" var newc = 'x' + c[m] + c[n] + 'y';"
" c2.push(newc.substring(1, newc.length - 1));"
" var utf = alens[m] + alens[n];" // And here.
- // The 'n's that start with 0xdc.. are 6-8
- // The 'm's that end with 0xd8.. are 1, 4 and 7
+ // The 'n's that start with 0xDC.. are 6-8
+ // The 'm's that end with 0xD8.. are 1, 4 and 7
" if ((m % 3) == 1 && n >= 6) utf -= 2;"
" a2lens.push(utf);"
" }"
@@ -8387,41 +8388,41 @@ THREADED_TEST(Utf16Symbol) {
CompileRun(
"var sym0 = 'benedictus';"
- "var sym0b = 'S\303\270ren';"
- "var sym1 = '\355\240\201\355\260\207';"
- "var sym2 = '\360\220\220\210';"
- "var sym3 = 'x\355\240\201\355\260\207';"
- "var sym4 = 'x\360\220\220\210';"
+ "var sym0b = 'S\xC3\xB8ren';"
+ "var sym1 = '\xED\xA0\x81\xED\xB0\x87';"
+ "var sym2 = '\xF0\x90\x90\x88';"
+ "var sym3 = 'x\xED\xA0\x81\xED\xB0\x87';"
+ "var sym4 = 'x\xF0\x90\x90\x88';"
"if (sym1.length != 2) throw sym1;"
- "if (sym1.charCodeAt(1) != 0xdc07) throw sym1.charCodeAt(1);"
+ "if (sym1.charCodeAt(1) != 0xDC07) throw sym1.charCodeAt(1);"
"if (sym2.length != 2) throw sym2;"
- "if (sym2.charCodeAt(1) != 0xdc08) throw sym2.charCodeAt(2);"
+ "if (sym2.charCodeAt(1) != 0xDC08) throw sym2.charCodeAt(2);"
"if (sym3.length != 3) throw sym3;"
- "if (sym3.charCodeAt(2) != 0xdc07) throw sym1.charCodeAt(2);"
+ "if (sym3.charCodeAt(2) != 0xDC07) throw sym1.charCodeAt(2);"
"if (sym4.length != 3) throw sym4;"
- "if (sym4.charCodeAt(2) != 0xdc08) throw sym2.charCodeAt(2);");
+ "if (sym4.charCodeAt(2) != 0xDC08) throw sym2.charCodeAt(2);");
Local<String> sym0 =
v8::String::NewFromUtf8(context->GetIsolate(), "benedictus",
v8::NewStringType::kInternalized)
.ToLocalChecked();
Local<String> sym0b =
- v8::String::NewFromUtf8(context->GetIsolate(), "S\303\270ren",
+ v8::String::NewFromUtf8(context->GetIsolate(), "S\xC3\xB8ren",
v8::NewStringType::kInternalized)
.ToLocalChecked();
Local<String> sym1 =
- v8::String::NewFromUtf8(context->GetIsolate(), "\355\240\201\355\260\207",
+ v8::String::NewFromUtf8(context->GetIsolate(), "\xED\xA0\x81\xED\xB0\x87",
v8::NewStringType::kInternalized)
.ToLocalChecked();
Local<String> sym2 =
- v8::String::NewFromUtf8(context->GetIsolate(), "\360\220\220\210",
+ v8::String::NewFromUtf8(context->GetIsolate(), "\xF0\x90\x90\x88",
v8::NewStringType::kInternalized)
.ToLocalChecked();
Local<String> sym3 = v8::String::NewFromUtf8(context->GetIsolate(),
- "x\355\240\201\355\260\207",
+ "x\xED\xA0\x81\xED\xB0\x87",
v8::NewStringType::kInternalized)
.ToLocalChecked();
Local<String> sym4 =
- v8::String::NewFromUtf8(context->GetIsolate(), "x\360\220\220\210",
+ v8::String::NewFromUtf8(context->GetIsolate(), "x\xF0\x90\x90\x88",
v8::NewStringType::kInternalized)
.ToLocalChecked();
v8::Local<v8::Object> global = context->Global();
@@ -8454,10 +8455,10 @@ THREADED_TEST(Utf16MissingTrailing) {
int size = 1024 * 64;
uint8_t* buffer = new uint8_t[size];
for (int i = 0; i < size; i += 4) {
- buffer[i] = 0xf0;
- buffer[i + 1] = 0x9d;
+ buffer[i] = 0xF0;
+ buffer[i + 1] = 0x9D;
buffer[i + 2] = 0x80;
- buffer[i + 3] = 0x9e;
+ buffer[i + 3] = 0x9E;
}
// Now invoke the decoder without last 3 bytes
@@ -8479,9 +8480,9 @@ THREADED_TEST(Utf16Trailing3Byte) {
int size = 1024 * 63;
uint8_t* buffer = new uint8_t[size];
for (int i = 0; i < size; i += 3) {
- buffer[i] = 0xe2;
+ buffer[i] = 0xE2;
buffer[i + 1] = 0x80;
- buffer[i + 2] = 0xa6;
+ buffer[i + 2] = 0xA6;
}
// Now invoke the decoder without last 3 bytes
@@ -9620,20 +9621,20 @@ static void EchoSetter(Local<String> name, Local<Value> value,
static void UnreachableGetter(
Local<String> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
- CHECK(false); // This function should not be called..
+ UNREACHABLE(); // This function should not be called.
}
static void UnreachableSetter(Local<String>,
Local<Value>,
const v8::PropertyCallbackInfo<void>&) {
- CHECK(false); // This function should not be called.
+ UNREACHABLE(); // This function should not be called.
}
static void UnreachableFunction(
const v8::FunctionCallbackInfo<v8::Value>& info) {
- CHECK(false); // This function should not be called..
+ UNREACHABLE(); // This function should not be called.
}
@@ -14462,8 +14463,7 @@ static void event_handler(const v8::JitCodeEvent* event) {
case v8::JitCodeEvent::CODE_REMOVED:
// Object/code removal events are currently not dispatched from the GC.
- CHECK(false);
- break;
+ UNREACHABLE();
// For CODE_START_LINE_INFO_RECORDING event, we will create one
// DummyJitCodeLineInfo data structure pointed to by event->user_data. We
@@ -14501,8 +14501,7 @@ static void event_handler(const v8::JitCodeEvent* event) {
default:
// Impossible event.
- CHECK(false);
- break;
+ UNREACHABLE();
}
}
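
Context for the CHECK(false) -> UNREACHABLE() swaps throughout this file: UNREACHABLE() is V8's noreturn fatal-error macro (defined in the base logging header), so the compiler knows control cannot fall through and the trailing break becomes dead code. A hypothetical reduction:

    switch (event->type) {
      case v8::JitCodeEvent::CODE_REMOVED:
        UNREACHABLE();  // Aborts the process; never returns, so no break.
      default:
        break;
    }
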
@@ -18466,7 +18465,6 @@ TEST(SetStackLimitInThread) {
}
}
-
THREADED_TEST(GetHeapStatistics) {
LocalContext c1;
v8::HandleScope scope(c1->GetIsolate());
@@ -18478,6 +18476,55 @@ THREADED_TEST(GetHeapStatistics) {
CHECK_NE(static_cast<int>(heap_statistics.used_heap_size()), 0);
}
+TEST(NumberOfNativeContexts) {
+ static const size_t kNumTestContexts = 10;
+ i::Isolate* isolate = CcTest::i_isolate();
+ i::HandleScope scope(isolate);
+ v8::Global<v8::Context> context[kNumTestContexts];
+ v8::HeapStatistics heap_statistics;
+ CHECK_EQ(0u, heap_statistics.number_of_native_contexts());
+ CcTest::isolate()->GetHeapStatistics(&heap_statistics);
+ CHECK_EQ(0u, heap_statistics.number_of_native_contexts());
+ for (size_t i = 0; i < kNumTestContexts; i++) {
+ i::HandleScope inner(isolate);
+ context[i].Reset(CcTest::isolate(), v8::Context::New(CcTest::isolate()));
+ CcTest::isolate()->GetHeapStatistics(&heap_statistics);
+ CHECK_EQ(i + 1, heap_statistics.number_of_native_contexts());
+ }
+ for (size_t i = 0; i < kNumTestContexts; i++) {
+ context[i].Reset();
+ CcTest::CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
+ CcTest::isolate()->GetHeapStatistics(&heap_statistics);
+ CHECK_EQ(kNumTestContexts - i - 1u,
+ heap_statistics.number_of_native_contexts());
+ }
+}
+
+TEST(NumberOfDetachedContexts) {
+ static const size_t kNumTestContexts = 10;
+ i::Isolate* isolate = CcTest::i_isolate();
+ i::HandleScope scope(isolate);
+ v8::Global<v8::Context> context[kNumTestContexts];
+ v8::HeapStatistics heap_statistics;
+ CHECK_EQ(0u, heap_statistics.number_of_detached_contexts());
+ CcTest::isolate()->GetHeapStatistics(&heap_statistics);
+ CHECK_EQ(0u, heap_statistics.number_of_detached_contexts());
+ for (size_t i = 0; i < kNumTestContexts; i++) {
+ i::HandleScope inner(isolate);
+ v8::Local<v8::Context> local = v8::Context::New(CcTest::isolate());
+ context[i].Reset(CcTest::isolate(), local);
+ local->DetachGlobal();
+ CcTest::isolate()->GetHeapStatistics(&heap_statistics);
+ CHECK_EQ(i + 1, heap_statistics.number_of_detached_contexts());
+ }
+ for (size_t i = 0; i < kNumTestContexts; i++) {
+ context[i].Reset();
+ CcTest::CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
+ CcTest::isolate()->GetHeapStatistics(&heap_statistics);
+ CHECK_EQ(kNumTestContexts - i - 1u,
+ heap_statistics.number_of_detached_contexts());
+ }
+}
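
The two new tests pin down counters added to v8::HeapStatistics in this update. An embedder-side sketch, assuming a live isolate:

    v8::HeapStatistics hs;
    isolate->GetHeapStatistics(&hs);
    size_t live_contexts = hs.number_of_native_contexts();
    size_t detached = hs.number_of_detached_contexts();
    // A detached count that stays nonzero across full GCs points at a
    // context that was DetachGlobal()'d but is still strongly referenced.
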
class VisitorImpl : public v8::ExternalResourceVisitor {
public:
@@ -18723,12 +18770,12 @@ THREADED_TEST(QuietSignalingNaNs) {
v8::TryCatch try_catch(isolate);
// Special double values.
- double snan = DoubleFromBits(0x7ff00000, 0x00000001);
- double qnan = DoubleFromBits(0x7ff80000, 0x00000000);
- double infinity = DoubleFromBits(0x7ff00000, 0x00000000);
- double max_normal = DoubleFromBits(0x7fefffff, 0xffffffffu);
+ double snan = DoubleFromBits(0x7FF00000, 0x00000001);
+ double qnan = DoubleFromBits(0x7FF80000, 0x00000000);
+ double infinity = DoubleFromBits(0x7FF00000, 0x00000000);
+ double max_normal = DoubleFromBits(0x7FEFFFFF, 0xFFFFFFFFu);
double min_normal = DoubleFromBits(0x00100000, 0x00000000);
- double max_denormal = DoubleFromBits(0x000fffff, 0xffffffffu);
+ double max_denormal = DoubleFromBits(0x000FFFFF, 0xFFFFFFFFu);
double min_denormal = DoubleFromBits(0x00000000, 0x00000001);
// Date values are capped at +/-100000000 days (times 864e5 ms per day)
@@ -18775,9 +18822,9 @@ THREADED_TEST(QuietSignalingNaNs) {
!defined(USE_SIMULATOR)
// Most significant fraction bit for quiet nan is set to 0
// on MIPS architecture. Allowed by IEEE-754.
- CHECK_EQ(0xffe, static_cast<int>((stored_bits >> 51) & 0xfff));
+ CHECK_EQ(0xFFE, static_cast<int>((stored_bits >> 51) & 0xFFF));
#else
- CHECK_EQ(0xfff, static_cast<int>((stored_bits >> 51) & 0xfff));
+ CHECK_EQ(0xFFF, static_cast<int>((stored_bits >> 51) & 0xFFF));
#endif
}
@@ -18797,9 +18844,9 @@ THREADED_TEST(QuietSignalingNaNs) {
!defined(USE_SIMULATOR)
// Most significant fraction bit for quiet nan is set to 0
// on MIPS architecture. Allowed by IEEE-754.
- CHECK_EQ(0xffe, static_cast<int>((stored_bits >> 51) & 0xfff));
+ CHECK_EQ(0xFFE, static_cast<int>((stored_bits >> 51) & 0xFFF));
#else
- CHECK_EQ(0xfff, static_cast<int>((stored_bits >> 51) & 0xfff));
+ CHECK_EQ(0xFFF, static_cast<int>((stored_bits >> 51) & 0xFFF));
#endif
}
}
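
The helper under test builds doubles from explicit bit halves; a stand-in with the same shape, for reference (assumes <cstring> and <cstdint>):

    static double DoubleFromBits(uint32_t hi, uint32_t lo) {
      uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
      double d;
      std::memcpy(&d, &bits, sizeof(d));  // Type-pun without UB.
      return d;
    }
    // 0x7FF80000 00000000 is the canonical quiet NaN: exponent all ones
    // and fraction bit 51 set, which is exactly what the 0xFFF masks on
    // bits 51..62 assert above (MIPS being the permitted exception).
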
@@ -22172,20 +22219,20 @@ UNINITIALIZED_TEST(IsolateEmbedderData) {
CHECK(!i_isolate->GetData(slot));
}
for (uint32_t slot = 0; slot < v8::Isolate::GetNumberOfDataSlots(); ++slot) {
- void* data = reinterpret_cast<void*>(0xacce55ed + slot);
+ void* data = reinterpret_cast<void*>(0xACCE55ED + slot);
isolate->SetData(slot, data);
}
for (uint32_t slot = 0; slot < v8::Isolate::GetNumberOfDataSlots(); ++slot) {
- void* data = reinterpret_cast<void*>(0xacce55ed + slot);
+ void* data = reinterpret_cast<void*>(0xACCE55ED + slot);
CHECK_EQ(data, isolate->GetData(slot));
CHECK_EQ(data, i_isolate->GetData(slot));
}
for (uint32_t slot = 0; slot < v8::Isolate::GetNumberOfDataSlots(); ++slot) {
- void* data = reinterpret_cast<void*>(0xdecea5ed + slot);
+ void* data = reinterpret_cast<void*>(0xDECEA5ED + slot);
isolate->SetData(slot, data);
}
for (uint32_t slot = 0; slot < v8::Isolate::GetNumberOfDataSlots(); ++slot) {
- void* data = reinterpret_cast<void*>(0xdecea5ed + slot);
+ void* data = reinterpret_cast<void*>(0xDECEA5ED + slot);
CHECK_EQ(data, isolate->GetData(slot));
CHECK_EQ(data, i_isolate->GetData(slot));
}
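
The API being exercised, reduced to the embedder view: each isolate carries a small fixed array of void* data slots. A sketch, where the stored pointer is a hypothetical payload:

    uint32_t n = v8::Isolate::GetNumberOfDataSlots();
    CHECK_GT(n, 0u);
    void* my_state = reinterpret_cast<void*>(0x1234);  // hypothetical payload
    isolate->SetData(0, my_state);  // Stash embedder state on the isolate.
    CHECK_EQ(my_state, isolate->GetData(0));
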
@@ -22316,16 +22363,13 @@ THREADED_TEST(InstanceCheckOnInstanceAccessor) {
CheckInstanceCheckedAccessors(false);
}
-
static void EmptyInterceptorGetter(
- Local<String> name, const v8::PropertyCallbackInfo<v8::Value>& info) {}
-
+ Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {}
static void EmptyInterceptorSetter(
- Local<String> name, Local<Value> value,
+ Local<Name> name, Local<Value> value,
const v8::PropertyCallbackInfo<v8::Value>& info) {}
-
THREADED_TEST(InstanceCheckOnInstanceAccessorWithInterceptor) {
v8::internal::FLAG_allow_natives_syntax = true;
LocalContext context;
@@ -22333,8 +22377,8 @@ THREADED_TEST(InstanceCheckOnInstanceAccessorWithInterceptor) {
Local<FunctionTemplate> templ = FunctionTemplate::New(context->GetIsolate());
Local<ObjectTemplate> inst = templ->InstanceTemplate();
- templ->InstanceTemplate()->SetNamedPropertyHandler(EmptyInterceptorGetter,
- EmptyInterceptorSetter);
+ templ->InstanceTemplate()->SetHandler(v8::NamedPropertyHandlerConfiguration(
+ EmptyInterceptorGetter, EmptyInterceptorSetter));
inst->SetAccessor(v8_str("foo"), InstanceCheckedGetter, InstanceCheckedSetter,
Local<Value>(), v8::DEFAULT, v8::None,
v8::AccessorSignature::New(context->GetIsolate(), templ));
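
The migration above replaces the deprecated String-keyed SetNamedPropertyHandler with SetHandler plus a NamedPropertyHandlerConfiguration; the callbacks now take Local<Name>, so symbol-keyed properties can be intercepted as well. The new-style callback shape, in isolation:

    static void Getter(v8::Local<v8::Name> name,
                       const v8::PropertyCallbackInfo<v8::Value>& info) {
      // Leaving info.GetReturnValue() unset lets the lookup fall through.
    }
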
@@ -22835,7 +22879,7 @@ THREADED_TEST(SemaphoreInterruption) {
void UnreachableCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
- CHECK(false);
+ UNREACHABLE();
}
@@ -26412,7 +26456,7 @@ TEST(Proxy) {
CHECK(proxy->IsProxy());
CHECK(!target->IsProxy());
CHECK(proxy->IsRevoked());
- CHECK(proxy->GetTarget()->SameValue(target));
+ CHECK(proxy->GetTarget()->IsNull());
CHECK(proxy->GetHandler()->IsNull());
}
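
The updated assertion pins the revised semantics: after Revoke(), a proxy drops its target as well as its handler. A sketch, assuming context, target, and handler are in scope:

    v8::Local<v8::Proxy> proxy =
        v8::Proxy::New(context, target, handler).ToLocalChecked();
    proxy->Revoke();
    CHECK(proxy->IsRevoked());
    CHECK(proxy->GetTarget()->IsNull());  // Previously still SameValue(target).
    CHECK(proxy->GetHandler()->IsNull());
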
diff --git a/deps/v8/test/cctest/test-assembler-arm.cc b/deps/v8/test/cctest/test-assembler-arm.cc
index 169f927f74..10a111c8df 100644
--- a/deps/v8/test/cctest/test-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-assembler-arm.cc
@@ -27,7 +27,6 @@
#include <iostream> // NOLINT(readability/streams)
-#include "src/arm/simulator-arm.h"
#include "src/assembler-inl.h"
#include "src/base/utils/random-number-generator.h"
#include "src/disassembler.h"
@@ -35,6 +34,7 @@
#include "src/factory.h"
#include "src/macro-assembler.h"
#include "src/ostreams.h"
+#include "src/simulator.h"
#include "src/v8.h"
#include "test/cctest/assembler-helper-arm.h"
#include "test/cctest/cctest.h"
@@ -66,9 +66,8 @@ TEST(0) {
OFStream os(stdout);
code->Print(os);
#endif
- F_iiiii f = FUNCTION_CAST<F_iiiii>(code->entry());
- int res =
- reinterpret_cast<int>(CALL_GENERATED_CODE(isolate, f, 3, 4, 0, 0, 0));
+ auto f = GeneratedCode<F_iiiii>::FromCode(*code);
+ int res = reinterpret_cast<int>(f.Call(3, 4, 0, 0, 0));
::printf("f() = %d\n", res);
CHECK_EQ(7, res);
}
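
The mechanical change applied across this file: FUNCTION_CAST plus CALL_GENERATED_CODE becomes the GeneratedCode<Signature> helper (hence the include swap from src/arm/simulator-arm.h to the generic src/simulator.h), which wraps the code object's entry point and routes the call through the simulator when one is configured. The pattern in isolation:

    auto f = GeneratedCode<F_iiiii>::FromCode(*code);
    int res = reinterpret_cast<int>(f.Call(3, 4, 0, 0, 0));
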
@@ -103,9 +102,8 @@ TEST(1) {
OFStream os(stdout);
code->Print(os);
#endif
- F_iiiii f = FUNCTION_CAST<F_iiiii>(code->entry());
- int res =
- reinterpret_cast<int>(CALL_GENERATED_CODE(isolate, f, 100, 0, 0, 0, 0));
+ auto f = GeneratedCode<F_iiiii>::FromCode(*code);
+ int res = reinterpret_cast<int>(f.Call(100, 0, 0, 0, 0));
::printf("f() = %d\n", res);
CHECK_EQ(5050, res);
}
@@ -149,9 +147,8 @@ TEST(2) {
OFStream os(stdout);
code->Print(os);
#endif
- F_iiiii f = FUNCTION_CAST<F_iiiii>(code->entry());
- int res =
- reinterpret_cast<int>(CALL_GENERATED_CODE(isolate, f, 10, 0, 0, 0, 0));
+ auto f = GeneratedCode<F_iiiii>::FromCode(*code);
+ int res = reinterpret_cast<int>(f.Call(10, 0, 0, 0, 0));
::printf("f() = %d\n", res);
CHECK_EQ(3628800, res);
}
@@ -197,12 +194,11 @@ TEST(3) {
OFStream os(stdout);
code->Print(os);
#endif
- F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
+ auto f = GeneratedCode<F_piiii>::FromCode(*code);
t.i = 100000;
t.c = 10;
t.s = 1000;
- int res =
- reinterpret_cast<int>(CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0));
+ int res = reinterpret_cast<int>(f.Call(&t, 0, 0, 0, 0));
::printf("f() = %d\n", res);
CHECK_EQ(101010, res);
CHECK_EQ(100000/2, t.i);
@@ -276,7 +272,7 @@ TEST(4) {
__ vstr(d4, r4, offsetof(T, e));
// Move a literal into a register that requires 64 bits to encode.
- // 0x3ff0000010000000 = 1.000000059604644775390625
+ // 0x3FF0000010000000 = 1.000000059604644775390625
__ vmov(d4, Double(1.000000059604644775390625));
__ vstr(d4, r4, offsetof(T, d));
@@ -329,7 +325,7 @@ TEST(4) {
OFStream os(stdout);
code->Print(os);
#endif
- F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
+ auto f = GeneratedCode<F_piiii>::FromCode(*code);
t.a = 1.5;
t.b = 2.75;
t.c = 17.17;
@@ -344,8 +340,7 @@ TEST(4) {
t.n = 123.456;
t.x = 4.5;
t.y = 9.0;
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
- USE(dummy);
+ f.Call(&t, 0, 0, 0, 0);
CHECK_EQ(-16.0f, t.p);
CHECK_EQ(0.25f, t.o);
CHECK_EQ(-123.456, t.n);
@@ -392,9 +387,8 @@ TEST(5) {
OFStream os(stdout);
code->Print(os);
#endif
- F_iiiii f = FUNCTION_CAST<F_iiiii>(code->entry());
- int res = reinterpret_cast<int>(
- CALL_GENERATED_CODE(isolate, f, 0xAAAAAAAA, 0, 0, 0, 0));
+ auto f = GeneratedCode<F_iiiii>::FromCode(*code);
+ int res = reinterpret_cast<int>(f.Call(0xAAAAAAAA, 0, 0, 0, 0));
::printf("f() = %d\n", res);
CHECK_EQ(-7, res);
}
@@ -424,9 +418,8 @@ TEST(6) {
OFStream os(stdout);
code->Print(os);
#endif
- F_iiiii f = FUNCTION_CAST<F_iiiii>(code->entry());
- int res = reinterpret_cast<int>(
- CALL_GENERATED_CODE(isolate, f, 0xFFFF, 0, 0, 0, 0));
+ auto f = GeneratedCode<F_iiiii>::FromCode(*code);
+ int res = reinterpret_cast<int>(f.Call(0xFFFF, 0, 0, 0, 0));
::printf("f() = %d\n", res);
CHECK_EQ(382, res);
}
@@ -493,9 +486,8 @@ static void TestRoundingMode(VCVTTypes types,
OFStream os(stdout);
code->Print(os);
#endif
- F_iiiii f = FUNCTION_CAST<F_iiiii>(code->entry());
- int res =
- reinterpret_cast<int>(CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ auto f = GeneratedCode<F_iiiii>::FromCode(*code);
+ int res = reinterpret_cast<int>(f.Call(0, 0, 0, 0, 0));
::printf("res = %d\n", res);
CHECK_EQ(expected, res);
}
@@ -573,8 +565,8 @@ TEST(7) {
TestRoundingMode(u32_f64, RZ, kMinInt - 1.0, 0, true);
// Positive values.
- // kMaxInt is the maximum *signed* integer: 0x7fffffff.
- static const uint32_t kMaxUInt = 0xffffffffu;
+ // kMaxInt is the maximum *signed* integer: 0x7FFFFFFF.
+ static const uint32_t kMaxUInt = 0xFFFFFFFFu;
TestRoundingMode(u32_f64, RZ, 0, 0);
TestRoundingMode(u32_f64, RZ, 0.5, 0);
TestRoundingMode(u32_f64, RZ, 123.7, 123);
@@ -676,7 +668,7 @@ TEST(8) {
OFStream os(stdout);
code->Print(os);
#endif
- F_ppiii fn = FUNCTION_CAST<F_ppiii>(code->entry());
+ auto fn = GeneratedCode<F_ppiii>::FromCode(*code);
d.a = 1.1;
d.b = 2.2;
d.c = 3.3;
@@ -695,8 +687,7 @@ TEST(8) {
f.g = 7.0;
f.h = 8.0;
- Object* dummy = CALL_GENERATED_CODE(isolate, fn, &d, &f, 0, 0, 0);
- USE(dummy);
+ fn.Call(&d, &f, 0, 0, 0);
CHECK_EQ(7.7, d.a);
CHECK_EQ(8.8, d.b);
@@ -786,7 +777,7 @@ TEST(9) {
OFStream os(stdout);
code->Print(os);
#endif
- F_ppiii fn = FUNCTION_CAST<F_ppiii>(code->entry());
+ auto fn = GeneratedCode<F_ppiii>::FromCode(*code);
d.a = 1.1;
d.b = 2.2;
d.c = 3.3;
@@ -805,8 +796,7 @@ TEST(9) {
f.g = 7.0;
f.h = 8.0;
- Object* dummy = CALL_GENERATED_CODE(isolate, fn, &d, &f, 0, 0, 0);
- USE(dummy);
+ fn.Call(&d, &f, 0, 0, 0);
CHECK_EQ(7.7, d.a);
CHECK_EQ(8.8, d.b);
@@ -892,7 +882,7 @@ TEST(10) {
OFStream os(stdout);
code->Print(os);
#endif
- F_ppiii fn = FUNCTION_CAST<F_ppiii>(code->entry());
+ auto fn = GeneratedCode<F_ppiii>::FromCode(*code);
d.a = 1.1;
d.b = 2.2;
d.c = 3.3;
@@ -911,8 +901,7 @@ TEST(10) {
f.g = 7.0;
f.h = 8.0;
- Object* dummy = CALL_GENERATED_CODE(isolate, fn, &d, &f, 0, 0, 0);
- USE(dummy);
+ fn.Call(&d, &f, 0, 0, 0);
CHECK_EQ(7.7, d.a);
CHECK_EQ(8.8, d.b);
@@ -948,8 +937,8 @@ TEST(11) {
} I;
I i;
- i.a = 0xabcd0001;
- i.b = 0xabcd0000;
+ i.a = 0xABCD0001;
+ i.b = 0xABCD0000;
Assembler assm(isolate, nullptr, 0);
@@ -965,13 +954,13 @@ TEST(11) {
__ str(r2, MemOperand(r0, offsetof(I, b)));
// Test corner cases.
- __ mov(r1, Operand(0xffffffff));
+ __ mov(r1, Operand(0xFFFFFFFF));
__ mov(r2, Operand::Zero());
__ mov(r3, Operand(r1, ASR, 1), SetCC); // Set the carry.
__ adc(r3, r1, Operand(r2));
__ str(r3, MemOperand(r0, offsetof(I, c)));
- __ mov(r1, Operand(0xffffffff));
+ __ mov(r1, Operand(0xFFFFFFFF));
__ mov(r2, Operand::Zero());
__ mov(r3, Operand(r2, ASR, 1), SetCC); // Unset the carry.
__ adc(r3, r1, Operand(r2));
@@ -987,14 +976,13 @@ TEST(11) {
OFStream os(stdout);
code->Print(os);
#endif
- F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &i, 0, 0, 0, 0);
- USE(dummy);
+ auto f = GeneratedCode<F_piiii>::FromCode(*code);
+ f.Call(&i, 0, 0, 0, 0);
- CHECK_EQ(static_cast<int32_t>(0xabcd0001), i.a);
- CHECK_EQ(static_cast<int32_t>(0xabcd0000) >> 1, i.b);
+ CHECK_EQ(static_cast<int32_t>(0xABCD0001), i.a);
+ CHECK_EQ(static_cast<int32_t>(0xABCD0000) >> 1, i.b);
CHECK_EQ(0x00000000, i.c);
- CHECK_EQ(static_cast<int32_t>(0xffffffff), i.d);
+ CHECK_EQ(static_cast<int32_t>(0xFFFFFFFF), i.d);
}
@@ -1114,15 +1102,14 @@ TEST(13) {
OFStream os(stdout);
code->Print(os);
#endif
- F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
+ auto f = GeneratedCode<F_piiii>::FromCode(*code);
t.a = 1.5;
t.b = 2.75;
t.c = 17.17;
t.x = 1.5;
t.y = 2.75;
t.z = 17.17;
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
- USE(dummy);
+ f.Call(&t, 0, 0, 0, 0);
CHECK_EQ(14.7610017472335499, t.a);
CHECK_EQ(3.84200491244266251, t.b);
CHECK_EQ(73.8818412254460241, t.c);
@@ -1187,16 +1174,15 @@ TEST(14) {
OFStream os(stdout);
code->Print(os);
#endif
- F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
+ auto f = GeneratedCode<F_piiii>::FromCode(*code);
t.left = bit_cast<double>(kHoleNanInt64);
t.right = 1;
t.add_result = 0;
t.sub_result = 0;
t.mul_result = 0;
t.div_result = 0;
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
- USE(dummy);
- const uint32_t kArmNanUpper32 = 0x7ff80000;
+ f.Call(&t, 0, 0, 0, 0);
+ const uint32_t kArmNanUpper32 = 0x7FF80000;
const uint32_t kArmNanLower32 = 0x00000000;
#ifdef DEBUG
const uint64_t kArmNanInt64 =
@@ -1206,17 +1192,17 @@ TEST(14) {
// With VFP2 the sign of the canonicalized Nan is undefined. So
// we remove the sign bit for the upper tests.
CHECK_EQ(kArmNanUpper32,
- (bit_cast<int64_t>(t.add_result) >> 32) & 0x7fffffff);
- CHECK_EQ(kArmNanLower32, bit_cast<int64_t>(t.add_result) & 0xffffffffu);
+ (bit_cast<int64_t>(t.add_result) >> 32) & 0x7FFFFFFF);
+ CHECK_EQ(kArmNanLower32, bit_cast<int64_t>(t.add_result) & 0xFFFFFFFFu);
CHECK_EQ(kArmNanUpper32,
- (bit_cast<int64_t>(t.sub_result) >> 32) & 0x7fffffff);
- CHECK_EQ(kArmNanLower32, bit_cast<int64_t>(t.sub_result) & 0xffffffffu);
+ (bit_cast<int64_t>(t.sub_result) >> 32) & 0x7FFFFFFF);
+ CHECK_EQ(kArmNanLower32, bit_cast<int64_t>(t.sub_result) & 0xFFFFFFFFu);
CHECK_EQ(kArmNanUpper32,
- (bit_cast<int64_t>(t.mul_result) >> 32) & 0x7fffffff);
- CHECK_EQ(kArmNanLower32, bit_cast<int64_t>(t.mul_result) & 0xffffffffu);
+ (bit_cast<int64_t>(t.mul_result) >> 32) & 0x7FFFFFFF);
+ CHECK_EQ(kArmNanLower32, bit_cast<int64_t>(t.mul_result) & 0xFFFFFFFFu);
CHECK_EQ(kArmNanUpper32,
- (bit_cast<int64_t>(t.div_result) >> 32) & 0x7fffffff);
- CHECK_EQ(kArmNanLower32, bit_cast<int64_t>(t.div_result) & 0xffffffffu);
+ (bit_cast<int64_t>(t.div_result) >> 32) & 0x7FFFFFFF);
+ CHECK_EQ(kArmNanLower32, bit_cast<int64_t>(t.div_result) & 0xFFFFFFFFu);
}
#define CHECK_EQ_SPLAT(field, ex) \
@@ -1376,7 +1362,7 @@ TEST(15) {
__ vstr(d4, r0, offsetof(T, vqmovn_s32));
// ARM core register to scalar.
- __ mov(r4, Operand(0xfffffff8));
+ __ mov(r4, Operand(0xFFFFFFF8));
__ vmov(d0, Double(0.0));
__ vmov(NeonS8, d0, 1, r4);
__ vmov(NeonS16, d0, 1, r4);
@@ -1388,8 +1374,8 @@ TEST(15) {
__ vstr(d0, r0, offsetof(T, vmov_to_scalar2));
// Scalar to ARM core register.
- __ mov(r4, Operand(0xffffff00));
- __ mov(r5, Operand(0xffffffff));
+ __ mov(r4, Operand(0xFFFFFF00));
+ __ mov(r5, Operand(0xFFFFFFFF));
__ vmov(d0, r4, r5);
__ vmov(NeonS8, r4, d0, 1);
__ str(r4, MemOperand(r0, offsetof(T, vmov_from_scalar_s8)));
@@ -1441,7 +1427,7 @@ TEST(15) {
__ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
// vdup (from register).
- __ mov(r4, Operand(0xa));
+ __ mov(r4, Operand(0xA));
__ vdup(Neon8, q0, r4);
__ vdup(Neon16, q1, r4);
__ vdup(Neon32, q2, r4);
@@ -1477,10 +1463,10 @@ TEST(15) {
__ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
// vabs (integer).
- __ mov(r4, Operand(0x7f7f7f7f));
+ __ mov(r4, Operand(0x7F7F7F7F));
__ mov(r5, Operand(0x01010101));
__ vmov(d0, r4, r5);
- __ mov(r4, Operand(0xffffffff));
+ __ mov(r4, Operand(0xFFFFFFFF));
__ mov(r5, Operand(0x80808080));
__ vmov(d1, r4, r5);
__ vabs(Neon8, q1, q0);
@@ -1504,7 +1490,7 @@ TEST(15) {
__ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
// veor.
- __ mov(r4, Operand(0xaa));
+ __ mov(r4, Operand(0xAA));
__ vdup(Neon16, q0, r4);
__ mov(r4, Operand(0x55));
__ vdup(Neon16, q1, r4);
@@ -1512,15 +1498,15 @@ TEST(15) {
__ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, veor))));
__ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
// vand.
- __ mov(r4, Operand(0xff));
+ __ mov(r4, Operand(0xFF));
__ vdup(Neon16, q0, r4);
- __ mov(r4, Operand(0xfe));
+ __ mov(r4, Operand(0xFE));
__ vdup(Neon16, q1, r4);
__ vand(q1, q1, q0);
__ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vand))));
__ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
// vorr.
- __ mov(r4, Operand(0xaa));
+ __ mov(r4, Operand(0xAA));
__ vdup(Neon16, q0, r4);
__ mov(r4, Operand(0x55));
__ vdup(Neon16, q1, r4);
@@ -1631,7 +1617,7 @@ TEST(15) {
__ vmax(NeonS8, q2, q0, q1);
__ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vmax_s8))));
__ vst1(Neon8, NeonListOperand(q2), NeonMemOperand(r4));
- __ mov(r4, Operand(0xff));
+ __ mov(r4, Operand(0xFF));
__ vdup(Neon16, q0, r4);
__ vdup(Neon8, q1, r4);
__ vmin(NeonU16, q2, q0, q1);
@@ -1640,7 +1626,7 @@ TEST(15) {
__ vmax(NeonU16, q2, q0, q1);
__ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vmax_u16))));
__ vst1(Neon8, NeonListOperand(q2), NeonMemOperand(r4));
- __ mov(r4, Operand(0xff));
+ __ mov(r4, Operand(0xFF));
__ vdup(Neon32, q0, r4);
__ vdup(Neon8, q1, r4);
__ vmin(NeonS32, q2, q0, q1);
@@ -1669,14 +1655,14 @@ TEST(15) {
__ vstr(d4, r0, offsetof(T, vpmin_s8));
__ vpmax(NeonS8, d4, d0, d2);
__ vstr(d4, r0, offsetof(T, vpmax_s8));
- __ mov(r4, Operand(0xffff));
+ __ mov(r4, Operand(0xFFFF));
__ vdup(Neon32, q0, r4);
__ vdup(Neon16, q1, r4);
__ vpmin(NeonU16, d4, d0, d2);
__ vstr(d4, r0, offsetof(T, vpmin_u16));
__ vpmax(NeonU16, d4, d0, d2);
__ vstr(d4, r0, offsetof(T, vpmax_u16));
- __ mov(r4, Operand(0xff));
+ __ mov(r4, Operand(0xFF));
__ veor(q0, q0, q0);
__ vmov(s0, r4);
__ vdup(Neon8, q1, r4);
@@ -1754,16 +1740,16 @@ TEST(15) {
__ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
// vqsub.
- __ mov(r4, Operand(0x7f));
+ __ mov(r4, Operand(0x7F));
__ vdup(Neon8, q0, r4);
- __ mov(r4, Operand(0x3f));
+ __ mov(r4, Operand(0x3F));
__ vdup(Neon8, q1, r4);
__ vqsub(NeonU8, q1, q1, q0);
__ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vqsub_u8))));
__ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
__ mov(r4, Operand(0x8000));
__ vdup(Neon16, q0, r4);
- __ mov(r4, Operand(0x7fff));
+ __ mov(r4, Operand(0x7FFF));
__ vdup(Neon16, q1, r4);
__ vqsub(NeonS16, q1, q1, q0);
__ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vqsub_s16))));
@@ -1820,7 +1806,7 @@ TEST(15) {
__ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
// vsli, vsri.
- __ mov(r4, Operand(0xffffffff));
+ __ mov(r4, Operand(0xFFFFFFFF));
__ mov(r5, Operand(0x1));
__ vmov(d0, r4, r5);
__ vmov(d1, r5, r5);
@@ -1857,7 +1843,7 @@ TEST(15) {
__ vcgt(NeonS8, q2, q0, q1);
__ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vcgt_s8))));
__ vst1(Neon8, NeonListOperand(q2), NeonMemOperand(r4));
- __ mov(r4, Operand(0xff));
+ __ mov(r4, Operand(0xFF));
__ vdup(Neon16, q0, r4);
__ vdup(Neon8, q1, r4);
__ vcge(NeonU16, q2, q0, q1);
@@ -1866,7 +1852,7 @@ TEST(15) {
__ vcgt(NeonU16, q2, q0, q1);
__ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vcgt_u16))));
__ vst1(Neon8, NeonListOperand(q2), NeonMemOperand(r4));
- __ mov(r4, Operand(0xff));
+ __ mov(r4, Operand(0xFF));
__ vdup(Neon32, q0, r4);
__ vdup(Neon8, q1, r4);
__ vcge(NeonS32, q2, q0, q1);
@@ -1886,7 +1872,7 @@ TEST(15) {
__ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
// vbsl.
- __ mov(r4, Operand(0x00ff));
+ __ mov(r4, Operand(0x00FF));
__ vdup(Neon16, q0, r4);
__ mov(r4, Operand(0x01));
__ vdup(Neon8, q1, r4);
@@ -2050,7 +2036,7 @@ TEST(15) {
// vtb[l/x].
__ mov(r4, Operand(0x06040200));
- __ mov(r5, Operand(0xff050301));
+ __ mov(r5, Operand(0xFF050301));
__ vmov(d2, r4, r5); // d2 = ff05030106040200
__ vtbl(d0, NeonListOperand(d2, 1), d2);
__ vstr(d0, r0, offsetof(T, vtbl));
@@ -2068,7 +2054,7 @@ TEST(15) {
OFStream os(stdout);
code->Print(os);
#endif
- F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
+ auto f = GeneratedCode<F_piiii>::FromCode(*code);
t.src0 = 0x01020304;
t.src1 = 0x11121314;
t.src2 = 0x21222324;
@@ -2093,10 +2079,9 @@ TEST(15) {
t.dstA3 = 0;
t.lane_test[0] = 0x03020100;
t.lane_test[1] = 0x07060504;
- t.lane_test[2] = 0x0b0a0908;
- t.lane_test[3] = 0x0f0e0d0c;
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
- USE(dummy);
+ t.lane_test[2] = 0x0B0A0908;
+ t.lane_test[3] = 0x0F0E0D0C;
+ f.Call(&t, 0, 0, 0, 0);
CHECK_EQ(0x01020304u, t.dst0);
CHECK_EQ(0x11121314u, t.dst1);
@@ -2111,30 +2096,30 @@ TEST(15) {
CHECK_EQ(0x00830084u, t.dstA2);
CHECK_EQ(0x00810082u, t.dstA3);
- CHECK_EQ_32X4(vmovl_s8, 0x00430044u, 0x00410042u, 0xff83ff84u, 0xff81ff82u);
- CHECK_EQ_32X4(vmovl_u16, 0xff84u, 0xff83u, 0xff82u, 0xff81u);
- CHECK_EQ_32X4(vmovl_s32, 0xff84u, 0x0u, 0xff83u, 0x0u);
- CHECK_EQ_32X2(vqmovn_u16, 0xff83ff84u, 0xff81ff82u);
+ CHECK_EQ_32X4(vmovl_s8, 0x00430044u, 0x00410042u, 0xFF83FF84u, 0xFF81FF82u);
+ CHECK_EQ_32X4(vmovl_u16, 0xFF84u, 0xFF83u, 0xFF82u, 0xFF81u);
+ CHECK_EQ_32X4(vmovl_s32, 0xFF84u, 0x0u, 0xFF83u, 0x0u);
+ CHECK_EQ_32X2(vqmovn_u16, 0xFF83FF84u, 0xFF81FF82u);
CHECK_EQ_32X2(vqmovn_s8, 0x81828384u, 0x81828384u);
- CHECK_EQ_32X2(vqmovn_s32, 0xff84u, 0xff83u);
+ CHECK_EQ_32X2(vqmovn_s32, 0xFF84u, 0xFF83u);
- CHECK_EQ(0xfffffff8fff8f800u, t.vmov_to_scalar1);
- CHECK_EQ(0xfff80000f8000000u, t.vmov_to_scalar2);
+ CHECK_EQ(0xFFFFFFF8FFF8F800u, t.vmov_to_scalar1);
+ CHECK_EQ(0xFFF80000F8000000u, t.vmov_to_scalar2);
CHECK_EQ(0xFFFFFFFFu, t.vmov_from_scalar_s8);
CHECK_EQ(0xFFu, t.vmov_from_scalar_u8);
CHECK_EQ(0xFFFFFFFFu, t.vmov_from_scalar_s16);
CHECK_EQ(0xFFFFu, t.vmov_from_scalar_u16);
CHECK_EQ(0xFFFFFFFFu, t.vmov_from_scalar_32);
- CHECK_EQ_32X4(vmov, 0x03020100u, 0x07060504u, 0x0b0a0908u, 0x0f0e0d0cu);
- CHECK_EQ_32X4(vmvn, 0xfcfdfeffu, 0xf8f9fafbu, 0xf4f5f6f7u, 0xf0f1f2f3u);
+ CHECK_EQ_32X4(vmov, 0x03020100u, 0x07060504u, 0x0B0A0908u, 0x0F0E0D0Cu);
+ CHECK_EQ_32X4(vmvn, 0xFCFDFEFFu, 0xF8F9FAFBu, 0xF4F5F6F7u, 0xF0F1F2F3u);
- CHECK_EQ_SPLAT(vdup8, 0x0a0a0a0au);
- CHECK_EQ_SPLAT(vdup16, 0x000a000au);
- CHECK_EQ_SPLAT(vdup32, 0x0000000au);
- CHECK_EQ_SPLAT(vdupf, -1.0); // bit pattern is 0xbf800000.
- CHECK_EQ_32X2(vdupf_16, 0xbf80bf80u, 0xbf80bf80u);
- CHECK_EQ_SPLAT(vdupf_8, 0xbfbfbfbfu);
+ CHECK_EQ_SPLAT(vdup8, 0x0A0A0A0Au);
+ CHECK_EQ_SPLAT(vdup16, 0x000A000Au);
+ CHECK_EQ_SPLAT(vdup32, 0x0000000Au);
+ CHECK_EQ_SPLAT(vdupf, -1.0); // bit pattern is 0xBF800000.
+ CHECK_EQ_32X2(vdupf_16, 0xBF80BF80u, 0xBF80BF80u);
+ CHECK_EQ_SPLAT(vdupf_8, 0xBFBFBFBFu);
// src: [-1, -1, 1, 1]
CHECK_EQ_32X4(vcvt_s32_f32, -1, -1, 1, 1);
@@ -2149,17 +2134,17 @@ TEST(15) {
CHECK_EQ_32X4(vabsf, 1.0, 0.0, 0.0, 1.0);
CHECK_EQ_32X4(vnegf, 1.0, 0.0, -0.0, -1.0);
- // src: [0x7f7f7f7f, 0x01010101, 0xffffffff, 0x80808080]
- CHECK_EQ_32X4(vabs_s8, 0x7f7f7f7fu, 0x01010101u, 0x01010101u, 0x80808080u);
- CHECK_EQ_32X4(vabs_s16, 0x7f7f7f7fu, 0x01010101u, 0x00010001u, 0x7f807f80u);
- CHECK_EQ_32X4(vabs_s32, 0x7f7f7f7fu, 0x01010101u, 0x00000001u, 0x7f7f7f80u);
- CHECK_EQ_32X4(vneg_s8, 0x81818181u, 0xffffffffu, 0x01010101u, 0x80808080u);
- CHECK_EQ_32X4(vneg_s16, 0x80818081u, 0xfefffeffu, 0x00010001u, 0x7f807f80u);
- CHECK_EQ_32X4(vneg_s32, 0x80808081u, 0xfefefeffu, 0x00000001u, 0x7f7f7f80u);
-
- CHECK_EQ_SPLAT(veor, 0x00ff00ffu);
- CHECK_EQ_SPLAT(vand, 0x00fe00feu);
- CHECK_EQ_SPLAT(vorr, 0x00ff00ffu);
+ // src: [0x7F7F7F7F, 0x01010101, 0xFFFFFFFF, 0x80808080]
+ CHECK_EQ_32X4(vabs_s8, 0x7F7F7F7Fu, 0x01010101u, 0x01010101u, 0x80808080u);
+ CHECK_EQ_32X4(vabs_s16, 0x7F7F7F7Fu, 0x01010101u, 0x00010001u, 0x7F807F80u);
+ CHECK_EQ_32X4(vabs_s32, 0x7F7F7F7Fu, 0x01010101u, 0x00000001u, 0x7F7F7F80u);
+ CHECK_EQ_32X4(vneg_s8, 0x81818181u, 0xFFFFFFFFu, 0x01010101u, 0x80808080u);
+ CHECK_EQ_32X4(vneg_s16, 0x80818081u, 0xFEFFFEFFu, 0x00010001u, 0x7F807F80u);
+ CHECK_EQ_32X4(vneg_s32, 0x80808081u, 0xFEFEFEFFu, 0x00000001u, 0x7F7F7F80u);
+
+ CHECK_EQ_SPLAT(veor, 0x00FF00FFu);
+ CHECK_EQ_SPLAT(vand, 0x00FE00FEu);
+ CHECK_EQ_SPLAT(vorr, 0x00FF00FFu);
CHECK_EQ_SPLAT(vaddf, 2.0);
CHECK_EQ_32X2(vpaddf, 3.0, 7.0);
CHECK_EQ_SPLAT(vminf, 1.0);
@@ -2170,101 +2155,101 @@ TEST(15) {
CHECK_EQ_SPLAT(vrecps, -1.0f); // 2 - (2 * 1.5)
CHECK_ESTIMATE_SPLAT(vrsqrte, 0.5f, 0.1f); // 1 / sqrt(4)
CHECK_EQ_SPLAT(vrsqrts, -1.0f); // (3 - (2 * 2.5)) / 2
- CHECK_EQ_SPLAT(vceqf, 0xffffffffu);
+ CHECK_EQ_SPLAT(vceqf, 0xFFFFFFFFu);
// [0] >= [-1, 1, -0, 0]
- CHECK_EQ_32X4(vcgef, 0u, 0xffffffffu, 0xffffffffu, 0xffffffffu);
- CHECK_EQ_32X4(vcgtf, 0u, 0xffffffffu, 0u, 0u);
+ CHECK_EQ_32X4(vcgef, 0u, 0xFFFFFFFFu, 0xFFFFFFFFu, 0xFFFFFFFFu);
+ CHECK_EQ_32X4(vcgtf, 0u, 0xFFFFFFFFu, 0u, 0u);
// [0, 3, 0, 3, ...] and [3, 3, 3, 3, ...]
CHECK_EQ_SPLAT(vmin_s8, 0x00030003u);
CHECK_EQ_SPLAT(vmax_s8, 0x03030303u);
- // [0x00ff, 0x00ff, ...] and [0xffff, 0xffff, ...]
- CHECK_EQ_SPLAT(vmin_u16, 0x00ff00ffu);
- CHECK_EQ_SPLAT(vmax_u16, 0xffffffffu);
- // [0x000000ff, 0x000000ff, ...] and [0xffffffff, 0xffffffff, ...]
- CHECK_EQ_SPLAT(vmin_s32, 0xffffffffu);
- CHECK_EQ_SPLAT(vmax_s32, 0xffu);
+ // [0x00FF, 0x00FF, ...] and [0xFFFF, 0xFFFF, ...]
+ CHECK_EQ_SPLAT(vmin_u16, 0x00FF00FFu);
+ CHECK_EQ_SPLAT(vmax_u16, 0xFFFFFFFFu);
+ // [0x000000FF, 0x000000FF, ...] and [0xFFFFFFFF, 0xFFFFFFFF, ...]
+ CHECK_EQ_SPLAT(vmin_s32, 0xFFFFFFFFu);
+ CHECK_EQ_SPLAT(vmax_s32, 0xFFu);
// [0, 3, 0, 3, ...] and [3, 3, 3, 3, ...]
CHECK_EQ_32X2(vpadd_i8, 0x03030303u, 0x06060606u);
- CHECK_EQ_32X2(vpadd_i16, 0x0c0c0606u, 0x06060606u);
- CHECK_EQ_32X2(vpadd_i32, 0x12120c0cu, 0x06060606u);
+ CHECK_EQ_32X2(vpadd_i16, 0x0C0C0606u, 0x06060606u);
+ CHECK_EQ_32X2(vpadd_i32, 0x12120C0Cu, 0x06060606u);
CHECK_EQ_32X2(vpmin_s8, 0x00000000u, 0x03030303u);
CHECK_EQ_32X2(vpmax_s8, 0x03030303u, 0x03030303u);
// [0, ffff, 0, ffff] and [ffff, ffff]
- CHECK_EQ_32X2(vpmin_u16, 0x00000000u, 0xffffffffu);
- CHECK_EQ_32X2(vpmax_u16, 0xffffffffu, 0xffffffffu);
- // [0x000000ff, 0x00000000u] and [0xffffffff, 0xffffffff, ...]
- CHECK_EQ_32X2(vpmin_s32, 0x00u, 0xffffffffu);
- CHECK_EQ_32X2(vpmax_s32, 0xffu, 0xffffffffu);
+ CHECK_EQ_32X2(vpmin_u16, 0x00000000u, 0xFFFFFFFFu);
+ CHECK_EQ_32X2(vpmax_u16, 0xFFFFFFFFu, 0xFFFFFFFFu);
+ // [0x000000FF, 0x00000000u] and [0xFFFFFFFF, 0xFFFFFFFF, ...]
+ CHECK_EQ_32X2(vpmin_s32, 0x00u, 0xFFFFFFFFu);
+ CHECK_EQ_32X2(vpmax_s32, 0xFFu, 0xFFFFFFFFu);
CHECK_EQ_SPLAT(vadd8, 0x03030303u);
CHECK_EQ_SPLAT(vadd16, 0x00030003u);
CHECK_EQ_SPLAT(vadd32, 0x00000003u);
CHECK_EQ_SPLAT(vqadd_s8, 0x80808080u);
- CHECK_EQ_SPLAT(vqadd_u16, 0xffffffffu);
+ CHECK_EQ_SPLAT(vqadd_u16, 0xFFFFFFFFu);
CHECK_EQ_SPLAT(vqadd_s32, 0x80000000u);
CHECK_EQ_SPLAT(vqsub_u8, 0x00000000u);
- CHECK_EQ_SPLAT(vqsub_s16, 0x7fff7fffu);
+ CHECK_EQ_SPLAT(vqsub_s16, 0x7FFF7FFFu);
CHECK_EQ_SPLAT(vqsub_u32, 0x00000000u);
- CHECK_EQ_SPLAT(vsub8, 0xfefefefeu);
- CHECK_EQ_SPLAT(vsub16, 0xfffefffeu);
- CHECK_EQ_SPLAT(vsub32, 0xfffffffeu);
+ CHECK_EQ_SPLAT(vsub8, 0xFEFEFEFEu);
+ CHECK_EQ_SPLAT(vsub16, 0xFFFEFFFEu);
+ CHECK_EQ_SPLAT(vsub32, 0xFFFFFFFEu);
CHECK_EQ_SPLAT(vmul8, 0x04040404u);
CHECK_EQ_SPLAT(vmul16, 0x00040004u);
CHECK_EQ_SPLAT(vmul32, 0x00000004u);
- CHECK_EQ_SPLAT(vshl8, 0xaaaaaaaau);
- CHECK_EQ_SPLAT(vshl16, 0xaa00aa00u);
- CHECK_EQ_SPLAT(vshl32, 0xaaaa0000u);
- CHECK_EQ_SPLAT(vshr_s8, 0xc0c0c0c0u);
+ CHECK_EQ_SPLAT(vshl8, 0xAAAAAAAAu);
+ CHECK_EQ_SPLAT(vshl16, 0xAA00AA00u);
+ CHECK_EQ_SPLAT(vshl32, 0xAAAA0000u);
+ CHECK_EQ_SPLAT(vshr_s8, 0xC0C0C0C0u);
CHECK_EQ_SPLAT(vshr_u16, 0x00400040u);
- CHECK_EQ_SPLAT(vshr_s32, 0xffffc040u);
- CHECK_EQ_32X2(vsli_64, 0x01u, 0xffffffffu);
- CHECK_EQ_32X2(vsri_64, 0xffffffffu, 0x01u);
- CHECK_EQ_32X2(vsli_32, 0xffff0001u, 0x00010001u);
- CHECK_EQ_32X2(vsri_32, 0x00000000u, 0x0000ffffu);
- CHECK_EQ_SPLAT(vceq, 0x00ff00ffu);
+ CHECK_EQ_SPLAT(vshr_s32, 0xFFFFC040u);
+ CHECK_EQ_32X2(vsli_64, 0x01u, 0xFFFFFFFFu);
+ CHECK_EQ_32X2(vsri_64, 0xFFFFFFFFu, 0x01u);
+ CHECK_EQ_32X2(vsli_32, 0xFFFF0001u, 0x00010001u);
+ CHECK_EQ_32X2(vsri_32, 0x00000000u, 0x0000FFFFu);
+ CHECK_EQ_SPLAT(vceq, 0x00FF00FFu);
// [0, 3, 0, 3, ...] >= [3, 3, 3, 3, ...]
- CHECK_EQ_SPLAT(vcge_s8, 0x00ff00ffu);
+ CHECK_EQ_SPLAT(vcge_s8, 0x00FF00FFu);
CHECK_EQ_SPLAT(vcgt_s8, 0u);
- // [0x00ff, 0x00ff, ...] >= [0xffff, 0xffff, ...]
+ // [0x00FF, 0x00FF, ...] >= [0xFFFF, 0xFFFF, ...]
CHECK_EQ_SPLAT(vcge_u16, 0u);
CHECK_EQ_SPLAT(vcgt_u16, 0u);
- // [0x000000ff, 0x000000ff, ...] >= [0xffffffff, 0xffffffff, ...]
- CHECK_EQ_SPLAT(vcge_s32, 0xffffffffu);
- CHECK_EQ_SPLAT(vcgt_s32, 0xffffffffu);
- CHECK_EQ_SPLAT(vtst, 0x00ff00ffu);
+ // [0x000000FF, 0x000000FF, ...] >= [0xFFFFFFFF, 0xFFFFFFFF, ...]
+ CHECK_EQ_SPLAT(vcge_s32, 0xFFFFFFFFu);
+ CHECK_EQ_SPLAT(vcgt_s32, 0xFFFFFFFFu);
+ CHECK_EQ_SPLAT(vtst, 0x00FF00FFu);
CHECK_EQ_SPLAT(vbsl, 0x02010201u);
- CHECK_EQ_32X4(vext, 0x06050403u, 0x0a090807u, 0x0e0d0c0bu, 0x0201000fu);
+ CHECK_EQ_32X4(vext, 0x06050403u, 0x0A090807u, 0x0E0D0C0Bu, 0x0201000Fu);
CHECK_EQ_32X4(vzip8a, 0x01010000u, 0x03030202u, 0x05050404u, 0x07070606u);
- CHECK_EQ_32X4(vzip8b, 0x09090808u, 0x0b0b0a0au, 0x0d0d0c0cu, 0x0f0f0e0eu);
+ CHECK_EQ_32X4(vzip8b, 0x09090808u, 0x0B0B0A0Au, 0x0D0D0C0Cu, 0x0F0F0E0Eu);
CHECK_EQ_32X4(vzip16a, 0x01000100u, 0x03020302u, 0x05040504u, 0x07060706u);
- CHECK_EQ_32X4(vzip16b, 0x09080908u, 0x0b0a0b0au, 0x0d0c0d0cu, 0x0f0e0f0eu);
+ CHECK_EQ_32X4(vzip16b, 0x09080908u, 0x0B0A0B0Au, 0x0D0C0D0Cu, 0x0F0E0F0Eu);
CHECK_EQ_32X4(vzip32a, 0x03020100u, 0x03020100u, 0x07060504u, 0x07060504u);
- CHECK_EQ_32X4(vzip32b, 0x0b0a0908u, 0x0b0a0908u, 0x0f0e0d0cu, 0x0f0e0d0cu);
+ CHECK_EQ_32X4(vzip32b, 0x0B0A0908u, 0x0B0A0908u, 0x0F0E0D0Cu, 0x0F0E0D0Cu);
CHECK_EQ_32X2(vzipd8a, 0x01010000u, 0x03030202u);
CHECK_EQ_32X2(vzipd8b, 0x05050404u, 0x07070606u);
CHECK_EQ_32X2(vzipd16a, 0x01000100u, 0x03020302u);
CHECK_EQ_32X2(vzipd16b, 0x05040504u, 0x07060706u);
- CHECK_EQ_32X4(vuzp8a, 0x06040200u, 0x0e0c0a08u, 0x06040200u, 0x0e0c0a08u);
- CHECK_EQ_32X4(vuzp8b, 0x07050301u, 0x0f0d0b09u, 0x07050301u, 0x0f0d0b09u);
- CHECK_EQ_32X4(vuzp16a, 0x05040100u, 0x0d0c0908u, 0x05040100u, 0x0d0c0908u);
- CHECK_EQ_32X4(vuzp16b, 0x07060302u, 0x0f0e0b0au, 0x07060302u, 0x0f0e0b0au);
- CHECK_EQ_32X4(vuzp32a, 0x03020100u, 0x0b0a0908u, 0x03020100u, 0x0b0a0908u);
- CHECK_EQ_32X4(vuzp32b, 0x07060504u, 0x0f0e0d0cu, 0x07060504u, 0x0f0e0d0cu);
+ CHECK_EQ_32X4(vuzp8a, 0x06040200u, 0x0E0C0A08u, 0x06040200u, 0x0E0C0A08u);
+ CHECK_EQ_32X4(vuzp8b, 0x07050301u, 0x0F0D0B09u, 0x07050301u, 0x0F0D0B09u);
+ CHECK_EQ_32X4(vuzp16a, 0x05040100u, 0x0D0C0908u, 0x05040100u, 0x0D0C0908u);
+ CHECK_EQ_32X4(vuzp16b, 0x07060302u, 0x0F0E0B0Au, 0x07060302u, 0x0F0E0B0Au);
+ CHECK_EQ_32X4(vuzp32a, 0x03020100u, 0x0B0A0908u, 0x03020100u, 0x0B0A0908u);
+ CHECK_EQ_32X4(vuzp32b, 0x07060504u, 0x0F0E0D0Cu, 0x07060504u, 0x0F0E0D0Cu);
CHECK_EQ_32X2(vuzpd8a, 0x06040200u, 0x06040200u);
CHECK_EQ_32X2(vuzpd8b, 0x07050301u, 0x07050301u);
CHECK_EQ_32X2(vuzpd16a, 0x05040100u, 0x05040100u);
CHECK_EQ_32X2(vuzpd16b, 0x07060302u, 0x07060302u);
- CHECK_EQ_32X4(vtrn8a, 0x02020000u, 0x06060404u, 0x0a0a0808u, 0x0e0e0c0cu);
- CHECK_EQ_32X4(vtrn8b, 0x03030101u, 0x07070505u, 0x0b0b0909u, 0x0f0f0d0du);
- CHECK_EQ_32X4(vtrn16a, 0x01000100u, 0x05040504u, 0x09080908u, 0x0d0c0d0cu);
- CHECK_EQ_32X4(vtrn16b, 0x03020302u, 0x07060706u, 0x0b0a0b0au, 0x0f0e0f0eu);
- CHECK_EQ_32X4(vtrn32a, 0x03020100u, 0x03020100u, 0x0b0a0908u, 0x0b0a0908u);
- CHECK_EQ_32X4(vtrn32b, 0x07060504u, 0x07060504u, 0x0f0e0d0cu, 0x0f0e0d0cu);
+ CHECK_EQ_32X4(vtrn8a, 0x02020000u, 0x06060404u, 0x0A0A0808u, 0x0E0E0C0Cu);
+ CHECK_EQ_32X4(vtrn8b, 0x03030101u, 0x07070505u, 0x0B0B0909u, 0x0F0F0D0Du);
+ CHECK_EQ_32X4(vtrn16a, 0x01000100u, 0x05040504u, 0x09080908u, 0x0D0C0D0Cu);
+ CHECK_EQ_32X4(vtrn16b, 0x03020302u, 0x07060706u, 0x0B0A0B0Au, 0x0F0E0F0Eu);
+ CHECK_EQ_32X4(vtrn32a, 0x03020100u, 0x03020100u, 0x0B0A0908u, 0x0B0A0908u);
+ CHECK_EQ_32X4(vtrn32b, 0x07060504u, 0x07060504u, 0x0F0E0D0Cu, 0x0F0E0D0Cu);
CHECK_EQ_32X2(vtrnd8a, 0x02020000u, 0x06060404u);
CHECK_EQ_32X2(vtrnd8b, 0x03030101u, 0x07070505u);
@@ -2274,20 +2259,20 @@ TEST(15) {
CHECK_EQ_32X2(vtrnd32b, 0x07060504u, 0x07060504u);
// src: 0 1 2 3 4 5 6 7 8 9 a b c d e f (little endian)
- CHECK_EQ_32X4(vrev64_32, 0x07060504u, 0x03020100u, 0x0f0e0d0cu,
- 0x0b0a0908u);
- CHECK_EQ_32X4(vrev64_16, 0x05040706u, 0x01000302u, 0x0d0c0f0eu,
- 0x09080b0au);
- CHECK_EQ_32X4(vrev64_8, 0x04050607u, 0x00010203u, 0x0c0d0e0fu, 0x08090a0bu);
- CHECK_EQ_32X4(vrev32_16, 0x01000302u, 0x05040706u, 0x09080b0au,
- 0x0d0c0f0eu);
- CHECK_EQ_32X4(vrev32_8, 0x00010203u, 0x04050607u, 0x08090a0bu, 0x0c0d0e0fu);
- CHECK_EQ_32X4(vrev16_8, 0x02030001u, 0x06070405u, 0x0a0b0809u, 0x0e0f0c0du);
+ CHECK_EQ_32X4(vrev64_32, 0x07060504u, 0x03020100u, 0x0F0E0D0Cu,
+ 0x0B0A0908u);
+ CHECK_EQ_32X4(vrev64_16, 0x05040706u, 0x01000302u, 0x0D0C0F0Eu,
+ 0x09080B0Au);
+ CHECK_EQ_32X4(vrev64_8, 0x04050607u, 0x00010203u, 0x0C0D0E0Fu, 0x08090A0Bu);
+ CHECK_EQ_32X4(vrev32_16, 0x01000302u, 0x05040706u, 0x09080B0Au,
+ 0x0D0C0F0Eu);
+ CHECK_EQ_32X4(vrev32_8, 0x00010203u, 0x04050607u, 0x08090A0Bu, 0x0C0D0E0Fu);
+ CHECK_EQ_32X4(vrev16_8, 0x02030001u, 0x06070405u, 0x0A0B0809u, 0x0E0F0C0Du);
CHECK_EQ(0x05010400u, t.vtbl[0]);
CHECK_EQ(0x00030602u, t.vtbl[1]);
CHECK_EQ(0x05010400u, t.vtbx[0]);
- CHECK_EQ(0xff030602u, t.vtbx[1]);
+ CHECK_EQ(0xFF030602u, t.vtbx[1]);
}
}
@@ -2345,7 +2330,7 @@ TEST(16) {
OFStream os(stdout);
code->Print(os);
#endif
- F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
+ auto f = GeneratedCode<F_piiii>::FromCode(*code);
t.src0 = 0x01020304;
t.src1 = 0x11121314;
t.src2 = 0x11121300;
@@ -2354,8 +2339,7 @@ TEST(16) {
t.dst2 = 0;
t.dst3 = 0;
t.dst4 = 0;
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
- USE(dummy);
+ f.Call(&t, 0, 0, 0, 0);
CHECK_EQ(0x12130304u, t.dst0);
CHECK_EQ(0x01021213u, t.dst1);
CHECK_EQ(0x00010003u, t.dst2);
@@ -2383,15 +2367,13 @@ TEST(17) {
__ nop();
}
-
-#define TEST_SDIV(expected_, dividend_, divisor_) \
- t.dividend = dividend_; \
- t.divisor = divisor_; \
- t.result = 0; \
- dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0); \
+#define TEST_SDIV(expected_, dividend_, divisor_) \
+ t.dividend = dividend_; \
+ t.divisor = divisor_; \
+ t.result = 0; \
+ f.Call(&t, 0, 0, 0, 0); \
CHECK_EQ(expected_, t.result);
-
TEST(sdiv) {
// Test the sdiv.
CcTest::InitializeVM();
@@ -2426,8 +2408,7 @@ TEST(sdiv) {
OFStream os(stdout);
code->Print(os);
#endif
- F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
- Object* dummy;
+ auto f = GeneratedCode<F_piiii>::FromCode(*code);
TEST_SDIV(0, kMinInt, 0);
TEST_SDIV(0, 1024, 0);
TEST_SDIV(1073741824, kMinInt, -2);
@@ -2440,22 +2421,19 @@ TEST(sdiv) {
TEST_SDIV(-3, -10, 3);
TEST_SDIV(5, -10, -2);
TEST_SDIV(3, -10, -3);
- USE(dummy);
}
}
#undef TEST_SDIV
-
-#define TEST_UDIV(expected_, dividend_, divisor_) \
- t.dividend = dividend_; \
- t.divisor = divisor_; \
- t.result = 0; \
- dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0); \
+#define TEST_UDIV(expected_, dividend_, divisor_) \
+ t.dividend = dividend_; \
+ t.divisor = divisor_; \
+ t.result = 0; \
+ f.Call(&t, 0, 0, 0, 0); \
CHECK_EQ(expected_, t.result);
-
TEST(udiv) {
// Test the udiv.
CcTest::InitializeVM();
@@ -2490,13 +2468,11 @@ TEST(udiv) {
OFStream os(stdout);
code->Print(os);
#endif
- F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
- Object* dummy;
+ auto f = GeneratedCode<F_piiii>::FromCode(*code);
TEST_UDIV(0u, 0, 0);
TEST_UDIV(0u, 1024, 0);
TEST_UDIV(5u, 10, 2);
TEST_UDIV(3u, 10, 3);
- USE(dummy);
}
}
@@ -2520,12 +2496,11 @@ TEST(smmla) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
+ auto f = GeneratedCode<F_piiii>::FromCode(*code);
for (size_t i = 0; i < 128; ++i) {
int32_t r, x = rng->NextInt(), y = rng->NextInt(), z = rng->NextInt();
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &r, x, y, z, 0);
+ f.Call(&r, x, y, z, 0);
CHECK_EQ(base::bits::SignedMulHighAndAdd32(x, y, z), r);
- USE(dummy);
}
}
@@ -2546,12 +2521,11 @@ TEST(smmul) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
+ auto f = GeneratedCode<F_piiii>::FromCode(*code);
for (size_t i = 0; i < 128; ++i) {
int32_t r, x = rng->NextInt(), y = rng->NextInt();
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &r, x, y, 0, 0);
+ f.Call(&r, x, y, 0, 0);
CHECK_EQ(base::bits::SignedMulHigh32(x, y), r);
- USE(dummy);
}
}
@@ -2572,12 +2546,11 @@ TEST(sxtb) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
+ auto f = GeneratedCode<F_piiii>::FromCode(*code);
for (size_t i = 0; i < 128; ++i) {
int32_t r, x = rng->NextInt();
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &r, x, 0, 0, 0);
+ f.Call(&r, x, 0, 0, 0);
CHECK_EQ(static_cast<int32_t>(static_cast<int8_t>(x)), r);
- USE(dummy);
}
}
@@ -2598,12 +2571,11 @@ TEST(sxtab) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
+ auto f = GeneratedCode<F_piiii>::FromCode(*code);
for (size_t i = 0; i < 128; ++i) {
int32_t r, x = rng->NextInt(), y = rng->NextInt();
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &r, x, y, 0, 0);
+ f.Call(&r, x, y, 0, 0);
CHECK_EQ(static_cast<int32_t>(static_cast<int8_t>(x)) + y, r);
- USE(dummy);
}
}
@@ -2624,12 +2596,11 @@ TEST(sxth) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
+ auto f = GeneratedCode<F_piiii>::FromCode(*code);
for (size_t i = 0; i < 128; ++i) {
int32_t r, x = rng->NextInt();
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &r, x, 0, 0, 0);
+ f.Call(&r, x, 0, 0, 0);
CHECK_EQ(static_cast<int32_t>(static_cast<int16_t>(x)), r);
- USE(dummy);
}
}
@@ -2650,12 +2621,11 @@ TEST(sxtah) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
+ auto f = GeneratedCode<F_piiii>::FromCode(*code);
for (size_t i = 0; i < 128; ++i) {
int32_t r, x = rng->NextInt(), y = rng->NextInt();
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &r, x, y, 0, 0);
+ f.Call(&r, x, y, 0, 0);
CHECK_EQ(static_cast<int32_t>(static_cast<int16_t>(x)) + y, r);
- USE(dummy);
}
}
@@ -2676,12 +2646,11 @@ TEST(uxtb) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
+ auto f = GeneratedCode<F_piiii>::FromCode(*code);
for (size_t i = 0; i < 128; ++i) {
int32_t r, x = rng->NextInt();
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &r, x, 0, 0, 0);
+ f.Call(&r, x, 0, 0, 0);
CHECK_EQ(static_cast<int32_t>(static_cast<uint8_t>(x)), r);
- USE(dummy);
}
}
@@ -2702,12 +2671,11 @@ TEST(uxtab) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
+ auto f = GeneratedCode<F_piiii>::FromCode(*code);
for (size_t i = 0; i < 128; ++i) {
int32_t r, x = rng->NextInt(), y = rng->NextInt();
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &r, x, y, 0, 0);
+ f.Call(&r, x, y, 0, 0);
CHECK_EQ(static_cast<int32_t>(static_cast<uint8_t>(x)) + y, r);
- USE(dummy);
}
}
@@ -2728,12 +2696,11 @@ TEST(uxth) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
+ auto f = GeneratedCode<F_piiii>::FromCode(*code);
for (size_t i = 0; i < 128; ++i) {
int32_t r, x = rng->NextInt();
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &r, x, 0, 0, 0);
+ f.Call(&r, x, 0, 0, 0);
CHECK_EQ(static_cast<int32_t>(static_cast<uint16_t>(x)), r);
- USE(dummy);
}
}
@@ -2754,19 +2721,18 @@ TEST(uxtah) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
+ auto f = GeneratedCode<F_piiii>::FromCode(*code);
for (size_t i = 0; i < 128; ++i) {
int32_t r, x = rng->NextInt(), y = rng->NextInt();
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &r, x, y, 0, 0);
+ f.Call(&r, x, y, 0, 0);
CHECK_EQ(static_cast<int32_t>(static_cast<uint16_t>(x)) + y, r);
- USE(dummy);
}
}
-#define TEST_RBIT(expected_, input_) \
- t.input = input_; \
- t.result = 0; \
- dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0); \
+#define TEST_RBIT(expected_, input_) \
+ t.input = input_; \
+ t.result = 0; \
+ f.Call(&t, 0, 0, 0, 0); \
CHECK_EQ(static_cast<uint32_t>(expected_), t.result);
TEST(rbit) {
@@ -2798,15 +2764,13 @@ TEST(rbit) {
code->Print(std::cout);
#endif
- F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
- Object* dummy = nullptr;
- TEST_RBIT(0xffffffff, 0xffffffff);
+ auto f = GeneratedCode<F_piiii>::FromCode(*code);
+ TEST_RBIT(0xFFFFFFFF, 0xFFFFFFFF);
TEST_RBIT(0x00000000, 0x00000000);
- TEST_RBIT(0xffff0000, 0x0000ffff);
- TEST_RBIT(0xff00ff00, 0x00ff00ff);
- TEST_RBIT(0xf0f0f0f0, 0x0f0f0f0f);
- TEST_RBIT(0x1e6a2c48, 0x12345678);
- USE(dummy);
+ TEST_RBIT(0xFFFF0000, 0x0000FFFF);
+ TEST_RBIT(0xFF00FF00, 0x00FF00FF);
+ TEST_RBIT(0xF0F0F0F0, 0x0F0F0F0F);
+ TEST_RBIT(0x1E6A2C48, 0x12345678);
}
}
@@ -2875,9 +2839,8 @@ TEST(code_relative_offset) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, code_object);
- F_iiiii f = FUNCTION_CAST<F_iiiii>(code->entry());
- int res =
- reinterpret_cast<int>(CALL_GENERATED_CODE(isolate, f, 21, 0, 0, 0, 0));
+ auto f = GeneratedCode<F_iiiii>::FromCode(*code);
+ int res = reinterpret_cast<int>(f.Call(21, 0, 0, 0, 0));
::printf("f() = %d\n", res);
CHECK_EQ(42, res);
}
@@ -2919,19 +2882,16 @@ TEST(msr_mrs) {
OFStream os(stdout);
code->Print(os);
#endif
- F_ippii f = FUNCTION_CAST<F_ippii>(code->entry());
- Object* dummy = nullptr;
- USE(dummy);
-
-#define CHECK_MSR_MRS(n, z, c, v) \
- do { \
- uint32_t nzcv = (n << 31) | (z << 30) | (c << 29) | (v << 28); \
- uint32_t result_conditionals = -1; \
- uint32_t result_mrs = -1; \
- dummy = CALL_GENERATED_CODE(isolate, f, nzcv, &result_conditionals, \
- &result_mrs, 0, 0); \
- CHECK_EQ(nzcv, result_conditionals); \
- CHECK_EQ(nzcv, result_mrs); \
+ auto f = GeneratedCode<F_ippii>::FromCode(*code);
+
+#define CHECK_MSR_MRS(n, z, c, v) \
+ do { \
+ uint32_t nzcv = (n << 31) | (z << 30) | (c << 29) | (v << 28); \
+ uint32_t result_conditionals = -1; \
+ uint32_t result_mrs = -1; \
+ f.Call(nzcv, &result_conditionals, &result_mrs, 0, 0); \
+ CHECK_EQ(nzcv, result_conditionals); \
+ CHECK_EQ(nzcv, result_mrs); \
} while (0);
// N Z C V
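// The nzcv value built in the macro packs each flag into the top nibble of
// the status register: N is bit 31, Z bit 30, C bit 29, V bit 28. As a
// standalone helper (illustrative):
#include <cstdint>

uint32_t PackNZCV(bool n, bool z, bool c, bool v) {
  return (uint32_t{n} << 31) | (uint32_t{z} << 30) | (uint32_t{c} << 29) |
         (uint32_t{v} << 28);
}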
@@ -3020,14 +2980,11 @@ TEST(ARMv8_float32_vrintX) {
OFStream os(stdout);
code->Print(os);
#endif
- F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
-
- Object* dummy = nullptr;
- USE(dummy);
+ auto f = GeneratedCode<F_piiii>::FromCode(*code);
#define CHECK_VRINT(input_val, ares, nres, mres, pres, zres) \
t.input = input_val; \
- dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0); \
+ f.Call(&t, 0, 0, 0, 0); \
CHECK_EQ(ares, t.ar); \
CHECK_EQ(nres, t.nr); \
CHECK_EQ(mres, t.mr); \
@@ -3048,7 +3005,7 @@ TEST(ARMv8_float32_vrintX) {
// Check NaN propagation.
float nan = std::numeric_limits<float>::quiet_NaN();
t.input = nan;
- dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
+ f.Call(&t, 0, 0, 0, 0);
CHECK_EQ(bit_cast<int32_t>(nan), bit_cast<int32_t>(t.ar));
CHECK_EQ(bit_cast<int32_t>(nan), bit_cast<int32_t>(t.nr));
CHECK_EQ(bit_cast<int32_t>(nan), bit_cast<int32_t>(t.mr));
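// NaN != NaN under floating-point comparison, so the checks above compare bit
// patterns rather than values. A sketch of the idea behind bit_cast, using
// the memcpy-based type pun:
#include <cstdint>
#include <cstring>

int32_t FloatBits(float f) {
  int32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));  // Well-defined way to read the bits.
  return bits;
}
// FloatBits(nan) == FloatBits(nan) holds even though nan == nan is false.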
@@ -3125,14 +3082,11 @@ TEST(ARMv8_vrintX) {
OFStream os(stdout);
code->Print(os);
#endif
- F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
-
- Object* dummy = nullptr;
- USE(dummy);
+ auto f = GeneratedCode<F_piiii>::FromCode(*code);
#define CHECK_VRINT(input_val, ares, nres, mres, pres, zres) \
t.input = input_val; \
- dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0); \
+ f.Call(&t, 0, 0, 0, 0); \
CHECK_EQ(ares, t.ar); \
CHECK_EQ(nres, t.nr); \
CHECK_EQ(mres, t.mr); \
@@ -3153,7 +3107,7 @@ TEST(ARMv8_vrintX) {
// Check NaN propagation.
double nan = std::numeric_limits<double>::quiet_NaN();
t.input = nan;
- dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
+ f.Call(&t, 0, 0, 0, 0);
CHECK_EQ(bit_cast<int64_t>(nan), bit_cast<int64_t>(t.ar));
CHECK_EQ(bit_cast<int64_t>(nan), bit_cast<int64_t>(t.nr));
CHECK_EQ(bit_cast<int64_t>(nan), bit_cast<int64_t>(t.mr));
@@ -3265,34 +3219,31 @@ TEST(ARMv8_vsel) {
OFStream os(stdout);
code->Print(os);
#endif
- F_ippii f = FUNCTION_CAST<F_ippii>(code->entry());
- Object* dummy = nullptr;
- USE(dummy);
+ auto f = GeneratedCode<F_ippii>::FromCode(*code);
STATIC_ASSERT(kResultPass == -kResultFail);
-#define CHECK_VSEL(n, z, c, v, vseleq, vselge, vselgt, vselvs) \
- do { \
- ResultsF32 results_f32; \
- ResultsF64 results_f64; \
- uint32_t nzcv = (n << 31) | (z << 30) | (c << 29) | (v << 28); \
- dummy = CALL_GENERATED_CODE(isolate, f, nzcv, &results_f32, &results_f64, \
- 0, 0); \
- CHECK_EQ(vseleq, results_f32.vseleq_); \
- CHECK_EQ(vselge, results_f32.vselge_); \
- CHECK_EQ(vselgt, results_f32.vselgt_); \
- CHECK_EQ(vselvs, results_f32.vselvs_); \
- CHECK_EQ(-vseleq, results_f32.vselne_); \
- CHECK_EQ(-vselge, results_f32.vsellt_); \
- CHECK_EQ(-vselgt, results_f32.vselle_); \
- CHECK_EQ(-vselvs, results_f32.vselvc_); \
- CHECK_EQ(vseleq, results_f64.vseleq_); \
- CHECK_EQ(vselge, results_f64.vselge_); \
- CHECK_EQ(vselgt, results_f64.vselgt_); \
- CHECK_EQ(vselvs, results_f64.vselvs_); \
- CHECK_EQ(-vseleq, results_f64.vselne_); \
- CHECK_EQ(-vselge, results_f64.vsellt_); \
- CHECK_EQ(-vselgt, results_f64.vselle_); \
- CHECK_EQ(-vselvs, results_f64.vselvc_); \
+#define CHECK_VSEL(n, z, c, v, vseleq, vselge, vselgt, vselvs) \
+ do { \
+ ResultsF32 results_f32; \
+ ResultsF64 results_f64; \
+ uint32_t nzcv = (n << 31) | (z << 30) | (c << 29) | (v << 28); \
+ f.Call(nzcv, &results_f32, &results_f64, 0, 0); \
+ CHECK_EQ(vseleq, results_f32.vseleq_); \
+ CHECK_EQ(vselge, results_f32.vselge_); \
+ CHECK_EQ(vselgt, results_f32.vselgt_); \
+ CHECK_EQ(vselvs, results_f32.vselvs_); \
+ CHECK_EQ(-vseleq, results_f32.vselne_); \
+ CHECK_EQ(-vselge, results_f32.vsellt_); \
+ CHECK_EQ(-vselgt, results_f32.vselle_); \
+ CHECK_EQ(-vselvs, results_f32.vselvc_); \
+ CHECK_EQ(vseleq, results_f64.vseleq_); \
+ CHECK_EQ(vselge, results_f64.vselge_); \
+ CHECK_EQ(vselgt, results_f64.vselgt_); \
+ CHECK_EQ(vselvs, results_f64.vselvs_); \
+ CHECK_EQ(-vseleq, results_f64.vselne_); \
+ CHECK_EQ(-vselge, results_f64.vsellt_); \
+ CHECK_EQ(-vselgt, results_f64.vselle_); \
+ CHECK_EQ(-vselvs, results_f64.vselvc_); \
} while (0);
// N Z C V vseleq vselge vselgt vselvs
@@ -3359,22 +3310,20 @@ TEST(ARMv8_vminmax_f64) {
OFStream os(stdout);
code->Print(os);
#endif
- F_ppiii f = FUNCTION_CAST<F_ppiii>(code->entry());
- Object* dummy = nullptr;
- USE(dummy);
+ auto f = GeneratedCode<F_ppiii>::FromCode(*code);
#define CHECK_VMINMAX(left, right, vminnm, vmaxnm) \
do { \
Inputs inputs = {left, right}; \
Results results; \
- dummy = CALL_GENERATED_CODE(isolate, f, &inputs, &results, 0, 0, 0); \
+ f.Call(&inputs, &results, 0, 0, 0); \
/* Use a bit_cast to correctly identify -0.0 and NaNs. */ \
CHECK_EQ(bit_cast<uint64_t>(vminnm), bit_cast<uint64_t>(results.vminnm_)); \
CHECK_EQ(bit_cast<uint64_t>(vmaxnm), bit_cast<uint64_t>(results.vmaxnm_)); \
} while (0);
- double nan_a = bit_cast<double>(UINT64_C(0x7ff8000000000001));
- double nan_b = bit_cast<double>(UINT64_C(0x7ff8000000000002));
+ double nan_a = bit_cast<double>(UINT64_C(0x7FF8000000000001));
+ double nan_b = bit_cast<double>(UINT64_C(0x7FF8000000000002));
CHECK_VMINMAX(1.0, -1.0, -1.0, 1.0);
CHECK_VMINMAX(-1.0, 1.0, -1.0, 1.0);
@@ -3441,22 +3390,20 @@ TEST(ARMv8_vminmax_f32) {
OFStream os(stdout);
code->Print(os);
#endif
- F_ppiii f = FUNCTION_CAST<F_ppiii>(code->entry());
- Object* dummy = nullptr;
- USE(dummy);
+ auto f = GeneratedCode<F_ppiii>::FromCode(*code);
#define CHECK_VMINMAX(left, right, vminnm, vmaxnm) \
do { \
Inputs inputs = {left, right}; \
Results results; \
- dummy = CALL_GENERATED_CODE(isolate, f, &inputs, &results, 0, 0, 0); \
+ f.Call(&inputs, &results, 0, 0, 0); \
/* Use a bit_cast to correctly identify -0.0 and NaNs. */ \
CHECK_EQ(bit_cast<uint32_t>(vminnm), bit_cast<uint32_t>(results.vminnm_)); \
CHECK_EQ(bit_cast<uint32_t>(vmaxnm), bit_cast<uint32_t>(results.vmaxnm_)); \
} while (0);
- float nan_a = bit_cast<float>(UINT32_C(0x7fc00001));
- float nan_b = bit_cast<float>(UINT32_C(0x7fc00002));
+ float nan_a = bit_cast<float>(UINT32_C(0x7FC00001));
+ float nan_b = bit_cast<float>(UINT32_C(0x7FC00002));
CHECK_VMINMAX(1.0f, -1.0f, -1.0f, 1.0f);
CHECK_VMINMAX(-1.0f, 1.0f, -1.0f, 1.0f);
@@ -3482,7 +3429,7 @@ TEST(ARMv8_vminmax_f32) {
}
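// vminnm/vmaxnm follow the IEEE-754 minNum/maxNum rule: a quiet NaN operand
// loses to a number, and -0.0 orders below +0.0. A reference sketch of the
// min case (signaling-NaN details omitted; max is symmetric):
#include <cmath>

double MinNumSketch(double a, double b) {
  if (std::isnan(a)) return b;  // Quiet NaN loses against a number.
  if (std::isnan(b)) return a;
  if (a == 0.0 && b == 0.0) return std::signbit(a) ? a : b;  // -0.0 < +0.0.
  return a < b ? a : b;
}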
template <typename T, typename Inputs, typename Results>
-static F_ppiii GenerateMacroFloatMinMax(MacroAssembler& assm) {
+static GeneratedCode<F_ppiii> GenerateMacroFloatMinMax(MacroAssembler& assm) {
T a = T::from_code(0); // d0/s0
T b = T::from_code(1); // d1/s1
T c = T::from_code(2); // d2/s2
@@ -3573,7 +3520,7 @@ static F_ppiii GenerateMacroFloatMinMax(MacroAssembler& assm) {
OFStream os(stdout);
code->Print(os);
#endif
- return FUNCTION_CAST<F_ppiii>(code->entry());
+ return GeneratedCode<F_ppiii>::FromCode(*code);
}
TEST(macro_float_minmax_f64) {
@@ -3600,16 +3547,13 @@ TEST(macro_float_minmax_f64) {
double max_aba_;
};
- F_ppiii f = GenerateMacroFloatMinMax<DwVfpRegister, Inputs, Results>(assm);
-
- Object* dummy = nullptr;
- USE(dummy);
+ auto f = GenerateMacroFloatMinMax<DwVfpRegister, Inputs, Results>(assm);
#define CHECK_MINMAX(left, right, min, max) \
do { \
Inputs inputs = {left, right}; \
Results results; \
- dummy = CALL_GENERATED_CODE(isolate, f, &inputs, &results, 0, 0, 0); \
+ f.Call(&inputs, &results, 0, 0, 0); \
/* Use a bit_cast to correctly identify -0.0 and NaNs. */ \
CHECK_EQ(bit_cast<uint64_t>(min), bit_cast<uint64_t>(results.min_abc_)); \
CHECK_EQ(bit_cast<uint64_t>(min), bit_cast<uint64_t>(results.min_aab_)); \
@@ -3619,8 +3563,8 @@ TEST(macro_float_minmax_f64) {
CHECK_EQ(bit_cast<uint64_t>(max), bit_cast<uint64_t>(results.max_aba_)); \
} while (0)
- double nan_a = bit_cast<double>(UINT64_C(0x7ff8000000000001));
- double nan_b = bit_cast<double>(UINT64_C(0x7ff8000000000002));
+ double nan_a = bit_cast<double>(UINT64_C(0x7FF8000000000001));
+ double nan_b = bit_cast<double>(UINT64_C(0x7FF8000000000002));
CHECK_MINMAX(1.0, -1.0, -1.0, 1.0);
CHECK_MINMAX(-1.0, 1.0, -1.0, 1.0);
@@ -3668,15 +3612,13 @@ TEST(macro_float_minmax_f32) {
float max_aba_;
};
- F_ppiii f = GenerateMacroFloatMinMax<SwVfpRegister, Inputs, Results>(assm);
- Object* dummy = nullptr;
- USE(dummy);
+ auto f = GenerateMacroFloatMinMax<SwVfpRegister, Inputs, Results>(assm);
#define CHECK_MINMAX(left, right, min, max) \
do { \
Inputs inputs = {left, right}; \
Results results; \
- dummy = CALL_GENERATED_CODE(isolate, f, &inputs, &results, 0, 0, 0); \
+ f.Call(&inputs, &results, 0, 0, 0); \
/* Use a bit_cast to correctly identify -0.0 and NaNs. */ \
CHECK_EQ(bit_cast<uint32_t>(min), bit_cast<uint32_t>(results.min_abc_)); \
CHECK_EQ(bit_cast<uint32_t>(min), bit_cast<uint32_t>(results.min_aab_)); \
@@ -3686,8 +3628,8 @@ TEST(macro_float_minmax_f32) {
CHECK_EQ(bit_cast<uint32_t>(max), bit_cast<uint32_t>(results.max_aba_)); \
} while (0)
- float nan_a = bit_cast<float>(UINT32_C(0x7fc00001));
- float nan_b = bit_cast<float>(UINT32_C(0x7fc00002));
+ float nan_a = bit_cast<float>(UINT32_C(0x7FC00001));
+ float nan_b = bit_cast<float>(UINT32_C(0x7FC00002));
CHECK_MINMAX(1.0f, -1.0f, -1.0f, 1.0f);
CHECK_MINMAX(-1.0f, 1.0f, -1.0f, 1.0f);
@@ -3741,30 +3683,27 @@ TEST(unaligned_loads) {
OFStream os(stdout);
code->Print(os);
#endif
- F_ppiii f = FUNCTION_CAST<F_ppiii>(code->entry());
-
- Object* dummy = nullptr;
- USE(dummy);
+ auto f = GeneratedCode<F_ppiii>::FromCode(*code);
#ifndef V8_TARGET_LITTLE_ENDIAN
#error This test assumes a little-endian layout.
#endif
- uint64_t data = UINT64_C(0x84838281807f7e7d);
- dummy = CALL_GENERATED_CODE(isolate, f, &t, &data, 0, 0, 0);
- CHECK_EQ(0x00007e7du, t.ldrh);
- CHECK_EQ(0x00007e7du, t.ldrsh);
- CHECK_EQ(0x807f7e7du, t.ldr);
- dummy = CALL_GENERATED_CODE(isolate, f, &t, &data, 1, 0, 0);
- CHECK_EQ(0x00007f7eu, t.ldrh);
- CHECK_EQ(0x00007f7eu, t.ldrsh);
- CHECK_EQ(0x81807f7eu, t.ldr);
- dummy = CALL_GENERATED_CODE(isolate, f, &t, &data, 2, 0, 0);
- CHECK_EQ(0x0000807fu, t.ldrh);
- CHECK_EQ(0xffff807fu, t.ldrsh);
- CHECK_EQ(0x8281807fu, t.ldr);
- dummy = CALL_GENERATED_CODE(isolate, f, &t, &data, 3, 0, 0);
+ uint64_t data = UINT64_C(0x84838281807F7E7D);
+ f.Call(&t, &data, 0, 0, 0);
+ CHECK_EQ(0x00007E7Du, t.ldrh);
+ CHECK_EQ(0x00007E7Du, t.ldrsh);
+ CHECK_EQ(0x807F7E7Du, t.ldr);
+ f.Call(&t, &data, 1, 0, 0);
+ CHECK_EQ(0x00007F7Eu, t.ldrh);
+ CHECK_EQ(0x00007F7Eu, t.ldrsh);
+ CHECK_EQ(0x81807F7Eu, t.ldr);
+ f.Call(&t, &data, 2, 0, 0);
+ CHECK_EQ(0x0000807Fu, t.ldrh);
+ CHECK_EQ(0xFFFF807Fu, t.ldrsh);
+ CHECK_EQ(0x8281807Fu, t.ldr);
+ f.Call(&t, &data, 3, 0, 0);
CHECK_EQ(0x00008180u, t.ldrh);
- CHECK_EQ(0xffff8180u, t.ldrsh);
+ CHECK_EQ(0xFFFF8180u, t.ldrsh);
CHECK_EQ(0x83828180u, t.ldr);
}
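// On the little-endian target this test requires, loading a halfword at byte
// offset k picks bytes k and k+1 of the 64-bit pattern, which is where the
// expected values above come from. Reference extraction (sketch):
#include <cstdint>
#include <cstring>

uint32_t LoadU16At(const void* base, int offset) {
  uint16_t v;
  std::memcpy(&v, static_cast<const char*>(base) + offset, sizeof(v));
  return v;  // e.g. offset 2 of 0x84838281807F7E7D yields 0x807F.
}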
@@ -3787,10 +3726,7 @@ TEST(unaligned_stores) {
OFStream os(stdout);
code->Print(os);
#endif
- F_ppiii f = FUNCTION_CAST<F_ppiii>(code->entry());
-
- Object* dummy = nullptr;
- USE(dummy);
+ auto f = GeneratedCode<F_ppiii>::FromCode(*code);
#ifndef V8_TARGET_LITTLE_ENDIAN
#error This test assumes a little-endian layout.
@@ -3798,30 +3734,30 @@ TEST(unaligned_stores) {
{
uint64_t strh = 0;
uint64_t str = 0;
- dummy = CALL_GENERATED_CODE(isolate, f, &strh, &str, 0, 0xfedcba98, 0);
- CHECK_EQ(UINT64_C(0x000000000000ba98), strh);
- CHECK_EQ(UINT64_C(0x00000000fedcba98), str);
+ f.Call(&strh, &str, 0, 0xFEDCBA98, 0);
+ CHECK_EQ(UINT64_C(0x000000000000BA98), strh);
+ CHECK_EQ(UINT64_C(0x00000000FEDCBA98), str);
}
{
uint64_t strh = 0;
uint64_t str = 0;
- dummy = CALL_GENERATED_CODE(isolate, f, &strh, &str, 1, 0xfedcba98, 0);
- CHECK_EQ(UINT64_C(0x0000000000ba9800), strh);
- CHECK_EQ(UINT64_C(0x000000fedcba9800), str);
+ f.Call(&strh, &str, 1, 0xFEDCBA98, 0);
+ CHECK_EQ(UINT64_C(0x0000000000BA9800), strh);
+ CHECK_EQ(UINT64_C(0x000000FEDCBA9800), str);
}
{
uint64_t strh = 0;
uint64_t str = 0;
- dummy = CALL_GENERATED_CODE(isolate, f, &strh, &str, 2, 0xfedcba98, 0);
- CHECK_EQ(UINT64_C(0x00000000ba980000), strh);
- CHECK_EQ(UINT64_C(0x0000fedcba980000), str);
+ f.Call(&strh, &str, 2, 0xFEDCBA98, 0);
+ CHECK_EQ(UINT64_C(0x00000000BA980000), strh);
+ CHECK_EQ(UINT64_C(0x0000FEDCBA980000), str);
}
{
uint64_t strh = 0;
uint64_t str = 0;
- dummy = CALL_GENERATED_CODE(isolate, f, &strh, &str, 3, 0xfedcba98, 0);
- CHECK_EQ(UINT64_C(0x000000ba98000000), strh);
- CHECK_EQ(UINT64_C(0x00fedcba98000000), str);
+ f.Call(&strh, &str, 3, 0xFEDCBA98, 0);
+ CHECK_EQ(UINT64_C(0x000000BA98000000), strh);
+ CHECK_EQ(UINT64_C(0x00FEDCBA98000000), str);
}
}
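// The mirror image for stores: writing 0xFEDCBA98 at byte offset k of a
// zeroed 64-bit slot shows up as the value shifted left by 8*k on a
// little-endian host. Sketch (offset must leave room for all four bytes):
#include <cstdint>
#include <cstring>

uint64_t StoreU32At(int offset, uint32_t value) {
  uint64_t slot = 0;
  std::memcpy(reinterpret_cast<char*>(&slot) + offset, &value, sizeof(value));
  return slot;  // offset 1 -> 0x000000FEDCBA9800, as checked above.
}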
@@ -3847,10 +3783,10 @@ TEST(vswp) {
uint64_t one = bit_cast<uint64_t>(1.0);
__ mov(r5, Operand(one >> 32));
- __ mov(r4, Operand(one & 0xffffffff));
+ __ mov(r4, Operand(one & 0xFFFFFFFF));
uint64_t minus_one = bit_cast<uint64_t>(-1.0);
__ mov(r7, Operand(minus_one >> 32));
- __ mov(r6, Operand(minus_one & 0xffffffff));
+ __ mov(r6, Operand(minus_one & 0xFFFFFFFF));
__ vmov(d0, r4, r5); // d0 = 1.0
__ vmov(d1, r6, r7); // d1 = -1.0
@@ -3868,7 +3804,7 @@ TEST(vswp) {
// q-register swap.
const uint32_t test_1 = 0x01234567;
- const uint32_t test_2 = 0x89abcdef;
+ const uint32_t test_2 = 0x89ABCDEF;
__ mov(r4, Operand(test_1));
__ mov(r5, Operand(test_2));
__ vdup(Neon32, q4, r4);
@@ -3890,9 +3826,8 @@ TEST(vswp) {
OFStream os(stdout);
code->Print(os);
#endif
- F_piiii f = FUNCTION_CAST<F_piiii>(code->entry());
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
- USE(dummy);
+ auto f = GeneratedCode<F_piiii>::FromCode(*code);
+ f.Call(&t, 0, 0, 0, 0);
CHECK_EQ(minus_one, t.vswp_d0);
CHECK_EQ(one, t.vswp_d1);
if (CpuFeatures::IsSupported(VFP32DREGS)) {
@@ -4008,9 +3943,8 @@ TEST(split_add_immediate) {
OFStream os(stdout);
code->Print(os);
#endif
- F_iiiii f = FUNCTION_CAST<F_iiiii>(code->entry());
- uint32_t res =
- reinterpret_cast<int>(CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ auto f = GeneratedCode<F_iiiii>::FromCode(*code);
+ uint32_t res = reinterpret_cast<int>(f.Call(0, 0, 0, 0, 0));
::printf("f() = 0x%x\n", res);
CHECK_EQ(0x12345678, res);
}
@@ -4029,9 +3963,8 @@ TEST(split_add_immediate) {
OFStream os(stdout);
code->Print(os);
#endif
- F_iiiii f = FUNCTION_CAST<F_iiiii>(code->entry());
- uint32_t res =
- reinterpret_cast<int>(CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ auto f = GeneratedCode<F_iiiii>::FromCode(*code);
+ uint32_t res = reinterpret_cast<int>(f.Call(0, 0, 0, 0, 0));
::printf("f() = 0x%x\n", res);
CHECK_EQ(0x12345678, res);
}
@@ -4053,9 +3986,8 @@ TEST(split_add_immediate) {
OFStream os(stdout);
code->Print(os);
#endif
- F_iiiii f = FUNCTION_CAST<F_iiiii>(code->entry());
- uint32_t res =
- reinterpret_cast<int>(CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ auto f = GeneratedCode<F_iiiii>::FromCode(*code);
+ uint32_t res = reinterpret_cast<int>(f.Call(0, 0, 0, 0, 0));
::printf("f() = 0x%x\n", res);
CHECK_EQ(0x12345678, res);
}
@@ -4087,15 +4019,15 @@ TEST(vabs_32) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- F_iiiii f = FUNCTION_CAST<F_iiiii>(AssembleCode([](Assembler& assm) {
+ auto f = AssembleCode<F_iiiii>([](Assembler& assm) {
__ vmov(s0, r0);
__ vabs(s0, s0);
__ vmov(r0, s0);
- }));
+ });
for (Float32 f32 : Float32Inputs()) {
- Float32 res = Float32::FromBits(reinterpret_cast<uint32_t>(
- CALL_GENERATED_CODE(isolate, f, f32.get_bits(), 0, 0, 0, 0)));
+ Float32 res = Float32::FromBits(
+ reinterpret_cast<uint32_t>(f.Call(f32.get_bits(), 0, 0, 0, 0)));
Float32 exp = Float32::FromBits(f32.get_bits() & ~(1 << 31));
CHECK_EQ(exp.get_bits(), res.get_bits());
}
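// The expected values here are pure bit manipulation on the IEEE layout:
// vabs clears the sign bit (bit 31 for a float), vneg flips it. Reference
// used by the checks (sketch):
#include <cstdint>

uint32_t AbsBits(uint32_t float_bits) { return float_bits & ~(1u << 31); }
uint32_t NegBits(uint32_t float_bits) { return float_bits ^ (1u << 31); }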
@@ -4105,17 +4037,16 @@ TEST(vabs_64) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- F_iiiii f = FUNCTION_CAST<F_iiiii>(AssembleCode([](Assembler& assm) {
+ auto f = AssembleCode<F_iiiii>([](Assembler& assm) {
__ vmov(d0, r0, r1);
__ vabs(d0, d0);
__ vmov(r1, r0, d0);
- }));
+ });
for (Float64 f64 : Float64Inputs()) {
uint32_t p0 = static_cast<uint32_t>(f64.get_bits());
uint32_t p1 = static_cast<uint32_t>(f64.get_bits() >> 32);
- uint32_t res = reinterpret_cast<uint32_t>(
- CALL_GENERATED_CODE(isolate, f, p0, p1, 0, 0, 0));
+ uint32_t res = reinterpret_cast<uint32_t>(f.Call(p0, p1, 0, 0, 0));
Float64 exp = Float64::FromBits(f64.get_bits() & ~(1ull << 63));
// We just get back the top word, so only compare that one.
CHECK_EQ(exp.get_bits() >> 32, res);
@@ -4126,15 +4057,15 @@ TEST(vneg_32) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- F_iiiii f = FUNCTION_CAST<F_iiiii>(AssembleCode([](Assembler& assm) {
+ auto f = AssembleCode<F_iiiii>([](Assembler& assm) {
__ vmov(s0, r0);
__ vneg(s0, s0);
__ vmov(r0, s0);
- }));
+ });
for (Float32 f32 : Float32Inputs()) {
- Float32 res = Float32::FromBits(reinterpret_cast<uint32_t>(
- CALL_GENERATED_CODE(isolate, f, f32.get_bits(), 0, 0, 0, 0)));
+ Float32 res = Float32::FromBits(
+ reinterpret_cast<uint32_t>(f.Call(f32.get_bits(), 0, 0, 0, 0)));
Float32 exp = Float32::FromBits(f32.get_bits() ^ (1 << 31));
CHECK_EQ(exp.get_bits(), res.get_bits());
}
@@ -4144,17 +4075,16 @@ TEST(vneg_64) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- F_iiiii f = FUNCTION_CAST<F_iiiii>(AssembleCode([](Assembler& assm) {
+ auto f = AssembleCode<F_iiiii>([](Assembler& assm) {
__ vmov(d0, r0, r1);
__ vneg(d0, d0);
__ vmov(r1, r0, d0);
- }));
+ });
for (Float64 f64 : Float64Inputs()) {
uint32_t p0 = static_cast<uint32_t>(f64.get_bits());
uint32_t p1 = static_cast<uint32_t>(f64.get_bits() >> 32);
- uint32_t res = reinterpret_cast<uint32_t>(
- CALL_GENERATED_CODE(isolate, f, p0, p1, 0, 0, 0));
+ uint32_t res = reinterpret_cast<uint32_t>(f.Call(p0, p1, 0, 0, 0));
Float64 exp = Float64::FromBits(f64.get_bits() ^ (1ull << 63));
// We just get back the top word, so only compare that one.
CHECK_EQ(exp.get_bits() >> 32, res);
diff --git a/deps/v8/test/cctest/test-assembler-arm64.cc b/deps/v8/test/cctest/test-assembler-arm64.cc
index 62f7ccf2c6..4fc80201d2 100644
--- a/deps/v8/test/cctest/test-assembler-arm64.cc
+++ b/deps/v8/test/cctest/test-assembler-arm64.cc
@@ -95,8 +95,7 @@ namespace internal {
// If more advanced computation is required before the assert, then access the
// RegisterDump named core directly:
//
-// CHECK_EQUAL_64(0x1234, core.xreg(0) & 0xffff);
-
+// CHECK_EQUAL_64(0x1234, core.xreg(0) & 0xFFFF);
#if 0 // TODO(all): enable.
static v8::Persistent<v8::Context> env;
@@ -150,7 +149,6 @@ static void InitializeVM() {
simulator.ResetState();
#define START_AFTER_RESET() \
- __ SetStackPointer(csp); \
__ PushCalleeSavedRegisters(); \
__ Debug("Start test.", __LINE__, TRACE_ENABLE | LOG_ALL);
@@ -192,7 +190,6 @@ static void InitializeVM() {
#define START_AFTER_RESET() \
- __ SetStackPointer(csp); \
__ PushCalleeSavedRegisters();
#define START() \
@@ -200,6 +197,7 @@ static void InitializeVM() {
START_AFTER_RESET();
#define RUN() \
+ MakeAssemblerBufferExecutable(buf, allocated); \
Assembler::FlushICache(isolate, buf, masm.SizeOfGeneratedCode()); \
{ \
void (*test_function)(void); \
@@ -213,7 +211,7 @@ static void InitializeVM() {
__ Ret(); \
__ GetCode(masm.isolate(), nullptr);
-#define TEARDOWN() CHECK(v8::base::OS::Free(buf, allocated));
+#define TEARDOWN() CHECK(v8::internal::FreePages(buf, allocated));
#endif // ifdef USE_SIMULATOR.
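// The RUN() change above flips the assembler buffer to executable before
// jumping into it, in line with W^X page permissions: the buffer is written
// as data, then remapped for execution. A POSIX sketch of that step, assuming
// a page-aligned buffer as the test allocates (V8's
// MakeAssemblerBufferExecutable does the platform equivalent):
#include <cstddef>
#include <sys/mman.h>

bool MakeExecutableSketch(void* buf, std::size_t size) {
  // Drop write permission and add execute on the generated-code pages.
  return mprotect(buf, size, PROT_READ | PROT_EXEC) == 0;
}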
@@ -266,20 +264,20 @@ TEST(stack_ops) {
__ Mov(x1, csp);
// Add extended to the csp, and move the result to a normal register.
- __ Mov(x17, 0xfff);
+ __ Mov(x17, 0xFFF);
__ Add(csp, csp, Operand(x17, SXTB));
__ Mov(x2, csp);
// Create an csp using a logical instruction, and move to normal register.
- __ Orr(csp, xzr, Operand(0x1fff));
+ __ Orr(csp, xzr, Operand(0x1FFF));
__ Mov(x3, csp);
// Write wcsp using a logical instruction.
- __ Orr(wcsp, wzr, Operand(0xfffffff8L));
+ __ Orr(wcsp, wzr, Operand(0xFFFFFFF8L));
__ Mov(x4, csp);
// Write csp, and read back wcsp.
- __ Orr(csp, xzr, Operand(0xfffffff8L));
+ __ Orr(csp, xzr, Operand(0xFFFFFFF8L));
__ Mov(w5, wcsp);
// Restore csp.
@@ -290,10 +288,10 @@ TEST(stack_ops) {
CHECK_EQUAL_64(0x1000, x0);
CHECK_EQUAL_64(0x1050, x1);
- CHECK_EQUAL_64(0x104f, x2);
- CHECK_EQUAL_64(0x1fff, x3);
- CHECK_EQUAL_64(0xfffffff8, x4);
- CHECK_EQUAL_64(0xfffffff8, x5);
+ CHECK_EQUAL_64(0x104F, x2);
+ CHECK_EQUAL_64(0x1FFF, x3);
+ CHECK_EQUAL_64(0xFFFFFFF8, x4);
+ CHECK_EQUAL_64(0xFFFFFFF8, x5);
TEARDOWN();
}
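// The SXTB operand form sign-extends the low byte of x17 before the add:
// 0xFFF truncates to 0xFF, which sign-extends to -1, so csp goes from 0x1050
// to 0x104F as checked above. Reference (sketch; relies on two's-complement
// narrowing, which every supported target provides):
#include <cstdint>

int64_t ExtendSXTB(uint64_t reg) {
  return static_cast<int64_t>(static_cast<int8_t>(reg & 0xFF));
}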
@@ -304,8 +302,8 @@ TEST(mvn) {
SETUP();
START();
- __ Mvn(w0, 0xfff);
- __ Mvn(x1, 0xfff);
+ __ Mvn(w0, 0xFFF);
+ __ Mvn(x1, 0xFFF);
__ Mvn(w2, Operand(w0, LSL, 1));
__ Mvn(x3, Operand(x1, LSL, 2));
__ Mvn(w4, Operand(w0, LSR, 3));
@@ -324,22 +322,22 @@ TEST(mvn) {
RUN();
- CHECK_EQUAL_64(0xfffff000, x0);
- CHECK_EQUAL_64(0xfffffffffffff000UL, x1);
- CHECK_EQUAL_64(0x00001fff, x2);
- CHECK_EQUAL_64(0x0000000000003fffUL, x3);
- CHECK_EQUAL_64(0xe00001ff, x4);
- CHECK_EQUAL_64(0xf0000000000000ffUL, x5);
+ CHECK_EQUAL_64(0xFFFFF000, x0);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFFF000UL, x1);
+ CHECK_EQUAL_64(0x00001FFF, x2);
+ CHECK_EQUAL_64(0x0000000000003FFFUL, x3);
+ CHECK_EQUAL_64(0xE00001FF, x4);
+ CHECK_EQUAL_64(0xF0000000000000FFUL, x5);
CHECK_EQUAL_64(0x00000001, x6);
CHECK_EQUAL_64(0x0, x7);
- CHECK_EQUAL_64(0x7ff80000, x8);
- CHECK_EQUAL_64(0x3ffc000000000000UL, x9);
- CHECK_EQUAL_64(0xffffff00, x10);
+ CHECK_EQUAL_64(0x7FF80000, x8);
+ CHECK_EQUAL_64(0x3FFC000000000000UL, x9);
+ CHECK_EQUAL_64(0xFFFFFF00, x10);
CHECK_EQUAL_64(0x0000000000000001UL, x11);
- CHECK_EQUAL_64(0xffff8003, x12);
- CHECK_EQUAL_64(0xffffffffffff0007UL, x13);
- CHECK_EQUAL_64(0xfffffffffffe000fUL, x14);
- CHECK_EQUAL_64(0xfffffffffffe000fUL, x15);
+ CHECK_EQUAL_64(0xFFFF8003, x12);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFF0007UL, x13);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFE000FUL, x14);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFE000FUL, x15);
TEARDOWN();
}
@@ -350,35 +348,35 @@ TEST(mov) {
SETUP();
START();
- __ Mov(x0, 0xffffffffffffffffL);
- __ Mov(x1, 0xffffffffffffffffL);
- __ Mov(x2, 0xffffffffffffffffL);
- __ Mov(x3, 0xffffffffffffffffL);
+ __ Mov(x0, 0xFFFFFFFFFFFFFFFFL);
+ __ Mov(x1, 0xFFFFFFFFFFFFFFFFL);
+ __ Mov(x2, 0xFFFFFFFFFFFFFFFFL);
+ __ Mov(x3, 0xFFFFFFFFFFFFFFFFL);
- __ Mov(x0, 0x0123456789abcdefL);
+ __ Mov(x0, 0x0123456789ABCDEFL);
- __ movz(x1, 0xabcdL << 16);
- __ movk(x2, 0xabcdL << 32);
- __ movn(x3, 0xabcdL << 48);
+ __ movz(x1, 0xABCDL << 16);
+ __ movk(x2, 0xABCDL << 32);
+ __ movn(x3, 0xABCDL << 48);
- __ Mov(x4, 0x0123456789abcdefL);
+ __ Mov(x4, 0x0123456789ABCDEFL);
__ Mov(x5, x4);
__ Mov(w6, -1);
// Test that moves back to the same register have the desired effect. This
// is a no-op for X registers, and a truncation for W registers.
- __ Mov(x7, 0x0123456789abcdefL);
+ __ Mov(x7, 0x0123456789ABCDEFL);
__ Mov(x7, x7);
- __ Mov(x8, 0x0123456789abcdefL);
+ __ Mov(x8, 0x0123456789ABCDEFL);
__ Mov(w8, w8);
- __ Mov(x9, 0x0123456789abcdefL);
+ __ Mov(x9, 0x0123456789ABCDEFL);
__ Mov(x9, Operand(x9));
- __ Mov(x10, 0x0123456789abcdefL);
+ __ Mov(x10, 0x0123456789ABCDEFL);
__ Mov(w10, Operand(w10));
- __ Mov(w11, 0xfff);
- __ Mov(x12, 0xfff);
+ __ Mov(w11, 0xFFF);
+ __ Mov(x12, 0xFFF);
__ Mov(w13, Operand(w11, LSL, 1));
__ Mov(x14, Operand(x12, LSL, 2));
__ Mov(w15, Operand(w11, LSR, 3));
@@ -396,31 +394,31 @@ TEST(mov) {
RUN();
- CHECK_EQUAL_64(0x0123456789abcdefL, x0);
- CHECK_EQUAL_64(0x00000000abcd0000L, x1);
- CHECK_EQUAL_64(0xffffabcdffffffffL, x2);
- CHECK_EQUAL_64(0x5432ffffffffffffL, x3);
+ CHECK_EQUAL_64(0x0123456789ABCDEFL, x0);
+ CHECK_EQUAL_64(0x00000000ABCD0000L, x1);
+ CHECK_EQUAL_64(0xFFFFABCDFFFFFFFFL, x2);
+ CHECK_EQUAL_64(0x5432FFFFFFFFFFFFL, x3);
CHECK_EQUAL_64(x4, x5);
CHECK_EQUAL_32(-1, w6);
- CHECK_EQUAL_64(0x0123456789abcdefL, x7);
- CHECK_EQUAL_32(0x89abcdefL, w8);
- CHECK_EQUAL_64(0x0123456789abcdefL, x9);
- CHECK_EQUAL_32(0x89abcdefL, w10);
- CHECK_EQUAL_64(0x00000fff, x11);
- CHECK_EQUAL_64(0x0000000000000fffUL, x12);
- CHECK_EQUAL_64(0x00001ffe, x13);
- CHECK_EQUAL_64(0x0000000000003ffcUL, x14);
- CHECK_EQUAL_64(0x000001ff, x15);
- CHECK_EQUAL_64(0x00000000000000ffUL, x18);
+ CHECK_EQUAL_64(0x0123456789ABCDEFL, x7);
+ CHECK_EQUAL_32(0x89ABCDEFL, w8);
+ CHECK_EQUAL_64(0x0123456789ABCDEFL, x9);
+ CHECK_EQUAL_32(0x89ABCDEFL, w10);
+ CHECK_EQUAL_64(0x00000FFF, x11);
+ CHECK_EQUAL_64(0x0000000000000FFFUL, x12);
+ CHECK_EQUAL_64(0x00001FFE, x13);
+ CHECK_EQUAL_64(0x0000000000003FFCUL, x14);
+ CHECK_EQUAL_64(0x000001FF, x15);
+ CHECK_EQUAL_64(0x00000000000000FFUL, x18);
CHECK_EQUAL_64(0x00000001, x19);
CHECK_EQUAL_64(0x0, x20);
- CHECK_EQUAL_64(0x7ff80000, x21);
- CHECK_EQUAL_64(0x3ffc000000000000UL, x22);
- CHECK_EQUAL_64(0x000000fe, x23);
- CHECK_EQUAL_64(0xfffffffffffffffcUL, x24);
- CHECK_EQUAL_64(0x00007ff8, x25);
- CHECK_EQUAL_64(0x000000000000fff0UL, x26);
- CHECK_EQUAL_64(0x000000000001ffe0UL, x27);
+ CHECK_EQUAL_64(0x7FF80000, x21);
+ CHECK_EQUAL_64(0x3FFC000000000000UL, x22);
+ CHECK_EQUAL_64(0x000000FE, x23);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFCUL, x24);
+ CHECK_EQUAL_64(0x00007FF8, x25);
+ CHECK_EQUAL_64(0x000000000000FFF0UL, x26);
+ CHECK_EQUAL_64(0x000000000001FFE0UL, x27);
TEARDOWN();
}
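// movz zeroes the register and writes one 16-bit halfword, movk keeps the
// other halfwords, and movn writes the bitwise NOT, which is what the x1-x3
// expectations above encode. Reference semantics (sketch; the assembler API
// takes the pre-shifted immediate, here split into halfword index and value):
#include <cstdint>

uint64_t Movz(unsigned hw, uint64_t imm16) { return imm16 << (16 * hw); }
uint64_t Movk(uint64_t old, unsigned hw, uint64_t imm16) {
  uint64_t mask = uint64_t{0xFFFF} << (16 * hw);
  return (old & ~mask) | (imm16 << (16 * hw));  // Keep the other halfwords.
}
uint64_t Movn(unsigned hw, uint64_t imm16) { return ~(imm16 << (16 * hw)); }
// Movk(0xFFFFFFFFFFFFFFFF, 2, 0xABCD) == 0xFFFFABCDFFFFFFFF;
// Movn(3, 0xABCD) == 0x5432FFFFFFFFFFFF.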
@@ -431,29 +429,29 @@ TEST(mov_imm_w) {
SETUP();
START();
- __ Mov(w0, 0xffffffffL);
- __ Mov(w1, 0xffff1234L);
- __ Mov(w2, 0x1234ffffL);
+ __ Mov(w0, 0xFFFFFFFFL);
+ __ Mov(w1, 0xFFFF1234L);
+ __ Mov(w2, 0x1234FFFFL);
__ Mov(w3, 0x00000000L);
__ Mov(w4, 0x00001234L);
__ Mov(w5, 0x12340000L);
__ Mov(w6, 0x12345678L);
__ Mov(w7, (int32_t)0x80000000);
- __ Mov(w8, (int32_t)0xffff0000);
+ __ Mov(w8, (int32_t)0xFFFF0000);
__ Mov(w9, kWMinInt);
END();
RUN();
- CHECK_EQUAL_64(0xffffffffL, x0);
- CHECK_EQUAL_64(0xffff1234L, x1);
- CHECK_EQUAL_64(0x1234ffffL, x2);
+ CHECK_EQUAL_64(0xFFFFFFFFL, x0);
+ CHECK_EQUAL_64(0xFFFF1234L, x1);
+ CHECK_EQUAL_64(0x1234FFFFL, x2);
CHECK_EQUAL_64(0x00000000L, x3);
CHECK_EQUAL_64(0x00001234L, x4);
CHECK_EQUAL_64(0x12340000L, x5);
CHECK_EQUAL_64(0x12345678L, x6);
CHECK_EQUAL_64(0x80000000L, x7);
- CHECK_EQUAL_64(0xffff0000L, x8);
+ CHECK_EQUAL_64(0xFFFF0000L, x8);
CHECK_EQUAL_32(kWMinInt, w9);
TEARDOWN();
@@ -465,18 +463,18 @@ TEST(mov_imm_x) {
SETUP();
START();
- __ Mov(x0, 0xffffffffffffffffL);
- __ Mov(x1, 0xffffffffffff1234L);
- __ Mov(x2, 0xffffffff12345678L);
- __ Mov(x3, 0xffff1234ffff5678L);
- __ Mov(x4, 0x1234ffffffff5678L);
- __ Mov(x5, 0x1234ffff5678ffffL);
- __ Mov(x6, 0x12345678ffffffffL);
- __ Mov(x7, 0x1234ffffffffffffL);
- __ Mov(x8, 0x123456789abcffffL);
- __ Mov(x9, 0x12345678ffff9abcL);
- __ Mov(x10, 0x1234ffff56789abcL);
- __ Mov(x11, 0xffff123456789abcL);
+ __ Mov(x0, 0xFFFFFFFFFFFFFFFFL);
+ __ Mov(x1, 0xFFFFFFFFFFFF1234L);
+ __ Mov(x2, 0xFFFFFFFF12345678L);
+ __ Mov(x3, 0xFFFF1234FFFF5678L);
+ __ Mov(x4, 0x1234FFFFFFFF5678L);
+ __ Mov(x5, 0x1234FFFF5678FFFFL);
+ __ Mov(x6, 0x12345678FFFFFFFFL);
+ __ Mov(x7, 0x1234FFFFFFFFFFFFL);
+ __ Mov(x8, 0x123456789ABCFFFFL);
+ __ Mov(x9, 0x12345678FFFF9ABCL);
+ __ Mov(x10, 0x1234FFFF56789ABCL);
+ __ Mov(x11, 0xFFFF123456789ABCL);
__ Mov(x12, 0x0000000000000000L);
__ Mov(x13, 0x0000000000001234L);
__ Mov(x14, 0x0000000012345678L);
@@ -485,28 +483,28 @@ TEST(mov_imm_x) {
__ Mov(x19, 0x1234000056780000L);
__ Mov(x20, 0x1234567800000000L);
__ Mov(x21, 0x1234000000000000L);
- __ Mov(x22, 0x123456789abc0000L);
- __ Mov(x23, 0x1234567800009abcL);
- __ Mov(x24, 0x1234000056789abcL);
- __ Mov(x25, 0x0000123456789abcL);
- __ Mov(x26, 0x123456789abcdef0L);
- __ Mov(x27, 0xffff000000000001L);
- __ Mov(x28, 0x8000ffff00000000L);
- END();
-
- RUN();
-
- CHECK_EQUAL_64(0xffffffffffff1234L, x1);
- CHECK_EQUAL_64(0xffffffff12345678L, x2);
- CHECK_EQUAL_64(0xffff1234ffff5678L, x3);
- CHECK_EQUAL_64(0x1234ffffffff5678L, x4);
- CHECK_EQUAL_64(0x1234ffff5678ffffL, x5);
- CHECK_EQUAL_64(0x12345678ffffffffL, x6);
- CHECK_EQUAL_64(0x1234ffffffffffffL, x7);
- CHECK_EQUAL_64(0x123456789abcffffL, x8);
- CHECK_EQUAL_64(0x12345678ffff9abcL, x9);
- CHECK_EQUAL_64(0x1234ffff56789abcL, x10);
- CHECK_EQUAL_64(0xffff123456789abcL, x11);
+ __ Mov(x22, 0x123456789ABC0000L);
+ __ Mov(x23, 0x1234567800009ABCL);
+ __ Mov(x24, 0x1234000056789ABCL);
+ __ Mov(x25, 0x0000123456789ABCL);
+ __ Mov(x26, 0x123456789ABCDEF0L);
+ __ Mov(x27, 0xFFFF000000000001L);
+ __ Mov(x28, 0x8000FFFF00000000L);
+ END();
+
+ RUN();
+
+ CHECK_EQUAL_64(0xFFFFFFFFFFFF1234L, x1);
+ CHECK_EQUAL_64(0xFFFFFFFF12345678L, x2);
+ CHECK_EQUAL_64(0xFFFF1234FFFF5678L, x3);
+ CHECK_EQUAL_64(0x1234FFFFFFFF5678L, x4);
+ CHECK_EQUAL_64(0x1234FFFF5678FFFFL, x5);
+ CHECK_EQUAL_64(0x12345678FFFFFFFFL, x6);
+ CHECK_EQUAL_64(0x1234FFFFFFFFFFFFL, x7);
+ CHECK_EQUAL_64(0x123456789ABCFFFFL, x8);
+ CHECK_EQUAL_64(0x12345678FFFF9ABCL, x9);
+ CHECK_EQUAL_64(0x1234FFFF56789ABCL, x10);
+ CHECK_EQUAL_64(0xFFFF123456789ABCL, x11);
CHECK_EQUAL_64(0x0000000000000000L, x12);
CHECK_EQUAL_64(0x0000000000001234L, x13);
CHECK_EQUAL_64(0x0000000012345678L, x14);
@@ -515,13 +513,13 @@ TEST(mov_imm_x) {
CHECK_EQUAL_64(0x1234000056780000L, x19);
CHECK_EQUAL_64(0x1234567800000000L, x20);
CHECK_EQUAL_64(0x1234000000000000L, x21);
- CHECK_EQUAL_64(0x123456789abc0000L, x22);
- CHECK_EQUAL_64(0x1234567800009abcL, x23);
- CHECK_EQUAL_64(0x1234000056789abcL, x24);
- CHECK_EQUAL_64(0x0000123456789abcL, x25);
- CHECK_EQUAL_64(0x123456789abcdef0L, x26);
- CHECK_EQUAL_64(0xffff000000000001L, x27);
- CHECK_EQUAL_64(0x8000ffff00000000L, x28);
+ CHECK_EQUAL_64(0x123456789ABC0000L, x22);
+ CHECK_EQUAL_64(0x1234567800009ABCL, x23);
+ CHECK_EQUAL_64(0x1234000056789ABCL, x24);
+ CHECK_EQUAL_64(0x0000123456789ABCL, x25);
+ CHECK_EQUAL_64(0x123456789ABCDEF0L, x26);
+ CHECK_EQUAL_64(0xFFFF000000000001L, x27);
+ CHECK_EQUAL_64(0x8000FFFF00000000L, x28);
TEARDOWN();
}
@@ -532,8 +530,8 @@ TEST(orr) {
SETUP();
START();
- __ Mov(x0, 0xf0f0);
- __ Mov(x1, 0xf00000ff);
+ __ Mov(x0, 0xF0F0);
+ __ Mov(x1, 0xF00000FF);
__ Orr(x2, x0, Operand(x1));
__ Orr(w3, w0, Operand(w1, LSL, 28));
@@ -543,22 +541,22 @@ TEST(orr) {
__ Orr(x7, x0, Operand(x1, ASR, 4));
__ Orr(w8, w0, Operand(w1, ROR, 12));
__ Orr(x9, x0, Operand(x1, ROR, 12));
- __ Orr(w10, w0, Operand(0xf));
- __ Orr(x11, x0, Operand(0xf0000000f0000000L));
+ __ Orr(w10, w0, Operand(0xF));
+ __ Orr(x11, x0, Operand(0xF0000000F0000000L));
END();
RUN();
- CHECK_EQUAL_64(0xf000f0ff, x2);
- CHECK_EQUAL_64(0xf000f0f0, x3);
- CHECK_EQUAL_64(0xf00000ff0000f0f0L, x4);
- CHECK_EQUAL_64(0x0f00f0ff, x5);
- CHECK_EQUAL_64(0xff00f0ff, x6);
- CHECK_EQUAL_64(0x0f00f0ff, x7);
- CHECK_EQUAL_64(0x0ffff0f0, x8);
- CHECK_EQUAL_64(0x0ff00000000ff0f0L, x9);
- CHECK_EQUAL_64(0xf0ff, x10);
- CHECK_EQUAL_64(0xf0000000f000f0f0L, x11);
+ CHECK_EQUAL_64(0xF000F0FF, x2);
+ CHECK_EQUAL_64(0xF000F0F0, x3);
+ CHECK_EQUAL_64(0xF00000FF0000F0F0L, x4);
+ CHECK_EQUAL_64(0x0F00F0FF, x5);
+ CHECK_EQUAL_64(0xFF00F0FF, x6);
+ CHECK_EQUAL_64(0x0F00F0FF, x7);
+ CHECK_EQUAL_64(0x0FFFF0F0, x8);
+ CHECK_EQUAL_64(0x0FF00000000FF0F0L, x9);
+ CHECK_EQUAL_64(0xF0FF, x10);
+ CHECK_EQUAL_64(0xF0000000F000F0F0L, x11);
TEARDOWN();
}
@@ -587,9 +585,9 @@ TEST(orr_extend) {
CHECK_EQUAL_64(0x00010101, x7);
CHECK_EQUAL_64(0x00020201, x8);
CHECK_EQUAL_64(0x0000000400040401UL, x9);
- CHECK_EQUAL_64(0x00000000ffffff81UL, x10);
- CHECK_EQUAL_64(0xffffffffffff0101UL, x11);
- CHECK_EQUAL_64(0xfffffffe00020201UL, x12);
+ CHECK_EQUAL_64(0x00000000FFFFFF81UL, x10);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFF0101UL, x11);
+ CHECK_EQUAL_64(0xFFFFFFFE00020201UL, x12);
CHECK_EQUAL_64(0x0000000400040401UL, x13);
TEARDOWN();
@@ -602,10 +600,10 @@ TEST(bitwise_wide_imm) {
START();
__ Mov(x0, 0);
- __ Mov(x1, 0xf0f0f0f0f0f0f0f0UL);
+ __ Mov(x1, 0xF0F0F0F0F0F0F0F0UL);
- __ Orr(x10, x0, Operand(0x1234567890abcdefUL));
- __ Orr(w11, w1, Operand(0x90abcdef));
+ __ Orr(x10, x0, Operand(0x1234567890ABCDEFUL));
+ __ Orr(w11, w1, Operand(0x90ABCDEF));
__ Orr(w12, w0, kWMinInt);
__ Eor(w13, w0, kWMinInt);
@@ -614,9 +612,9 @@ TEST(bitwise_wide_imm) {
RUN();
CHECK_EQUAL_64(0, x0);
- CHECK_EQUAL_64(0xf0f0f0f0f0f0f0f0UL, x1);
- CHECK_EQUAL_64(0x1234567890abcdefUL, x10);
- CHECK_EQUAL_64(0xf0fbfdffUL, x11);
+ CHECK_EQUAL_64(0xF0F0F0F0F0F0F0F0UL, x1);
+ CHECK_EQUAL_64(0x1234567890ABCDEFUL, x10);
+ CHECK_EQUAL_64(0xF0FBFDFFUL, x11);
CHECK_EQUAL_32(kWMinInt, w12);
CHECK_EQUAL_32(kWMinInt, w13);
@@ -629,8 +627,8 @@ TEST(orn) {
SETUP();
START();
- __ Mov(x0, 0xf0f0);
- __ Mov(x1, 0xf00000ff);
+ __ Mov(x0, 0xF0F0);
+ __ Mov(x1, 0xF00000FF);
__ Orn(x2, x0, Operand(x1));
__ Orn(w3, w0, Operand(w1, LSL, 4));
@@ -640,22 +638,22 @@ TEST(orn) {
__ Orn(x7, x0, Operand(x1, ASR, 1));
__ Orn(w8, w0, Operand(w1, ROR, 16));
__ Orn(x9, x0, Operand(x1, ROR, 16));
- __ Orn(w10, w0, Operand(0xffff));
- __ Orn(x11, x0, Operand(0xffff0000ffffL));
+ __ Orn(w10, w0, Operand(0xFFFF));
+ __ Orn(x11, x0, Operand(0xFFFF0000FFFFL));
END();
RUN();
- CHECK_EQUAL_64(0xffffffff0ffffff0L, x2);
- CHECK_EQUAL_64(0xfffff0ff, x3);
- CHECK_EQUAL_64(0xfffffff0fffff0ffL, x4);
- CHECK_EQUAL_64(0xffffffff87fffff0L, x5);
- CHECK_EQUAL_64(0x07fffff0, x6);
- CHECK_EQUAL_64(0xffffffff87fffff0L, x7);
- CHECK_EQUAL_64(0xff00ffff, x8);
- CHECK_EQUAL_64(0xff00ffffffffffffL, x9);
- CHECK_EQUAL_64(0xfffff0f0, x10);
- CHECK_EQUAL_64(0xffff0000fffff0f0L, x11);
+ CHECK_EQUAL_64(0xFFFFFFFF0FFFFFF0L, x2);
+ CHECK_EQUAL_64(0xFFFFF0FF, x3);
+ CHECK_EQUAL_64(0xFFFFFFF0FFFFF0FFL, x4);
+ CHECK_EQUAL_64(0xFFFFFFFF87FFFFF0L, x5);
+ CHECK_EQUAL_64(0x07FFFFF0, x6);
+ CHECK_EQUAL_64(0xFFFFFFFF87FFFFF0L, x7);
+ CHECK_EQUAL_64(0xFF00FFFF, x8);
+ CHECK_EQUAL_64(0xFF00FFFFFFFFFFFFL, x9);
+ CHECK_EQUAL_64(0xFFFFF0F0, x10);
+ CHECK_EQUAL_64(0xFFFF0000FFFFF0F0L, x11);
TEARDOWN();
}
@@ -680,14 +678,14 @@ TEST(orn_extend) {
RUN();
- CHECK_EQUAL_64(0xffffff7f, x6);
- CHECK_EQUAL_64(0xfffffffffffefefdUL, x7);
- CHECK_EQUAL_64(0xfffdfdfb, x8);
- CHECK_EQUAL_64(0xfffffffbfffbfbf7UL, x9);
- CHECK_EQUAL_64(0x0000007f, x10);
- CHECK_EQUAL_64(0x0000fefd, x11);
- CHECK_EQUAL_64(0x00000001fffdfdfbUL, x12);
- CHECK_EQUAL_64(0xfffffffbfffbfbf7UL, x13);
+ CHECK_EQUAL_64(0xFFFFFF7F, x6);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFEFEFDUL, x7);
+ CHECK_EQUAL_64(0xFFFDFDFB, x8);
+ CHECK_EQUAL_64(0xFFFFFFFBFFFBFBF7UL, x9);
+ CHECK_EQUAL_64(0x0000007F, x10);
+ CHECK_EQUAL_64(0x0000FEFD, x11);
+ CHECK_EQUAL_64(0x00000001FFFDFDFBUL, x12);
+ CHECK_EQUAL_64(0xFFFFFFFBFFFBFBF7UL, x13);
TEARDOWN();
}
@@ -698,8 +696,8 @@ TEST(and_) {
SETUP();
START();
- __ Mov(x0, 0xfff0);
- __ Mov(x1, 0xf00000ff);
+ __ Mov(x0, 0xFFF0);
+ __ Mov(x1, 0xF00000FF);
__ And(x2, x0, Operand(x1));
__ And(w3, w0, Operand(w1, LSL, 4));
@@ -709,22 +707,22 @@ TEST(and_) {
__ And(x7, x0, Operand(x1, ASR, 20));
__ And(w8, w0, Operand(w1, ROR, 28));
__ And(x9, x0, Operand(x1, ROR, 28));
- __ And(w10, w0, Operand(0xff00));
- __ And(x11, x0, Operand(0xff));
+ __ And(w10, w0, Operand(0xFF00));
+ __ And(x11, x0, Operand(0xFF));
END();
RUN();
- CHECK_EQUAL_64(0x000000f0, x2);
- CHECK_EQUAL_64(0x00000ff0, x3);
- CHECK_EQUAL_64(0x00000ff0, x4);
+ CHECK_EQUAL_64(0x000000F0, x2);
+ CHECK_EQUAL_64(0x00000FF0, x3);
+ CHECK_EQUAL_64(0x00000FF0, x4);
CHECK_EQUAL_64(0x00000070, x5);
- CHECK_EQUAL_64(0x0000ff00, x6);
- CHECK_EQUAL_64(0x00000f00, x7);
- CHECK_EQUAL_64(0x00000ff0, x8);
+ CHECK_EQUAL_64(0x0000FF00, x6);
+ CHECK_EQUAL_64(0x00000F00, x7);
+ CHECK_EQUAL_64(0x00000FF0, x8);
CHECK_EQUAL_64(0x00000000, x9);
- CHECK_EQUAL_64(0x0000ff00, x10);
- CHECK_EQUAL_64(0x000000f0, x11);
+ CHECK_EQUAL_64(0x0000FF00, x10);
+ CHECK_EQUAL_64(0x000000F0, x11);
TEARDOWN();
}
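// The shifted-operand forms apply the shift to the second source before the
// logical operation: And(w3, w0, Operand(w1, LSL, 4)) computes w0 & (w1 << 4),
// i.e. 0xFFF0 & 0x00000FF0 = 0x00000FF0 above. Sketch:
#include <cstdint>

uint32_t AndShiftedLsl(uint32_t a, uint32_t b, int shift) {
  return a & (b << shift);  // LSL operand form: shift left, then AND.
}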
@@ -735,7 +733,7 @@ TEST(and_extend) {
SETUP();
START();
- __ Mov(x0, 0xffffffffffffffffUL);
+ __ Mov(x0, 0xFFFFFFFFFFFFFFFFUL);
__ Mov(x1, 0x8000000080008081UL);
__ And(w6, w0, Operand(w1, UXTB));
__ And(x7, x0, Operand(x1, UXTH, 1));
@@ -753,9 +751,9 @@ TEST(and_extend) {
CHECK_EQUAL_64(0x00010102, x7);
CHECK_EQUAL_64(0x00020204, x8);
CHECK_EQUAL_64(0x0000000400040408UL, x9);
- CHECK_EQUAL_64(0xffffff81, x10);
- CHECK_EQUAL_64(0xffffffffffff0102UL, x11);
- CHECK_EQUAL_64(0xfffffffe00020204UL, x12);
+ CHECK_EQUAL_64(0xFFFFFF81, x10);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFF0102UL, x11);
+ CHECK_EQUAL_64(0xFFFFFFFE00020204UL, x12);
CHECK_EQUAL_64(0x0000000400040408UL, x13);
TEARDOWN();
@@ -767,18 +765,18 @@ TEST(ands) {
SETUP();
START();
- __ Mov(x1, 0xf00000ff);
+ __ Mov(x1, 0xF00000FF);
__ Ands(w0, w1, Operand(w1));
END();
RUN();
CHECK_EQUAL_NZCV(NFlag);
- CHECK_EQUAL_64(0xf00000ff, x0);
+ CHECK_EQUAL_64(0xF00000FF, x0);
START();
- __ Mov(x0, 0xfff0);
- __ Mov(x1, 0xf00000ff);
+ __ Mov(x0, 0xFFF0);
+ __ Mov(x1, 0xF00000FF);
__ Ands(w0, w0, Operand(w1, LSR, 4));
END();
@@ -799,8 +797,8 @@ TEST(ands) {
CHECK_EQUAL_64(0x8000000000000000L, x0);
START();
- __ Mov(x0, 0xfff0);
- __ Ands(w0, w0, Operand(0xf));
+ __ Mov(x0, 0xFFF0);
+ __ Ands(w0, w0, Operand(0xF));
END();
RUN();
@@ -809,7 +807,7 @@ TEST(ands) {
CHECK_EQUAL_64(0x00000000, x0);
START();
- __ Mov(x0, 0xff000000);
+ __ Mov(x0, 0xFF000000);
__ Ands(w0, w0, Operand(0x80000000));
END();
@@ -827,8 +825,8 @@ TEST(bic) {
SETUP();
START();
- __ Mov(x0, 0xfff0);
- __ Mov(x1, 0xf00000ff);
+ __ Mov(x0, 0xFFF0);
+ __ Mov(x1, 0xF00000FF);
__ Bic(x2, x0, Operand(x1));
__ Bic(w3, w0, Operand(w1, LSL, 4));
@@ -838,34 +836,32 @@ TEST(bic) {
__ Bic(x7, x0, Operand(x1, ASR, 20));
__ Bic(w8, w0, Operand(w1, ROR, 28));
__ Bic(x9, x0, Operand(x1, ROR, 24));
- __ Bic(x10, x0, Operand(0x1f));
+ __ Bic(x10, x0, Operand(0x1F));
__ Bic(x11, x0, Operand(0x100));
// Test bic into csp when the constant cannot be encoded in the immediate
// field.
// Use x20 to preserve csp. We check the result via x21 because the
// test infrastructure requires that csp be restored to its original value.
- __ SetStackPointer(jssp); // Change stack pointer to avoid consistency check.
__ Mov(x20, csp);
- __ Mov(x0, 0xffffff);
- __ Bic(csp, x0, Operand(0xabcdef));
+ __ Mov(x0, 0xFFFFFF);
+ __ Bic(csp, x0, Operand(0xABCDEF));
__ Mov(x21, csp);
__ Mov(csp, x20);
- __ SetStackPointer(csp); // Restore stack pointer.
END();
RUN();
- CHECK_EQUAL_64(0x0000ff00, x2);
- CHECK_EQUAL_64(0x0000f000, x3);
- CHECK_EQUAL_64(0x0000f000, x4);
- CHECK_EQUAL_64(0x0000ff80, x5);
- CHECK_EQUAL_64(0x000000f0, x6);
- CHECK_EQUAL_64(0x0000f0f0, x7);
- CHECK_EQUAL_64(0x0000f000, x8);
- CHECK_EQUAL_64(0x0000ff00, x9);
- CHECK_EQUAL_64(0x0000ffe0, x10);
- CHECK_EQUAL_64(0x0000fef0, x11);
+ CHECK_EQUAL_64(0x0000FF00, x2);
+ CHECK_EQUAL_64(0x0000F000, x3);
+ CHECK_EQUAL_64(0x0000F000, x4);
+ CHECK_EQUAL_64(0x0000FF80, x5);
+ CHECK_EQUAL_64(0x000000F0, x6);
+ CHECK_EQUAL_64(0x0000F0F0, x7);
+ CHECK_EQUAL_64(0x0000F000, x8);
+ CHECK_EQUAL_64(0x0000FF00, x9);
+ CHECK_EQUAL_64(0x0000FFE0, x10);
+ CHECK_EQUAL_64(0x0000FEF0, x11);
CHECK_EQUAL_64(0x543210, x21);
@@ -878,7 +874,7 @@ TEST(bic_extend) {
SETUP();
START();
- __ Mov(x0, 0xffffffffffffffffUL);
+ __ Mov(x0, 0xFFFFFFFFFFFFFFFFUL);
__ Mov(x1, 0x8000000080008081UL);
__ Bic(w6, w0, Operand(w1, UXTB));
__ Bic(x7, x0, Operand(x1, UXTH, 1));
@@ -892,14 +888,14 @@ TEST(bic_extend) {
RUN();
- CHECK_EQUAL_64(0xffffff7e, x6);
- CHECK_EQUAL_64(0xfffffffffffefefdUL, x7);
- CHECK_EQUAL_64(0xfffdfdfb, x8);
- CHECK_EQUAL_64(0xfffffffbfffbfbf7UL, x9);
- CHECK_EQUAL_64(0x0000007e, x10);
- CHECK_EQUAL_64(0x0000fefd, x11);
- CHECK_EQUAL_64(0x00000001fffdfdfbUL, x12);
- CHECK_EQUAL_64(0xfffffffbfffbfbf7UL, x13);
+ CHECK_EQUAL_64(0xFFFFFF7E, x6);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFEFEFDUL, x7);
+ CHECK_EQUAL_64(0xFFFDFDFB, x8);
+ CHECK_EQUAL_64(0xFFFFFFFBFFFBFBF7UL, x9);
+ CHECK_EQUAL_64(0x0000007E, x10);
+ CHECK_EQUAL_64(0x0000FEFD, x11);
+ CHECK_EQUAL_64(0x00000001FFFDFDFBUL, x12);
+ CHECK_EQUAL_64(0xFFFFFFFBFFFBFBF7UL, x13);
TEARDOWN();
}
@@ -910,7 +906,7 @@ TEST(bics) {
SETUP();
START();
- __ Mov(x1, 0xffff);
+ __ Mov(x1, 0xFFFF);
__ Bics(w0, w1, Operand(w1));
END();
@@ -920,7 +916,7 @@ TEST(bics) {
CHECK_EQUAL_64(0x00000000, x0);
START();
- __ Mov(x0, 0xffffffff);
+ __ Mov(x0, 0xFFFFFFFF);
__ Bics(w0, w0, Operand(w0, LSR, 1));
END();
@@ -941,8 +937,8 @@ TEST(bics) {
CHECK_EQUAL_64(0x00000000, x0);
START();
- __ Mov(x0, 0xffffffffffffffffL);
- __ Bics(x0, x0, Operand(0x7fffffffffffffffL));
+ __ Mov(x0, 0xFFFFFFFFFFFFFFFFL);
+ __ Bics(x0, x0, Operand(0x7FFFFFFFFFFFFFFFL));
END();
RUN();
@@ -951,8 +947,8 @@ TEST(bics) {
CHECK_EQUAL_64(0x8000000000000000L, x0);
START();
- __ Mov(w0, 0xffff0000);
- __ Bics(w0, w0, Operand(0xfffffff0));
+ __ Mov(w0, 0xFFFF0000);
+ __ Bics(w0, w0, Operand(0xFFFFFFF0));
END();
RUN();
@@ -969,8 +965,8 @@ TEST(eor) {
SETUP();
START();
- __ Mov(x0, 0xfff0);
- __ Mov(x1, 0xf00000ff);
+ __ Mov(x0, 0xFFF0);
+ __ Mov(x1, 0xF00000FF);
__ Eor(x2, x0, Operand(x1));
__ Eor(w3, w0, Operand(w1, LSL, 4));
@@ -980,22 +976,22 @@ TEST(eor) {
__ Eor(x7, x0, Operand(x1, ASR, 20));
__ Eor(w8, w0, Operand(w1, ROR, 28));
__ Eor(x9, x0, Operand(x1, ROR, 28));
- __ Eor(w10, w0, Operand(0xff00ff00));
- __ Eor(x11, x0, Operand(0xff00ff00ff00ff00L));
+ __ Eor(w10, w0, Operand(0xFF00FF00));
+ __ Eor(x11, x0, Operand(0xFF00FF00FF00FF00L));
END();
RUN();
- CHECK_EQUAL_64(0xf000ff0f, x2);
- CHECK_EQUAL_64(0x0000f000, x3);
- CHECK_EQUAL_64(0x0000000f0000f000L, x4);
- CHECK_EQUAL_64(0x7800ff8f, x5);
- CHECK_EQUAL_64(0xffff00f0, x6);
- CHECK_EQUAL_64(0x0000f0f0, x7);
- CHECK_EQUAL_64(0x0000f00f, x8);
- CHECK_EQUAL_64(0x00000ff00000ffffL, x9);
- CHECK_EQUAL_64(0xff0000f0, x10);
- CHECK_EQUAL_64(0xff00ff00ff0000f0L, x11);
+ CHECK_EQUAL_64(0xF000FF0F, x2);
+ CHECK_EQUAL_64(0x0000F000, x3);
+ CHECK_EQUAL_64(0x0000000F0000F000L, x4);
+ CHECK_EQUAL_64(0x7800FF8F, x5);
+ CHECK_EQUAL_64(0xFFFF00F0, x6);
+ CHECK_EQUAL_64(0x0000F0F0, x7);
+ CHECK_EQUAL_64(0x0000F00F, x8);
+ CHECK_EQUAL_64(0x00000FF00000FFFFL, x9);
+ CHECK_EQUAL_64(0xFF0000F0, x10);
+ CHECK_EQUAL_64(0xFF00FF00FF0000F0L, x11);
TEARDOWN();
}
@@ -1024,9 +1020,9 @@ TEST(eor_extend) {
CHECK_EQUAL_64(0x1111111111101013UL, x7);
CHECK_EQUAL_64(0x11131315, x8);
CHECK_EQUAL_64(0x1111111511151519UL, x9);
- CHECK_EQUAL_64(0xeeeeee90, x10);
- CHECK_EQUAL_64(0xeeeeeeeeeeee1013UL, x11);
- CHECK_EQUAL_64(0xeeeeeeef11131315UL, x12);
+ CHECK_EQUAL_64(0xEEEEEE90, x10);
+ CHECK_EQUAL_64(0xEEEEEEEEEEEE1013UL, x11);
+ CHECK_EQUAL_64(0xEEEEEEEF11131315UL, x12);
CHECK_EQUAL_64(0x1111111511151519UL, x13);
TEARDOWN();
@@ -1038,8 +1034,8 @@ TEST(eon) {
SETUP();
START();
- __ Mov(x0, 0xfff0);
- __ Mov(x1, 0xf00000ff);
+ __ Mov(x0, 0xFFF0);
+ __ Mov(x1, 0xF00000FF);
__ Eon(x2, x0, Operand(x1));
__ Eon(w3, w0, Operand(w1, LSL, 4));
@@ -1049,22 +1045,22 @@ TEST(eon) {
__ Eon(x7, x0, Operand(x1, ASR, 20));
__ Eon(w8, w0, Operand(w1, ROR, 28));
__ Eon(x9, x0, Operand(x1, ROR, 28));
- __ Eon(w10, w0, Operand(0x03c003c0));
+ __ Eon(w10, w0, Operand(0x03C003C0));
__ Eon(x11, x0, Operand(0x0000100000001000L));
END();
RUN();
- CHECK_EQUAL_64(0xffffffff0fff00f0L, x2);
- CHECK_EQUAL_64(0xffff0fff, x3);
- CHECK_EQUAL_64(0xfffffff0ffff0fffL, x4);
- CHECK_EQUAL_64(0xffffffff87ff0070L, x5);
- CHECK_EQUAL_64(0x0000ff0f, x6);
- CHECK_EQUAL_64(0xffffffffffff0f0fL, x7);
- CHECK_EQUAL_64(0xffff0ff0, x8);
- CHECK_EQUAL_64(0xfffff00fffff0000L, x9);
- CHECK_EQUAL_64(0xfc3f03cf, x10);
- CHECK_EQUAL_64(0xffffefffffff100fL, x11);
+ CHECK_EQUAL_64(0xFFFFFFFF0FFF00F0L, x2);
+ CHECK_EQUAL_64(0xFFFF0FFF, x3);
+ CHECK_EQUAL_64(0xFFFFFFF0FFFF0FFFL, x4);
+ CHECK_EQUAL_64(0xFFFFFFFF87FF0070L, x5);
+ CHECK_EQUAL_64(0x0000FF0F, x6);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFF0F0FL, x7);
+ CHECK_EQUAL_64(0xFFFF0FF0, x8);
+ CHECK_EQUAL_64(0xFFFFF00FFFFF0000L, x9);
+ CHECK_EQUAL_64(0xFC3F03CF, x10);
+ CHECK_EQUAL_64(0xFFFFEFFFFFFF100FL, x11);
TEARDOWN();
}
@@ -1089,14 +1085,14 @@ TEST(eon_extend) {
RUN();
- CHECK_EQUAL_64(0xeeeeee6f, x6);
- CHECK_EQUAL_64(0xeeeeeeeeeeefefecUL, x7);
- CHECK_EQUAL_64(0xeeececea, x8);
- CHECK_EQUAL_64(0xeeeeeeeaeeeaeae6UL, x9);
- CHECK_EQUAL_64(0x1111116f, x10);
- CHECK_EQUAL_64(0x111111111111efecUL, x11);
- CHECK_EQUAL_64(0x11111110eeececeaUL, x12);
- CHECK_EQUAL_64(0xeeeeeeeaeeeaeae6UL, x13);
+ CHECK_EQUAL_64(0xEEEEEE6F, x6);
+ CHECK_EQUAL_64(0xEEEEEEEEEEEFEFECUL, x7);
+ CHECK_EQUAL_64(0xEEECECEA, x8);
+ CHECK_EQUAL_64(0xEEEEEEEAEEEAEAE6UL, x9);
+ CHECK_EQUAL_64(0x1111116F, x10);
+ CHECK_EQUAL_64(0x111111111111EFECUL, x11);
+ CHECK_EQUAL_64(0x11111110EEECECEAUL, x12);
+ CHECK_EQUAL_64(0xEEEEEEEAEEEAEAE6UL, x13);
TEARDOWN();
}
@@ -1109,8 +1105,8 @@ TEST(mul) {
START();
__ Mov(x16, 0);
__ Mov(x17, 1);
- __ Mov(x18, 0xffffffff);
- __ Mov(x19, 0xffffffffffffffffUL);
+ __ Mov(x18, 0xFFFFFFFF);
+ __ Mov(x19, 0xFFFFFFFFFFFFFFFFUL);
__ Mul(w0, w16, w16);
__ Mul(w1, w16, w17);
@@ -1137,23 +1133,23 @@ TEST(mul) {
CHECK_EQUAL_64(0, x0);
CHECK_EQUAL_64(0, x1);
- CHECK_EQUAL_64(0xffffffff, x2);
+ CHECK_EQUAL_64(0xFFFFFFFF, x2);
CHECK_EQUAL_64(1, x3);
CHECK_EQUAL_64(0, x4);
- CHECK_EQUAL_64(0xffffffff, x5);
- CHECK_EQUAL_64(0xffffffff00000001UL, x6);
+ CHECK_EQUAL_64(0xFFFFFFFF, x5);
+ CHECK_EQUAL_64(0xFFFFFFFF00000001UL, x6);
CHECK_EQUAL_64(1, x7);
- CHECK_EQUAL_64(0xffffffffffffffffUL, x8);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFUL, x8);
CHECK_EQUAL_64(1, x9);
CHECK_EQUAL_64(1, x10);
CHECK_EQUAL_64(0, x11);
CHECK_EQUAL_64(0, x12);
CHECK_EQUAL_64(1, x13);
- CHECK_EQUAL_64(0xffffffff, x14);
+ CHECK_EQUAL_64(0xFFFFFFFF, x14);
CHECK_EQUAL_64(0, x20);
- CHECK_EQUAL_64(0xffffffff00000001UL, x21);
- CHECK_EQUAL_64(0xffffffff, x22);
- CHECK_EQUAL_64(0xffffffffffffffffUL, x23);
+ CHECK_EQUAL_64(0xFFFFFFFF00000001UL, x21);
+ CHECK_EQUAL_64(0xFFFFFFFF, x22);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFUL, x23);
TEARDOWN();
}
@@ -1178,7 +1174,7 @@ TEST(smull) {
SmullHelper(1, 1, 1);
SmullHelper(-1, -1, 1);
SmullHelper(1, -1, -1);
- SmullHelper(0xffffffff80000000, 0x80000000, 1);
+ SmullHelper(0xFFFFFFFF80000000, 0x80000000, 1);
SmullHelper(0x0000000080000000, 0x00010000, 0x00008000);
}
@@ -1190,8 +1186,8 @@ TEST(madd) {
START();
__ Mov(x16, 0);
__ Mov(x17, 1);
- __ Mov(x18, 0xffffffff);
- __ Mov(x19, 0xffffffffffffffffUL);
+ __ Mov(x18, 0xFFFFFFFF);
+ __ Mov(x19, 0xFFFFFFFFFFFFFFFFUL);
__ Madd(w0, w16, w16, w16);
__ Madd(w1, w16, w16, w17);
@@ -1225,27 +1221,27 @@ TEST(madd) {
CHECK_EQUAL_64(0, x0);
CHECK_EQUAL_64(1, x1);
- CHECK_EQUAL_64(0xffffffff, x2);
- CHECK_EQUAL_64(0xffffffff, x3);
+ CHECK_EQUAL_64(0xFFFFFFFF, x2);
+ CHECK_EQUAL_64(0xFFFFFFFF, x3);
CHECK_EQUAL_64(1, x4);
CHECK_EQUAL_64(0, x5);
CHECK_EQUAL_64(0, x6);
- CHECK_EQUAL_64(0xffffffff, x7);
- CHECK_EQUAL_64(0xfffffffe, x8);
+ CHECK_EQUAL_64(0xFFFFFFFF, x7);
+ CHECK_EQUAL_64(0xFFFFFFFE, x8);
CHECK_EQUAL_64(2, x9);
CHECK_EQUAL_64(0, x10);
CHECK_EQUAL_64(0, x11);
CHECK_EQUAL_64(0, x12);
CHECK_EQUAL_64(1, x13);
- CHECK_EQUAL_64(0xffffffff, x14);
- CHECK_EQUAL_64(0xffffffffffffffff, x15);
+ CHECK_EQUAL_64(0xFFFFFFFF, x14);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFF, x15);
CHECK_EQUAL_64(1, x20);
CHECK_EQUAL_64(0x100000000UL, x21);
CHECK_EQUAL_64(0, x22);
- CHECK_EQUAL_64(0xffffffff, x23);
- CHECK_EQUAL_64(0x1fffffffe, x24);
- CHECK_EQUAL_64(0xfffffffe00000002UL, x25);
+ CHECK_EQUAL_64(0xFFFFFFFF, x23);
+ CHECK_EQUAL_64(0x1FFFFFFFE, x24);
+ CHECK_EQUAL_64(0xFFFFFFFE00000002UL, x25);
CHECK_EQUAL_64(0, x26);
CHECK_EQUAL_64(0, x27);
@@ -1260,8 +1256,8 @@ TEST(msub) {
START();
__ Mov(x16, 0);
__ Mov(x17, 1);
- __ Mov(x18, 0xffffffff);
- __ Mov(x19, 0xffffffffffffffffUL);
+ __ Mov(x18, 0xFFFFFFFF);
+ __ Mov(x19, 0xFFFFFFFFFFFFFFFFUL);
__ Msub(w0, w16, w16, w16);
__ Msub(w1, w16, w16, w17);
@@ -1295,29 +1291,29 @@ TEST(msub) {
CHECK_EQUAL_64(0, x0);
CHECK_EQUAL_64(1, x1);
- CHECK_EQUAL_64(0xffffffff, x2);
- CHECK_EQUAL_64(0xffffffff, x3);
+ CHECK_EQUAL_64(0xFFFFFFFF, x2);
+ CHECK_EQUAL_64(0xFFFFFFFF, x3);
CHECK_EQUAL_64(1, x4);
- CHECK_EQUAL_64(0xfffffffe, x5);
- CHECK_EQUAL_64(0xfffffffe, x6);
+ CHECK_EQUAL_64(0xFFFFFFFE, x5);
+ CHECK_EQUAL_64(0xFFFFFFFE, x6);
CHECK_EQUAL_64(1, x7);
CHECK_EQUAL_64(0, x8);
CHECK_EQUAL_64(0, x9);
- CHECK_EQUAL_64(0xfffffffe, x10);
- CHECK_EQUAL_64(0xfffffffe, x11);
+ CHECK_EQUAL_64(0xFFFFFFFE, x10);
+ CHECK_EQUAL_64(0xFFFFFFFE, x11);
CHECK_EQUAL_64(0, x12);
CHECK_EQUAL_64(1, x13);
- CHECK_EQUAL_64(0xffffffff, x14);
- CHECK_EQUAL_64(0xffffffffffffffffUL, x15);
+ CHECK_EQUAL_64(0xFFFFFFFF, x14);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFUL, x15);
CHECK_EQUAL_64(1, x20);
- CHECK_EQUAL_64(0xfffffffeUL, x21);
- CHECK_EQUAL_64(0xfffffffffffffffeUL, x22);
- CHECK_EQUAL_64(0xffffffff00000001UL, x23);
+ CHECK_EQUAL_64(0xFFFFFFFEUL, x21);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFEUL, x22);
+ CHECK_EQUAL_64(0xFFFFFFFF00000001UL, x23);
CHECK_EQUAL_64(0, x24);
CHECK_EQUAL_64(0x200000000UL, x25);
- CHECK_EQUAL_64(0x1fffffffeUL, x26);
- CHECK_EQUAL_64(0xfffffffffffffffeUL, x27);
+ CHECK_EQUAL_64(0x1FFFFFFFEUL, x26);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFEUL, x27);
TEARDOWN();
}
@@ -1332,12 +1328,12 @@ TEST(smulh) {
__ Mov(x21, 1);
__ Mov(x22, 0x0000000100000000L);
__ Mov(x23, 0x12345678);
- __ Mov(x24, 0x0123456789abcdefL);
+ __ Mov(x24, 0x0123456789ABCDEFL);
__ Mov(x25, 0x0000000200000000L);
__ Mov(x26, 0x8000000000000000UL);
- __ Mov(x27, 0xffffffffffffffffUL);
+ __ Mov(x27, 0xFFFFFFFFFFFFFFFFUL);
__ Mov(x28, 0x5555555555555555UL);
- __ Mov(x29, 0xaaaaaaaaaaaaaaaaUL);
+ __ Mov(x29, 0xAAAAAAAAAAAAAAAAUL);
__ Smulh(x0, x20, x24);
__ Smulh(x1, x21, x24);
@@ -1359,14 +1355,14 @@ TEST(smulh) {
CHECK_EQUAL_64(0, x1);
CHECK_EQUAL_64(0, x2);
CHECK_EQUAL_64(0x01234567, x3);
- CHECK_EQUAL_64(0x02468acf, x4);
- CHECK_EQUAL_64(0xffffffffffffffffUL, x5);
+ CHECK_EQUAL_64(0x02468ACF, x4);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFUL, x5);
CHECK_EQUAL_64(0x4000000000000000UL, x6);
CHECK_EQUAL_64(0, x7);
CHECK_EQUAL_64(0, x8);
- CHECK_EQUAL_64(0x1c71c71c71c71c71UL, x9);
- CHECK_EQUAL_64(0xe38e38e38e38e38eUL, x10);
- CHECK_EQUAL_64(0x1c71c71c71c71c72UL, x11);
+ CHECK_EQUAL_64(0x1C71C71C71C71C71UL, x9);
+ CHECK_EQUAL_64(0xE38E38E38E38E38EUL, x10);
+ CHECK_EQUAL_64(0x1C71C71C71C71C72UL, x11);
TEARDOWN();
}
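// Smulh returns the high 64 bits of the full 128-bit signed product of its
// operands. Reference (sketch; __int128 is a GCC/Clang extension):
#include <cstdint>

int64_t SmulhSketch(int64_t a, int64_t b) {
  return static_cast<int64_t>((static_cast<__int128>(a) * b) >> 64);
}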
@@ -1378,8 +1374,8 @@ TEST(smaddl_umaddl) {
START();
__ Mov(x17, 1);
- __ Mov(x18, 0xffffffff);
- __ Mov(x19, 0xffffffffffffffffUL);
+ __ Mov(x18, 0xFFFFFFFF);
+ __ Mov(x19, 0xFFFFFFFFFFFFFFFFUL);
__ Mov(x20, 4);
__ Mov(x21, 0x200000000UL);
@@ -1400,8 +1396,8 @@ TEST(smaddl_umaddl) {
CHECK_EQUAL_64(5, x11);
CHECK_EQUAL_64(0x200000001UL, x12);
CHECK_EQUAL_64(0x100000003UL, x13);
- CHECK_EQUAL_64(0xfffffffe00000005UL, x14);
- CHECK_EQUAL_64(0xfffffffe00000005UL, x15);
+ CHECK_EQUAL_64(0xFFFFFFFE00000005UL, x14);
+ CHECK_EQUAL_64(0xFFFFFFFE00000005UL, x15);
CHECK_EQUAL_64(0x1, x22);
TEARDOWN();
@@ -1414,8 +1410,8 @@ TEST(smsubl_umsubl) {
START();
__ Mov(x17, 1);
- __ Mov(x18, 0xffffffff);
- __ Mov(x19, 0xffffffffffffffffUL);
+ __ Mov(x18, 0xFFFFFFFF);
+ __ Mov(x19, 0xFFFFFFFFFFFFFFFFUL);
__ Mov(x20, 4);
__ Mov(x21, 0x200000000UL);
@@ -1434,11 +1430,11 @@ TEST(smsubl_umsubl) {
CHECK_EQUAL_64(5, x9);
CHECK_EQUAL_64(3, x10);
CHECK_EQUAL_64(3, x11);
- CHECK_EQUAL_64(0x1ffffffffUL, x12);
- CHECK_EQUAL_64(0xffffffff00000005UL, x13);
+ CHECK_EQUAL_64(0x1FFFFFFFFUL, x12);
+ CHECK_EQUAL_64(0xFFFFFFFF00000005UL, x13);
CHECK_EQUAL_64(0x200000003UL, x14);
CHECK_EQUAL_64(0x200000003UL, x15);
- CHECK_EQUAL_64(0x3ffffffffUL, x22);
+ CHECK_EQUAL_64(0x3FFFFFFFFUL, x22);
TEARDOWN();
}
@@ -1450,8 +1446,8 @@ TEST(div) {
START();
__ Mov(x16, 1);
- __ Mov(x17, 0xffffffff);
- __ Mov(x18, 0xffffffffffffffffUL);
+ __ Mov(x17, 0xFFFFFFFF);
+ __ Mov(x18, 0xFFFFFFFFFFFFFFFFUL);
__ Mov(x19, 0x80000000);
__ Mov(x20, 0x8000000000000000UL);
__ Mov(x21, 2);
@@ -1495,15 +1491,15 @@ TEST(div) {
RUN();
CHECK_EQUAL_64(1, x0);
- CHECK_EQUAL_64(0xffffffff, x1);
+ CHECK_EQUAL_64(0xFFFFFFFF, x1);
CHECK_EQUAL_64(1, x2);
- CHECK_EQUAL_64(0xffffffff, x3);
+ CHECK_EQUAL_64(0xFFFFFFFF, x3);
CHECK_EQUAL_64(1, x4);
CHECK_EQUAL_64(1, x5);
CHECK_EQUAL_64(0, x6);
CHECK_EQUAL_64(1, x7);
CHECK_EQUAL_64(0, x8);
- CHECK_EQUAL_64(0xffffffff00000001UL, x9);
+ CHECK_EQUAL_64(0xFFFFFFFF00000001UL, x9);
CHECK_EQUAL_64(0x40000000, x10);
CHECK_EQUAL_64(0xC0000000, x11);
CHECK_EQUAL_64(0x40000000, x12);
@@ -1516,7 +1512,7 @@ TEST(div) {
CHECK_EQUAL_64(0x8000000000000000UL, x25);
CHECK_EQUAL_64(0, x26);
CHECK_EQUAL_64(0, x27);
- CHECK_EQUAL_64(0x7fffffffffffffffUL, x28);
+ CHECK_EQUAL_64(0x7FFFFFFFFFFFFFFFUL, x28);
CHECK_EQUAL_64(0, x29);
CHECK_EQUAL_64(0, x18);
CHECK_EQUAL_64(0, x19);
@@ -1532,7 +1528,7 @@ TEST(rbit_rev) {
SETUP();
START();
- __ Mov(x24, 0xfedcba9876543210UL);
+ __ Mov(x24, 0xFEDCBA9876543210UL);
__ Rbit(w0, w24);
__ Rbit(x1, x24);
__ Rev16(w2, w24);
@@ -1544,13 +1540,13 @@ TEST(rbit_rev) {
RUN();
- CHECK_EQUAL_64(0x084c2a6e, x0);
- CHECK_EQUAL_64(0x084c2a6e195d3b7fUL, x1);
+ CHECK_EQUAL_64(0x084C2A6E, x0);
+ CHECK_EQUAL_64(0x084C2A6E195D3B7FUL, x1);
CHECK_EQUAL_64(0x54761032, x2);
- CHECK_EQUAL_64(0xdcfe98ba54761032UL, x3);
+ CHECK_EQUAL_64(0xDCFE98BA54761032UL, x3);
CHECK_EQUAL_64(0x10325476, x4);
- CHECK_EQUAL_64(0x98badcfe10325476UL, x5);
- CHECK_EQUAL_64(0x1032547698badcfeUL, x6);
+ CHECK_EQUAL_64(0x98BADCFE10325476UL, x5);
+ CHECK_EQUAL_64(0x1032547698BADCFEUL, x6);
TEARDOWN();
}
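// Rev reverses the bytes of the whole register, Rev32 reverses within each
// 32-bit half, and Rev16 within each 16-bit lane, producing the patterns
// checked above. Reference for the full 64-bit reversal (sketch):
#include <cstdint>

uint64_t ByteSwap64(uint64_t x) {
  uint64_t r = 0;
  for (int i = 0; i < 8; ++i) {
    r = (r << 8) | (x & 0xFF);  // Move the lowest byte to the next slot.
    x >>= 8;
  }
  return r;  // ByteSwap64(0xFEDCBA9876543210) == 0x1032547698BADCFE.
}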
@@ -1562,7 +1558,7 @@ TEST(clz_cls) {
START();
__ Mov(x24, 0x0008000000800000UL);
- __ Mov(x25, 0xff800000fff80000UL);
+ __ Mov(x25, 0xFF800000FFF80000UL);
__ Mov(x26, 0);
__ Clz(w0, w24);
__ Clz(x1, x24);
@@ -1773,7 +1769,7 @@ TEST(adr_far) {
RUN();
- CHECK_EQUAL_64(0xf, x0);
+ CHECK_EQUAL_64(0xF, x0);
TEARDOWN();
}
@@ -1960,7 +1956,7 @@ TEST(compare_branch) {
__ Mov(x3, 1);
__ Bind(&nzf_end);
- __ Mov(x18, 0xffffffff00000000UL);
+ __ Mov(x18, 0xFFFFFFFF00000000UL);
Label a, a_end;
__ Cbz(w18, &a);
@@ -2000,7 +1996,7 @@ TEST(test_branch) {
__ Mov(x1, 0);
__ Mov(x2, 0);
__ Mov(x3, 0);
- __ Mov(x16, 0xaaaaaaaaaaaaaaaaUL);
+ __ Mov(x16, 0xAAAAAAAAAAAAAAAAUL);
Label bz, bz_end;
__ Tbz(w16, 0, &bz);
@@ -2432,7 +2428,7 @@ TEST(ldr_str_offset) {
INIT_V8();
SETUP();
- uint64_t src[2] = {0xfedcba9876543210UL, 0x0123456789abcdefUL};
+ uint64_t src[2] = {0xFEDCBA9876543210UL, 0x0123456789ABCDEFUL};
uint64_t dst[5] = {0, 0, 0, 0, 0};
uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
@@ -2456,10 +2452,10 @@ TEST(ldr_str_offset) {
CHECK_EQUAL_64(0x76543210, x0);
CHECK_EQUAL_64(0x76543210, dst[0]);
- CHECK_EQUAL_64(0xfedcba98, x1);
- CHECK_EQUAL_64(0xfedcba9800000000UL, dst[1]);
- CHECK_EQUAL_64(0x0123456789abcdefUL, x2);
- CHECK_EQUAL_64(0x0123456789abcdefUL, dst[2]);
+ CHECK_EQUAL_64(0xFEDCBA98, x1);
+ CHECK_EQUAL_64(0xFEDCBA9800000000UL, dst[1]);
+ CHECK_EQUAL_64(0x0123456789ABCDEFUL, x2);
+ CHECK_EQUAL_64(0x0123456789ABCDEFUL, dst[2]);
CHECK_EQUAL_64(0x32, x3);
CHECK_EQUAL_64(0x3200, dst[3]);
CHECK_EQUAL_64(0x7654, x4);
@@ -2479,8 +2475,8 @@ TEST(ldr_str_wide) {
uint32_t dst[8192];
uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
- memset(src, 0xaa, 8192 * sizeof(src[0]));
- memset(dst, 0xaa, 8192 * sizeof(dst[0]));
+ memset(src, 0xAA, 8192 * sizeof(src[0]));
+ memset(dst, 0xAA, 8192 * sizeof(dst[0]));
src[0] = 0;
src[6144] = 6144;
src[8191] = 8191;
@@ -2523,7 +2519,7 @@ TEST(ldr_str_preindex) {
INIT_V8();
SETUP();
- uint64_t src[2] = {0xfedcba9876543210UL, 0x0123456789abcdefUL};
+ uint64_t src[2] = {0xFEDCBA9876543210UL, 0x0123456789ABCDEFUL};
uint64_t dst[6] = {0, 0, 0, 0, 0, 0};
uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
@@ -2553,10 +2549,10 @@ TEST(ldr_str_preindex) {
RUN();
- CHECK_EQUAL_64(0xfedcba98, x0);
- CHECK_EQUAL_64(0xfedcba9800000000UL, dst[1]);
- CHECK_EQUAL_64(0x0123456789abcdefUL, x1);
- CHECK_EQUAL_64(0x0123456789abcdefUL, dst[2]);
+ CHECK_EQUAL_64(0xFEDCBA98, x0);
+ CHECK_EQUAL_64(0xFEDCBA9800000000UL, dst[1]);
+ CHECK_EQUAL_64(0x0123456789ABCDEFUL, x1);
+ CHECK_EQUAL_64(0x0123456789ABCDEFUL, dst[2]);
CHECK_EQUAL_64(0x01234567, x2);
CHECK_EQUAL_64(0x0123456700000000UL, dst[4]);
CHECK_EQUAL_64(0x32, x3);
@@ -2581,7 +2577,7 @@ TEST(ldr_str_postindex) {
INIT_V8();
SETUP();
- uint64_t src[2] = {0xfedcba9876543210UL, 0x0123456789abcdefUL};
+ uint64_t src[2] = {0xFEDCBA9876543210UL, 0x0123456789ABCDEFUL};
uint64_t dst[6] = {0, 0, 0, 0, 0, 0};
uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
@@ -2611,12 +2607,12 @@ TEST(ldr_str_postindex) {
RUN();
- CHECK_EQUAL_64(0xfedcba98, x0);
- CHECK_EQUAL_64(0xfedcba9800000000UL, dst[1]);
- CHECK_EQUAL_64(0x0123456789abcdefUL, x1);
- CHECK_EQUAL_64(0x0123456789abcdefUL, dst[2]);
- CHECK_EQUAL_64(0x0123456789abcdefUL, x2);
- CHECK_EQUAL_64(0x0123456789abcdefUL, dst[4]);
+ CHECK_EQUAL_64(0xFEDCBA98, x0);
+ CHECK_EQUAL_64(0xFEDCBA9800000000UL, dst[1]);
+ CHECK_EQUAL_64(0x0123456789ABCDEFUL, x1);
+ CHECK_EQUAL_64(0x0123456789ABCDEFUL, dst[2]);
+ CHECK_EQUAL_64(0x0123456789ABCDEFUL, x2);
+ CHECK_EQUAL_64(0x0123456789ABCDEFUL, dst[4]);
CHECK_EQUAL_64(0x32, x3);
CHECK_EQUAL_64(0x3200, dst[3]);
CHECK_EQUAL_64(0x9876, x4);
@@ -2639,7 +2635,7 @@ TEST(load_signed) {
INIT_V8();
SETUP();
- uint32_t src[2] = {0x80008080, 0x7fff7f7f};
+ uint32_t src[2] = {0x80008080, 0x7FFF7F7F};
uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
START();
@@ -2658,16 +2654,16 @@ TEST(load_signed) {
RUN();
- CHECK_EQUAL_64(0xffffff80, x0);
- CHECK_EQUAL_64(0x0000007f, x1);
- CHECK_EQUAL_64(0xffff8080, x2);
- CHECK_EQUAL_64(0x00007f7f, x3);
- CHECK_EQUAL_64(0xffffffffffffff80UL, x4);
- CHECK_EQUAL_64(0x000000000000007fUL, x5);
- CHECK_EQUAL_64(0xffffffffffff8080UL, x6);
- CHECK_EQUAL_64(0x0000000000007f7fUL, x7);
- CHECK_EQUAL_64(0xffffffff80008080UL, x8);
- CHECK_EQUAL_64(0x000000007fff7f7fUL, x9);
+ CHECK_EQUAL_64(0xFFFFFF80, x0);
+ CHECK_EQUAL_64(0x0000007F, x1);
+ CHECK_EQUAL_64(0xFFFF8080, x2);
+ CHECK_EQUAL_64(0x00007F7F, x3);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFFFF80UL, x4);
+ CHECK_EQUAL_64(0x000000000000007FUL, x5);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFF8080UL, x6);
+ CHECK_EQUAL_64(0x0000000000007F7FUL, x7);
+ CHECK_EQUAL_64(0xFFFFFFFF80008080UL, x8);
+ CHECK_EQUAL_64(0x000000007FFF7F7FUL, x9);
TEARDOWN();
}
@@ -2690,9 +2686,9 @@ TEST(load_store_regoffset) {
__ Mov(x24, 0);
__ Mov(x25, 4);
__ Mov(x26, -4);
- __ Mov(x27, 0xfffffffc); // 32-bit -4.
- __ Mov(x28, 0xfffffffe); // 32-bit -2.
- __ Mov(x29, 0xffffffff); // 32-bit -1.
+ __ Mov(x27, 0xFFFFFFFC); // 32-bit -4.
+ __ Mov(x28, 0xFFFFFFFE); // 32-bit -2.
+ __ Mov(x29, 0xFFFFFFFF); // 32-bit -1.
__ Ldr(w0, MemOperand(x16, x24));
__ Ldr(x1, MemOperand(x16, x25));
@@ -2891,11 +2887,11 @@ TEST(load_store_q) {
INIT_V8();
SETUP();
- uint8_t src[48] = {0x10, 0x32, 0x54, 0x76, 0x98, 0xba, 0xdc, 0xfe, 0x01, 0x23,
- 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0x21, 0x43, 0x65, 0x87,
- 0xa9, 0xcb, 0xed, 0x0f, 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc,
- 0xde, 0xf0, 0x24, 0x46, 0x68, 0x8a, 0xac, 0xce, 0xe0, 0x02,
- 0x42, 0x64, 0x86, 0xa8, 0xca, 0xec, 0x0e, 0x20};
+ uint8_t src[48] = {0x10, 0x32, 0x54, 0x76, 0x98, 0xBA, 0xDC, 0xFE, 0x01, 0x23,
+ 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF, 0x21, 0x43, 0x65, 0x87,
+ 0xA9, 0xCB, 0xED, 0x0F, 0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC,
+ 0xDE, 0xF0, 0x24, 0x46, 0x68, 0x8A, 0xAC, 0xCE, 0xE0, 0x02,
+ 0x42, 0x64, 0x86, 0xA8, 0xCA, 0xEC, 0x0E, 0x20};
uint64_t dst[6] = {0, 0, 0, 0, 0, 0};
uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
@@ -2918,15 +2914,15 @@ TEST(load_store_q) {
RUN();
- CHECK_EQUAL_128(0xf0debc9a78563412, 0x0fedcba987654321, q0);
- CHECK_EQUAL_64(0x0fedcba987654321, dst[0]);
- CHECK_EQUAL_64(0xf0debc9a78563412, dst[1]);
- CHECK_EQUAL_128(0xefcdab8967452301, 0xfedcba9876543210, q1);
- CHECK_EQUAL_64(0xfedcba9876543210, dst[4]);
- CHECK_EQUAL_64(0xefcdab8967452301, dst[5]);
- CHECK_EQUAL_128(0x200eeccaa8866442, 0x02e0ceac8a684624, q2);
- CHECK_EQUAL_64(0x02e0ceac8a684624, dst[2]);
- CHECK_EQUAL_64(0x200eeccaa8866442, dst[3]);
+ CHECK_EQUAL_128(0xF0DEBC9A78563412, 0x0FEDCBA987654321, q0);
+ CHECK_EQUAL_64(0x0FEDCBA987654321, dst[0]);
+ CHECK_EQUAL_64(0xF0DEBC9A78563412, dst[1]);
+ CHECK_EQUAL_128(0xEFCDAB8967452301, 0xFEDCBA9876543210, q1);
+ CHECK_EQUAL_64(0xFEDCBA9876543210, dst[4]);
+ CHECK_EQUAL_64(0xEFCDAB8967452301, dst[5]);
+ CHECK_EQUAL_128(0x200EECCAA8866442, 0x02E0CEAC8A684624, q2);
+ CHECK_EQUAL_64(0x02E0CEAC8A684624, dst[2]);
+ CHECK_EQUAL_64(0x200EECCAA8866442, dst[3]);
CHECK_EQUAL_64(src_base, x17);
CHECK_EQUAL_64(dst_base + 16, x18);
CHECK_EQUAL_64(src_base + 16, x19);
@@ -2967,22 +2963,22 @@ TEST(neon_ld1_d) {
CHECK_EQUAL_128(0, 0x0706050403020100, q2);
CHECK_EQUAL_128(0, 0x0807060504030201, q3);
- CHECK_EQUAL_128(0, 0x100f0e0d0c0b0a09, q4);
+ CHECK_EQUAL_128(0, 0x100F0E0D0C0B0A09, q4);
CHECK_EQUAL_128(0, 0x0908070605040302, q5);
- CHECK_EQUAL_128(0, 0x11100f0e0d0c0b0a, q6);
+ CHECK_EQUAL_128(0, 0x11100F0E0D0C0B0A, q6);
CHECK_EQUAL_128(0, 0x1918171615141312, q7);
- CHECK_EQUAL_128(0, 0x0a09080706050403, q16);
- CHECK_EQUAL_128(0, 0x1211100f0e0d0c0b, q17);
- CHECK_EQUAL_128(0, 0x1a19181716151413, q18);
- CHECK_EQUAL_128(0, 0x2221201f1e1d1c1b, q19);
- CHECK_EQUAL_128(0, 0x0b0a090807060504, q30);
- CHECK_EQUAL_128(0, 0x131211100f0e0d0c, q31);
- CHECK_EQUAL_128(0, 0x1b1a191817161514, q0);
- CHECK_EQUAL_128(0, 0x232221201f1e1d1c, q1);
- CHECK_EQUAL_128(0, 0x0c0b0a0908070605, q20);
- CHECK_EQUAL_128(0, 0x14131211100f0e0d, q21);
- CHECK_EQUAL_128(0, 0x1c1b1a1918171615, q22);
- CHECK_EQUAL_128(0, 0x24232221201f1e1d, q23);
+ CHECK_EQUAL_128(0, 0x0A09080706050403, q16);
+ CHECK_EQUAL_128(0, 0x1211100F0E0D0C0B, q17);
+ CHECK_EQUAL_128(0, 0x1A19181716151413, q18);
+ CHECK_EQUAL_128(0, 0x2221201F1E1D1C1B, q19);
+ CHECK_EQUAL_128(0, 0x0B0A090807060504, q30);
+ CHECK_EQUAL_128(0, 0x131211100F0E0D0C, q31);
+ CHECK_EQUAL_128(0, 0x1B1A191817161514, q0);
+ CHECK_EQUAL_128(0, 0x232221201F1E1D1C, q1);
+ CHECK_EQUAL_128(0, 0x0C0B0A0908070605, q20);
+ CHECK_EQUAL_128(0, 0x14131211100F0E0D, q21);
+ CHECK_EQUAL_128(0, 0x1C1B1A1918171615, q22);
+ CHECK_EQUAL_128(0, 0x24232221201F1E1D, q23);
TEARDOWN();
}
@@ -3021,22 +3017,22 @@ TEST(neon_ld1_d_postindex) {
CHECK_EQUAL_128(0, 0x0706050403020100, q2);
CHECK_EQUAL_128(0, 0x0807060504030201, q3);
- CHECK_EQUAL_128(0, 0x100f0e0d0c0b0a09, q4);
+ CHECK_EQUAL_128(0, 0x100F0E0D0C0B0A09, q4);
CHECK_EQUAL_128(0, 0x0908070605040302, q5);
- CHECK_EQUAL_128(0, 0x11100f0e0d0c0b0a, q6);
+ CHECK_EQUAL_128(0, 0x11100F0E0D0C0B0A, q6);
CHECK_EQUAL_128(0, 0x1918171615141312, q7);
- CHECK_EQUAL_128(0, 0x0a09080706050403, q16);
- CHECK_EQUAL_128(0, 0x1211100f0e0d0c0b, q17);
- CHECK_EQUAL_128(0, 0x1a19181716151413, q18);
- CHECK_EQUAL_128(0, 0x2221201f1e1d1c1b, q19);
- CHECK_EQUAL_128(0, 0x0b0a090807060504, q30);
- CHECK_EQUAL_128(0, 0x131211100f0e0d0c, q31);
- CHECK_EQUAL_128(0, 0x1b1a191817161514, q0);
- CHECK_EQUAL_128(0, 0x232221201f1e1d1c, q1);
- CHECK_EQUAL_128(0, 0x0c0b0a0908070605, q20);
- CHECK_EQUAL_128(0, 0x14131211100f0e0d, q21);
- CHECK_EQUAL_128(0, 0x1c1b1a1918171615, q22);
- CHECK_EQUAL_128(0, 0x24232221201f1e1d, q23);
+ CHECK_EQUAL_128(0, 0x0A09080706050403, q16);
+ CHECK_EQUAL_128(0, 0x1211100F0E0D0C0B, q17);
+ CHECK_EQUAL_128(0, 0x1A19181716151413, q18);
+ CHECK_EQUAL_128(0, 0x2221201F1E1D1C1B, q19);
+ CHECK_EQUAL_128(0, 0x0B0A090807060504, q30);
+ CHECK_EQUAL_128(0, 0x131211100F0E0D0C, q31);
+ CHECK_EQUAL_128(0, 0x1B1A191817161514, q0);
+ CHECK_EQUAL_128(0, 0x232221201F1E1D1C, q1);
+ CHECK_EQUAL_128(0, 0x0C0B0A0908070605, q20);
+ CHECK_EQUAL_128(0, 0x14131211100F0E0D, q21);
+ CHECK_EQUAL_128(0, 0x1C1B1A1918171615, q22);
+ CHECK_EQUAL_128(0, 0x24232221201F1E1D, q23);
CHECK_EQUAL_64(src_base + 1, x17);
CHECK_EQUAL_64(src_base + 1 + 16, x18);
CHECK_EQUAL_64(src_base + 2 + 24, x19);
@@ -3072,20 +3068,20 @@ TEST(neon_ld1_q) {
RUN();
- CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0706050403020100, q2);
- CHECK_EQUAL_128(0x100f0e0d0c0b0a09, 0x0807060504030201, q3);
- CHECK_EQUAL_128(0x201f1e1d1c1b1a19, 0x1817161514131211, q4);
- CHECK_EQUAL_128(0x11100f0e0d0c0b0a, 0x0908070605040302, q5);
- CHECK_EQUAL_128(0x21201f1e1d1c1b1a, 0x1918171615141312, q6);
- CHECK_EQUAL_128(0x31302f2e2d2c2b2a, 0x2928272625242322, q7);
- CHECK_EQUAL_128(0x1211100f0e0d0c0b, 0x0a09080706050403, q16);
- CHECK_EQUAL_128(0x2221201f1e1d1c1b, 0x1a19181716151413, q17);
- CHECK_EQUAL_128(0x3231302f2e2d2c2b, 0x2a29282726252423, q18);
- CHECK_EQUAL_128(0x4241403f3e3d3c3b, 0x3a39383736353433, q19);
- CHECK_EQUAL_128(0x131211100f0e0d0c, 0x0b0a090807060504, q30);
- CHECK_EQUAL_128(0x232221201f1e1d1c, 0x1b1a191817161514, q31);
- CHECK_EQUAL_128(0x333231302f2e2d2c, 0x2b2a292827262524, q0);
- CHECK_EQUAL_128(0x434241403f3e3d3c, 0x3b3a393837363534, q1);
+ CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x0706050403020100, q2);
+ CHECK_EQUAL_128(0x100F0E0D0C0B0A09, 0x0807060504030201, q3);
+ CHECK_EQUAL_128(0x201F1E1D1C1B1A19, 0x1817161514131211, q4);
+ CHECK_EQUAL_128(0x11100F0E0D0C0B0A, 0x0908070605040302, q5);
+ CHECK_EQUAL_128(0x21201F1E1D1C1B1A, 0x1918171615141312, q6);
+ CHECK_EQUAL_128(0x31302F2E2D2C2B2A, 0x2928272625242322, q7);
+ CHECK_EQUAL_128(0x1211100F0E0D0C0B, 0x0A09080706050403, q16);
+ CHECK_EQUAL_128(0x2221201F1E1D1C1B, 0x1A19181716151413, q17);
+ CHECK_EQUAL_128(0x3231302F2E2D2C2B, 0x2A29282726252423, q18);
+ CHECK_EQUAL_128(0x4241403F3E3D3C3B, 0x3A39383736353433, q19);
+ CHECK_EQUAL_128(0x131211100F0E0D0C, 0x0B0A090807060504, q30);
+ CHECK_EQUAL_128(0x232221201F1E1D1C, 0x1B1A191817161514, q31);
+ CHECK_EQUAL_128(0x333231302F2E2D2C, 0x2B2A292827262524, q0);
+ CHECK_EQUAL_128(0x434241403F3E3D3C, 0x3B3A393837363534, q1);
TEARDOWN();
}
@@ -3118,20 +3114,20 @@ TEST(neon_ld1_q_postindex) {
RUN();
- CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0706050403020100, q2);
- CHECK_EQUAL_128(0x100f0e0d0c0b0a09, 0x0807060504030201, q3);
- CHECK_EQUAL_128(0x201f1e1d1c1b1a19, 0x1817161514131211, q4);
- CHECK_EQUAL_128(0x11100f0e0d0c0b0a, 0x0908070605040302, q5);
- CHECK_EQUAL_128(0x21201f1e1d1c1b1a, 0x1918171615141312, q6);
- CHECK_EQUAL_128(0x31302f2e2d2c2b2a, 0x2928272625242322, q7);
- CHECK_EQUAL_128(0x1211100f0e0d0c0b, 0x0a09080706050403, q16);
- CHECK_EQUAL_128(0x2221201f1e1d1c1b, 0x1a19181716151413, q17);
- CHECK_EQUAL_128(0x3231302f2e2d2c2b, 0x2a29282726252423, q18);
- CHECK_EQUAL_128(0x4241403f3e3d3c3b, 0x3a39383736353433, q19);
- CHECK_EQUAL_128(0x131211100f0e0d0c, 0x0b0a090807060504, q30);
- CHECK_EQUAL_128(0x232221201f1e1d1c, 0x1b1a191817161514, q31);
- CHECK_EQUAL_128(0x333231302f2e2d2c, 0x2b2a292827262524, q0);
- CHECK_EQUAL_128(0x434241403f3e3d3c, 0x3b3a393837363534, q1);
+ CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x0706050403020100, q2);
+ CHECK_EQUAL_128(0x100F0E0D0C0B0A09, 0x0807060504030201, q3);
+ CHECK_EQUAL_128(0x201F1E1D1C1B1A19, 0x1817161514131211, q4);
+ CHECK_EQUAL_128(0x11100F0E0D0C0B0A, 0x0908070605040302, q5);
+ CHECK_EQUAL_128(0x21201F1E1D1C1B1A, 0x1918171615141312, q6);
+ CHECK_EQUAL_128(0x31302F2E2D2C2B2A, 0x2928272625242322, q7);
+ CHECK_EQUAL_128(0x1211100F0E0D0C0B, 0x0A09080706050403, q16);
+ CHECK_EQUAL_128(0x2221201F1E1D1C1B, 0x1A19181716151413, q17);
+ CHECK_EQUAL_128(0x3231302F2E2D2C2B, 0x2A29282726252423, q18);
+ CHECK_EQUAL_128(0x4241403F3E3D3C3B, 0x3A39383736353433, q19);
+ CHECK_EQUAL_128(0x131211100F0E0D0C, 0x0B0A090807060504, q30);
+ CHECK_EQUAL_128(0x232221201F1E1D1C, 0x1B1A191817161514, q31);
+ CHECK_EQUAL_128(0x333231302F2E2D2C, 0x2B2A292827262524, q0);
+ CHECK_EQUAL_128(0x434241403F3E3D3C, 0x3B3A393837363534, q1);
CHECK_EQUAL_64(src_base + 1, x17);
CHECK_EQUAL_64(src_base + 1 + 32, x18);
CHECK_EQUAL_64(src_base + 2 + 48, x19);
@@ -3193,13 +3189,13 @@ TEST(neon_ld1_lane) {
RUN();
- CHECK_EQUAL_128(0x0001020304050607, 0x08090a0b0c0d0e0f, q0);
+ CHECK_EQUAL_128(0x0001020304050607, 0x08090A0B0C0D0E0F, q0);
CHECK_EQUAL_128(0x0100020103020403, 0x0504060507060807, q1);
CHECK_EQUAL_128(0x0302010004030201, 0x0504030206050403, q2);
CHECK_EQUAL_128(0x0706050403020100, 0x0807060504030201, q3);
- CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0706050003020100, q4);
- CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0100050403020100, q5);
- CHECK_EQUAL_128(0x0f0e0d0c03020100, 0x0706050403020100, q6);
+ CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x0706050003020100, q4);
+ CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x0100050403020100, q5);
+ CHECK_EQUAL_128(0x0F0E0D0C03020100, 0x0706050403020100, q6);
CHECK_EQUAL_128(0x0706050403020100, 0x0706050403020100, q7);
TEARDOWN();
@@ -3228,14 +3224,14 @@ TEST(neon_ld2_d) {
RUN();
- CHECK_EQUAL_128(0, 0x0e0c0a0806040200, q2);
- CHECK_EQUAL_128(0, 0x0f0d0b0907050301, q3);
- CHECK_EQUAL_128(0, 0x0f0d0b0907050301, q4);
- CHECK_EQUAL_128(0, 0x100e0c0a08060402, q5);
- CHECK_EQUAL_128(0, 0x0f0e0b0a07060302, q6);
- CHECK_EQUAL_128(0, 0x11100d0c09080504, q7);
- CHECK_EQUAL_128(0, 0x0e0d0c0b06050403, q31);
- CHECK_EQUAL_128(0, 0x1211100f0a090807, q0);
+ CHECK_EQUAL_128(0, 0x0E0C0A0806040200, q2);
+ CHECK_EQUAL_128(0, 0x0F0D0B0907050301, q3);
+ CHECK_EQUAL_128(0, 0x0F0D0B0907050301, q4);
+ CHECK_EQUAL_128(0, 0x100E0C0A08060402, q5);
+ CHECK_EQUAL_128(0, 0x0F0E0B0A07060302, q6);
+ CHECK_EQUAL_128(0, 0x11100D0C09080504, q7);
+ CHECK_EQUAL_128(0, 0x0E0D0C0B06050403, q31);
+ CHECK_EQUAL_128(0, 0x1211100F0A090807, q0);
TEARDOWN();
}
@@ -3266,15 +3262,15 @@ TEST(neon_ld2_d_postindex) {
RUN();
- CHECK_EQUAL_128(0, 0x0e0c0a0806040200, q2);
- CHECK_EQUAL_128(0, 0x0f0d0b0907050301, q3);
- CHECK_EQUAL_128(0, 0x0f0d0b0907050301, q4);
- CHECK_EQUAL_128(0, 0x0f0e0b0a07060302, q5);
- CHECK_EQUAL_128(0, 0x11100d0c09080504, q6);
- CHECK_EQUAL_128(0, 0x0e0d0c0b06050403, q16);
- CHECK_EQUAL_128(0, 0x1211100f0a090807, q17);
- CHECK_EQUAL_128(0, 0x0f0e0d0c07060504, q31);
- CHECK_EQUAL_128(0, 0x131211100b0a0908, q0);
+ CHECK_EQUAL_128(0, 0x0E0C0A0806040200, q2);
+ CHECK_EQUAL_128(0, 0x0F0D0B0907050301, q3);
+ CHECK_EQUAL_128(0, 0x0F0D0B0907050301, q4);
+ CHECK_EQUAL_128(0, 0x0F0E0B0A07060302, q5);
+ CHECK_EQUAL_128(0, 0x11100D0C09080504, q6);
+ CHECK_EQUAL_128(0, 0x0E0D0C0B06050403, q16);
+ CHECK_EQUAL_128(0, 0x1211100F0A090807, q17);
+ CHECK_EQUAL_128(0, 0x0F0E0D0C07060504, q31);
+ CHECK_EQUAL_128(0, 0x131211100B0A0908, q0);
CHECK_EQUAL_64(src_base + 1, x17);
CHECK_EQUAL_64(src_base + 1 + 16, x18);
@@ -3310,16 +3306,16 @@ TEST(neon_ld2_q) {
RUN();
- CHECK_EQUAL_128(0x1e1c1a1816141210, 0x0e0c0a0806040200, q2);
- CHECK_EQUAL_128(0x1f1d1b1917151311, 0x0f0d0b0907050301, q3);
- CHECK_EQUAL_128(0x1f1d1b1917151311, 0x0f0d0b0907050301, q4);
- CHECK_EQUAL_128(0x201e1c1a18161412, 0x100e0c0a08060402, q5);
- CHECK_EQUAL_128(0x1f1e1b1a17161312, 0x0f0e0b0a07060302, q6);
- CHECK_EQUAL_128(0x21201d1c19181514, 0x11100d0c09080504, q7);
- CHECK_EQUAL_128(0x1e1d1c1b16151413, 0x0e0d0c0b06050403, q16);
- CHECK_EQUAL_128(0x2221201f1a191817, 0x1211100f0a090807, q17);
- CHECK_EQUAL_128(0x1b1a191817161514, 0x0b0a090807060504, q31);
- CHECK_EQUAL_128(0x232221201f1e1d1c, 0x131211100f0e0d0c, q0);
+ CHECK_EQUAL_128(0x1E1C1A1816141210, 0x0E0C0A0806040200, q2);
+ CHECK_EQUAL_128(0x1F1D1B1917151311, 0x0F0D0B0907050301, q3);
+ CHECK_EQUAL_128(0x1F1D1B1917151311, 0x0F0D0B0907050301, q4);
+ CHECK_EQUAL_128(0x201E1C1A18161412, 0x100E0C0A08060402, q5);
+ CHECK_EQUAL_128(0x1F1E1B1A17161312, 0x0F0E0B0A07060302, q6);
+ CHECK_EQUAL_128(0x21201D1C19181514, 0x11100D0C09080504, q7);
+ CHECK_EQUAL_128(0x1E1D1C1B16151413, 0x0E0D0C0B06050403, q16);
+ CHECK_EQUAL_128(0x2221201F1A191817, 0x1211100F0A090807, q17);
+ CHECK_EQUAL_128(0x1B1A191817161514, 0x0B0A090807060504, q31);
+ CHECK_EQUAL_128(0x232221201F1E1D1C, 0x131211100F0E0D0C, q0);
TEARDOWN();
}
@@ -3350,16 +3346,16 @@ TEST(neon_ld2_q_postindex) {
RUN();
- CHECK_EQUAL_128(0x1e1c1a1816141210, 0x0e0c0a0806040200, q2);
- CHECK_EQUAL_128(0x1f1d1b1917151311, 0x0f0d0b0907050301, q3);
- CHECK_EQUAL_128(0x1f1d1b1917151311, 0x0f0d0b0907050301, q4);
- CHECK_EQUAL_128(0x201e1c1a18161412, 0x100e0c0a08060402, q5);
- CHECK_EQUAL_128(0x1f1e1b1a17161312, 0x0f0e0b0a07060302, q6);
- CHECK_EQUAL_128(0x21201d1c19181514, 0x11100d0c09080504, q7);
- CHECK_EQUAL_128(0x1e1d1c1b16151413, 0x0e0d0c0b06050403, q16);
- CHECK_EQUAL_128(0x2221201f1a191817, 0x1211100f0a090807, q17);
- CHECK_EQUAL_128(0x1b1a191817161514, 0x0b0a090807060504, q31);
- CHECK_EQUAL_128(0x232221201f1e1d1c, 0x131211100f0e0d0c, q0);
+ CHECK_EQUAL_128(0x1E1C1A1816141210, 0x0E0C0A0806040200, q2);
+ CHECK_EQUAL_128(0x1F1D1B1917151311, 0x0F0D0B0907050301, q3);
+ CHECK_EQUAL_128(0x1F1D1B1917151311, 0x0F0D0B0907050301, q4);
+ CHECK_EQUAL_128(0x201E1C1A18161412, 0x100E0C0A08060402, q5);
+ CHECK_EQUAL_128(0x1F1E1B1A17161312, 0x0F0E0B0A07060302, q6);
+ CHECK_EQUAL_128(0x21201D1C19181514, 0x11100D0C09080504, q7);
+ CHECK_EQUAL_128(0x1E1D1C1B16151413, 0x0E0D0C0B06050403, q16);
+ CHECK_EQUAL_128(0x2221201F1A191817, 0x1211100F0A090807, q17);
+ CHECK_EQUAL_128(0x1B1A191817161514, 0x0B0A090807060504, q31);
+ CHECK_EQUAL_128(0x232221201F1E1D1C, 0x131211100F0E0D0C, q0);
CHECK_EQUAL_64(src_base + 1, x17);
CHECK_EQUAL_64(src_base + 1 + 32, x18);
@@ -3430,22 +3426,22 @@ TEST(neon_ld2_lane) {
RUN();
- CHECK_EQUAL_128(0x0001020304050607, 0x08090a0b0c0d0e0f, q0);
- CHECK_EQUAL_128(0x0102030405060708, 0x090a0b0c0d0e0f10, q1);
+ CHECK_EQUAL_128(0x0001020304050607, 0x08090A0B0C0D0E0F, q0);
+ CHECK_EQUAL_128(0x0102030405060708, 0x090A0B0C0D0E0F10, q1);
CHECK_EQUAL_128(0x0100020103020403, 0x0504060507060807, q2);
- CHECK_EQUAL_128(0x0302040305040605, 0x0706080709080a09, q3);
+ CHECK_EQUAL_128(0x0302040305040605, 0x0706080709080A09, q3);
CHECK_EQUAL_128(0x0302010004030201, 0x0504030206050403, q4);
- CHECK_EQUAL_128(0x0706050408070605, 0x090807060a090807, q5);
+ CHECK_EQUAL_128(0x0706050408070605, 0x090807060A090807, q5);
CHECK_EQUAL_128(0x0706050403020100, 0x0807060504030201, q6);
- CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x100f0e0d0c0b0a09, q7);
- CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0706050003020100, q8);
- CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x1716150113121110, q9);
- CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0100050403020100, q10);
- CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x0302151413121110, q11);
- CHECK_EQUAL_128(0x0f0e0d0c03020100, 0x0706050403020100, q12);
- CHECK_EQUAL_128(0x1f1e1d1c07060504, 0x1716151413121110, q13);
+ CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x100F0E0D0C0B0A09, q7);
+ CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x0706050003020100, q8);
+ CHECK_EQUAL_128(0x1F1E1D1C1B1A1918, 0x1716150113121110, q9);
+ CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x0100050403020100, q10);
+ CHECK_EQUAL_128(0x1F1E1D1C1B1A1918, 0x0302151413121110, q11);
+ CHECK_EQUAL_128(0x0F0E0D0C03020100, 0x0706050403020100, q12);
+ CHECK_EQUAL_128(0x1F1E1D1C07060504, 0x1716151413121110, q13);
CHECK_EQUAL_128(0x0706050403020100, 0x0706050403020100, q14);
- CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x1716151413121110, q15);
+ CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x1716151413121110, q15);
TEARDOWN();
}
@@ -3516,22 +3512,22 @@ TEST(neon_ld2_lane_postindex) {
RUN();
- CHECK_EQUAL_128(0x00020406080a0c0e, 0x10121416181a1c1e, q0);
- CHECK_EQUAL_128(0x01030507090b0d0f, 0x11131517191b1d1f, q1);
- CHECK_EQUAL_128(0x0100050409080d0c, 0x1110151419181d1c, q2);
- CHECK_EQUAL_128(0x030207060b0a0f0e, 0x131217161b1a1f1e, q3);
- CHECK_EQUAL_128(0x030201000b0a0908, 0x131211101b1a1918, q4);
- CHECK_EQUAL_128(0x070605040f0e0d0c, 0x171615141f1e1d1c, q5);
+ CHECK_EQUAL_128(0x00020406080A0C0E, 0x10121416181A1C1E, q0);
+ CHECK_EQUAL_128(0x01030507090B0D0F, 0x11131517191B1D1F, q1);
+ CHECK_EQUAL_128(0x0100050409080D0C, 0x1110151419181D1C, q2);
+ CHECK_EQUAL_128(0x030207060B0A0F0E, 0x131217161B1A1F1E, q3);
+ CHECK_EQUAL_128(0x030201000B0A0908, 0x131211101B1A1918, q4);
+ CHECK_EQUAL_128(0x070605040F0E0D0C, 0x171615141F1E1D1C, q5);
CHECK_EQUAL_128(0x0706050403020100, 0x1716151413121110, q6);
- CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x1f1e1d1c1b1a1918, q7);
- CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0706050003020100, q8);
- CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x1716150113121110, q9);
- CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0100050403020100, q10);
- CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x0302151413121110, q11);
- CHECK_EQUAL_128(0x0f0e0d0c03020100, 0x0706050403020100, q12);
- CHECK_EQUAL_128(0x1f1e1d1c07060504, 0x1716151413121110, q13);
+ CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x1F1E1D1C1B1A1918, q7);
+ CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x0706050003020100, q8);
+ CHECK_EQUAL_128(0x1F1E1D1C1B1A1918, 0x1716150113121110, q9);
+ CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x0100050403020100, q10);
+ CHECK_EQUAL_128(0x1F1E1D1C1B1A1918, 0x0302151413121110, q11);
+ CHECK_EQUAL_128(0x0F0E0D0C03020100, 0x0706050403020100, q12);
+ CHECK_EQUAL_128(0x1F1E1D1C07060504, 0x1716151413121110, q13);
CHECK_EQUAL_128(0x0706050403020100, 0x0706050403020100, q14);
- CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x1716151413121110, q15);
+ CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x1716151413121110, q15);
CHECK_EQUAL_64(src_base + 32, x17);
CHECK_EQUAL_64(src_base + 32, x18);
@@ -3583,12 +3579,12 @@ TEST(neon_ld2_alllanes) {
CHECK_EQUAL_128(0x0000000000000000, 0x0706070607060706, q5);
CHECK_EQUAL_128(0x0605060506050605, 0x0605060506050605, q6);
CHECK_EQUAL_128(0x0807080708070807, 0x0807080708070807, q7);
- CHECK_EQUAL_128(0x0000000000000000, 0x0c0b0a090c0b0a09, q8);
- CHECK_EQUAL_128(0x0000000000000000, 0x100f0e0d100f0e0d, q9);
- CHECK_EQUAL_128(0x0d0c0b0a0d0c0b0a, 0x0d0c0b0a0d0c0b0a, q10);
- CHECK_EQUAL_128(0x11100f0e11100f0e, 0x11100f0e11100f0e, q11);
+ CHECK_EQUAL_128(0x0000000000000000, 0x0C0B0A090C0B0A09, q8);
+ CHECK_EQUAL_128(0x0000000000000000, 0x100F0E0D100F0E0D, q9);
+ CHECK_EQUAL_128(0x0D0C0B0A0D0C0B0A, 0x0D0C0B0A0D0C0B0A, q10);
+ CHECK_EQUAL_128(0x11100F0E11100F0E, 0x11100F0E11100F0E, q11);
CHECK_EQUAL_128(0x1918171615141312, 0x1918171615141312, q12);
- CHECK_EQUAL_128(0x21201f1e1d1c1b1a, 0x21201f1e1d1c1b1a, q13);
+ CHECK_EQUAL_128(0x21201F1E1D1C1B1A, 0x21201F1E1D1C1B1A, q13);
TEARDOWN();
}
@@ -3625,12 +3621,12 @@ TEST(neon_ld2_alllanes_postindex) {
CHECK_EQUAL_128(0x0000000000000000, 0x0706070607060706, q5);
CHECK_EQUAL_128(0x0605060506050605, 0x0605060506050605, q6);
CHECK_EQUAL_128(0x0807080708070807, 0x0807080708070807, q7);
- CHECK_EQUAL_128(0x0000000000000000, 0x0c0b0a090c0b0a09, q8);
- CHECK_EQUAL_128(0x0000000000000000, 0x100f0e0d100f0e0d, q9);
- CHECK_EQUAL_128(0x0d0c0b0a0d0c0b0a, 0x0d0c0b0a0d0c0b0a, q10);
- CHECK_EQUAL_128(0x11100f0e11100f0e, 0x11100f0e11100f0e, q11);
+ CHECK_EQUAL_128(0x0000000000000000, 0x0C0B0A090C0B0A09, q8);
+ CHECK_EQUAL_128(0x0000000000000000, 0x100F0E0D100F0E0D, q9);
+ CHECK_EQUAL_128(0x0D0C0B0A0D0C0B0A, 0x0D0C0B0A0D0C0B0A, q10);
+ CHECK_EQUAL_128(0x11100F0E11100F0E, 0x11100F0E11100F0E, q11);
CHECK_EQUAL_128(0x1918171615141312, 0x1918171615141312, q12);
- CHECK_EQUAL_128(0x21201f1e1d1c1b1a, 0x21201f1e1d1c1b1a, q13);
+ CHECK_EQUAL_128(0x21201F1E1D1C1B1A, 0x21201F1E1D1C1B1A, q13);
CHECK_EQUAL_64(src_base + 34, x17);
TEARDOWN();
@@ -3659,18 +3655,18 @@ TEST(neon_ld3_d) {
RUN();
- CHECK_EQUAL_128(0, 0x15120f0c09060300, q2);
- CHECK_EQUAL_128(0, 0x1613100d0a070401, q3);
- CHECK_EQUAL_128(0, 0x1714110e0b080502, q4);
- CHECK_EQUAL_128(0, 0x1613100d0a070401, q5);
- CHECK_EQUAL_128(0, 0x1714110e0b080502, q6);
- CHECK_EQUAL_128(0, 0x1815120f0c090603, q7);
- CHECK_EQUAL_128(0, 0x15140f0e09080302, q8);
- CHECK_EQUAL_128(0, 0x171611100b0a0504, q9);
- CHECK_EQUAL_128(0, 0x191813120d0c0706, q10);
- CHECK_EQUAL_128(0, 0x1211100f06050403, q31);
- CHECK_EQUAL_128(0, 0x161514130a090807, q0);
- CHECK_EQUAL_128(0, 0x1a1918170e0d0c0b, q1);
+ CHECK_EQUAL_128(0, 0x15120F0C09060300, q2);
+ CHECK_EQUAL_128(0, 0x1613100D0A070401, q3);
+ CHECK_EQUAL_128(0, 0x1714110E0B080502, q4);
+ CHECK_EQUAL_128(0, 0x1613100D0A070401, q5);
+ CHECK_EQUAL_128(0, 0x1714110E0B080502, q6);
+ CHECK_EQUAL_128(0, 0x1815120F0C090603, q7);
+ CHECK_EQUAL_128(0, 0x15140F0E09080302, q8);
+ CHECK_EQUAL_128(0, 0x171611100B0A0504, q9);
+ CHECK_EQUAL_128(0, 0x191813120D0C0706, q10);
+ CHECK_EQUAL_128(0, 0x1211100F06050403, q31);
+ CHECK_EQUAL_128(0, 0x161514130A090807, q0);
+ CHECK_EQUAL_128(0, 0x1A1918170E0D0C0B, q1);
TEARDOWN();
}
@@ -3701,21 +3697,21 @@ TEST(neon_ld3_d_postindex) {
RUN();
- CHECK_EQUAL_128(0, 0x15120f0c09060300, q2);
- CHECK_EQUAL_128(0, 0x1613100d0a070401, q3);
- CHECK_EQUAL_128(0, 0x1714110e0b080502, q4);
- CHECK_EQUAL_128(0, 0x1613100d0a070401, q5);
- CHECK_EQUAL_128(0, 0x1714110e0b080502, q6);
- CHECK_EQUAL_128(0, 0x1815120f0c090603, q7);
- CHECK_EQUAL_128(0, 0x15140f0e09080302, q8);
- CHECK_EQUAL_128(0, 0x171611100b0a0504, q9);
- CHECK_EQUAL_128(0, 0x191813120d0c0706, q10);
- CHECK_EQUAL_128(0, 0x1211100f06050403, q11);
- CHECK_EQUAL_128(0, 0x161514130a090807, q12);
- CHECK_EQUAL_128(0, 0x1a1918170e0d0c0b, q13);
+ CHECK_EQUAL_128(0, 0x15120F0C09060300, q2);
+ CHECK_EQUAL_128(0, 0x1613100D0A070401, q3);
+ CHECK_EQUAL_128(0, 0x1714110E0B080502, q4);
+ CHECK_EQUAL_128(0, 0x1613100D0A070401, q5);
+ CHECK_EQUAL_128(0, 0x1714110E0B080502, q6);
+ CHECK_EQUAL_128(0, 0x1815120F0C090603, q7);
+ CHECK_EQUAL_128(0, 0x15140F0E09080302, q8);
+ CHECK_EQUAL_128(0, 0x171611100B0A0504, q9);
+ CHECK_EQUAL_128(0, 0x191813120D0C0706, q10);
+ CHECK_EQUAL_128(0, 0x1211100F06050403, q11);
+ CHECK_EQUAL_128(0, 0x161514130A090807, q12);
+ CHECK_EQUAL_128(0, 0x1A1918170E0D0C0B, q13);
CHECK_EQUAL_128(0, 0x1312111007060504, q31);
- CHECK_EQUAL_128(0, 0x171615140b0a0908, q0);
- CHECK_EQUAL_128(0, 0x1b1a19180f0e0d0c, q1);
+ CHECK_EQUAL_128(0, 0x171615140B0A0908, q0);
+ CHECK_EQUAL_128(0, 0x1B1A19180F0E0D0C, q1);
CHECK_EQUAL_64(src_base + 1, x17);
CHECK_EQUAL_64(src_base + 1 + 24, x18);
@@ -3751,21 +3747,21 @@ TEST(neon_ld3_q) {
RUN();
- CHECK_EQUAL_128(0x2d2a2724211e1b18, 0x15120f0c09060300, q2);
- CHECK_EQUAL_128(0x2e2b2825221f1c19, 0x1613100d0a070401, q3);
- CHECK_EQUAL_128(0x2f2c292623201d1a, 0x1714110e0b080502, q4);
- CHECK_EQUAL_128(0x2e2b2825221f1c19, 0x1613100d0a070401, q5);
- CHECK_EQUAL_128(0x2f2c292623201d1a, 0x1714110e0b080502, q6);
- CHECK_EQUAL_128(0x302d2a2724211e1b, 0x1815120f0c090603, q7);
- CHECK_EQUAL_128(0x2d2c272621201b1a, 0x15140f0e09080302, q8);
- CHECK_EQUAL_128(0x2f2e292823221d1c, 0x171611100b0a0504, q9);
- CHECK_EQUAL_128(0x31302b2a25241f1e, 0x191813120d0c0706, q10);
- CHECK_EQUAL_128(0x2a2928271e1d1c1b, 0x1211100f06050403, q11);
- CHECK_EQUAL_128(0x2e2d2c2b2221201f, 0x161514130a090807, q12);
- CHECK_EQUAL_128(0x3231302f26252423, 0x1a1918170e0d0c0b, q13);
- CHECK_EQUAL_128(0x232221201f1e1d1c, 0x0b0a090807060504, q31);
- CHECK_EQUAL_128(0x2b2a292827262524, 0x131211100f0e0d0c, q0);
- CHECK_EQUAL_128(0x333231302f2e2d2c, 0x1b1a191817161514, q1);
+ CHECK_EQUAL_128(0x2D2A2724211E1B18, 0x15120F0C09060300, q2);
+ CHECK_EQUAL_128(0x2E2B2825221F1C19, 0x1613100D0A070401, q3);
+ CHECK_EQUAL_128(0x2F2C292623201D1A, 0x1714110E0B080502, q4);
+ CHECK_EQUAL_128(0x2E2B2825221F1C19, 0x1613100D0A070401, q5);
+ CHECK_EQUAL_128(0x2F2C292623201D1A, 0x1714110E0B080502, q6);
+ CHECK_EQUAL_128(0x302D2A2724211E1B, 0x1815120F0C090603, q7);
+ CHECK_EQUAL_128(0x2D2C272621201B1A, 0x15140F0E09080302, q8);
+ CHECK_EQUAL_128(0x2F2E292823221D1C, 0x171611100B0A0504, q9);
+ CHECK_EQUAL_128(0x31302B2A25241F1E, 0x191813120D0C0706, q10);
+ CHECK_EQUAL_128(0x2A2928271E1D1C1B, 0x1211100F06050403, q11);
+ CHECK_EQUAL_128(0x2E2D2C2B2221201F, 0x161514130A090807, q12);
+ CHECK_EQUAL_128(0x3231302F26252423, 0x1A1918170E0D0C0B, q13);
+ CHECK_EQUAL_128(0x232221201F1E1D1C, 0x0B0A090807060504, q31);
+ CHECK_EQUAL_128(0x2B2A292827262524, 0x131211100F0E0D0C, q0);
+ CHECK_EQUAL_128(0x333231302F2E2D2C, 0x1B1A191817161514, q1);
TEARDOWN();
}
@@ -3797,21 +3793,21 @@ TEST(neon_ld3_q_postindex) {
RUN();
- CHECK_EQUAL_128(0x2d2a2724211e1b18, 0x15120f0c09060300, q2);
- CHECK_EQUAL_128(0x2e2b2825221f1c19, 0x1613100d0a070401, q3);
- CHECK_EQUAL_128(0x2f2c292623201d1a, 0x1714110e0b080502, q4);
- CHECK_EQUAL_128(0x2e2b2825221f1c19, 0x1613100d0a070401, q5);
- CHECK_EQUAL_128(0x2f2c292623201d1a, 0x1714110e0b080502, q6);
- CHECK_EQUAL_128(0x302d2a2724211e1b, 0x1815120f0c090603, q7);
- CHECK_EQUAL_128(0x2d2c272621201b1a, 0x15140f0e09080302, q8);
- CHECK_EQUAL_128(0x2f2e292823221d1c, 0x171611100b0a0504, q9);
- CHECK_EQUAL_128(0x31302b2a25241f1e, 0x191813120d0c0706, q10);
- CHECK_EQUAL_128(0x2a2928271e1d1c1b, 0x1211100f06050403, q11);
- CHECK_EQUAL_128(0x2e2d2c2b2221201f, 0x161514130a090807, q12);
- CHECK_EQUAL_128(0x3231302f26252423, 0x1a1918170e0d0c0b, q13);
- CHECK_EQUAL_128(0x232221201f1e1d1c, 0x0b0a090807060504, q31);
- CHECK_EQUAL_128(0x2b2a292827262524, 0x131211100f0e0d0c, q0);
- CHECK_EQUAL_128(0x333231302f2e2d2c, 0x1b1a191817161514, q1);
+ CHECK_EQUAL_128(0x2D2A2724211E1B18, 0x15120F0C09060300, q2);
+ CHECK_EQUAL_128(0x2E2B2825221F1C19, 0x1613100D0A070401, q3);
+ CHECK_EQUAL_128(0x2F2C292623201D1A, 0x1714110E0B080502, q4);
+ CHECK_EQUAL_128(0x2E2B2825221F1C19, 0x1613100D0A070401, q5);
+ CHECK_EQUAL_128(0x2F2C292623201D1A, 0x1714110E0B080502, q6);
+ CHECK_EQUAL_128(0x302D2A2724211E1B, 0x1815120F0C090603, q7);
+ CHECK_EQUAL_128(0x2D2C272621201B1A, 0x15140F0E09080302, q8);
+ CHECK_EQUAL_128(0x2F2E292823221D1C, 0x171611100B0A0504, q9);
+ CHECK_EQUAL_128(0x31302B2A25241F1E, 0x191813120D0C0706, q10);
+ CHECK_EQUAL_128(0x2A2928271E1D1C1B, 0x1211100F06050403, q11);
+ CHECK_EQUAL_128(0x2E2D2C2B2221201F, 0x161514130A090807, q12);
+ CHECK_EQUAL_128(0x3231302F26252423, 0x1A1918170E0D0C0B, q13);
+ CHECK_EQUAL_128(0x232221201F1E1D1C, 0x0B0A090807060504, q31);
+ CHECK_EQUAL_128(0x2B2A292827262524, 0x131211100F0E0D0C, q0);
+ CHECK_EQUAL_128(0x333231302F2E2D2C, 0x1B1A191817161514, q1);
CHECK_EQUAL_64(src_base + 1, x17);
CHECK_EQUAL_64(src_base + 1 + 48, x18);
@@ -3886,24 +3882,24 @@ TEST(neon_ld3_lane) {
RUN();
- CHECK_EQUAL_128(0x0001020304050607, 0x08090a0b0c0d0e0f, q0);
- CHECK_EQUAL_128(0x0102030405060708, 0x090a0b0c0d0e0f10, q1);
- CHECK_EQUAL_128(0x0203040506070809, 0x0a0b0c0d0e0f1011, q2);
+ CHECK_EQUAL_128(0x0001020304050607, 0x08090A0B0C0D0E0F, q0);
+ CHECK_EQUAL_128(0x0102030405060708, 0x090A0B0C0D0E0F10, q1);
+ CHECK_EQUAL_128(0x0203040506070809, 0x0A0B0C0D0E0F1011, q2);
CHECK_EQUAL_128(0x0100020103020403, 0x0504060507060807, q3);
- CHECK_EQUAL_128(0x0302040305040605, 0x0706080709080a09, q4);
- CHECK_EQUAL_128(0x0504060507060807, 0x09080a090b0a0c0b, q5);
+ CHECK_EQUAL_128(0x0302040305040605, 0x0706080709080A09, q4);
+ CHECK_EQUAL_128(0x0504060507060807, 0x09080A090B0A0C0B, q5);
CHECK_EQUAL_128(0x0302010004030201, 0x0504030206050403, q6);
- CHECK_EQUAL_128(0x0706050408070605, 0x090807060a090807, q7);
- CHECK_EQUAL_128(0x0b0a09080c0b0a09, 0x0d0c0b0a0e0d0c0b, q8);
+ CHECK_EQUAL_128(0x0706050408070605, 0x090807060A090807, q7);
+ CHECK_EQUAL_128(0x0B0A09080C0B0A09, 0x0D0C0B0A0E0D0C0B, q8);
CHECK_EQUAL_128(0x0706050403020100, 0x0807060504030201, q9);
- CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x100f0e0d0c0b0a09, q10);
+ CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x100F0E0D0C0B0A09, q10);
CHECK_EQUAL_128(0x1716151413121110, 0x1817161514131211, q11);
- CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0706050003020100, q12);
- CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x1716150113121110, q13);
- CHECK_EQUAL_128(0x2f2e2d2c2b2a2928, 0x2726250223222120, q14);
- CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0100050403020100, q15);
- CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x0302151413121110, q16);
- CHECK_EQUAL_128(0x2f2e2d2c2b2a2928, 0x0504252423222120, q17);
+ CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x0706050003020100, q12);
+ CHECK_EQUAL_128(0x1F1E1D1C1B1A1918, 0x1716150113121110, q13);
+ CHECK_EQUAL_128(0x2F2E2D2C2B2A2928, 0x2726250223222120, q14);
+ CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x0100050403020100, q15);
+ CHECK_EQUAL_128(0x1F1E1D1C1B1A1918, 0x0302151413121110, q16);
+ CHECK_EQUAL_128(0x2F2E2D2C2B2A2928, 0x0504252423222120, q17);
TEARDOWN();
}
@@ -3978,29 +3974,29 @@ TEST(neon_ld3_lane_postindex) {
RUN();
- CHECK_EQUAL_128(0x000306090c0f1215, 0x181b1e2124272a2d, q0);
- CHECK_EQUAL_128(0x0104070a0d101316, 0x191c1f2225282b2e, q1);
- CHECK_EQUAL_128(0x0205080b0e111417, 0x1a1d202326292c2f, q2);
- CHECK_EQUAL_128(0x010007060d0c1312, 0x19181f1e25242b2a, q3);
- CHECK_EQUAL_128(0x030209080f0e1514, 0x1b1a212027262d2c, q4);
- CHECK_EQUAL_128(0x05040b0a11101716, 0x1d1c232229282f2e, q5);
- CHECK_EQUAL_128(0x030201000f0e0d0c, 0x1b1a191827262524, q6);
- CHECK_EQUAL_128(0x0706050413121110, 0x1f1e1d1c2b2a2928, q7);
- CHECK_EQUAL_128(0x0b0a090817161514, 0x232221202f2e2d2c, q8);
- CHECK_EQUAL_128(0x0706050403020100, 0x1f1e1d1c1b1a1918, q9);
- CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x2726252423222120, q10);
- CHECK_EQUAL_128(0x1716151413121110, 0x2f2e2d2c2b2a2928, q11);
- CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0706050003020100, q12);
- CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x1716150113121110, q13);
- CHECK_EQUAL_128(0x2f2e2d2c2b2a2928, 0x2726250223222120, q14);
- CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0100050403020100, q15);
- CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x0302151413121110, q16);
- CHECK_EQUAL_128(0x2f2e2d2c2b2a2928, 0x0504252423222120, q17);
- CHECK_EQUAL_128(0x0f0e0d0c03020100, 0x0706050403020100, q18);
- CHECK_EQUAL_128(0x1f1e1d1c07060504, 0x1716151413121110, q19);
- CHECK_EQUAL_128(0x2f2e2d2c0b0a0908, 0x2726252423222120, q20);
+ CHECK_EQUAL_128(0x000306090C0F1215, 0x181B1E2124272A2D, q0);
+ CHECK_EQUAL_128(0x0104070A0D101316, 0x191C1F2225282B2E, q1);
+ CHECK_EQUAL_128(0x0205080B0E111417, 0x1A1D202326292C2F, q2);
+ CHECK_EQUAL_128(0x010007060D0C1312, 0x19181F1E25242B2A, q3);
+ CHECK_EQUAL_128(0x030209080F0E1514, 0x1B1A212027262D2C, q4);
+ CHECK_EQUAL_128(0x05040B0A11101716, 0x1D1C232229282F2E, q5);
+ CHECK_EQUAL_128(0x030201000F0E0D0C, 0x1B1A191827262524, q6);
+ CHECK_EQUAL_128(0x0706050413121110, 0x1F1E1D1C2B2A2928, q7);
+ CHECK_EQUAL_128(0x0B0A090817161514, 0x232221202F2E2D2C, q8);
+ CHECK_EQUAL_128(0x0706050403020100, 0x1F1E1D1C1B1A1918, q9);
+ CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x2726252423222120, q10);
+ CHECK_EQUAL_128(0x1716151413121110, 0x2F2E2D2C2B2A2928, q11);
+ CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x0706050003020100, q12);
+ CHECK_EQUAL_128(0x1F1E1D1C1B1A1918, 0x1716150113121110, q13);
+ CHECK_EQUAL_128(0x2F2E2D2C2B2A2928, 0x2726250223222120, q14);
+ CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x0100050403020100, q15);
+ CHECK_EQUAL_128(0x1F1E1D1C1B1A1918, 0x0302151413121110, q16);
+ CHECK_EQUAL_128(0x2F2E2D2C2B2A2928, 0x0504252423222120, q17);
+ CHECK_EQUAL_128(0x0F0E0D0C03020100, 0x0706050403020100, q18);
+ CHECK_EQUAL_128(0x1F1E1D1C07060504, 0x1716151413121110, q19);
+ CHECK_EQUAL_128(0x2F2E2D2C0B0A0908, 0x2726252423222120, q20);
CHECK_EQUAL_128(0x0706050403020100, 0x0706050403020100, q21);
- CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x1716151413121110, q22);
+ CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x1716151413121110, q22);
CHECK_EQUAL_128(0x1716151413121110, 0x2726252423222120, q23);
CHECK_EQUAL_64(src_base + 48, x17);
@@ -4053,19 +4049,19 @@ TEST(neon_ld3_alllanes) {
CHECK_EQUAL_128(0x0606060606060606, 0x0606060606060606, q5);
CHECK_EQUAL_128(0x0000000000000000, 0x0605060506050605, q6);
CHECK_EQUAL_128(0x0000000000000000, 0x0807080708070807, q7);
- CHECK_EQUAL_128(0x0000000000000000, 0x0a090a090a090a09, q8);
+ CHECK_EQUAL_128(0x0000000000000000, 0x0A090A090A090A09, q8);
CHECK_EQUAL_128(0x0706070607060706, 0x0706070607060706, q9);
CHECK_EQUAL_128(0x0908090809080908, 0x0908090809080908, q10);
- CHECK_EQUAL_128(0x0b0a0b0a0b0a0b0a, 0x0b0a0b0a0b0a0b0a, q11);
- CHECK_EQUAL_128(0x0000000000000000, 0x0f0e0d0c0f0e0d0c, q12);
+ CHECK_EQUAL_128(0x0B0A0B0A0B0A0B0A, 0x0B0A0B0A0B0A0B0A, q11);
+ CHECK_EQUAL_128(0x0000000000000000, 0x0F0E0D0C0F0E0D0C, q12);
CHECK_EQUAL_128(0x0000000000000000, 0x1312111013121110, q13);
CHECK_EQUAL_128(0x0000000000000000, 0x1716151417161514, q14);
- CHECK_EQUAL_128(0x100f0e0d100f0e0d, 0x100f0e0d100f0e0d, q15);
+ CHECK_EQUAL_128(0x100F0E0D100F0E0D, 0x100F0E0D100F0E0D, q15);
CHECK_EQUAL_128(0x1413121114131211, 0x1413121114131211, q16);
CHECK_EQUAL_128(0x1817161518171615, 0x1817161518171615, q17);
- CHECK_EQUAL_128(0x201f1e1d1c1b1a19, 0x201f1e1d1c1b1a19, q18);
+ CHECK_EQUAL_128(0x201F1E1D1C1B1A19, 0x201F1E1D1C1B1A19, q18);
CHECK_EQUAL_128(0x2827262524232221, 0x2827262524232221, q19);
- CHECK_EQUAL_128(0x302f2e2d2c2b2a29, 0x302f2e2d2c2b2a29, q20);
+ CHECK_EQUAL_128(0x302F2E2D2C2B2A29, 0x302F2E2D2C2B2A29, q20);
TEARDOWN();
}
@@ -4104,19 +4100,19 @@ TEST(neon_ld3_alllanes_postindex) {
CHECK_EQUAL_128(0x0606060606060606, 0x0606060606060606, q5);
CHECK_EQUAL_128(0x0000000000000000, 0x0605060506050605, q6);
CHECK_EQUAL_128(0x0000000000000000, 0x0807080708070807, q7);
- CHECK_EQUAL_128(0x0000000000000000, 0x0a090a090a090a09, q8);
+ CHECK_EQUAL_128(0x0000000000000000, 0x0A090A090A090A09, q8);
CHECK_EQUAL_128(0x0706070607060706, 0x0706070607060706, q9);
CHECK_EQUAL_128(0x0908090809080908, 0x0908090809080908, q10);
- CHECK_EQUAL_128(0x0b0a0b0a0b0a0b0a, 0x0b0a0b0a0b0a0b0a, q11);
- CHECK_EQUAL_128(0x0000000000000000, 0x0f0e0d0c0f0e0d0c, q12);
+ CHECK_EQUAL_128(0x0B0A0B0A0B0A0B0A, 0x0B0A0B0A0B0A0B0A, q11);
+ CHECK_EQUAL_128(0x0000000000000000, 0x0F0E0D0C0F0E0D0C, q12);
CHECK_EQUAL_128(0x0000000000000000, 0x1312111013121110, q13);
CHECK_EQUAL_128(0x0000000000000000, 0x1716151417161514, q14);
- CHECK_EQUAL_128(0x100f0e0d100f0e0d, 0x100f0e0d100f0e0d, q15);
+ CHECK_EQUAL_128(0x100F0E0D100F0E0D, 0x100F0E0D100F0E0D, q15);
CHECK_EQUAL_128(0x1413121114131211, 0x1413121114131211, q16);
CHECK_EQUAL_128(0x1817161518171615, 0x1817161518171615, q17);
- CHECK_EQUAL_128(0x201f1e1d1c1b1a19, 0x201f1e1d1c1b1a19, q18);
+ CHECK_EQUAL_128(0x201F1E1D1C1B1A19, 0x201F1E1D1C1B1A19, q18);
CHECK_EQUAL_128(0x2827262524232221, 0x2827262524232221, q19);
- CHECK_EQUAL_128(0x302f2e2d2c2b2a29, 0x302f2e2d2c2b2a29, q20);
+ CHECK_EQUAL_128(0x302F2E2D2C2B2A29, 0x302F2E2D2C2B2A29, q20);
TEARDOWN();
}
@@ -4144,22 +4140,22 @@ TEST(neon_ld4_d) {
RUN();
- CHECK_EQUAL_128(0, 0x1c1814100c080400, q2);
- CHECK_EQUAL_128(0, 0x1d1915110d090501, q3);
- CHECK_EQUAL_128(0, 0x1e1a16120e0a0602, q4);
- CHECK_EQUAL_128(0, 0x1f1b17130f0b0703, q5);
- CHECK_EQUAL_128(0, 0x1d1915110d090501, q6);
- CHECK_EQUAL_128(0, 0x1e1a16120e0a0602, q7);
- CHECK_EQUAL_128(0, 0x1f1b17130f0b0703, q8);
- CHECK_EQUAL_128(0, 0x201c1814100c0804, q9);
- CHECK_EQUAL_128(0, 0x1b1a13120b0a0302, q10);
- CHECK_EQUAL_128(0, 0x1d1c15140d0c0504, q11);
- CHECK_EQUAL_128(0, 0x1f1e17160f0e0706, q12);
+ CHECK_EQUAL_128(0, 0x1C1814100C080400, q2);
+ CHECK_EQUAL_128(0, 0x1D1915110D090501, q3);
+ CHECK_EQUAL_128(0, 0x1E1A16120E0A0602, q4);
+ CHECK_EQUAL_128(0, 0x1F1B17130F0B0703, q5);
+ CHECK_EQUAL_128(0, 0x1D1915110D090501, q6);
+ CHECK_EQUAL_128(0, 0x1E1A16120E0A0602, q7);
+ CHECK_EQUAL_128(0, 0x1F1B17130F0B0703, q8);
+ CHECK_EQUAL_128(0, 0x201C1814100C0804, q9);
+ CHECK_EQUAL_128(0, 0x1B1A13120B0A0302, q10);
+ CHECK_EQUAL_128(0, 0x1D1C15140D0C0504, q11);
+ CHECK_EQUAL_128(0, 0x1F1E17160F0E0706, q12);
CHECK_EQUAL_128(0, 0x2120191811100908, q13);
CHECK_EQUAL_128(0, 0x1615141306050403, q30);
- CHECK_EQUAL_128(0, 0x1a1918170a090807, q31);
- CHECK_EQUAL_128(0, 0x1e1d1c1b0e0d0c0b, q0);
- CHECK_EQUAL_128(0, 0x2221201f1211100f, q1);
+ CHECK_EQUAL_128(0, 0x1A1918170A090807, q31);
+ CHECK_EQUAL_128(0, 0x1E1D1C1B0E0D0C0B, q0);
+ CHECK_EQUAL_128(0, 0x2221201F1211100F, q1);
TEARDOWN();
}
@@ -4195,25 +4191,25 @@ TEST(neon_ld4_d_postindex) {
RUN();
- CHECK_EQUAL_128(0, 0x1c1814100c080400, q2);
- CHECK_EQUAL_128(0, 0x1d1915110d090501, q3);
- CHECK_EQUAL_128(0, 0x1e1a16120e0a0602, q4);
- CHECK_EQUAL_128(0, 0x1f1b17130f0b0703, q5);
- CHECK_EQUAL_128(0, 0x1d1915110d090501, q6);
- CHECK_EQUAL_128(0, 0x1e1a16120e0a0602, q7);
- CHECK_EQUAL_128(0, 0x1f1b17130f0b0703, q8);
- CHECK_EQUAL_128(0, 0x201c1814100c0804, q9);
- CHECK_EQUAL_128(0, 0x1b1a13120b0a0302, q10);
- CHECK_EQUAL_128(0, 0x1d1c15140d0c0504, q11);
- CHECK_EQUAL_128(0, 0x1f1e17160f0e0706, q12);
+ CHECK_EQUAL_128(0, 0x1C1814100C080400, q2);
+ CHECK_EQUAL_128(0, 0x1D1915110D090501, q3);
+ CHECK_EQUAL_128(0, 0x1E1A16120E0A0602, q4);
+ CHECK_EQUAL_128(0, 0x1F1B17130F0B0703, q5);
+ CHECK_EQUAL_128(0, 0x1D1915110D090501, q6);
+ CHECK_EQUAL_128(0, 0x1E1A16120E0A0602, q7);
+ CHECK_EQUAL_128(0, 0x1F1B17130F0B0703, q8);
+ CHECK_EQUAL_128(0, 0x201C1814100C0804, q9);
+ CHECK_EQUAL_128(0, 0x1B1A13120B0A0302, q10);
+ CHECK_EQUAL_128(0, 0x1D1C15140D0C0504, q11);
+ CHECK_EQUAL_128(0, 0x1F1E17160F0E0706, q12);
CHECK_EQUAL_128(0, 0x2120191811100908, q13);
CHECK_EQUAL_128(0, 0x1615141306050403, q14);
- CHECK_EQUAL_128(0, 0x1a1918170a090807, q15);
- CHECK_EQUAL_128(0, 0x1e1d1c1b0e0d0c0b, q16);
- CHECK_EQUAL_128(0, 0x2221201f1211100f, q17);
+ CHECK_EQUAL_128(0, 0x1A1918170A090807, q15);
+ CHECK_EQUAL_128(0, 0x1E1D1C1B0E0D0C0B, q16);
+ CHECK_EQUAL_128(0, 0x2221201F1211100F, q17);
CHECK_EQUAL_128(0, 0x1716151407060504, q30);
- CHECK_EQUAL_128(0, 0x1b1a19180b0a0908, q31);
- CHECK_EQUAL_128(0, 0x1f1e1d1c0f0e0d0c, q0);
+ CHECK_EQUAL_128(0, 0x1B1A19180B0A0908, q31);
+ CHECK_EQUAL_128(0, 0x1F1E1D1C0F0E0D0C, q0);
CHECK_EQUAL_128(0, 0x2322212013121110, q1);
CHECK_EQUAL_64(src_base + 1, x17);
@@ -4249,26 +4245,26 @@ TEST(neon_ld4_q) {
RUN();
- CHECK_EQUAL_128(0x3c3834302c282420, 0x1c1814100c080400, q2);
- CHECK_EQUAL_128(0x3d3935312d292521, 0x1d1915110d090501, q3);
- CHECK_EQUAL_128(0x3e3a36322e2a2622, 0x1e1a16120e0a0602, q4);
- CHECK_EQUAL_128(0x3f3b37332f2b2723, 0x1f1b17130f0b0703, q5);
- CHECK_EQUAL_128(0x3d3935312d292521, 0x1d1915110d090501, q6);
- CHECK_EQUAL_128(0x3e3a36322e2a2622, 0x1e1a16120e0a0602, q7);
- CHECK_EQUAL_128(0x3f3b37332f2b2723, 0x1f1b17130f0b0703, q8);
- CHECK_EQUAL_128(0x403c3834302c2824, 0x201c1814100c0804, q9);
- CHECK_EQUAL_128(0x3b3a33322b2a2322, 0x1b1a13120b0a0302, q10);
- CHECK_EQUAL_128(0x3d3c35342d2c2524, 0x1d1c15140d0c0504, q11);
- CHECK_EQUAL_128(0x3f3e37362f2e2726, 0x1f1e17160f0e0706, q12);
+ CHECK_EQUAL_128(0x3C3834302C282420, 0x1C1814100C080400, q2);
+ CHECK_EQUAL_128(0x3D3935312D292521, 0x1D1915110D090501, q3);
+ CHECK_EQUAL_128(0x3E3A36322E2A2622, 0x1E1A16120E0A0602, q4);
+ CHECK_EQUAL_128(0x3F3B37332F2B2723, 0x1F1B17130F0B0703, q5);
+ CHECK_EQUAL_128(0x3D3935312D292521, 0x1D1915110D090501, q6);
+ CHECK_EQUAL_128(0x3E3A36322E2A2622, 0x1E1A16120E0A0602, q7);
+ CHECK_EQUAL_128(0x3F3B37332F2B2723, 0x1F1B17130F0B0703, q8);
+ CHECK_EQUAL_128(0x403C3834302C2824, 0x201C1814100C0804, q9);
+ CHECK_EQUAL_128(0x3B3A33322B2A2322, 0x1B1A13120B0A0302, q10);
+ CHECK_EQUAL_128(0x3D3C35342D2C2524, 0x1D1C15140D0C0504, q11);
+ CHECK_EQUAL_128(0x3F3E37362F2E2726, 0x1F1E17160F0E0706, q12);
CHECK_EQUAL_128(0x4140393831302928, 0x2120191811100908, q13);
CHECK_EQUAL_128(0x3635343326252423, 0x1615141306050403, q14);
- CHECK_EQUAL_128(0x3a3938372a292827, 0x1a1918170a090807, q15);
- CHECK_EQUAL_128(0x3e3d3c3b2e2d2c2b, 0x1e1d1c1b0e0d0c0b, q16);
- CHECK_EQUAL_128(0x4241403f3231302f, 0x2221201f1211100f, q17);
- CHECK_EQUAL_128(0x2b2a292827262524, 0x0b0a090807060504, q18);
- CHECK_EQUAL_128(0x333231302f2e2d2c, 0x131211100f0e0d0c, q19);
- CHECK_EQUAL_128(0x3b3a393837363534, 0x1b1a191817161514, q20);
- CHECK_EQUAL_128(0x434241403f3e3d3c, 0x232221201f1e1d1c, q21);
+ CHECK_EQUAL_128(0x3A3938372A292827, 0x1A1918170A090807, q15);
+ CHECK_EQUAL_128(0x3E3D3C3B2E2D2C2B, 0x1E1D1C1B0E0D0C0B, q16);
+ CHECK_EQUAL_128(0x4241403F3231302F, 0x2221201F1211100F, q17);
+ CHECK_EQUAL_128(0x2B2A292827262524, 0x0B0A090807060504, q18);
+ CHECK_EQUAL_128(0x333231302F2E2D2C, 0x131211100F0E0D0C, q19);
+ CHECK_EQUAL_128(0x3B3A393837363534, 0x1B1A191817161514, q20);
+ CHECK_EQUAL_128(0x434241403F3E3D3C, 0x232221201F1E1D1C, q21);
TEARDOWN();
}
@@ -4304,26 +4300,26 @@ TEST(neon_ld4_q_postindex) {
RUN();
- CHECK_EQUAL_128(0x3c3834302c282420, 0x1c1814100c080400, q2);
- CHECK_EQUAL_128(0x3d3935312d292521, 0x1d1915110d090501, q3);
- CHECK_EQUAL_128(0x3e3a36322e2a2622, 0x1e1a16120e0a0602, q4);
- CHECK_EQUAL_128(0x3f3b37332f2b2723, 0x1f1b17130f0b0703, q5);
- CHECK_EQUAL_128(0x3d3935312d292521, 0x1d1915110d090501, q6);
- CHECK_EQUAL_128(0x3e3a36322e2a2622, 0x1e1a16120e0a0602, q7);
- CHECK_EQUAL_128(0x3f3b37332f2b2723, 0x1f1b17130f0b0703, q8);
- CHECK_EQUAL_128(0x403c3834302c2824, 0x201c1814100c0804, q9);
- CHECK_EQUAL_128(0x3b3a33322b2a2322, 0x1b1a13120b0a0302, q10);
- CHECK_EQUAL_128(0x3d3c35342d2c2524, 0x1d1c15140d0c0504, q11);
- CHECK_EQUAL_128(0x3f3e37362f2e2726, 0x1f1e17160f0e0706, q12);
+ CHECK_EQUAL_128(0x3C3834302C282420, 0x1C1814100C080400, q2);
+ CHECK_EQUAL_128(0x3D3935312D292521, 0x1D1915110D090501, q3);
+ CHECK_EQUAL_128(0x3E3A36322E2A2622, 0x1E1A16120E0A0602, q4);
+ CHECK_EQUAL_128(0x3F3B37332F2B2723, 0x1F1B17130F0B0703, q5);
+ CHECK_EQUAL_128(0x3D3935312D292521, 0x1D1915110D090501, q6);
+ CHECK_EQUAL_128(0x3E3A36322E2A2622, 0x1E1A16120E0A0602, q7);
+ CHECK_EQUAL_128(0x3F3B37332F2B2723, 0x1F1B17130F0B0703, q8);
+ CHECK_EQUAL_128(0x403C3834302C2824, 0x201C1814100C0804, q9);
+ CHECK_EQUAL_128(0x3B3A33322B2A2322, 0x1B1A13120B0A0302, q10);
+ CHECK_EQUAL_128(0x3D3C35342D2C2524, 0x1D1C15140D0C0504, q11);
+ CHECK_EQUAL_128(0x3F3E37362F2E2726, 0x1F1E17160F0E0706, q12);
CHECK_EQUAL_128(0x4140393831302928, 0x2120191811100908, q13);
CHECK_EQUAL_128(0x3635343326252423, 0x1615141306050403, q14);
- CHECK_EQUAL_128(0x3a3938372a292827, 0x1a1918170a090807, q15);
- CHECK_EQUAL_128(0x3e3d3c3b2e2d2c2b, 0x1e1d1c1b0e0d0c0b, q16);
- CHECK_EQUAL_128(0x4241403f3231302f, 0x2221201f1211100f, q17);
- CHECK_EQUAL_128(0x2b2a292827262524, 0x0b0a090807060504, q30);
- CHECK_EQUAL_128(0x333231302f2e2d2c, 0x131211100f0e0d0c, q31);
- CHECK_EQUAL_128(0x3b3a393837363534, 0x1b1a191817161514, q0);
- CHECK_EQUAL_128(0x434241403f3e3d3c, 0x232221201f1e1d1c, q1);
+ CHECK_EQUAL_128(0x3A3938372A292827, 0x1A1918170A090807, q15);
+ CHECK_EQUAL_128(0x3E3D3C3B2E2D2C2B, 0x1E1D1C1B0E0D0C0B, q16);
+ CHECK_EQUAL_128(0x4241403F3231302F, 0x2221201F1211100F, q17);
+ CHECK_EQUAL_128(0x2B2A292827262524, 0x0B0A090807060504, q30);
+ CHECK_EQUAL_128(0x333231302F2E2D2C, 0x131211100F0E0D0C, q31);
+ CHECK_EQUAL_128(0x3B3A393837363534, 0x1B1A191817161514, q0);
+ CHECK_EQUAL_128(0x434241403F3E3D3C, 0x232221201F1E1D1C, q1);
CHECK_EQUAL_64(src_base + 1, x17);
CHECK_EQUAL_64(src_base + 1 + 64, x18);
@@ -4405,38 +4401,38 @@ TEST(neon_ld4_lane) {
RUN();
- CHECK_EQUAL_128(0x0001020304050607, 0x08090a0b0c0d0e0f, q0);
- CHECK_EQUAL_128(0x0102030405060708, 0x090a0b0c0d0e0f10, q1);
- CHECK_EQUAL_128(0x0203040506070809, 0x0a0b0c0d0e0f1011, q2);
- CHECK_EQUAL_128(0x030405060708090a, 0x0b0c0d0e0f101112, q3);
+ CHECK_EQUAL_128(0x0001020304050607, 0x08090A0B0C0D0E0F, q0);
+ CHECK_EQUAL_128(0x0102030405060708, 0x090A0B0C0D0E0F10, q1);
+ CHECK_EQUAL_128(0x0203040506070809, 0x0A0B0C0D0E0F1011, q2);
+ CHECK_EQUAL_128(0x030405060708090A, 0x0B0C0D0E0F101112, q3);
CHECK_EQUAL_128(0x0100020103020403, 0x0504060507060807, q4);
- CHECK_EQUAL_128(0x0302040305040605, 0x0706080709080a09, q5);
- CHECK_EQUAL_128(0x0504060507060807, 0x09080a090b0a0c0b, q6);
- CHECK_EQUAL_128(0x0706080709080a09, 0x0b0a0c0b0d0c0e0d, q7);
+ CHECK_EQUAL_128(0x0302040305040605, 0x0706080709080A09, q5);
+ CHECK_EQUAL_128(0x0504060507060807, 0x09080A090B0A0C0B, q6);
+ CHECK_EQUAL_128(0x0706080709080A09, 0x0B0A0C0B0D0C0E0D, q7);
CHECK_EQUAL_128(0x0302010004030201, 0x0504030206050403, q8);
- CHECK_EQUAL_128(0x0706050408070605, 0x090807060a090807, q9);
- CHECK_EQUAL_128(0x0b0a09080c0b0a09, 0x0d0c0b0a0e0d0c0b, q10);
- CHECK_EQUAL_128(0x0f0e0d0c100f0e0d, 0x11100f0e1211100f, q11);
+ CHECK_EQUAL_128(0x0706050408070605, 0x090807060A090807, q9);
+ CHECK_EQUAL_128(0x0B0A09080C0B0A09, 0x0D0C0B0A0E0D0C0B, q10);
+ CHECK_EQUAL_128(0x0F0E0D0C100F0E0D, 0x11100F0E1211100F, q11);
CHECK_EQUAL_128(0x0706050403020100, 0x0807060504030201, q12);
- CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x100f0e0d0c0b0a09, q13);
+ CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x100F0E0D0C0B0A09, q13);
CHECK_EQUAL_128(0x1716151413121110, 0x1817161514131211, q14);
- CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x201f1e1d1c1b1a19, q15);
- CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0706050003020100, q16);
- CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x1716150113121110, q17);
- CHECK_EQUAL_128(0x2f2e2d2c2b2a2928, 0x2726250223222120, q18);
- CHECK_EQUAL_128(0x3f3e3d3c3b3a3938, 0x3736350333323130, q19);
- CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0100050403020100, q20);
- CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x0302151413121110, q21);
- CHECK_EQUAL_128(0x2f2e2d2c2b2a2928, 0x0504252423222120, q22);
- CHECK_EQUAL_128(0x3f3e3d3c3b3a3938, 0x0706353433323130, q23);
- CHECK_EQUAL_128(0x0f0e0d0c03020100, 0x0706050403020100, q24);
- CHECK_EQUAL_128(0x1f1e1d1c07060504, 0x1716151413121110, q25);
- CHECK_EQUAL_128(0x2f2e2d2c0b0a0908, 0x2726252423222120, q26);
- CHECK_EQUAL_128(0x3f3e3d3c0f0e0d0c, 0x3736353433323130, q27);
+ CHECK_EQUAL_128(0x1F1E1D1C1B1A1918, 0x201F1E1D1C1B1A19, q15);
+ CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x0706050003020100, q16);
+ CHECK_EQUAL_128(0x1F1E1D1C1B1A1918, 0x1716150113121110, q17);
+ CHECK_EQUAL_128(0x2F2E2D2C2B2A2928, 0x2726250223222120, q18);
+ CHECK_EQUAL_128(0x3F3E3D3C3B3A3938, 0x3736350333323130, q19);
+ CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x0100050403020100, q20);
+ CHECK_EQUAL_128(0x1F1E1D1C1B1A1918, 0x0302151413121110, q21);
+ CHECK_EQUAL_128(0x2F2E2D2C2B2A2928, 0x0504252423222120, q22);
+ CHECK_EQUAL_128(0x3F3E3D3C3B3A3938, 0x0706353433323130, q23);
+ CHECK_EQUAL_128(0x0F0E0D0C03020100, 0x0706050403020100, q24);
+ CHECK_EQUAL_128(0x1F1E1D1C07060504, 0x1716151413121110, q25);
+ CHECK_EQUAL_128(0x2F2E2D2C0B0A0908, 0x2726252423222120, q26);
+ CHECK_EQUAL_128(0x3F3E3D3C0F0E0D0C, 0x3736353433323130, q27);
CHECK_EQUAL_128(0x0706050403020100, 0x0706050403020100, q28);
- CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x1716151413121110, q29);
+ CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x1716151413121110, q29);
CHECK_EQUAL_128(0x1716151413121110, 0x2726252423222120, q30);
- CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x3736353433323130, q31);
+ CHECK_EQUAL_128(0x1F1E1D1C1B1A1918, 0x3736353433323130, q31);
TEARDOWN();
}
@@ -4522,38 +4518,38 @@ TEST(neon_ld4_lane_postindex) {
RUN();
- CHECK_EQUAL_128(0x0004080c1014181c, 0x2024282c3034383c, q0);
- CHECK_EQUAL_128(0x0105090d1115191d, 0x2125292d3135393d, q1);
- CHECK_EQUAL_128(0x02060a0e12161a1e, 0x22262a2e32363a3e, q2);
- CHECK_EQUAL_128(0x03070b0f13171b1f, 0x23272b2f33373b3f, q3);
+ CHECK_EQUAL_128(0x0004080C1014181C, 0x2024282C3034383C, q0);
+ CHECK_EQUAL_128(0x0105090D1115191D, 0x2125292D3135393D, q1);
+ CHECK_EQUAL_128(0x02060A0E12161A1E, 0x22262A2E32363A3E, q2);
+ CHECK_EQUAL_128(0x03070B0F13171B1F, 0x23272B2F33373B3F, q3);
CHECK_EQUAL_128(0x0100090811101918, 0x2120292831303938, q4);
- CHECK_EQUAL_128(0x03020b0a13121b1a, 0x23222b2a33323b3a, q5);
- CHECK_EQUAL_128(0x05040d0c15141d1c, 0x25242d2c35343d3c, q6);
- CHECK_EQUAL_128(0x07060f0e17161f1e, 0x27262f2e37363f3e, q7);
+ CHECK_EQUAL_128(0x03020B0A13121B1A, 0x23222B2A33323B3A, q5);
+ CHECK_EQUAL_128(0x05040D0C15141D1C, 0x25242D2C35343D3C, q6);
+ CHECK_EQUAL_128(0x07060F0E17161F1E, 0x27262F2E37363F3E, q7);
CHECK_EQUAL_128(0x0302010013121110, 0x2322212033323130, q8);
CHECK_EQUAL_128(0x0706050417161514, 0x2726252437363534, q9);
- CHECK_EQUAL_128(0x0b0a09081b1a1918, 0x2b2a29283b3a3938, q10);
- CHECK_EQUAL_128(0x0f0e0d0c1f1e1d1c, 0x2f2e2d2c3f3e3d3c, q11);
+ CHECK_EQUAL_128(0x0B0A09081B1A1918, 0x2B2A29283B3A3938, q10);
+ CHECK_EQUAL_128(0x0F0E0D0C1F1E1D1C, 0x2F2E2D2C3F3E3D3C, q11);
CHECK_EQUAL_128(0x0706050403020100, 0x2726252423222120, q12);
- CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x2f2e2d2c2b2a2928, q13);
+ CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x2F2E2D2C2B2A2928, q13);
CHECK_EQUAL_128(0x1716151413121110, 0x3736353433323130, q14);
- CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x3f3e3d3c3b3a3938, q15);
- CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0706050003020100, q16);
- CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x1716150113121110, q17);
- CHECK_EQUAL_128(0x2f2e2d2c2b2a2928, 0x2726250223222120, q18);
- CHECK_EQUAL_128(0x3f3e3d3c3b3a3938, 0x3736350333323130, q19);
- CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0100050403020100, q20);
- CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x0302151413121110, q21);
- CHECK_EQUAL_128(0x2f2e2d2c2b2a2928, 0x0504252423222120, q22);
- CHECK_EQUAL_128(0x3f3e3d3c3b3a3938, 0x0706353433323130, q23);
- CHECK_EQUAL_128(0x0f0e0d0c03020100, 0x0706050403020100, q24);
- CHECK_EQUAL_128(0x1f1e1d1c07060504, 0x1716151413121110, q25);
- CHECK_EQUAL_128(0x2f2e2d2c0b0a0908, 0x2726252423222120, q26);
- CHECK_EQUAL_128(0x3f3e3d3c0f0e0d0c, 0x3736353433323130, q27);
+ CHECK_EQUAL_128(0x1F1E1D1C1B1A1918, 0x3F3E3D3C3B3A3938, q15);
+ CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x0706050003020100, q16);
+ CHECK_EQUAL_128(0x1F1E1D1C1B1A1918, 0x1716150113121110, q17);
+ CHECK_EQUAL_128(0x2F2E2D2C2B2A2928, 0x2726250223222120, q18);
+ CHECK_EQUAL_128(0x3F3E3D3C3B3A3938, 0x3736350333323130, q19);
+ CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x0100050403020100, q20);
+ CHECK_EQUAL_128(0x1F1E1D1C1B1A1918, 0x0302151413121110, q21);
+ CHECK_EQUAL_128(0x2F2E2D2C2B2A2928, 0x0504252423222120, q22);
+ CHECK_EQUAL_128(0x3F3E3D3C3B3A3938, 0x0706353433323130, q23);
+ CHECK_EQUAL_128(0x0F0E0D0C03020100, 0x0706050403020100, q24);
+ CHECK_EQUAL_128(0x1F1E1D1C07060504, 0x1716151413121110, q25);
+ CHECK_EQUAL_128(0x2F2E2D2C0B0A0908, 0x2726252423222120, q26);
+ CHECK_EQUAL_128(0x3F3E3D3C0F0E0D0C, 0x3736353433323130, q27);
CHECK_EQUAL_128(0x0706050403020100, 0x0706050403020100, q28);
- CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x1716151413121110, q29);
+ CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x1716151413121110, q29);
CHECK_EQUAL_128(0x1716151413121110, 0x2726252423222120, q30);
- CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x3736353433323130, q31);
+ CHECK_EQUAL_128(0x1F1E1D1C1B1A1918, 0x3736353433323130, q31);
CHECK_EQUAL_64(src_base + 64, x17);
CHECK_EQUAL_64(src_base + 64, x18);
@@ -4608,24 +4604,24 @@ TEST(neon_ld4_alllanes) {
CHECK_EQUAL_128(0x0808080808080808, 0x0808080808080808, q7);
CHECK_EQUAL_128(0x0000000000000000, 0x0706070607060706, q8);
CHECK_EQUAL_128(0x0000000000000000, 0x0908090809080908, q9);
- CHECK_EQUAL_128(0x0000000000000000, 0x0b0a0b0a0b0a0b0a, q10);
- CHECK_EQUAL_128(0x0000000000000000, 0x0d0c0d0c0d0c0d0c, q11);
+ CHECK_EQUAL_128(0x0000000000000000, 0x0B0A0B0A0B0A0B0A, q10);
+ CHECK_EQUAL_128(0x0000000000000000, 0x0D0C0D0C0D0C0D0C, q11);
CHECK_EQUAL_128(0x0807080708070807, 0x0807080708070807, q12);
- CHECK_EQUAL_128(0x0a090a090a090a09, 0x0a090a090a090a09, q13);
- CHECK_EQUAL_128(0x0c0b0c0b0c0b0c0b, 0x0c0b0c0b0c0b0c0b, q14);
- CHECK_EQUAL_128(0x0e0d0e0d0e0d0e0d, 0x0e0d0e0d0e0d0e0d, q15);
- CHECK_EQUAL_128(0x0000000000000000, 0x1211100f1211100f, q16);
+ CHECK_EQUAL_128(0x0A090A090A090A09, 0x0A090A090A090A09, q13);
+ CHECK_EQUAL_128(0x0C0B0C0B0C0B0C0B, 0x0C0B0C0B0C0B0C0B, q14);
+ CHECK_EQUAL_128(0x0E0D0E0D0E0D0E0D, 0x0E0D0E0D0E0D0E0D, q15);
+ CHECK_EQUAL_128(0x0000000000000000, 0x1211100F1211100F, q16);
CHECK_EQUAL_128(0x0000000000000000, 0x1615141316151413, q17);
- CHECK_EQUAL_128(0x0000000000000000, 0x1a1918171a191817, q18);
- CHECK_EQUAL_128(0x0000000000000000, 0x1e1d1c1b1e1d1c1b, q19);
+ CHECK_EQUAL_128(0x0000000000000000, 0x1A1918171A191817, q18);
+ CHECK_EQUAL_128(0x0000000000000000, 0x1E1D1C1B1E1D1C1B, q19);
CHECK_EQUAL_128(0x1312111013121110, 0x1312111013121110, q20);
CHECK_EQUAL_128(0x1716151417161514, 0x1716151417161514, q21);
- CHECK_EQUAL_128(0x1b1a19181b1a1918, 0x1b1a19181b1a1918, q22);
- CHECK_EQUAL_128(0x1f1e1d1c1f1e1d1c, 0x1f1e1d1c1f1e1d1c, q23);
+ CHECK_EQUAL_128(0x1B1A19181B1A1918, 0x1B1A19181B1A1918, q22);
+ CHECK_EQUAL_128(0x1F1E1D1C1F1E1D1C, 0x1F1E1D1C1F1E1D1C, q23);
CHECK_EQUAL_128(0x2726252423222120, 0x2726252423222120, q24);
- CHECK_EQUAL_128(0x2f2e2d2c2b2a2928, 0x2f2e2d2c2b2a2928, q25);
+ CHECK_EQUAL_128(0x2F2E2D2C2B2A2928, 0x2F2E2D2C2B2A2928, q25);
CHECK_EQUAL_128(0x3736353433323130, 0x3736353433323130, q26);
- CHECK_EQUAL_128(0x3f3e3d3c3b3a3938, 0x3f3e3d3c3b3a3938, q27);
+ CHECK_EQUAL_128(0x3F3E3D3C3B3A3938, 0x3F3E3D3C3B3A3938, q27);
TEARDOWN();
}
@@ -4673,24 +4669,24 @@ TEST(neon_ld4_alllanes_postindex) {
CHECK_EQUAL_128(0x0808080808080808, 0x0808080808080808, q7);
CHECK_EQUAL_128(0x0000000000000000, 0x0706070607060706, q8);
CHECK_EQUAL_128(0x0000000000000000, 0x0908090809080908, q9);
- CHECK_EQUAL_128(0x0000000000000000, 0x0b0a0b0a0b0a0b0a, q10);
- CHECK_EQUAL_128(0x0000000000000000, 0x0d0c0d0c0d0c0d0c, q11);
+ CHECK_EQUAL_128(0x0000000000000000, 0x0B0A0B0A0B0A0B0A, q10);
+ CHECK_EQUAL_128(0x0000000000000000, 0x0D0C0D0C0D0C0D0C, q11);
CHECK_EQUAL_128(0x0807080708070807, 0x0807080708070807, q12);
- CHECK_EQUAL_128(0x0a090a090a090a09, 0x0a090a090a090a09, q13);
- CHECK_EQUAL_128(0x0c0b0c0b0c0b0c0b, 0x0c0b0c0b0c0b0c0b, q14);
- CHECK_EQUAL_128(0x0e0d0e0d0e0d0e0d, 0x0e0d0e0d0e0d0e0d, q15);
- CHECK_EQUAL_128(0x0000000000000000, 0x1211100f1211100f, q16);
+ CHECK_EQUAL_128(0x0A090A090A090A09, 0x0A090A090A090A09, q13);
+ CHECK_EQUAL_128(0x0C0B0C0B0C0B0C0B, 0x0C0B0C0B0C0B0C0B, q14);
+ CHECK_EQUAL_128(0x0E0D0E0D0E0D0E0D, 0x0E0D0E0D0E0D0E0D, q15);
+ CHECK_EQUAL_128(0x0000000000000000, 0x1211100F1211100F, q16);
CHECK_EQUAL_128(0x0000000000000000, 0x1615141316151413, q17);
- CHECK_EQUAL_128(0x0000000000000000, 0x1a1918171a191817, q18);
- CHECK_EQUAL_128(0x0000000000000000, 0x1e1d1c1b1e1d1c1b, q19);
+ CHECK_EQUAL_128(0x0000000000000000, 0x1A1918171A191817, q18);
+ CHECK_EQUAL_128(0x0000000000000000, 0x1E1D1C1B1E1D1C1B, q19);
CHECK_EQUAL_128(0x1312111013121110, 0x1312111013121110, q20);
CHECK_EQUAL_128(0x1716151417161514, 0x1716151417161514, q21);
- CHECK_EQUAL_128(0x1b1a19181b1a1918, 0x1b1a19181b1a1918, q22);
- CHECK_EQUAL_128(0x1f1e1d1c1f1e1d1c, 0x1f1e1d1c1f1e1d1c, q23);
+ CHECK_EQUAL_128(0x1B1A19181B1A1918, 0x1B1A19181B1A1918, q22);
+ CHECK_EQUAL_128(0x1F1E1D1C1F1E1D1C, 0x1F1E1D1C1F1E1D1C, q23);
CHECK_EQUAL_128(0x2726252423222120, 0x2726252423222120, q24);
- CHECK_EQUAL_128(0x2f2e2d2c2b2a2928, 0x2f2e2d2c2b2a2928, q25);
+ CHECK_EQUAL_128(0x2F2E2D2C2B2A2928, 0x2F2E2D2C2B2A2928, q25);
CHECK_EQUAL_128(0x3736353433323130, 0x3736353433323130, q26);
- CHECK_EQUAL_128(0x3f3e3d3c3b3a3938, 0x3f3e3d3c3b3a3938, q27);
+ CHECK_EQUAL_128(0x3F3E3D3C3B3A3938, 0x3F3E3D3C3B3A3938, q27);
CHECK_EQUAL_64(src_base + 64, x17);
TEARDOWN();
@@ -4739,10 +4735,10 @@ TEST(neon_st1_lane) {
RUN();
- CHECK_EQUAL_128(0x0001020304050607, 0x08090a0b0c0d0e0f, q1);
- CHECK_EQUAL_128(0x0100030205040706, 0x09080b0a0d0c0f0e, q2);
- CHECK_EQUAL_128(0x0302010007060504, 0x0b0a09080f0e0d0c, q3);
- CHECK_EQUAL_128(0x0706050403020100, 0x0f0e0d0c0b0a0908, q4);
+ CHECK_EQUAL_128(0x0001020304050607, 0x08090A0B0C0D0E0F, q1);
+ CHECK_EQUAL_128(0x0100030205040706, 0x09080B0A0D0C0F0E, q2);
+ CHECK_EQUAL_128(0x0302010007060504, 0x0B0A09080F0E0D0C, q3);
+ CHECK_EQUAL_128(0x0706050403020100, 0x0F0E0D0C0B0A0908, q4);
TEARDOWN();
}
@@ -4759,8 +4755,8 @@ TEST(neon_st2_lane) {
START();
__ Mov(x17, dst_base);
__ Mov(x18, dst_base);
- __ Movi(v0.V2D(), 0x0001020304050607, 0x08090a0b0c0d0e0f);
- __ Movi(v1.V2D(), 0x1011121314151617, 0x18191a1b1c1d1e1f);
+ __ Movi(v0.V2D(), 0x0001020304050607, 0x08090A0B0C0D0E0F);
+ __ Movi(v1.V2D(), 0x1011121314151617, 0x18191A1B1C1D1E1F);
// Test B stores with and without post index.
for (int i = 15; i >= 0; i--) {
@@ -4818,24 +4814,24 @@ TEST(neon_st2_lane) {
RUN();
CHECK_EQUAL_128(0x1707160615051404, 0x1303120211011000, q2);
- CHECK_EQUAL_128(0x1f0f1e0e1d0d1c0c, 0x1b0b1a0a19091808, q3);
+ CHECK_EQUAL_128(0x1F0F1E0E1D0D1C0C, 0x1B0B1A0A19091808, q3);
CHECK_EQUAL_128(0x1707160615051404, 0x1303120211011000, q4);
- CHECK_EQUAL_128(0x1f0f1e0e1d0d1c0c, 0x1b0b1a0a19091808, q5);
+ CHECK_EQUAL_128(0x1F0F1E0E1D0D1C0C, 0x1B0B1A0A19091808, q5);
CHECK_EQUAL_128(0x1617060714150405, 0x1213020310110001, q6);
- CHECK_EQUAL_128(0x1e1f0e0f1c1d0c0d, 0x1a1b0a0b18190809, q7);
+ CHECK_EQUAL_128(0x1E1F0E0F1C1D0C0D, 0x1A1B0A0B18190809, q7);
CHECK_EQUAL_128(0x1617060714150405, 0x1213020310110001, q16);
- CHECK_EQUAL_128(0x1e1f0e0f1c1d0c0d, 0x1a1b0a0b18190809, q17);
+ CHECK_EQUAL_128(0x1E1F0E0F1C1D0C0D, 0x1A1B0A0B18190809, q17);
CHECK_EQUAL_128(0x1415161704050607, 0x1011121300010203, q18);
- CHECK_EQUAL_128(0x1c1d1e1f0c0d0e0f, 0x18191a1b08090a0b, q19);
+ CHECK_EQUAL_128(0x1C1D1E1F0C0D0E0F, 0x18191A1B08090A0B, q19);
CHECK_EQUAL_128(0x1415161704050607, 0x1011121300010203, q20);
- CHECK_EQUAL_128(0x1c1d1e1f0c0d0e0f, 0x18191a1b08090a0b, q21);
+ CHECK_EQUAL_128(0x1C1D1E1F0C0D0E0F, 0x18191A1B08090A0B, q21);
CHECK_EQUAL_128(0x1011121314151617, 0x0001020304050607, q22);
- CHECK_EQUAL_128(0x18191a1b1c1d1e1f, 0x08090a0b0c0d0e0f, q23);
+ CHECK_EQUAL_128(0x18191A1B1C1D1E1F, 0x08090A0B0C0D0E0F, q23);
CHECK_EQUAL_128(0x1011121314151617, 0x0001020304050607, q22);
- CHECK_EQUAL_128(0x18191a1b1c1d1e1f, 0x08090a0b0c0d0e0f, q23);
+ CHECK_EQUAL_128(0x18191A1B1C1D1E1F, 0x08090A0B0C0D0E0F, q23);
TEARDOWN();
}
@@ -4852,9 +4848,9 @@ TEST(neon_st3_lane) {
START();
__ Mov(x17, dst_base);
__ Mov(x18, dst_base);
- __ Movi(v0.V2D(), 0x0001020304050607, 0x08090a0b0c0d0e0f);
- __ Movi(v1.V2D(), 0x1011121314151617, 0x18191a1b1c1d1e1f);
- __ Movi(v2.V2D(), 0x2021222324252627, 0x28292a2b2c2d2e2f);
+ __ Movi(v0.V2D(), 0x0001020304050607, 0x08090A0B0C0D0E0F);
+ __ Movi(v1.V2D(), 0x1011121314151617, 0x18191A1B1C1D1E1F);
+ __ Movi(v2.V2D(), 0x2021222324252627, 0x28292A2B2C2D2E2F);
// Test B stores with and without post index.
for (int i = 15; i >= 0; i--) {
@@ -4916,25 +4912,25 @@ TEST(neon_st3_lane) {
RUN();
CHECK_EQUAL_128(0x0524140423130322, 0x1202211101201000, q3);
- CHECK_EQUAL_128(0x1a0a291909281808, 0x2717072616062515, q4);
- CHECK_EQUAL_128(0x2f1f0f2e1e0e2d1d, 0x0d2c1c0c2b1b0b2a, q5);
+ CHECK_EQUAL_128(0x1A0A291909281808, 0x2717072616062515, q4);
+ CHECK_EQUAL_128(0x2F1F0F2E1E0E2D1D, 0x0D2C1C0C2B1B0B2A, q5);
CHECK_EQUAL_128(0x0524140423130322, 0x1202211101201000, q6);
- CHECK_EQUAL_128(0x1a0a291909281808, 0x2717072616062515, q7);
- CHECK_EQUAL_128(0x2f1f0f2e1e0e2d1d, 0x0d2c1c0c2b1b0b2a, q16);
+ CHECK_EQUAL_128(0x1A0A291909281808, 0x2717072616062515, q7);
+ CHECK_EQUAL_128(0x2F1F0F2E1E0E2D1D, 0x0D2C1C0C2B1B0B2A, q16);
CHECK_EQUAL_128(0x1415040522231213, 0x0203202110110001, q17);
- CHECK_EQUAL_128(0x0a0b282918190809, 0x2627161706072425, q18);
- CHECK_EQUAL_128(0x2e2f1e1f0e0f2c2d, 0x1c1d0c0d2a2b1a1b, q19);
+ CHECK_EQUAL_128(0x0A0B282918190809, 0x2627161706072425, q18);
+ CHECK_EQUAL_128(0x2E2F1E1F0E0F2C2D, 0x1C1D0C0D2A2B1A1B, q19);
CHECK_EQUAL_128(0x1415040522231213, 0x0203202110110001, q20);
- CHECK_EQUAL_128(0x0a0b282918190809, 0x2627161706072425, q21);
- CHECK_EQUAL_128(0x2e2f1e1f0e0f2c2d, 0x1c1d0c0d2a2b1a1b, q22);
+ CHECK_EQUAL_128(0x0A0B282918190809, 0x2627161706072425, q21);
+ CHECK_EQUAL_128(0x2E2F1E1F0E0F2C2D, 0x1C1D0C0D2A2B1A1B, q22);
CHECK_EQUAL_128(0x0405060720212223, 0x1011121300010203, q23);
- CHECK_EQUAL_128(0x18191a1b08090a0b, 0x2425262714151617, q24);
- CHECK_EQUAL_128(0x2c2d2e2f1c1d1e1f, 0x0c0d0e0f28292a2b, q25);
+ CHECK_EQUAL_128(0x18191A1B08090A0B, 0x2425262714151617, q24);
+ CHECK_EQUAL_128(0x2C2D2E2F1C1D1E1F, 0x0C0D0E0F28292A2B, q25);
CHECK_EQUAL_128(0x0405060720212223, 0x1011121300010203, q26);
- CHECK_EQUAL_128(0x18191a1b08090a0b, 0x2425262714151617, q27);
- CHECK_EQUAL_128(0x2c2d2e2f1c1d1e1f, 0x0c0d0e0f28292a2b, q28);
+ CHECK_EQUAL_128(0x18191A1B08090A0B, 0x2425262714151617, q27);
+ CHECK_EQUAL_128(0x2C2D2E2F1C1D1E1F, 0x0C0D0E0F28292A2B, q28);
TEARDOWN();
}
@@ -4951,10 +4947,10 @@ TEST(neon_st4_lane) {
START();
__ Mov(x17, dst_base);
__ Mov(x18, dst_base);
- __ Movi(v0.V2D(), 0x0001020304050607, 0x08090a0b0c0d0e0f);
- __ Movi(v1.V2D(), 0x1011121314151617, 0x18191a1b1c1d1e1f);
- __ Movi(v2.V2D(), 0x2021222324252627, 0x28292a2b2c2d2e2f);
- __ Movi(v3.V2D(), 0x2021222324252627, 0x28292a2b2c2d2e2f);
+ __ Movi(v0.V2D(), 0x0001020304050607, 0x08090A0B0C0D0E0F);
+ __ Movi(v1.V2D(), 0x1011121314151617, 0x18191A1B1C1D1E1F);
+ __ Movi(v2.V2D(), 0x2021222324252627, 0x28292A2B2C2D2E2F);
+ __ Movi(v3.V2D(), 0x2021222324252627, 0x28292A2B2C2D2E2F);
// Test B stores without post index.
for (int i = 15; i >= 0; i--) {
@@ -5001,21 +4997,21 @@ TEST(neon_st4_lane) {
CHECK_EQUAL_128(0x2323130322221202, 0x2121110120201000, q4);
CHECK_EQUAL_128(0x2727170726261606, 0x2525150524241404, q5);
- CHECK_EQUAL_128(0x2b2b1b0b2a2a1a0a, 0x2929190928281808, q6);
- CHECK_EQUAL_128(0x2f2f1f0f2e2e1e0e, 0x2d2d1d0d2c2c1c0c, q7);
+ CHECK_EQUAL_128(0x2B2B1B0B2A2A1A0A, 0x2929190928281808, q6);
+ CHECK_EQUAL_128(0x2F2F1F0F2E2E1E0E, 0x2D2D1D0D2C2C1C0C, q7);
CHECK_EQUAL_128(0x2223222312130203, 0x2021202110110001, q16);
CHECK_EQUAL_128(0x2627262716170607, 0x2425242514150405, q17);
- CHECK_EQUAL_128(0x2a2b2a2b1a1b0a0b, 0x2829282918190809, q18);
- CHECK_EQUAL_128(0x2e2f2e2f1e1f0e0f, 0x2c2d2c2d1c1d0c0d, q19);
+ CHECK_EQUAL_128(0x2A2B2A2B1A1B0A0B, 0x2829282918190809, q18);
+ CHECK_EQUAL_128(0x2E2F2E2F1E1F0E0F, 0x2C2D2C2D1C1D0C0D, q19);
CHECK_EQUAL_128(0x2021222320212223, 0x1011121300010203, q20);
CHECK_EQUAL_128(0x2425262724252627, 0x1415161704050607, q21);
- CHECK_EQUAL_128(0x28292a2b28292a2b, 0x18191a1b08090a0b, q22);
- CHECK_EQUAL_128(0x2c2d2e2f2c2d2e2f, 0x1c1d1e1f0c0d0e0f, q23);
+ CHECK_EQUAL_128(0x28292A2B28292A2B, 0x18191A1B08090A0B, q22);
+ CHECK_EQUAL_128(0x2C2D2E2F2C2D2E2F, 0x1C1D1E1F0C0D0E0F, q23);
- CHECK_EQUAL_128(0x18191a1b1c1d1e1f, 0x08090a0b0c0d0e0f, q24);
- CHECK_EQUAL_128(0x28292a2b2c2d2e2f, 0x28292a2b2c2d2e2f, q25);
+ CHECK_EQUAL_128(0x18191A1B1C1D1E1F, 0x08090A0B0C0D0E0F, q24);
+ CHECK_EQUAL_128(0x28292A2B2C2D2E2F, 0x28292A2B2C2D2E2F, q25);
CHECK_EQUAL_128(0x1011121314151617, 0x0001020304050607, q26);
CHECK_EQUAL_128(0x2021222324252627, 0x2021222324252627, q27);
@@ -5080,13 +5076,13 @@ TEST(neon_ld1_lane_postindex) {
RUN();
- CHECK_EQUAL_128(0x0001020304050607, 0x08090a0b0c0d0e0f, q0);
- CHECK_EQUAL_128(0x0100030205040706, 0x09080b0a0d0c0f0e, q1);
- CHECK_EQUAL_128(0x0302010007060504, 0x0b0a09080f0e0d0c, q2);
- CHECK_EQUAL_128(0x0706050403020100, 0x0f0e0d0c0b0a0908, q3);
- CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0706050003020100, q4);
- CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0100050403020100, q5);
- CHECK_EQUAL_128(0x0f0e0d0c03020100, 0x0706050403020100, q6);
+ CHECK_EQUAL_128(0x0001020304050607, 0x08090A0B0C0D0E0F, q0);
+ CHECK_EQUAL_128(0x0100030205040706, 0x09080B0A0D0C0F0E, q1);
+ CHECK_EQUAL_128(0x0302010007060504, 0x0B0A09080F0E0D0C, q2);
+ CHECK_EQUAL_128(0x0706050403020100, 0x0F0E0D0C0B0A0908, q3);
+ CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x0706050003020100, q4);
+ CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x0100050403020100, q5);
+ CHECK_EQUAL_128(0x0F0E0D0C03020100, 0x0706050403020100, q6);
CHECK_EQUAL_128(0x0706050403020100, 0x0706050403020100, q7);
CHECK_EQUAL_64(src_base + 16, x17);
CHECK_EQUAL_64(src_base + 16, x18);
@@ -5139,10 +5135,10 @@ TEST(neon_st1_lane_postindex) {
RUN();
- CHECK_EQUAL_128(0x0001020304050607, 0x08090a0b0c0d0e0f, q1);
- CHECK_EQUAL_128(0x0100030205040706, 0x09080b0a0d0c0f0e, q2);
- CHECK_EQUAL_128(0x0302010007060504, 0x0b0a09080f0e0d0c, q3);
- CHECK_EQUAL_128(0x0706050403020100, 0x0f0e0d0c0b0a0908, q4);
+ CHECK_EQUAL_128(0x0001020304050607, 0x08090A0B0C0D0E0F, q1);
+ CHECK_EQUAL_128(0x0100030205040706, 0x09080B0A0D0C0F0E, q2);
+ CHECK_EQUAL_128(0x0302010007060504, 0x0B0A09080F0E0D0C, q3);
+ CHECK_EQUAL_128(0x0706050403020100, 0x0F0E0D0C0B0A0908, q4);
TEARDOWN();
}
@@ -5184,8 +5180,8 @@ TEST(neon_ld1_alllanes) {
CHECK_EQUAL_128(0x0504050405040504, 0x0504050405040504, q3);
CHECK_EQUAL_128(0, 0x0807060508070605, q4);
CHECK_EQUAL_128(0x0908070609080706, 0x0908070609080706, q5);
- CHECK_EQUAL_128(0, 0x0e0d0c0b0a090807, q6);
- CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0f0e0d0c0b0a0908, q7);
+ CHECK_EQUAL_128(0, 0x0E0D0C0B0A090807, q6);
+ CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x0F0E0D0C0B0A0908, q7);
TEARDOWN();
}
@@ -5219,8 +5215,8 @@ TEST(neon_ld1_alllanes_postindex) {
CHECK_EQUAL_128(0, 0x0403040304030403, q2);
CHECK_EQUAL_128(0x0504050405040504, 0x0504050405040504, q3);
CHECK_EQUAL_128(0, 0x0908070609080706, q4);
- CHECK_EQUAL_128(0x0a0908070a090807, 0x0a0908070a090807, q5);
- CHECK_EQUAL_128(0x1211100f0e0d0c0b, 0x1211100f0e0d0c0b, q6);
+ CHECK_EQUAL_128(0x0A0908070A090807, 0x0A0908070A090807, q5);
+ CHECK_EQUAL_128(0x1211100F0E0D0C0B, 0x1211100F0E0D0C0B, q6);
CHECK_EQUAL_64(src_base + 19, x17);
TEARDOWN();
@@ -5266,10 +5262,10 @@ TEST(neon_st1_d) {
RUN();
- CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0706050403020100, q0);
- CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x1716151413121110, q1);
- CHECK_EQUAL_128(0x2f2e2d2c2b2a2928, 0x2726252423222120, q2);
- CHECK_EQUAL_128(0x3f3e3d3c3b3a3938, 0x3736353433323130, q3);
+ CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x0706050403020100, q0);
+ CHECK_EQUAL_128(0x1F1E1D1C1B1A1918, 0x1716151413121110, q1);
+ CHECK_EQUAL_128(0x2F2E2D2C2B2A2928, 0x2726252423222120, q2);
+ CHECK_EQUAL_128(0x3F3E3D3C3B3A3938, 0x3736353433323130, q3);
CHECK_EQUAL_128(0, 0x0706050403020100, q16);
CHECK_EQUAL_128(0x1716151413121110, 0x0706050403020100, q17);
CHECK_EQUAL_128(0, 0x0706050403020100, q18);
@@ -5380,16 +5376,16 @@ TEST(neon_st1_q) {
RUN();
- CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0706050403020100, q16);
- CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0706050403020100, q17);
- CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x1716151413121110, q18);
- CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0706050403020100, q19);
- CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x1716151413121110, q20);
- CHECK_EQUAL_128(0x2f2e2d2c2b2a2928, 0x2726252423222120, q21);
- CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0706050403020100, q22);
- CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x1716151413121110, q23);
- CHECK_EQUAL_128(0x2f2e2d2c2b2a2928, 0x2726252423222120, q24);
- CHECK_EQUAL_128(0x3f3e3d3c3b3a3938, 0x3736353433323130, q25);
+ CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x0706050403020100, q16);
+ CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x0706050403020100, q17);
+ CHECK_EQUAL_128(0x1F1E1D1C1B1A1918, 0x1716151413121110, q18);
+ CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x0706050403020100, q19);
+ CHECK_EQUAL_128(0x1F1E1D1C1B1A1918, 0x1716151413121110, q20);
+ CHECK_EQUAL_128(0x2F2E2D2C2B2A2928, 0x2726252423222120, q21);
+ CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x0706050403020100, q22);
+ CHECK_EQUAL_128(0x1F1E1D1C1B1A1918, 0x1716151413121110, q23);
+ CHECK_EQUAL_128(0x2F2E2D2C2B2A2928, 0x2726252423222120, q24);
+ CHECK_EQUAL_128(0x3F3E3D3C3B3A3938, 0x3736353433323130, q25);
TEARDOWN();
}
@@ -5438,16 +5434,16 @@ TEST(neon_st1_q_postindex) {
RUN();
- CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0706050403020100, q16);
- CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0706050403020100, q17);
- CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x1716151413121110, q18);
- CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0706050403020100, q19);
- CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x1716151413121110, q20);
- CHECK_EQUAL_128(0x2f2e2d2c2b2a2928, 0x2726252423222120, q21);
- CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0706050403020100, q22);
- CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x1716151413121110, q23);
- CHECK_EQUAL_128(0x2f2e2d2c2b2a2928, 0x2726252423222120, q24);
- CHECK_EQUAL_128(0x3f3e3d3c3b3a3938, 0x3736353433323130, q25);
+ CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x0706050403020100, q16);
+ CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x0706050403020100, q17);
+ CHECK_EQUAL_128(0x1F1E1D1C1B1A1918, 0x1716151413121110, q18);
+ CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x0706050403020100, q19);
+ CHECK_EQUAL_128(0x1F1E1D1C1B1A1918, 0x1716151413121110, q20);
+ CHECK_EQUAL_128(0x2F2E2D2C2B2A2928, 0x2726252423222120, q21);
+ CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x0706050403020100, q22);
+ CHECK_EQUAL_128(0x1F1E1D1C1B1A1918, 0x1716151413121110, q23);
+ CHECK_EQUAL_128(0x2F2E2D2C2B2A2928, 0x2726252423222120, q24);
+ CHECK_EQUAL_128(0x3F3E3D3C3B3A3938, 0x3736353433323130, q25);
TEARDOWN();
}
@@ -5487,7 +5483,7 @@ TEST(neon_st2_d) {
CHECK_EQUAL_128(0x1707160615051404, 0x1303120211011000, q0);
CHECK_EQUAL_128(0x0504131203021110, 0x0100151413121110, q1);
CHECK_EQUAL_128(0x1615140706050413, 0x1211100302010014, q2);
- CHECK_EQUAL_128(0x3f3e3d3c3b3a3938, 0x3736353433323117, q3);
+ CHECK_EQUAL_128(0x3F3E3D3C3B3A3938, 0x3736353433323117, q3);
TEARDOWN();
}
@@ -5524,7 +5520,7 @@ TEST(neon_st2_d_postindex) {
CHECK_EQUAL_128(0x1405041312030211, 0x1001000211011000, q0);
CHECK_EQUAL_128(0x0605041312111003, 0x0201001716070615, q1);
- CHECK_EQUAL_128(0x2f2e2d2c2b2a2928, 0x2726251716151407, q2);
+ CHECK_EQUAL_128(0x2F2E2D2C2B2A2928, 0x2726251716151407, q2);
TEARDOWN();
}
@@ -5564,9 +5560,9 @@ TEST(neon_st2_q) {
RUN();
CHECK_EQUAL_128(0x1312030211100100, 0x1303120211011000, q0);
- CHECK_EQUAL_128(0x01000b0a19180908, 0x1716070615140504, q1);
+ CHECK_EQUAL_128(0x01000B0A19180908, 0x1716070615140504, q1);
CHECK_EQUAL_128(0x1716151413121110, 0x0706050403020100, q2);
- CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x0f0e0d0c0b0a0908, q3);
+ CHECK_EQUAL_128(0x1F1E1D1C1B1A1918, 0x0F0E0D0C0B0A0908, q3);
TEARDOWN();
}
@@ -5604,10 +5600,10 @@ TEST(neon_st2_q_postindex) {
RUN();
CHECK_EQUAL_128(0x1405041312030211, 0x1001000211011000, q0);
- CHECK_EQUAL_128(0x1c0d0c1b1a0b0a19, 0x1809081716070615, q1);
- CHECK_EQUAL_128(0x0504030201001003, 0x0201001f1e0f0e1d, q2);
- CHECK_EQUAL_128(0x0d0c0b0a09081716, 0x1514131211100706, q3);
- CHECK_EQUAL_128(0x4f4e4d4c4b4a1f1e, 0x1d1c1b1a19180f0e, q4);
+ CHECK_EQUAL_128(0x1C0D0C1B1A0B0A19, 0x1809081716070615, q1);
+ CHECK_EQUAL_128(0x0504030201001003, 0x0201001F1E0F0E1D, q2);
+ CHECK_EQUAL_128(0x0D0C0B0A09081716, 0x1514131211100706, q3);
+ CHECK_EQUAL_128(0x4F4E4D4C4B4A1F1E, 0x1D1C1B1A19180F0E, q4);
TEARDOWN();
}
@@ -5644,7 +5640,7 @@ TEST(neon_st3_d) {
RUN();
CHECK_EQUAL_128(0x2221201312111003, 0x0201000100201000, q0);
- CHECK_EQUAL_128(0x1f1e1d2726252417, 0x1615140706050423, q1);
+ CHECK_EQUAL_128(0x1F1E1D2726252417, 0x1615140706050423, q1);
TEARDOWN();
}
@@ -5684,7 +5680,7 @@ TEST(neon_st3_d_postindex) {
CHECK_EQUAL_128(0x2213120302212011, 0x1001001101201000, q0);
CHECK_EQUAL_128(0x0201002726171607, 0x0625241514050423, q1);
CHECK_EQUAL_128(0x1615140706050423, 0x2221201312111003, q2);
- CHECK_EQUAL_128(0x3f3e3d3c3b3a3938, 0x3736352726252417, q3);
+ CHECK_EQUAL_128(0x3F3E3D3C3B3A3938, 0x3736352726252417, q3);
TEARDOWN();
}
@@ -5730,8 +5726,8 @@ TEST(neon_st3_q) {
CHECK_EQUAL_128(0x0605042322212013, 0x1211100302010023, q1);
CHECK_EQUAL_128(0x1007060504030201, 0x0025241716151407, q2);
CHECK_EQUAL_128(0x0827262524232221, 0x2017161514131211, q3);
- CHECK_EQUAL_128(0x281f1e1d1c1b1a19, 0x180f0e0d0c0b0a09, q4);
- CHECK_EQUAL_128(0x5f5e5d5c5b5a5958, 0x572f2e2d2c2b2a29, q5);
+ CHECK_EQUAL_128(0x281F1E1D1C1B1A19, 0x180F0E0D0C0B0A09, q4);
+ CHECK_EQUAL_128(0x5F5E5D5C5B5A5958, 0x572F2E2D2C2B2A29, q5);
TEARDOWN();
}
@@ -5774,11 +5770,11 @@ TEST(neon_st3_q_postindex) {
CHECK_EQUAL_128(0x2213120302212011, 0x1001001101201000, q0);
CHECK_EQUAL_128(0x1809082726171607, 0x0625241514050423, q1);
- CHECK_EQUAL_128(0x0e2d2c1d1c0d0c2b, 0x2a1b1a0b0a292819, q2);
- CHECK_EQUAL_128(0x0504030201001003, 0x0201002f2e1f1e0f, q3);
+ CHECK_EQUAL_128(0x0E2D2C1D1C0D0C2B, 0x2A1B1A0B0A292819, q2);
+ CHECK_EQUAL_128(0x0504030201001003, 0x0201002F2E1F1E0F, q3);
CHECK_EQUAL_128(0x2524232221201716, 0x1514131211100706, q4);
- CHECK_EQUAL_128(0x1d1c1b1a19180f0e, 0x0d0c0b0a09082726, q5);
- CHECK_EQUAL_128(0x6f6e6d6c6b6a2f2e, 0x2d2c2b2a29281f1e, q6);
+ CHECK_EQUAL_128(0x1D1C1B1A19180F0E, 0x0D0C0B0A09082726, q5);
+ CHECK_EQUAL_128(0x6F6E6D6C6B6A2F2E, 0x2D2C2B2A29281F1E, q6);
TEARDOWN();
}
@@ -5820,7 +5816,7 @@ TEST(neon_st4_d) {
	CHECK_EQUAL_128(0x1110010032221202, 0x3121110130201000, q0);
	CHECK_EQUAL_128(0x1003020100322322, 0x1312030231302120, q1);
	CHECK_EQUAL_128(0x1407060504333231, 0x3023222120131211, q2);
- CHECK_EQUAL_128(0x3f3e3d3c3b373635, 0x3427262524171615, q3);
+ CHECK_EQUAL_128(0x3F3E3D3C3B373635, 0x3427262524171615, q3);
TEARDOWN();
}
@@ -5865,7 +5861,7 @@ TEST(neon_st4_d_postindex) {
CHECK_EQUAL_128(0x1607063534252415, 0x1405043332232213, q1);
CHECK_EQUAL_128(0x2221201312111003, 0x0201003736272617, q2);
CHECK_EQUAL_128(0x2625241716151407, 0x0605043332313023, q3);
- CHECK_EQUAL_128(0x4f4e4d4c4b4a4948, 0x4746453736353427, q4);
+ CHECK_EQUAL_128(0x4F4E4D4C4B4A4948, 0x4746453736353427, q4);
TEARDOWN();
}
@@ -5914,9 +5910,9 @@ TEST(neon_st4_q) {
CHECK_EQUAL_128(0x3231302322212013, 0x1211100302010013, q1);
CHECK_EQUAL_128(0x1007060504030201, 0x0015140706050433, q2);
CHECK_EQUAL_128(0x3027262524232221, 0x2017161514131211, q3);
- CHECK_EQUAL_128(0x180f0e0d0c0b0a09, 0x0837363534333231, q4);
- CHECK_EQUAL_128(0x382f2e2d2c2b2a29, 0x281f1e1d1c1b1a19, q5);
- CHECK_EQUAL_128(0x6f6e6d6c6b6a6968, 0x673f3e3d3c3b3a39, q6);
+ CHECK_EQUAL_128(0x180F0E0D0C0B0A09, 0x0837363534333231, q4);
+ CHECK_EQUAL_128(0x382F2E2D2C2B2A29, 0x281F1E1D1C1B1A19, q5);
+ CHECK_EQUAL_128(0x6F6E6D6C6B6A6968, 0x673F3E3D3C3B3A39, q6);
TEARDOWN();
}
@@ -5965,13 +5961,13 @@ TEST(neon_st4_q_postindex) {
CHECK_EQUAL_128(0x1203023130212011, 0x1001000130201000, q0);
CHECK_EQUAL_128(0x1607063534252415, 0x1405043332232213, q1);
- CHECK_EQUAL_128(0x1a0b0a3938292819, 0x1809083736272617, q2);
- CHECK_EQUAL_128(0x1e0f0e3d3c2d2c1d, 0x1c0d0c3b3a2b2a1b, q3);
- CHECK_EQUAL_128(0x0504030201001003, 0x0201003f3e2f2e1f, q4);
+ CHECK_EQUAL_128(0x1A0B0A3938292819, 0x1809083736272617, q2);
+ CHECK_EQUAL_128(0x1E0F0E3D3C2D2C1D, 0x1C0D0C3B3A2B2A1B, q3);
+ CHECK_EQUAL_128(0x0504030201001003, 0x0201003F3E2F2E1F, q4);
CHECK_EQUAL_128(0x2524232221201716, 0x1514131211100706, q5);
- CHECK_EQUAL_128(0x0d0c0b0a09083736, 0x3534333231302726, q6);
- CHECK_EQUAL_128(0x2d2c2b2a29281f1e, 0x1d1c1b1a19180f0e, q7);
- CHECK_EQUAL_128(0x8f8e8d8c8b8a3f3e, 0x3d3c3b3a39382f2e, q8);
+ CHECK_EQUAL_128(0x0D0C0B0A09083736, 0x3534333231302726, q6);
+ CHECK_EQUAL_128(0x2D2C2B2A29281F1E, 0x1D1C1B1A19180F0E, q7);
+ CHECK_EQUAL_128(0x8F8E8D8C8B8A3F3E, 0x3D3C3B3A39382F2E, q8);
TEARDOWN();
}
@@ -6047,11 +6043,11 @@ TEST(neon_destructive_tbl) {
SETUP();
START();
- __ Movi(v0.V2D(), 0x0041424334353627, 0x28291a1b1c0d0e0f);
- __ Movi(v1.V2D(), 0xafaeadacabaaa9a8, 0xa7a6a5a4a3a2a1a0);
- __ Movi(v2.V2D(), 0xbfbebdbcbbbab9b8, 0xb7b6b5b4b3b2b1b0);
- __ Movi(v3.V2D(), 0xcfcecdcccbcac9c8, 0xc7c6c5c4c3c2c1c0);
- __ Movi(v4.V2D(), 0xdfdedddcdbdad9d8, 0xd7d6d5d4d3d2d1d0);
+ __ Movi(v0.V2D(), 0x0041424334353627, 0x28291A1B1C0D0E0F);
+ __ Movi(v1.V2D(), 0xAFAEADACABAAA9A8, 0xA7A6A5A4A3A2A1A0);
+ __ Movi(v2.V2D(), 0xBFBEBDBCBBBAB9B8, 0xB7B6B5B4B3B2B1B0);
+ __ Movi(v3.V2D(), 0xCFCECDCCCBCAC9C8, 0xC7C6C5C4C3C2C1C0);
+ __ Movi(v4.V2D(), 0xDFDEDDDCDBDAD9D8, 0xD7D6D5D4D3D2D1D0);
__ Movi(v16.V2D(), 0x5555555555555555, 0x5555555555555555);
__ Tbl(v16.V16B(), v1.V16B(), v0.V16B());
@@ -6081,15 +6077,15 @@ TEST(neon_destructive_tbl) {
RUN();
- CHECK_EQUAL_128(0xa000000000000000, 0x0000000000adaeaf, q16);
- CHECK_EQUAL_128(0xa000000000000000, 0x0000000000adaeaf, q17);
- CHECK_EQUAL_128(0xa000000000000000, 0x0000000000adaeaf, q18);
- CHECK_EQUAL_128(0x0f00000000000000, 0x0000000000424100, q19);
+ CHECK_EQUAL_128(0xA000000000000000, 0x0000000000ADAEAF, q16);
+ CHECK_EQUAL_128(0xA000000000000000, 0x0000000000ADAEAF, q17);
+ CHECK_EQUAL_128(0xA000000000000000, 0x0000000000ADAEAF, q18);
+ CHECK_EQUAL_128(0x0F00000000000000, 0x0000000000424100, q19);
- CHECK_EQUAL_128(0xa0000000d4d5d6c7, 0xc8c9babbbcadaeaf, q20);
- CHECK_EQUAL_128(0xa0000000d4d5d6c7, 0xc8c9babbbcadaeaf, q21);
- CHECK_EQUAL_128(0xa0000000d4d5d6c7, 0xc8c9babbbcadaeaf, q22);
- CHECK_EQUAL_128(0x0f000000c4c5c6b7, 0xb8b9aaabac424100, q26);
+ CHECK_EQUAL_128(0xA0000000D4D5D6C7, 0xC8C9BABBBCADAEAF, q20);
+ CHECK_EQUAL_128(0xA0000000D4D5D6C7, 0xC8C9BABBBCADAEAF, q21);
+ CHECK_EQUAL_128(0xA0000000D4D5D6C7, 0xC8C9BABBBCADAEAF, q22);
+ CHECK_EQUAL_128(0x0F000000C4C5C6B7, 0xB8B9AAABAC424100, q26);
TEARDOWN();
}
@@ -6099,11 +6095,11 @@ TEST(neon_destructive_tbx) {
SETUP();
START();
- __ Movi(v0.V2D(), 0x0041424334353627, 0x28291a1b1c0d0e0f);
- __ Movi(v1.V2D(), 0xafaeadacabaaa9a8, 0xa7a6a5a4a3a2a1a0);
- __ Movi(v2.V2D(), 0xbfbebdbcbbbab9b8, 0xb7b6b5b4b3b2b1b0);
- __ Movi(v3.V2D(), 0xcfcecdcccbcac9c8, 0xc7c6c5c4c3c2c1c0);
- __ Movi(v4.V2D(), 0xdfdedddcdbdad9d8, 0xd7d6d5d4d3d2d1d0);
+ __ Movi(v0.V2D(), 0x0041424334353627, 0x28291A1B1C0D0E0F);
+ __ Movi(v1.V2D(), 0xAFAEADACABAAA9A8, 0xA7A6A5A4A3A2A1A0);
+ __ Movi(v2.V2D(), 0xBFBEBDBCBBBAB9B8, 0xB7B6B5B4B3B2B1B0);
+ __ Movi(v3.V2D(), 0xCFCECDCCCBCAC9C8, 0xC7C6C5C4C3C2C1C0);
+ __ Movi(v4.V2D(), 0xDFDEDDDCDBDAD9D8, 0xD7D6D5D4D3D2D1D0);
__ Movi(v16.V2D(), 0x5555555555555555, 0x5555555555555555);
__ Tbx(v16.V16B(), v1.V16B(), v0.V16B());
@@ -6133,15 +6129,15 @@ TEST(neon_destructive_tbx) {
RUN();
- CHECK_EQUAL_128(0xa055555555555555, 0x5555555555adaeaf, q16);
- CHECK_EQUAL_128(0xa041424334353627, 0x28291a1b1cadaeaf, q17);
- CHECK_EQUAL_128(0xa0aeadacabaaa9a8, 0xa7a6a5a4a3adaeaf, q18);
- CHECK_EQUAL_128(0x0f41424334353627, 0x28291a1b1c424100, q19);
+ CHECK_EQUAL_128(0xA055555555555555, 0x5555555555ADAEAF, q16);
+ CHECK_EQUAL_128(0xA041424334353627, 0x28291A1B1CADAEAF, q17);
+ CHECK_EQUAL_128(0xA0AEADACABAAA9A8, 0xA7A6A5A4A3ADAEAF, q18);
+ CHECK_EQUAL_128(0x0F41424334353627, 0x28291A1B1C424100, q19);
- CHECK_EQUAL_128(0xa0555555d4d5d6c7, 0xc8c9babbbcadaeaf, q20);
- CHECK_EQUAL_128(0xa0414243d4d5d6c7, 0xc8c9babbbcadaeaf, q21);
- CHECK_EQUAL_128(0xa0aeadacd4d5d6c7, 0xc8c9babbbcadaeaf, q22);
- CHECK_EQUAL_128(0x0f414243c4c5c6b7, 0xb8b9aaabac424100, q26);
+ CHECK_EQUAL_128(0xA0555555D4D5D6C7, 0xC8C9BABBBCADAEAF, q20);
+ CHECK_EQUAL_128(0xA0414243D4D5D6C7, 0xC8C9BABBBCADAEAF, q21);
+ CHECK_EQUAL_128(0xA0AEADACD4D5D6C7, 0xC8C9BABBBCADAEAF, q22);
+ CHECK_EQUAL_128(0x0F414243C4C5C6B7, 0xB8B9AAABAC424100, q26);
TEARDOWN();
}
@@ -6151,7 +6147,7 @@ TEST(neon_destructive_fcvtl) {
SETUP();
START();
- __ Movi(v0.V2D(), 0x400000003f800000, 0xbf800000c0000000);
+ __ Movi(v0.V2D(), 0x400000003F800000, 0xBF800000C0000000);
__ Fcvtl(v16.V2D(), v0.V2S());
__ Fcvtl2(v17.V2D(), v0.V4S());
__ Mov(v18, v0);
@@ -6159,7 +6155,7 @@ TEST(neon_destructive_fcvtl) {
__ Fcvtl(v18.V2D(), v18.V2S());
__ Fcvtl2(v19.V2D(), v19.V4S());
- __ Movi(v1.V2D(), 0x40003c003c004000, 0xc000bc00bc00c000);
+ __ Movi(v1.V2D(), 0x40003C003C004000, 0xC000BC00BC00C000);
__ Fcvtl(v20.V4S(), v1.V4H());
__ Fcvtl2(v21.V4S(), v1.V8H());
__ Mov(v22, v1);
@@ -6171,15 +6167,15 @@ TEST(neon_destructive_fcvtl) {
RUN();
- CHECK_EQUAL_128(0xbff0000000000000, 0xc000000000000000, q16);
- CHECK_EQUAL_128(0x4000000000000000, 0x3ff0000000000000, q17);
- CHECK_EQUAL_128(0xbff0000000000000, 0xc000000000000000, q18);
- CHECK_EQUAL_128(0x4000000000000000, 0x3ff0000000000000, q19);
+ CHECK_EQUAL_128(0xBFF0000000000000, 0xC000000000000000, q16);
+ CHECK_EQUAL_128(0x4000000000000000, 0x3FF0000000000000, q17);
+ CHECK_EQUAL_128(0xBFF0000000000000, 0xC000000000000000, q18);
+ CHECK_EQUAL_128(0x4000000000000000, 0x3FF0000000000000, q19);
- CHECK_EQUAL_128(0xc0000000bf800000, 0xbf800000c0000000, q20);
- CHECK_EQUAL_128(0x400000003f800000, 0x3f80000040000000, q21);
- CHECK_EQUAL_128(0xc0000000bf800000, 0xbf800000c0000000, q22);
- CHECK_EQUAL_128(0x400000003f800000, 0x3f80000040000000, q23);
+ CHECK_EQUAL_128(0xC0000000BF800000, 0xBF800000C0000000, q20);
+ CHECK_EQUAL_128(0x400000003F800000, 0x3F80000040000000, q21);
+ CHECK_EQUAL_128(0xC0000000BF800000, 0xBF800000C0000000, q22);
+ CHECK_EQUAL_128(0x400000003F800000, 0x3F80000040000000, q23);
TEARDOWN();
}
@@ -6247,8 +6243,8 @@ TEST(ldp_stp_double) {
TEST(ldp_stp_quad) {
SETUP();
- uint64_t src[4] = {0x0123456789abcdef, 0xaaaaaaaa55555555, 0xfedcba9876543210,
- 0x55555555aaaaaaaa};
+ uint64_t src[4] = {0x0123456789ABCDEF, 0xAAAAAAAA55555555, 0xFEDCBA9876543210,
+ 0x55555555AAAAAAAA};
uint64_t dst[6] = {0, 0, 0, 0, 0, 0};
uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
@@ -6262,14 +6258,14 @@ TEST(ldp_stp_quad) {
RUN();
- CHECK_EQUAL_128(0xaaaaaaaa55555555, 0x0123456789abcdef, q31);
- CHECK_EQUAL_128(0x55555555aaaaaaaa, 0xfedcba9876543210, q0);
+ CHECK_EQUAL_128(0xAAAAAAAA55555555, 0x0123456789ABCDEF, q31);
+ CHECK_EQUAL_128(0x55555555AAAAAAAA, 0xFEDCBA9876543210, q0);
CHECK_EQUAL_64(0, dst[0]);
CHECK_EQUAL_64(0, dst[1]);
- CHECK_EQUAL_64(0xfedcba9876543210, dst[2]);
- CHECK_EQUAL_64(0x55555555aaaaaaaa, dst[3]);
- CHECK_EQUAL_64(0x0123456789abcdef, dst[4]);
- CHECK_EQUAL_64(0xaaaaaaaa55555555, dst[5]);
+ CHECK_EQUAL_64(0xFEDCBA9876543210, dst[2]);
+ CHECK_EQUAL_64(0x55555555AAAAAAAA, dst[3]);
+ CHECK_EQUAL_64(0x0123456789ABCDEF, dst[4]);
+ CHECK_EQUAL_64(0xAAAAAAAA55555555, dst[5]);
CHECK_EQUAL_64(src_base + 4 * sizeof(src[0]), x16);
CHECK_EQUAL_64(dst_base + 2 * sizeof(dst[1]), x17);
@@ -6280,8 +6276,8 @@ TEST(ldp_stp_offset) {
INIT_V8();
SETUP();
- uint64_t src[3] = {0x0011223344556677UL, 0x8899aabbccddeeffUL,
- 0xffeeddccbbaa9988UL};
+ uint64_t src[3] = {0x0011223344556677UL, 0x8899AABBCCDDEEFFUL,
+ 0xFFEEDDCCBBAA9988UL};
uint64_t dst[7] = {0, 0, 0, 0, 0, 0, 0};
uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
@@ -6309,19 +6305,19 @@ TEST(ldp_stp_offset) {
CHECK_EQUAL_64(0x00112233, x1);
CHECK_EQUAL_64(0x0011223344556677UL, dst[0]);
CHECK_EQUAL_64(0x00112233, x2);
- CHECK_EQUAL_64(0xccddeeff, x3);
- CHECK_EQUAL_64(0xccddeeff00112233UL, dst[1]);
- CHECK_EQUAL_64(0x8899aabbccddeeffUL, x4);
- CHECK_EQUAL_64(0x8899aabbccddeeffUL, dst[2]);
- CHECK_EQUAL_64(0xffeeddccbbaa9988UL, x5);
- CHECK_EQUAL_64(0xffeeddccbbaa9988UL, dst[3]);
- CHECK_EQUAL_64(0x8899aabb, x6);
- CHECK_EQUAL_64(0xbbaa9988, x7);
- CHECK_EQUAL_64(0xbbaa99888899aabbUL, dst[4]);
- CHECK_EQUAL_64(0x8899aabbccddeeffUL, x8);
- CHECK_EQUAL_64(0x8899aabbccddeeffUL, dst[5]);
- CHECK_EQUAL_64(0xffeeddccbbaa9988UL, x9);
- CHECK_EQUAL_64(0xffeeddccbbaa9988UL, dst[6]);
+ CHECK_EQUAL_64(0xCCDDEEFF, x3);
+ CHECK_EQUAL_64(0xCCDDEEFF00112233UL, dst[1]);
+ CHECK_EQUAL_64(0x8899AABBCCDDEEFFUL, x4);
+ CHECK_EQUAL_64(0x8899AABBCCDDEEFFUL, dst[2]);
+ CHECK_EQUAL_64(0xFFEEDDCCBBAA9988UL, x5);
+ CHECK_EQUAL_64(0xFFEEDDCCBBAA9988UL, dst[3]);
+ CHECK_EQUAL_64(0x8899AABB, x6);
+ CHECK_EQUAL_64(0xBBAA9988, x7);
+ CHECK_EQUAL_64(0xBBAA99888899AABBUL, dst[4]);
+ CHECK_EQUAL_64(0x8899AABBCCDDEEFFUL, x8);
+ CHECK_EQUAL_64(0x8899AABBCCDDEEFFUL, dst[5]);
+ CHECK_EQUAL_64(0xFFEEDDCCBBAA9988UL, x9);
+ CHECK_EQUAL_64(0xFFEEDDCCBBAA9988UL, dst[6]);
CHECK_EQUAL_64(src_base, x16);
CHECK_EQUAL_64(dst_base, x17);
CHECK_EQUAL_64(src_base + 24, x18);
@@ -6335,8 +6331,8 @@ TEST(ldp_stp_offset_wide) {
INIT_V8();
SETUP();
- uint64_t src[3] = {0x0011223344556677, 0x8899aabbccddeeff,
- 0xffeeddccbbaa9988};
+ uint64_t src[3] = {0x0011223344556677, 0x8899AABBCCDDEEFF,
+ 0xFFEEDDCCBBAA9988};
uint64_t dst[7] = {0, 0, 0, 0, 0, 0, 0};
uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
@@ -6367,19 +6363,19 @@ TEST(ldp_stp_offset_wide) {
CHECK_EQUAL_64(0x00112233, x1);
CHECK_EQUAL_64(0x0011223344556677UL, dst[0]);
CHECK_EQUAL_64(0x00112233, x2);
- CHECK_EQUAL_64(0xccddeeff, x3);
- CHECK_EQUAL_64(0xccddeeff00112233UL, dst[1]);
- CHECK_EQUAL_64(0x8899aabbccddeeffUL, x4);
- CHECK_EQUAL_64(0x8899aabbccddeeffUL, dst[2]);
- CHECK_EQUAL_64(0xffeeddccbbaa9988UL, x5);
- CHECK_EQUAL_64(0xffeeddccbbaa9988UL, dst[3]);
- CHECK_EQUAL_64(0x8899aabb, x6);
- CHECK_EQUAL_64(0xbbaa9988, x7);
- CHECK_EQUAL_64(0xbbaa99888899aabbUL, dst[4]);
- CHECK_EQUAL_64(0x8899aabbccddeeffUL, x8);
- CHECK_EQUAL_64(0x8899aabbccddeeffUL, dst[5]);
- CHECK_EQUAL_64(0xffeeddccbbaa9988UL, x9);
- CHECK_EQUAL_64(0xffeeddccbbaa9988UL, dst[6]);
+ CHECK_EQUAL_64(0xCCDDEEFF, x3);
+ CHECK_EQUAL_64(0xCCDDEEFF00112233UL, dst[1]);
+ CHECK_EQUAL_64(0x8899AABBCCDDEEFFUL, x4);
+ CHECK_EQUAL_64(0x8899AABBCCDDEEFFUL, dst[2]);
+ CHECK_EQUAL_64(0xFFEEDDCCBBAA9988UL, x5);
+ CHECK_EQUAL_64(0xFFEEDDCCBBAA9988UL, dst[3]);
+ CHECK_EQUAL_64(0x8899AABB, x6);
+ CHECK_EQUAL_64(0xBBAA9988, x7);
+ CHECK_EQUAL_64(0xBBAA99888899AABBUL, dst[4]);
+ CHECK_EQUAL_64(0x8899AABBCCDDEEFFUL, x8);
+ CHECK_EQUAL_64(0x8899AABBCCDDEEFFUL, dst[5]);
+ CHECK_EQUAL_64(0xFFEEDDCCBBAA9988UL, x9);
+ CHECK_EQUAL_64(0xFFEEDDCCBBAA9988UL, dst[6]);
CHECK_EQUAL_64(src_base - base_offset, x20);
CHECK_EQUAL_64(dst_base - base_offset, x21);
CHECK_EQUAL_64(src_base + base_offset + 24, x18);
@@ -6393,8 +6389,8 @@ TEST(ldp_stp_preindex) {
INIT_V8();
SETUP();
- uint64_t src[3] = {0x0011223344556677UL, 0x8899aabbccddeeffUL,
- 0xffeeddccbbaa9988UL};
+ uint64_t src[3] = {0x0011223344556677UL, 0x8899AABBCCDDEEFFUL,
+ 0xFFEEDDCCBBAA9988UL};
uint64_t dst[5] = {0, 0, 0, 0, 0};
uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
@@ -6420,17 +6416,17 @@ TEST(ldp_stp_preindex) {
RUN();
CHECK_EQUAL_64(0x00112233, x0);
- CHECK_EQUAL_64(0xccddeeff, x1);
+ CHECK_EQUAL_64(0xCCDDEEFF, x1);
CHECK_EQUAL_64(0x44556677, x2);
CHECK_EQUAL_64(0x00112233, x3);
- CHECK_EQUAL_64(0xccddeeff00112233UL, dst[0]);
+ CHECK_EQUAL_64(0xCCDDEEFF00112233UL, dst[0]);
CHECK_EQUAL_64(0x0000000000112233UL, dst[1]);
- CHECK_EQUAL_64(0x8899aabbccddeeffUL, x4);
- CHECK_EQUAL_64(0xffeeddccbbaa9988UL, x5);
+ CHECK_EQUAL_64(0x8899AABBCCDDEEFFUL, x4);
+ CHECK_EQUAL_64(0xFFEEDDCCBBAA9988UL, x5);
CHECK_EQUAL_64(0x0011223344556677UL, x6);
- CHECK_EQUAL_64(0x8899aabbccddeeffUL, x7);
- CHECK_EQUAL_64(0xffeeddccbbaa9988UL, dst[2]);
- CHECK_EQUAL_64(0x8899aabbccddeeffUL, dst[3]);
+ CHECK_EQUAL_64(0x8899AABBCCDDEEFFUL, x7);
+ CHECK_EQUAL_64(0xFFEEDDCCBBAA9988UL, dst[2]);
+ CHECK_EQUAL_64(0x8899AABBCCDDEEFFUL, dst[3]);
CHECK_EQUAL_64(0x0011223344556677UL, dst[4]);
CHECK_EQUAL_64(src_base, x16);
CHECK_EQUAL_64(dst_base, x17);
@@ -6448,8 +6444,8 @@ TEST(ldp_stp_preindex_wide) {
INIT_V8();
SETUP();
- uint64_t src[3] = {0x0011223344556677, 0x8899aabbccddeeff,
- 0xffeeddccbbaa9988};
+ uint64_t src[3] = {0x0011223344556677, 0x8899AABBCCDDEEFF,
+ 0xFFEEDDCCBBAA9988};
uint64_t dst[5] = {0, 0, 0, 0, 0};
uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
@@ -6483,17 +6479,17 @@ TEST(ldp_stp_preindex_wide) {
RUN();
CHECK_EQUAL_64(0x00112233, x0);
- CHECK_EQUAL_64(0xccddeeff, x1);
+ CHECK_EQUAL_64(0xCCDDEEFF, x1);
CHECK_EQUAL_64(0x44556677, x2);
CHECK_EQUAL_64(0x00112233, x3);
- CHECK_EQUAL_64(0xccddeeff00112233UL, dst[0]);
+ CHECK_EQUAL_64(0xCCDDEEFF00112233UL, dst[0]);
CHECK_EQUAL_64(0x0000000000112233UL, dst[1]);
- CHECK_EQUAL_64(0x8899aabbccddeeffUL, x4);
- CHECK_EQUAL_64(0xffeeddccbbaa9988UL, x5);
+ CHECK_EQUAL_64(0x8899AABBCCDDEEFFUL, x4);
+ CHECK_EQUAL_64(0xFFEEDDCCBBAA9988UL, x5);
CHECK_EQUAL_64(0x0011223344556677UL, x6);
- CHECK_EQUAL_64(0x8899aabbccddeeffUL, x7);
- CHECK_EQUAL_64(0xffeeddccbbaa9988UL, dst[2]);
- CHECK_EQUAL_64(0x8899aabbccddeeffUL, dst[3]);
+ CHECK_EQUAL_64(0x8899AABBCCDDEEFFUL, x7);
+ CHECK_EQUAL_64(0xFFEEDDCCBBAA9988UL, dst[2]);
+ CHECK_EQUAL_64(0x8899AABBCCDDEEFFUL, dst[3]);
CHECK_EQUAL_64(0x0011223344556677UL, dst[4]);
CHECK_EQUAL_64(src_base, x24);
CHECK_EQUAL_64(dst_base, x25);
@@ -6511,8 +6507,8 @@ TEST(ldp_stp_postindex) {
INIT_V8();
SETUP();
- uint64_t src[4] = {0x0011223344556677UL, 0x8899aabbccddeeffUL,
- 0xffeeddccbbaa9988UL, 0x7766554433221100UL};
+ uint64_t src[4] = {0x0011223344556677UL, 0x8899AABBCCDDEEFFUL,
+ 0xFFEEDDCCBBAA9988UL, 0x7766554433221100UL};
uint64_t dst[5] = {0, 0, 0, 0, 0};
uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
@@ -6540,15 +6536,15 @@ TEST(ldp_stp_postindex) {
CHECK_EQUAL_64(0x44556677, x0);
CHECK_EQUAL_64(0x00112233, x1);
CHECK_EQUAL_64(0x00112233, x2);
- CHECK_EQUAL_64(0xccddeeff, x3);
+ CHECK_EQUAL_64(0xCCDDEEFF, x3);
CHECK_EQUAL_64(0x4455667700112233UL, dst[0]);
CHECK_EQUAL_64(0x0000000000112233UL, dst[1]);
CHECK_EQUAL_64(0x0011223344556677UL, x4);
- CHECK_EQUAL_64(0x8899aabbccddeeffUL, x5);
- CHECK_EQUAL_64(0x8899aabbccddeeffUL, x6);
- CHECK_EQUAL_64(0xffeeddccbbaa9988UL, x7);
- CHECK_EQUAL_64(0xffeeddccbbaa9988UL, dst[2]);
- CHECK_EQUAL_64(0x8899aabbccddeeffUL, dst[3]);
+ CHECK_EQUAL_64(0x8899AABBCCDDEEFFUL, x5);
+ CHECK_EQUAL_64(0x8899AABBCCDDEEFFUL, x6);
+ CHECK_EQUAL_64(0xFFEEDDCCBBAA9988UL, x7);
+ CHECK_EQUAL_64(0xFFEEDDCCBBAA9988UL, dst[2]);
+ CHECK_EQUAL_64(0x8899AABBCCDDEEFFUL, dst[3]);
CHECK_EQUAL_64(0x0011223344556677UL, dst[4]);
CHECK_EQUAL_64(src_base, x16);
CHECK_EQUAL_64(dst_base, x17);
@@ -6566,7 +6562,7 @@ TEST(ldp_stp_postindex_wide) {
INIT_V8();
SETUP();
- uint64_t src[4] = {0x0011223344556677, 0x8899aabbccddeeff, 0xffeeddccbbaa9988,
+ uint64_t src[4] = {0x0011223344556677, 0x8899AABBCCDDEEFF, 0xFFEEDDCCBBAA9988,
0x7766554433221100};
uint64_t dst[5] = {0, 0, 0, 0, 0};
uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
@@ -6603,15 +6599,15 @@ TEST(ldp_stp_postindex_wide) {
CHECK_EQUAL_64(0x44556677, x0);
CHECK_EQUAL_64(0x00112233, x1);
CHECK_EQUAL_64(0x00112233, x2);
- CHECK_EQUAL_64(0xccddeeff, x3);
+ CHECK_EQUAL_64(0xCCDDEEFF, x3);
CHECK_EQUAL_64(0x4455667700112233UL, dst[0]);
CHECK_EQUAL_64(0x0000000000112233UL, dst[1]);
CHECK_EQUAL_64(0x0011223344556677UL, x4);
- CHECK_EQUAL_64(0x8899aabbccddeeffUL, x5);
- CHECK_EQUAL_64(0x8899aabbccddeeffUL, x6);
- CHECK_EQUAL_64(0xffeeddccbbaa9988UL, x7);
- CHECK_EQUAL_64(0xffeeddccbbaa9988UL, dst[2]);
- CHECK_EQUAL_64(0x8899aabbccddeeffUL, dst[3]);
+ CHECK_EQUAL_64(0x8899AABBCCDDEEFFUL, x5);
+ CHECK_EQUAL_64(0x8899AABBCCDDEEFFUL, x6);
+ CHECK_EQUAL_64(0xFFEEDDCCBBAA9988UL, x7);
+ CHECK_EQUAL_64(0xFFEEDDCCBBAA9988UL, dst[2]);
+ CHECK_EQUAL_64(0x8899AABBCCDDEEFFUL, dst[3]);
CHECK_EQUAL_64(0x0011223344556677UL, dst[4]);
CHECK_EQUAL_64(src_base + base_offset, x24);
CHECK_EQUAL_64(dst_base - base_offset, x25);
@@ -6629,7 +6625,7 @@ TEST(ldp_sign_extend) {
INIT_V8();
SETUP();
- uint32_t src[2] = {0x80000000, 0x7fffffff};
+ uint32_t src[2] = {0x80000000, 0x7FFFFFFF};
uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
START();
@@ -6639,8 +6635,8 @@ TEST(ldp_sign_extend) {
RUN();
- CHECK_EQUAL_64(0xffffffff80000000UL, x0);
- CHECK_EQUAL_64(0x000000007fffffffUL, x1);
+ CHECK_EQUAL_64(0xFFFFFFFF80000000UL, x0);
+ CHECK_EQUAL_64(0x000000007FFFFFFFUL, x1);
TEARDOWN();
}
@@ -6650,7 +6646,7 @@ TEST(ldur_stur) {
INIT_V8();
SETUP();
- int64_t src[2] = {0x0123456789abcdefUL, 0x0123456789abcdefUL};
+ int64_t src[2] = {0x0123456789ABCDEFUL, 0x0123456789ABCDEFUL};
int64_t dst[5] = {0, 0, 0, 0, 0};
uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
@@ -6673,13 +6669,13 @@ TEST(ldur_stur) {
RUN();
- CHECK_EQUAL_64(0x6789abcd, x0);
- CHECK_EQUAL_64(0x6789abcd0000L, dst[0]);
- CHECK_EQUAL_64(0xabcdef0123456789L, x1);
- CHECK_EQUAL_64(0xcdef012345678900L, dst[1]);
- CHECK_EQUAL_64(0x000000ab, dst[2]);
- CHECK_EQUAL_64(0xabcdef01, x2);
- CHECK_EQUAL_64(0x00abcdef01000000L, dst[3]);
+ CHECK_EQUAL_64(0x6789ABCD, x0);
+ CHECK_EQUAL_64(0x6789ABCD0000L, dst[0]);
+ CHECK_EQUAL_64(0xABCDEF0123456789L, x1);
+ CHECK_EQUAL_64(0xCDEF012345678900L, dst[1]);
+ CHECK_EQUAL_64(0x000000AB, dst[2]);
+ CHECK_EQUAL_64(0xABCDEF01, x2);
+ CHECK_EQUAL_64(0x00ABCDEF01000000L, dst[3]);
CHECK_EQUAL_64(0x00000001, x3);
CHECK_EQUAL_64(0x0100000000000000L, dst[4]);
CHECK_EQUAL_64(src_base, x17);
@@ -6696,7 +6692,7 @@ TEST(ldr_pcrel_large_offset) {
START();
- __ Ldr(x1, Immediate(0x1234567890abcdefUL));
+ __ Ldr(x1, Immediate(0x1234567890ABCDEFUL));
{
v8::internal::PatchingAssembler::BlockPoolsScope scope(&masm);
@@ -6706,14 +6702,14 @@ TEST(ldr_pcrel_large_offset) {
}
}
- __ Ldr(x2, Immediate(0x1234567890abcdefUL));
+ __ Ldr(x2, Immediate(0x1234567890ABCDEFUL));
END();
RUN();
- CHECK_EQUAL_64(0x1234567890abcdefUL, x1);
- CHECK_EQUAL_64(0x1234567890abcdefUL, x2);
+ CHECK_EQUAL_64(0x1234567890ABCDEFUL, x1);
+ CHECK_EQUAL_64(0x1234567890ABCDEFUL, x2);
TEARDOWN();
}
@@ -6723,13 +6719,13 @@ TEST(ldr_literal) {
SETUP();
START();
- __ Ldr(x2, Immediate(0x1234567890abcdefUL));
+ __ Ldr(x2, Immediate(0x1234567890ABCDEFUL));
__ Ldr(d13, 1.234);
END();
RUN();
- CHECK_EQUAL_64(0x1234567890abcdefUL, x2);
+ CHECK_EQUAL_64(0x1234567890ABCDEFUL, x2);
CHECK_EQUAL_FP64(1.234, d13);
TEARDOWN();
@@ -6762,7 +6758,7 @@ static void LdrLiteralRangeHelper(int range_, LiteralPoolEmitOption option,
__ CheckConstPool(true, true);
CHECK_CONSTANT_POOL_SIZE(0);
- __ Ldr(x0, Immediate(0x1234567890abcdefUL));
+ __ Ldr(x0, Immediate(0x1234567890ABCDEFUL));
__ Ldr(d0, 1.234);
CHECK_CONSTANT_POOL_SIZE(16);
@@ -6803,7 +6799,7 @@ static void LdrLiteralRangeHelper(int range_, LiteralPoolEmitOption option,
CHECK_CONSTANT_POOL_SIZE(0);
// These loads should be after the pool (and will require a new one).
- __ Ldr(x4, Immediate(0x34567890abcdef12UL));
+ __ Ldr(x4, Immediate(0x34567890ABCDEF12UL));
__ Ldr(d4, 123.4);
CHECK_CONSTANT_POOL_SIZE(16);
END();
@@ -6811,9 +6807,9 @@ static void LdrLiteralRangeHelper(int range_, LiteralPoolEmitOption option,
RUN();
// Check that the literals loaded correctly.
- CHECK_EQUAL_64(0x1234567890abcdefUL, x0);
+ CHECK_EQUAL_64(0x1234567890ABCDEFUL, x0);
CHECK_EQUAL_FP64(1.234, d0);
- CHECK_EQUAL_64(0x34567890abcdef12UL, x4);
+ CHECK_EQUAL_64(0x34567890ABCDEF12UL, x4);
CHECK_EQUAL_FP64(123.4, d4);
TEARDOWN();
@@ -6857,17 +6853,17 @@ TEST(add_sub_imm) {
START();
__ Mov(x0, 0x0);
__ Mov(x1, 0x1111);
- __ Mov(x2, 0xffffffffffffffffL);
+ __ Mov(x2, 0xFFFFFFFFFFFFFFFFL);
__ Mov(x3, 0x8000000000000000L);
__ Add(x10, x0, Operand(0x123));
__ Add(x11, x1, Operand(0x122000));
- __ Add(x12, x0, Operand(0xabc << 12));
+ __ Add(x12, x0, Operand(0xABC << 12));
__ Add(x13, x2, Operand(1));
__ Add(w14, w0, Operand(0x123));
__ Add(w15, w1, Operand(0x122000));
- __ Add(w16, w0, Operand(0xabc << 12));
+ __ Add(w16, w0, Operand(0xABC << 12));
__ Add(w17, w2, Operand(1));
__ Sub(x20, x0, Operand(0x1));
@@ -6885,23 +6881,23 @@ TEST(add_sub_imm) {
CHECK_EQUAL_64(0x123, x10);
CHECK_EQUAL_64(0x123111, x11);
- CHECK_EQUAL_64(0xabc000, x12);
+ CHECK_EQUAL_64(0xABC000, x12);
CHECK_EQUAL_64(0x0, x13);
CHECK_EQUAL_32(0x123, w14);
CHECK_EQUAL_32(0x123111, w15);
- CHECK_EQUAL_32(0xabc000, w16);
+ CHECK_EQUAL_32(0xABC000, w16);
CHECK_EQUAL_32(0x0, w17);
- CHECK_EQUAL_64(0xffffffffffffffffL, x20);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFL, x20);
CHECK_EQUAL_64(0x1000, x21);
CHECK_EQUAL_64(0x111, x22);
- CHECK_EQUAL_64(0x7fffffffffffffffL, x23);
+ CHECK_EQUAL_64(0x7FFFFFFFFFFFFFFFL, x23);
- CHECK_EQUAL_32(0xffffffff, w24);
+ CHECK_EQUAL_32(0xFFFFFFFF, w24);
CHECK_EQUAL_32(0x1000, w25);
CHECK_EQUAL_32(0x111, w26);
- CHECK_EQUAL_32(0xffffffff, w27);
+ CHECK_EQUAL_32(0xFFFFFFFF, w27);
TEARDOWN();
}
@@ -6915,22 +6911,22 @@ TEST(add_sub_wide_imm) {
__ Mov(x0, 0x0);
__ Mov(x1, 0x1);
- __ Add(x10, x0, Operand(0x1234567890abcdefUL));
- __ Add(x11, x1, Operand(0xffffffff));
+ __ Add(x10, x0, Operand(0x1234567890ABCDEFUL));
+ __ Add(x11, x1, Operand(0xFFFFFFFF));
__ Add(w12, w0, Operand(0x12345678));
- __ Add(w13, w1, Operand(0xffffffff));
+ __ Add(w13, w1, Operand(0xFFFFFFFF));
__ Add(w18, w0, Operand(kWMinInt));
__ Sub(w19, w0, Operand(kWMinInt));
- __ Sub(x20, x0, Operand(0x1234567890abcdefUL));
+ __ Sub(x20, x0, Operand(0x1234567890ABCDEFUL));
__ Sub(w21, w0, Operand(0x12345678));
END();
RUN();
- CHECK_EQUAL_64(0x1234567890abcdefUL, x10);
+ CHECK_EQUAL_64(0x1234567890ABCDEFUL, x10);
CHECK_EQUAL_64(0x100000000UL, x11);
CHECK_EQUAL_32(0x12345678, w12);
@@ -6939,7 +6935,7 @@ TEST(add_sub_wide_imm) {
CHECK_EQUAL_32(kWMinInt, w18);
CHECK_EQUAL_32(kWMinInt, w19);
- CHECK_EQUAL_64(-0x1234567890abcdefUL, x20);
+ CHECK_EQUAL_64(-0x1234567890ABCDEFUL, x20);
CHECK_EQUAL_32(-0x12345678, w21);
TEARDOWN();
@@ -6952,9 +6948,9 @@ TEST(add_sub_shifted) {
START();
__ Mov(x0, 0);
- __ Mov(x1, 0x0123456789abcdefL);
- __ Mov(x2, 0xfedcba9876543210L);
- __ Mov(x3, 0xffffffffffffffffL);
+ __ Mov(x1, 0x0123456789ABCDEFL);
+ __ Mov(x2, 0xFEDCBA9876543210L);
+ __ Mov(x3, 0xFFFFFFFFFFFFFFFFL);
__ Add(x10, x1, Operand(x2));
__ Add(x11, x0, Operand(x1, LSL, 8));
@@ -6977,23 +6973,23 @@ TEST(add_sub_shifted) {
RUN();
- CHECK_EQUAL_64(0xffffffffffffffffL, x10);
- CHECK_EQUAL_64(0x23456789abcdef00L, x11);
- CHECK_EQUAL_64(0x000123456789abcdL, x12);
- CHECK_EQUAL_64(0x000123456789abcdL, x13);
- CHECK_EQUAL_64(0xfffedcba98765432L, x14);
- CHECK_EQUAL_64(0xff89abcd, x15);
- CHECK_EQUAL_64(0xef89abcc, x18);
- CHECK_EQUAL_64(0xef0123456789abccL, x19);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFL, x10);
+ CHECK_EQUAL_64(0x23456789ABCDEF00L, x11);
+ CHECK_EQUAL_64(0x000123456789ABCDL, x12);
+ CHECK_EQUAL_64(0x000123456789ABCDL, x13);
+ CHECK_EQUAL_64(0xFFFEDCBA98765432L, x14);
+ CHECK_EQUAL_64(0xFF89ABCD, x15);
+ CHECK_EQUAL_64(0xEF89ABCC, x18);
+ CHECK_EQUAL_64(0xEF0123456789ABCCL, x19);
- CHECK_EQUAL_64(0x0123456789abcdefL, x20);
- CHECK_EQUAL_64(0xdcba9876543210ffL, x21);
- CHECK_EQUAL_64(0xfffedcba98765432L, x22);
- CHECK_EQUAL_64(0xfffedcba98765432L, x23);
- CHECK_EQUAL_64(0x000123456789abcdL, x24);
+ CHECK_EQUAL_64(0x0123456789ABCDEFL, x20);
+ CHECK_EQUAL_64(0xDCBA9876543210FFL, x21);
+ CHECK_EQUAL_64(0xFFFEDCBA98765432L, x22);
+ CHECK_EQUAL_64(0xFFFEDCBA98765432L, x23);
+ CHECK_EQUAL_64(0x000123456789ABCDL, x24);
CHECK_EQUAL_64(0x00765432, x25);
CHECK_EQUAL_64(0x10765432, x26);
- CHECK_EQUAL_64(0x10fedcba98765432L, x27);
+ CHECK_EQUAL_64(0x10FEDCBA98765432L, x27);
TEARDOWN();
}
@@ -7005,8 +7001,8 @@ TEST(add_sub_extended) {
START();
__ Mov(x0, 0);
- __ Mov(x1, 0x0123456789abcdefL);
- __ Mov(x2, 0xfedcba9876543210L);
+ __ Mov(x1, 0x0123456789ABCDEFL);
+ __ Mov(x2, 0xFEDCBA9876543210L);
__ Mov(w3, 0x80);
__ Add(x10, x0, Operand(x1, UXTB, 0));
@@ -7039,30 +7035,30 @@ TEST(add_sub_extended) {
RUN();
- CHECK_EQUAL_64(0xefL, x10);
- CHECK_EQUAL_64(0x1deL, x11);
- CHECK_EQUAL_64(0x337bcL, x12);
- CHECK_EQUAL_64(0x89abcdef0L, x13);
+ CHECK_EQUAL_64(0xEFL, x10);
+ CHECK_EQUAL_64(0x1DEL, x11);
+ CHECK_EQUAL_64(0x337BCL, x12);
+ CHECK_EQUAL_64(0x89ABCDEF0L, x13);
- CHECK_EQUAL_64(0xffffffffffffffefL, x14);
- CHECK_EQUAL_64(0xffffffffffffffdeL, x15);
- CHECK_EQUAL_64(0xffffffffffff37bcL, x16);
- CHECK_EQUAL_64(0xfffffffc4d5e6f78L, x17);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFFFFEFL, x14);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFFFFDEL, x15);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFF37BCL, x16);
+ CHECK_EQUAL_64(0xFFFFFFFC4D5E6F78L, x17);
CHECK_EQUAL_64(0x10L, x18);
CHECK_EQUAL_64(0x20L, x19);
- CHECK_EQUAL_64(0xc840L, x20);
- CHECK_EQUAL_64(0x3b2a19080L, x21);
+ CHECK_EQUAL_64(0xC840L, x20);
+ CHECK_EQUAL_64(0x3B2A19080L, x21);
- CHECK_EQUAL_64(0x0123456789abce0fL, x22);
- CHECK_EQUAL_64(0x0123456789abcdcfL, x23);
+ CHECK_EQUAL_64(0x0123456789ABCE0FL, x22);
+ CHECK_EQUAL_64(0x0123456789ABCDCFL, x23);
- CHECK_EQUAL_32(0x89abce2f, w24);
- CHECK_EQUAL_32(0xffffffef, w25);
- CHECK_EQUAL_32(0xffffffde, w26);
- CHECK_EQUAL_32(0xc3b2a188, w27);
+ CHECK_EQUAL_32(0x89ABCE2F, w24);
+ CHECK_EQUAL_32(0xFFFFFFEF, w25);
+ CHECK_EQUAL_32(0xFFFFFFDE, w26);
+ CHECK_EQUAL_32(0xC3B2A188, w27);
- CHECK_EQUAL_32(0x4d5e6f78, w28);
- CHECK_EQUAL_64(0xfffffffc4d5e6f78L, x29);
+ CHECK_EQUAL_32(0x4D5E6F78, w28);
+ CHECK_EQUAL_64(0xFFFFFFFC4D5E6F78L, x29);
CHECK_EQUAL_64(256, x30);
@@ -7092,7 +7088,7 @@ TEST(add_sub_negative) {
__ Add(w19, w3, -0x344);
__ Add(w20, w4, -2000);
- __ Sub(w21, w3, -0xbc);
+ __ Sub(w21, w3, -0xBC);
__ Sub(w22, w4, -2000);
END();
@@ -7104,7 +7100,7 @@ TEST(add_sub_negative) {
CHECK_EQUAL_64(600, x13);
CHECK_EQUAL_64(5000, x14);
- CHECK_EQUAL_64(0x1122334455667cdd, x15);
+ CHECK_EQUAL_64(0x1122334455667CDD, x15);
CHECK_EQUAL_32(0x11223000, w19);
CHECK_EQUAL_32(398000, w20);
@@ -7162,8 +7158,7 @@ TEST(preshift_immediates) {
// pre-shifted encodable immediate followed by a post-shift applied to
// the arithmetic or logical operation.
- // Save csp and change stack pointer to avoid consistency check.
- __ SetStackPointer(jssp);
+ // Save csp.
__ Mov(x29, csp);
// Set the registers to known values.
@@ -7171,28 +7166,28 @@ TEST(preshift_immediates) {
__ Mov(csp, 0x1000);
// Arithmetic ops.
- __ Add(x1, x0, 0x1f7de);
- __ Add(w2, w0, 0xffffff1);
+ __ Add(x1, x0, 0x1F7DE);
+ __ Add(w2, w0, 0xFFFFFF1);
__ Adds(x3, x0, 0x18001);
- __ Adds(w4, w0, 0xffffff1);
+ __ Adds(w4, w0, 0xFFFFFF1);
__ Add(x5, x0, 0x10100);
- __ Sub(w6, w0, 0xffffff1);
+ __ Sub(w6, w0, 0xFFFFFF1);
__ Subs(x7, x0, 0x18001);
- __ Subs(w8, w0, 0xffffff1);
+ __ Subs(w8, w0, 0xFFFFFF1);
// Logical ops.
- __ And(x9, x0, 0x1f7de);
- __ Orr(w10, w0, 0xffffff1);
+ __ And(x9, x0, 0x1F7DE);
+ __ Orr(w10, w0, 0xFFFFFF1);
__ Eor(x11, x0, 0x18001);
// Ops using the stack pointer.
- __ Add(csp, csp, 0x1f7f0);
+ __ Add(csp, csp, 0x1F7F0);
__ Mov(x12, csp);
__ Mov(csp, 0x1000);
- __ Adds(x13, csp, 0x1f7f0);
+ __ Adds(x13, csp, 0x1F7F0);
- __ Orr(csp, x0, 0x1f7f0);
+ __ Orr(csp, x0, 0x1F7F0);
__ Mov(x14, csp);
__ Mov(csp, 0x1000);
@@ -7201,25 +7196,24 @@ TEST(preshift_immediates) {
// Restore csp.
__ Mov(csp, x29);
- __ SetStackPointer(csp);
END();
RUN();
CHECK_EQUAL_64(0x1000, x0);
- CHECK_EQUAL_64(0x207de, x1);
- CHECK_EQUAL_64(0x10000ff1, x2);
+ CHECK_EQUAL_64(0x207DE, x1);
+ CHECK_EQUAL_64(0x10000FF1, x2);
CHECK_EQUAL_64(0x19001, x3);
- CHECK_EQUAL_64(0x10000ff1, x4);
+ CHECK_EQUAL_64(0x10000FF1, x4);
CHECK_EQUAL_64(0x11100, x5);
- CHECK_EQUAL_64(0xf000100f, x6);
- CHECK_EQUAL_64(0xfffffffffffe8fff, x7);
- CHECK_EQUAL_64(0xf000100f, x8);
+ CHECK_EQUAL_64(0xF000100F, x6);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFE8FFF, x7);
+ CHECK_EQUAL_64(0xF000100F, x8);
CHECK_EQUAL_64(0x1000, x9);
- CHECK_EQUAL_64(0xffffff1, x10);
- CHECK_EQUAL_64(0x207f0, x12);
- CHECK_EQUAL_64(0x207f0, x13);
- CHECK_EQUAL_64(0x1f7f0, x14);
+ CHECK_EQUAL_64(0xFFFFFF1, x10);
+ CHECK_EQUAL_64(0x207F0, x12);
+ CHECK_EQUAL_64(0x207F0, x13);
+ CHECK_EQUAL_64(0x1F7F0, x14);
CHECK_EQUAL_64(0x11100, x15);
TEARDOWN();
@@ -7260,7 +7254,7 @@ TEST(neg) {
SETUP();
START();
- __ Mov(x0, 0xf123456789abcdefL);
+ __ Mov(x0, 0xF123456789ABCDEFL);
// Immediate.
__ Neg(x1, 0x123);
@@ -7285,17 +7279,17 @@ TEST(neg) {
RUN();
- CHECK_EQUAL_64(0xfffffffffffffeddUL, x1);
- CHECK_EQUAL_64(0xfffffedd, x2);
- CHECK_EQUAL_64(0x1db97530eca86422UL, x3);
- CHECK_EQUAL_64(0xd950c844, x4);
- CHECK_EQUAL_64(0xe1db97530eca8643UL, x5);
- CHECK_EQUAL_64(0xf7654322, x6);
- CHECK_EQUAL_64(0x0076e5d4c3b2a191UL, x7);
- CHECK_EQUAL_64(0x01d950c9, x8);
- CHECK_EQUAL_64(0xffffff11, x9);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFFFEDDUL, x1);
+ CHECK_EQUAL_64(0xFFFFFEDD, x2);
+ CHECK_EQUAL_64(0x1DB97530ECA86422UL, x3);
+ CHECK_EQUAL_64(0xD950C844, x4);
+ CHECK_EQUAL_64(0xE1DB97530ECA8643UL, x5);
+ CHECK_EQUAL_64(0xF7654322, x6);
+ CHECK_EQUAL_64(0x0076E5D4C3B2A191UL, x7);
+ CHECK_EQUAL_64(0x01D950C9, x8);
+ CHECK_EQUAL_64(0xFFFFFF11, x9);
CHECK_EQUAL_64(0x0000000000000022UL, x10);
- CHECK_EQUAL_64(0xfffcc844, x11);
+ CHECK_EQUAL_64(0xFFFCC844, x11);
CHECK_EQUAL_64(0x0000000000019088UL, x12);
CHECK_EQUAL_64(0x65432110, x13);
CHECK_EQUAL_64(0x0000000765432110UL, x14);
@@ -7337,9 +7331,9 @@ static void AdcsSbcsHelper(Op op, T left, T right, int carry, T expected,
TEST(adcs_sbcs_x) {
INIT_V8();
uint64_t inputs[] = {
- 0x0000000000000000, 0x0000000000000001, 0x7ffffffffffffffe,
- 0x7fffffffffffffff, 0x8000000000000000, 0x8000000000000001,
- 0xfffffffffffffffe, 0xffffffffffffffff,
+ 0x0000000000000000, 0x0000000000000001, 0x7FFFFFFFFFFFFFFE,
+ 0x7FFFFFFFFFFFFFFF, 0x8000000000000000, 0x8000000000000001,
+ 0xFFFFFFFFFFFFFFFE, 0xFFFFFFFFFFFFFFFF,
};
static const size_t input_count = sizeof(inputs) / sizeof(inputs[0]);
@@ -7353,134 +7347,134 @@ TEST(adcs_sbcs_x) {
static const Expected expected_adcs_x[input_count][input_count] = {
{{0x0000000000000000, ZFlag, 0x0000000000000001, NoFlag},
{0x0000000000000001, NoFlag, 0x0000000000000002, NoFlag},
- {0x7ffffffffffffffe, NoFlag, 0x7fffffffffffffff, NoFlag},
- {0x7fffffffffffffff, NoFlag, 0x8000000000000000, NVFlag},
+ {0x7FFFFFFFFFFFFFFE, NoFlag, 0x7FFFFFFFFFFFFFFF, NoFlag},
+ {0x7FFFFFFFFFFFFFFF, NoFlag, 0x8000000000000000, NVFlag},
{0x8000000000000000, NFlag, 0x8000000000000001, NFlag},
{0x8000000000000001, NFlag, 0x8000000000000002, NFlag},
- {0xfffffffffffffffe, NFlag, 0xffffffffffffffff, NFlag},
- {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag}},
+ {0xFFFFFFFFFFFFFFFE, NFlag, 0xFFFFFFFFFFFFFFFF, NFlag},
+ {0xFFFFFFFFFFFFFFFF, NFlag, 0x0000000000000000, ZCFlag}},
{{0x0000000000000001, NoFlag, 0x0000000000000002, NoFlag},
{0x0000000000000002, NoFlag, 0x0000000000000003, NoFlag},
- {0x7fffffffffffffff, NoFlag, 0x8000000000000000, NVFlag},
+ {0x7FFFFFFFFFFFFFFF, NoFlag, 0x8000000000000000, NVFlag},
{0x8000000000000000, NVFlag, 0x8000000000000001, NVFlag},
{0x8000000000000001, NFlag, 0x8000000000000002, NFlag},
{0x8000000000000002, NFlag, 0x8000000000000003, NFlag},
- {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
+ {0xFFFFFFFFFFFFFFFF, NFlag, 0x0000000000000000, ZCFlag},
{0x0000000000000000, ZCFlag, 0x0000000000000001, CFlag}},
- {{0x7ffffffffffffffe, NoFlag, 0x7fffffffffffffff, NoFlag},
- {0x7fffffffffffffff, NoFlag, 0x8000000000000000, NVFlag},
- {0xfffffffffffffffc, NVFlag, 0xfffffffffffffffd, NVFlag},
- {0xfffffffffffffffd, NVFlag, 0xfffffffffffffffe, NVFlag},
- {0xfffffffffffffffe, NFlag, 0xffffffffffffffff, NFlag},
- {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
- {0x7ffffffffffffffc, CFlag, 0x7ffffffffffffffd, CFlag},
- {0x7ffffffffffffffd, CFlag, 0x7ffffffffffffffe, CFlag}},
- {{0x7fffffffffffffff, NoFlag, 0x8000000000000000, NVFlag},
+ {{0x7FFFFFFFFFFFFFFE, NoFlag, 0x7FFFFFFFFFFFFFFF, NoFlag},
+ {0x7FFFFFFFFFFFFFFF, NoFlag, 0x8000000000000000, NVFlag},
+ {0xFFFFFFFFFFFFFFFC, NVFlag, 0xFFFFFFFFFFFFFFFD, NVFlag},
+ {0xFFFFFFFFFFFFFFFD, NVFlag, 0xFFFFFFFFFFFFFFFE, NVFlag},
+ {0xFFFFFFFFFFFFFFFE, NFlag, 0xFFFFFFFFFFFFFFFF, NFlag},
+ {0xFFFFFFFFFFFFFFFF, NFlag, 0x0000000000000000, ZCFlag},
+ {0x7FFFFFFFFFFFFFFC, CFlag, 0x7FFFFFFFFFFFFFFD, CFlag},
+ {0x7FFFFFFFFFFFFFFD, CFlag, 0x7FFFFFFFFFFFFFFE, CFlag}},
+ {{0x7FFFFFFFFFFFFFFF, NoFlag, 0x8000000000000000, NVFlag},
{0x8000000000000000, NVFlag, 0x8000000000000001, NVFlag},
- {0xfffffffffffffffd, NVFlag, 0xfffffffffffffffe, NVFlag},
- {0xfffffffffffffffe, NVFlag, 0xffffffffffffffff, NVFlag},
- {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
+ {0xFFFFFFFFFFFFFFFD, NVFlag, 0xFFFFFFFFFFFFFFFE, NVFlag},
+ {0xFFFFFFFFFFFFFFFE, NVFlag, 0xFFFFFFFFFFFFFFFF, NVFlag},
+ {0xFFFFFFFFFFFFFFFF, NFlag, 0x0000000000000000, ZCFlag},
{0x0000000000000000, ZCFlag, 0x0000000000000001, CFlag},
- {0x7ffffffffffffffd, CFlag, 0x7ffffffffffffffe, CFlag},
- {0x7ffffffffffffffe, CFlag, 0x7fffffffffffffff, CFlag}},
+ {0x7FFFFFFFFFFFFFFD, CFlag, 0x7FFFFFFFFFFFFFFE, CFlag},
+ {0x7FFFFFFFFFFFFFFE, CFlag, 0x7FFFFFFFFFFFFFFF, CFlag}},
{{0x8000000000000000, NFlag, 0x8000000000000001, NFlag},
{0x8000000000000001, NFlag, 0x8000000000000002, NFlag},
- {0xfffffffffffffffe, NFlag, 0xffffffffffffffff, NFlag},
- {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
+ {0xFFFFFFFFFFFFFFFE, NFlag, 0xFFFFFFFFFFFFFFFF, NFlag},
+ {0xFFFFFFFFFFFFFFFF, NFlag, 0x0000000000000000, ZCFlag},
{0x0000000000000000, ZCVFlag, 0x0000000000000001, CVFlag},
{0x0000000000000001, CVFlag, 0x0000000000000002, CVFlag},
- {0x7ffffffffffffffe, CVFlag, 0x7fffffffffffffff, CVFlag},
- {0x7fffffffffffffff, CVFlag, 0x8000000000000000, NCFlag}},
+ {0x7FFFFFFFFFFFFFFE, CVFlag, 0x7FFFFFFFFFFFFFFF, CVFlag},
+ {0x7FFFFFFFFFFFFFFF, CVFlag, 0x8000000000000000, NCFlag}},
{{0x8000000000000001, NFlag, 0x8000000000000002, NFlag},
{0x8000000000000002, NFlag, 0x8000000000000003, NFlag},
- {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
+ {0xFFFFFFFFFFFFFFFF, NFlag, 0x0000000000000000, ZCFlag},
{0x0000000000000000, ZCFlag, 0x0000000000000001, CFlag},
{0x0000000000000001, CVFlag, 0x0000000000000002, CVFlag},
{0x0000000000000002, CVFlag, 0x0000000000000003, CVFlag},
- {0x7fffffffffffffff, CVFlag, 0x8000000000000000, NCFlag},
+ {0x7FFFFFFFFFFFFFFF, CVFlag, 0x8000000000000000, NCFlag},
{0x8000000000000000, NCFlag, 0x8000000000000001, NCFlag}},
- {{0xfffffffffffffffe, NFlag, 0xffffffffffffffff, NFlag},
- {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
- {0x7ffffffffffffffc, CFlag, 0x7ffffffffffffffd, CFlag},
- {0x7ffffffffffffffd, CFlag, 0x7ffffffffffffffe, CFlag},
- {0x7ffffffffffffffe, CVFlag, 0x7fffffffffffffff, CVFlag},
- {0x7fffffffffffffff, CVFlag, 0x8000000000000000, NCFlag},
- {0xfffffffffffffffc, NCFlag, 0xfffffffffffffffd, NCFlag},
- {0xfffffffffffffffd, NCFlag, 0xfffffffffffffffe, NCFlag}},
- {{0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
+ {{0xFFFFFFFFFFFFFFFE, NFlag, 0xFFFFFFFFFFFFFFFF, NFlag},
+ {0xFFFFFFFFFFFFFFFF, NFlag, 0x0000000000000000, ZCFlag},
+ {0x7FFFFFFFFFFFFFFC, CFlag, 0x7FFFFFFFFFFFFFFD, CFlag},
+ {0x7FFFFFFFFFFFFFFD, CFlag, 0x7FFFFFFFFFFFFFFE, CFlag},
+ {0x7FFFFFFFFFFFFFFE, CVFlag, 0x7FFFFFFFFFFFFFFF, CVFlag},
+ {0x7FFFFFFFFFFFFFFF, CVFlag, 0x8000000000000000, NCFlag},
+ {0xFFFFFFFFFFFFFFFC, NCFlag, 0xFFFFFFFFFFFFFFFD, NCFlag},
+ {0xFFFFFFFFFFFFFFFD, NCFlag, 0xFFFFFFFFFFFFFFFE, NCFlag}},
+ {{0xFFFFFFFFFFFFFFFF, NFlag, 0x0000000000000000, ZCFlag},
{0x0000000000000000, ZCFlag, 0x0000000000000001, CFlag},
- {0x7ffffffffffffffd, CFlag, 0x7ffffffffffffffe, CFlag},
- {0x7ffffffffffffffe, CFlag, 0x7fffffffffffffff, CFlag},
- {0x7fffffffffffffff, CVFlag, 0x8000000000000000, NCFlag},
+ {0x7FFFFFFFFFFFFFFD, CFlag, 0x7FFFFFFFFFFFFFFE, CFlag},
+ {0x7FFFFFFFFFFFFFFE, CFlag, 0x7FFFFFFFFFFFFFFF, CFlag},
+ {0x7FFFFFFFFFFFFFFF, CVFlag, 0x8000000000000000, NCFlag},
{0x8000000000000000, NCFlag, 0x8000000000000001, NCFlag},
- {0xfffffffffffffffd, NCFlag, 0xfffffffffffffffe, NCFlag},
- {0xfffffffffffffffe, NCFlag, 0xffffffffffffffff, NCFlag}}};
+ {0xFFFFFFFFFFFFFFFD, NCFlag, 0xFFFFFFFFFFFFFFFE, NCFlag},
+ {0xFFFFFFFFFFFFFFFE, NCFlag, 0xFFFFFFFFFFFFFFFF, NCFlag}}};
static const Expected expected_sbcs_x[input_count][input_count] = {
- {{0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
- {0xfffffffffffffffe, NFlag, 0xffffffffffffffff, NFlag},
+ {{0xFFFFFFFFFFFFFFFF, NFlag, 0x0000000000000000, ZCFlag},
+ {0xFFFFFFFFFFFFFFFE, NFlag, 0xFFFFFFFFFFFFFFFF, NFlag},
{0x8000000000000001, NFlag, 0x8000000000000002, NFlag},
{0x8000000000000000, NFlag, 0x8000000000000001, NFlag},
- {0x7fffffffffffffff, NoFlag, 0x8000000000000000, NVFlag},
- {0x7ffffffffffffffe, NoFlag, 0x7fffffffffffffff, NoFlag},
+ {0x7FFFFFFFFFFFFFFF, NoFlag, 0x8000000000000000, NVFlag},
+ {0x7FFFFFFFFFFFFFFE, NoFlag, 0x7FFFFFFFFFFFFFFF, NoFlag},
{0x0000000000000001, NoFlag, 0x0000000000000002, NoFlag},
{0x0000000000000000, ZFlag, 0x0000000000000001, NoFlag}},
{{0x0000000000000000, ZCFlag, 0x0000000000000001, CFlag},
- {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
+ {0xFFFFFFFFFFFFFFFF, NFlag, 0x0000000000000000, ZCFlag},
{0x8000000000000002, NFlag, 0x8000000000000003, NFlag},
{0x8000000000000001, NFlag, 0x8000000000000002, NFlag},
{0x8000000000000000, NVFlag, 0x8000000000000001, NVFlag},
- {0x7fffffffffffffff, NoFlag, 0x8000000000000000, NVFlag},
+ {0x7FFFFFFFFFFFFFFF, NoFlag, 0x8000000000000000, NVFlag},
{0x0000000000000002, NoFlag, 0x0000000000000003, NoFlag},
{0x0000000000000001, NoFlag, 0x0000000000000002, NoFlag}},
- {{0x7ffffffffffffffd, CFlag, 0x7ffffffffffffffe, CFlag},
- {0x7ffffffffffffffc, CFlag, 0x7ffffffffffffffd, CFlag},
- {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
- {0xfffffffffffffffe, NFlag, 0xffffffffffffffff, NFlag},
- {0xfffffffffffffffd, NVFlag, 0xfffffffffffffffe, NVFlag},
- {0xfffffffffffffffc, NVFlag, 0xfffffffffffffffd, NVFlag},
- {0x7fffffffffffffff, NoFlag, 0x8000000000000000, NVFlag},
- {0x7ffffffffffffffe, NoFlag, 0x7fffffffffffffff, NoFlag}},
- {{0x7ffffffffffffffe, CFlag, 0x7fffffffffffffff, CFlag},
- {0x7ffffffffffffffd, CFlag, 0x7ffffffffffffffe, CFlag},
+ {{0x7FFFFFFFFFFFFFFD, CFlag, 0x7FFFFFFFFFFFFFFE, CFlag},
+ {0x7FFFFFFFFFFFFFFC, CFlag, 0x7FFFFFFFFFFFFFFD, CFlag},
+ {0xFFFFFFFFFFFFFFFF, NFlag, 0x0000000000000000, ZCFlag},
+ {0xFFFFFFFFFFFFFFFE, NFlag, 0xFFFFFFFFFFFFFFFF, NFlag},
+ {0xFFFFFFFFFFFFFFFD, NVFlag, 0xFFFFFFFFFFFFFFFE, NVFlag},
+ {0xFFFFFFFFFFFFFFFC, NVFlag, 0xFFFFFFFFFFFFFFFD, NVFlag},
+ {0x7FFFFFFFFFFFFFFF, NoFlag, 0x8000000000000000, NVFlag},
+ {0x7FFFFFFFFFFFFFFE, NoFlag, 0x7FFFFFFFFFFFFFFF, NoFlag}},
+ {{0x7FFFFFFFFFFFFFFE, CFlag, 0x7FFFFFFFFFFFFFFF, CFlag},
+ {0x7FFFFFFFFFFFFFFD, CFlag, 0x7FFFFFFFFFFFFFFE, CFlag},
{0x0000000000000000, ZCFlag, 0x0000000000000001, CFlag},
- {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
- {0xfffffffffffffffe, NVFlag, 0xffffffffffffffff, NVFlag},
- {0xfffffffffffffffd, NVFlag, 0xfffffffffffffffe, NVFlag},
+ {0xFFFFFFFFFFFFFFFF, NFlag, 0x0000000000000000, ZCFlag},
+ {0xFFFFFFFFFFFFFFFE, NVFlag, 0xFFFFFFFFFFFFFFFF, NVFlag},
+ {0xFFFFFFFFFFFFFFFD, NVFlag, 0xFFFFFFFFFFFFFFFE, NVFlag},
{0x8000000000000000, NVFlag, 0x8000000000000001, NVFlag},
- {0x7fffffffffffffff, NoFlag, 0x8000000000000000, NVFlag}},
- {{0x7fffffffffffffff, CVFlag, 0x8000000000000000, NCFlag},
- {0x7ffffffffffffffe, CVFlag, 0x7fffffffffffffff, CVFlag},
+ {0x7FFFFFFFFFFFFFFF, NoFlag, 0x8000000000000000, NVFlag}},
+ {{0x7FFFFFFFFFFFFFFF, CVFlag, 0x8000000000000000, NCFlag},
+ {0x7FFFFFFFFFFFFFFE, CVFlag, 0x7FFFFFFFFFFFFFFF, CVFlag},
{0x0000000000000001, CVFlag, 0x0000000000000002, CVFlag},
{0x0000000000000000, ZCVFlag, 0x0000000000000001, CVFlag},
- {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
- {0xfffffffffffffffe, NFlag, 0xffffffffffffffff, NFlag},
+ {0xFFFFFFFFFFFFFFFF, NFlag, 0x0000000000000000, ZCFlag},
+ {0xFFFFFFFFFFFFFFFE, NFlag, 0xFFFFFFFFFFFFFFFF, NFlag},
{0x8000000000000001, NFlag, 0x8000000000000002, NFlag},
{0x8000000000000000, NFlag, 0x8000000000000001, NFlag}},
{{0x8000000000000000, NCFlag, 0x8000000000000001, NCFlag},
- {0x7fffffffffffffff, CVFlag, 0x8000000000000000, NCFlag},
+ {0x7FFFFFFFFFFFFFFF, CVFlag, 0x8000000000000000, NCFlag},
{0x0000000000000002, CVFlag, 0x0000000000000003, CVFlag},
{0x0000000000000001, CVFlag, 0x0000000000000002, CVFlag},
{0x0000000000000000, ZCFlag, 0x0000000000000001, CFlag},
- {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
+ {0xFFFFFFFFFFFFFFFF, NFlag, 0x0000000000000000, ZCFlag},
{0x8000000000000002, NFlag, 0x8000000000000003, NFlag},
{0x8000000000000001, NFlag, 0x8000000000000002, NFlag}},
- {{0xfffffffffffffffd, NCFlag, 0xfffffffffffffffe, NCFlag},
- {0xfffffffffffffffc, NCFlag, 0xfffffffffffffffd, NCFlag},
- {0x7fffffffffffffff, CVFlag, 0x8000000000000000, NCFlag},
- {0x7ffffffffffffffe, CVFlag, 0x7fffffffffffffff, CVFlag},
- {0x7ffffffffffffffd, CFlag, 0x7ffffffffffffffe, CFlag},
- {0x7ffffffffffffffc, CFlag, 0x7ffffffffffffffd, CFlag},
- {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
- {0xfffffffffffffffe, NFlag, 0xffffffffffffffff, NFlag}},
- {{0xfffffffffffffffe, NCFlag, 0xffffffffffffffff, NCFlag},
- {0xfffffffffffffffd, NCFlag, 0xfffffffffffffffe, NCFlag},
+ {{0xFFFFFFFFFFFFFFFD, NCFlag, 0xFFFFFFFFFFFFFFFE, NCFlag},
+ {0xFFFFFFFFFFFFFFFC, NCFlag, 0xFFFFFFFFFFFFFFFD, NCFlag},
+ {0x7FFFFFFFFFFFFFFF, CVFlag, 0x8000000000000000, NCFlag},
+ {0x7FFFFFFFFFFFFFFE, CVFlag, 0x7FFFFFFFFFFFFFFF, CVFlag},
+ {0x7FFFFFFFFFFFFFFD, CFlag, 0x7FFFFFFFFFFFFFFE, CFlag},
+ {0x7FFFFFFFFFFFFFFC, CFlag, 0x7FFFFFFFFFFFFFFD, CFlag},
+ {0xFFFFFFFFFFFFFFFF, NFlag, 0x0000000000000000, ZCFlag},
+ {0xFFFFFFFFFFFFFFFE, NFlag, 0xFFFFFFFFFFFFFFFF, NFlag}},
+ {{0xFFFFFFFFFFFFFFFE, NCFlag, 0xFFFFFFFFFFFFFFFF, NCFlag},
+ {0xFFFFFFFFFFFFFFFD, NCFlag, 0xFFFFFFFFFFFFFFFE, NCFlag},
{0x8000000000000000, NCFlag, 0x8000000000000001, NCFlag},
- {0x7fffffffffffffff, CVFlag, 0x8000000000000000, NCFlag},
- {0x7ffffffffffffffe, CFlag, 0x7fffffffffffffff, CFlag},
- {0x7ffffffffffffffd, CFlag, 0x7ffffffffffffffe, CFlag},
+ {0x7FFFFFFFFFFFFFFF, CVFlag, 0x8000000000000000, NCFlag},
+ {0x7FFFFFFFFFFFFFFE, CFlag, 0x7FFFFFFFFFFFFFFF, CFlag},
+ {0x7FFFFFFFFFFFFFFD, CFlag, 0x7FFFFFFFFFFFFFFE, CFlag},
{0x0000000000000000, ZCFlag, 0x0000000000000001, CFlag},
- {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag}}};
+ {0xFFFFFFFFFFFFFFFF, NFlag, 0x0000000000000000, ZCFlag}}};
for (size_t left = 0; left < input_count; left++) {
for (size_t right = 0; right < input_count; right++) {
@@ -7507,8 +7501,8 @@ TEST(adcs_sbcs_x) {
TEST(adcs_sbcs_w) {
INIT_V8();
uint32_t inputs[] = {
- 0x00000000, 0x00000001, 0x7ffffffe, 0x7fffffff,
- 0x80000000, 0x80000001, 0xfffffffe, 0xffffffff,
+ 0x00000000, 0x00000001, 0x7FFFFFFE, 0x7FFFFFFF,
+ 0x80000000, 0x80000001, 0xFFFFFFFE, 0xFFFFFFFF,
};
static const size_t input_count = sizeof(inputs) / sizeof(inputs[0]);
@@ -7522,134 +7516,134 @@ TEST(adcs_sbcs_w) {
static const Expected expected_adcs_w[input_count][input_count] = {
{{0x00000000, ZFlag, 0x00000001, NoFlag},
{0x00000001, NoFlag, 0x00000002, NoFlag},
- {0x7ffffffe, NoFlag, 0x7fffffff, NoFlag},
- {0x7fffffff, NoFlag, 0x80000000, NVFlag},
+ {0x7FFFFFFE, NoFlag, 0x7FFFFFFF, NoFlag},
+ {0x7FFFFFFF, NoFlag, 0x80000000, NVFlag},
{0x80000000, NFlag, 0x80000001, NFlag},
{0x80000001, NFlag, 0x80000002, NFlag},
- {0xfffffffe, NFlag, 0xffffffff, NFlag},
- {0xffffffff, NFlag, 0x00000000, ZCFlag}},
+ {0xFFFFFFFE, NFlag, 0xFFFFFFFF, NFlag},
+ {0xFFFFFFFF, NFlag, 0x00000000, ZCFlag}},
{{0x00000001, NoFlag, 0x00000002, NoFlag},
{0x00000002, NoFlag, 0x00000003, NoFlag},
- {0x7fffffff, NoFlag, 0x80000000, NVFlag},
+ {0x7FFFFFFF, NoFlag, 0x80000000, NVFlag},
{0x80000000, NVFlag, 0x80000001, NVFlag},
{0x80000001, NFlag, 0x80000002, NFlag},
{0x80000002, NFlag, 0x80000003, NFlag},
- {0xffffffff, NFlag, 0x00000000, ZCFlag},
+ {0xFFFFFFFF, NFlag, 0x00000000, ZCFlag},
{0x00000000, ZCFlag, 0x00000001, CFlag}},
- {{0x7ffffffe, NoFlag, 0x7fffffff, NoFlag},
- {0x7fffffff, NoFlag, 0x80000000, NVFlag},
- {0xfffffffc, NVFlag, 0xfffffffd, NVFlag},
- {0xfffffffd, NVFlag, 0xfffffffe, NVFlag},
- {0xfffffffe, NFlag, 0xffffffff, NFlag},
- {0xffffffff, NFlag, 0x00000000, ZCFlag},
- {0x7ffffffc, CFlag, 0x7ffffffd, CFlag},
- {0x7ffffffd, CFlag, 0x7ffffffe, CFlag}},
- {{0x7fffffff, NoFlag, 0x80000000, NVFlag},
+ {{0x7FFFFFFE, NoFlag, 0x7FFFFFFF, NoFlag},
+ {0x7FFFFFFF, NoFlag, 0x80000000, NVFlag},
+ {0xFFFFFFFC, NVFlag, 0xFFFFFFFD, NVFlag},
+ {0xFFFFFFFD, NVFlag, 0xFFFFFFFE, NVFlag},
+ {0xFFFFFFFE, NFlag, 0xFFFFFFFF, NFlag},
+ {0xFFFFFFFF, NFlag, 0x00000000, ZCFlag},
+ {0x7FFFFFFC, CFlag, 0x7FFFFFFD, CFlag},
+ {0x7FFFFFFD, CFlag, 0x7FFFFFFE, CFlag}},
+ {{0x7FFFFFFF, NoFlag, 0x80000000, NVFlag},
{0x80000000, NVFlag, 0x80000001, NVFlag},
- {0xfffffffd, NVFlag, 0xfffffffe, NVFlag},
- {0xfffffffe, NVFlag, 0xffffffff, NVFlag},
- {0xffffffff, NFlag, 0x00000000, ZCFlag},
+ {0xFFFFFFFD, NVFlag, 0xFFFFFFFE, NVFlag},
+ {0xFFFFFFFE, NVFlag, 0xFFFFFFFF, NVFlag},
+ {0xFFFFFFFF, NFlag, 0x00000000, ZCFlag},
{0x00000000, ZCFlag, 0x00000001, CFlag},
- {0x7ffffffd, CFlag, 0x7ffffffe, CFlag},
- {0x7ffffffe, CFlag, 0x7fffffff, CFlag}},
+ {0x7FFFFFFD, CFlag, 0x7FFFFFFE, CFlag},
+ {0x7FFFFFFE, CFlag, 0x7FFFFFFF, CFlag}},
{{0x80000000, NFlag, 0x80000001, NFlag},
{0x80000001, NFlag, 0x80000002, NFlag},
- {0xfffffffe, NFlag, 0xffffffff, NFlag},
- {0xffffffff, NFlag, 0x00000000, ZCFlag},
+ {0xFFFFFFFE, NFlag, 0xFFFFFFFF, NFlag},
+ {0xFFFFFFFF, NFlag, 0x00000000, ZCFlag},
{0x00000000, ZCVFlag, 0x00000001, CVFlag},
{0x00000001, CVFlag, 0x00000002, CVFlag},
- {0x7ffffffe, CVFlag, 0x7fffffff, CVFlag},
- {0x7fffffff, CVFlag, 0x80000000, NCFlag}},
+ {0x7FFFFFFE, CVFlag, 0x7FFFFFFF, CVFlag},
+ {0x7FFFFFFF, CVFlag, 0x80000000, NCFlag}},
{{0x80000001, NFlag, 0x80000002, NFlag},
{0x80000002, NFlag, 0x80000003, NFlag},
- {0xffffffff, NFlag, 0x00000000, ZCFlag},
+ {0xFFFFFFFF, NFlag, 0x00000000, ZCFlag},
{0x00000000, ZCFlag, 0x00000001, CFlag},
{0x00000001, CVFlag, 0x00000002, CVFlag},
{0x00000002, CVFlag, 0x00000003, CVFlag},
- {0x7fffffff, CVFlag, 0x80000000, NCFlag},
+ {0x7FFFFFFF, CVFlag, 0x80000000, NCFlag},
{0x80000000, NCFlag, 0x80000001, NCFlag}},
- {{0xfffffffe, NFlag, 0xffffffff, NFlag},
- {0xffffffff, NFlag, 0x00000000, ZCFlag},
- {0x7ffffffc, CFlag, 0x7ffffffd, CFlag},
- {0x7ffffffd, CFlag, 0x7ffffffe, CFlag},
- {0x7ffffffe, CVFlag, 0x7fffffff, CVFlag},
- {0x7fffffff, CVFlag, 0x80000000, NCFlag},
- {0xfffffffc, NCFlag, 0xfffffffd, NCFlag},
- {0xfffffffd, NCFlag, 0xfffffffe, NCFlag}},
- {{0xffffffff, NFlag, 0x00000000, ZCFlag},
+ {{0xFFFFFFFE, NFlag, 0xFFFFFFFF, NFlag},
+ {0xFFFFFFFF, NFlag, 0x00000000, ZCFlag},
+ {0x7FFFFFFC, CFlag, 0x7FFFFFFD, CFlag},
+ {0x7FFFFFFD, CFlag, 0x7FFFFFFE, CFlag},
+ {0x7FFFFFFE, CVFlag, 0x7FFFFFFF, CVFlag},
+ {0x7FFFFFFF, CVFlag, 0x80000000, NCFlag},
+ {0xFFFFFFFC, NCFlag, 0xFFFFFFFD, NCFlag},
+ {0xFFFFFFFD, NCFlag, 0xFFFFFFFE, NCFlag}},
+ {{0xFFFFFFFF, NFlag, 0x00000000, ZCFlag},
{0x00000000, ZCFlag, 0x00000001, CFlag},
- {0x7ffffffd, CFlag, 0x7ffffffe, CFlag},
- {0x7ffffffe, CFlag, 0x7fffffff, CFlag},
- {0x7fffffff, CVFlag, 0x80000000, NCFlag},
+ {0x7FFFFFFD, CFlag, 0x7FFFFFFE, CFlag},
+ {0x7FFFFFFE, CFlag, 0x7FFFFFFF, CFlag},
+ {0x7FFFFFFF, CVFlag, 0x80000000, NCFlag},
{0x80000000, NCFlag, 0x80000001, NCFlag},
- {0xfffffffd, NCFlag, 0xfffffffe, NCFlag},
- {0xfffffffe, NCFlag, 0xffffffff, NCFlag}}};
+ {0xFFFFFFFD, NCFlag, 0xFFFFFFFE, NCFlag},
+ {0xFFFFFFFE, NCFlag, 0xFFFFFFFF, NCFlag}}};
static const Expected expected_sbcs_w[input_count][input_count] = {
- {{0xffffffff, NFlag, 0x00000000, ZCFlag},
- {0xfffffffe, NFlag, 0xffffffff, NFlag},
+ {{0xFFFFFFFF, NFlag, 0x00000000, ZCFlag},
+ {0xFFFFFFFE, NFlag, 0xFFFFFFFF, NFlag},
{0x80000001, NFlag, 0x80000002, NFlag},
{0x80000000, NFlag, 0x80000001, NFlag},
- {0x7fffffff, NoFlag, 0x80000000, NVFlag},
- {0x7ffffffe, NoFlag, 0x7fffffff, NoFlag},
+ {0x7FFFFFFF, NoFlag, 0x80000000, NVFlag},
+ {0x7FFFFFFE, NoFlag, 0x7FFFFFFF, NoFlag},
{0x00000001, NoFlag, 0x00000002, NoFlag},
{0x00000000, ZFlag, 0x00000001, NoFlag}},
{{0x00000000, ZCFlag, 0x00000001, CFlag},
- {0xffffffff, NFlag, 0x00000000, ZCFlag},
+ {0xFFFFFFFF, NFlag, 0x00000000, ZCFlag},
{0x80000002, NFlag, 0x80000003, NFlag},
{0x80000001, NFlag, 0x80000002, NFlag},
{0x80000000, NVFlag, 0x80000001, NVFlag},
- {0x7fffffff, NoFlag, 0x80000000, NVFlag},
+ {0x7FFFFFFF, NoFlag, 0x80000000, NVFlag},
{0x00000002, NoFlag, 0x00000003, NoFlag},
{0x00000001, NoFlag, 0x00000002, NoFlag}},
- {{0x7ffffffd, CFlag, 0x7ffffffe, CFlag},
- {0x7ffffffc, CFlag, 0x7ffffffd, CFlag},
- {0xffffffff, NFlag, 0x00000000, ZCFlag},
- {0xfffffffe, NFlag, 0xffffffff, NFlag},
- {0xfffffffd, NVFlag, 0xfffffffe, NVFlag},
- {0xfffffffc, NVFlag, 0xfffffffd, NVFlag},
- {0x7fffffff, NoFlag, 0x80000000, NVFlag},
- {0x7ffffffe, NoFlag, 0x7fffffff, NoFlag}},
- {{0x7ffffffe, CFlag, 0x7fffffff, CFlag},
- {0x7ffffffd, CFlag, 0x7ffffffe, CFlag},
+ {{0x7FFFFFFD, CFlag, 0x7FFFFFFE, CFlag},
+ {0x7FFFFFFC, CFlag, 0x7FFFFFFD, CFlag},
+ {0xFFFFFFFF, NFlag, 0x00000000, ZCFlag},
+ {0xFFFFFFFE, NFlag, 0xFFFFFFFF, NFlag},
+ {0xFFFFFFFD, NVFlag, 0xFFFFFFFE, NVFlag},
+ {0xFFFFFFFC, NVFlag, 0xFFFFFFFD, NVFlag},
+ {0x7FFFFFFF, NoFlag, 0x80000000, NVFlag},
+ {0x7FFFFFFE, NoFlag, 0x7FFFFFFF, NoFlag}},
+ {{0x7FFFFFFE, CFlag, 0x7FFFFFFF, CFlag},
+ {0x7FFFFFFD, CFlag, 0x7FFFFFFE, CFlag},
{0x00000000, ZCFlag, 0x00000001, CFlag},
- {0xffffffff, NFlag, 0x00000000, ZCFlag},
- {0xfffffffe, NVFlag, 0xffffffff, NVFlag},
- {0xfffffffd, NVFlag, 0xfffffffe, NVFlag},
+ {0xFFFFFFFF, NFlag, 0x00000000, ZCFlag},
+ {0xFFFFFFFE, NVFlag, 0xFFFFFFFF, NVFlag},
+ {0xFFFFFFFD, NVFlag, 0xFFFFFFFE, NVFlag},
{0x80000000, NVFlag, 0x80000001, NVFlag},
- {0x7fffffff, NoFlag, 0x80000000, NVFlag}},
- {{0x7fffffff, CVFlag, 0x80000000, NCFlag},
- {0x7ffffffe, CVFlag, 0x7fffffff, CVFlag},
+ {0x7FFFFFFF, NoFlag, 0x80000000, NVFlag}},
+ {{0x7FFFFFFF, CVFlag, 0x80000000, NCFlag},
+ {0x7FFFFFFE, CVFlag, 0x7FFFFFFF, CVFlag},
{0x00000001, CVFlag, 0x00000002, CVFlag},
{0x00000000, ZCVFlag, 0x00000001, CVFlag},
- {0xffffffff, NFlag, 0x00000000, ZCFlag},
- {0xfffffffe, NFlag, 0xffffffff, NFlag},
+ {0xFFFFFFFF, NFlag, 0x00000000, ZCFlag},
+ {0xFFFFFFFE, NFlag, 0xFFFFFFFF, NFlag},
{0x80000001, NFlag, 0x80000002, NFlag},
{0x80000000, NFlag, 0x80000001, NFlag}},
{{0x80000000, NCFlag, 0x80000001, NCFlag},
- {0x7fffffff, CVFlag, 0x80000000, NCFlag},
+ {0x7FFFFFFF, CVFlag, 0x80000000, NCFlag},
{0x00000002, CVFlag, 0x00000003, CVFlag},
{0x00000001, CVFlag, 0x00000002, CVFlag},
{0x00000000, ZCFlag, 0x00000001, CFlag},
- {0xffffffff, NFlag, 0x00000000, ZCFlag},
+ {0xFFFFFFFF, NFlag, 0x00000000, ZCFlag},
{0x80000002, NFlag, 0x80000003, NFlag},
{0x80000001, NFlag, 0x80000002, NFlag}},
- {{0xfffffffd, NCFlag, 0xfffffffe, NCFlag},
- {0xfffffffc, NCFlag, 0xfffffffd, NCFlag},
- {0x7fffffff, CVFlag, 0x80000000, NCFlag},
- {0x7ffffffe, CVFlag, 0x7fffffff, CVFlag},
- {0x7ffffffd, CFlag, 0x7ffffffe, CFlag},
- {0x7ffffffc, CFlag, 0x7ffffffd, CFlag},
- {0xffffffff, NFlag, 0x00000000, ZCFlag},
- {0xfffffffe, NFlag, 0xffffffff, NFlag}},
- {{0xfffffffe, NCFlag, 0xffffffff, NCFlag},
- {0xfffffffd, NCFlag, 0xfffffffe, NCFlag},
+ {{0xFFFFFFFD, NCFlag, 0xFFFFFFFE, NCFlag},
+ {0xFFFFFFFC, NCFlag, 0xFFFFFFFD, NCFlag},
+ {0x7FFFFFFF, CVFlag, 0x80000000, NCFlag},
+ {0x7FFFFFFE, CVFlag, 0x7FFFFFFF, CVFlag},
+ {0x7FFFFFFD, CFlag, 0x7FFFFFFE, CFlag},
+ {0x7FFFFFFC, CFlag, 0x7FFFFFFD, CFlag},
+ {0xFFFFFFFF, NFlag, 0x00000000, ZCFlag},
+ {0xFFFFFFFE, NFlag, 0xFFFFFFFF, NFlag}},
+ {{0xFFFFFFFE, NCFlag, 0xFFFFFFFF, NCFlag},
+ {0xFFFFFFFD, NCFlag, 0xFFFFFFFE, NCFlag},
{0x80000000, NCFlag, 0x80000001, NCFlag},
- {0x7fffffff, CVFlag, 0x80000000, NCFlag},
- {0x7ffffffe, CFlag, 0x7fffffff, CFlag},
- {0x7ffffffd, CFlag, 0x7ffffffe, CFlag},
+ {0x7FFFFFFF, CVFlag, 0x80000000, NCFlag},
+ {0x7FFFFFFE, CFlag, 0x7FFFFFFF, CFlag},
+ {0x7FFFFFFD, CFlag, 0x7FFFFFFE, CFlag},
{0x00000000, ZCFlag, 0x00000001, CFlag},
- {0xffffffff, NFlag, 0x00000000, ZCFlag}}};
+ {0xFFFFFFFF, NFlag, 0x00000000, ZCFlag}}};
for (size_t left = 0; left < input_count; left++) {
for (size_t right = 0; right < input_count; right++) {
@@ -7680,9 +7674,9 @@ TEST(adc_sbc_shift) {
START();
__ Mov(x0, 0);
__ Mov(x1, 1);
- __ Mov(x2, 0x0123456789abcdefL);
- __ Mov(x3, 0xfedcba9876543210L);
- __ Mov(x4, 0xffffffffffffffffL);
+ __ Mov(x2, 0x0123456789ABCDEFL);
+ __ Mov(x3, 0xFEDCBA9876543210L);
+ __ Mov(x4, 0xFFFFFFFFFFFFFFFFL);
// Clear the C flag.
__ Adds(x0, x0, Operand(0));
@@ -7717,29 +7711,29 @@ TEST(adc_sbc_shift) {
RUN();
- CHECK_EQUAL_64(0xffffffffffffffffL, x5);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFL, x5);
CHECK_EQUAL_64(1L << 60, x6);
- CHECK_EQUAL_64(0xf0123456789abcddL, x7);
+ CHECK_EQUAL_64(0xF0123456789ABCDDL, x7);
CHECK_EQUAL_64(0x0111111111111110L, x8);
CHECK_EQUAL_64(0x1222222222222221L, x9);
- CHECK_EQUAL_32(0xffffffff, w10);
+ CHECK_EQUAL_32(0xFFFFFFFF, w10);
CHECK_EQUAL_32(1 << 30, w11);
- CHECK_EQUAL_32(0xf89abcdd, w12);
+ CHECK_EQUAL_32(0xF89ABCDD, w12);
CHECK_EQUAL_32(0x91111110, w13);
- CHECK_EQUAL_32(0x9a222221, w14);
+ CHECK_EQUAL_32(0x9A222221, w14);
- CHECK_EQUAL_64(0xffffffffffffffffL + 1, x18);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFL + 1, x18);
CHECK_EQUAL_64((1L << 60) + 1, x19);
- CHECK_EQUAL_64(0xf0123456789abcddL + 1, x20);
+ CHECK_EQUAL_64(0xF0123456789ABCDDL + 1, x20);
CHECK_EQUAL_64(0x0111111111111110L + 1, x21);
CHECK_EQUAL_64(0x1222222222222221L + 1, x22);
- CHECK_EQUAL_32(0xffffffff + 1, w23);
+ CHECK_EQUAL_32(0xFFFFFFFF + 1, w23);
CHECK_EQUAL_32((1 << 30) + 1, w24);
- CHECK_EQUAL_32(0xf89abcdd + 1, w25);
+ CHECK_EQUAL_32(0xF89ABCDD + 1, w25);
CHECK_EQUAL_32(0x91111110 + 1, w26);
- CHECK_EQUAL_32(0x9a222221 + 1, w27);
+ CHECK_EQUAL_32(0x9A222221 + 1, w27);
TEARDOWN();
}
@@ -7755,7 +7749,7 @@ TEST(adc_sbc_extend) {
__ Mov(x0, 0);
__ Mov(x1, 1);
- __ Mov(x2, 0x0123456789abcdefL);
+ __ Mov(x2, 0x0123456789ABCDEFL);
__ Adc(x10, x1, Operand(w2, UXTB, 1));
__ Adc(x11, x1, Operand(x2, SXTH, 2));
@@ -7781,28 +7775,28 @@ TEST(adc_sbc_extend) {
RUN();
- CHECK_EQUAL_64(0x1df, x10);
- CHECK_EQUAL_64(0xffffffffffff37bdL, x11);
- CHECK_EQUAL_64(0xfffffff765432110L, x12);
- CHECK_EQUAL_64(0x123456789abcdef1L, x13);
+ CHECK_EQUAL_64(0x1DF, x10);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFF37BDL, x11);
+ CHECK_EQUAL_64(0xFFFFFFF765432110L, x12);
+ CHECK_EQUAL_64(0x123456789ABCDEF1L, x13);
- CHECK_EQUAL_32(0x1df, w14);
- CHECK_EQUAL_32(0xffff37bd, w15);
- CHECK_EQUAL_32(0x9abcdef1, w9);
+ CHECK_EQUAL_32(0x1DF, w14);
+ CHECK_EQUAL_32(0xFFFF37BD, w15);
+ CHECK_EQUAL_32(0x9ABCDEF1, w9);
- CHECK_EQUAL_64(0x1df + 1, x20);
- CHECK_EQUAL_64(0xffffffffffff37bdL + 1, x21);
- CHECK_EQUAL_64(0xfffffff765432110L + 1, x22);
- CHECK_EQUAL_64(0x123456789abcdef1L + 1, x23);
+ CHECK_EQUAL_64(0x1DF + 1, x20);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFF37BDL + 1, x21);
+ CHECK_EQUAL_64(0xFFFFFFF765432110L + 1, x22);
+ CHECK_EQUAL_64(0x123456789ABCDEF1L + 1, x23);
- CHECK_EQUAL_32(0x1df + 1, w24);
- CHECK_EQUAL_32(0xffff37bd + 1, w25);
- CHECK_EQUAL_32(0x9abcdef1 + 1, w26);
+ CHECK_EQUAL_32(0x1DF + 1, w24);
+ CHECK_EQUAL_32(0xFFFF37BD + 1, w25);
+ CHECK_EQUAL_32(0x9ABCDEF1 + 1, w26);
// Check that adc correctly sets the condition flags.
START();
- __ Mov(x0, 0xff);
- __ Mov(x1, 0xffffffffffffffffL);
+ __ Mov(x0, 0xFF);
+ __ Mov(x1, 0xFFFFFFFFFFFFFFFFL);
// Clear the C flag.
__ Adds(x0, x0, Operand(0));
__ Adcs(x10, x0, Operand(x1, SXTX, 1));
@@ -7813,7 +7807,7 @@ TEST(adc_sbc_extend) {
CHECK_EQUAL_NZCV(CFlag);
START();
- __ Mov(x0, 0x7fffffffffffffffL);
+ __ Mov(x0, 0x7FFFFFFFFFFFFFFFL);
__ Mov(x1, 1);
// Clear the C flag.
__ Adds(x0, x0, Operand(0));
@@ -7825,7 +7819,7 @@ TEST(adc_sbc_extend) {
CHECK_EQUAL_NZCV(NVFlag);
START();
- __ Mov(x0, 0x7fffffffffffffffL);
+ __ Mov(x0, 0x7FFFFFFFFFFFFFFFL);
// Clear the C flag.
__ Adds(x0, x0, Operand(0));
__ Adcs(x10, x0, Operand(1));
@@ -7849,36 +7843,36 @@ TEST(adc_sbc_wide_imm) {
// Clear the C flag.
__ Adds(x0, x0, Operand(0));
- __ Adc(x7, x0, Operand(0x1234567890abcdefUL));
- __ Adc(w8, w0, Operand(0xffffffff));
- __ Sbc(x9, x0, Operand(0x1234567890abcdefUL));
- __ Sbc(w10, w0, Operand(0xffffffff));
- __ Ngc(x11, Operand(0xffffffff00000000UL));
- __ Ngc(w12, Operand(0xffff0000));
+ __ Adc(x7, x0, Operand(0x1234567890ABCDEFUL));
+ __ Adc(w8, w0, Operand(0xFFFFFFFF));
+ __ Sbc(x9, x0, Operand(0x1234567890ABCDEFUL));
+ __ Sbc(w10, w0, Operand(0xFFFFFFFF));
+ __ Ngc(x11, Operand(0xFFFFFFFF00000000UL));
+ __ Ngc(w12, Operand(0xFFFF0000));
// Set the C flag.
__ Cmp(w0, Operand(w0));
- __ Adc(x18, x0, Operand(0x1234567890abcdefUL));
- __ Adc(w19, w0, Operand(0xffffffff));
- __ Sbc(x20, x0, Operand(0x1234567890abcdefUL));
- __ Sbc(w21, w0, Operand(0xffffffff));
- __ Ngc(x22, Operand(0xffffffff00000000UL));
- __ Ngc(w23, Operand(0xffff0000));
+ __ Adc(x18, x0, Operand(0x1234567890ABCDEFUL));
+ __ Adc(w19, w0, Operand(0xFFFFFFFF));
+ __ Sbc(x20, x0, Operand(0x1234567890ABCDEFUL));
+ __ Sbc(w21, w0, Operand(0xFFFFFFFF));
+ __ Ngc(x22, Operand(0xFFFFFFFF00000000UL));
+ __ Ngc(w23, Operand(0xFFFF0000));
END();
RUN();
- CHECK_EQUAL_64(0x1234567890abcdefUL, x7);
- CHECK_EQUAL_64(0xffffffff, x8);
- CHECK_EQUAL_64(0xedcba9876f543210UL, x9);
+ CHECK_EQUAL_64(0x1234567890ABCDEFUL, x7);
+ CHECK_EQUAL_64(0xFFFFFFFF, x8);
+ CHECK_EQUAL_64(0xEDCBA9876F543210UL, x9);
CHECK_EQUAL_64(0, x10);
- CHECK_EQUAL_64(0xffffffff, x11);
- CHECK_EQUAL_64(0xffff, x12);
+ CHECK_EQUAL_64(0xFFFFFFFF, x11);
+ CHECK_EQUAL_64(0xFFFF, x12);
- CHECK_EQUAL_64(0x1234567890abcdefUL + 1, x18);
+ CHECK_EQUAL_64(0x1234567890ABCDEFUL + 1, x18);
CHECK_EQUAL_64(0, x19);
- CHECK_EQUAL_64(0xedcba9876f543211UL, x20);
+ CHECK_EQUAL_64(0xEDCBA9876F543211UL, x20);
CHECK_EQUAL_64(1, x21);
CHECK_EQUAL_64(0x100000000UL, x22);
CHECK_EQUAL_64(0x10000, x23);
@@ -7971,7 +7965,7 @@ TEST(flags) {
START();
__ Mov(x0, 1);
- __ Mov(x1, 0x7fffffffffffffffL);
+ __ Mov(x1, 0x7FFFFFFFFFFFFFFFL);
__ Cmn(x1, Operand(x0));
END();
@@ -7981,7 +7975,7 @@ TEST(flags) {
START();
__ Mov(w0, 1);
- __ Mov(w1, 0x7fffffff);
+ __ Mov(w1, 0x7FFFFFFF);
__ Cmn(w1, Operand(w0));
END();
@@ -7991,7 +7985,7 @@ TEST(flags) {
START();
__ Mov(x0, 1);
- __ Mov(x1, 0xffffffffffffffffL);
+ __ Mov(x1, 0xFFFFFFFFFFFFFFFFL);
__ Cmn(x1, Operand(x0));
END();
@@ -8001,7 +7995,7 @@ TEST(flags) {
START();
__ Mov(w0, 1);
- __ Mov(w1, 0xffffffff);
+ __ Mov(w1, 0xFFFFFFFF);
__ Cmn(w1, Operand(w0));
END();
@@ -8042,16 +8036,16 @@ TEST(cmp_shift) {
SETUP();
START();
- __ Mov(x18, 0xf0000000);
- __ Mov(x19, 0xf000000010000000UL);
- __ Mov(x20, 0xf0000000f0000000UL);
+ __ Mov(x18, 0xF0000000);
+ __ Mov(x19, 0xF000000010000000UL);
+ __ Mov(x20, 0xF0000000F0000000UL);
__ Mov(x21, 0x7800000078000000UL);
- __ Mov(x22, 0x3c0000003c000000UL);
+ __ Mov(x22, 0x3C0000003C000000UL);
__ Mov(x23, 0x8000000780000000UL);
- __ Mov(x24, 0x0000000f00000000UL);
- __ Mov(x25, 0x00000003c0000000UL);
+ __ Mov(x24, 0x0000000F00000000UL);
+ __ Mov(x25, 0x00000003C0000000UL);
__ Mov(x26, 0x8000000780000000UL);
- __ Mov(x27, 0xc0000003);
+ __ Mov(x27, 0xC0000003);
__ Cmp(w20, Operand(w21, LSL, 1));
__ Mrs(x0, NZCV);
@@ -8100,11 +8094,11 @@ TEST(cmp_extend) {
START();
__ Mov(w20, 0x2);
__ Mov(w21, 0x1);
- __ Mov(x22, 0xffffffffffffffffUL);
- __ Mov(x23, 0xff);
- __ Mov(x24, 0xfffffffffffffffeUL);
- __ Mov(x25, 0xffff);
- __ Mov(x26, 0xffffffff);
+ __ Mov(x22, 0xFFFFFFFFFFFFFFFFUL);
+ __ Mov(x23, 0xFF);
+ __ Mov(x24, 0xFFFFFFFFFFFFFFFEUL);
+ __ Mov(x25, 0xFFFF);
+ __ Mov(x26, 0xFFFFFFFF);
__ Cmp(w20, Operand(w21, LSL, 1));
__ Mrs(x0, NZCV);
@@ -8202,7 +8196,7 @@ TEST(ccmp_wide_imm) {
__ Mrs(x0, NZCV);
__ Cmp(w20, Operand(w20));
- __ Ccmp(x20, Operand(0xffffffffffffffffUL), NZCVFlag, eq);
+ __ Ccmp(x20, Operand(0xFFFFFFFFFFFFFFFFUL), NZCVFlag, eq);
__ Mrs(x1, NZCV);
END();
@@ -8222,9 +8216,9 @@ TEST(ccmp_shift_extend) {
START();
__ Mov(w20, 0x2);
__ Mov(w21, 0x1);
- __ Mov(x22, 0xffffffffffffffffUL);
- __ Mov(x23, 0xff);
- __ Mov(x24, 0xfffffffffffffffeUL);
+ __ Mov(x22, 0xFFFFFFFFFFFFFFFFUL);
+ __ Mov(x23, 0xFF);
+ __ Mov(x24, 0xFFFFFFFFFFFFFFFEUL);
__ Cmp(w20, Operand(w20));
__ Ccmp(w20, Operand(w21, LSL, 1), NZCVFlag, eq);
@@ -8265,8 +8259,8 @@ TEST(csel) {
START();
__ Mov(x16, 0);
- __ Mov(x24, 0x0000000f0000000fUL);
- __ Mov(x25, 0x0000001f0000001fUL);
+ __ Mov(x24, 0x0000000F0000000FUL);
+ __ Mov(x25, 0x0000001F0000001FUL);
__ Mov(x26, 0);
__ Mov(x27, 0);
@@ -8303,26 +8297,26 @@ TEST(csel) {
RUN();
- CHECK_EQUAL_64(0x0000000f, x0);
- CHECK_EQUAL_64(0x0000001f, x1);
+ CHECK_EQUAL_64(0x0000000F, x0);
+ CHECK_EQUAL_64(0x0000001F, x1);
CHECK_EQUAL_64(0x00000020, x2);
- CHECK_EQUAL_64(0x0000000f, x3);
- CHECK_EQUAL_64(0xffffffe0ffffffe0UL, x4);
- CHECK_EQUAL_64(0x0000000f0000000fUL, x5);
- CHECK_EQUAL_64(0xffffffe0ffffffe1UL, x6);
- CHECK_EQUAL_64(0x0000000f0000000fUL, x7);
+ CHECK_EQUAL_64(0x0000000F, x3);
+ CHECK_EQUAL_64(0xFFFFFFE0FFFFFFE0UL, x4);
+ CHECK_EQUAL_64(0x0000000F0000000FUL, x5);
+ CHECK_EQUAL_64(0xFFFFFFE0FFFFFFE1UL, x6);
+ CHECK_EQUAL_64(0x0000000F0000000FUL, x7);
CHECK_EQUAL_64(0x00000001, x8);
- CHECK_EQUAL_64(0xffffffff, x9);
- CHECK_EQUAL_64(0x0000001f00000020UL, x10);
- CHECK_EQUAL_64(0xfffffff0fffffff0UL, x11);
- CHECK_EQUAL_64(0xfffffff0fffffff1UL, x12);
- CHECK_EQUAL_64(0x0000000f, x13);
- CHECK_EQUAL_64(0x0000000f0000000fUL, x14);
- CHECK_EQUAL_64(0x0000000f, x15);
- CHECK_EQUAL_64(0x0000000f0000000fUL, x18);
+ CHECK_EQUAL_64(0xFFFFFFFF, x9);
+ CHECK_EQUAL_64(0x0000001F00000020UL, x10);
+ CHECK_EQUAL_64(0xFFFFFFF0FFFFFFF0UL, x11);
+ CHECK_EQUAL_64(0xFFFFFFF0FFFFFFF1UL, x12);
+ CHECK_EQUAL_64(0x0000000F, x13);
+ CHECK_EQUAL_64(0x0000000F0000000FUL, x14);
+ CHECK_EQUAL_64(0x0000000F, x15);
+ CHECK_EQUAL_64(0x0000000F0000000FUL, x18);
CHECK_EQUAL_64(0, x24);
- CHECK_EQUAL_64(0x0000001f0000001fUL, x25);
- CHECK_EQUAL_64(0x0000001f0000001fUL, x26);
+ CHECK_EQUAL_64(0x0000001F0000001FUL, x25);
+ CHECK_EQUAL_64(0x0000001F0000001FUL, x26);
CHECK_EQUAL_64(0, x27);
TEARDOWN();
@@ -8387,7 +8381,7 @@ TEST(lslv) {
INIT_V8();
SETUP();
- uint64_t value = 0x0123456789abcdefUL;
+ uint64_t value = 0x0123456789ABCDEFUL;
int shift[] = {1, 3, 5, 9, 17, 33};
START();
@@ -8440,7 +8434,7 @@ TEST(lsrv) {
INIT_V8();
SETUP();
- uint64_t value = 0x0123456789abcdefUL;
+ uint64_t value = 0x0123456789ABCDEFUL;
int shift[] = {1, 3, 5, 9, 17, 33};
START();
@@ -8479,7 +8473,7 @@ TEST(lsrv) {
CHECK_EQUAL_64(value >> (shift[4] & 63), x20);
CHECK_EQUAL_64(value >> (shift[5] & 63), x21);
- value &= 0xffffffffUL;
+ value &= 0xFFFFFFFFUL;
CHECK_EQUAL_32(value >> (shift[0] & 31), w22);
CHECK_EQUAL_32(value >> (shift[1] & 31), w23);
CHECK_EQUAL_32(value >> (shift[2] & 31), w24);
@@ -8495,7 +8489,7 @@ TEST(asrv) {
INIT_V8();
SETUP();
- int64_t value = 0xfedcba98fedcba98UL;
+ int64_t value = 0xFEDCBA98FEDCBA98UL;
int shift[] = {1, 3, 5, 9, 17, 33};
START();
@@ -8534,7 +8528,7 @@ TEST(asrv) {
CHECK_EQUAL_64(value >> (shift[4] & 63), x20);
CHECK_EQUAL_64(value >> (shift[5] & 63), x21);
- int32_t value32 = static_cast<int32_t>(value & 0xffffffffUL);
+ int32_t value32 = static_cast<int32_t>(value & 0xFFFFFFFFUL);
CHECK_EQUAL_32(value32 >> (shift[0] & 31), w22);
CHECK_EQUAL_32(value32 >> (shift[1] & 31), w23);
CHECK_EQUAL_32(value32 >> (shift[2] & 31), w24);
@@ -8550,7 +8544,7 @@ TEST(rorv) {
INIT_V8();
SETUP();
- uint64_t value = 0x0123456789abcdefUL;
+ uint64_t value = 0x0123456789ABCDEFUL;
int shift[] = {4, 8, 12, 16, 24, 36};
START();
@@ -8582,18 +8576,18 @@ TEST(rorv) {
RUN();
CHECK_EQUAL_64(value, x0);
- CHECK_EQUAL_64(0xf0123456789abcdeUL, x16);
- CHECK_EQUAL_64(0xef0123456789abcdUL, x17);
- CHECK_EQUAL_64(0xdef0123456789abcUL, x18);
- CHECK_EQUAL_64(0xcdef0123456789abUL, x19);
- CHECK_EQUAL_64(0xabcdef0123456789UL, x20);
- CHECK_EQUAL_64(0x789abcdef0123456UL, x21);
- CHECK_EQUAL_32(0xf89abcde, w22);
- CHECK_EQUAL_32(0xef89abcd, w23);
- CHECK_EQUAL_32(0xdef89abc, w24);
- CHECK_EQUAL_32(0xcdef89ab, w25);
- CHECK_EQUAL_32(0xabcdef89, w26);
- CHECK_EQUAL_32(0xf89abcde, w27);
+ CHECK_EQUAL_64(0xF0123456789ABCDEUL, x16);
+ CHECK_EQUAL_64(0xEF0123456789ABCDUL, x17);
+ CHECK_EQUAL_64(0xDEF0123456789ABCUL, x18);
+ CHECK_EQUAL_64(0xCDEF0123456789ABUL, x19);
+ CHECK_EQUAL_64(0xABCDEF0123456789UL, x20);
+ CHECK_EQUAL_64(0x789ABCDEF0123456UL, x21);
+ CHECK_EQUAL_32(0xF89ABCDE, w22);
+ CHECK_EQUAL_32(0xEF89ABCD, w23);
+ CHECK_EQUAL_32(0xDEF89ABC, w24);
+ CHECK_EQUAL_32(0xCDEF89AB, w25);
+ CHECK_EQUAL_32(0xABCDEF89, w26);
+ CHECK_EQUAL_32(0xF89ABCDE, w27);
TEARDOWN();
}
@@ -8604,7 +8598,7 @@ TEST(bfm) {
SETUP();
START();
- __ Mov(x1, 0x0123456789abcdefL);
+ __ Mov(x1, 0x0123456789ABCDEFL);
__ Mov(x10, 0x8888888888888888L);
__ Mov(x11, 0x8888888888888888L);
@@ -8626,15 +8620,14 @@ TEST(bfm) {
RUN();
+ CHECK_EQUAL_64(0x88888888888889ABL, x10);
+ CHECK_EQUAL_64(0x8888CDEF88888888L, x11);
- CHECK_EQUAL_64(0x88888888888889abL, x10);
- CHECK_EQUAL_64(0x8888cdef88888888L, x11);
-
- CHECK_EQUAL_32(0x888888ab, w20);
- CHECK_EQUAL_32(0x88cdef88, w21);
+ CHECK_EQUAL_32(0x888888AB, w20);
+ CHECK_EQUAL_32(0x88CDEF88, w21);
- CHECK_EQUAL_64(0x8888888888ef8888L, x12);
- CHECK_EQUAL_64(0x88888888888888abL, x13);
+ CHECK_EQUAL_64(0x8888888888EF8888L, x12);
+ CHECK_EQUAL_64(0x88888888888888ABL, x13);
TEARDOWN();
}
@@ -8645,8 +8638,8 @@ TEST(sbfm) {
SETUP();
START();
- __ Mov(x1, 0x0123456789abcdefL);
- __ Mov(x2, 0xfedcba9876543210L);
+ __ Mov(x1, 0x0123456789ABCDEFL);
+ __ Mov(x2, 0xFEDCBA9876543210L);
__ sbfm(x10, x1, 16, 31);
__ sbfm(x11, x1, 32, 15);
@@ -8675,28 +8668,27 @@ TEST(sbfm) {
RUN();
-
- CHECK_EQUAL_64(0xffffffffffff89abL, x10);
- CHECK_EQUAL_64(0xffffcdef00000000L, x11);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFF89ABL, x10);
+ CHECK_EQUAL_64(0xFFFFCDEF00000000L, x11);
CHECK_EQUAL_64(0x4567L, x12);
- CHECK_EQUAL_64(0x789abcdef0000L, x13);
+ CHECK_EQUAL_64(0x789ABCDEF0000L, x13);
- CHECK_EQUAL_32(0xffffffab, w14);
- CHECK_EQUAL_32(0xffcdef00, w15);
+ CHECK_EQUAL_32(0xFFFFFFAB, w14);
+ CHECK_EQUAL_32(0xFFCDEF00, w15);
CHECK_EQUAL_32(0x54, w16);
CHECK_EQUAL_32(0x00321000, w17);
CHECK_EQUAL_64(0x01234567L, x18);
- CHECK_EQUAL_64(0xfffffffffedcba98L, x19);
- CHECK_EQUAL_64(0xffffffffffcdef00L, x20);
+ CHECK_EQUAL_64(0xFFFFFFFFFEDCBA98L, x19);
+ CHECK_EQUAL_64(0xFFFFFFFFFFCDEF00L, x20);
CHECK_EQUAL_64(0x321000L, x21);
- CHECK_EQUAL_64(0xffffffffffffabcdL, x22);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFFABCDL, x22);
CHECK_EQUAL_64(0x5432L, x23);
- CHECK_EQUAL_64(0xffffffffffffffefL, x24);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFFFFEFL, x24);
CHECK_EQUAL_64(0x10, x25);
- CHECK_EQUAL_64(0xffffffffffffcdefL, x26);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFFCDEFL, x26);
CHECK_EQUAL_64(0x3210, x27);
- CHECK_EQUAL_64(0xffffffff89abcdefL, x28);
+ CHECK_EQUAL_64(0xFFFFFFFF89ABCDEFL, x28);
CHECK_EQUAL_64(0x76543210, x29);
TEARDOWN();
@@ -8708,8 +8700,8 @@ TEST(ubfm) {
SETUP();
START();
- __ Mov(x1, 0x0123456789abcdefL);
- __ Mov(x2, 0xfedcba9876543210L);
+ __ Mov(x1, 0x0123456789ABCDEFL);
+ __ Mov(x2, 0xFEDCBA9876543210L);
__ Mov(x10, 0x8888888888888888L);
__ Mov(x11, 0x8888888888888888L);
@@ -8737,24 +8729,24 @@ TEST(ubfm) {
RUN();
- CHECK_EQUAL_64(0x00000000000089abL, x10);
- CHECK_EQUAL_64(0x0000cdef00000000L, x11);
+ CHECK_EQUAL_64(0x00000000000089ABL, x10);
+ CHECK_EQUAL_64(0x0000CDEF00000000L, x11);
CHECK_EQUAL_64(0x4567L, x12);
- CHECK_EQUAL_64(0x789abcdef0000L, x13);
+ CHECK_EQUAL_64(0x789ABCDEF0000L, x13);
- CHECK_EQUAL_32(0x000000ab, w25);
- CHECK_EQUAL_32(0x00cdef00, w26);
+ CHECK_EQUAL_32(0x000000AB, w25);
+ CHECK_EQUAL_32(0x00CDEF00, w26);
CHECK_EQUAL_32(0x54, w27);
CHECK_EQUAL_32(0x00321000, w28);
CHECK_EQUAL_64(0x8000000000000000L, x15);
- CHECK_EQUAL_64(0x0123456789abcdefL, x16);
+ CHECK_EQUAL_64(0x0123456789ABCDEFL, x16);
CHECK_EQUAL_64(0x01234567L, x17);
- CHECK_EQUAL_64(0xcdef00L, x18);
- CHECK_EQUAL_64(0xabcdL, x19);
- CHECK_EQUAL_64(0xefL, x20);
- CHECK_EQUAL_64(0xcdefL, x21);
- CHECK_EQUAL_64(0x89abcdefL, x22);
+ CHECK_EQUAL_64(0xCDEF00L, x18);
+ CHECK_EQUAL_64(0xABCDL, x19);
+ CHECK_EQUAL_64(0xEFL, x20);
+ CHECK_EQUAL_64(0xCDEFL, x21);
+ CHECK_EQUAL_64(0x89ABCDEFL, x22);
TEARDOWN();
}
@@ -8765,8 +8757,8 @@ TEST(extr) {
SETUP();
START();
- __ Mov(x1, 0x0123456789abcdefL);
- __ Mov(x2, 0xfedcba9876543210L);
+ __ Mov(x1, 0x0123456789ABCDEFL);
+ __ Mov(x2, 0xFEDCBA9876543210L);
__ Extr(w10, w1, w2, 0);
__ Extr(x11, x1, x2, 0);
@@ -8784,15 +8776,15 @@ TEST(extr) {
RUN();
CHECK_EQUAL_64(0x76543210, x10);
- CHECK_EQUAL_64(0xfedcba9876543210L, x11);
- CHECK_EQUAL_64(0xbb2a1908, x12);
- CHECK_EQUAL_64(0x0048d159e26af37bUL, x13);
- CHECK_EQUAL_64(0x89abcdef, x20);
- CHECK_EQUAL_64(0x0123456789abcdefL, x21);
- CHECK_EQUAL_64(0x19083b2a, x22);
- CHECK_EQUAL_64(0x13579bdf, x23);
- CHECK_EQUAL_64(0x7f6e5d4c3b2a1908UL, x24);
- CHECK_EQUAL_64(0x02468acf13579bdeUL, x25);
+ CHECK_EQUAL_64(0xFEDCBA9876543210L, x11);
+ CHECK_EQUAL_64(0xBB2A1908, x12);
+ CHECK_EQUAL_64(0x0048D159E26AF37BUL, x13);
+ CHECK_EQUAL_64(0x89ABCDEF, x20);
+ CHECK_EQUAL_64(0x0123456789ABCDEFL, x21);
+ CHECK_EQUAL_64(0x19083B2A, x22);
+ CHECK_EQUAL_64(0x13579BDF, x23);
+ CHECK_EQUAL_64(0x7F6E5D4C3B2A1908UL, x24);
+ CHECK_EQUAL_64(0x02468ACF13579BDEUL, x25);
TEARDOWN();
}
@@ -8841,7 +8833,7 @@ TEST(fmov_reg) {
__ Fmov(x1, d1);
__ Fmov(d2, x1);
__ Fmov(d4, d1);
- __ Fmov(d6, bit_cast<double>(0x0123456789abcdefL));
+ __ Fmov(d6, bit_cast<double>(0x0123456789ABCDEFL));
__ Fmov(s6, s6);
END();
@@ -8853,7 +8845,7 @@ TEST(fmov_reg) {
CHECK_EQUAL_64(bit_cast<uint64_t>(-13.0), x1);
CHECK_EQUAL_FP64(-13.0, d2);
CHECK_EQUAL_FP64(-13.0, d4);
- CHECK_EQUAL_FP32(bit_cast<float>(0x89abcdef), s6);
+ CHECK_EQUAL_FP32(bit_cast<float>(0x89ABCDEF), s6);
TEARDOWN();
}
@@ -9169,12 +9161,12 @@ TEST(fmadd_fmsub_float) {
TEST(fmadd_fmsub_double_nans) {
INIT_V8();
// Make sure that NaN propagation works correctly.
- double s1 = bit_cast<double>(0x7ff5555511111111);
- double s2 = bit_cast<double>(0x7ff5555522222222);
- double sa = bit_cast<double>(0x7ff55555aaaaaaaa);
- double q1 = bit_cast<double>(0x7ffaaaaa11111111);
- double q2 = bit_cast<double>(0x7ffaaaaa22222222);
- double qa = bit_cast<double>(0x7ffaaaaaaaaaaaaa);
+ double s1 = bit_cast<double>(0x7FF5555511111111);
+ double s2 = bit_cast<double>(0x7FF5555522222222);
+ double sa = bit_cast<double>(0x7FF55555AAAAAAAA);
+ double q1 = bit_cast<double>(0x7FFAAAAA11111111);
+ double q2 = bit_cast<double>(0x7FFAAAAA22222222);
+ double qa = bit_cast<double>(0x7FFAAAAAAAAAAAAA);
CHECK(IsSignallingNaN(s1));
CHECK(IsSignallingNaN(s2));
CHECK(IsSignallingNaN(sa));
@@ -9183,9 +9175,9 @@ TEST(fmadd_fmsub_double_nans) {
CHECK(IsQuietNaN(qa));
// The input NaNs after passing through ProcessNaN.
- double s1_proc = bit_cast<double>(0x7ffd555511111111);
- double s2_proc = bit_cast<double>(0x7ffd555522222222);
- double sa_proc = bit_cast<double>(0x7ffd5555aaaaaaaa);
+ double s1_proc = bit_cast<double>(0x7FFD555511111111);
+ double s2_proc = bit_cast<double>(0x7FFD555522222222);
+ double sa_proc = bit_cast<double>(0x7FFD5555AAAAAAAA);
double q1_proc = q1;
double q2_proc = q2;
double qa_proc = qa;
@@ -9197,10 +9189,10 @@ TEST(fmadd_fmsub_double_nans) {
CHECK(IsQuietNaN(qa_proc));
// Negated NaNs as it would be done on ARMv8 hardware.
- double s1_proc_neg = bit_cast<double>(0xfffd555511111111);
- double sa_proc_neg = bit_cast<double>(0xfffd5555aaaaaaaa);
- double q1_proc_neg = bit_cast<double>(0xfffaaaaa11111111);
- double qa_proc_neg = bit_cast<double>(0xfffaaaaaaaaaaaaa);
+ double s1_proc_neg = bit_cast<double>(0xFFFD555511111111);
+ double sa_proc_neg = bit_cast<double>(0xFFFD5555AAAAAAAA);
+ double q1_proc_neg = bit_cast<double>(0xFFFAAAAA11111111);
+ double qa_proc_neg = bit_cast<double>(0xFFFAAAAAAAAAAAAA);
CHECK(IsQuietNaN(s1_proc_neg));
CHECK(IsQuietNaN(sa_proc_neg));
CHECK(IsQuietNaN(q1_proc_neg));
@@ -9252,12 +9244,12 @@ TEST(fmadd_fmsub_double_nans) {
TEST(fmadd_fmsub_float_nans) {
INIT_V8();
// Make sure that NaN propagation works correctly.
- float s1 = bit_cast<float>(0x7f951111);
- float s2 = bit_cast<float>(0x7f952222);
- float sa = bit_cast<float>(0x7f95aaaa);
- float q1 = bit_cast<float>(0x7fea1111);
- float q2 = bit_cast<float>(0x7fea2222);
- float qa = bit_cast<float>(0x7feaaaaa);
+ float s1 = bit_cast<float>(0x7F951111);
+ float s2 = bit_cast<float>(0x7F952222);
+ float sa = bit_cast<float>(0x7F95AAAA);
+ float q1 = bit_cast<float>(0x7FEA1111);
+ float q2 = bit_cast<float>(0x7FEA2222);
+ float qa = bit_cast<float>(0x7FEAAAAA);
CHECK(IsSignallingNaN(s1));
CHECK(IsSignallingNaN(s2));
CHECK(IsSignallingNaN(sa));
@@ -9266,9 +9258,9 @@ TEST(fmadd_fmsub_float_nans) {
CHECK(IsQuietNaN(qa));
// The input NaNs after passing through ProcessNaN.
- float s1_proc = bit_cast<float>(0x7fd51111);
- float s2_proc = bit_cast<float>(0x7fd52222);
- float sa_proc = bit_cast<float>(0x7fd5aaaa);
+ float s1_proc = bit_cast<float>(0x7FD51111);
+ float s2_proc = bit_cast<float>(0x7FD52222);
+ float sa_proc = bit_cast<float>(0x7FD5AAAA);
float q1_proc = q1;
float q2_proc = q2;
float qa_proc = qa;
@@ -9280,10 +9272,10 @@ TEST(fmadd_fmsub_float_nans) {
CHECK(IsQuietNaN(qa_proc));
// Negated NaNs as it would be done on ARMv8 hardware.
- float s1_proc_neg = bit_cast<float>(0xffd51111);
- float sa_proc_neg = bit_cast<float>(0xffd5aaaa);
- float q1_proc_neg = bit_cast<float>(0xffea1111);
- float qa_proc_neg = bit_cast<float>(0xffeaaaaa);
+ float s1_proc_neg = bit_cast<float>(0xFFD51111);
+ float sa_proc_neg = bit_cast<float>(0xFFD5AAAA);
+ float q1_proc_neg = bit_cast<float>(0xFFEA1111);
+ float qa_proc_neg = bit_cast<float>(0xFFEAAAAA);
CHECK(IsQuietNaN(s1_proc_neg));
CHECK(IsQuietNaN(sa_proc_neg));
CHECK(IsQuietNaN(q1_proc_neg));
@@ -9499,10 +9491,10 @@ static void FminFmaxDoubleHelper(double n, double m, double min, double max,
TEST(fmax_fmin_d) {
INIT_V8();
// Use non-standard NaNs to check that the payload bits are preserved.
- double snan = bit_cast<double>(0x7ff5555512345678);
- double qnan = bit_cast<double>(0x7ffaaaaa87654321);
+ double snan = bit_cast<double>(0x7FF5555512345678);
+ double qnan = bit_cast<double>(0x7FFAAAAA87654321);
- double snan_processed = bit_cast<double>(0x7ffd555512345678);
+ double snan_processed = bit_cast<double>(0x7FFD555512345678);
double qnan_processed = qnan;
CHECK(IsSignallingNaN(snan));
@@ -9584,10 +9576,10 @@ static void FminFmaxFloatHelper(float n, float m, float min, float max,
TEST(fmax_fmin_s) {
INIT_V8();
// Use non-standard NaNs to check that the payload bits are preserved.
- float snan = bit_cast<float>(0x7f951234);
- float qnan = bit_cast<float>(0x7fea8765);
+ float snan = bit_cast<float>(0x7F951234);
+ float qnan = bit_cast<float>(0x7FEA8765);
- float snan_processed = bit_cast<float>(0x7fd51234);
+ float snan_processed = bit_cast<float>(0x7FD51234);
float qnan_processed = qnan;
CHECK(IsSignallingNaN(snan));
@@ -9727,7 +9719,7 @@ TEST(fcmp) {
__ Fmov(s8, 0.0);
__ Fmov(s9, 0.5);
- __ Mov(w18, 0x7f800001); // Single precision NaN.
+ __ Mov(w18, 0x7F800001); // Single precision NaN.
__ Fmov(s18, w18);
__ Fcmp(s8, s8);
@@ -9749,7 +9741,7 @@ TEST(fcmp) {
__ Fmov(d19, 0.0);
__ Fmov(d20, 0.5);
- __ Mov(x21, 0x7ff0000000000001UL); // Double precision NaN.
+ __ Mov(x21, 0x7FF0000000000001UL); // Double precision NaN.
__ Fmov(d21, x21);
__ Fcmp(d19, d19);
@@ -10422,8 +10414,8 @@ TEST(fcvt_ds) {
__ Fmov(s26, -0.0);
__ Fmov(s27, FLT_MAX);
__ Fmov(s28, FLT_MIN);
- __ Fmov(s29, bit_cast<float>(0x7fc12345)); // Quiet NaN.
- __ Fmov(s30, bit_cast<float>(0x7f812345)); // Signalling NaN.
+ __ Fmov(s29, bit_cast<float>(0x7FC12345)); // Quiet NaN.
+ __ Fmov(s30, bit_cast<float>(0x7F812345)); // Signalling NaN.
__ Fcvt(d0, s16);
__ Fcvt(d1, s17);
@@ -10464,8 +10456,8 @@ TEST(fcvt_ds) {
// - The top bit of the mantissa is forced to 1 (making it a quiet NaN).
// - The remaining mantissa bits are copied until they run out.
// - The low-order bits that haven't already been assigned are set to 0.
- CHECK_EQUAL_FP64(bit_cast<double>(0x7ff82468a0000000), d13);
- CHECK_EQUAL_FP64(bit_cast<double>(0x7ff82468a0000000), d14);
+ CHECK_EQUAL_FP64(bit_cast<double>(0x7FF82468A0000000), d13);
+ CHECK_EQUAL_FP64(bit_cast<double>(0x7FF82468A0000000), d14);
TEARDOWN();
}
@@ -10496,23 +10488,23 @@ TEST(fcvt_sd) {
// For normalized numbers:
// bit 29 (0x0000000020000000) is the lowest-order bit which will
// fit in the float's mantissa.
- {bit_cast<double>(0x3ff0000000000000), bit_cast<float>(0x3f800000)},
- {bit_cast<double>(0x3ff0000000000001), bit_cast<float>(0x3f800000)},
- {bit_cast<double>(0x3ff0000010000000), bit_cast<float>(0x3f800000)},
- {bit_cast<double>(0x3ff0000010000001), bit_cast<float>(0x3f800001)},
- {bit_cast<double>(0x3ff0000020000000), bit_cast<float>(0x3f800001)},
- {bit_cast<double>(0x3ff0000020000001), bit_cast<float>(0x3f800001)},
- {bit_cast<double>(0x3ff0000030000000), bit_cast<float>(0x3f800002)},
- {bit_cast<double>(0x3ff0000030000001), bit_cast<float>(0x3f800002)},
- {bit_cast<double>(0x3ff0000040000000), bit_cast<float>(0x3f800002)},
- {bit_cast<double>(0x3ff0000040000001), bit_cast<float>(0x3f800002)},
- {bit_cast<double>(0x3ff0000050000000), bit_cast<float>(0x3f800002)},
- {bit_cast<double>(0x3ff0000050000001), bit_cast<float>(0x3f800003)},
- {bit_cast<double>(0x3ff0000060000000), bit_cast<float>(0x3f800003)},
+ {bit_cast<double>(0x3FF0000000000000), bit_cast<float>(0x3F800000)},
+ {bit_cast<double>(0x3FF0000000000001), bit_cast<float>(0x3F800000)},
+ {bit_cast<double>(0x3FF0000010000000), bit_cast<float>(0x3F800000)},
+ {bit_cast<double>(0x3FF0000010000001), bit_cast<float>(0x3F800001)},
+ {bit_cast<double>(0x3FF0000020000000), bit_cast<float>(0x3F800001)},
+ {bit_cast<double>(0x3FF0000020000001), bit_cast<float>(0x3F800001)},
+ {bit_cast<double>(0x3FF0000030000000), bit_cast<float>(0x3F800002)},
+ {bit_cast<double>(0x3FF0000030000001), bit_cast<float>(0x3F800002)},
+ {bit_cast<double>(0x3FF0000040000000), bit_cast<float>(0x3F800002)},
+ {bit_cast<double>(0x3FF0000040000001), bit_cast<float>(0x3F800002)},
+ {bit_cast<double>(0x3FF0000050000000), bit_cast<float>(0x3F800002)},
+ {bit_cast<double>(0x3FF0000050000001), bit_cast<float>(0x3F800003)},
+ {bit_cast<double>(0x3FF0000060000000), bit_cast<float>(0x3F800003)},
// - A mantissa that overflows into the exponent during rounding.
- {bit_cast<double>(0x3feffffff0000000), bit_cast<float>(0x3f800000)},
+ {bit_cast<double>(0x3FEFFFFFF0000000), bit_cast<float>(0x3F800000)},
// - The largest double that rounds to a normal float.
- {bit_cast<double>(0x47efffffefffffff), bit_cast<float>(0x7f7fffff)},
+ {bit_cast<double>(0x47EFFFFFEFFFFFFF), bit_cast<float>(0x7F7FFFFF)},
// Doubles that are too big for a float.
{kFP64PositiveInfinity, kFP32PositiveInfinity},
@@ -10520,7 +10512,7 @@ TEST(fcvt_sd) {
// - The smallest exponent that's too big for a float.
{pow(2.0, 128), kFP32PositiveInfinity},
// - This exponent is in range, but the value rounds to infinity.
- {bit_cast<double>(0x47effffff0000000), kFP32PositiveInfinity},
+ {bit_cast<double>(0x47EFFFFFF0000000), kFP32PositiveInfinity},
// Doubles that are too small for a float.
// - The smallest (subnormal) double.
@@ -10530,36 +10522,36 @@ TEST(fcvt_sd) {
// Normal doubles that become subnormal floats.
// - The largest subnormal float.
- {bit_cast<double>(0x380fffffc0000000), bit_cast<float>(0x007fffff)},
+ {bit_cast<double>(0x380FFFFFC0000000), bit_cast<float>(0x007FFFFF)},
// - The smallest subnormal float.
- {bit_cast<double>(0x36a0000000000000), bit_cast<float>(0x00000001)},
+ {bit_cast<double>(0x36A0000000000000), bit_cast<float>(0x00000001)},
// - Subnormal floats that need (ties-to-even) rounding.
// For these subnormals:
// bit 34 (0x0000000400000000) is the lowest-order bit which will
// fit in the float's mantissa.
- {bit_cast<double>(0x37c159e000000000), bit_cast<float>(0x00045678)},
- {bit_cast<double>(0x37c159e000000001), bit_cast<float>(0x00045678)},
- {bit_cast<double>(0x37c159e200000000), bit_cast<float>(0x00045678)},
- {bit_cast<double>(0x37c159e200000001), bit_cast<float>(0x00045679)},
- {bit_cast<double>(0x37c159e400000000), bit_cast<float>(0x00045679)},
- {bit_cast<double>(0x37c159e400000001), bit_cast<float>(0x00045679)},
- {bit_cast<double>(0x37c159e600000000), bit_cast<float>(0x0004567a)},
- {bit_cast<double>(0x37c159e600000001), bit_cast<float>(0x0004567a)},
- {bit_cast<double>(0x37c159e800000000), bit_cast<float>(0x0004567a)},
- {bit_cast<double>(0x37c159e800000001), bit_cast<float>(0x0004567a)},
- {bit_cast<double>(0x37c159ea00000000), bit_cast<float>(0x0004567a)},
- {bit_cast<double>(0x37c159ea00000001), bit_cast<float>(0x0004567b)},
- {bit_cast<double>(0x37c159ec00000000), bit_cast<float>(0x0004567b)},
+ {bit_cast<double>(0x37C159E000000000), bit_cast<float>(0x00045678)},
+ {bit_cast<double>(0x37C159E000000001), bit_cast<float>(0x00045678)},
+ {bit_cast<double>(0x37C159E200000000), bit_cast<float>(0x00045678)},
+ {bit_cast<double>(0x37C159E200000001), bit_cast<float>(0x00045679)},
+ {bit_cast<double>(0x37C159E400000000), bit_cast<float>(0x00045679)},
+ {bit_cast<double>(0x37C159E400000001), bit_cast<float>(0x00045679)},
+ {bit_cast<double>(0x37C159E600000000), bit_cast<float>(0x0004567A)},
+ {bit_cast<double>(0x37C159E600000001), bit_cast<float>(0x0004567A)},
+ {bit_cast<double>(0x37C159E800000000), bit_cast<float>(0x0004567A)},
+ {bit_cast<double>(0x37C159E800000001), bit_cast<float>(0x0004567A)},
+ {bit_cast<double>(0x37C159EA00000000), bit_cast<float>(0x0004567A)},
+ {bit_cast<double>(0x37C159EA00000001), bit_cast<float>(0x0004567B)},
+ {bit_cast<double>(0x37C159EC00000000), bit_cast<float>(0x0004567B)},
// - The smallest double which rounds up to become a subnormal float.
{bit_cast<double>(0x3690000000000001), bit_cast<float>(0x00000001)},
// Check NaN payload preservation.
- {bit_cast<double>(0x7ff82468a0000000), bit_cast<float>(0x7fc12345)},
- {bit_cast<double>(0x7ff82468bfffffff), bit_cast<float>(0x7fc12345)},
+ {bit_cast<double>(0x7FF82468A0000000), bit_cast<float>(0x7FC12345)},
+ {bit_cast<double>(0x7FF82468BFFFFFFF), bit_cast<float>(0x7FC12345)},
// - Signalling NaNs become quiet NaNs.
- {bit_cast<double>(0x7ff02468a0000000), bit_cast<float>(0x7fc12345)},
- {bit_cast<double>(0x7ff02468bfffffff), bit_cast<float>(0x7fc12345)},
- {bit_cast<double>(0x7ff000001fffffff), bit_cast<float>(0x7fc00000)},
+ {bit_cast<double>(0x7FF02468A0000000), bit_cast<float>(0x7FC12345)},
+ {bit_cast<double>(0x7FF02468BFFFFFFF), bit_cast<float>(0x7FC12345)},
+ {bit_cast<double>(0x7FF000001FFFFFFF), bit_cast<float>(0x7FC00000)},
};
int count = sizeof(test) / sizeof(test[0]);
@@ -10600,7 +10592,7 @@ TEST(fcvtas) {
__ Fmov(s3, -2.5);
__ Fmov(s4, kFP32PositiveInfinity);
__ Fmov(s5, kFP32NegativeInfinity);
- __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
+ __ Fmov(s6, 0x7FFFFF80); // Largest float < INT32_MAX.
__ Fneg(s7, s6); // Smallest float > INT32_MIN.
__ Fmov(d8, 1.0);
__ Fmov(d9, 1.1);
@@ -10615,14 +10607,14 @@ TEST(fcvtas) {
__ Fmov(s19, -2.5);
__ Fmov(s20, kFP32PositiveInfinity);
__ Fmov(s21, kFP32NegativeInfinity);
- __ Fmov(s22, 0x7fffff8000000000UL); // Largest float < INT64_MAX.
+ __ Fmov(s22, 0x7FFFFF8000000000UL); // Largest float < INT64_MAX.
__ Fneg(s23, s22); // Smallest float > INT64_MIN.
__ Fmov(d24, 1.1);
__ Fmov(d25, 2.5);
__ Fmov(d26, -2.5);
__ Fmov(d27, kFP64PositiveInfinity);
__ Fmov(d28, kFP64NegativeInfinity);
- __ Fmov(d29, 0x7ffffffffffffc00UL); // Largest double < INT64_MAX.
+ __ Fmov(d29, 0x7FFFFFFFFFFFFC00UL); // Largest double < INT64_MAX.
__ Fneg(d30, d29); // Smallest double > INT64_MIN.
__ Fcvtas(w0, s0);
@@ -10662,32 +10654,32 @@ TEST(fcvtas) {
CHECK_EQUAL_64(1, x0);
CHECK_EQUAL_64(1, x1);
CHECK_EQUAL_64(3, x2);
- CHECK_EQUAL_64(0xfffffffd, x3);
- CHECK_EQUAL_64(0x7fffffff, x4);
+ CHECK_EQUAL_64(0xFFFFFFFD, x3);
+ CHECK_EQUAL_64(0x7FFFFFFF, x4);
CHECK_EQUAL_64(0x80000000, x5);
- CHECK_EQUAL_64(0x7fffff80, x6);
+ CHECK_EQUAL_64(0x7FFFFF80, x6);
CHECK_EQUAL_64(0x80000080, x7);
CHECK_EQUAL_64(1, x8);
CHECK_EQUAL_64(1, x9);
CHECK_EQUAL_64(3, x10);
- CHECK_EQUAL_64(0xfffffffd, x11);
- CHECK_EQUAL_64(0x7fffffff, x12);
+ CHECK_EQUAL_64(0xFFFFFFFD, x11);
+ CHECK_EQUAL_64(0x7FFFFFFF, x12);
CHECK_EQUAL_64(0x80000000, x13);
- CHECK_EQUAL_64(0x7ffffffe, x14);
+ CHECK_EQUAL_64(0x7FFFFFFE, x14);
CHECK_EQUAL_64(0x80000001, x15);
CHECK_EQUAL_64(1, x17);
CHECK_EQUAL_64(3, x18);
- CHECK_EQUAL_64(0xfffffffffffffffdUL, x19);
- CHECK_EQUAL_64(0x7fffffffffffffffUL, x20);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFDUL, x19);
+ CHECK_EQUAL_64(0x7FFFFFFFFFFFFFFFUL, x20);
CHECK_EQUAL_64(0x8000000000000000UL, x21);
- CHECK_EQUAL_64(0x7fffff8000000000UL, x22);
+ CHECK_EQUAL_64(0x7FFFFF8000000000UL, x22);
CHECK_EQUAL_64(0x8000008000000000UL, x23);
CHECK_EQUAL_64(1, x24);
CHECK_EQUAL_64(3, x25);
- CHECK_EQUAL_64(0xfffffffffffffffdUL, x26);
- CHECK_EQUAL_64(0x7fffffffffffffffUL, x27);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFDUL, x26);
+ CHECK_EQUAL_64(0x7FFFFFFFFFFFFFFFUL, x27);
CHECK_EQUAL_64(0x8000000000000000UL, x28);
- CHECK_EQUAL_64(0x7ffffffffffffc00UL, x29);
+ CHECK_EQUAL_64(0x7FFFFFFFFFFFFC00UL, x29);
CHECK_EQUAL_64(0x8000000000000400UL, x30);
TEARDOWN();
@@ -10705,27 +10697,27 @@ TEST(fcvtau) {
__ Fmov(s3, -2.5);
__ Fmov(s4, kFP32PositiveInfinity);
__ Fmov(s5, kFP32NegativeInfinity);
- __ Fmov(s6, 0xffffff00); // Largest float < UINT32_MAX.
+ __ Fmov(s6, 0xFFFFFF00); // Largest float < UINT32_MAX.
__ Fmov(d8, 1.0);
__ Fmov(d9, 1.1);
__ Fmov(d10, 2.5);
__ Fmov(d11, -2.5);
__ Fmov(d12, kFP64PositiveInfinity);
__ Fmov(d13, kFP64NegativeInfinity);
- __ Fmov(d14, 0xfffffffe);
+ __ Fmov(d14, 0xFFFFFFFE);
__ Fmov(s16, 1.0);
__ Fmov(s17, 1.1);
__ Fmov(s18, 2.5);
__ Fmov(s19, -2.5);
__ Fmov(s20, kFP32PositiveInfinity);
__ Fmov(s21, kFP32NegativeInfinity);
- __ Fmov(s22, 0xffffff0000000000UL); // Largest float < UINT64_MAX.
+ __ Fmov(s22, 0xFFFFFF0000000000UL); // Largest float < UINT64_MAX.
__ Fmov(d24, 1.1);
__ Fmov(d25, 2.5);
__ Fmov(d26, -2.5);
__ Fmov(d27, kFP64PositiveInfinity);
__ Fmov(d28, kFP64NegativeInfinity);
- __ Fmov(d29, 0xfffffffffffff800UL); // Largest double < UINT64_MAX.
+ __ Fmov(d29, 0xFFFFFFFFFFFFF800UL); // Largest double < UINT64_MAX.
__ Fmov(s30, 0x100000000UL);
__ Fcvtau(w0, s0);
@@ -10765,30 +10757,30 @@ TEST(fcvtau) {
CHECK_EQUAL_64(1, x1);
CHECK_EQUAL_64(3, x2);
CHECK_EQUAL_64(0, x3);
- CHECK_EQUAL_64(0xffffffff, x4);
+ CHECK_EQUAL_64(0xFFFFFFFF, x4);
CHECK_EQUAL_64(0, x5);
- CHECK_EQUAL_64(0xffffff00, x6);
+ CHECK_EQUAL_64(0xFFFFFF00, x6);
CHECK_EQUAL_64(1, x8);
CHECK_EQUAL_64(1, x9);
CHECK_EQUAL_64(3, x10);
CHECK_EQUAL_64(0, x11);
- CHECK_EQUAL_64(0xffffffff, x12);
+ CHECK_EQUAL_64(0xFFFFFFFF, x12);
CHECK_EQUAL_64(0, x13);
- CHECK_EQUAL_64(0xfffffffe, x14);
+ CHECK_EQUAL_64(0xFFFFFFFE, x14);
CHECK_EQUAL_64(1, x16);
CHECK_EQUAL_64(1, x17);
CHECK_EQUAL_64(3, x18);
CHECK_EQUAL_64(0, x19);
- CHECK_EQUAL_64(0xffffffffffffffffUL, x20);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFUL, x20);
CHECK_EQUAL_64(0, x21);
- CHECK_EQUAL_64(0xffffff0000000000UL, x22);
+ CHECK_EQUAL_64(0xFFFFFF0000000000UL, x22);
CHECK_EQUAL_64(1, x24);
CHECK_EQUAL_64(3, x25);
CHECK_EQUAL_64(0, x26);
- CHECK_EQUAL_64(0xffffffffffffffffUL, x27);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFUL, x27);
CHECK_EQUAL_64(0, x28);
- CHECK_EQUAL_64(0xfffffffffffff800UL, x29);
- CHECK_EQUAL_64(0xffffffff, x30);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFFF800UL, x29);
+ CHECK_EQUAL_64(0xFFFFFFFF, x30);
TEARDOWN();
}
@@ -10805,7 +10797,7 @@ TEST(fcvtms) {
__ Fmov(s3, -1.5);
__ Fmov(s4, kFP32PositiveInfinity);
__ Fmov(s5, kFP32NegativeInfinity);
- __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
+ __ Fmov(s6, 0x7FFFFF80); // Largest float < INT32_MAX.
__ Fneg(s7, s6); // Smallest float > INT32_MIN.
__ Fmov(d8, 1.0);
__ Fmov(d9, 1.1);
@@ -10820,14 +10812,14 @@ TEST(fcvtms) {
__ Fmov(s19, -1.5);
__ Fmov(s20, kFP32PositiveInfinity);
__ Fmov(s21, kFP32NegativeInfinity);
- __ Fmov(s22, 0x7fffff8000000000UL); // Largest float < INT64_MAX.
+ __ Fmov(s22, 0x7FFFFF8000000000UL); // Largest float < INT64_MAX.
__ Fneg(s23, s22); // Smallest float > INT64_MIN.
__ Fmov(d24, 1.1);
__ Fmov(d25, 1.5);
__ Fmov(d26, -1.5);
__ Fmov(d27, kFP64PositiveInfinity);
__ Fmov(d28, kFP64NegativeInfinity);
- __ Fmov(d29, 0x7ffffffffffffc00UL); // Largest double < INT64_MAX.
+ __ Fmov(d29, 0x7FFFFFFFFFFFFC00UL); // Largest double < INT64_MAX.
__ Fneg(d30, d29); // Smallest double > INT64_MIN.
__ Fcvtms(w0, s0);
@@ -10867,32 +10859,32 @@ TEST(fcvtms) {
CHECK_EQUAL_64(1, x0);
CHECK_EQUAL_64(1, x1);
CHECK_EQUAL_64(1, x2);
- CHECK_EQUAL_64(0xfffffffe, x3);
- CHECK_EQUAL_64(0x7fffffff, x4);
+ CHECK_EQUAL_64(0xFFFFFFFE, x3);
+ CHECK_EQUAL_64(0x7FFFFFFF, x4);
CHECK_EQUAL_64(0x80000000, x5);
- CHECK_EQUAL_64(0x7fffff80, x6);
+ CHECK_EQUAL_64(0x7FFFFF80, x6);
CHECK_EQUAL_64(0x80000080, x7);
CHECK_EQUAL_64(1, x8);
CHECK_EQUAL_64(1, x9);
CHECK_EQUAL_64(1, x10);
- CHECK_EQUAL_64(0xfffffffe, x11);
- CHECK_EQUAL_64(0x7fffffff, x12);
+ CHECK_EQUAL_64(0xFFFFFFFE, x11);
+ CHECK_EQUAL_64(0x7FFFFFFF, x12);
CHECK_EQUAL_64(0x80000000, x13);
- CHECK_EQUAL_64(0x7ffffffe, x14);
+ CHECK_EQUAL_64(0x7FFFFFFE, x14);
CHECK_EQUAL_64(0x80000001, x15);
CHECK_EQUAL_64(1, x17);
CHECK_EQUAL_64(1, x18);
- CHECK_EQUAL_64(0xfffffffffffffffeUL, x19);
- CHECK_EQUAL_64(0x7fffffffffffffffUL, x20);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFEUL, x19);
+ CHECK_EQUAL_64(0x7FFFFFFFFFFFFFFFUL, x20);
CHECK_EQUAL_64(0x8000000000000000UL, x21);
- CHECK_EQUAL_64(0x7fffff8000000000UL, x22);
+ CHECK_EQUAL_64(0x7FFFFF8000000000UL, x22);
CHECK_EQUAL_64(0x8000008000000000UL, x23);
CHECK_EQUAL_64(1, x24);
CHECK_EQUAL_64(1, x25);
- CHECK_EQUAL_64(0xfffffffffffffffeUL, x26);
- CHECK_EQUAL_64(0x7fffffffffffffffUL, x27);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFEUL, x26);
+ CHECK_EQUAL_64(0x7FFFFFFFFFFFFFFFUL, x27);
CHECK_EQUAL_64(0x8000000000000000UL, x28);
- CHECK_EQUAL_64(0x7ffffffffffffc00UL, x29);
+ CHECK_EQUAL_64(0x7FFFFFFFFFFFFC00UL, x29);
CHECK_EQUAL_64(0x8000000000000400UL, x30);
TEARDOWN();
@@ -10910,7 +10902,7 @@ TEST(fcvtmu) {
__ Fmov(s3, -1.5);
__ Fmov(s4, kFP32PositiveInfinity);
__ Fmov(s5, kFP32NegativeInfinity);
- __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
+ __ Fmov(s6, 0x7FFFFF80); // Largest float < INT32_MAX.
__ Fneg(s7, s6); // Smallest float > INT32_MIN.
__ Fmov(d8, 1.0);
__ Fmov(d9, 1.1);
@@ -10925,14 +10917,14 @@ TEST(fcvtmu) {
__ Fmov(s19, -1.5);
__ Fmov(s20, kFP32PositiveInfinity);
__ Fmov(s21, kFP32NegativeInfinity);
- __ Fmov(s22, 0x7fffff8000000000UL); // Largest float < INT64_MAX.
+ __ Fmov(s22, 0x7FFFFF8000000000UL); // Largest float < INT64_MAX.
__ Fneg(s23, s22); // Smallest float > INT64_MIN.
__ Fmov(d24, 1.1);
__ Fmov(d25, 1.5);
__ Fmov(d26, -1.5);
__ Fmov(d27, kFP64PositiveInfinity);
__ Fmov(d28, kFP64NegativeInfinity);
- __ Fmov(d29, 0x7ffffffffffffc00UL); // Largest double < INT64_MAX.
+ __ Fmov(d29, 0x7FFFFFFFFFFFFC00UL); // Largest double < INT64_MAX.
__ Fneg(d30, d29); // Smallest double > INT64_MIN.
__ Fcvtmu(w0, s0);
@@ -10972,30 +10964,30 @@ TEST(fcvtmu) {
CHECK_EQUAL_64(1, x1);
CHECK_EQUAL_64(1, x2);
CHECK_EQUAL_64(0, x3);
- CHECK_EQUAL_64(0xffffffff, x4);
+ CHECK_EQUAL_64(0xFFFFFFFF, x4);
CHECK_EQUAL_64(0, x5);
- CHECK_EQUAL_64(0x7fffff80, x6);
+ CHECK_EQUAL_64(0x7FFFFF80, x6);
CHECK_EQUAL_64(0, x7);
CHECK_EQUAL_64(1, x8);
CHECK_EQUAL_64(1, x9);
CHECK_EQUAL_64(1, x10);
CHECK_EQUAL_64(0, x11);
- CHECK_EQUAL_64(0xffffffff, x12);
+ CHECK_EQUAL_64(0xFFFFFFFF, x12);
CHECK_EQUAL_64(0, x13);
- CHECK_EQUAL_64(0x7ffffffe, x14);
+ CHECK_EQUAL_64(0x7FFFFFFE, x14);
CHECK_EQUAL_64(1, x17);
CHECK_EQUAL_64(1, x18);
CHECK_EQUAL_64(0x0UL, x19);
- CHECK_EQUAL_64(0xffffffffffffffffUL, x20);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFUL, x20);
CHECK_EQUAL_64(0x0UL, x21);
- CHECK_EQUAL_64(0x7fffff8000000000UL, x22);
+ CHECK_EQUAL_64(0x7FFFFF8000000000UL, x22);
CHECK_EQUAL_64(0x0UL, x23);
CHECK_EQUAL_64(1, x24);
CHECK_EQUAL_64(1, x25);
CHECK_EQUAL_64(0x0UL, x26);
- CHECK_EQUAL_64(0xffffffffffffffffUL, x27);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFUL, x27);
CHECK_EQUAL_64(0x0UL, x28);
- CHECK_EQUAL_64(0x7ffffffffffffc00UL, x29);
+ CHECK_EQUAL_64(0x7FFFFFFFFFFFFC00UL, x29);
CHECK_EQUAL_64(0x0UL, x30);
TEARDOWN();
@@ -11013,7 +11005,7 @@ TEST(fcvtns) {
__ Fmov(s3, -1.5);
__ Fmov(s4, kFP32PositiveInfinity);
__ Fmov(s5, kFP32NegativeInfinity);
- __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
+ __ Fmov(s6, 0x7FFFFF80); // Largest float < INT32_MAX.
__ Fneg(s7, s6); // Smallest float > INT32_MIN.
__ Fmov(d8, 1.0);
__ Fmov(d9, 1.1);
@@ -11028,14 +11020,14 @@ TEST(fcvtns) {
__ Fmov(s19, -1.5);
__ Fmov(s20, kFP32PositiveInfinity);
__ Fmov(s21, kFP32NegativeInfinity);
- __ Fmov(s22, 0x7fffff8000000000UL); // Largest float < INT64_MAX.
+ __ Fmov(s22, 0x7FFFFF8000000000UL); // Largest float < INT64_MAX.
__ Fneg(s23, s22); // Smallest float > INT64_MIN.
__ Fmov(d24, 1.1);
__ Fmov(d25, 1.5);
__ Fmov(d26, -1.5);
__ Fmov(d27, kFP64PositiveInfinity);
__ Fmov(d28, kFP64NegativeInfinity);
- __ Fmov(d29, 0x7ffffffffffffc00UL); // Largest double < INT64_MAX.
+ __ Fmov(d29, 0x7FFFFFFFFFFFFC00UL); // Largest double < INT64_MAX.
__ Fneg(d30, d29); // Smallest double > INT64_MIN.
__ Fcvtns(w0, s0);
@@ -11075,32 +11067,32 @@ TEST(fcvtns) {
CHECK_EQUAL_64(1, x0);
CHECK_EQUAL_64(1, x1);
CHECK_EQUAL_64(2, x2);
- CHECK_EQUAL_64(0xfffffffe, x3);
- CHECK_EQUAL_64(0x7fffffff, x4);
+ CHECK_EQUAL_64(0xFFFFFFFE, x3);
+ CHECK_EQUAL_64(0x7FFFFFFF, x4);
CHECK_EQUAL_64(0x80000000, x5);
- CHECK_EQUAL_64(0x7fffff80, x6);
+ CHECK_EQUAL_64(0x7FFFFF80, x6);
CHECK_EQUAL_64(0x80000080, x7);
CHECK_EQUAL_64(1, x8);
CHECK_EQUAL_64(1, x9);
CHECK_EQUAL_64(2, x10);
- CHECK_EQUAL_64(0xfffffffe, x11);
- CHECK_EQUAL_64(0x7fffffff, x12);
+ CHECK_EQUAL_64(0xFFFFFFFE, x11);
+ CHECK_EQUAL_64(0x7FFFFFFF, x12);
CHECK_EQUAL_64(0x80000000, x13);
- CHECK_EQUAL_64(0x7ffffffe, x14);
+ CHECK_EQUAL_64(0x7FFFFFFE, x14);
CHECK_EQUAL_64(0x80000001, x15);
CHECK_EQUAL_64(1, x17);
CHECK_EQUAL_64(2, x18);
- CHECK_EQUAL_64(0xfffffffffffffffeUL, x19);
- CHECK_EQUAL_64(0x7fffffffffffffffUL, x20);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFEUL, x19);
+ CHECK_EQUAL_64(0x7FFFFFFFFFFFFFFFUL, x20);
CHECK_EQUAL_64(0x8000000000000000UL, x21);
- CHECK_EQUAL_64(0x7fffff8000000000UL, x22);
+ CHECK_EQUAL_64(0x7FFFFF8000000000UL, x22);
CHECK_EQUAL_64(0x8000008000000000UL, x23);
CHECK_EQUAL_64(1, x24);
CHECK_EQUAL_64(2, x25);
- CHECK_EQUAL_64(0xfffffffffffffffeUL, x26);
- CHECK_EQUAL_64(0x7fffffffffffffffUL, x27);
-// CHECK_EQUAL_64(0x8000000000000000UL, x28);
- CHECK_EQUAL_64(0x7ffffffffffffc00UL, x29);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFEUL, x26);
+ CHECK_EQUAL_64(0x7FFFFFFFFFFFFFFFUL, x27);
+ // CHECK_EQUAL_64(0x8000000000000000UL, x28);
+ CHECK_EQUAL_64(0x7FFFFFFFFFFFFC00UL, x29);
CHECK_EQUAL_64(0x8000000000000400UL, x30);
TEARDOWN();
@@ -11118,27 +11110,27 @@ TEST(fcvtnu) {
__ Fmov(s3, -1.5);
__ Fmov(s4, kFP32PositiveInfinity);
__ Fmov(s5, kFP32NegativeInfinity);
- __ Fmov(s6, 0xffffff00); // Largest float < UINT32_MAX.
+ __ Fmov(s6, 0xFFFFFF00); // Largest float < UINT32_MAX.
__ Fmov(d8, 1.0);
__ Fmov(d9, 1.1);
__ Fmov(d10, 1.5);
__ Fmov(d11, -1.5);
__ Fmov(d12, kFP64PositiveInfinity);
__ Fmov(d13, kFP64NegativeInfinity);
- __ Fmov(d14, 0xfffffffe);
+ __ Fmov(d14, 0xFFFFFFFE);
__ Fmov(s16, 1.0);
__ Fmov(s17, 1.1);
__ Fmov(s18, 1.5);
__ Fmov(s19, -1.5);
__ Fmov(s20, kFP32PositiveInfinity);
__ Fmov(s21, kFP32NegativeInfinity);
- __ Fmov(s22, 0xffffff0000000000UL); // Largest float < UINT64_MAX.
+ __ Fmov(s22, 0xFFFFFF0000000000UL); // Largest float < UINT64_MAX.
__ Fmov(d24, 1.1);
__ Fmov(d25, 1.5);
__ Fmov(d26, -1.5);
__ Fmov(d27, kFP64PositiveInfinity);
__ Fmov(d28, kFP64NegativeInfinity);
- __ Fmov(d29, 0xfffffffffffff800UL); // Largest double < UINT64_MAX.
+ __ Fmov(d29, 0xFFFFFFFFFFFFF800UL); // Largest double < UINT64_MAX.
__ Fmov(s30, 0x100000000UL);
__ Fcvtnu(w0, s0);
@@ -11178,30 +11170,30 @@ TEST(fcvtnu) {
CHECK_EQUAL_64(1, x1);
CHECK_EQUAL_64(2, x2);
CHECK_EQUAL_64(0, x3);
- CHECK_EQUAL_64(0xffffffff, x4);
+ CHECK_EQUAL_64(0xFFFFFFFF, x4);
CHECK_EQUAL_64(0, x5);
- CHECK_EQUAL_64(0xffffff00, x6);
+ CHECK_EQUAL_64(0xFFFFFF00, x6);
CHECK_EQUAL_64(1, x8);
CHECK_EQUAL_64(1, x9);
CHECK_EQUAL_64(2, x10);
CHECK_EQUAL_64(0, x11);
- CHECK_EQUAL_64(0xffffffff, x12);
+ CHECK_EQUAL_64(0xFFFFFFFF, x12);
CHECK_EQUAL_64(0, x13);
- CHECK_EQUAL_64(0xfffffffe, x14);
+ CHECK_EQUAL_64(0xFFFFFFFE, x14);
CHECK_EQUAL_64(1, x16);
CHECK_EQUAL_64(1, x17);
CHECK_EQUAL_64(2, x18);
CHECK_EQUAL_64(0, x19);
- CHECK_EQUAL_64(0xffffffffffffffffUL, x20);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFUL, x20);
CHECK_EQUAL_64(0, x21);
- CHECK_EQUAL_64(0xffffff0000000000UL, x22);
+ CHECK_EQUAL_64(0xFFFFFF0000000000UL, x22);
CHECK_EQUAL_64(1, x24);
CHECK_EQUAL_64(2, x25);
CHECK_EQUAL_64(0, x26);
- CHECK_EQUAL_64(0xffffffffffffffffUL, x27);
-// CHECK_EQUAL_64(0, x28);
- CHECK_EQUAL_64(0xfffffffffffff800UL, x29);
- CHECK_EQUAL_64(0xffffffff, x30);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFUL, x27);
+ // CHECK_EQUAL_64(0, x28);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFFF800UL, x29);
+ CHECK_EQUAL_64(0xFFFFFFFF, x30);
TEARDOWN();
}
@@ -11218,7 +11210,7 @@ TEST(fcvtzs) {
__ Fmov(s3, -1.5);
__ Fmov(s4, kFP32PositiveInfinity);
__ Fmov(s5, kFP32NegativeInfinity);
- __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
+ __ Fmov(s6, 0x7FFFFF80); // Largest float < INT32_MAX.
__ Fneg(s7, s6); // Smallest float > INT32_MIN.
__ Fmov(d8, 1.0);
__ Fmov(d9, 1.1);
@@ -11233,14 +11225,14 @@ TEST(fcvtzs) {
__ Fmov(s19, -1.5);
__ Fmov(s20, kFP32PositiveInfinity);
__ Fmov(s21, kFP32NegativeInfinity);
- __ Fmov(s22, 0x7fffff8000000000UL); // Largest float < INT64_MAX.
+ __ Fmov(s22, 0x7FFFFF8000000000UL); // Largest float < INT64_MAX.
__ Fneg(s23, s22); // Smallest float > INT64_MIN.
__ Fmov(d24, 1.1);
__ Fmov(d25, 1.5);
__ Fmov(d26, -1.5);
__ Fmov(d27, kFP64PositiveInfinity);
__ Fmov(d28, kFP64NegativeInfinity);
- __ Fmov(d29, 0x7ffffffffffffc00UL); // Largest double < INT64_MAX.
+ __ Fmov(d29, 0x7FFFFFFFFFFFFC00UL); // Largest double < INT64_MAX.
__ Fneg(d30, d29); // Smallest double > INT64_MIN.
__ Fcvtzs(w0, s0);
@@ -11280,32 +11272,32 @@ TEST(fcvtzs) {
CHECK_EQUAL_64(1, x0);
CHECK_EQUAL_64(1, x1);
CHECK_EQUAL_64(1, x2);
- CHECK_EQUAL_64(0xffffffff, x3);
- CHECK_EQUAL_64(0x7fffffff, x4);
+ CHECK_EQUAL_64(0xFFFFFFFF, x3);
+ CHECK_EQUAL_64(0x7FFFFFFF, x4);
CHECK_EQUAL_64(0x80000000, x5);
- CHECK_EQUAL_64(0x7fffff80, x6);
+ CHECK_EQUAL_64(0x7FFFFF80, x6);
CHECK_EQUAL_64(0x80000080, x7);
CHECK_EQUAL_64(1, x8);
CHECK_EQUAL_64(1, x9);
CHECK_EQUAL_64(1, x10);
- CHECK_EQUAL_64(0xffffffff, x11);
- CHECK_EQUAL_64(0x7fffffff, x12);
+ CHECK_EQUAL_64(0xFFFFFFFF, x11);
+ CHECK_EQUAL_64(0x7FFFFFFF, x12);
CHECK_EQUAL_64(0x80000000, x13);
- CHECK_EQUAL_64(0x7ffffffe, x14);
+ CHECK_EQUAL_64(0x7FFFFFFE, x14);
CHECK_EQUAL_64(0x80000001, x15);
CHECK_EQUAL_64(1, x17);
CHECK_EQUAL_64(1, x18);
- CHECK_EQUAL_64(0xffffffffffffffffUL, x19);
- CHECK_EQUAL_64(0x7fffffffffffffffUL, x20);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFUL, x19);
+ CHECK_EQUAL_64(0x7FFFFFFFFFFFFFFFUL, x20);
CHECK_EQUAL_64(0x8000000000000000UL, x21);
- CHECK_EQUAL_64(0x7fffff8000000000UL, x22);
+ CHECK_EQUAL_64(0x7FFFFF8000000000UL, x22);
CHECK_EQUAL_64(0x8000008000000000UL, x23);
CHECK_EQUAL_64(1, x24);
CHECK_EQUAL_64(1, x25);
- CHECK_EQUAL_64(0xffffffffffffffffUL, x26);
- CHECK_EQUAL_64(0x7fffffffffffffffUL, x27);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFUL, x26);
+ CHECK_EQUAL_64(0x7FFFFFFFFFFFFFFFUL, x27);
CHECK_EQUAL_64(0x8000000000000000UL, x28);
- CHECK_EQUAL_64(0x7ffffffffffffc00UL, x29);
+ CHECK_EQUAL_64(0x7FFFFFFFFFFFFC00UL, x29);
CHECK_EQUAL_64(0x8000000000000400UL, x30);
TEARDOWN();
@@ -11323,7 +11315,7 @@ TEST(fcvtzu) {
__ Fmov(s3, -1.5);
__ Fmov(s4, kFP32PositiveInfinity);
__ Fmov(s5, kFP32NegativeInfinity);
- __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
+ __ Fmov(s6, 0x7FFFFF80); // Largest float < INT32_MAX.
__ Fneg(s7, s6); // Smallest float > INT32_MIN.
__ Fmov(d8, 1.0);
__ Fmov(d9, 1.1);
@@ -11338,14 +11330,14 @@ TEST(fcvtzu) {
__ Fmov(s19, -1.5);
__ Fmov(s20, kFP32PositiveInfinity);
__ Fmov(s21, kFP32NegativeInfinity);
- __ Fmov(s22, 0x7fffff8000000000UL); // Largest float < INT64_MAX.
+ __ Fmov(s22, 0x7FFFFF8000000000UL); // Largest float < INT64_MAX.
__ Fneg(s23, s22); // Smallest float > INT64_MIN.
__ Fmov(d24, 1.1);
__ Fmov(d25, 1.5);
__ Fmov(d26, -1.5);
__ Fmov(d27, kFP64PositiveInfinity);
__ Fmov(d28, kFP64NegativeInfinity);
- __ Fmov(d29, 0x7ffffffffffffc00UL); // Largest double < INT64_MAX.
+ __ Fmov(d29, 0x7FFFFFFFFFFFFC00UL); // Largest double < INT64_MAX.
__ Fneg(d30, d29); // Smallest double > INT64_MIN.
__ Fcvtzu(w0, s0);
@@ -11385,30 +11377,30 @@ TEST(fcvtzu) {
CHECK_EQUAL_64(1, x1);
CHECK_EQUAL_64(1, x2);
CHECK_EQUAL_64(0, x3);
- CHECK_EQUAL_64(0xffffffff, x4);
+ CHECK_EQUAL_64(0xFFFFFFFF, x4);
CHECK_EQUAL_64(0, x5);
- CHECK_EQUAL_64(0x7fffff80, x6);
+ CHECK_EQUAL_64(0x7FFFFF80, x6);
CHECK_EQUAL_64(0, x7);
CHECK_EQUAL_64(1, x8);
CHECK_EQUAL_64(1, x9);
CHECK_EQUAL_64(1, x10);
CHECK_EQUAL_64(0, x11);
- CHECK_EQUAL_64(0xffffffff, x12);
+ CHECK_EQUAL_64(0xFFFFFFFF, x12);
CHECK_EQUAL_64(0, x13);
- CHECK_EQUAL_64(0x7ffffffe, x14);
+ CHECK_EQUAL_64(0x7FFFFFFE, x14);
CHECK_EQUAL_64(1, x17);
CHECK_EQUAL_64(1, x18);
CHECK_EQUAL_64(0x0UL, x19);
- CHECK_EQUAL_64(0xffffffffffffffffUL, x20);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFUL, x20);
CHECK_EQUAL_64(0x0UL, x21);
- CHECK_EQUAL_64(0x7fffff8000000000UL, x22);
+ CHECK_EQUAL_64(0x7FFFFF8000000000UL, x22);
CHECK_EQUAL_64(0x0UL, x23);
CHECK_EQUAL_64(1, x24);
CHECK_EQUAL_64(1, x25);
CHECK_EQUAL_64(0x0UL, x26);
- CHECK_EQUAL_64(0xffffffffffffffffUL, x27);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFUL, x27);
CHECK_EQUAL_64(0x0UL, x28);
- CHECK_EQUAL_64(0x7ffffffffffffc00UL, x29);
+ CHECK_EQUAL_64(0x7FFFFFFFFFFFFC00UL, x29);
CHECK_EQUAL_64(0x0UL, x30);
TEARDOWN();
@@ -11429,9 +11421,9 @@ static void TestUScvtfHelper(uint64_t in,
uint64_t expected_scvtf_bits,
uint64_t expected_ucvtf_bits) {
uint64_t u64 = in;
- uint32_t u32 = u64 & 0xffffffff;
+ uint32_t u32 = u64 & 0xFFFFFFFF;
int64_t s64 = static_cast<int64_t>(in);
- int32_t s32 = s64 & 0x7fffffff;
+ int32_t s32 = s64 & 0x7FFFFFFF;
bool cvtf_s32 = (s64 == s32);
bool cvtf_u32 = (u64 == u32);
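The expected_*_bits arguments fed to this helper are just the IEEE-754 bit patterns of the correctly converted values, so they can be reproduced on any host by converting and reinterpreting. A sketch; the DoubleToBits helper is hypothetical, not part of the test:

#include <cstdint>
#include <cstring>

// Hypothetical host-side helper: reinterpret a double's bit pattern.
static uint64_t DoubleToBits(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  return bits;
}

int main() {
  // 0x12345678 is 1.xxx * 2^28, so the exponent field is 1023 + 28 = 0x41B
  // and the remaining significand bits land at the top of the fraction.
  uint64_t in = 0x0000000012345678;
  return DoubleToBits(static_cast<double>(in)) == 0x41B2345678000000 ? 0 : 1;
}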
@@ -11519,63 +11511,63 @@ TEST(scvtf_ucvtf_double) {
// results should not depend on the rounding mode, and ucvtf and scvtf should
// produce the same result.
TestUScvtfHelper(0x0000000000000000, 0x0000000000000000, 0x0000000000000000);
- TestUScvtfHelper(0x0000000000000001, 0x3ff0000000000000, 0x3ff0000000000000);
- TestUScvtfHelper(0x0000000040000000, 0x41d0000000000000, 0x41d0000000000000);
- TestUScvtfHelper(0x0000000100000000, 0x41f0000000000000, 0x41f0000000000000);
- TestUScvtfHelper(0x4000000000000000, 0x43d0000000000000, 0x43d0000000000000);
+ TestUScvtfHelper(0x0000000000000001, 0x3FF0000000000000, 0x3FF0000000000000);
+ TestUScvtfHelper(0x0000000040000000, 0x41D0000000000000, 0x41D0000000000000);
+ TestUScvtfHelper(0x0000000100000000, 0x41F0000000000000, 0x41F0000000000000);
+ TestUScvtfHelper(0x4000000000000000, 0x43D0000000000000, 0x43D0000000000000);
// Test mantissa extremities.
- TestUScvtfHelper(0x4000000000000400, 0x43d0000000000001, 0x43d0000000000001);
+ TestUScvtfHelper(0x4000000000000400, 0x43D0000000000001, 0x43D0000000000001);
// The largest int32_t that fits in a double.
- TestUScvtfHelper(0x000000007fffffff, 0x41dfffffffc00000, 0x41dfffffffc00000);
+ TestUScvtfHelper(0x000000007FFFFFFF, 0x41DFFFFFFFC00000, 0x41DFFFFFFFC00000);
// Values that would be negative if treated as an int32_t.
- TestUScvtfHelper(0x00000000ffffffff, 0x41efffffffe00000, 0x41efffffffe00000);
- TestUScvtfHelper(0x0000000080000000, 0x41e0000000000000, 0x41e0000000000000);
- TestUScvtfHelper(0x0000000080000001, 0x41e0000000200000, 0x41e0000000200000);
+ TestUScvtfHelper(0x00000000FFFFFFFF, 0x41EFFFFFFFE00000, 0x41EFFFFFFFE00000);
+ TestUScvtfHelper(0x0000000080000000, 0x41E0000000000000, 0x41E0000000000000);
+ TestUScvtfHelper(0x0000000080000001, 0x41E0000000200000, 0x41E0000000200000);
// The largest int64_t that fits in a double.
- TestUScvtfHelper(0x7ffffffffffffc00, 0x43dfffffffffffff, 0x43dfffffffffffff);
+ TestUScvtfHelper(0x7FFFFFFFFFFFFC00, 0x43DFFFFFFFFFFFFF, 0x43DFFFFFFFFFFFFF);
// Check for bit pattern reproduction.
- TestUScvtfHelper(0x0123456789abcde0, 0x43723456789abcde, 0x43723456789abcde);
- TestUScvtfHelper(0x0000000012345678, 0x41b2345678000000, 0x41b2345678000000);
+ TestUScvtfHelper(0x0123456789ABCDE0, 0x43723456789ABCDE, 0x43723456789ABCDE);
+ TestUScvtfHelper(0x0000000012345678, 0x41B2345678000000, 0x41B2345678000000);
// Simple conversions of negative int64_t values. These require no rounding,
// and the results should not depend on the rounding mode.
- TestUScvtfHelper(0xffffffffc0000000, 0xc1d0000000000000, 0x43effffffff80000);
- TestUScvtfHelper(0xffffffff00000000, 0xc1f0000000000000, 0x43efffffffe00000);
- TestUScvtfHelper(0xc000000000000000, 0xc3d0000000000000, 0x43e8000000000000);
+ TestUScvtfHelper(0xFFFFFFFFC0000000, 0xC1D0000000000000, 0x43EFFFFFFFF80000);
+ TestUScvtfHelper(0xFFFFFFFF00000000, 0xC1F0000000000000, 0x43EFFFFFFFE00000);
+ TestUScvtfHelper(0xC000000000000000, 0xC3D0000000000000, 0x43E8000000000000);
// Conversions which require rounding.
- TestUScvtfHelper(0x1000000000000000, 0x43b0000000000000, 0x43b0000000000000);
- TestUScvtfHelper(0x1000000000000001, 0x43b0000000000000, 0x43b0000000000000);
- TestUScvtfHelper(0x1000000000000080, 0x43b0000000000000, 0x43b0000000000000);
- TestUScvtfHelper(0x1000000000000081, 0x43b0000000000001, 0x43b0000000000001);
- TestUScvtfHelper(0x1000000000000100, 0x43b0000000000001, 0x43b0000000000001);
- TestUScvtfHelper(0x1000000000000101, 0x43b0000000000001, 0x43b0000000000001);
- TestUScvtfHelper(0x1000000000000180, 0x43b0000000000002, 0x43b0000000000002);
- TestUScvtfHelper(0x1000000000000181, 0x43b0000000000002, 0x43b0000000000002);
- TestUScvtfHelper(0x1000000000000200, 0x43b0000000000002, 0x43b0000000000002);
- TestUScvtfHelper(0x1000000000000201, 0x43b0000000000002, 0x43b0000000000002);
- TestUScvtfHelper(0x1000000000000280, 0x43b0000000000002, 0x43b0000000000002);
- TestUScvtfHelper(0x1000000000000281, 0x43b0000000000003, 0x43b0000000000003);
- TestUScvtfHelper(0x1000000000000300, 0x43b0000000000003, 0x43b0000000000003);
+ TestUScvtfHelper(0x1000000000000000, 0x43B0000000000000, 0x43B0000000000000);
+ TestUScvtfHelper(0x1000000000000001, 0x43B0000000000000, 0x43B0000000000000);
+ TestUScvtfHelper(0x1000000000000080, 0x43B0000000000000, 0x43B0000000000000);
+ TestUScvtfHelper(0x1000000000000081, 0x43B0000000000001, 0x43B0000000000001);
+ TestUScvtfHelper(0x1000000000000100, 0x43B0000000000001, 0x43B0000000000001);
+ TestUScvtfHelper(0x1000000000000101, 0x43B0000000000001, 0x43B0000000000001);
+ TestUScvtfHelper(0x1000000000000180, 0x43B0000000000002, 0x43B0000000000002);
+ TestUScvtfHelper(0x1000000000000181, 0x43B0000000000002, 0x43B0000000000002);
+ TestUScvtfHelper(0x1000000000000200, 0x43B0000000000002, 0x43B0000000000002);
+ TestUScvtfHelper(0x1000000000000201, 0x43B0000000000002, 0x43B0000000000002);
+ TestUScvtfHelper(0x1000000000000280, 0x43B0000000000002, 0x43B0000000000002);
+ TestUScvtfHelper(0x1000000000000281, 0x43B0000000000003, 0x43B0000000000003);
+ TestUScvtfHelper(0x1000000000000300, 0x43B0000000000003, 0x43B0000000000003);
// Check rounding of negative int64_t values (and large uint64_t values).
- TestUScvtfHelper(0x8000000000000000, 0xc3e0000000000000, 0x43e0000000000000);
- TestUScvtfHelper(0x8000000000000001, 0xc3e0000000000000, 0x43e0000000000000);
- TestUScvtfHelper(0x8000000000000200, 0xc3e0000000000000, 0x43e0000000000000);
- TestUScvtfHelper(0x8000000000000201, 0xc3dfffffffffffff, 0x43e0000000000000);
- TestUScvtfHelper(0x8000000000000400, 0xc3dfffffffffffff, 0x43e0000000000000);
- TestUScvtfHelper(0x8000000000000401, 0xc3dfffffffffffff, 0x43e0000000000001);
- TestUScvtfHelper(0x8000000000000600, 0xc3dffffffffffffe, 0x43e0000000000001);
- TestUScvtfHelper(0x8000000000000601, 0xc3dffffffffffffe, 0x43e0000000000001);
- TestUScvtfHelper(0x8000000000000800, 0xc3dffffffffffffe, 0x43e0000000000001);
- TestUScvtfHelper(0x8000000000000801, 0xc3dffffffffffffe, 0x43e0000000000001);
- TestUScvtfHelper(0x8000000000000a00, 0xc3dffffffffffffe, 0x43e0000000000001);
- TestUScvtfHelper(0x8000000000000a01, 0xc3dffffffffffffd, 0x43e0000000000001);
- TestUScvtfHelper(0x8000000000000c00, 0xc3dffffffffffffd, 0x43e0000000000002);
+ TestUScvtfHelper(0x8000000000000000, 0xC3E0000000000000, 0x43E0000000000000);
+ TestUScvtfHelper(0x8000000000000001, 0xC3E0000000000000, 0x43E0000000000000);
+ TestUScvtfHelper(0x8000000000000200, 0xC3E0000000000000, 0x43E0000000000000);
+ TestUScvtfHelper(0x8000000000000201, 0xC3DFFFFFFFFFFFFF, 0x43E0000000000000);
+ TestUScvtfHelper(0x8000000000000400, 0xC3DFFFFFFFFFFFFF, 0x43E0000000000000);
+ TestUScvtfHelper(0x8000000000000401, 0xC3DFFFFFFFFFFFFF, 0x43E0000000000001);
+ TestUScvtfHelper(0x8000000000000600, 0xC3DFFFFFFFFFFFFE, 0x43E0000000000001);
+ TestUScvtfHelper(0x8000000000000601, 0xC3DFFFFFFFFFFFFE, 0x43E0000000000001);
+ TestUScvtfHelper(0x8000000000000800, 0xC3DFFFFFFFFFFFFE, 0x43E0000000000001);
+ TestUScvtfHelper(0x8000000000000801, 0xC3DFFFFFFFFFFFFE, 0x43E0000000000001);
+ TestUScvtfHelper(0x8000000000000A00, 0xC3DFFFFFFFFFFFFE, 0x43E0000000000001);
+ TestUScvtfHelper(0x8000000000000A01, 0xC3DFFFFFFFFFFFFD, 0x43E0000000000001);
+ TestUScvtfHelper(0x8000000000000C00, 0xC3DFFFFFFFFFFFFD, 0x43E0000000000002);
// Round up to produce a result that's too big for the input to represent.
- TestUScvtfHelper(0x7ffffffffffffe00, 0x43e0000000000000, 0x43e0000000000000);
- TestUScvtfHelper(0x7fffffffffffffff, 0x43e0000000000000, 0x43e0000000000000);
- TestUScvtfHelper(0xfffffffffffffc00, 0xc090000000000000, 0x43f0000000000000);
- TestUScvtfHelper(0xffffffffffffffff, 0xbff0000000000000, 0x43f0000000000000);
+ TestUScvtfHelper(0x7FFFFFFFFFFFFE00, 0x43E0000000000000, 0x43E0000000000000);
+ TestUScvtfHelper(0x7FFFFFFFFFFFFFFF, 0x43E0000000000000, 0x43E0000000000000);
+ TestUScvtfHelper(0xFFFFFFFFFFFFFC00, 0xC090000000000000, 0x43F0000000000000);
+ TestUScvtfHelper(0xFFFFFFFFFFFFFFFF, 0xBFF0000000000000, 0x43F0000000000000);
}
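The 0x10000000000000xx block above exercises round-to-nearest, ties-to-even: at magnitude 2^60 a double's ulp is 2^(60-52) = 0x100, so ...080 is an exact tie and rounds to the even (all-zero) significand, while ...180 is a tie between significands 1 and 2 and rounds to the even one, 2. A host check, assuming the default rounding mode:

int main() {
  double a = static_cast<double>(0x1000000000000080ULL);  // tie -> rounds down
  double b = static_cast<double>(0x1000000000000180ULL);  // tie -> rounds up
  bool ok = (a == static_cast<double>(0x1000000000000000ULL)) &&  // even neighbour
            (b == static_cast<double>(0x1000000000000200ULL));    // even neighbour
  return ok ? 0 : 1;
}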
@@ -11584,9 +11576,9 @@ static void TestUScvtf32Helper(uint64_t in,
uint32_t expected_scvtf_bits,
uint32_t expected_ucvtf_bits) {
uint64_t u64 = in;
- uint32_t u32 = u64 & 0xffffffff;
+ uint32_t u32 = u64 & 0xFFFFFFFF;
int64_t s64 = static_cast<int64_t>(in);
- int32_t s32 = s64 & 0x7fffffff;
+ int32_t s32 = s64 & 0x7FFFFFFF;
bool cvtf_s32 = (s64 == s32);
bool cvtf_u32 = (u64 == u32);
@@ -11656,10 +11648,8 @@ static void TestUScvtf32Helper(uint64_t in,
CHECK_EQUAL_FP32(expected_ucvtf, results_ucvtf_x[fbits]);
if (cvtf_s32) CHECK_EQUAL_FP32(expected_scvtf, results_scvtf_w[fbits]);
if (cvtf_u32) CHECK_EQUAL_FP32(expected_ucvtf, results_ucvtf_w[fbits]);
- break;
}
for (int fbits = 33; fbits <= 64; fbits++) {
- break;
float expected_scvtf = expected_scvtf_base / powf(2, fbits);
float expected_ucvtf = expected_ucvtf_base / powf(2, fbits);
CHECK_EQUAL_FP32(expected_scvtf, results_scvtf_x[fbits]);
@@ -11676,28 +11666,28 @@ TEST(scvtf_ucvtf_float) {
// results should not depend on the rounding mode, and ucvtf and scvtf should
// produce the same result.
TestUScvtf32Helper(0x0000000000000000, 0x00000000, 0x00000000);
- TestUScvtf32Helper(0x0000000000000001, 0x3f800000, 0x3f800000);
- TestUScvtf32Helper(0x0000000040000000, 0x4e800000, 0x4e800000);
- TestUScvtf32Helper(0x0000000100000000, 0x4f800000, 0x4f800000);
- TestUScvtf32Helper(0x4000000000000000, 0x5e800000, 0x5e800000);
+ TestUScvtf32Helper(0x0000000000000001, 0x3F800000, 0x3F800000);
+ TestUScvtf32Helper(0x0000000040000000, 0x4E800000, 0x4E800000);
+ TestUScvtf32Helper(0x0000000100000000, 0x4F800000, 0x4F800000);
+ TestUScvtf32Helper(0x4000000000000000, 0x5E800000, 0x5E800000);
// Test mantissa extremities.
- TestUScvtf32Helper(0x0000000000800001, 0x4b000001, 0x4b000001);
- TestUScvtf32Helper(0x4000008000000000, 0x5e800001, 0x5e800001);
+ TestUScvtf32Helper(0x0000000000800001, 0x4B000001, 0x4B000001);
+ TestUScvtf32Helper(0x4000008000000000, 0x5E800001, 0x5E800001);
// The largest int32_t that fits in a float.
- TestUScvtf32Helper(0x000000007fffff80, 0x4effffff, 0x4effffff);
+ TestUScvtf32Helper(0x000000007FFFFF80, 0x4EFFFFFF, 0x4EFFFFFF);
// Values that would be negative if treated as an int32_t.
- TestUScvtf32Helper(0x00000000ffffff00, 0x4f7fffff, 0x4f7fffff);
- TestUScvtf32Helper(0x0000000080000000, 0x4f000000, 0x4f000000);
- TestUScvtf32Helper(0x0000000080000100, 0x4f000001, 0x4f000001);
+ TestUScvtf32Helper(0x00000000FFFFFF00, 0x4F7FFFFF, 0x4F7FFFFF);
+ TestUScvtf32Helper(0x0000000080000000, 0x4F000000, 0x4F000000);
+ TestUScvtf32Helper(0x0000000080000100, 0x4F000001, 0x4F000001);
// The largest int64_t that fits in a float.
- TestUScvtf32Helper(0x7fffff8000000000, 0x5effffff, 0x5effffff);
+ TestUScvtf32Helper(0x7FFFFF8000000000, 0x5EFFFFFF, 0x5EFFFFFF);
// Check for bit pattern reproduction.
- TestUScvtf32Helper(0x0000000000876543, 0x4b076543, 0x4b076543);
+ TestUScvtf32Helper(0x0000000000876543, 0x4B076543, 0x4B076543);
// Simple conversions of negative int64_t values. These require no rounding,
// and the results should not depend on the rounding mode.
- TestUScvtf32Helper(0xfffffc0000000000, 0xd4800000, 0x5f7ffffc);
- TestUScvtf32Helper(0xc000000000000000, 0xde800000, 0x5f400000);
+ TestUScvtf32Helper(0xFFFFFC0000000000, 0xD4800000, 0x5F7FFFFC);
+ TestUScvtf32Helper(0xC000000000000000, 0xDE800000, 0x5F400000);
// Conversions which require rounding.
TestUScvtf32Helper(0x0000800000000000, 0x57000000, 0x57000000);
@@ -11714,28 +11704,28 @@ TEST(scvtf_ucvtf_float) {
TestUScvtf32Helper(0x0000800002800001, 0x57000003, 0x57000003);
TestUScvtf32Helper(0x0000800003000000, 0x57000003, 0x57000003);
// Check rounding of negative int64_t values (and large uint64_t values).
- TestUScvtf32Helper(0x8000000000000000, 0xdf000000, 0x5f000000);
- TestUScvtf32Helper(0x8000000000000001, 0xdf000000, 0x5f000000);
- TestUScvtf32Helper(0x8000004000000000, 0xdf000000, 0x5f000000);
- TestUScvtf32Helper(0x8000004000000001, 0xdeffffff, 0x5f000000);
- TestUScvtf32Helper(0x8000008000000000, 0xdeffffff, 0x5f000000);
- TestUScvtf32Helper(0x8000008000000001, 0xdeffffff, 0x5f000001);
- TestUScvtf32Helper(0x800000c000000000, 0xdefffffe, 0x5f000001);
- TestUScvtf32Helper(0x800000c000000001, 0xdefffffe, 0x5f000001);
- TestUScvtf32Helper(0x8000010000000000, 0xdefffffe, 0x5f000001);
- TestUScvtf32Helper(0x8000010000000001, 0xdefffffe, 0x5f000001);
- TestUScvtf32Helper(0x8000014000000000, 0xdefffffe, 0x5f000001);
- TestUScvtf32Helper(0x8000014000000001, 0xdefffffd, 0x5f000001);
- TestUScvtf32Helper(0x8000018000000000, 0xdefffffd, 0x5f000002);
+ TestUScvtf32Helper(0x8000000000000000, 0xDF000000, 0x5F000000);
+ TestUScvtf32Helper(0x8000000000000001, 0xDF000000, 0x5F000000);
+ TestUScvtf32Helper(0x8000004000000000, 0xDF000000, 0x5F000000);
+ TestUScvtf32Helper(0x8000004000000001, 0xDEFFFFFF, 0x5F000000);
+ TestUScvtf32Helper(0x8000008000000000, 0xDEFFFFFF, 0x5F000000);
+ TestUScvtf32Helper(0x8000008000000001, 0xDEFFFFFF, 0x5F000001);
+ TestUScvtf32Helper(0x800000C000000000, 0xDEFFFFFE, 0x5F000001);
+ TestUScvtf32Helper(0x800000C000000001, 0xDEFFFFFE, 0x5F000001);
+ TestUScvtf32Helper(0x8000010000000000, 0xDEFFFFFE, 0x5F000001);
+ TestUScvtf32Helper(0x8000010000000001, 0xDEFFFFFE, 0x5F000001);
+ TestUScvtf32Helper(0x8000014000000000, 0xDEFFFFFE, 0x5F000001);
+ TestUScvtf32Helper(0x8000014000000001, 0xDEFFFFFD, 0x5F000001);
+ TestUScvtf32Helper(0x8000018000000000, 0xDEFFFFFD, 0x5F000002);
// Round up to produce a result that's too big for the input to represent.
- TestUScvtf32Helper(0x000000007fffffc0, 0x4f000000, 0x4f000000);
- TestUScvtf32Helper(0x000000007fffffff, 0x4f000000, 0x4f000000);
- TestUScvtf32Helper(0x00000000ffffff80, 0x4f800000, 0x4f800000);
- TestUScvtf32Helper(0x00000000ffffffff, 0x4f800000, 0x4f800000);
- TestUScvtf32Helper(0x7fffffc000000000, 0x5f000000, 0x5f000000);
- TestUScvtf32Helper(0x7fffffffffffffff, 0x5f000000, 0x5f000000);
- TestUScvtf32Helper(0xffffff8000000000, 0xd3000000, 0x5f800000);
- TestUScvtf32Helper(0xffffffffffffffff, 0xbf800000, 0x5f800000);
+ TestUScvtf32Helper(0x000000007FFFFFC0, 0x4F000000, 0x4F000000);
+ TestUScvtf32Helper(0x000000007FFFFFFF, 0x4F000000, 0x4F000000);
+ TestUScvtf32Helper(0x00000000FFFFFF80, 0x4F800000, 0x4F800000);
+ TestUScvtf32Helper(0x00000000FFFFFFFF, 0x4F800000, 0x4F800000);
+ TestUScvtf32Helper(0x7FFFFFC000000000, 0x5F000000, 0x5F000000);
+ TestUScvtf32Helper(0x7FFFFFFFFFFFFFFF, 0x5F000000, 0x5F000000);
+ TestUScvtf32Helper(0xFFFFFF8000000000, 0xD3000000, 0x5F800000);
+ TestUScvtf32Helper(0xFFFFFFFFFFFFFFFF, 0xBF800000, 0x5F800000);
}
@@ -11782,18 +11772,18 @@ TEST(system_mrs) {
TEST(system_msr) {
INIT_V8();
// All FPCR fields that must be implemented: AHP, DN, FZ, RMode
- const uint64_t fpcr_core = 0x07c00000;
+ const uint64_t fpcr_core = 0x07C00000;
// All FPCR fields (including fields which may be read-as-zero):
// Stride, Len
// IDE, IXE, UFE, OFE, DZE, IOE
- const uint64_t fpcr_all = fpcr_core | 0x00379f00;
+ const uint64_t fpcr_all = fpcr_core | 0x00379F00;
SETUP();
START();
__ Mov(w0, 0);
- __ Mov(w1, 0x7fffffff);
+ __ Mov(w1, 0x7FFFFFFF);
__ Mov(x7, 0);
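For reference, the two FPCR masks decompose into exactly the fields named in the comments. A compile-time sketch, with bit positions taken from the Arm ARM (an assumption, not spelled out in the diff): AHP=26, DN=25, FZ=24, RMode=23:22, Stride=21:20, Len=18:16, IDE=15, IXE=12, UFE=11, OFE=10, DZE=9, IOE=8.

#include <cstdint>

constexpr uint64_t kCore = (1u << 26) | (1u << 25) | (1u << 24) | (3u << 22);
constexpr uint64_t kRaz = (3u << 20) | (7u << 16) |              // Stride, Len
                          (1u << 15) |                           // IDE
                          (1u << 12) | (1u << 11) | (1u << 10) |
                          (1u << 9) | (1u << 8);                 // IXE..IOE
static_assert(kCore == 0x07C00000, "matches fpcr_core above");
static_assert((kCore | kRaz) == (0x07C00000 | 0x00379F00), "matches fpcr_all");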
@@ -11963,14 +11953,14 @@ TEST(zero_dest_setflags) {
__ adds(xzr, x1, xzr);
__ adds(xzr, xzr, x1);
- __ ands(xzr, x2, ~0xf);
- __ ands(xzr, xzr, ~0xf);
+ __ ands(xzr, x2, ~0xF);
+ __ ands(xzr, xzr, ~0xF);
__ ands(xzr, x0, x2);
__ ands(xzr, x2, xzr);
__ ands(xzr, xzr, x2);
- __ bics(xzr, x3, ~0xf);
- __ bics(xzr, xzr, ~0xf);
+ __ bics(xzr, x3, ~0xF);
+ __ bics(xzr, xzr, ~0xF);
__ bics(xzr, x0, x3);
__ bics(xzr, x3, xzr);
__ bics(xzr, xzr, x3);
@@ -12018,7 +12008,6 @@ TEST(register_bit) {
CHECK(xzr.bit() == (1UL << kZeroRegCode));
// Internal ABI definitions.
- CHECK(jssp.bit() == (1UL << kJSSPCode));
CHECK(csp.bit() == (1UL << kSPRegInternalCode));
CHECK(csp.bit() != xzr.bit());
@@ -12026,35 +12015,11 @@ TEST(register_bit) {
CHECK(x0.bit() == w0.bit());
CHECK(x1.bit() == w1.bit());
CHECK(x10.bit() == w10.bit());
- CHECK(jssp.bit() == wjssp.bit());
CHECK(xzr.bit() == wzr.bit());
CHECK(csp.bit() == wcsp.bit());
}
-TEST(stack_pointer_override) {
- // This test generates some stack maintenance code, but the test only checks
- // the reported state.
- INIT_V8();
- SETUP();
- START();
-
- // The default stack pointer in V8 is jssp, but for compatibility with VIXL,
- // the test framework sets it to csp before calling the test.
- CHECK(csp.Is(__ StackPointer()));
- __ SetStackPointer(x0);
- CHECK(x0.Is(__ StackPointer()));
- __ SetStackPointer(jssp);
- CHECK(jssp.Is(__ StackPointer()));
- __ SetStackPointer(csp);
- CHECK(csp.Is(__ StackPointer()));
-
- END();
- RUN();
- TEARDOWN();
-}
-
-
TEST(peek_poke_simple) {
INIT_V8();
SETUP();
@@ -12113,10 +12078,10 @@ TEST(peek_poke_simple) {
CHECK_EQUAL_64(literal_base * 3, x2);
CHECK_EQUAL_64(literal_base * 4, x3);
- CHECK_EQUAL_64((literal_base * 1) & 0xffffffff, x10);
- CHECK_EQUAL_64((literal_base * 2) & 0xffffffff, x11);
- CHECK_EQUAL_64((literal_base * 3) & 0xffffffff, x12);
- CHECK_EQUAL_64((literal_base * 4) & 0xffffffff, x13);
+ CHECK_EQUAL_64((literal_base * 1) & 0xFFFFFFFF, x10);
+ CHECK_EQUAL_64((literal_base * 2) & 0xFFFFFFFF, x11);
+ CHECK_EQUAL_64((literal_base * 3) & 0xFFFFFFFF, x12);
+ CHECK_EQUAL_64((literal_base * 4) & 0xFFFFFFFF, x13);
TEARDOWN();
}
@@ -12194,9 +12159,9 @@ TEST(peek_poke_unaligned) {
CHECK_EQUAL_64(literal_base * 6, x5);
CHECK_EQUAL_64(literal_base * 7, x6);
- CHECK_EQUAL_64((literal_base * 1) & 0xffffffff, x10);
- CHECK_EQUAL_64((literal_base * 2) & 0xffffffff, x11);
- CHECK_EQUAL_64((literal_base * 3) & 0xffffffff, x12);
+ CHECK_EQUAL_64((literal_base * 1) & 0xFFFFFFFF, x10);
+ CHECK_EQUAL_64((literal_base * 2) & 0xFFFFFFFF, x11);
+ CHECK_EQUAL_64((literal_base * 3) & 0xFFFFFFFF, x12);
TEARDOWN();
}
@@ -12240,8 +12205,8 @@ TEST(peek_poke_endianness) {
uint64_t x0_expected = literal_base * 1;
uint64_t x1_expected = literal_base * 2;
uint64_t x4_expected = (x0_expected << 32) | (x0_expected >> 32);
- uint64_t x5_expected = ((x1_expected << 16) & 0xffff0000) |
- ((x1_expected >> 16) & 0x0000ffff);
+ uint64_t x5_expected =
+ ((x1_expected << 16) & 0xFFFF0000) | ((x1_expected >> 16) & 0x0000FFFF);
CHECK_EQUAL_64(x0_expected, x0);
CHECK_EQUAL_64(x1_expected, x1);
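The x5_expected expression swaps the two halfwords of the low 32 bits of x1_expected. With the literal base used by these tests the arithmetic works out as below, checked host-side:

#include <cstdint>

int main() {
  uint64_t x1 = 0x0200002000200202;            // literal_base * 2
  uint32_t lo = static_cast<uint32_t>(x1);     // 0x00200202
  uint32_t swapped = (lo << 16) | (lo >> 16);  // 0x02020020
  uint64_t x5 = ((x1 << 16) & 0xFFFF0000) | ((x1 >> 16) & 0x0000FFFF);
  return (x5 == swapped) ? 0 : 1;
}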
@@ -12280,23 +12245,16 @@ TEST(peek_poke_mixed) {
__ Poke(x1, 8);
__ Poke(x0, 0);
{
- CHECK(__ StackPointer().Is(csp));
- __ Mov(x4, __ StackPointer());
- __ SetStackPointer(x4);
-
- __ Poke(wzr, 0); // Clobber the space we're about to drop.
- __ Drop(1, kWRegSize);
- __ Peek(x6, 0);
- __ Claim(1);
- __ Peek(w7, 10);
- __ Poke(x3, 28);
+ __ Peek(x6, 4);
+ __ Peek(w7, 6);
__ Poke(xzr, 0); // Clobber the space we're about to drop.
- __ Drop(1);
- __ Poke(x2, 12);
- __ Push(w0);
-
- __ Mov(csp, __ StackPointer());
- __ SetStackPointer(csp);
+ __ Poke(xzr, 8); // Clobber the space we're about to drop.
+ __ Drop(2);
+ __ Poke(x3, 8);
+ __ Poke(x2, 0);
+ __ Claim(2);
+ __ Poke(x0, 0);
+ __ Poke(x1, 8);
}
__ Pop(x0, x1, x2, x3);
@@ -12309,8 +12267,8 @@ TEST(peek_poke_mixed) {
uint64_t x2_expected = literal_base * 3;
uint64_t x3_expected = literal_base * 4;
uint64_t x6_expected = (x1_expected << 32) | (x0_expected >> 32);
- uint64_t x7_expected = ((x1_expected << 16) & 0xffff0000) |
- ((x0_expected >> 48) & 0x0000ffff);
+ uint64_t x7_expected =
+ ((x1_expected << 16) & 0xFFFF0000) | ((x0_expected >> 48) & 0x0000FFFF);
CHECK_EQUAL_64(x0_expected, x0);
CHECK_EQUAL_64(x1_expected, x1);
@@ -12333,34 +12291,28 @@ enum PushPopMethod {
PushPopRegList
};
-
-// The maximum number of registers that can be used by the PushPopJssp* tests,
+// The maximum number of registers that can be used by the PushPop* tests,
// where a reg_count field is provided.
-static int const kPushPopJsspMaxRegCount = -1;
+static int const kPushPopMaxRegCount = -1;
// Test a simple push-pop pattern:
-// * Claim <claim> bytes to set the stack alignment.
// * Push <reg_count> registers with size <reg_size>.
// * Clobber the register contents.
// * Pop <reg_count> registers to restore the original contents.
-// * Drop <claim> bytes to restore the original stack pointer.
//
// Different push and pop methods can be specified independently to test for
// proper word-endian behaviour.
-static void PushPopJsspSimpleHelper(int reg_count,
- int claim,
- int reg_size,
- PushPopMethod push_method,
- PushPopMethod pop_method) {
+static void PushPopSimpleHelper(int reg_count, int reg_size,
+ PushPopMethod push_method,
+ PushPopMethod pop_method) {
SETUP();
START();
// Registers in the TmpList can be used by the macro assembler for debug code
- // (for example in 'Pop'), so we can't use them here. We can't use jssp
- // because it will be the stack pointer for this test.
- static RegList const allowed = ~(masm.TmpList()->list() | jssp.bit());
- if (reg_count == kPushPopJsspMaxRegCount) {
+ // (for example in 'Pop'), so we can't use them here.
+ static RegList const allowed = ~(masm.TmpList()->list());
+ if (reg_count == kPushPopMaxRegCount) {
reg_count = CountSetBits(allowed, kNumberOfRegisters);
}
// Work out which registers to use, based on reg_size.
@@ -12377,10 +12329,6 @@ static void PushPopJsspSimpleHelper(int reg_count,
uint64_t literal_base = 0x0100001000100101UL;
{
- CHECK(__ StackPointer().Is(csp));
- __ Mov(jssp, __ StackPointer());
- __ SetStackPointer(jssp);
-
int i;
// Initialize the registers.
@@ -12392,9 +12340,6 @@ static void PushPopJsspSimpleHelper(int reg_count,
}
}
- // Claim memory first, as requested.
- __ Claim(claim, kByteSizeInBytes);
-
switch (push_method) {
case PushPopByFour:
// Push high-numbered registers first (to the highest addresses).
@@ -12439,12 +12384,6 @@ static void PushPopJsspSimpleHelper(int reg_count,
__ PopSizeRegList(list, reg_size);
break;
}
-
- // Drop memory to restore jssp.
- __ Drop(claim, kByteSizeInBytes);
-
- __ Mov(csp, __ StackPointer());
- __ SetStackPointer(csp);
}
END();
@@ -12454,7 +12393,7 @@ static void PushPopJsspSimpleHelper(int reg_count,
// Check that the register contents were preserved.
// Always use CHECK_EQUAL_64, even when testing W registers, so we can test
// that the upper word was properly cleared by Pop.
- literal_base &= (0xffffffffffffffffUL >> (64-reg_size));
+ literal_base &= (0xFFFFFFFFFFFFFFFFUL >> (64 - reg_size));
for (int i = 0; i < reg_count; i++) {
if (x[i].IsZero()) {
CHECK_EQUAL_64(0, x[i]);
@@ -12466,77 +12405,53 @@ static void PushPopJsspSimpleHelper(int reg_count,
TEARDOWN();
}
-
-TEST(push_pop_jssp_simple_32) {
+TEST(push_pop_simple_32) {
INIT_V8();
- for (int claim = 0; claim <= 8; claim++) {
- for (int count = 0; count <= 8; count++) {
- PushPopJsspSimpleHelper(count, claim, kWRegSizeInBits,
- PushPopByFour, PushPopByFour);
- PushPopJsspSimpleHelper(count, claim, kWRegSizeInBits,
- PushPopByFour, PushPopRegList);
- PushPopJsspSimpleHelper(count, claim, kWRegSizeInBits,
- PushPopRegList, PushPopByFour);
- PushPopJsspSimpleHelper(count, claim, kWRegSizeInBits,
- PushPopRegList, PushPopRegList);
- }
- // Test with the maximum number of registers.
- PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSizeInBits,
- PushPopByFour, PushPopByFour);
- PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSizeInBits,
- PushPopByFour, PushPopRegList);
- PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSizeInBits,
- PushPopRegList, PushPopByFour);
- PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSizeInBits,
- PushPopRegList, PushPopRegList);
- }
-}
-
-
-TEST(push_pop_jssp_simple_64) {
- INIT_V8();
- for (int claim = 0; claim <= 8; claim++) {
- for (int count = 0; count <= 8; count++) {
- PushPopJsspSimpleHelper(count, claim, kXRegSizeInBits,
- PushPopByFour, PushPopByFour);
- PushPopJsspSimpleHelper(count, claim, kXRegSizeInBits,
- PushPopByFour, PushPopRegList);
- PushPopJsspSimpleHelper(count, claim, kXRegSizeInBits,
- PushPopRegList, PushPopByFour);
- PushPopJsspSimpleHelper(count, claim, kXRegSizeInBits,
- PushPopRegList, PushPopRegList);
- }
- // Test with the maximum number of registers.
- PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSizeInBits,
- PushPopByFour, PushPopByFour);
- PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSizeInBits,
- PushPopByFour, PushPopRegList);
- PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSizeInBits,
- PushPopRegList, PushPopByFour);
- PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSizeInBits,
- PushPopRegList, PushPopRegList);
+
+ for (int count = 0; count <= 8; count += 4) {
+ PushPopSimpleHelper(count, kWRegSizeInBits, PushPopByFour, PushPopByFour);
+ PushPopSimpleHelper(count, kWRegSizeInBits, PushPopByFour, PushPopRegList);
+ PushPopSimpleHelper(count, kWRegSizeInBits, PushPopRegList, PushPopByFour);
+ PushPopSimpleHelper(count, kWRegSizeInBits, PushPopRegList, PushPopRegList);
}
+ // Skip testing kPushPopMaxRegCount, as we exclude the temporary registers
+ // and we end up with a number of registers that is not a multiple of four,
+ // which is not supported for pushing.
}
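The step sizes in these loops look deliberate: with csp as the only stack pointer, every push or pop must keep the stack 16-byte aligned, which four W registers (4 bytes each) or two X registers (8 bytes each) satisfy exactly. This is an inference from the counts used here, not stated in the diff:

#include <cstdint>
static_assert(4 * sizeof(uint32_t) == 16, "four W registers per 16-byte unit");
static_assert(2 * sizeof(uint64_t) == 16, "two X registers per 16-byte unit");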
+TEST(push_pop_simple_64) {
+ INIT_V8();
+ for (int count = 0; count <= 8; count += 2) {
+ PushPopSimpleHelper(count, kXRegSizeInBits, PushPopByFour, PushPopByFour);
+ PushPopSimpleHelper(count, kXRegSizeInBits, PushPopByFour, PushPopRegList);
+ PushPopSimpleHelper(count, kXRegSizeInBits, PushPopRegList, PushPopByFour);
+ PushPopSimpleHelper(count, kXRegSizeInBits, PushPopRegList, PushPopRegList);
+ }
+ // Test with the maximum number of registers.
+ PushPopSimpleHelper(kPushPopMaxRegCount, kXRegSizeInBits, PushPopByFour,
+ PushPopByFour);
+ PushPopSimpleHelper(kPushPopMaxRegCount, kXRegSizeInBits, PushPopByFour,
+ PushPopRegList);
+ PushPopSimpleHelper(kPushPopMaxRegCount, kXRegSizeInBits, PushPopRegList,
+ PushPopByFour);
+ PushPopSimpleHelper(kPushPopMaxRegCount, kXRegSizeInBits, PushPopRegList,
+ PushPopRegList);
+}
-// The maximum number of registers that can be used by the PushPopFPJssp* tests,
+// The maximum number of registers that can be used by the PushPopFP* tests,
// where a reg_count field is provided.
-static int const kPushPopFPJsspMaxRegCount = -1;
+static int const kPushPopFPMaxRegCount = -1;
// Test a simple push-pop pattern:
-// * Claim <claim> bytes to set the stack alignment.
// * Push <reg_count> FP registers with size <reg_size>.
// * Clobber the register contents.
// * Pop <reg_count> FP registers to restore the original contents.
-// * Drop <claim> bytes to restore the original stack pointer.
//
// Different push and pop methods can be specified independently to test for
// proper word-endian behaviour.
-static void PushPopFPJsspSimpleHelper(int reg_count,
- int claim,
- int reg_size,
- PushPopMethod push_method,
- PushPopMethod pop_method) {
+static void PushPopFPSimpleHelper(int reg_count, int reg_size,
+ PushPopMethod push_method,
+ PushPopMethod pop_method) {
SETUP();
START();
@@ -12544,7 +12459,7 @@ static void PushPopFPJsspSimpleHelper(int reg_count,
// We can use any floating-point register. None of them are reserved for
// debug code, for example.
static RegList const allowed = ~0;
- if (reg_count == kPushPopFPJsspMaxRegCount) {
+ if (reg_count == kPushPopFPMaxRegCount) {
reg_count = CountSetBits(allowed, kNumberOfVRegisters);
}
// Work out which registers to use, based on reg_size.
@@ -12564,9 +12479,6 @@ static void PushPopFPJsspSimpleHelper(int reg_count,
{
CHECK(__ StackPointer().Is(csp));
- __ Mov(jssp, __ StackPointer());
- __ SetStackPointer(jssp);
-
int i;
// Initialize the registers, using X registers to load the literal.
@@ -12580,9 +12492,6 @@ static void PushPopFPJsspSimpleHelper(int reg_count,
__ Add(x0, x0, x1);
}
- // Claim memory first, as requested.
- __ Claim(claim, kByteSizeInBytes);
-
switch (push_method) {
case PushPopByFour:
// Push high-numbered registers first (to the highest addresses).
@@ -12627,12 +12536,6 @@ static void PushPopFPJsspSimpleHelper(int reg_count,
__ PopSizeRegList(list, reg_size, CPURegister::kVRegister);
break;
}
-
- // Drop memory to restore jssp.
- __ Drop(claim, kByteSizeInBytes);
-
- __ Mov(csp, __ StackPointer());
- __ SetStackPointer(csp);
}
END();
@@ -12642,7 +12545,7 @@ static void PushPopFPJsspSimpleHelper(int reg_count,
// Check that the register contents were preserved.
// Always use CHECK_EQUAL_FP64, even when testing S registers, so we can
// test that the upper word was properly cleared by Pop.
- literal_base &= (0xffffffffffffffffUL >> (64-reg_size));
+ literal_base &= (0xFFFFFFFFFFFFFFFFUL >> (64 - reg_size));
for (int i = 0; i < reg_count; i++) {
uint64_t literal = literal_base * i;
double expected;
@@ -12653,69 +12556,59 @@ static void PushPopFPJsspSimpleHelper(int reg_count,
TEARDOWN();
}
+TEST(push_pop_fp_simple_32) {
+ INIT_V8();
+ for (int count = 0; count <= 8; count += 4) {
+ PushPopFPSimpleHelper(count, kSRegSizeInBits, PushPopByFour, PushPopByFour);
+ PushPopFPSimpleHelper(count, kSRegSizeInBits, PushPopByFour,
+ PushPopRegList);
+ PushPopFPSimpleHelper(count, kSRegSizeInBits, PushPopRegList,
+ PushPopByFour);
+ PushPopFPSimpleHelper(count, kSRegSizeInBits, PushPopRegList,
+ PushPopRegList);
+ }
+ // Test with the maximum number of registers.
+ PushPopFPSimpleHelper(kPushPopFPMaxRegCount, kSRegSizeInBits, PushPopByFour,
+ PushPopByFour);
+ PushPopFPSimpleHelper(kPushPopFPMaxRegCount, kSRegSizeInBits, PushPopByFour,
+ PushPopRegList);
+ PushPopFPSimpleHelper(kPushPopFPMaxRegCount, kSRegSizeInBits, PushPopRegList,
+ PushPopByFour);
+ PushPopFPSimpleHelper(kPushPopFPMaxRegCount, kSRegSizeInBits, PushPopRegList,
+ PushPopRegList);
+}
-TEST(push_pop_fp_jssp_simple_32) {
+TEST(push_pop_fp_simple_64) {
INIT_V8();
- for (int claim = 0; claim <= 8; claim++) {
- for (int count = 0; count <= 8; count++) {
- PushPopFPJsspSimpleHelper(count, claim, kSRegSizeInBits,
- PushPopByFour, PushPopByFour);
- PushPopFPJsspSimpleHelper(count, claim, kSRegSizeInBits,
- PushPopByFour, PushPopRegList);
- PushPopFPJsspSimpleHelper(count, claim, kSRegSizeInBits,
- PushPopRegList, PushPopByFour);
- PushPopFPJsspSimpleHelper(count, claim, kSRegSizeInBits,
- PushPopRegList, PushPopRegList);
- }
- // Test with the maximum number of registers.
- PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSizeInBits,
- PushPopByFour, PushPopByFour);
- PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSizeInBits,
- PushPopByFour, PushPopRegList);
- PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSizeInBits,
- PushPopRegList, PushPopByFour);
- PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSizeInBits,
- PushPopRegList, PushPopRegList);
- }
-}
-
-
-TEST(push_pop_fp_jssp_simple_64) {
- INIT_V8();
- for (int claim = 0; claim <= 8; claim++) {
- for (int count = 0; count <= 8; count++) {
- PushPopFPJsspSimpleHelper(count, claim, kDRegSizeInBits,
- PushPopByFour, PushPopByFour);
- PushPopFPJsspSimpleHelper(count, claim, kDRegSizeInBits,
- PushPopByFour, PushPopRegList);
- PushPopFPJsspSimpleHelper(count, claim, kDRegSizeInBits,
- PushPopRegList, PushPopByFour);
- PushPopFPJsspSimpleHelper(count, claim, kDRegSizeInBits,
- PushPopRegList, PushPopRegList);
- }
- // Test with the maximum number of registers.
- PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSizeInBits,
- PushPopByFour, PushPopByFour);
- PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSizeInBits,
- PushPopByFour, PushPopRegList);
- PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSizeInBits,
- PushPopRegList, PushPopByFour);
- PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSizeInBits,
- PushPopRegList, PushPopRegList);
+ for (int count = 0; count <= 8; count += 2) {
+ PushPopFPSimpleHelper(count, kDRegSizeInBits, PushPopByFour, PushPopByFour);
+ PushPopFPSimpleHelper(count, kDRegSizeInBits, PushPopByFour,
+ PushPopRegList);
+ PushPopFPSimpleHelper(count, kDRegSizeInBits, PushPopRegList,
+ PushPopByFour);
+ PushPopFPSimpleHelper(count, kDRegSizeInBits, PushPopRegList,
+ PushPopRegList);
}
+ // Test with the maximum number of registers.
+ PushPopFPSimpleHelper(kPushPopFPMaxRegCount, kDRegSizeInBits, PushPopByFour,
+ PushPopByFour);
+ PushPopFPSimpleHelper(kPushPopFPMaxRegCount, kDRegSizeInBits, PushPopByFour,
+ PushPopRegList);
+ PushPopFPSimpleHelper(kPushPopFPMaxRegCount, kDRegSizeInBits, PushPopRegList,
+ PushPopByFour);
+ PushPopFPSimpleHelper(kPushPopFPMaxRegCount, kDRegSizeInBits, PushPopRegList,
+ PushPopRegList);
}
// Push and pop data using an overlapping combination of Push/Pop and
// RegList-based methods.
-static void PushPopJsspMixedMethodsHelper(int claim, int reg_size) {
+static void PushPopMixedMethodsHelper(int reg_size) {
SETUP();
- // Registers x8 and x9 are used by the macro assembler for debug code (for
- // example in 'Pop'), so we can't use them here. We can't use jssp because it
- // will be the stack pointer for this test.
- static RegList const allowed =
- ~(x8.bit() | x9.bit() | jssp.bit() | xzr.bit());
+ // Registers in the TmpList can be used by the macro assembler for debug code
+ // (for example in 'Pop'), so we can't use them here.
+ static RegList const allowed = ~(masm.TmpList()->list());
// Work out which registers to use, based on reg_size.
auto r = CreateRegisterArray<Register, 10>();
auto x = CreateRegisterArray<Register, 10>();
@@ -12745,11 +12638,6 @@ static void PushPopJsspMixedMethodsHelper(int claim, int reg_size) {
START();
{
CHECK(__ StackPointer().Is(csp));
- __ Mov(jssp, __ StackPointer());
- __ SetStackPointer(jssp);
-
- // Claim memory first, as requested.
- __ Claim(claim, kByteSizeInBytes);
__ Mov(x[3], literal_base * 3);
__ Mov(x[2], literal_base * 2);
@@ -12768,12 +12656,6 @@ static void PushPopJsspMixedMethodsHelper(int claim, int reg_size) {
__ Pop(r[4], r[5]);
Clobber(&masm, r6_to_r9);
__ Pop(r[6], r[7], r[8], r[9]);
-
- // Drop memory to restore jssp.
- __ Drop(claim, kByteSizeInBytes);
-
- __ Mov(csp, __ StackPointer());
- __ SetStackPointer(csp);
}
END();
@@ -12782,7 +12664,7 @@ static void PushPopJsspMixedMethodsHelper(int claim, int reg_size) {
// Always use CHECK_EQUAL_64, even when testing W registers, so we can test
// that the upper word was properly cleared by Pop.
- literal_base &= (0xffffffffffffffffUL >> (64-reg_size));
+ literal_base &= (0xFFFFFFFFFFFFFFFFUL >> (64 - reg_size));
CHECK_EQUAL_64(literal_base * 3, x[9]);
CHECK_EQUAL_64(literal_base * 2, x[8]);
@@ -12794,232 +12676,9 @@ static void PushPopJsspMixedMethodsHelper(int claim, int reg_size) {
TEARDOWN();
}
-
-TEST(push_pop_jssp_mixed_methods_64) {
- INIT_V8();
- for (int claim = 0; claim <= 8; claim++) {
- PushPopJsspMixedMethodsHelper(claim, kXRegSizeInBits);
- }
-}
-
-
-TEST(push_pop_jssp_mixed_methods_32) {
- INIT_V8();
- for (int claim = 0; claim <= 8; claim++) {
- PushPopJsspMixedMethodsHelper(claim, kWRegSizeInBits);
- }
-}
-
-
-// Push and pop data using overlapping X- and W-sized quantities.
-static void PushPopJsspWXOverlapHelper(int reg_count, int claim) {
- // This test emits rather a lot of code.
- SETUP_SIZE(BUF_SIZE * 2);
-
- // Work out which registers to use, based on reg_size.
- Register tmp = x8;
- static RegList const allowed = ~(tmp.bit() | jssp.bit());
- if (reg_count == kPushPopJsspMaxRegCount) {
- reg_count = CountSetBits(allowed, kNumberOfRegisters);
- }
- auto w = CreateRegisterArray<Register, kNumberOfRegisters>();
- auto x = CreateRegisterArray<Register, kNumberOfRegisters>();
- RegList list =
- PopulateRegisterArray(w.data(), x.data(), nullptr, 0, reg_count, allowed);
-
- // The number of W-sized slots we expect to pop. When we pop, we alternate
- // between W and X registers, so we need reg_count*1.5 W-sized slots.
- int const requested_w_slots = reg_count + reg_count / 2;
-
- // Track what _should_ be on the stack, using W-sized slots.
- static int const kMaxWSlots = kNumberOfRegisters + kNumberOfRegisters / 2;
- uint32_t stack[kMaxWSlots];
- for (int i = 0; i < kMaxWSlots; i++) {
- stack[i] = 0xdeadbeef;
- }
-
- // The literal base is chosen to have two useful properties:
- // * When multiplied by small values (such as a register index), this value
- // is clearly readable in the result.
- // * The value is not formed from repeating fixed-size smaller values, so it
- // can be used to detect endianness-related errors.
- static uint64_t const literal_base = 0x0100001000100101UL;
- static uint64_t const literal_base_hi = literal_base >> 32;
- static uint64_t const literal_base_lo = literal_base & 0xffffffff;
- static uint64_t const literal_base_w = literal_base & 0xffffffff;
-
- START();
- {
- CHECK(__ StackPointer().Is(csp));
- __ Mov(jssp, __ StackPointer());
- __ SetStackPointer(jssp);
-
- // Initialize the registers.
- for (int i = 0; i < reg_count; i++) {
- // Always write into the X register, to ensure that the upper word is
- // properly ignored by Push when testing W registers.
- if (!x[i].IsZero()) {
- __ Mov(x[i], literal_base * i);
- }
- }
-
- // Claim memory first, as requested.
- __ Claim(claim, kByteSizeInBytes);
-
- // The push-pop pattern is as follows:
- // Push: Pop:
- // x[0](hi) -> w[0]
- // x[0](lo) -> x[1](hi)
- // w[1] -> x[1](lo)
- // w[1] -> w[2]
- // x[2](hi) -> x[2](hi)
- // x[2](lo) -> x[2](lo)
- // x[2](hi) -> w[3]
- // x[2](lo) -> x[4](hi)
- // x[2](hi) -> x[4](lo)
- // x[2](lo) -> w[5]
- // w[3] -> x[5](hi)
- // w[3] -> x[6](lo)
- // w[3] -> w[7]
- // w[3] -> x[8](hi)
- // x[4](hi) -> x[8](lo)
- // x[4](lo) -> w[9]
- // ... pattern continues ...
- //
- // That is, registers are pushed starting with the lower numbers,
- // alternating between x and w registers, and pushing i%4+1 copies of each,
- // where i is the register number.
- // Registers are popped starting with the higher numbers one-by-one,
- // alternating between x and w registers, but only popping one at a time.
- //
- // This pattern provides a wide variety of alignment effects and overlaps.
-
- // ---- Push ----
-
- int active_w_slots = 0;
- for (int i = 0; active_w_slots < requested_w_slots; i++) {
- CHECK(i < reg_count);
- // In order to test various arguments to PushMultipleTimes, and to try to
- // exercise different alignment and overlap effects, we push each
- // register a different number of times.
- int times = i % 4 + 1;
- if (i & 1) {
- // Push odd-numbered registers as W registers.
- __ Mov(tmp.W(), times);
- __ PushMultipleTimes(w[i], tmp.W());
-
- // Fill in the expected stack slots.
- for (int j = 0; j < times; j++) {
- if (w[i].Is(wzr)) {
- // The zero register always writes zeroes.
- stack[active_w_slots++] = 0;
- } else {
- stack[active_w_slots++] = literal_base_w * i;
- }
- }
- } else {
- // Push even-numbered registers as X registers.
- __ Mov(tmp, times);
- __ PushMultipleTimes(x[i], tmp);
-
- // Fill in the expected stack slots.
- for (int j = 0; j < times; j++) {
- if (x[i].IsZero()) {
- // The zero register always writes zeroes.
- stack[active_w_slots++] = 0;
- stack[active_w_slots++] = 0;
- } else {
- stack[active_w_slots++] = literal_base_hi * i;
- stack[active_w_slots++] = literal_base_lo * i;
- }
- }
- }
- }
- // Because we were pushing several registers at a time, we probably pushed
- // more than we needed to.
- if (active_w_slots > requested_w_slots) {
- __ Drop(active_w_slots - requested_w_slots, kWRegSize);
- // Bump the number of active W-sized slots back to where it should be,
- // and fill the empty space with a dummy value.
- do {
- stack[active_w_slots--] = 0xdeadbeef;
- } while (active_w_slots > requested_w_slots);
- }
-
- // ---- Pop ----
-
- Clobber(&masm, list);
-
- // If popping an even number of registers, the first one will be X-sized.
- // Otherwise, the first one will be W-sized.
- bool next_is_64 = !(reg_count & 1);
- for (int i = reg_count-1; i >= 0; i--) {
- if (next_is_64) {
- __ Pop(x[i]);
- active_w_slots -= 2;
- } else {
- __ Pop(w[i]);
- active_w_slots -= 1;
- }
- next_is_64 = !next_is_64;
- }
- CHECK_EQ(active_w_slots, 0);
-
- // Drop memory to restore jssp.
- __ Drop(claim, kByteSizeInBytes);
-
- __ Mov(csp, __ StackPointer());
- __ SetStackPointer(csp);
- }
-
- END();
-
- RUN();
-
- int slot = 0;
- for (int i = 0; i < reg_count; i++) {
- // Even-numbered registers were written as W registers.
- // Odd-numbered registers were written as X registers.
- bool expect_64 = (i & 1);
- uint64_t expected;
-
- if (expect_64) {
- uint64_t hi = stack[slot++];
- uint64_t lo = stack[slot++];
- expected = (hi << 32) | lo;
- } else {
- expected = stack[slot++];
- }
-
- // Always use CHECK_EQUAL_64, even when testing W registers, so we can
- // test that the upper word was properly cleared by Pop.
- if (x[i].IsZero()) {
- CHECK_EQUAL_64(0, x[i]);
- } else {
- CHECK_EQUAL_64(expected, x[i]);
- }
- }
- CHECK(slot == requested_w_slots);
-
- TEARDOWN();
-}
-
-
-TEST(push_pop_jssp_wx_overlap) {
+TEST(push_pop_mixed_methods_64) {
INIT_V8();
- for (int claim = 0; claim <= 8; claim++) {
- for (int count = 1; count <= 8; count++) {
- PushPopJsspWXOverlapHelper(count, claim);
- PushPopJsspWXOverlapHelper(count, claim);
- PushPopJsspWXOverlapHelper(count, claim);
- PushPopJsspWXOverlapHelper(count, claim);
- }
- // Test with the maximum number of registers.
- PushPopJsspWXOverlapHelper(kPushPopJsspMaxRegCount, claim);
- PushPopJsspWXOverlapHelper(kPushPopJsspMaxRegCount, claim);
- PushPopJsspWXOverlapHelper(kPushPopJsspMaxRegCount, claim);
- PushPopJsspWXOverlapHelper(kPushPopJsspMaxRegCount, claim);
- }
+ PushPopMixedMethodsHelper(kXRegSizeInBits);
}
@@ -13066,8 +12725,8 @@ TEST(push_pop_csp) {
__ Claim(2);
__ PushXRegList(0);
__ PopXRegList(0);
- __ PushXRegList(0xffffffff);
- __ PopXRegList(0xffffffff);
+ __ PushXRegList(0xFFFFFFFF);
+ __ PopXRegList(0xFFFFFFFF);
__ Drop(12);
END();
@@ -13118,10 +12777,6 @@ TEST(push_queued) {
START();
- CHECK(__ StackPointer().Is(csp));
- __ Mov(jssp, __ StackPointer());
- __ SetStackPointer(jssp);
-
MacroAssembler::PushPopQueue queue(&masm);
// Queue up registers.
@@ -13133,11 +12788,15 @@ TEST(push_queued) {
queue.Queue(w4);
queue.Queue(w5);
queue.Queue(w6);
+ queue.Queue(w7);
queue.Queue(d0);
queue.Queue(d1);
queue.Queue(s2);
+ queue.Queue(s3);
+ queue.Queue(s4);
+ queue.Queue(s5);
__ Mov(x0, 0x1234000000000000);
__ Mov(x1, 0x1234000100010001);
@@ -13146,25 +12805,26 @@ TEST(push_queued) {
__ Mov(w4, 0x12340004);
__ Mov(w5, 0x12340005);
__ Mov(w6, 0x12340006);
+ __ Mov(w7, 0x12340007);
__ Fmov(d0, 123400.0);
__ Fmov(d1, 123401.0);
__ Fmov(s2, 123402.0);
+ __ Fmov(s3, 123403.0);
+ __ Fmov(s4, 123404.0);
+ __ Fmov(s5, 123405.0);
// Actually push them.
queue.PushQueued();
- Clobber(&masm, CPURegList(CPURegister::kRegister, kXRegSizeInBits, 0, 6));
- Clobber(&masm, CPURegList(CPURegister::kVRegister, kDRegSizeInBits, 0, 2));
+ Clobber(&masm, CPURegList(CPURegister::kRegister, kXRegSizeInBits, 0, 8));
+ Clobber(&masm, CPURegList(CPURegister::kVRegister, kDRegSizeInBits, 0, 6));
// Pop them conventionally.
- __ Pop(s2);
+ __ Pop(s5, s4, s3, s2);
__ Pop(d1, d0);
- __ Pop(w6, w5, w4);
+ __ Pop(w7, w6, w5, w4);
__ Pop(x3, x2, x1, x0);
- __ Mov(csp, __ StackPointer());
- __ SetStackPointer(csp);
-
END();
RUN();
@@ -13174,14 +12834,18 @@ TEST(push_queued) {
CHECK_EQUAL_64(0x1234000200020002, x2);
CHECK_EQUAL_64(0x1234000300030003, x3);
- CHECK_EQUAL_32(0x12340004, w4);
- CHECK_EQUAL_32(0x12340005, w5);
- CHECK_EQUAL_32(0x12340006, w6);
+ CHECK_EQUAL_64(0x0000000012340004, x4);
+ CHECK_EQUAL_64(0x0000000012340005, x5);
+ CHECK_EQUAL_64(0x0000000012340006, x6);
+ CHECK_EQUAL_64(0x0000000012340007, x7);
CHECK_EQUAL_FP64(123400.0, d0);
CHECK_EQUAL_FP64(123401.0, d1);
CHECK_EQUAL_FP32(123402.0, s2);
+ CHECK_EQUAL_FP32(123403.0, s3);
+ CHECK_EQUAL_FP32(123404.0, s4);
+ CHECK_EQUAL_FP32(123405.0, s5);
TEARDOWN();
}
@@ -13193,10 +12857,6 @@ TEST(pop_queued) {
START();
- CHECK(__ StackPointer().Is(csp));
- __ Mov(jssp, __ StackPointer());
- __ SetStackPointer(jssp);
-
MacroAssembler::PushPopQueue queue(&masm);
__ Mov(x0, 0x1234000000000000);
@@ -13206,22 +12866,30 @@ TEST(pop_queued) {
__ Mov(w4, 0x12340004);
__ Mov(w5, 0x12340005);
__ Mov(w6, 0x12340006);
+ __ Mov(w7, 0x12340007);
__ Fmov(d0, 123400.0);
__ Fmov(d1, 123401.0);
__ Fmov(s2, 123402.0);
+ __ Fmov(s3, 123403.0);
+ __ Fmov(s4, 123404.0);
+ __ Fmov(s5, 123405.0);
// Push registers conventionally.
__ Push(x0, x1, x2, x3);
- __ Push(w4, w5, w6);
+ __ Push(w4, w5, w6, w7);
__ Push(d0, d1);
- __ Push(s2);
+ __ Push(s2, s3, s4, s5);
// Queue up a pop.
+ queue.Queue(s5);
+ queue.Queue(s4);
+ queue.Queue(s3);
queue.Queue(s2);
queue.Queue(d1);
queue.Queue(d0);
+ queue.Queue(w7);
queue.Queue(w6);
queue.Queue(w5);
queue.Queue(w4);
@@ -13231,15 +12899,12 @@ TEST(pop_queued) {
queue.Queue(x1);
queue.Queue(x0);
- Clobber(&masm, CPURegList(CPURegister::kRegister, kXRegSizeInBits, 0, 6));
- Clobber(&masm, CPURegList(CPURegister::kVRegister, kDRegSizeInBits, 0, 2));
+ Clobber(&masm, CPURegList(CPURegister::kRegister, kXRegSizeInBits, 0, 8));
+ Clobber(&masm, CPURegList(CPURegister::kVRegister, kDRegSizeInBits, 0, 6));
// Actually pop them.
queue.PopQueued();
- __ Mov(csp, __ StackPointer());
- __ SetStackPointer(csp);
-
END();
RUN();
@@ -13252,11 +12917,15 @@ TEST(pop_queued) {
CHECK_EQUAL_64(0x0000000012340004, x4);
CHECK_EQUAL_64(0x0000000012340005, x5);
CHECK_EQUAL_64(0x0000000012340006, x6);
+ CHECK_EQUAL_64(0x0000000012340007, x7);
CHECK_EQUAL_FP64(123400.0, d0);
CHECK_EQUAL_FP64(123401.0, d1);
CHECK_EQUAL_FP32(123402.0, s2);
+ CHECK_EQUAL_FP32(123403.0, s3);
+ CHECK_EQUAL_FP32(123404.0, s4);
+ CHECK_EQUAL_FP32(123405.0, s5);
TEARDOWN();
}
@@ -13273,9 +12942,6 @@ TEST(copy_slots_down) {
START();
// Test copying 12 slots down one slot.
- __ Mov(jssp, __ StackPointer());
- __ SetStackPointer(jssp);
-
__ Mov(x1, ones);
__ Mov(x2, twos);
__ Mov(x3, threes);
@@ -13284,32 +12950,28 @@ TEST(copy_slots_down) {
__ Push(x1, x2, x3, x4);
__ Push(x1, x2, x1, x2);
__ Push(x3, x4, x3, x4);
- __ Push(xzr);
+ __ Push(xzr, xzr);
- __ Mov(x5, 0);
- __ Mov(x6, 1);
+ __ Mov(x5, 1);
+ __ Mov(x6, 2);
__ Mov(x7, 12);
__ CopySlots(x5, x6, x7);
- __ Pop(x4, x5, x6, x7);
- __ Pop(x8, x9, x10, x11);
- __ Pop(x12, x13, x14, x15);
- __ Drop(1);
+ __ Pop(xzr, x4, x5, x6);
+ __ Pop(x7, x8, x9, x10);
+ __ Pop(x11, x12, x13, x14);
+ __ Pop(x15, xzr);
// Test copying one slot down one slot.
- __ Push(x1, xzr, xzr);
+ __ Push(x1, xzr, xzr, xzr);
- __ Mov(x1, 1);
- __ Mov(x2, 2);
+ __ Mov(x1, 2);
+ __ Mov(x2, 3);
__ Mov(x3, 1);
__ CopySlots(x1, x2, x3);
- __ Drop(1);
- __ Pop(x0);
- __ Drop(1);
-
- __ Mov(csp, jssp);
- __ SetStackPointer(csp);
+ __ Drop(2);
+ __ Pop(x0, xzr);
END();
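For readers unfamiliar with the macro: CopySlots(dst, src, count) copies count pointer-sized stack slots, with slot indices measured upward from the current stack pointer. A rough host-side model under that reading (overlap handling simplified to memmove; the real macro drives CopyDoubleWords):

#include <cstdint>
#include <cstring>

// Minimal model: sp points at slot 0, each slot is 8 bytes on arm64.
void CopySlotsModel(uint64_t* sp, int dst, int src, int count) {
  std::memmove(sp + dst, sp + src, count * sizeof(uint64_t));
}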
@@ -13345,9 +13007,6 @@ TEST(copy_slots_up) {
START();
- __ Mov(jssp, __ StackPointer());
- __ SetStackPointer(jssp);
-
__ Mov(x1, ones);
__ Mov(x2, twos);
__ Mov(x3, threes);
@@ -13360,8 +13019,7 @@ TEST(copy_slots_up) {
__ Mov(x7, 1);
__ CopySlots(x5, x6, x7);
- __ Drop(1);
- __ Pop(x10);
+ __ Pop(xzr, x10);
// Test copying two slots to the next two slots higher in memory.
__ Push(xzr, xzr);
@@ -13376,19 +13034,16 @@ TEST(copy_slots_up) {
__ Pop(x11, x12);
// Test copying three slots to the next three slots higher in memory.
- __ Push(xzr, xzr, xzr);
- __ Push(x1, x2, x3);
+ __ Push(xzr, xzr, xzr, x1);
+ __ Push(x2, x3);
__ Mov(x5, 3);
__ Mov(x6, 0);
__ Mov(x7, 3);
__ CopySlots(x5, x6, x7);
- __ Drop(3);
- __ Pop(x0, x1, x2);
-
- __ Mov(csp, jssp);
- __ SetStackPointer(csp);
+ __ Drop(2);
+ __ Pop(xzr, x0, x1, x2);
END();
@@ -13415,16 +13070,13 @@ TEST(copy_double_words_downwards_even) {
START();
- __ Mov(jssp, __ StackPointer());
- __ SetStackPointer(jssp);
-
// Test copying 12 slots up one slot.
__ Mov(x1, ones);
__ Mov(x2, twos);
__ Mov(x3, threes);
__ Mov(x4, fours);
- __ Push(xzr);
+ __ Push(xzr, xzr);
__ Push(x1, x2, x3, x4);
__ Push(x1, x2, x1, x2);
__ Push(x3, x4, x3, x4);
@@ -13434,13 +13086,10 @@ TEST(copy_double_words_downwards_even) {
__ Mov(x7, 12);
__ CopyDoubleWords(x5, x6, x7, TurboAssembler::kSrcLessThanDst);
- __ Drop(1);
- __ Pop(x4, x5, x6, x7);
- __ Pop(x8, x9, x10, x11);
- __ Pop(x12, x13, x14, x15);
-
- __ Mov(csp, jssp);
- __ SetStackPointer(csp);
+ __ Pop(xzr, x4, x5, x6);
+ __ Pop(x7, x8, x9, x10);
+ __ Pop(x11, x12, x13, x14);
+ __ Pop(x15, xzr);
END();
@@ -13476,9 +13125,6 @@ TEST(copy_double_words_downwards_odd) {
START();
- __ Mov(jssp, __ StackPointer());
- __ SetStackPointer(jssp);
-
// Test copying 13 slots up one slot.
__ Mov(x1, ones);
__ Mov(x2, twos);
@@ -13496,15 +13142,11 @@ TEST(copy_double_words_downwards_odd) {
__ Mov(x7, 13);
__ CopyDoubleWords(x5, x6, x7, TurboAssembler::kSrcLessThanDst);
- __ Drop(1);
- __ Pop(x4);
+ __ Pop(xzr, x4);
__ Pop(x5, x6, x7, x8);
__ Pop(x9, x10, x11, x12);
__ Pop(x13, x14, x15, x16);
- __ Mov(csp, jssp);
- __ SetStackPointer(csp);
-
END();
RUN();
@@ -13541,9 +13183,6 @@ TEST(copy_noop) {
START();
- __ Mov(jssp, __ StackPointer());
- __ SetStackPointer(jssp);
-
__ Mov(x1, ones);
__ Mov(x2, twos);
__ Mov(x3, threes);
@@ -13572,9 +13211,6 @@ TEST(copy_noop) {
__ Pop(x9, x10, x11, x12);
__ Pop(x13, x14, x15, x16);
- __ Mov(csp, jssp);
- __ SetStackPointer(csp);
-
END();
RUN();
@@ -13613,13 +13249,13 @@ TEST(jump_both_smi) {
START();
__ Mov(x0, 0x5555555500000001UL); // A pointer.
- __ Mov(x1, 0xaaaaaaaa00000001UL); // A pointer.
+ __ Mov(x1, 0xAAAAAAAA00000001UL); // A pointer.
__ Mov(x2, 0x1234567800000000UL); // A smi.
__ Mov(x3, 0x8765432100000000UL); // A smi.
- __ Mov(x4, 0xdead);
- __ Mov(x5, 0xdead);
- __ Mov(x6, 0xdead);
- __ Mov(x7, 0xdead);
+ __ Mov(x4, 0xDEAD);
+ __ Mov(x5, 0xDEAD);
+ __ Mov(x6, 0xDEAD);
+ __ Mov(x7, 0xDEAD);
__ JumpIfBothSmi(x0, x1, &cond_pass_00, &cond_fail_00);
__ Bind(&return1);
@@ -13663,7 +13299,7 @@ TEST(jump_both_smi) {
RUN();
CHECK_EQUAL_64(0x5555555500000001UL, x0);
- CHECK_EQUAL_64(0xaaaaaaaa00000001UL, x1);
+ CHECK_EQUAL_64(0xAAAAAAAA00000001UL, x1);
CHECK_EQUAL_64(0x1234567800000000UL, x2);
CHECK_EQUAL_64(0x8765432100000000UL, x3);
CHECK_EQUAL_64(0, x4);
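The pointer/smi split in these inputs follows V8's arm64 tagging: a smi keeps its 32-bit payload in the upper word with all of the low 32 bits, including tag bit 0, clear, whereas heap pointers carry tag bit 0 set. That is why 0x1234567800000000 is "a smi" and 0x5555555500000001 is "a pointer". A sketch of the underlying predicate, with hypothetical names:

#include <cstdint>

// Tag bit 0: clear for smis, set for heap object pointers (V8 convention).
inline bool IsSmiLike(uint64_t value) { return (value & 1) == 0; }

// Both values are smis iff the OR of the two still has tag bit 0 clear.
inline bool AreBothSmiLike(uint64_t a, uint64_t b) { return ((a | b) & 1) == 0; }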
@@ -13686,13 +13322,13 @@ TEST(jump_either_smi) {
START();
__ Mov(x0, 0x5555555500000001UL); // A pointer.
- __ Mov(x1, 0xaaaaaaaa00000001UL); // A pointer.
+ __ Mov(x1, 0xAAAAAAAA00000001UL); // A pointer.
__ Mov(x2, 0x1234567800000000UL); // A smi.
__ Mov(x3, 0x8765432100000000UL); // A smi.
- __ Mov(x4, 0xdead);
- __ Mov(x5, 0xdead);
- __ Mov(x6, 0xdead);
- __ Mov(x7, 0xdead);
+ __ Mov(x4, 0xDEAD);
+ __ Mov(x5, 0xDEAD);
+ __ Mov(x6, 0xDEAD);
+ __ Mov(x7, 0xDEAD);
__ JumpIfEitherSmi(x0, x1, &cond_pass_00, &cond_fail_00);
__ Bind(&return1);
@@ -13736,7 +13372,7 @@ TEST(jump_either_smi) {
RUN();
CHECK_EQUAL_64(0x5555555500000001UL, x0);
- CHECK_EQUAL_64(0xaaaaaaaa00000001UL, x1);
+ CHECK_EQUAL_64(0xAAAAAAAA00000001UL, x1);
CHECK_EQUAL_64(0x1234567800000000UL, x2);
CHECK_EQUAL_64(0x8765432100000000UL, x3);
CHECK_EQUAL_64(0, x4);
@@ -14615,17 +14251,17 @@ TEST(printf) {
__ Mov(x2, reinterpret_cast<uintptr_t>(test_substring));
// Test the maximum number of arguments, and sign extension.
- __ Mov(w3, 0xffffffff);
- __ Mov(w4, 0xffffffff);
- __ Mov(x5, 0xffffffffffffffff);
- __ Mov(x6, 0xffffffffffffffff);
+ __ Mov(w3, 0xFFFFFFFF);
+ __ Mov(w4, 0xFFFFFFFF);
+ __ Mov(x5, 0xFFFFFFFFFFFFFFFF);
+ __ Mov(x6, 0xFFFFFFFFFFFFFFFF);
__ Fmov(s1, 1.234);
__ Fmov(s2, 2.345);
__ Fmov(d3, 3.456);
__ Fmov(d4, 4.567);
// Test printing callee-saved registers.
- __ Mov(x28, 0x123456789abcdef);
+ __ Mov(x28, 0x123456789ABCDEF);
__ Fmov(d10, 42.0);
// Test with three arguments.
@@ -14657,16 +14293,6 @@ TEST(printf) {
__ Printf("StackPointer(csp): 0x%016" PRIx64 ", 0x%08" PRIx32 "\n",
__ StackPointer(), __ StackPointer().W());
- // Test with a different stack pointer.
- const Register old_stack_pointer = __ StackPointer();
- __ Mov(x29, old_stack_pointer);
- __ SetStackPointer(x29);
- // Print the stack pointer (not csp).
- __ Printf("StackPointer(not csp): 0x%016" PRIx64 ", 0x%08" PRIx32 "\n",
- __ StackPointer(), __ StackPointer().W());
- __ Mov(old_stack_pointer, __ StackPointer());
- __ SetStackPointer(old_stack_pointer);
-
// Test with three arguments.
__ Printf("3=%u, 4=%u, 5=%u\n", x10, x11, x12);
@@ -14717,10 +14343,10 @@ TEST(printf_no_preserve) {
__ Mov(x22, x0);
// Test the maximum number of arguments, and sign extension.
- __ Mov(w3, 0xffffffff);
- __ Mov(w4, 0xffffffff);
- __ Mov(x5, 0xffffffffffffffff);
- __ Mov(x6, 0xffffffffffffffff);
+ __ Mov(w3, 0xFFFFFFFF);
+ __ Mov(w4, 0xFFFFFFFF);
+ __ Mov(x5, 0xFFFFFFFFFFFFFFFF);
+ __ Mov(x6, 0xFFFFFFFFFFFFFFFF);
__ PrintfNoPreserve("w3(uint32): %" PRIu32 "\nw4(int32): %" PRId32 "\n"
"x5(uint64): %" PRIu64 "\nx6(int64): %" PRId64 "\n",
w3, w4, x5, x6);
@@ -14734,7 +14360,7 @@ TEST(printf_no_preserve) {
__ Mov(x24, x0);
// Test printing callee-saved registers.
- __ Mov(x28, 0x123456789abcdef);
+ __ Mov(x28, 0x123456789ABCDEF);
__ PrintfNoPreserve("0x%" PRIx32 ", 0x%" PRIx64 "\n", w28, x28);
__ Mov(x25, x0);
@@ -14742,33 +14368,21 @@ TEST(printf_no_preserve) {
__ PrintfNoPreserve("%g\n", d10);
__ Mov(x26, x0);
- // Test with a different stack pointer.
- const Register old_stack_pointer = __ StackPointer();
- __ Mov(x29, old_stack_pointer);
- __ SetStackPointer(x29);
- // Print the stack pointer (not csp).
- __ PrintfNoPreserve(
- "StackPointer(not csp): 0x%016" PRIx64 ", 0x%08" PRIx32 "\n",
- __ StackPointer(), __ StackPointer().W());
- __ Mov(x27, x0);
- __ Mov(old_stack_pointer, __ StackPointer());
- __ SetStackPointer(old_stack_pointer);
-
// Test with three arguments.
__ Mov(x3, 3);
__ Mov(x4, 40);
__ Mov(x5, 500);
__ PrintfNoPreserve("3=%u, 4=%u, 5=%u\n", x3, x4, x5);
- __ Mov(x28, x0);
+ __ Mov(x27, x0);
// Mixed argument types.
- __ Mov(w3, 0xffffffff);
+ __ Mov(w3, 0xFFFFFFFF);
__ Fmov(s1, 1.234);
- __ Mov(x5, 0xffffffffffffffff);
+ __ Mov(x5, 0xFFFFFFFFFFFFFFFF);
__ Fmov(d3, 3.456);
__ PrintfNoPreserve("w3: %" PRIu32 ", s1: %f, x5: %" PRIu64 ", d3: %f\n",
w3, s1, x5, d3);
- __ Mov(x29, x0);
+ __ Mov(x28, x0);
END();
RUN();
@@ -14794,18 +14408,14 @@ TEST(printf_no_preserve) {
// %e: 3.456000e+00
// %E: 4.567000E+00
CHECK_EQUAL_64(13 + 10 + 17 + 17, x24);
- // 0x89abcdef, 0x123456789abcdef
+ // 0x89ABCDEF, 0x123456789ABCDEF
CHECK_EQUAL_64(30, x25);
// 42
CHECK_EQUAL_64(3, x26);
- // StackPointer(not csp): 0x00007fb037ae2370, 0x37ae2370
- // Note: This is an example value, but the field width is fixed here so the
- // string length is still predictable.
- CHECK_EQUAL_64(54, x27);
// 3=3, 4=40, 5=500
- CHECK_EQUAL_64(17, x28);
+ CHECK_EQUAL_64(17, x27);
// w3: 4294967295, s1: 1.234000, x5: 18446744073709551615, d3: 3.456000
- CHECK_EQUAL_64(69, x29);
+ CHECK_EQUAL_64(69, x28);
TEARDOWN();
}
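Note: Printf/PrintfNoPreserve leave the libc printf return value (characters written) in x0, which the test snapshots into successive registers after each call; with the alternate-stack-pointer sub-test deleted, the later snapshots shift down from x28/x29 to x27/x28 and the expected lengths move with them. The expected counts are just the formatted string lengths, e.g. for the mixed-argument line checked as 69:

    // Host-side sanity check of the expected length (sketch only).
    #include <cassert>
    #include <cstdio>
    int main() {
      char buf[128];
      int n = snprintf(buf, sizeof(buf), "w3: %u, s1: %f, x5: %llu, d3: %f\n",
                       4294967295u, 1.234, 18446744073709551615ull, 3.456);
      assert(n == 69);  // matches CHECK_EQUAL_64(69, x28) above
    }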
@@ -14824,18 +14434,18 @@ TEST(blr_lr) {
__ Adr(lr, &target);
__ Blr(lr);
- __ Mov(x0, 0xdeadbeef);
+ __ Mov(x0, 0xDEADBEEF);
__ B(&end);
__ Bind(&target);
- __ Mov(x0, 0xc001c0de);
+ __ Mov(x0, 0xC001C0DE);
__ Bind(&end);
END();
RUN();
- CHECK_EQUAL_64(0xc001c0de, x0);
+ CHECK_EQUAL_64(0xC001C0DE, x0);
TEARDOWN();
}
@@ -14904,13 +14514,13 @@ TEST(barriers) {
TEST(process_nan_double) {
INIT_V8();
// Make sure that NaN propagation works correctly.
- double sn = bit_cast<double>(0x7ff5555511111111);
- double qn = bit_cast<double>(0x7ffaaaaa11111111);
+ double sn = bit_cast<double>(0x7FF5555511111111);
+ double qn = bit_cast<double>(0x7FFAAAAA11111111);
CHECK(IsSignallingNaN(sn));
CHECK(IsQuietNaN(qn));
// The input NaNs after passing through ProcessNaN.
- double sn_proc = bit_cast<double>(0x7ffd555511111111);
+ double sn_proc = bit_cast<double>(0x7FFD555511111111);
double qn_proc = qn;
CHECK(IsQuietNaN(sn_proc));
CHECK(IsQuietNaN(qn_proc));
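Note: the "processed" bit patterns encode the usual quieting rule: passing a signalling NaN through an FP operation sets the top fraction bit (bit 51 for doubles, bit 22 for floats) while keeping the payload, so 0x7FF5... becomes 0x7FFD... and 0x7F95... becomes 0x7FD5.... A sketch of that relation, under the assumption that this is all ProcessNaN does to the bits:

    #include <cstdint>
    static uint64_t QuietD(uint64_t sn) { return sn | (1ULL << 51); }
    static uint32_t QuietS(uint32_t sn) { return sn | (1U << 22); }
    // QuietD(0x7FF5555511111111) == 0x7FFD555511111111
    // QuietS(0x7F951111)         == 0x7FD51111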
@@ -14980,13 +14590,13 @@ TEST(process_nan_double) {
TEST(process_nan_float) {
INIT_V8();
// Make sure that NaN propagation works correctly.
- float sn = bit_cast<float>(0x7f951111);
- float qn = bit_cast<float>(0x7fea1111);
+ float sn = bit_cast<float>(0x7F951111);
+ float qn = bit_cast<float>(0x7FEA1111);
CHECK(IsSignallingNaN(sn));
CHECK(IsQuietNaN(qn));
// The input NaNs after passing through ProcessNaN.
- float sn_proc = bit_cast<float>(0x7fd51111);
+ float sn_proc = bit_cast<float>(0x7FD51111);
float qn_proc = qn;
CHECK(IsQuietNaN(sn_proc));
CHECK(IsQuietNaN(qn_proc));
@@ -15090,18 +14700,18 @@ static void ProcessNaNsHelper(double n, double m, double expected) {
TEST(process_nans_double) {
INIT_V8();
// Make sure that NaN propagation works correctly.
- double sn = bit_cast<double>(0x7ff5555511111111);
- double sm = bit_cast<double>(0x7ff5555522222222);
- double qn = bit_cast<double>(0x7ffaaaaa11111111);
- double qm = bit_cast<double>(0x7ffaaaaa22222222);
+ double sn = bit_cast<double>(0x7FF5555511111111);
+ double sm = bit_cast<double>(0x7FF5555522222222);
+ double qn = bit_cast<double>(0x7FFAAAAA11111111);
+ double qm = bit_cast<double>(0x7FFAAAAA22222222);
CHECK(IsSignallingNaN(sn));
CHECK(IsSignallingNaN(sm));
CHECK(IsQuietNaN(qn));
CHECK(IsQuietNaN(qm));
// The input NaNs after passing through ProcessNaN.
- double sn_proc = bit_cast<double>(0x7ffd555511111111);
- double sm_proc = bit_cast<double>(0x7ffd555522222222);
+ double sn_proc = bit_cast<double>(0x7FFD555511111111);
+ double sm_proc = bit_cast<double>(0x7FFD555522222222);
double qn_proc = qn;
double qm_proc = qm;
CHECK(IsQuietNaN(sn_proc));
@@ -15162,18 +14772,18 @@ static void ProcessNaNsHelper(float n, float m, float expected) {
TEST(process_nans_float) {
INIT_V8();
// Make sure that NaN propagation works correctly.
- float sn = bit_cast<float>(0x7f951111);
- float sm = bit_cast<float>(0x7f952222);
- float qn = bit_cast<float>(0x7fea1111);
- float qm = bit_cast<float>(0x7fea2222);
+ float sn = bit_cast<float>(0x7F951111);
+ float sm = bit_cast<float>(0x7F952222);
+ float qn = bit_cast<float>(0x7FEA1111);
+ float qm = bit_cast<float>(0x7FEA2222);
CHECK(IsSignallingNaN(sn));
CHECK(IsSignallingNaN(sm));
CHECK(IsQuietNaN(qn));
CHECK(IsQuietNaN(qm));
// The input NaNs after passing through ProcessNaN.
- float sn_proc = bit_cast<float>(0x7fd51111);
- float sm_proc = bit_cast<float>(0x7fd52222);
+ float sn_proc = bit_cast<float>(0x7FD51111);
+ float sm_proc = bit_cast<float>(0x7FD52222);
float qn_proc = qn;
float qm_proc = qm;
CHECK(IsQuietNaN(sn_proc));
@@ -15287,12 +14897,12 @@ static void DefaultNaNHelper(float n, float m, float a) {
TEST(default_nan_float) {
INIT_V8();
- float sn = bit_cast<float>(0x7f951111);
- float sm = bit_cast<float>(0x7f952222);
- float sa = bit_cast<float>(0x7f95aaaa);
- float qn = bit_cast<float>(0x7fea1111);
- float qm = bit_cast<float>(0x7fea2222);
- float qa = bit_cast<float>(0x7feaaaaa);
+ float sn = bit_cast<float>(0x7F951111);
+ float sm = bit_cast<float>(0x7F952222);
+ float sa = bit_cast<float>(0x7F95AAAA);
+ float qn = bit_cast<float>(0x7FEA1111);
+ float qm = bit_cast<float>(0x7FEA2222);
+ float qa = bit_cast<float>(0x7FEAAAAA);
CHECK(IsSignallingNaN(sn));
CHECK(IsSignallingNaN(sm));
CHECK(IsSignallingNaN(sa));
@@ -15415,12 +15025,12 @@ static void DefaultNaNHelper(double n, double m, double a) {
TEST(default_nan_double) {
INIT_V8();
- double sn = bit_cast<double>(0x7ff5555511111111);
- double sm = bit_cast<double>(0x7ff5555522222222);
- double sa = bit_cast<double>(0x7ff55555aaaaaaaa);
- double qn = bit_cast<double>(0x7ffaaaaa11111111);
- double qm = bit_cast<double>(0x7ffaaaaa22222222);
- double qa = bit_cast<double>(0x7ffaaaaaaaaaaaaa);
+ double sn = bit_cast<double>(0x7FF5555511111111);
+ double sm = bit_cast<double>(0x7FF5555522222222);
+ double sa = bit_cast<double>(0x7FF55555AAAAAAAA);
+ double qn = bit_cast<double>(0x7FFAAAAA11111111);
+ double qm = bit_cast<double>(0x7FFAAAAA22222222);
+ double qa = bit_cast<double>(0x7FFAAAAAAAAAAAAA);
CHECK(IsSignallingNaN(sn));
CHECK(IsSignallingNaN(sm));
CHECK(IsSignallingNaN(sa));
diff --git a/deps/v8/test/cctest/test-assembler-ia32.cc b/deps/v8/test/cctest/test-assembler-ia32.cc
index e39489b93d..5b79ff1fc1 100644
--- a/deps/v8/test/cctest/test-assembler-ia32.cc
+++ b/deps/v8/test/cctest/test-assembler-ia32.cc
@@ -521,6 +521,7 @@ TEST(AssemblerIa32SSE) {
__ mulps(xmm2, xmm1);
__ subps(xmm2, xmm0);
__ divps(xmm2, xmm1);
+ __ haddps(xmm1, xmm0);
__ cvttss2si(eax, xmm2);
__ ret(0);
}
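Note: the newly added haddps line exercises the SSE3 horizontal add: each destination lane becomes the sum of an adjacent lane pair, pairs taken first from the destination and then from the source. A scalar sketch of the lane arithmetic (standard SSE3 semantics, nothing V8-specific assumed):

    // dst and src are 4-lane float vectors, lane 0 lowest.
    void haddps(float dst[4], const float src[4]) {
      float r0 = dst[0] + dst[1], r1 = dst[2] + dst[3];
      float r2 = src[0] + src[1], r3 = src[2] + src[3];
      dst[0] = r0; dst[1] = r1; dst[2] = r2; dst[3] = r3;
    }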
@@ -1054,12 +1055,12 @@ TEST(AssemblerIa32BMI1) {
// blsmsk
__ inc(eax);
__ blsmsk(ebx, ecx);
- __ cmp(ebx, Immediate(0x0000000fu)); // expected result
+ __ cmp(ebx, Immediate(0x0000000Fu)); // expected result
__ j(not_equal, &exit);
__ inc(eax);
__ blsmsk(ebx, Operand(esp, 0));
- __ cmp(ebx, Immediate(0x0000000fu)); // expected result
+ __ cmp(ebx, Immediate(0x0000000Fu)); // expected result
__ j(not_equal, &exit);
// blsr
@@ -1250,7 +1251,7 @@ TEST(AssemblerIa32BMI2) {
__ j(not_equal, &exit);
// pdep
- __ mov(edx, Immediate(0xfffffff0u));
+ __ mov(edx, Immediate(0xFFFFFFF0u));
__ inc(eax);
__ pdep(ebx, edx, ecx);
@@ -1263,16 +1264,16 @@ TEST(AssemblerIa32BMI2) {
__ j(not_equal, &exit);
// pext
- __ mov(edx, Immediate(0xfffffff0u));
+ __ mov(edx, Immediate(0xFFFFFFF0u));
__ inc(eax);
__ pext(ebx, edx, ecx);
- __ cmp(ebx, Immediate(0x0000fffeu)); // expected result
+ __ cmp(ebx, Immediate(0x0000FFFEu)); // expected result
__ j(not_equal, &exit);
__ inc(eax);
__ pext(ebx, edx, Operand(esp, 0));
- __ cmp(ebx, Immediate(0x0000fffeu)); // expected result
+ __ cmp(ebx, Immediate(0x0000FFFEu)); // expected result
__ j(not_equal, &exit);
// sarx
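Note: the blsmsk expectations follow directly from the BMI1 definition, blsmsk(x) = x ^ (x - 1), which produces a mask of every bit up to and including the lowest set bit; any operand whose lowest set bit is bit 3 therefore yields 0x0000000F. A one-line sketch:

    static uint32_t Blsmsk(uint32_t x) { return x ^ (x - 1); }
    // e.g. Blsmsk(0x12345678) == 0x0000000F, as the cmp above expects.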
diff --git a/deps/v8/test/cctest/test-assembler-mips.cc b/deps/v8/test/cctest/test-assembler-mips.cc
index 79a80c3a43..1b337f525c 100644
--- a/deps/v8/test/cctest/test-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-assembler-mips.cc
@@ -35,7 +35,7 @@
#include "src/factory.h"
#include "src/macro-assembler.h"
#include "src/mips/macro-assembler-mips.h"
-#include "src/mips/simulator-mips.h"
+#include "src/simulator.h"
#include "test/cctest/cctest.h"
@@ -43,10 +43,11 @@ namespace v8 {
namespace internal {
// Define these function prototypes to match JSEntryFunction in execution.cc.
-typedef Object* (*F1)(int x, int p1, int p2, int p3, int p4);
-typedef Object* (*F2)(int x, int y, int p2, int p3, int p4);
-typedef Object* (*F3)(void* p, int p1, int p2, int p3, int p4);
-typedef Object* (*F4)(void* p0, void* p1, int p2, int p3, int p4);
+// TODO(mips): Refine these signatures per test case.
+typedef Object*(F1)(int x, int p1, int p2, int p3, int p4);
+typedef Object*(F2)(int x, int y, int p2, int p3, int p4);
+typedef Object*(F3)(void* p, int p1, int p2, int p3, int p4);
+typedef Object*(F4)(void* p0, void* p1, int p2, int p3, int p4);
#define __ assm.
@@ -67,10 +68,9 @@ TEST(MIPS0) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F2 f = FUNCTION_CAST<F2>(code->entry());
- int res = reinterpret_cast<int>(
- CALL_GENERATED_CODE(isolate, f, 0xab0, 0xc, 0, 0, 0));
- CHECK_EQ(static_cast<int32_t>(0xabc), res);
+ auto f = GeneratedCode<F2>::FromCode(*code);
+ int res = reinterpret_cast<int>(f.Call(0xAB0, 0xC, 0, 0, 0));
+ CHECK_EQ(static_cast<int32_t>(0xABC), res);
}
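Note: the recurring mechanical change in these MIPS tests is the move from FUNCTION_CAST<F2>(code->entry()) plus the CALL_GENERATED_CODE macro to a typed GeneratedCode<F2>::FromCode(*code) wrapper with a plain f.Call(...). The typedefs above also change from function-pointer types to function types, which is what lets the wrapper be parameterized on the signature. A minimal sketch of the shape of such a wrapper (hypothetical; the real one lives in src/simulator.h and also routes calls through the simulator on cross-compiled builds):

    // Sketch only: F is a function *type*, e.g. Object*(int, int, int, int, int).
    template <typename F>
    class GeneratedCodeSketch {
     public:
      static GeneratedCodeSketch FromBuffer(void* entry) {
        return GeneratedCodeSketch(reinterpret_cast<F*>(entry));
      }
      template <typename... Args>
      auto Call(Args... args) {
        return fn_(args...);  // a simulator build would trampoline here instead
      }
     private:
      explicit GeneratedCodeSketch(F* fn) : fn_(fn) {}
      F* fn_;
    };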
@@ -104,9 +104,8 @@ TEST(MIPS1) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F1 f = FUNCTION_CAST<F1>(code->entry());
- int res = reinterpret_cast<int>(
- CALL_GENERATED_CODE(isolate, f, 50, 0, 0, 0, 0));
+ auto f = GeneratedCode<F1>::FromCode(*code);
+ int res = reinterpret_cast<int>(f.Call(50, 0, 0, 0, 0));
CHECK_EQ(1275, res);
}
@@ -129,8 +128,8 @@ TEST(MIPS2) {
__ ori(t0, zero_reg, 0);
__ lui(t0, 0x1234);
__ ori(t0, t0, 0);
- __ ori(t0, t0, 0x0f0f);
- __ ori(t0, t0, 0xf0f0);
+ __ ori(t0, t0, 0x0F0F);
+ __ ori(t0, t0, 0xF0F0);
__ addiu(t1, t0, 1);
__ addiu(t2, t1, -0x10);
@@ -138,20 +137,20 @@ TEST(MIPS2) {
__ li(t0, 0x00000004);
__ li(t1, 0x00001234);
__ li(t2, 0x12345678);
- __ li(t3, 0x7fffffff);
- __ li(t4, 0xfffffffc);
- __ li(t5, 0xffffedcc);
- __ li(t6, 0xedcba988);
+ __ li(t3, 0x7FFFFFFF);
+ __ li(t4, 0xFFFFFFFC);
+ __ li(t5, 0xFFFFEDCC);
+ __ li(t6, 0xEDCBA988);
__ li(t7, 0x80000000);
// SPECIAL class.
__ srl(v0, t2, 8); // 0x00123456
- __ sll(v0, v0, 11); // 0x91a2b000
- __ sra(v0, v0, 3); // 0xf2345600
- __ srav(v0, v0, t0); // 0xff234560
- __ sllv(v0, v0, t0); // 0xf2345600
- __ srlv(v0, v0, t0); // 0x0f234560
- __ Branch(&error, ne, v0, Operand(0x0f234560));
+ __ sll(v0, v0, 11); // 0x91A2B000
+ __ sra(v0, v0, 3); // 0xF2345600
+ __ srav(v0, v0, t0); // 0xFF234560
+ __ sllv(v0, v0, t0); // 0xF2345600
+ __ srlv(v0, v0, t0); // 0x0F234560
+ __ Branch(&error, ne, v0, Operand(0x0F234560));
__ nop();
__ addu(v0, t0, t1); // 0x00001238
@@ -161,15 +160,15 @@ TEST(MIPS2) {
__ addu(v1, t3, t0);
__ Branch(&error, ne, v1, Operand(0x80000003));
__ nop();
- __ subu(v1, t7, t0); // 0x7ffffffc
- __ Branch(&error, ne, v1, Operand(0x7ffffffc));
+ __ subu(v1, t7, t0); // 0x7FFFFFFC
+ __ Branch(&error, ne, v1, Operand(0x7FFFFFFC));
__ nop();
__ and_(v0, t1, t2); // 0x00001230
__ or_(v0, v0, t1); // 0x00001234
- __ xor_(v0, v0, t2); // 0x1234444c
- __ nor(v0, v0, t2); // 0xedcba987
- __ Branch(&error, ne, v0, Operand(0xedcba983));
+ __ xor_(v0, v0, t2); // 0x1234444C
+ __ nor(v0, v0, t2); // 0xEDCBA987
+ __ Branch(&error, ne, v0, Operand(0xEDCBA983));
__ nop();
__ slt(v0, t7, t3);
@@ -190,7 +189,7 @@ TEST(MIPS2) {
__ nop();
__ slti(v0, t1, 0x00002000); // 0x1
- __ slti(v0, v0, 0xffff8000); // 0x0
+ __ slti(v0, v0, 0xFFFF8000); // 0x0
__ Branch(&error, ne, v0, Operand(zero_reg));
__ nop();
__ sltiu(v0, t1, 0x00002000); // 0x1
@@ -198,10 +197,10 @@ TEST(MIPS2) {
__ Branch(&error, ne, v0, Operand(0x1));
__ nop();
- __ andi(v0, t1, 0xf0f0); // 0x00001030
- __ ori(v0, v0, 0x8a00); // 0x00009a30
- __ xori(v0, v0, 0x83cc); // 0x000019fc
- __ Branch(&error, ne, v0, Operand(0x000019fc));
+ __ andi(v0, t1, 0xF0F0); // 0x00001030
+ __ ori(v0, v0, 0x8A00); // 0x00009A30
+ __ xori(v0, v0, 0x83CC); // 0x000019FC
+ __ Branch(&error, ne, v0, Operand(0x000019FC));
__ nop();
__ lui(v1, 0x8123); // 0x81230000
__ Branch(&error, ne, v1, Operand(0x81230000));
@@ -218,11 +217,11 @@ TEST(MIPS2) {
__ addu(v0, v0, v1); // 51
__ Branch(&error, ne, v0, Operand(51));
__ Movn(a0, t3, t0); // Move a0<-t3 (t0 is NOT 0).
- __ Ins(a0, t1, 12, 8); // 0x7ff34fff
- __ Branch(&error, ne, a0, Operand(0x7ff34fff));
+ __ Ins(a0, t1, 12, 8); // 0x7FF34FFF
+ __ Branch(&error, ne, a0, Operand(0x7FF34FFF));
__ Movz(a0, t6, t7); // a0 not updated (t7 is NOT 0).
- __ Ext(a1, a0, 8, 12); // 0x34f
- __ Branch(&error, ne, a1, Operand(0x34f));
+ __ Ext(a1, a0, 8, 12); // 0x34F
+ __ Branch(&error, ne, a1, Operand(0x34F));
__ Movz(a0, t6, v1); // a0<-t6, v0 is 0, from 8 instr back.
__ Branch(&error, ne, a0, Operand(t6));
@@ -243,9 +242,8 @@ TEST(MIPS2) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F2 f = FUNCTION_CAST<F2>(code->entry());
- int res = reinterpret_cast<int>(
- CALL_GENERATED_CODE(isolate, f, 0xab0, 0xc, 0, 0, 0));
+ auto f = GeneratedCode<F2>::FromCode(*code);
+ int res = reinterpret_cast<int>(f.Call(0xAB0, 0xC, 0, 0, 0));
CHECK_EQ(static_cast<int32_t>(0x31415926), res);
}
@@ -346,7 +344,7 @@ TEST(MIPS3) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
// Double test values.
t.a = 1.5e14;
t.b = 2.75e11;
@@ -363,8 +361,7 @@ TEST(MIPS3) {
t.fd = 0.0;
t.fe = 0.0;
t.ff = 0.0;
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
- USE(dummy);
+ f.Call(&t, 0, 0, 0, 0);
// Expected double results.
CHECK_EQ(1.5e14, t.a);
CHECK_EQ(1.5e14, t.b);
@@ -451,12 +448,11 @@ TEST(MIPS4) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
t.a = 1.5e22;
t.b = 2.75e11;
t.c = 17.17;
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
- USE(dummy);
+ f.Call(&t, 0, 0, 0, 0);
CHECK_EQ(2.75e11, t.a);
CHECK_EQ(2.75e11, t.b);
@@ -515,13 +511,12 @@ TEST(MIPS5) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
t.a = 1.5e4;
t.b = 2.75e8;
t.i = 12345678;
t.j = -100000;
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
- USE(dummy);
+ f.Call(&t, 0, 0, 0, 0);
CHECK_EQ(12345678.0, t.a);
CHECK_EQ(-100000.0, t.b);
@@ -585,25 +580,24 @@ TEST(MIPS6) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
t.ui = 0x11223344;
- t.si = 0x99aabbcc;
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
- USE(dummy);
+ t.si = 0x99AABBCC;
+ f.Call(&t, 0, 0, 0, 0);
CHECK_EQ(static_cast<int32_t>(0x11223344), t.r1);
#if __BYTE_ORDER == __LITTLE_ENDIAN
CHECK_EQ(static_cast<int32_t>(0x3344), t.r2);
- CHECK_EQ(static_cast<int32_t>(0xffffbbcc), t.r3);
- CHECK_EQ(static_cast<int32_t>(0x0000bbcc), t.r4);
- CHECK_EQ(static_cast<int32_t>(0xffffffcc), t.r5);
- CHECK_EQ(static_cast<int32_t>(0x3333bbcc), t.r6);
+ CHECK_EQ(static_cast<int32_t>(0xFFFFBBCC), t.r3);
+ CHECK_EQ(static_cast<int32_t>(0x0000BBCC), t.r4);
+ CHECK_EQ(static_cast<int32_t>(0xFFFFFFCC), t.r5);
+ CHECK_EQ(static_cast<int32_t>(0x3333BBCC), t.r6);
#elif __BYTE_ORDER == __BIG_ENDIAN
CHECK_EQ(static_cast<int32_t>(0x1122), t.r2);
- CHECK_EQ(static_cast<int32_t>(0xffff99aa), t.r3);
- CHECK_EQ(static_cast<int32_t>(0x000099aa), t.r4);
- CHECK_EQ(static_cast<int32_t>(0xffffff99), t.r5);
- CHECK_EQ(static_cast<int32_t>(0x99aa3333), t.r6);
+ CHECK_EQ(static_cast<int32_t>(0xFFFF99AA), t.r3);
+ CHECK_EQ(static_cast<int32_t>(0x000099AA), t.r4);
+ CHECK_EQ(static_cast<int32_t>(0xFFFFFF99), t.r5);
+ CHECK_EQ(static_cast<int32_t>(0x99AA3333), t.r6);
#else
#error Unknown endianness
#endif
@@ -679,7 +673,7 @@ TEST(MIPS7) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
t.a = 1.5e14;
t.b = 2.75e11;
t.c = 2.0;
@@ -687,8 +681,7 @@ TEST(MIPS7) {
t.e = 0.0;
t.f = 0.0;
t.result = 0;
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
- USE(dummy);
+ f.Call(&t, 0, 0, 0, 0);
CHECK_EQ(1.5e14, t.a);
CHECK_EQ(2.75e11, t.b);
CHECK_EQ(1, t.result);
@@ -730,11 +723,11 @@ TEST(MIPS8) {
// ROTR instruction (called through the Ror macro).
__ Ror(t1, t0, 0x0004);
__ Ror(t2, t0, 0x0008);
- __ Ror(t3, t0, 0x000c);
+ __ Ror(t3, t0, 0x000C);
__ Ror(t4, t0, 0x0010);
__ Ror(t5, t0, 0x0014);
__ Ror(t6, t0, 0x0018);
- __ Ror(t7, t0, 0x001c);
+ __ Ror(t7, t0, 0x001C);
// Basic word store.
__ sw(t1, MemOperand(a0, offsetof(T, result_rotr_4)) );
@@ -777,10 +770,9 @@ TEST(MIPS8) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
t.input = 0x12345678;
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0x0, 0, 0, 0);
- USE(dummy);
+ f.Call(&t, 0x0, 0, 0, 0);
CHECK_EQ(static_cast<int32_t>(0x81234567), t.result_rotr_4);
CHECK_EQ(static_cast<int32_t>(0x78123456), t.result_rotr_8);
CHECK_EQ(static_cast<int32_t>(0x67812345), t.result_rotr_12);
@@ -875,11 +867,10 @@ TEST(MIPS10) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
t.a = 2.147483646e+09; // 0x7FFFFFFE -> 0xFF80000041DFFFFF as double.
- t.b_word = 0x0ff00ff0; // 0x0FF00FF0 -> 0x as double.
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
- USE(dummy);
+ t.b_word = 0x0FF00FF0; // 0x0FF00FF0 -> 0x as double.
+ f.Call(&t, 0, 0, 0, 0);
CHECK_EQ(static_cast<int32_t>(0x41DFFFFF), t.dbl_exp);
CHECK_EQ(static_cast<int32_t>(0xFF800000), t.dbl_mant);
CHECK_EQ(static_cast<int32_t>(0x7FFFFFFE), t.word);
@@ -1003,53 +994,52 @@ TEST(MIPS11) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
- t.reg_init = 0xaabbccdd;
+ auto f = GeneratedCode<F3>::FromCode(*code);
+ t.reg_init = 0xAABBCCDD;
t.mem_init = 0x11223344;
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
- USE(dummy);
+ f.Call(&t, 0, 0, 0, 0);
#if __BYTE_ORDER == __LITTLE_ENDIAN
- CHECK_EQ(static_cast<int32_t>(0x44bbccdd), t.lwl_0);
- CHECK_EQ(static_cast<int32_t>(0x3344ccdd), t.lwl_1);
- CHECK_EQ(static_cast<int32_t>(0x223344dd), t.lwl_2);
+ CHECK_EQ(static_cast<int32_t>(0x44BBCCDD), t.lwl_0);
+ CHECK_EQ(static_cast<int32_t>(0x3344CCDD), t.lwl_1);
+ CHECK_EQ(static_cast<int32_t>(0x223344DD), t.lwl_2);
CHECK_EQ(static_cast<int32_t>(0x11223344), t.lwl_3);
CHECK_EQ(static_cast<int32_t>(0x11223344), t.lwr_0);
- CHECK_EQ(static_cast<int32_t>(0xaa112233), t.lwr_1);
- CHECK_EQ(static_cast<int32_t>(0xaabb1122), t.lwr_2);
- CHECK_EQ(static_cast<int32_t>(0xaabbcc11), t.lwr_3);
-
- CHECK_EQ(static_cast<int32_t>(0x112233aa), t.swl_0);
- CHECK_EQ(static_cast<int32_t>(0x1122aabb), t.swl_1);
- CHECK_EQ(static_cast<int32_t>(0x11aabbcc), t.swl_2);
- CHECK_EQ(static_cast<int32_t>(0xaabbccdd), t.swl_3);
-
- CHECK_EQ(static_cast<int32_t>(0xaabbccdd), t.swr_0);
- CHECK_EQ(static_cast<int32_t>(0xbbccdd44), t.swr_1);
- CHECK_EQ(static_cast<int32_t>(0xccdd3344), t.swr_2);
- CHECK_EQ(static_cast<int32_t>(0xdd223344), t.swr_3);
+ CHECK_EQ(static_cast<int32_t>(0xAA112233), t.lwr_1);
+ CHECK_EQ(static_cast<int32_t>(0xAABB1122), t.lwr_2);
+ CHECK_EQ(static_cast<int32_t>(0xAABBCC11), t.lwr_3);
+
+ CHECK_EQ(static_cast<int32_t>(0x112233AA), t.swl_0);
+ CHECK_EQ(static_cast<int32_t>(0x1122AABB), t.swl_1);
+ CHECK_EQ(static_cast<int32_t>(0x11AABBCC), t.swl_2);
+ CHECK_EQ(static_cast<int32_t>(0xAABBCCDD), t.swl_3);
+
+ CHECK_EQ(static_cast<int32_t>(0xAABBCCDD), t.swr_0);
+ CHECK_EQ(static_cast<int32_t>(0xBBCCDD44), t.swr_1);
+ CHECK_EQ(static_cast<int32_t>(0xCCDD3344), t.swr_2);
+ CHECK_EQ(static_cast<int32_t>(0xDD223344), t.swr_3);
#elif __BYTE_ORDER == __BIG_ENDIAN
CHECK_EQ(static_cast<int32_t>(0x11223344), t.lwl_0);
- CHECK_EQ(static_cast<int32_t>(0x223344dd), t.lwl_1);
- CHECK_EQ(static_cast<int32_t>(0x3344ccdd), t.lwl_2);
- CHECK_EQ(static_cast<int32_t>(0x44bbccdd), t.lwl_3);
+ CHECK_EQ(static_cast<int32_t>(0x223344DD), t.lwl_1);
+ CHECK_EQ(static_cast<int32_t>(0x3344CCDD), t.lwl_2);
+ CHECK_EQ(static_cast<int32_t>(0x44BBCCDD), t.lwl_3);
- CHECK_EQ(static_cast<int32_t>(0xaabbcc11), t.lwr_0);
- CHECK_EQ(static_cast<int32_t>(0xaabb1122), t.lwr_1);
- CHECK_EQ(static_cast<int32_t>(0xaa112233), t.lwr_2);
+ CHECK_EQ(static_cast<int32_t>(0xAABBCC11), t.lwr_0);
+ CHECK_EQ(static_cast<int32_t>(0xAABB1122), t.lwr_1);
+ CHECK_EQ(static_cast<int32_t>(0xAA112233), t.lwr_2);
CHECK_EQ(static_cast<int32_t>(0x11223344), t.lwr_3);
- CHECK_EQ(static_cast<int32_t>(0xaabbccdd), t.swl_0);
- CHECK_EQ(static_cast<int32_t>(0x11aabbcc), t.swl_1);
- CHECK_EQ(static_cast<int32_t>(0x1122aabb), t.swl_2);
- CHECK_EQ(static_cast<int32_t>(0x112233aa), t.swl_3);
+ CHECK_EQ(static_cast<int32_t>(0xAABBCCDD), t.swl_0);
+ CHECK_EQ(static_cast<int32_t>(0x11AABBCC), t.swl_1);
+ CHECK_EQ(static_cast<int32_t>(0x1122AABB), t.swl_2);
+ CHECK_EQ(static_cast<int32_t>(0x112233AA), t.swl_3);
- CHECK_EQ(static_cast<int32_t>(0xdd223344), t.swr_0);
- CHECK_EQ(static_cast<int32_t>(0xccdd3344), t.swr_1);
- CHECK_EQ(static_cast<int32_t>(0xbbccdd44), t.swr_2);
- CHECK_EQ(static_cast<int32_t>(0xaabbccdd), t.swr_3);
+ CHECK_EQ(static_cast<int32_t>(0xDD223344), t.swr_0);
+ CHECK_EQ(static_cast<int32_t>(0xCCDD3344), t.swr_1);
+ CHECK_EQ(static_cast<int32_t>(0xBBCCDD44), t.swr_2);
+ CHECK_EQ(static_cast<int32_t>(0xAABBCCDD), t.swr_3);
#else
#error Unknown endianness
#endif
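Note: the lwl/lwr (and mirrored swl/swr) expectations encode the MIPS unaligned-access semantics: with byte offset a, little-endian lwl merges the a+1 lowest-addressed bytes of the word into the high end of the register and keeps the remaining low bytes of rt; lwr does the mirror image at the low end. A helper reproducing the little-endian lwl rows, as an illustration only:

    #include <cstdint>
    // lwl_le(0xAABBCCDD, 0x11223344, 1) == 0x3344CCDD, matching t.lwl_1.
    uint32_t lwl_le(uint32_t rt, uint32_t mem, int a) {
      int n = 8 * (a + 1);                  // bits supplied by memory
      uint32_t from_mem = mem << (32 - n);  // lowest bytes -> high end
      uint32_t kept = (n == 32) ? 0 : (rt & ((1u << (32 - n)) - 1));
      return from_mem | kept;
    }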
@@ -1130,7 +1120,7 @@ TEST(MIPS12) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
t.x = 1;
t.y = 2;
t.y1 = 3;
@@ -1138,8 +1128,7 @@ TEST(MIPS12) {
t.y3 = 0XBABA;
t.y4 = 0xDEDA;
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
- USE(dummy);
+ f.Call(&t, 0, 0, 0, 0);
CHECK_EQ(3, t.y1);
}
@@ -1185,13 +1174,12 @@ TEST(MIPS13) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
t.cvt_big_in = 0xFFFFFFFF;
t.cvt_small_in = 333;
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
- USE(dummy);
+ f.Call(&t, 0, 0, 0, 0);
CHECK_EQ(t.cvt_big_out, static_cast<double>(t.cvt_big_in));
CHECK_EQ(t.cvt_small_out, static_cast<double>(t.cvt_small_in));
@@ -1307,7 +1295,7 @@ TEST(MIPS14) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
t.round_up_in = 123.51;
t.round_down_in = 123.49;
@@ -1318,8 +1306,7 @@ TEST(MIPS14) {
t.err3_in = static_cast<double>(1) + 0xFFFFFFFF;
t.err4_in = NAN;
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
- USE(dummy);
+ f.Call(&t, 0, 0, 0, 0);
#define GET_FPU_ERR(x) (static_cast<int>(x & kFCSRFlagMask))
#define CHECK_NAN2008(x) (x & kFCSRNaN2008FlagMask)
@@ -1413,9 +1400,9 @@ TEST(seleqz_selnez) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK_EQ(1, test.a);
CHECK_EQ(0, test.b);
@@ -1443,7 +1430,7 @@ TEST(seleqz_selnez) {
test.f = tests_D[j];
test.i = inputs_S[i];
test.j = tests_S[j];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK_EQ(outputs_D[i], test.g);
CHECK_EQ(0, test.h);
CHECK_EQ(outputs_S[i], test.k);
@@ -1451,7 +1438,7 @@ TEST(seleqz_selnez) {
test.f = tests_D[j+1];
test.j = tests_S[j+1];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK_EQ(0, test.g);
CHECK_EQ(outputs_D[i], test.h);
CHECK_EQ(0, test.k);
@@ -1528,14 +1515,14 @@ TEST(min_max) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputsa[i];
test.b = inputsb[i];
test.e = inputse[i];
test.f = inputsf[i];
- CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0);
+ f.Call(&test, 0, 0, 0, 0);
CHECK_EQ(0, memcmp(&test.c, &outputsdmin[i], sizeof(test.c)));
CHECK_EQ(0, memcmp(&test.d, &outputsdmax[i], sizeof(test.d)));
@@ -1639,13 +1626,13 @@ TEST(rint_d) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
for (int j = 0; j < 4; j++) {
test.fcsr = fcsr_inputs[j];
for (int i = 0; i < kTableLength; i++) {
test.a = inputs[i];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK_EQ(test.b, outputs[j][i]);
}
}
@@ -1687,7 +1674,7 @@ TEST(sel) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
const int test_size = 3;
const int input_size = 5;
@@ -1712,13 +1699,13 @@ TEST(sel) {
test.ft = inputs_ft[i];
test.fd = tests_S[j];
test.fs = inputs_fs[i];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK_EQ(test.dd, inputs_ds[i]);
CHECK_EQ(test.fd, inputs_fs[i]);
test.dd = tests_D[j+1];
test.fd = tests_S[j+1];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK_EQ(test.dd, inputs_dt[i]);
CHECK_EQ(test.fd, inputs_ft[i]);
}
@@ -1820,13 +1807,13 @@ TEST(rint_s) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
for (int j = 0; j < 4; j++) {
test.fcsr = fcsr_inputs[j];
for (int i = 0; i < kTableLength; i++) {
test.a = inputs[i];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK_EQ(test.b, outputs[j][i]);
}
}
@@ -1846,14 +1833,10 @@ TEST(Cvt_d_uw) {
uint64_t output;
} TestStruct;
- unsigned inputs[] = {
- 0x0, 0xffffffff, 0x80000000, 0x7fffffff
- };
+ unsigned inputs[] = {0x0, 0xFFFFFFFF, 0x80000000, 0x7FFFFFFF};
- uint64_t outputs[] = {
- 0x0, 0x41efffffffe00000,
- 0x41e0000000000000, 0x41dfffffffc00000
- };
+ uint64_t outputs[] = {0x0, 0x41EFFFFFFFE00000, 0x41E0000000000000,
+ 0x41DFFFFFFFC00000};
int kTableLength = sizeof(inputs)/sizeof(inputs[0]);
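Note: the expected outputs are simply the IEEE-754 bit patterns of the exactly converted inputs, e.g. double(0xFFFFFFFF) = 4294967295.0 has bits 0x41EFFFFFFFE00000. A host-side check of all three non-zero rows:

    #include <cassert>
    #include <cstdint>
    #include <cstring>
    static uint64_t BitsOf(double d) {
      uint64_t u;
      std::memcpy(&u, &d, sizeof(u));
      return u;
    }
    int main() {
      assert(BitsOf(static_cast<double>(0xFFFFFFFFu)) == 0x41EFFFFFFFE00000);
      assert(BitsOf(static_cast<double>(0x80000000u)) == 0x41E0000000000000);
      assert(BitsOf(static_cast<double>(0x7FFFFFFFu)) == 0x41DFFFFFFFC00000);
    }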
@@ -1869,10 +1852,10 @@ TEST(Cvt_d_uw) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.input = inputs[i];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
// Check outputs
CHECK_EQ(test.output, outputs[i]);
}
@@ -1951,13 +1934,13 @@ TEST(mina_maxa) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputsa[i];
test.b = inputsb[i];
test.c = inputsc[i];
test.d = inputsd[i];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
if (i < kTableLength - 1) {
CHECK_EQ(test.resd, resd[i]);
CHECK_EQ(test.resf, resf[i]);
@@ -2032,11 +2015,11 @@ TEST(trunc_l) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
if ((test.isNaN2008 & kFCSRNaN2008FlagMask) &&
kArchVariant == kMips32r6) {
CHECK_EQ(test.c, outputsNaN2008[i]);
@@ -2113,20 +2096,20 @@ TEST(movz_movn) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.c = inputs_S[i];
test.rt = 1;
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK_EQ(test.b, test.bold);
CHECK_EQ(test.d, test.dold);
CHECK_EQ(test.b1, outputs_D[i]);
CHECK_EQ(test.d1, outputs_S[i]);
test.rt = 0;
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK_EQ(test.b, outputs_D[i]);
CHECK_EQ(test.d, outputs_S[i]);
CHECK_EQ(test.b1, test.bold1);
@@ -2215,15 +2198,15 @@ TEST(movt_movd) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK_EQ(test.dstf, outputs_S[i]);
CHECK_EQ(test.dstd, outputs_D[i]);
CHECK_EQ(test.dstf1, test.dstfold1);
CHECK_EQ(test.dstd1, test.dstdold1);
test.fcsr = 0;
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK_EQ(test.dstf, test.dstfold);
CHECK_EQ(test.dstd, test.dstdold);
CHECK_EQ(test.dstf1, outputs_S[i]);
@@ -2301,12 +2284,12 @@ TEST(cvt_w_d) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
for (int j = 0; j < 4; j++) {
test.fcsr = fcsr_inputs[j];
for (int i = 0; i < kTableLength; i++) {
test.a = inputs[i];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK_EQ(test.b, outputs[j][i]);
}
}
@@ -2369,11 +2352,11 @@ TEST(trunc_w) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
if ((test.isNaN2008 & kFCSRNaN2008FlagMask) && kArchVariant == kMips32r6) {
CHECK_EQ(test.c, outputsNaN2008[i]);
} else {
@@ -2439,11 +2422,11 @@ TEST(round_w) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
if ((test.isNaN2008 & kFCSRNaN2008FlagMask) && kArchVariant == kMips32r6) {
CHECK_EQ(test.c, outputsNaN2008[i]);
} else {
@@ -2511,11 +2494,11 @@ TEST(round_l) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
if ((test.isNaN2008 & kFCSRNaN2008FlagMask) &&
kArchVariant == kMips32r6) {
CHECK_EQ(test.c, outputsNaN2008[i]);
@@ -2585,13 +2568,13 @@ TEST(sub) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputfs_S[i];
test.b = inputft_S[i];
test.c = inputfs_D[i];
test.d = inputft_D[i];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK_EQ(test.resultS, outputs_S[i]);
CHECK_EQ(test.resultD, outputs_D[i]);
}
@@ -2665,7 +2648,7 @@ TEST(sqrt_rsqrt_recip) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
float f1;
@@ -2673,7 +2656,7 @@ TEST(sqrt_rsqrt_recip) {
test.a = inputs_S[i];
test.c = inputs_D[i];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK_EQ(test.resultS, outputs_S[i]);
CHECK_EQ(test.resultD, outputs_D[i]);
@@ -2746,11 +2729,11 @@ TEST(neg) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_S[i];
test.c = inputs_D[i];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK_EQ(test.resultS, outputs_S[i]);
CHECK_EQ(test.resultD, outputs_D[i]);
}
@@ -2804,13 +2787,13 @@ TEST(mul) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputfs_S[i];
test.b = inputft_S[i];
test.c = inputfs_D[i];
test.d = inputft_D[i];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK_EQ(test.resultS, inputfs_S[i]*inputft_S[i]);
CHECK_EQ(test.resultD, inputfs_D[i]*inputft_D[i]);
}
@@ -2861,12 +2844,12 @@ TEST(mov) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.c = inputs_S[i];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK_EQ(test.b, outputs_D[i]);
CHECK_EQ(test.d, outputs_S[i]);
}
@@ -2929,11 +2912,11 @@ TEST(floor_w) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
if ((test.isNaN2008 & kFCSRNaN2008FlagMask) && kArchVariant == kMips32r6) {
CHECK_EQ(test.c, outputsNaN2008[i]);
} else {
@@ -3001,11 +2984,11 @@ TEST(floor_l) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
if ((test.isNaN2008 & kFCSRNaN2008FlagMask) &&
kArchVariant == kMips32r6) {
CHECK_EQ(test.c, outputsNaN2008[i]);
@@ -3074,11 +3057,11 @@ TEST(ceil_w) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
if ((test.isNaN2008 & kFCSRNaN2008FlagMask) && kArchVariant == kMips32r6) {
CHECK_EQ(test.c, outputsNaN2008[i]);
} else {
@@ -3146,11 +3129,11 @@ TEST(ceil_l) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
if ((test.isNaN2008 & kFCSRNaN2008FlagMask) &&
kArchVariant == kMips32r6) {
CHECK_EQ(test.c, outputsNaN2008[i]);
@@ -3200,8 +3183,8 @@ TEST(jump_tables1) {
for (int i = 0; i < kNumCases; ++i) {
__ bind(&labels[i]);
- __ lui(v0, (values[i] >> 16) & 0xffff);
- __ ori(v0, v0, values[i] & 0xffff);
+ __ lui(v0, (values[i] >> 16) & 0xFFFF);
+ __ ori(v0, v0, values[i] & 0xFFFF);
__ b(&done);
__ nop();
}
@@ -3221,10 +3204,9 @@ TEST(jump_tables1) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F1 f = FUNCTION_CAST<F1>(code->entry());
+ auto f = GeneratedCode<F1>::FromCode(*code);
for (int i = 0; i < kNumCases; ++i) {
- int res = reinterpret_cast<int>(
- CALL_GENERATED_CODE(isolate, f, i, 0, 0, 0, 0));
+ int res = reinterpret_cast<int>(f.Call(i, 0, 0, 0, 0));
::printf("f(%d) = %d\n", i, res);
CHECK_EQ(values[i], res);
}
@@ -3252,8 +3234,8 @@ TEST(jump_tables2) {
for (int i = 0; i < kNumCases; ++i) {
__ bind(&labels[i]);
- __ lui(v0, (values[i] >> 16) & 0xffff);
- __ ori(v0, v0, values[i] & 0xffff);
+ __ lui(v0, (values[i] >> 16) & 0xFFFF);
+ __ ori(v0, v0, values[i] & 0xFFFF);
__ b(&done);
__ nop();
}
@@ -3291,10 +3273,9 @@ TEST(jump_tables2) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F1 f = FUNCTION_CAST<F1>(code->entry());
+ auto f = GeneratedCode<F1>::FromCode(*code);
for (int i = 0; i < kNumCases; ++i) {
- int res = reinterpret_cast<int>(
- CALL_GENERATED_CODE(isolate, f, i, 0, 0, 0, 0));
+ int res = reinterpret_cast<int>(f.Call(i, 0, 0, 0, 0));
::printf("f(%d) = %d\n", i, res);
CHECK_EQ(values[i], res);
}
@@ -3329,8 +3310,8 @@ TEST(jump_tables3) {
__ bind(&labels[i]);
obj = *values[i];
imm32 = reinterpret_cast<intptr_t>(obj);
- __ lui(v0, (imm32 >> 16) & 0xffff);
- __ ori(v0, v0, imm32 & 0xffff);
+ __ lui(v0, (imm32 >> 16) & 0xFFFF);
+ __ ori(v0, v0, imm32 & 0xFFFF);
__ b(&done);
__ nop();
}
@@ -3368,10 +3349,9 @@ TEST(jump_tables3) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F1 f = FUNCTION_CAST<F1>(code->entry());
+ auto f = GeneratedCode<F1>::FromCode(*code);
for (int i = 0; i < kNumCases; ++i) {
- Handle<Object> result(
- CALL_GENERATED_CODE(isolate, f, i, 0, 0, 0, 0), isolate);
+ Handle<Object> result(f.Call(i, 0, 0, 0, 0), isolate);
#ifdef OBJECT_PRINT
::printf("f(%d) = ", i);
result->Print(std::cout);
@@ -3416,11 +3396,10 @@ TEST(BITSWAP) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
t.r1 = 0x781A15C3;
t.r2 = 0x8B71FCDE;
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
- USE(dummy);
+ f.Call(&t, 0, 0, 0, 0);
CHECK_EQ(static_cast<int32_t>(0x1E58A8C3), t.r1);
CHECK_EQ(static_cast<int32_t>(0xD18E3F7B), t.r2);
@@ -3551,7 +3530,7 @@ TEST(class_fmt) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
t.dSignalingNan = std::numeric_limits<double>::signaling_NaN();
t.dQuietNan = std::numeric_limits<double>::quiet_NaN();
@@ -3576,8 +3555,7 @@ TEST(class_fmt) {
t.fPosSubnorm = FLT_MIN / 20.0;
t.fPosZero = +0.0;
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
- USE(dummy);
+ f.Call(&t, 0, 0, 0, 0);
// Expected double results.
CHECK_EQ(bit_cast<int64_t>(t.dSignalingNan), 0x001);
CHECK_EQ(bit_cast<int64_t>(t.dQuietNan), 0x002);
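Note: class.fmt returns a one-hot classification bitmask rather than a boolean, which is why the checks compare against distinct powers of two: bit 0 flags a signalling NaN and bit 1 a quiet NaN (the 0x001 and 0x002 above), with further bits for infinity, normal, subnormal and zero in each sign. Sketched as an enum for the two rows shown, assuming the MIPS32r6 encoding:

    enum ClassFmtBits : int64_t {
      kSignallingNan = 1 << 0,  // matches the 0x001 check
      kQuietNan      = 1 << 1,  // matches the 0x002 check
      // negative/positive infinity, normal, subnormal and zero follow
    };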
@@ -3644,37 +3622,37 @@ TEST(ABS) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
test.a = -2.0;
test.b = -2.0;
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK_EQ(test.a, 2.0);
CHECK_EQ(test.b, 2.0);
test.a = 2.0;
test.b = 2.0;
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK_EQ(test.a, 2.0);
CHECK_EQ(test.b, 2.0);
// Testing biggest positive number
test.a = std::numeric_limits<double>::max();
test.b = std::numeric_limits<float>::max();
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK_EQ(test.a, std::numeric_limits<double>::max());
CHECK_EQ(test.b, std::numeric_limits<float>::max());
// Testing smallest negative number
test.a = -std::numeric_limits<double>::max(); // lowest()
test.b = -std::numeric_limits<float>::max(); // lowest()
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK_EQ(test.a, std::numeric_limits<double>::max());
CHECK_EQ(test.b, std::numeric_limits<float>::max());
// Testing smallest positive number
test.a = -std::numeric_limits<double>::min();
test.b = -std::numeric_limits<float>::min();
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK_EQ(test.a, std::numeric_limits<double>::min());
CHECK_EQ(test.b, std::numeric_limits<float>::min());
@@ -3683,7 +3661,7 @@ TEST(ABS) {
/ std::numeric_limits<double>::min();
test.b = -std::numeric_limits<float>::max()
/ std::numeric_limits<float>::min();
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK_EQ(test.a, std::numeric_limits<double>::max()
/ std::numeric_limits<double>::min());
CHECK_EQ(test.b, std::numeric_limits<float>::max()
@@ -3691,13 +3669,13 @@ TEST(ABS) {
test.a = std::numeric_limits<double>::quiet_NaN();
test.b = std::numeric_limits<float>::quiet_NaN();
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK(std::isnan(test.a));
CHECK(std::isnan(test.b));
test.a = std::numeric_limits<double>::signaling_NaN();
test.b = std::numeric_limits<float>::signaling_NaN();
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK(std::isnan(test.a));
CHECK(std::isnan(test.b));
}
@@ -3738,12 +3716,12 @@ TEST(ADD_FMT) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
test.a = 2.0;
test.b = 3.0;
test.fa = 2.0;
test.fb = 3.0;
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK_EQ(test.c, 5.0);
CHECK_EQ(test.fc, 5.0);
@@ -3751,7 +3729,7 @@ TEST(ADD_FMT) {
test.b = -std::numeric_limits<double>::max(); // lowest()
test.fa = std::numeric_limits<float>::max();
test.fb = -std::numeric_limits<float>::max(); // lowest()
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK_EQ(test.c, 0.0);
CHECK_EQ(test.fc, 0.0);
@@ -3759,7 +3737,7 @@ TEST(ADD_FMT) {
test.b = std::numeric_limits<double>::max();
test.fa = std::numeric_limits<float>::max();
test.fb = std::numeric_limits<float>::max();
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK(!std::isfinite(test.c));
CHECK(!std::isfinite(test.fc));
@@ -3767,7 +3745,7 @@ TEST(ADD_FMT) {
test.b = std::numeric_limits<double>::signaling_NaN();
test.fa = 5.0;
test.fb = std::numeric_limits<float>::signaling_NaN();
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK(std::isnan(test.c));
CHECK(std::isnan(test.fc));
}
@@ -3893,12 +3871,12 @@ TEST(C_COND_FMT) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
test.dOp1 = 2.0;
test.dOp2 = 3.0;
test.fOp1 = 2.0;
test.fOp2 = 3.0;
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK_EQ(test.dF, 0U);
CHECK_EQ(test.dUn, 0U);
CHECK_EQ(test.dEq, 0U);
@@ -3920,7 +3898,7 @@ TEST(C_COND_FMT) {
test.dOp2 = std::numeric_limits<double>::min();
test.fOp1 = std::numeric_limits<float>::min();
test.fOp2 = -std::numeric_limits<float>::max(); // lowest()
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK_EQ(test.dF, 0U);
CHECK_EQ(test.dUn, 0U);
CHECK_EQ(test.dEq, 0U);
@@ -3942,7 +3920,7 @@ TEST(C_COND_FMT) {
test.dOp2 = -std::numeric_limits<double>::max(); // lowest()
test.fOp1 = std::numeric_limits<float>::max();
test.fOp2 = std::numeric_limits<float>::max();
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK_EQ(test.dF, 0U);
CHECK_EQ(test.dUn, 0U);
CHECK_EQ(test.dEq, 1U);
@@ -3964,7 +3942,7 @@ TEST(C_COND_FMT) {
test.dOp2 = 0.0;
test.fOp1 = std::numeric_limits<float>::quiet_NaN();
test.fOp2 = 0.0;
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK_EQ(test.dF, 0U);
CHECK_EQ(test.dUn, 1U);
CHECK_EQ(test.dEq, 0U);
@@ -4094,7 +4072,7 @@ TEST(CMP_COND_FMT) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
uint64_t dTrue = 0xFFFFFFFFFFFFFFFF;
uint64_t dFalse = 0x0000000000000000;
uint32_t fTrue = 0xFFFFFFFF;
@@ -4104,7 +4082,7 @@ TEST(CMP_COND_FMT) {
test.dOp2 = 3.0;
test.fOp1 = 2.0;
test.fOp2 = 3.0;
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK_EQ(bit_cast<uint64_t>(test.dF), dFalse);
CHECK_EQ(bit_cast<uint64_t>(test.dUn), dFalse);
CHECK_EQ(bit_cast<uint64_t>(test.dEq), dFalse);
@@ -4129,7 +4107,7 @@ TEST(CMP_COND_FMT) {
test.dOp2 = std::numeric_limits<double>::min();
test.fOp1 = std::numeric_limits<float>::min();
test.fOp2 = -std::numeric_limits<float>::max(); // lowest()
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK_EQ(bit_cast<uint64_t>(test.dF), dFalse);
CHECK_EQ(bit_cast<uint64_t>(test.dUn), dFalse);
CHECK_EQ(bit_cast<uint64_t>(test.dEq), dFalse);
@@ -4154,7 +4132,7 @@ TEST(CMP_COND_FMT) {
test.dOp2 = -std::numeric_limits<double>::max(); // lowest()
test.fOp1 = std::numeric_limits<float>::max();
test.fOp2 = std::numeric_limits<float>::max();
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK_EQ(bit_cast<uint64_t>(test.dF), dFalse);
CHECK_EQ(bit_cast<uint64_t>(test.dUn), dFalse);
CHECK_EQ(bit_cast<uint64_t>(test.dEq), dTrue);
@@ -4179,7 +4157,7 @@ TEST(CMP_COND_FMT) {
test.dOp2 = 0.0;
test.fOp1 = std::numeric_limits<float>::quiet_NaN();
test.fOp2 = 0.0;
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK_EQ(bit_cast<uint64_t>(test.dF), dFalse);
CHECK_EQ(bit_cast<uint64_t>(test.dUn), dTrue);
CHECK_EQ(bit_cast<uint64_t>(test.dEq), dFalse);
@@ -4281,7 +4259,7 @@ TEST(CVT) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
test.cvt_d_s_in = -0.51;
test.cvt_d_w_in = -1;
@@ -4294,7 +4272,7 @@ TEST(CVT) {
test.cvt_w_s_in = -0.51;
test.cvt_w_d_in = -0.51;
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK_EQ(test.cvt_d_s_out, static_cast<double>(test.cvt_d_s_in));
CHECK_EQ(test.cvt_d_w_out, static_cast<double>(test.cvt_d_w_in));
if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
@@ -4325,7 +4303,7 @@ TEST(CVT) {
test.cvt_w_s_in = 0.49;
test.cvt_w_d_in = 0.49;
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK_EQ(test.cvt_d_s_out, static_cast<double>(test.cvt_d_s_in));
CHECK_EQ(test.cvt_d_w_out, static_cast<double>(test.cvt_d_w_in));
if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
@@ -4356,7 +4334,7 @@ TEST(CVT) {
test.cvt_w_s_in = std::numeric_limits<float>::max();
test.cvt_w_d_in = std::numeric_limits<double>::max();
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK_EQ(test.cvt_d_s_out, static_cast<double>(test.cvt_d_s_in));
CHECK_EQ(test.cvt_d_w_out, static_cast<double>(test.cvt_d_w_in));
if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
@@ -4388,7 +4366,7 @@ TEST(CVT) {
test.cvt_w_s_in = -std::numeric_limits<float>::max(); // lowest()
test.cvt_w_d_in = -std::numeric_limits<double>::max(); // lowest()
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK_EQ(test.cvt_d_s_out, static_cast<double>(test.cvt_d_s_in));
CHECK_EQ(test.cvt_d_w_out, static_cast<double>(test.cvt_d_w_in));
if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
@@ -4427,7 +4405,7 @@ TEST(CVT) {
test.cvt_w_s_in = std::numeric_limits<float>::min();
test.cvt_w_d_in = std::numeric_limits<double>::min();
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK_EQ(test.cvt_d_s_out, static_cast<double>(test.cvt_d_s_in));
CHECK_EQ(test.cvt_d_w_out, static_cast<double>(test.cvt_d_w_in));
if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
@@ -4495,9 +4473,9 @@ TEST(DIV_FMT) {
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
const int test_size = 3;
@@ -4538,7 +4516,7 @@ TEST(DIV_FMT) {
test.fOp1 = fOp1[i];
test.fOp2 = fOp2[i];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK_EQ(test.dRes, dRes[i]);
CHECK_EQ(test.fRes, fRes[i]);
}
@@ -4548,7 +4526,7 @@ TEST(DIV_FMT) {
test.fOp1 = FLT_MAX;
test.fOp2 = -0.0;
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK(!std::isfinite(test.dRes));
CHECK(!std::isfinite(test.fRes));
@@ -4557,7 +4535,7 @@ TEST(DIV_FMT) {
test.fOp1 = 0.0;
test.fOp2 = -0.0;
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK(std::isnan(test.dRes));
CHECK(std::isnan(test.fRes));
@@ -4566,7 +4544,7 @@ TEST(DIV_FMT) {
test.fOp1 = std::numeric_limits<float>::quiet_NaN();
test.fOp2 = -5.0;
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ (f.Call(&test, 0, 0, 0, 0));
CHECK(std::isnan(test.dRes));
CHECK(std::isnan(test.fRes));
}
@@ -4588,10 +4566,10 @@ uint32_t run_align(uint32_t rs_value, uint32_t rt_value, uint8_t bp) {
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F2 f = FUNCTION_CAST<F2>(code->entry());
+ auto f = GeneratedCode<F2>::FromCode(*code);
- uint32_t res = reinterpret_cast<uint32_t>(CALL_GENERATED_CODE(
- isolate, f, rs_value, rt_value, 0, 0, 0));
+ uint32_t res =
+ reinterpret_cast<uint32_t>(f.Call(rs_value, rt_value, 0, 0, 0));
return res;
}
@@ -4608,13 +4586,15 @@ TEST(r6_align) {
uint32_t expected_res;
};
+ // clang-format off
struct TestCaseAlign tc[] = {
- // rs_value, rt_value, bp, expected_res
- { 0x11223344, 0xaabbccdd, 0, 0xaabbccdd },
- { 0x11223344, 0xaabbccdd, 1, 0xbbccdd11 },
- { 0x11223344, 0xaabbccdd, 2, 0xccdd1122 },
- { 0x11223344, 0xaabbccdd, 3, 0xdd112233 },
+ // rs_value, rt_value, bp, expected_res
+ {0x11223344, 0xAABBCCDD, 0, 0xAABBCCDD},
+ {0x11223344, 0xAABBCCDD, 1, 0xBBCCDD11},
+ {0x11223344, 0xAABBCCDD, 2, 0xCCDD1122},
+ {0x11223344, 0xAABBCCDD, 3, 0xDD112233},
};
+ // clang-format on
size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseAlign);
for (size_t i = 0; i < nr_test_cases; ++i) {
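Note: each expected_res row in the reformatted table above is a byte-granular concatenation of rt and rs: for bp in 1..3 the r6 ALIGN result is (rt << 8*bp) | (rs >> (32 - 8*bp)), and bp == 0 simply returns rt. A helper that reproduces all four rows:

    #include <cstdint>
    // align(0x11223344, 0xAABBCCDD, 1) == 0xBBCCDD11, as in the second row.
    uint32_t align(uint32_t rs, uint32_t rt, int bp) {
      if (bp == 0) return rt;
      return (rt << (8 * bp)) | (rs >> (32 - 8 * bp));
    }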
@@ -4642,11 +4622,10 @@ uint32_t run_aluipc(int16_t offset) {
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F2 f = FUNCTION_CAST<F2>(code->entry());
- PC = (uint32_t) f; // Set the program counter.
+ auto f = GeneratedCode<F2>::FromCode(*code);
+ PC = (uint32_t)code->entry(); // Set the program counter.
- uint32_t res = reinterpret_cast<uint32_t>(
- CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ uint32_t res = reinterpret_cast<uint32_t>(f.Call(0, 0, 0, 0, 0));
return res;
}
@@ -4697,11 +4676,10 @@ uint32_t run_auipc(int16_t offset) {
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F2 f = FUNCTION_CAST<F2>(code->entry());
- PC = (uint32_t) f; // Set the program counter.
+ auto f = GeneratedCode<F2>::FromCode(*code);
+ PC = (uint32_t)code->entry(); // Set the program counter.
- uint32_t res = reinterpret_cast<uint32_t>(
- CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ uint32_t res = reinterpret_cast<uint32_t>(f.Call(0, 0, 0, 0, 0));
return res;
}
@@ -4744,24 +4722,24 @@ uint32_t run_lwpc(int offset) {
v8::internal::CodeObjectRequired::kYes);
// 256k instructions; 2^8k
- // addiu t7, t0, 0xffff; (0x250fffff)
+ // addiu t7, t0, 0xFFFF; (0x250FFFFF)
// ...
- // addiu t4, t0, 0x0000; (0x250c0000)
+ // addiu t4, t0, 0x0000; (0x250C0000)
uint32_t addiu_start_1 = 0x25000000;
- for (int32_t i = 0xfffff; i >= 0xc0000; --i) {
+ for (int32_t i = 0xFFFFF; i >= 0xC0000; --i) {
uint32_t addiu_new = addiu_start_1 + i;
__ dd(addiu_new);
}
- __ lwpc(t8, offset); // offset 0; 0xef080000 (t8 register)
+ __ lwpc(t8, offset); // offset 0; 0xEF080000 (t8 register)
__ mov(v0, t8);
// 256k instructions; 2^8k
// addiu t0, t0, 0x0000; (0x25080000)
// ...
- // addiu t3, t0, 0xffff; (0x250bffff)
+ // addiu t3, t0, 0xFFFF; (0x250BFFFF)
uint32_t addiu_start_2 = 0x25000000;
- for (int32_t i = 0x80000; i <= 0xbffff; ++i) {
+ for (int32_t i = 0x80000; i <= 0xBFFFF; ++i) {
uint32_t addiu_new = addiu_start_2 + i;
__ dd(addiu_new);
}
@@ -4774,10 +4752,9 @@ uint32_t run_lwpc(int offset) {
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F2 f = FUNCTION_CAST<F2>(code->entry());
+ auto f = GeneratedCode<F2>::FromCode(*code);
- uint32_t res = reinterpret_cast<uint32_t>(
- CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ uint32_t res = reinterpret_cast<uint32_t>(f.Call(0, 0, 0, 0, 0));
return res;
}
@@ -4792,17 +4769,19 @@ TEST(r6_lwpc) {
uint32_t expected_res;
};
+ // clang-format off
struct TestCaseLwpc tc[] = {
// offset, expected_res
- { -262144, 0x250fffff }, // offset 0x40000
- { -4, 0x250c0003 },
- { -1, 0x250c0000 },
- { 0, 0xef080000 },
+ { -262144, 0x250FFFFF }, // offset 0x40000
+ { -4, 0x250C0003 },
+ { -1, 0x250C0000 },
+ { 0, 0xEF080000 },
{ 1, 0x03001025 }, // mov(v0, t8)
{ 2, 0x25080000 },
{ 4, 0x25080002 },
- { 262143, 0x250bfffd }, // offset 0x3ffff
+ { 262143, 0x250BFFFD }, // offset 0x3FFFF
};
+ // clang-format on
size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseLwpc);
for (size_t i = 0; i < nr_test_cases; ++i) {
@@ -4859,10 +4838,9 @@ uint32_t run_jic(int16_t offset) {
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F2 f = FUNCTION_CAST<F2>(code->entry());
+ auto f = GeneratedCode<F2>::FromCode(*code);
- uint32_t res = reinterpret_cast<uint32_t>(
- CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ uint32_t res = reinterpret_cast<uint32_t>(f.Call(0, 0, 0, 0, 0));
return res;
}
@@ -4932,10 +4910,9 @@ uint64_t run_beqzc(int32_t value, int32_t offset) {
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F2 f = FUNCTION_CAST<F2>(code->entry());
+ auto f = GeneratedCode<F2>::FromCode(*code);
- uint32_t res = reinterpret_cast<uint32_t>(
- CALL_GENERATED_CODE(isolate, f, value, 0, 0, 0, 0));
+ uint32_t res = reinterpret_cast<uint32_t>(f.Call(value, 0, 0, 0, 0));
return res;
}
@@ -4951,14 +4928,16 @@ TEST(r6_beqzc) {
uint32_t expected_res;
};
+ // clang-format off
struct TestCaseBeqzc tc[] = {
// value, offset, expected_res
{ 0x0, -8, 0x66 },
{ 0x0, 0, 0x3334 },
{ 0x0, 1, 0x3333 },
- { 0xabc, 1, 0x3334 },
+ { 0xABC, 1, 0x3334 },
{ 0x0, 4, 0x2033 },
};
+ // clang-format on
size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBeqzc);
for (size_t i = 0; i < nr_test_cases; ++i) {
@@ -4970,12 +4949,12 @@ TEST(r6_beqzc) {
void load_elements_of_vector(MacroAssembler& assm, const uint64_t elements[],
MSARegister w, Register t0, Register t1) {
- __ li(t0, static_cast<uint32_t>(elements[0] & 0xffffffff));
- __ li(t1, static_cast<uint32_t>((elements[0] >> 32) & 0xffffffff));
+ __ li(t0, static_cast<uint32_t>(elements[0] & 0xFFFFFFFF));
+ __ li(t1, static_cast<uint32_t>((elements[0] >> 32) & 0xFFFFFFFF));
__ insert_w(w, 0, t0);
__ insert_w(w, 1, t1);
- __ li(t0, static_cast<uint32_t>(elements[1] & 0xffffffff));
- __ li(t1, static_cast<uint32_t>((elements[1] >> 32) & 0xffffffff));
+ __ li(t0, static_cast<uint32_t>(elements[1] & 0xFFFFFFFF));
+ __ li(t1, static_cast<uint32_t>((elements[1] >> 32) & 0xFFFFFFFF));
__ insert_w(w, 2, t0);
__ insert_w(w, 3, t1);
}
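
load_elements_of_vector moves each 64-bit element into an MSA register as two 32-bit halves via li and insert_w. A standalone sketch of just the split arithmetic (the sample value is taken from this file; the program is ours):

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint64_t element = 0x20B9CC4F1A83E0C5;
      uint32_t lo = static_cast<uint32_t>(element & 0xFFFFFFFF);          // lane 0
      uint32_t hi = static_cast<uint32_t>((element >> 32) & 0xFFFFFFFF);  // lane 1
      std::printf("lo=%08X hi=%08X\n", static_cast<unsigned>(lo),
                  static_cast<unsigned>(hi));  // lo=1A83E0C5 hi=20B9CC4F
      return 0;
    }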
@@ -5012,7 +4991,7 @@ void run_bz_bnz(TestCaseMsaBranch* input, Branch GenerateBranch,
uint64_t wd_lo;
uint64_t wd_hi;
} T;
- T t = {0x20b9cc4f1a83e0c5, 0xa27e1b5f2f5bb18a, 0x0000000000000000,
+ T t = {0x20B9CC4F1A83E0C5, 0xA27E1B5F2F5BB18A, 0x0000000000000000,
0x0000000000000000};
msa_reg_t res;
Label do_not_move_w0_to_w2;
@@ -5036,9 +5015,9 @@ void run_bz_bnz(TestCaseMsaBranch* input, Branch GenerateBranch,
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
- (CALL_GENERATED_CODE(isolate, f, &res, 0, 0, 0, 0));
+ (f.Call(&res, 0, 0, 0, 0));
if (branched) {
CHECK_EQ(t.wd_lo, res.d[0]);
CHECK_EQ(t.wd_hi, res.d[1]);
@@ -5053,7 +5032,7 @@ TEST(MSA_bz_bnz) {
return;
TestCaseMsaBranch tz_v[] = {
- {0x0, 0x0}, {0xabc, 0x0}, {0x0, 0xabc}, {0xabc, 0xabc}};
+ {0x0, 0x0}, {0xABC, 0x0}, {0x0, 0xABC}, {0xABC, 0xABC}};
for (unsigned i = 0; i < arraysize(tz_v); ++i) {
run_bz_bnz(
&tz_v[i],
@@ -5077,32 +5056,32 @@ TEST(MSA_bz_bnz) {
j != lanes); \
}
TestCaseMsaBranch tz_b[] = {{0x0, 0x0},
- {0xbc0000, 0x0},
- {0x0, 0xab000000000000cd},
- {0x123456789abcdef0, 0xaaaaaaaaaaaaaaaa}};
+ {0xBC0000, 0x0},
+ {0x0, 0xAB000000000000CD},
+ {0x123456789ABCDEF0, 0xAAAAAAAAAAAAAAAA}};
TEST_BZ_DF(tz_b, kMSALanesByte, bz_b, int8_t)
TestCaseMsaBranch tz_h[] = {{0x0, 0x0},
- {0xbcde0000, 0x0},
- {0x0, 0xabcd00000000abcd},
- {0x123456789abcdef0, 0xaaaaaaaaaaaaaaaa}};
+ {0xBCDE0000, 0x0},
+ {0x0, 0xABCD00000000ABCD},
+ {0x123456789ABCDEF0, 0xAAAAAAAAAAAAAAAA}};
TEST_BZ_DF(tz_h, kMSALanesHalf, bz_h, int16_t)
TestCaseMsaBranch tz_w[] = {{0x0, 0x0},
- {0xbcde123400000000, 0x0},
- {0x0, 0x000000001234abcd},
- {0x123456789abcdef0, 0xaaaaaaaaaaaaaaaa}};
+ {0xBCDE123400000000, 0x0},
+ {0x0, 0x000000001234ABCD},
+ {0x123456789ABCDEF0, 0xAAAAAAAAAAAAAAAA}};
TEST_BZ_DF(tz_w, kMSALanesWord, bz_w, int32_t)
TestCaseMsaBranch tz_d[] = {{0x0, 0x0},
- {0xbcde0000, 0x0},
- {0x0, 0xabcd00000000abcd},
- {0x123456789abcdef0, 0xaaaaaaaaaaaaaaaa}};
+ {0xBCDE0000, 0x0},
+ {0x0, 0xABCD00000000ABCD},
+ {0x123456789ABCDEF0, 0xAAAAAAAAAAAAAAAA}};
TEST_BZ_DF(tz_d, kMSALanesDword, bz_d, int64_t)
#undef TEST_BZ_DF
TestCaseMsaBranch tnz_v[] = {
- {0x0, 0x0}, {0xabc, 0x0}, {0x0, 0xabc}, {0xabc, 0xabc}};
+ {0x0, 0x0}, {0xABC, 0x0}, {0x0, 0xABC}, {0xABC, 0xABC}};
for (unsigned i = 0; i < arraysize(tnz_v); ++i) {
run_bz_bnz(&tnz_v[i],
[](MacroAssembler& assm, Label& br_target) {
@@ -5127,27 +5106,27 @@ TEST(MSA_bz_bnz) {
j == lanes); \
}
TestCaseMsaBranch tnz_b[] = {{0x0, 0x0},
- {0xbc0000, 0x0},
- {0x0, 0xab000000000000cd},
- {0x123456789abcdef0, 0xaaaaaaaaaaaaaaaa}};
+ {0xBC0000, 0x0},
+ {0x0, 0xAB000000000000CD},
+ {0x123456789ABCDEF0, 0xAAAAAAAAAAAAAAAA}};
TEST_BNZ_DF(tnz_b, 16, bnz_b, int8_t)
TestCaseMsaBranch tnz_h[] = {{0x0, 0x0},
- {0xbcde0000, 0x0},
- {0x0, 0xabcd00000000abcd},
- {0x123456789abcdef0, 0xaaaaaaaaaaaaaaaa}};
+ {0xBCDE0000, 0x0},
+ {0x0, 0xABCD00000000ABCD},
+ {0x123456789ABCDEF0, 0xAAAAAAAAAAAAAAAA}};
TEST_BNZ_DF(tnz_h, 8, bnz_h, int16_t)
TestCaseMsaBranch tnz_w[] = {{0x0, 0x0},
- {0xbcde123400000000, 0x0},
- {0x0, 0x000000001234abcd},
- {0x123456789abcdef0, 0xaaaaaaaaaaaaaaaa}};
+ {0xBCDE123400000000, 0x0},
+ {0x0, 0x000000001234ABCD},
+ {0x123456789ABCDEF0, 0xAAAAAAAAAAAAAAAA}};
TEST_BNZ_DF(tnz_w, 4, bnz_w, int32_t)
TestCaseMsaBranch tnz_d[] = {{0x0, 0x0},
- {0xbcde0000, 0x0},
- {0x0, 0xabcd00000000abcd},
- {0x123456789abcdef0, 0xaaaaaaaaaaaaaaaa}};
+ {0xBCDE0000, 0x0},
+ {0x0, 0xABCD00000000ABCD},
+ {0x123456789ABCDEF0, 0xAAAAAAAAAAAAAAAA}};
TEST_BNZ_DF(tnz_d, 2, bnz_d, int64_t)
#undef TEST_BNZ_DF
}
@@ -5210,10 +5189,9 @@ uint32_t run_jialc(int16_t offset) {
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F2 f = FUNCTION_CAST<F2>(code->entry());
+ auto f = GeneratedCode<F2>::FromCode(*code);
- uint32_t res = reinterpret_cast<uint32_t>(
- CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ uint32_t res = reinterpret_cast<uint32_t>(f.Call(0, 0, 0, 0, 0));
return res;
}
@@ -5260,11 +5238,10 @@ static uint32_t run_addiupc(int32_t imm19) {
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F2 f = FUNCTION_CAST<F2>(code->entry());
- PC = (uint32_t) f; // Set the program counter.
+ auto f = GeneratedCode<F2>::FromCode(*code);
+ PC = (uint32_t)code->entry(); // Set the program counter.
- uint32_t rs = reinterpret_cast<uint32_t>(
- CALL_GENERATED_CODE(isolate, f, imm19, 0, 0, 0, 0));
+ uint32_t rs = reinterpret_cast<uint32_t>(f.Call(imm19, 0, 0, 0, 0));
return rs;
}
@@ -5344,10 +5321,9 @@ int32_t run_bc(int32_t offset) {
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F2 f = FUNCTION_CAST<F2>(code->entry());
+ auto f = GeneratedCode<F2>::FromCode(*code);
- int32_t res = reinterpret_cast<int32_t>(
- CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ int32_t res = reinterpret_cast<int32_t>(f.Call(0, 0, 0, 0, 0));
return res;
}
@@ -5427,10 +5403,9 @@ int32_t run_balc(int32_t offset) {
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F2 f = FUNCTION_CAST<F2>(code->entry());
+ auto f = GeneratedCode<F2>::FromCode(*code);
- int32_t res = reinterpret_cast<int32_t>(
- CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ int32_t res = reinterpret_cast<int32_t>(f.Call(0, 0, 0, 0, 0));
return res;
}
@@ -5453,11 +5428,9 @@ uint32_t run_aui(uint32_t rs, uint16_t offset) {
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F2 f = FUNCTION_CAST<F2>(code->entry());
+ auto f = GeneratedCode<F2>::FromCode(*code);
- uint32_t res =
- reinterpret_cast<uint32_t>
- (CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ uint32_t res = reinterpret_cast<uint32_t>(f.Call(0, 0, 0, 0, 0));
return res;
}
@@ -5474,15 +5447,15 @@ TEST(r6_aui) {
};
struct TestCaseAui tc[] = {
- // input, offset, result
- {0xfffeffff, 1, 0xffffffff},
- {0xffffffff, 0, 0xffffffff},
- {0, 0xffff, 0xffff0000},
- {0x0008ffff, 0xfff7, 0xffffffff},
- {32767, 32767, 0x7fff7fff},
- // overflow cases
- {0xffffffff, 0x1, 0x0000ffff},
- {0xffffffff, 0xffff, 0xfffeffff},
+ // input, offset, result
+ {0xFFFEFFFF, 1, 0xFFFFFFFF},
+ {0xFFFFFFFF, 0, 0xFFFFFFFF},
+ {0, 0xFFFF, 0xFFFF0000},
+ {0x0008FFFF, 0xFFF7, 0xFFFFFFFF},
+ {32767, 32767, 0x7FFF7FFF},
+ // overflow cases
+ {0xFFFFFFFF, 0x1, 0x0000FFFF},
+ {0xFFFFFFFF, 0xFFFF, 0xFFFEFFFF},
};
size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseAui);
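
All r6_aui rows, including the two marked as overflow cases, are consistent with plain 32-bit wraparound addition of the immediate shifted into the upper half. A hedged model (name ours) that reproduces every row above:

    #include <cstdint>

    uint32_t AuiModel(uint32_t rs, uint16_t imm) {
      return rs + (static_cast<uint32_t>(imm) << 16);  // wraps modulo 2^32
    }
    // AuiModel(0xFFFFFFFF, 0x0001) == 0x0000FFFF  (first overflow row)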
@@ -5545,10 +5518,9 @@ uint32_t run_bal(int16_t offset) {
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F2 f = FUNCTION_CAST<F2>(code->entry());
+ auto f = GeneratedCode<F2>::FromCode(*code);
- uint32_t res = reinterpret_cast<uint32_t>(
- CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ uint32_t res = reinterpret_cast<uint32_t>(f.Call(0, 0, 0, 0, 0));
return res;
}
@@ -5598,10 +5570,9 @@ TEST(Trampoline) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F2 f = FUNCTION_CAST<F2>(code->entry());
+ auto f = GeneratedCode<F2>::FromCode(*code);
- int32_t res = reinterpret_cast<int32_t>(
- CALL_GENERATED_CODE(isolate, f, 42, 42, 0, 0, 0));
+ int32_t res = reinterpret_cast<int32_t>(f.Call(42, 42, 0, 0, 0));
CHECK_EQ(0, res);
}
@@ -5666,7 +5637,7 @@ void helper_madd_msub_maddf_msubf(F func) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
const size_t kTableLength = sizeof(test_cases) / sizeof(TestCaseMaddMsub<T>);
TestCaseMaddMsub<T> tc;
@@ -5675,7 +5646,7 @@ void helper_madd_msub_maddf_msubf(F func) {
tc.fs = test_cases[i].fs;
tc.ft = test_cases[i].ft;
- (CALL_GENERATED_CODE(isolate, f, &tc, 0, 0, 0, 0));
+ (f.Call(&tc, 0, 0, 0, 0));
T res_add = 0;
T res_sub = 0;
@@ -5753,10 +5724,9 @@ uint32_t run_Subu(uint32_t imm, int32_t num_instr) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F2 f = FUNCTION_CAST<F2>(code->entry());
+ auto f = GeneratedCode<F2>::FromCode(*code);
- uint32_t res = reinterpret_cast<uint32_t>(
- CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ uint32_t res = reinterpret_cast<uint32_t>(f.Call(0, 0, 0, 0, 0));
return res;
}
@@ -5777,28 +5747,28 @@ TEST(Subu) {
// 0 - imm = expected_res
struct TestCaseSubu tc[] = {
// imm, expected_res, num_instr
- {0xffff8000, 0x8000, 2}, // min_int16
+ {0xFFFF8000, 0x8000, 2}, // min_int16
// Generates ori + addu
// We can't use a single addiu because -min_int16 > max_int16, so a
// register is needed. We could load min_int16 into the at register with
// addiu and then subtract it with subu, but ori + addu is used instead
// because -min_int16 fits in an ori immediate.
- {0x8000, 0xffff8000, 1}, // max_int16 + 1
+ {0x8000, 0xFFFF8000, 1}, // max_int16 + 1
// Generates addiu
// max_int16 + 1 is not int16 but -(max_int16 + 1) is, just use addiu.
- {0xffff7fff, 0x8001, 2}, // min_int16 - 1
+ {0xFFFF7FFF, 0x8001, 2}, // min_int16 - 1
// Generates ori + addu
// Loading this value into at would take two instructions plus one more
// to subtract (lui + ori + subu), but -value can be loaded into at with
// a single ori and then added with addu.
- {0x8001, 0xffff7fff, 2}, // max_int16 + 2
+ {0x8001, 0xFFFF7FFF, 2}, // max_int16 + 2
// Generates ori + subu
// Not an int16 but a valid uint16: load the value into at with ori and
// subtract with subu.
- {0x00010000, 0xffff0000, 2},
+ {0x00010000, 0xFFFF0000, 2},
// Generates lui + subu
// Load value using lui to at and subtract with subu.
- {0x00010001, 0xfffeffff, 3},
+ {0x00010001, 0xFFFEFFFF, 3},
// Generates lui + ori + subu
// We have to generate three instructions in this case.
};
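
Taken together, the per-case comments describe an instruction-selection rule for materializing 0 - imm. The function below is our reading of that rule; it reproduces the num_instr column for all six rows but is not V8's actual Subu() lowering:

    #include <cstdint>

    int ExpectedSubuInstrCount(uint32_t imm) {
      int32_t neg = -static_cast<int32_t>(imm);
      if (neg >= -32768 && neg <= 32767) return 1;         // addiu rd, rs, -imm
      if (static_cast<uint32_t>(neg) <= 0xFFFF) return 2;  // ori at, -imm; addu
      if (imm <= 0xFFFF) return 2;                         // ori at, imm; subu
      if ((imm & 0xFFFF) == 0) return 2;                   // lui at, imm>>16; subu
      return 3;                                            // lui + ori + subu
    }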
@@ -5832,7 +5802,7 @@ TEST(MSA_fill_copy) {
{
CpuFeatureScope fscope(&assm, MIPS_SIMD);
- __ li(t0, 0xa512b683);
+ __ li(t0, 0xA512B683);
__ fill_b(w0, t0);
__ fill_h(w2, t0);
@@ -5862,17 +5832,16 @@ TEST(MSA_fill_copy) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
- USE(dummy);
+ f.Call(&t, 0, 0, 0, 0);
CHECK_EQ(0x83u, t.u8);
- CHECK_EQ(0xb683u, t.u16);
- CHECK_EQ(0xa512b683u, t.u32);
- CHECK_EQ(0xffffff83u, t.s8);
- CHECK_EQ(0xffffb683u, t.s16);
- CHECK_EQ(0xa512b683u, t.s32);
+ CHECK_EQ(0xB683u, t.u16);
+ CHECK_EQ(0xA512B683u, t.u32);
+ CHECK_EQ(0xFFFFFF83u, t.s8);
+ CHECK_EQ(0xFFFFB683u, t.s16);
+ CHECK_EQ(0xA512B683u, t.s32);
}
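
The CHECK_EQ pairs above capture the difference between reading a lane back with copy_u (zero-extend) and copy_s (sign-extend). A hedged standalone model for the byte lane, with helper names of our choosing:

    #include <cstdint>

    uint32_t CopyU8(uint32_t filled) { return filled & 0xFF; }  // 0x83
    uint32_t CopyS8(uint32_t filled) {
      return static_cast<uint32_t>(
          static_cast<int32_t>(static_cast<int8_t>(filled & 0xFF)));  // 0xFFFFFF83
    }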
TEST(MSA_fill_copy_2) {
@@ -5898,7 +5867,7 @@ TEST(MSA_fill_copy_2) {
{
CpuFeatureScope fscope(&assm, MIPS_SIMD);
- __ li(t0, 0xaaaaaaaa);
+ __ li(t0, 0xAAAAAAAA);
__ li(t1, 0x55555555);
__ fill_w(w0, t0);
@@ -5932,19 +5901,18 @@ TEST(MSA_fill_copy_2) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F4 f = FUNCTION_CAST<F4>(code->entry());
+ auto f = GeneratedCode<F4>::FromCode(*code);
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &t[0], &t[1], 0, 0, 0);
- USE(dummy);
+ f.Call(&t[0], &t[1], 0, 0, 0);
CHECK_EQ(0x55555555, t[0].w0);
- CHECK_EQ(0xaaaaaaaa, t[0].w1);
- CHECK_EQ(0xaaaaaaaa, t[0].w2);
- CHECK_EQ(0xaaaaaaaa, t[0].w3);
- CHECK_EQ(0xaaaaaaaa, t[1].w0);
+ CHECK_EQ(0xAAAAAAAA, t[0].w1);
+ CHECK_EQ(0xAAAAAAAA, t[0].w2);
+ CHECK_EQ(0xAAAAAAAA, t[0].w3);
+ CHECK_EQ(0xAAAAAAAA, t[1].w0);
CHECK_EQ(0x55555555, t[1].w1);
- CHECK_EQ(0xaaaaaaaa, t[1].w2);
- CHECK_EQ(0xaaaaaaaa, t[1].w3);
+ CHECK_EQ(0xAAAAAAAA, t[1].w2);
+ CHECK_EQ(0xAAAAAAAA, t[1].w3);
}
TEST(MSA_fill_copy_3) {
@@ -5968,7 +5936,7 @@ TEST(MSA_fill_copy_3) {
{
CpuFeatureScope fscope(&assm, MIPS_SIMD);
- __ li(t0, 0xaaaaaaaa);
+ __ li(t0, 0xAAAAAAAA);
__ li(t1, 0x55555555);
__ Move(f0, t0, t0);
@@ -5991,10 +5959,9 @@ TEST(MSA_fill_copy_3) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F4 f = FUNCTION_CAST<F4>(code->entry());
+ auto f = GeneratedCode<F4>::FromCode(*code);
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &t[0], &t[1], 0, 0, 0);
- USE(dummy);
+ f.Call(&t[0], &t[1], 0, 0, 0);
CHECK_EQ(0x5555555555555555, t[0].d0);
CHECK_EQ(0x5555555555555555, t[1].d0);
@@ -6038,9 +6005,9 @@ void run_msa_insert(int32_t rs_value, int n, msa_reg_t* w) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
- (CALL_GENERATED_CODE(isolate, f, w, 0, 0, 0, 0));
+ (f.Call(w, 0, 0, 0, 0));
}
TEST(MSA_insert) {
@@ -6058,10 +6025,10 @@ TEST(MSA_insert) {
struct TestCaseInsert tc_b[] = {
// input, n, exp_res_lo, exp_res_hi
- {0xa2, 13, 0xffffffffffffffffu, 0xffffa2ffffffffffu},
- {0x73, 10, 0xffffffffffffffffu, 0xffffffffff73ffffu},
- {0x3494, 5, 0xffff94ffffffffffu, 0xffffffffffffffffu},
- {0xa6b8, 1, 0xffffffffffffb8ffu, 0xffffffffffffffffu}};
+ {0xA2, 13, 0xFFFFFFFFFFFFFFFFu, 0xFFFFA2FFFFFFFFFFu},
+ {0x73, 10, 0xFFFFFFFFFFFFFFFFu, 0xFFFFFFFFFF73FFFFu},
+ {0x3494, 5, 0xFFFF94FFFFFFFFFFu, 0xFFFFFFFFFFFFFFFFu},
+ {0xA6B8, 1, 0xFFFFFFFFFFFFB8FFu, 0xFFFFFFFFFFFFFFFFu}};
for (size_t i = 0; i < sizeof(tc_b) / sizeof(TestCaseInsert); ++i) {
msa_reg_t res;
@@ -6072,10 +6039,10 @@ TEST(MSA_insert) {
struct TestCaseInsert tc_h[] = {
// input, n, exp_res_lo, exp_res_hi
- {0x85a2, 7, 0xffffffffffffffffu, 0x85a2ffffffffffffu},
- {0xe873, 5, 0xffffffffffffffffu, 0xffffffffe873ffffu},
- {0x3494, 3, 0x3494ffffffffffffu, 0xffffffffffffffffu},
- {0xa6b8, 1, 0xffffffffa6b8ffffu, 0xffffffffffffffffu}};
+ {0x85A2, 7, 0xFFFFFFFFFFFFFFFFu, 0x85A2FFFFFFFFFFFFu},
+ {0xE873, 5, 0xFFFFFFFFFFFFFFFFu, 0xFFFFFFFFE873FFFFu},
+ {0x3494, 3, 0x3494FFFFFFFFFFFFu, 0xFFFFFFFFFFFFFFFFu},
+ {0xA6B8, 1, 0xFFFFFFFFA6B8FFFFu, 0xFFFFFFFFFFFFFFFFu}};
for (size_t i = 0; i < sizeof(tc_h) / sizeof(TestCaseInsert); ++i) {
msa_reg_t res;
@@ -6086,10 +6053,10 @@ TEST(MSA_insert) {
struct TestCaseInsert tc_w[] = {
// input, n, exp_res_lo, exp_res_hi
- {0xd2f085a2u, 3, 0xffffffffffffffffu, 0xd2f085a2ffffffffu},
- {0x4567e873u, 2, 0xffffffffffffffffu, 0xffffffff4567e873u},
- {0xacdb3494u, 1, 0xacdb3494ffffffffu, 0xffffffffffffffffu},
- {0x89aba6b8u, 0, 0xffffffff89aba6b8u, 0xffffffffffffffffu}};
+ {0xD2F085A2u, 3, 0xFFFFFFFFFFFFFFFFu, 0xD2F085A2FFFFFFFFu},
+ {0x4567E873u, 2, 0xFFFFFFFFFFFFFFFFu, 0xFFFFFFFF4567E873u},
+ {0xACDB3494u, 1, 0xACDB3494FFFFFFFFu, 0xFFFFFFFFFFFFFFFFu},
+ {0x89ABA6B8u, 0, 0xFFFFFFFF89ABA6B8u, 0xFFFFFFFFFFFFFFFFu}};
for (size_t i = 0; i < sizeof(tc_w) / sizeof(TestCaseInsert); ++i) {
msa_reg_t res;
@@ -6112,12 +6079,12 @@ TEST(MSA_move_v) {
uint64_t wd_lo;
uint64_t wd_hi;
} T;
- T t[] = {{0x20b9cc4f1a83e0c5, 0xa27e1b5f2f5bb18a, 0x1e86678b52f8e1ff,
- 0x706e51290ac76fb9},
- {0x4414aed7883ffd18, 0x047d183a06b67016, 0x4ef258cf8d822870,
- 0x2686b73484c2e843},
- {0xd38ff9d048884ffc, 0x6dc63a57c0943ca7, 0x8520ca2f3e97c426,
- 0xa9913868fb819c59}};
+ T t[] = {{0x20B9CC4F1A83E0C5, 0xA27E1B5F2F5BB18A, 0x1E86678B52F8E1FF,
+ 0x706E51290AC76FB9},
+ {0x4414AED7883FFD18, 0x047D183A06B67016, 0x4EF258CF8D822870,
+ 0x2686B73484C2E843},
+ {0xD38FF9D048884FFC, 0x6DC63A57C0943CA7, 0x8520CA2F3E97C426,
+ 0xA9913868FB819C59}};
for (unsigned i = 0; i < arraysize(t); ++i) {
MacroAssembler assm(isolate, nullptr, 0,
@@ -6139,8 +6106,8 @@ TEST(MSA_move_v) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
- (CALL_GENERATED_CODE(isolate, f, &t[i].wd_lo, 0, 0, 0, 0));
+ auto f = GeneratedCode<F3>::FromCode(*code);
+ (f.Call(&t[i].wd_lo, 0, 0, 0, 0));
CHECK_EQ(t[i].ws_lo, t[i].wd_lo);
CHECK_EQ(t[i].ws_hi, t[i].wd_hi);
}
@@ -6158,12 +6125,12 @@ void run_msa_sldi(OperFunc GenerateOperation,
uint64_t wd_lo;
uint64_t wd_hi;
} T;
- T t[] = {{0x20b9cc4f1a83e0c5, 0xa27e1b5f2f5bb18a, 0x1e86678b52f8e1ff,
- 0x706e51290ac76fb9},
- {0x4414aed7883ffd18, 0x047d183a06b67016, 0x4ef258cf8d822870,
- 0x2686b73484c2e843},
- {0xd38ff9d048884ffc, 0x6dc63a57c0943ca7, 0x8520ca2f3e97c426,
- 0xa9913868fb819c59}};
+ T t[] = {{0x20B9CC4F1A83E0C5, 0xA27E1B5F2F5BB18A, 0x1E86678B52F8E1FF,
+ 0x706E51290AC76FB9},
+ {0x4414AED7883FFD18, 0x047D183A06B67016, 0x4EF258CF8D822870,
+ 0x2686B73484C2E843},
+ {0xD38FF9D048884FFC, 0x6DC63A57C0943CA7, 0x8520CA2F3E97C426,
+ 0xA9913868FB819C59}};
uint64_t res[2];
for (unsigned i = 0; i < arraysize(t); ++i) {
@@ -6185,8 +6152,8 @@ void run_msa_sldi(OperFunc GenerateOperation,
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
- (CALL_GENERATED_CODE(isolate, f, &res[0], 0, 0, 0, 0));
+ auto f = GeneratedCode<F3>::FromCode(*code);
+ (f.Call(&res[0], 0, 0, 0, 0));
GenerateExpectedResult(reinterpret_cast<uint8_t*>(&t[i].ws_lo),
reinterpret_cast<uint8_t*>(&t[i].wd_lo));
CHECK_EQ(res[0], t[i].wd_lo);
@@ -6271,12 +6238,12 @@ void run_msa_ctc_cfc(uint32_t value) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
uint32_t res;
- (CALL_GENERATED_CODE(isolate, f, &res, 0, 0, 0, 0));
+ (f.Call(&res, 0, 0, 0, 0));
- CHECK_EQ(value & 0x0167ffff, res);
+ CHECK_EQ(value & 0x0167FFFF, res);
}
TEST(MSA_cfc_ctc) {
@@ -6285,12 +6252,12 @@ TEST(MSA_cfc_ctc) {
CcTest::InitializeVM();
- const uint32_t mask_without_cause = 0xff9c0fff;
- const uint32_t mask_always_zero = 0x0167ffff;
- const uint32_t mask_enables = 0x00000f80;
- uint32_t test_case[] = {0x2d5ede31, 0x07955425, 0x15b7dbe3, 0x2bf8bc37,
- 0xe6aae923, 0x24d0f68d, 0x41afa84c, 0x2d6bf64f,
- 0x925014bd, 0x4dba7e61};
+ const uint32_t mask_without_cause = 0xFF9C0FFF;
+ const uint32_t mask_always_zero = 0x0167FFFF;
+ const uint32_t mask_enables = 0x00000F80;
+ uint32_t test_case[] = {0x2D5EDE31, 0x07955425, 0x15B7DBE3, 0x2BF8BC37,
+ 0xE6AAE923, 0x24D0F68D, 0x41AFA84C, 0x2D6BF64F,
+ 0x925014BD, 0x4DBA7E61};
for (unsigned i = 0; i < arraysize(test_case); i++) {
// Setting enable bits together with the corresponding cause bits could
// raise an exception; the masking here prevents that from happening
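
A sketch of the guard that comment describes, using the masks defined above: if a write would set any enable bit, the cause bits are stripped first so that storing the value to MSACSR cannot immediately raise an exception. The helper name and the exact conditional are our reading of the test, not code from it:

    #include <cstdint>

    uint32_t SafeMsacsrValue(uint32_t value) {
      const uint32_t kMaskWithoutCause = 0xFF9C0FFF;
      const uint32_t kMaskEnables = 0x00000F80;
      return (value & kMaskEnables) ? (value & kMaskWithoutCause) : value;
    }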
@@ -6315,16 +6282,16 @@ void run_msa_i8(SecondaryField opcode, uint64_t ws_lo, uint64_t ws_hi,
v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
- uint64_t wd_lo = 0xf35862e13e38f8b0;
- uint64_t wd_hi = 0x4f41ffdef2bfe636;
+ uint64_t wd_lo = 0xF35862E13E38F8B0;
+ uint64_t wd_hi = 0x4F41FFDEF2BFE636;
#define LOAD_W_REG(lo, hi, w_reg) \
- __ li(t0, static_cast<uint32_t>(lo & 0xffffffff)); \
- __ li(t1, static_cast<uint32_t>((lo >> 32) & 0xffffffff)); \
+ __ li(t0, static_cast<uint32_t>(lo & 0xFFFFFFFF)); \
+ __ li(t1, static_cast<uint32_t>((lo >> 32) & 0xFFFFFFFF)); \
__ insert_w(w_reg, 0, t0); \
__ insert_w(w_reg, 1, t1); \
- __ li(t0, static_cast<uint32_t>(hi & 0xffffffff)); \
- __ li(t1, static_cast<uint32_t>((hi >> 32) & 0xffffffff)); \
+ __ li(t0, static_cast<uint32_t>(hi & 0xFFFFFFFF)); \
+ __ li(t1, static_cast<uint32_t>((hi >> 32) & 0xFFFFFFFF)); \
__ insert_w(w_reg, 2, t0); \
__ insert_w(w_reg, 3, t1);
@@ -6382,9 +6349,9 @@ void run_msa_i8(SecondaryField opcode, uint64_t ws_lo, uint64_t ws_hi,
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
- (CALL_GENERATED_CODE(isolate, f, &res, 0, 0, 0, 0));
+ (f.Call(&res, 0, 0, 0, 0));
uint64_t mask = i8 * 0x0101010101010101ull;
switch (opcode) {
@@ -6419,13 +6386,13 @@ void run_msa_i8(SecondaryField opcode, uint64_t ws_lo, uint64_t ws_hi,
case SHF_B: {
struct ExpResShf exp_b[] = {
// i8, exp_lo, exp_hi
- {0xffu, 0x11111111b9b9b9b9, 0xf7f7f7f7c8c8c8c8},
- {0x0u, 0x62626262dfdfdfdf, 0xd6d6d6d6c8c8c8c8},
- {0xe4u, 0xf35862e13e38f8b0, 0x4f41ffdef2bfe636},
- {0x1bu, 0x1b756911c3d9a7b9, 0xae94a5f79c8aefc8},
- {0xb1u, 0x662b6253e8c4df12, 0x0d3ad6803f8bc88b},
- {0x4eu, 0x62e1f358f8b03e38, 0xffde4f41e636f2bf},
- {0x27u, 0x1b697511c3a7d9b9, 0xaea594f79cef8ac8}};
+ {0xFFu, 0x11111111B9B9B9B9, 0xF7F7F7F7C8C8C8C8},
+ {0x0u, 0x62626262DFDFDFDF, 0xD6D6D6D6C8C8C8C8},
+ {0xE4u, 0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636},
+ {0x1Bu, 0x1B756911C3D9A7B9, 0xAE94A5F79C8AEFC8},
+ {0xB1u, 0x662B6253E8C4DF12, 0x0D3AD6803F8BC88B},
+ {0x4Eu, 0x62E1F358F8B03E38, 0xFFDE4F41E636F2BF},
+ {0x27u, 0x1B697511C3A7D9B9, 0xAEA594F79CEF8AC8}};
for (size_t i = 0; i < sizeof(exp_b) / sizeof(ExpResShf); ++i) {
if (exp_b[i].i8 == i8) {
CHECK_EQ(exp_b[i].lo, res.d[0]);
@@ -6436,13 +6403,13 @@ void run_msa_i8(SecondaryField opcode, uint64_t ws_lo, uint64_t ws_hi,
case SHF_H: {
struct ExpResShf exp_h[] = {
// i8, exp_lo, exp_hi
- {0xffu, 0x1169116911691169, 0xf7a5f7a5f7a5f7a5},
- {0x0u, 0x12df12df12df12df, 0x8bc88bc88bc88bc8},
- {0xe4u, 0xf35862e13e38f8b0, 0x4f41ffdef2bfe636},
- {0x1bu, 0xd9c3b9a7751b1169, 0x8a9cc8ef94aef7a5},
- {0xb1u, 0x53622b6612dfc4e8, 0x80d63a0d8bc88b3f},
- {0x4eu, 0x3e38f8b0f35862e1, 0xf2bfe6364f41ffde},
- {0x27u, 0xd9c3751bb9a71169, 0x8a9c94aec8eff7a5}};
+ {0xFFu, 0x1169116911691169, 0xF7A5F7A5F7A5F7A5},
+ {0x0u, 0x12DF12DF12DF12DF, 0x8BC88BC88BC88BC8},
+ {0xE4u, 0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636},
+ {0x1Bu, 0xD9C3B9A7751B1169, 0x8A9CC8EF94AEF7A5},
+ {0xB1u, 0x53622B6612DFC4E8, 0x80D63A0D8BC88B3F},
+ {0x4Eu, 0x3E38F8B0F35862E1, 0xF2BFE6364F41FFDE},
+ {0x27u, 0xD9C3751BB9A71169, 0x8A9C94AEC8EFF7A5}};
for (size_t i = 0; i < sizeof(exp_h) / sizeof(ExpResShf); ++i) {
if (exp_h[i].i8 == i8) {
CHECK_EQ(exp_h[i].lo, res.d[0]);
@@ -6453,13 +6420,13 @@ void run_msa_i8(SecondaryField opcode, uint64_t ws_lo, uint64_t ws_hi,
case SHF_W: {
struct ExpResShf exp_w[] = {
// i8, exp_lo, exp_hi
- {0xffu, 0xf7a594aef7a594ae, 0xf7a594aef7a594ae},
- {0x0u, 0xc4e812dfc4e812df, 0xc4e812dfc4e812df},
- {0xe4u, 0xf35862e13e38f8b0, 0x4f41ffdef2bfe636},
- {0x1bu, 0xc8ef8a9cf7a594ae, 0xb9a7d9c31169751b},
- {0xb1u, 0xc4e812df2b665362, 0x8b3f8bc83a0d80d6},
- {0x4eu, 0x4f41ffdef2bfe636, 0xf35862e13e38f8b0},
- {0x27u, 0x1169751bf7a594ae, 0xb9a7d9c3c8ef8a9c}};
+ {0xFFu, 0xF7A594AEF7A594AE, 0xF7A594AEF7A594AE},
+ {0x0u, 0xC4E812DFC4E812DF, 0xC4E812DFC4E812DF},
+ {0xE4u, 0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636},
+ {0x1Bu, 0xC8EF8A9CF7A594AE, 0xB9A7D9C31169751B},
+ {0xB1u, 0xC4E812DF2B665362, 0x8B3F8BC83A0D80D6},
+ {0x4Eu, 0x4F41FFDEF2BFE636, 0xF35862E13E38F8B0},
+ {0x27u, 0x1169751BF7A594AE, 0xB9A7D9C3C8EF8A9C}};
for (size_t i = 0; i < sizeof(exp_w) / sizeof(ExpResShf); ++i) {
if (exp_w[i].i8 == i8) {
CHECK_EQ(exp_w[i].lo, res.d[0]);
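
Each SHF immediate packs four 2-bit source selectors, which is what the 3333 / 0123 / 2301 annotations later in this test encode. For the word variant, the rule below reproduces the exp_w rows (a hedged reading of the table; lane 0 is the lowest 32 bits of res.d[0]):

    #include <cstdint>

    // Destination word j takes source word ((i8 >> 2*j) & 3).
    void ShfWModel(const uint32_t src[4], uint8_t i8, uint32_t dst[4]) {
      for (int j = 0; j < 4; ++j) dst[j] = src[(i8 >> (2 * j)) & 3];
    }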
@@ -6485,10 +6452,10 @@ TEST(MSA_andi_ori_nori_xori) {
CcTest::InitializeVM();
struct TestCaseMsaI8 tc[] = {// input_lo, input_hi, i8
- {0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c, 0xffu},
- {0x2b665362c4e812df, 0x3a0d80d68b3f8bc8, 0x0u},
- {0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c, 0x3bu},
- {0x2b665362c4e812df, 0x3a0d80d68b3f8bc8, 0xd9u}};
+ {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0xFFu},
+ {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 0x0u},
+ {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0x3Bu},
+ {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 0xD9u}};
for (size_t i = 0; i < sizeof(tc) / sizeof(TestCaseMsaI8); ++i) {
run_msa_i8(ANDI_B, tc[i].input_lo, tc[i].input_hi, tc[i].i8);
@@ -6505,10 +6472,10 @@ TEST(MSA_bmnzi_bmzi_bseli) {
CcTest::InitializeVM();
struct TestCaseMsaI8 tc[] = {// input_lo, input_hi, i8
- {0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c, 0xffu},
- {0x2b665362c4e812df, 0x3a0d80d68b3f8bc8, 0x0u},
- {0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c, 0x3bu},
- {0x2b665362c4e812df, 0x3a0d80d68b3f8bc8, 0xd9u}};
+ {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0xFFu},
+ {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 0x0u},
+ {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0x3Bu},
+ {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 0xD9u}};
for (size_t i = 0; i < sizeof(tc) / sizeof(TestCaseMsaI8); ++i) {
run_msa_i8(BMNZI_B, tc[i].input_lo, tc[i].input_hi, tc[i].i8);
@@ -6525,13 +6492,13 @@ TEST(MSA_shf) {
struct TestCaseMsaI8 tc[] = {
// input_lo, input_hi, i8
- {0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c, 0xffu}, // 3333
- {0x2b665362c4e812df, 0x3a0d80d68b3f8bc8, 0x0u}, // 0000
- {0xf35862e13e38f8b0, 0x4f41ffdef2bfe636, 0xe4u}, // 3210
- {0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c, 0x1bu}, // 0123
- {0x2b665362c4e812df, 0x3a0d80d68b3f8bc8, 0xb1u}, // 2301
- {0xf35862e13e38f8b0, 0x4f41ffdef2bfe636, 0x4eu}, // 1032
- {0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c, 0x27u} // 0213
+ {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0xFFu}, // 3333
+ {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 0x0u}, // 0000
+ {0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636, 0xE4u}, // 3210
+ {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0x1Bu}, // 0123
+ {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 0xB1u}, // 2301
+ {0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636, 0x4Eu}, // 1032
+ {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0x27u} // 0213
};
for (size_t i = 0; i < sizeof(tc) / sizeof(TestCaseMsaI8); ++i) {
@@ -6558,10 +6525,9 @@ uint32_t run_Ins(uint32_t imm, uint32_t source, uint16_t pos, uint16_t size) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F2 f = FUNCTION_CAST<F2>(code->entry());
+ auto f = GeneratedCode<F2>::FromCode(*code);
- uint32_t res = reinterpret_cast<uint32_t>(
- CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ uint32_t res = reinterpret_cast<uint32_t>(f.Call(0, 0, 0, 0, 0));
return res;
}
@@ -6570,9 +6536,9 @@ TEST(Ins) {
CcTest::InitializeVM();
// run_Ins(rt_value, rs_value, pos, size), expected_result
- CHECK_EQ(run_Ins(0x55555555, 0xabcdef01, 31, 1), 0xd5555555);
- CHECK_EQ(run_Ins(0x55555555, 0xabcdef02, 30, 2), 0x95555555);
- CHECK_EQ(run_Ins(0x01234567, 0xfabcdeff, 0, 32), 0xfabcdeff);
+ CHECK_EQ(run_Ins(0x55555555, 0xABCDEF01, 31, 1), 0xD5555555);
+ CHECK_EQ(run_Ins(0x55555555, 0xABCDEF02, 30, 2), 0x95555555);
+ CHECK_EQ(run_Ins(0x01234567, 0xFABCDEFF, 0, 32), 0xFABCDEFF);
// Results with positive sign.
CHECK_EQ(run_Ins(0x55555550, 0x80000001, 0, 1), 0x55555551);
@@ -6590,7 +6556,7 @@ TEST(Ins) {
CHECK_EQ(run_Ins(0x55555555, 0x80800001, 8, 24), 0x80000155);
CHECK_EQ(run_Ins(0x55555555, 0x80008001, 16, 16), 0x80015555);
CHECK_EQ(run_Ins(0x55555555, 0x80000081, 24, 8), 0x81555555);
- CHECK_EQ(run_Ins(0x75555555, 0x00000001, 31, 1), 0xf5555555);
+ CHECK_EQ(run_Ins(0x75555555, 0x00000001, 31, 1), 0xF5555555);
}
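
The run_Ins checks collectively pin down standard bit-insert semantics: size bits of rs replace the field of rt starting at pos. A hedged model (function name ours) that reproduces every expected value above:

    #include <cstdint>

    uint32_t InsModel(uint32_t rt, uint32_t rs, int pos, int size) {
      uint32_t mask = (size == 32) ? 0xFFFFFFFFu : ((1u << size) - 1);
      return (rt & ~(mask << pos)) | ((rs & mask) << pos);
    }
    // InsModel(0x55555555, 0xABCDEF01, 31, 1) == 0xD5555555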
uint32_t run_Ext(uint32_t source, uint16_t pos, uint16_t size) {
@@ -6600,7 +6566,7 @@ uint32_t run_Ext(uint32_t source, uint16_t pos, uint16_t size) {
MacroAssembler assm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
- __ li(v0, 0xffffffff);
+ __ li(v0, 0xFFFFFFFF);
__ li(t0, source);
__ Ext(v0, t0, pos, size);
__ jr(ra);
@@ -6610,10 +6576,9 @@ uint32_t run_Ext(uint32_t source, uint16_t pos, uint16_t size) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F2 f = FUNCTION_CAST<F2>(code->entry());
+ auto f = GeneratedCode<F2>::FromCode(*code);
- uint32_t res = reinterpret_cast<uint32_t>(
- CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ uint32_t res = reinterpret_cast<uint32_t>(f.Call(0, 0, 0, 0, 0));
return res;
}
@@ -6677,9 +6642,9 @@ void run_msa_i5(struct TestCaseMsaI5* input, bool i5_sign_ext,
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
- (CALL_GENERATED_CODE(isolate, f, &res, 0, 0, 0, 0));
+ (f.Call(&res, 0, 0, 0, 0));
CHECK_EQ(GenerateOperationFunc(input->ws_lo, input->i5), res.d[0]);
CHECK_EQ(GenerateOperationFunc(input->ws_hi, input->i5), res.d[1]);
@@ -6693,12 +6658,12 @@ TEST(MSA_addvi_subvi) {
struct TestCaseMsaI5 tc[] = {
// ws_lo, ws_hi, i5
- {0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c, 0x0000001f},
- {0x2b665362c4e812df, 0x3a0d80d68b3f8bc8, 0x0000000f},
- {0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c, 0x00000005},
- {0x2b665362c4e812df, 0x3a0d80d68b3f8bc8, 0x00000010},
- {0xffab807f807fffcd, 0x7f23ff80ff567f80, 0x0000000f},
- {0x80ffefff7f12807f, 0x807f80ff7fdeff78, 0x00000010}};
+ {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0x0000001F},
+ {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 0x0000000F},
+ {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0x00000005},
+ {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 0x00000010},
+ {0xFFAB807F807FFFCD, 0x7F23FF80FF567F80, 0x0000000F},
+ {0x80FFEFFF7F12807F, 0x807F80FF7FDEFF78, 0x00000010}};
#define ADDVI_DF(lanes, mask) \
uint64_t res = 0; \
@@ -6769,21 +6734,21 @@ TEST(MSA_maxi_mini) {
struct TestCaseMsaI5 tc[] = {
// ws_lo, ws_hi, i5
- {0x7f80ff3480ff7f00, 0x8d7fff80ff7f6780, 0x0000001f},
- {0x7f80ff3480ff7f00, 0x8d7fff80ff7f6780, 0x0000000f},
- {0x7f80ff3480ff7f00, 0x8d7fff80ff7f6780, 0x00000010},
- {0x80007fff91daffff, 0x7fff8000ffff5678, 0x0000001f},
- {0x80007fff91daffff, 0x7fff8000ffff5678, 0x0000000f},
- {0x80007fff91daffff, 0x7fff8000ffff5678, 0x00000010},
- {0x7fffffff80000000, 0x12345678ffffffff, 0x0000001f},
- {0x7fffffff80000000, 0x12345678ffffffff, 0x0000000f},
- {0x7fffffff80000000, 0x12345678ffffffff, 0x00000010},
- {0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c, 0x0000001f},
- {0x2b665362c4e812df, 0x3a0d80d68b3f8bc8, 0x0000000f},
- {0xf35862e13e38f8b0, 0x4f41ffdef2bfe636, 0x00000010},
- {0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c, 0x00000015},
- {0x2b665362c4e812df, 0x3a0d80d68b3f8bc8, 0x00000009},
- {0xf35862e13e38f8b0, 0x4f41ffdef2bfe636, 0x00000003}};
+ {0x7F80FF3480FF7F00, 0x8D7FFF80FF7F6780, 0x0000001F},
+ {0x7F80FF3480FF7F00, 0x8D7FFF80FF7F6780, 0x0000000F},
+ {0x7F80FF3480FF7F00, 0x8D7FFF80FF7F6780, 0x00000010},
+ {0x80007FFF91DAFFFF, 0x7FFF8000FFFF5678, 0x0000001F},
+ {0x80007FFF91DAFFFF, 0x7FFF8000FFFF5678, 0x0000000F},
+ {0x80007FFF91DAFFFF, 0x7FFF8000FFFF5678, 0x00000010},
+ {0x7FFFFFFF80000000, 0x12345678FFFFFFFF, 0x0000001F},
+ {0x7FFFFFFF80000000, 0x12345678FFFFFFFF, 0x0000000F},
+ {0x7FFFFFFF80000000, 0x12345678FFFFFFFF, 0x00000010},
+ {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0x0000001F},
+ {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 0x0000000F},
+ {0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636, 0x00000010},
+ {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0x00000015},
+ {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 0x00000009},
+ {0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636, 0x00000003}};
#define MAXI_MINI_S_DF(lanes, mask, func) \
[](uint64_t ws, uint32_t ui5) { \
@@ -6904,18 +6869,18 @@ TEST(MSA_ceqi_clti_clei) {
CcTest::InitializeVM();
struct TestCaseMsaI5 tc[] = {
- {0xff69751bb9a7d9c3, 0xf7a594aec8ff8a9c, 0x0000001f},
- {0xe669ffffb9a7d9c3, 0xf7a594aeffff8a9c, 0x0000001f},
- {0xffffffffb9a7d9c3, 0xf7a594aeffffffff, 0x0000001f},
- {0x2b0b5362c4e812df, 0x3a0d80d68b3f0bc8, 0x0000000b},
- {0x2b66000bc4e812df, 0x3a0d000b8b3f8bc8, 0x0000000b},
- {0x0000000bc4e812df, 0x3a0d80d60000000b, 0x0000000b},
- {0xf38062e13e38f8b0, 0x8041ffdef2bfe636, 0x00000010},
- {0xf35880003e38f8b0, 0x4f41ffdef2bf8000, 0x00000010},
- {0xf35862e180000000, 0x80000000f2bfe636, 0x00000010},
- {0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c, 0x00000015},
- {0x2b665362c4e812df, 0x3a0d80d68b3f8bc8, 0x00000009},
- {0xf30062e13e38f800, 0x4f00ffdef2bf0036, 0x00000000}};
+ {0xFF69751BB9A7D9C3, 0xF7A594AEC8FF8A9C, 0x0000001F},
+ {0xE669FFFFB9A7D9C3, 0xF7A594AEFFFF8A9C, 0x0000001F},
+ {0xFFFFFFFFB9A7D9C3, 0xF7A594AEFFFFFFFF, 0x0000001F},
+ {0x2B0B5362C4E812DF, 0x3A0D80D68B3F0BC8, 0x0000000B},
+ {0x2B66000BC4E812DF, 0x3A0D000B8B3F8BC8, 0x0000000B},
+ {0x0000000BC4E812DF, 0x3A0D80D60000000B, 0x0000000B},
+ {0xF38062E13E38F8B0, 0x8041FFDEF2BFE636, 0x00000010},
+ {0xF35880003E38F8B0, 0x4F41FFDEF2BF8000, 0x00000010},
+ {0xF35862E180000000, 0x80000000F2BFE636, 0x00000010},
+ {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0x00000015},
+ {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 0x00000009},
+ {0xF30062E13E38F800, 0x4F00FFDEF2BF0036, 0x00000000}};
#define CEQI_CLTI_CLEI_S_DF(lanes, mask, func) \
[](uint64_t ws, uint32_t ui5) { \
@@ -7098,9 +7063,9 @@ void run_msa_2r(const struct TestCaseMsa2R* input,
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
- (CALL_GENERATED_CODE(isolate, f, &res, 0, 0, 0, 0));
+ (f.Call(&res, 0, 0, 0, 0));
CHECK_EQ(input->exp_res_lo, res.d[0]);
CHECK_EQ(input->exp_res_hi, res.d[1]);
@@ -7114,44 +7079,44 @@ TEST(MSA_pcnt) {
struct TestCaseMsa2R tc_b[] = {// ws_lo, ws_hi, exp_res_lo, exp_res_hi
{0x0000000000000000, 0x0000000000000000, 0, 0},
- {0xffffffffffffffff, 0xffffffffffffffff,
+ {0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF,
0x0808080808080808, 0x0808080808080808},
- {0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c,
+ {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C,
0x0204050405050504, 0x0704030503070304},
- {0x2b665362c4e812df, 0x3a0d80d68b3f8bc8,
+ {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8,
0x0404040303040207, 0x0403010504060403},
- {0xf35862e13e38f8b0, 0x4f41ffdef2bfe636,
+ {0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636,
0x0603030405030503, 0x0502080605070504}};
struct TestCaseMsa2R tc_h[] = {// ws_lo, ws_hi, exp_res_lo, exp_res_hi
{0x0000000000000000, 0x0000000000000000, 0, 0},
- {0xffffffffffffffff, 0xffffffffffffffff,
+ {0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF,
0x0010001000100010, 0x0010001000100010},
- {0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c,
- 0x00060009000a0009, 0x000b0008000a0007},
- {0x2b665362c4e812df, 0x3a0d80d68b3f8bc8,
- 0x0008000700070009, 0x00070006000a0007},
- {0xf35862e13e38f8b0, 0x4f41ffdef2bfe636,
- 0x0009000700080008, 0x0007000e000c0009}};
+ {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C,
+ 0x00060009000A0009, 0x000B0008000A0007},
+ {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8,
+ 0x0008000700070009, 0x00070006000A0007},
+ {0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636,
+ 0x0009000700080008, 0x0007000E000C0009}};
struct TestCaseMsa2R tc_w[] = {// ws_lo, ws_hi, exp_res_lo, exp_res_hi
{0x0000000000000000, 0x0000000000000000, 0, 0},
- {0xffffffffffffffff, 0xffffffffffffffff,
+ {0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF,
0x0000002000000020, 0x0000002000000020},
- {0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c,
- 0x0000000f00000013, 0x0000001300000011},
- {0x2b665362c4e812df, 0x3a0d80d68b3f8bc8,
- 0x0000000f00000010, 0x0000000d00000011},
- {0xf35862e13e38f8b0, 0x4f41ffdef2bfe636,
+ {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C,
+ 0x0000000F00000013, 0x0000001300000011},
+ {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8,
+ 0x0000000F00000010, 0x0000000D00000011},
+ {0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636,
0x0000001000000010, 0x0000001500000015}};
struct TestCaseMsa2R tc_d[] = {
// ws_lo, ws_hi, exp_res_lo, exp_res_hi
{0x0000000000000000, 0x0000000000000000, 0, 0},
- {0xffffffffffffffff, 0xffffffffffffffff, 0x40, 0x40},
- {0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c, 0x22, 0x24},
- {0x2b665362c4e812df, 0x3a0d80d68b3f8bc8, 0x1f, 0x1e},
- {0xf35862e13e38f8b0, 0x4f41ffdef2bfe636, 0x20, 0x2a}};
+ {0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0x40, 0x40},
+ {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0x22, 0x24},
+ {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 0x1F, 0x1E},
+ {0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636, 0x20, 0x2A}};
for (size_t i = 0; i < sizeof(tc_b) / sizeof(TestCaseMsa2R); ++i) {
run_msa_2r(&tc_b[i], [](MacroAssembler& assm) { __ pcnt_b(w2, w0); });
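
The pcnt expectations are per-lane population counts. A portable byte-lane model (no compiler builtins; name ours) that reproduces the tc_b rows:

    #include <cstdint>

    uint64_t PcntBytesModel(uint64_t v) {
      uint64_t out = 0;
      for (int lane = 0; lane < 8; ++lane) {
        uint8_t b = static_cast<uint8_t>(v >> (8 * lane));
        uint64_t count = 0;
        for (; b != 0; b >>= 1) count += b & 1;
        out |= count << (8 * lane);
      }
      return out;
    }
    // PcntBytesModel(0xFFFFFFFFFFFFFFFFull) == 0x0808080808080808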
@@ -7170,43 +7135,43 @@ TEST(MSA_nlzc) {
struct TestCaseMsa2R tc_b[] = {// ws_lo, ws_hi, exp_res_lo, exp_res_hi
{0x0000000000000000, 0x0000000000000000,
0x0808080808080808, 0x0808080808080808},
- {0xffffffffffffffff, 0xffffffffffffffff, 0, 0},
- {0x1169350b07030100, 0x7f011402381f0a6c,
+ {0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0, 0},
+ {0x1169350B07030100, 0x7F011402381F0A6C,
0x0301020405060708, 0x0107030602030401},
- {0x010806003478121f, 0x03013016073f7b08,
+ {0x010806003478121F, 0x03013016073F7B08,
0x0704050802010303, 0x0607020305020104},
- {0x0168321100083803, 0x07113f03013f1676,
+ {0x0168321100083803, 0x07113F03013F1676,
0x0701020308040206, 0x0503020607020301}};
struct TestCaseMsa2R tc_h[] = {// ws_lo, ws_hi, exp_res_lo, exp_res_hi
{0x0000000000000000, 0x0000000000000000,
0x0010001000100010, 0x0010001000100010},
- {0xffffffffffffffff, 0xffffffffffffffff, 0, 0},
- {0x00010007000a003c, 0x37a5001e00010002,
- 0x000f000d000c000a, 0x0002000b000f000e},
- {0x0026066200780edf, 0x003d0003000f00c8,
- 0x000a000500090004, 0x000a000e000c0008},
- {0x335807e100480030, 0x01410fde12bf5636,
- 0x000200050009000a, 0x0007000400030001}};
+ {0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0, 0},
+ {0x00010007000A003C, 0x37A5001E00010002,
+ 0x000F000D000C000A, 0x0002000B000F000E},
+ {0x0026066200780EDF, 0x003D0003000F00C8,
+ 0x000A000500090004, 0x000A000E000C0008},
+ {0x335807E100480030, 0x01410FDE12BF5636,
+ 0x000200050009000A, 0x0007000400030001}};
struct TestCaseMsa2R tc_w[] = {// ws_lo, ws_hi, exp_res_lo, exp_res_hi
{0x0000000000000000, 0x0000000000000000,
0x0000002000000020, 0x0000002000000020},
- {0xffffffffffffffff, 0xffffffffffffffff, 0, 0},
- {0x00000005000007c3, 0x000014ae00006a9c,
- 0x0000001d00000015, 0x0000001300000011},
- {0x00009362000112df, 0x000380d6003f8bc8,
- 0x000000100000000f, 0x0000000e0000000a},
- {0x135862e17e38f8b0, 0x0061ffde03bfe636,
+ {0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0, 0},
+ {0x00000005000007C3, 0x000014AE00006A9C,
+ 0x0000001D00000015, 0x0000001300000011},
+ {0x00009362000112DF, 0x000380D6003F8BC8,
+ 0x000000100000000F, 0x0000000E0000000A},
+ {0x135862E17E38F8B0, 0x0061FFDE03BFE636,
0x0000000300000001, 0x0000000900000006}};
struct TestCaseMsa2R tc_d[] = {
// ws_lo, ws_hi, exp_res_lo, exp_res_hi
{0x0000000000000000, 0x0000000000000000, 0x40, 0x40},
- {0xffffffffffffffff, 0xffffffffffffffff, 0, 0},
- {0x000000000000014e, 0x00000000000176da, 0x37, 0x2f},
- {0x00000062c4e812df, 0x000065d68b3f8bc8, 0x19, 0x11},
- {0x00000000e338f8b0, 0x0754534acab32654, 0x20, 0x5}};
+ {0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0, 0},
+ {0x000000000000014E, 0x00000000000176DA, 0x37, 0x2F},
+ {0x00000062C4E812DF, 0x000065D68B3F8BC8, 0x19, 0x11},
+ {0x00000000E338F8B0, 0x0754534ACAB32654, 0x20, 0x5}};
for (size_t i = 0; i < sizeof(tc_b) / sizeof(TestCaseMsa2R); ++i) {
run_msa_2r(&tc_b[i], [](MacroAssembler& assm) { __ nlzc_b(w2, w0); });
@@ -7223,7 +7188,7 @@ TEST(MSA_nloc) {
CcTest::InitializeVM();
struct TestCaseMsa2R tc_b[] = {// ws_lo, ws_hi, exp_res_lo, exp_res_hi
- {0xffffffffffffffff, 0xffffffffffffffff,
+ {0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF,
0x0808080808080808, 0x0808080808080808},
{0x0000000000000000, 0x0000000000000000, 0, 0},
{0xEE96CAF4F8FCFEFF, 0x80FEEBFDC7E0F593,
@@ -7234,32 +7199,32 @@ TEST(MSA_nloc) {
0x0701020308040206, 0x0503020607020301}};
struct TestCaseMsa2R tc_h[] = {// ws_lo, ws_hi, exp_res_lo, exp_res_hi
- {0xffffffffffffffff, 0xffffffffffffffff,
+ {0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF,
0x0010001000100010, 0x0010001000100010},
{0x0000000000000000, 0x0000000000000000, 0, 0},
{0xFFFEFFF8FFF5FFC3, 0xC85AFFE1FFFEFFFD,
- 0x000f000d000c000a, 0x0002000b000f000e},
+ 0x000F000D000C000A, 0x0002000B000F000E},
{0xFFD9F99DFF87F120, 0xFFC2FFFCFFF0FF37,
- 0x000a000500090004, 0x000a000e000c0008},
+ 0x000A000500090004, 0x000A000E000C0008},
{0xCCA7F81EFFB7FFCF, 0xFEBEF021ED40A9C9,
- 0x000200050009000a, 0x0007000400030001}};
+ 0x000200050009000A, 0x0007000400030001}};
struct TestCaseMsa2R tc_w[] = {// ws_lo, ws_hi, exp_res_lo, exp_res_hi
- {0xffffffffffffffff, 0xffffffffffffffff,
+ {0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF,
0x0000002000000020, 0x0000002000000020},
{0x0000000000000000, 0x0000000000000000, 0, 0},
{0xFFFFFFFAFFFFF83C, 0xFFFFEB51FFFF9563,
- 0x0000001d00000015, 0x0000001300000011},
+ 0x0000001D00000015, 0x0000001300000011},
{0xFFFF6C9DFFFEED20, 0xFFFC7F29FFC07437,
- 0x000000100000000f, 0x0000000e0000000a},
+ 0x000000100000000F, 0x0000000E0000000A},
{0xECA79D1E81C7074F, 0xFF9E0021FC4019C9,
0x0000000300000001, 0x0000000900000006}};
struct TestCaseMsa2R tc_d[] = {
// ws_lo, ws_hi, exp_res_lo, exp_res_hi
- {0xffffffffffffffff, 0xffffffffffffffff, 0x40, 0x40},
+ {0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0x40, 0x40},
{0x0000000000000000, 0x0000000000000000, 0, 0},
- {0xFFFFFFFFFFFFFEB1, 0xFFFFFFFFFFFE8925, 0x37, 0x2f},
+ {0xFFFFFFFFFFFFFEB1, 0xFFFFFFFFFFFE8925, 0x37, 0x2F},
{0xFFFFFF9D3B17ED20, 0xFFFF9A2974C07437, 0x19, 0x11},
{0xFFFFFFFF1CC7074F, 0xF8ABACB5354CD9AB, 0x20, 0x5}};
@@ -7997,11 +7962,11 @@ TEST(MSA_fexupl) {
const double inf_double = std::numeric_limits<double>::infinity();
struct TestCaseMsa2RF_U16_F tc_s[] = {
- {1, 2, 0x7c00, 0x0c00, 0, 0x7c00, 0xfc00, 0x8000, 0.f, inf_float,
+ {1, 2, 0x7C00, 0x0C00, 0, 0x7C00, 0xFC00, 0x8000, 0.f, inf_float,
-inf_float, -0.f},
- {0xfc00, 0xffff, 0x00ff, 0x8000, 0x81fe, 0x8000, 0x0345, 0xaaaa,
+ {0xFC00, 0xFFFF, 0x00FF, 0x8000, 0x81FE, 0x8000, 0x0345, 0xAAAA,
-3.0398368835e-5f, -0.f, 4.9889088e-5f, -5.2062988281e-2f},
- {3, 4, 0x5555, 6, 0x2aaa, 0x8700, 0x7777, 0x6a8b, 5.2062988281e-2f,
+ {3, 4, 0x5555, 6, 0x2AAA, 0x8700, 0x7777, 0x6A8B, 5.2062988281e-2f,
-1.06811523458e-4f, 3.0576e4f, 3.35e3f}};
struct TestCaseMsa2RF_F_D tc_d[] = {
@@ -8030,11 +7995,11 @@ TEST(MSA_fexupr) {
const double inf_double = std::numeric_limits<double>::infinity();
struct TestCaseMsa2RF_U16_F tc_s[] = {
- {0, 0x7c00, 0xfc00, 0x8000, 1, 2, 0x7c00, 0x0c00, 0.f, inf_float,
+ {0, 0x7C00, 0xFC00, 0x8000, 1, 2, 0x7C00, 0x0C00, 0.f, inf_float,
-inf_float, -0.f},
- {0x81fe, 0x8000, 0x0345, 0xaaaa, 0xfc00, 0xffff, 0x00ff, 0x8000,
+ {0x81FE, 0x8000, 0x0345, 0xAAAA, 0xFC00, 0xFFFF, 0x00FF, 0x8000,
-3.0398368835e-5f, -0.f, 4.9889088e-5f, -5.2062988281e-2f},
- {0x2aaa, 0x8700, 0x7777, 0x6a8b, 3, 4, 0x5555, 6, 5.2062988281e-2f,
+ {0x2AAA, 0x8700, 0x7777, 0x6A8B, 3, 4, 0x5555, 6, 5.2062988281e-2f,
-1.06811523458e-4f, 3.0576e4f, 3.35e3f}};
struct TestCaseMsa2RF_F_D tc_d[] = {
@@ -8068,13 +8033,13 @@ TEST(MSA_ffql) {
CcTest::InitializeVM();
- struct TestCaseMsa2RF_U16_F tc_s[] = {{0, 3, 0xffff, 0x8000, 0x8000, 0xe000,
+ struct TestCaseMsa2RF_U16_F tc_s[] = {{0, 3, 0xFFFF, 0x8000, 0x8000, 0xE000,
0x0FF0, 0, -1.f, -0.25f,
0.12451171875f, 0.f}};
struct TestCaseMsa2RF_U32_D tc_d[] = {
- {0, 45, 0x80000000, 0xe0000000, -1., -0.25},
- {0x28379, 0xaaaa5555, 0x024903d3, 0, 17.853239085525274277e-3, 0.}};
+ {0, 45, 0x80000000, 0xE0000000, -1., -0.25},
+ {0x28379, 0xAAAA5555, 0x024903D3, 0, 17.853239085525274277e-3, 0.}};
for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_U16_F); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
@@ -8092,13 +8057,13 @@ TEST(MSA_ffqr) {
CcTest::InitializeVM();
- struct TestCaseMsa2RF_U16_F tc_s[] = {{0x8000, 0xe000, 0x0FF0, 0, 0, 3,
- 0xffff, 0x8000, -1.f, -0.25f,
+ struct TestCaseMsa2RF_U16_F tc_s[] = {{0x8000, 0xE000, 0x0FF0, 0, 0, 3,
+ 0xFFFF, 0x8000, -1.f, -0.25f,
0.12451171875f, 0.f}};
struct TestCaseMsa2RF_U32_D tc_d[] = {
- {0x80000000, 0xe0000000, 0, 45, -1., -0.25},
- {0x024903d3, 0, 0x28379, 0xaaaa5555, 17.853239085525274277e-3, 0.}};
+ {0x80000000, 0xE0000000, 0, 45, -1., -0.25},
+ {0x024903D3, 0, 0x28379, 0xAAAA5555, 17.853239085525274277e-3, 0.}};
for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_U16_F); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
@@ -8149,9 +8114,9 @@ void run_msa_vector(struct TestCaseMsaVector* input,
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
- (CALL_GENERATED_CODE(isolate, f, &res, 0, 0, 0, 0));
+ (f.Call(&res, 0, 0, 0, 0));
CHECK_EQ(GenerateOperationFunc(input->wd_lo, input->ws_lo, input->wt_lo),
res.d[0]);
@@ -8167,12 +8132,12 @@ TEST(MSA_vector) {
struct TestCaseMsaVector tc[] = {
// wd_lo, wd_hi, ws_lo, ws_hi, wt_lo, wt_hi
- {0xf35862e13e38f8b0, 0x4f41ffdef2bfe636, 0xdcd39d91f9057627,
- 0x64be4f6dbe9caa51, 0x6b23de1a687d9cb9, 0x49547aad691da4ca},
- {0xf35862e13e38f8b0, 0x4f41ffdef2bfe636, 0x401614523d830549,
- 0xd7c46d613f50eddd, 0x52284cbc60a1562b, 0x1756ed510d8849cd},
- {0xf35862e13e38f8b0, 0x4f41ffdef2bfe636, 0xd6e2d2ebcb40d72f,
- 0x13a619afce67b079, 0x36cce284343e40f9, 0xb4e8f44fd148bf7f}};
+ {0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636, 0xDCD39D91F9057627,
+ 0x64BE4F6DBE9CAA51, 0x6B23DE1A687D9CB9, 0x49547AAD691DA4CA},
+ {0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636, 0x401614523D830549,
+ 0xD7C46D613F50EDDD, 0x52284CBC60A1562B, 0x1756ED510D8849CD},
+ {0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636, 0xD6E2D2EBCB40D72F,
+ 0x13A619AFCE67B079, 0x36CCE284343E40F9, 0xB4E8F44FD148BF7F}};
for (size_t i = 0; i < sizeof(tc) / sizeof(TestCaseMsaVector); ++i) {
run_msa_vector(
@@ -8238,9 +8203,9 @@ void run_msa_bit(struct TestCaseMsaBit* input, InstFunc GenerateInstructionFunc,
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
- (CALL_GENERATED_CODE(isolate, f, &res, 0, 0, 0, 0));
+ (f.Call(&res, 0, 0, 0, 0));
CHECK_EQ(GenerateOperationFunc(input->wd_lo, input->ws_lo, input->m),
res.d[0]);
@@ -8256,14 +8221,14 @@ TEST(MSA_slli_srai_srli) {
struct TestCaseMsaBit tc[] = {
// wd_lo, wd_hi ws_lo, ws_hi, m
- {0, 0, 0xf35862e13e38f8b0, 0x4f41ffdef2bfe636, 3},
- {0, 0, 0x64be4f6dbe9caa51, 0x6b23de1a687d9cb9, 5},
- {0, 0, 0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c, 9},
- {0, 0, 0x2b665362c4e812df, 0x3a0d80d68b3f8bc8, 13},
- {0, 0, 0x566be7ba4365b70a, 0x01ebbc1937d76cb4, 21},
- {0, 0, 0x380e2deb9d3f8aae, 0x017e0de0bcc6ca42, 30},
- {0, 0, 0xa46a3a9bcb43f4e5, 0x1c62c8473bdfcffb, 45},
- {0, 0, 0xf6759d85f23b5a2b, 0x5c042ae42c6d12c1, 61}};
+ {0, 0, 0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636, 3},
+ {0, 0, 0x64BE4F6DBE9CAA51, 0x6B23DE1A687D9CB9, 5},
+ {0, 0, 0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 9},
+ {0, 0, 0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 13},
+ {0, 0, 0x566BE7BA4365B70A, 0x01EBBC1937D76CB4, 21},
+ {0, 0, 0x380E2DEB9D3F8AAE, 0x017E0DE0BCC6CA42, 30},
+ {0, 0, 0xA46A3A9BCB43F4E5, 0x1C62C8473BDFCFFB, 45},
+ {0, 0, 0xF6759D85F23B5A2B, 0x5C042AE42C6D12C1, 61}};
#define SLLI_SRLI_DF(lanes, mask, func) \
[](uint64_t wd, uint64_t ws, uint32_t m) { \
@@ -8409,14 +8374,14 @@ TEST(MSA_bclri_bseti_bnegi) {
struct TestCaseMsaBit tc[] = {
// wd_lo, wd_hi, ws_lo, ws_hi, m
- {0, 0, 0xf35862e13e38f8b0, 0x4f41ffdef2bfe636, 3},
- {0, 0, 0x64be4f6dbe9caa51, 0x6b23de1a687d9cb9, 5},
- {0, 0, 0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c, 9},
- {0, 0, 0x2b665362c4e812df, 0x3a0d80d68b3f8bc8, 13},
- {0, 0, 0x566be7ba4365b70a, 0x01ebbc1937d76cb4, 21},
- {0, 0, 0x380e2deb9d3f8aae, 0x017e0de0bcc6ca42, 30},
- {0, 0, 0xa46a3a9bcb43f4e5, 0x1c62c8473bdfcffb, 45},
- {0, 0, 0xf6759d85f23b5a2b, 0x5c042ae42c6d12c1, 61}};
+ {0, 0, 0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636, 3},
+ {0, 0, 0x64BE4F6DBE9CAA51, 0x6B23DE1A687D9CB9, 5},
+ {0, 0, 0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 9},
+ {0, 0, 0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 13},
+ {0, 0, 0x566BE7BA4365B70A, 0x01EBBC1937D76CB4, 21},
+ {0, 0, 0x380E2DEB9D3F8AAE, 0x017E0DE0BCC6CA42, 30},
+ {0, 0, 0xA46A3A9BCB43F4E5, 0x1C62C8473BDFCFFB, 45},
+ {0, 0, 0xF6759D85F23B5A2B, 0x5C042AE42C6D12C1, 61}};
#define BCLRI_BSETI_BNEGI_DF(lanes, mask, func) \
[](uint64_t wd, uint64_t ws, uint32_t m) { \
@@ -8504,22 +8469,22 @@ TEST(MSA_binsli_binsri) {
CcTest::InitializeVM();
struct TestCaseMsaBit tc[] = {// wd_lo, wd_hi, ws_lo, ws_hi, m
- {0x53f4457553bbd5b4, 0x5fb8250eacc296b2,
- 0xf35862e13e38f8b0, 0x4f41ffdef2bfe636, 3},
- {0xf61bfdb0f312e6fc, 0xc9437568dd1ea925,
- 0x64be4f6dbe9caa51, 0x6b23de1a687d9cb9, 5},
- {0x53f4457553bbd5b4, 0x5fb8250eacc296b2,
- 0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c, 9},
- {0xf61bfdb0f312e6fc, 0xc9437568dd1ea925,
- 0x2b665362c4e812df, 0x3a0d80d68b3f8bc8, 13},
- {0x53f4457553bbd5b4, 0x5fb8250eacc296b2,
- 0x566be7ba4365b70a, 0x01ebbc1937d76cb4, 21},
- {0xf61bfdb0f312e6fc, 0xc9437568dd1ea925,
- 0x380e2deb9d3f8aae, 0x017e0de0bcc6ca42, 30},
- {0x53f4457553bbd5b4, 0x5fb8250eacc296b2,
- 0xa46a3a9bcb43f4e5, 0x1c62c8473bdfcffb, 45},
- {0xf61bfdb0f312e6fc, 0xc9437568dd1ea925,
- 0xf6759d85f23b5a2b, 0x5c042ae42c6d12c1, 61}};
+ {0x53F4457553BBD5B4, 0x5FB8250EACC296B2,
+ 0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636, 3},
+ {0xF61BFDB0F312E6FC, 0xC9437568DD1EA925,
+ 0x64BE4F6DBE9CAA51, 0x6B23DE1A687D9CB9, 5},
+ {0x53F4457553BBD5B4, 0x5FB8250EACC296B2,
+ 0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 9},
+ {0xF61BFDB0F312E6FC, 0xC9437568DD1EA925,
+ 0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 13},
+ {0x53F4457553BBD5B4, 0x5FB8250EACC296B2,
+ 0x566BE7BA4365B70A, 0x01EBBC1937D76CB4, 21},
+ {0xF61BFDB0F312E6FC, 0xC9437568DD1EA925,
+ 0x380E2DEB9D3F8AAE, 0x017E0DE0BCC6CA42, 30},
+ {0x53F4457553BBD5B4, 0x5FB8250EACC296B2,
+ 0xA46A3A9BCB43F4E5, 0x1C62C8473BDFCFFB, 45},
+ {0xF61BFDB0F312E6FC, 0xC9437568DD1EA925,
+ 0xF6759D85F23B5A2B, 0x5C042AE42C6D12C1, 61}};
#define BINSLI_BINSRI_DF(lanes, mask, func) \
[](uint64_t wd, uint64_t ws, uint32_t m) { \
@@ -8596,14 +8561,14 @@ TEST(MSA_sat_s_sat_u) {
struct TestCaseMsaBit tc[] = {
// wd_lo, wd_hi, ws_lo, ws_hi, m
- {0, 0, 0xf35862e13e3808b0, 0x4f41ffdef2bfe636, 3},
- {0, 0, 0x64be4f6dbe9caa51, 0x6b23de1a687d9cb9, 5},
- {0, 0, 0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c, 9},
- {0, 0, 0x2b665362c4e812df, 0x3a0d80d68b3f8bc8, 13},
- {0, 0, 0x566be7ba4365b70a, 0x01ebbc1937d76cb4, 21},
- {0, 0, 0x380e2deb9d3f8aae, 0x017e0de0bcc6ca42, 30},
- {0, 0, 0xa46a3a9bcb43f4e5, 0x1c62c8473bdfcffb, 45},
- {0, 0, 0xf6759d85f23b5a2b, 0x5c042ae42c6d12c1, 61}};
+ {0, 0, 0xF35862E13E3808B0, 0x4F41FFDEF2BFE636, 3},
+ {0, 0, 0x64BE4F6DBE9CAA51, 0x6B23DE1A687D9CB9, 5},
+ {0, 0, 0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 9},
+ {0, 0, 0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 13},
+ {0, 0, 0x566BE7BA4365B70A, 0x01EBBC1937D76CB4, 21},
+ {0, 0, 0x380E2DEB9D3F8AAE, 0x017E0DE0BCC6CA42, 30},
+ {0, 0, 0xA46A3A9BCB43F4E5, 0x1C62C8473BDFCFFB, 45},
+ {0, 0, 0xF6759D85F23B5A2B, 0x5C042AE42C6D12C1, 61}};
#define SAT_DF(lanes, mask, func) \
[](uint64_t wd, uint64_t ws, uint32_t m) { \
@@ -8712,9 +8677,9 @@ void run_msa_i10(int32_t input, InstFunc GenerateVectorInstructionFunc,
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
- (CALL_GENERATED_CODE(isolate, f, &res, 0, 0, 0, 0));
+ (f.Call(&res, 0, 0, 0, 0));
CHECK_EQ(GenerateOperationFunc(input), res.d[0]);
CHECK_EQ(GenerateOperationFunc(input), res.d[1]);
@@ -8791,9 +8756,9 @@ void run_msa_mi10(InstFunc GenerateVectorInstructionFunc) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F4 f = FUNCTION_CAST<F4>(code->entry());
+ auto f = GeneratedCode<F4>::FromCode(*code);
- (CALL_GENERATED_CODE(isolate, f, in_array_middle, out_array_middle, 0, 0, 0));
+ (f.Call(in_array_middle, out_array_middle, 0, 0, 0));
CHECK_EQ(memcmp(in_test_vector, out_test_vector, arraysize(in_test_vector)),
0);
@@ -8871,9 +8836,9 @@ void run_msa_3r(struct TestCaseMsa3R* input, InstFunc GenerateI5InstructionFunc,
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
- (CALL_GENERATED_CODE(isolate, f, &res, 0, 0, 0, 0));
+ (f.Call(&res, 0, 0, 0, 0));
GenerateOperationFunc(&input->ws_lo, &input->wt_lo, &input->wd_lo);
if (input->wd_lo != Unpredictable) {
@@ -8891,32 +8856,32 @@ TEST(MSA_3R_instructions) {
CcTest::InitializeVM();
struct TestCaseMsa3R tc[] = {
- {0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c, 0x1169751bb9a7d9c3,
- 0xf7a594aec8ef8a9c, 0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c},
- {0x2b665362c4e812df, 0x3a0d80d68b3f8bc8, 0x2b665362c4e812df,
- 0x3a0d80d68b3f8bc8, 0x2b665362c4e812df, 0x3a0d80d68b3f8bc8},
- {0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c, 0x1169751bb9a7d9c3,
- 0xf7a594aec8ef8a9c, 0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c},
- {0x2b665362c4e812df, 0x3a0d80d68b3f8bc8, 0x2b665362c4e812df,
- 0x3a0d80d68b3f8bc8, 0x2b665362c4e812df, 0x3a0d80d68b3f8bc8},
- {0xffab807f807fffcd, 0x7f23ff80ff567f80, 0xffab807f807fffcd,
- 0x7f23ff80ff567f80, 0xffab807f807fffcd, 0x7f23ff80ff567f80},
- {0x80ffefff7f12807f, 0x807f80ff7fdeff78, 0x80ffefff7f12807f,
- 0x807f80ff7fdeff78, 0x80ffefff7f12807f, 0x807f80ff7fdeff78},
- {0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff,
- 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff},
- {0x0000000000000000, 0xffffffffffffffff, 0xffffffffffffffff,
- 0x0000000000000000, 0x0000000000000000, 0xffffffffffffffff},
- {0xffff0000ffff0000, 0xffff0000ffff0000, 0xffff0000ffff0000,
- 0xffff0000ffff0000, 0xffff0000ffff0000, 0xffff0000ffff0000},
- {0xff00ff00ff00ff00, 0xff00ff00ff00ff00, 0xff00ff00ff00ff00,
- 0xff00ff00ff00ff00, 0xff00ff00ff00ff00, 0xff00ff00ff00ff00},
- {0xf0f0f0f0f0f0f0f0, 0xf0f0f0f0f0f0f0f0, 0xf0f0f0f0f0f0f0f0,
- 0xf0f0f0f0f0f0f0f0, 0xf0f0f0f0f0f0f0f0, 0xf0f0f0f0f0f0f0f0},
- {0xff0000ffff0000ff, 0xff0000ffff0000ff, 0xff0000ffff0000ff,
- 0xff0000ffff0000ff, 0xff0000ffff0000ff, 0xff0000ffff0000ff},
- {0xffff00000000ffff, 0xffff00000000ffff, 0xffff00000000ffff,
- 0xffff00000000ffff, 0xffff00000000ffff, 0xffff00000000ffff}};
+ {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0x1169751BB9A7D9C3,
+ 0xF7A594AEC8EF8A9C, 0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C},
+ {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 0x2B665362C4E812DF,
+ 0x3A0D80D68B3F8BC8, 0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8},
+ {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0x1169751BB9A7D9C3,
+ 0xF7A594AEC8EF8A9C, 0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C},
+ {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 0x2B665362C4E812DF,
+ 0x3A0D80D68B3F8BC8, 0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8},
+ {0xFFAB807F807FFFCD, 0x7F23FF80FF567F80, 0xFFAB807F807FFFCD,
+ 0x7F23FF80FF567F80, 0xFFAB807F807FFFCD, 0x7F23FF80FF567F80},
+ {0x80FFEFFF7F12807F, 0x807F80FF7FDEFF78, 0x80FFEFFF7F12807F,
+ 0x807F80FF7FDEFF78, 0x80FFEFFF7F12807F, 0x807F80FF7FDEFF78},
+ {0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF,
+ 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF},
+ {0x0000000000000000, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF,
+ 0x0000000000000000, 0x0000000000000000, 0xFFFFFFFFFFFFFFFF},
+ {0xFFFF0000FFFF0000, 0xFFFF0000FFFF0000, 0xFFFF0000FFFF0000,
+ 0xFFFF0000FFFF0000, 0xFFFF0000FFFF0000, 0xFFFF0000FFFF0000},
+ {0xFF00FF00FF00FF00, 0xFF00FF00FF00FF00, 0xFF00FF00FF00FF00,
+ 0xFF00FF00FF00FF00, 0xFF00FF00FF00FF00, 0xFF00FF00FF00FF00},
+ {0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0,
+ 0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0},
+ {0xFF0000FFFF0000FF, 0xFF0000FFFF0000FF, 0xFF0000FFFF0000FF,
+ 0xFF0000FFFF0000FF, 0xFF0000FFFF0000FF, 0xFF0000FFFF0000FF},
+ {0xFFFF00000000FFFF, 0xFFFF00000000FFFF, 0xFFFF00000000FFFF,
+ 0xFFFF00000000FFFF, 0xFFFF00000000FFFF, 0xFFFF00000000FFFF}};
#define SLL_DF(T, lanes, mask) \
int size_in_bits = kMSARegSize / lanes; \
@@ -9510,8 +9475,8 @@ TEST(MSA_3R_instructions) {
T* ws_p = reinterpret_cast<T*>(ws); \
T* wt_p = reinterpret_cast<T*>(wt); \
T* wd_p = reinterpret_cast<T*>(wd); \
- const int mask_not_valid = 0xc0; \
- const int mask_6bits = 0x3f; \
+ const int mask_not_valid = 0xC0; \
+ const int mask_6bits = 0x3F; \
for (int i = 0; i < lanes; ++i) { \
if ((wd_p[i] & mask_not_valid)) { \
wd_p[i] = 0; \
@@ -9877,9 +9842,9 @@ void run_msa_3rf(const struct TestCaseMsa3RF* input,
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
- (CALL_GENERATED_CODE(isolate, f, &res, 0, 0, 0, 0));
+ (f.Call(&res, 0, 0, 0, 0));
CHECK_EQ(output->exp_res_lo, res.d[0]);
CHECK_EQ(output->exp_res_hi, res.d[1]);
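
[The pattern in the hunk above — `auto f = GeneratedCode<F3>::FromCode(*code); f.Call(...)` replacing `FUNCTION_CAST` plus `CALL_GENERATED_CODE` — recurs through the rest of this patch. A minimal sketch of the idea behind the wrapper, using illustrative names rather than V8's actual src/simulator.h declaration (the real class also routes calls through the simulator on non-host builds):

#include <cstdint>

// Illustrative stand-in, not V8's real header: wrap a raw entry point in a
// typed callable so call sites write f.Call(args...) directly.
template <typename Signature>
class GeneratedCode;

template <typename Return, typename... Args>
class GeneratedCode<Return(Args...)> {
 public:
  // V8's version is built from a Code object; taking a raw address here
  // keeps the sketch self-contained.
  static GeneratedCode FromAddress(uintptr_t entry) {
    return GeneratedCode(reinterpret_cast<Return (*)(Args...)>(entry));
  }
  Return Call(Args... args) { return fn_(args...); }

 private:
  explicit GeneratedCode(Return (*fn)(Args...)) : fn_(fn) {}
  Return (*fn_)(Args...);
};
]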
@@ -10260,70 +10225,70 @@ TEST(MSA_fixed_point_arithmetic) {
CcTest::InitializeVM();
const struct TestCaseMsa3RF tc_h[]{
- {0x800080007fff7fff, 0xe1ed8000fad3863a, 0x80007fff00af7fff,
- 0x800015a77fffa0eb, 0x7fff800080007fff, 0x80007fff1f207364},
- {0x800080007fff006a, 0x002affc4329ad87b, 0x80007fff7fff00f3,
- 0xffecffb4d0d7f429, 0x80007fff80007c33, 0x54ac6bbce53b8c91}};
+ {0x800080007FFF7FFF, 0xE1ED8000FAD3863A, 0x80007FFF00AF7FFF,
+ 0x800015A77FFFA0EB, 0x7FFF800080007FFF, 0x80007FFF1F207364},
+ {0x800080007FFF006A, 0x002AFFC4329AD87B, 0x80007FFF7FFF00F3,
+ 0xFFECFFB4D0D7F429, 0x80007FFF80007C33, 0x54AC6BBCE53B8C91}};
const struct TestCaseMsa3RF tc_w[]{
- {0x8000000080000000, 0x7fffffff7fffffff, 0x800000007fffffff,
- 0x00001ff37fffffff, 0x7fffffff80000000, 0x800000007fffffff},
- {0xe1ed035580000000, 0xfad3863aed462c0b, 0x8000000015a70aec,
- 0x7fffffffa0ebd354, 0x800000007fffffff, 0xd0d7f4291f207364},
- {0x8000000080000000, 0x7fffffff0000da1f, 0x800000007fffffff,
- 0x7fffffff00f39c3b, 0x800000007fffffff, 0x800000007c33f2fd},
- {0x0000ac33ffff329a, 0x54ac6bbce53bd87b, 0xffffe2b4d0d7f429,
- 0x0355ed462c0b1ff3, 0xb5deb625939dd3f9, 0xe642adfa69519596}};
+ {0x8000000080000000, 0x7FFFFFFF7FFFFFFF, 0x800000007FFFFFFF,
+ 0x00001FF37FFFFFFF, 0x7FFFFFFF80000000, 0x800000007FFFFFFF},
+ {0xE1ED035580000000, 0xFAD3863AED462C0B, 0x8000000015A70AEC,
+ 0x7FFFFFFFA0EBD354, 0x800000007FFFFFFF, 0xD0D7F4291F207364},
+ {0x8000000080000000, 0x7FFFFFFF0000DA1F, 0x800000007FFFFFFF,
+ 0x7FFFFFFF00F39C3B, 0x800000007FFFFFFF, 0x800000007C33F2FD},
+ {0x0000AC33FFFF329A, 0x54AC6BBCE53BD87B, 0xFFFFE2B4D0D7F429,
+ 0x0355ED462C0B1FF3, 0xB5DEB625939DD3F9, 0xE642ADFA69519596}};
const struct ExpectedResult_MSA3RF exp_res_mul_q_h[] = {
- {0x7fff800100ae7ffe, 0x1e13ea59fad35a74},
- {0x7fff80017ffe0000, 0xffff0000ed5b03a7}};
+ {0x7FFF800100AE7FFE, 0x1E13EA59FAD35A74},
+ {0x7FFF80017FFE0000, 0xFFFF0000ED5B03A7}};
const struct ExpectedResult_MSA3RF exp_res_madd_q_h[] = {
- {0x7fff800080ae7fff, 0x9e136a5819f37fff},
- {0x00000000fffe7c33, 0x54ab6bbcd2969038}};
+ {0x7FFF800080AE7FFF, 0x9E136A5819F37FFF},
+ {0x00000000FFFE7C33, 0x54AB6BBCD2969038}};
const struct ExpectedResult_MSA3RF exp_res_msub_q_h[] = {
- {0xffffffff80000000, 0x80007fff244c18ef},
- {0x80007fff80007c32, 0x54ac6bbbf7df88e9}};
+ {0xFFFFFFFF80000000, 0x80007FFF244C18EF},
+ {0x80007FFF80007C32, 0x54AC6BBBF7DF88E9}};
const struct ExpectedResult_MSA3RF exp_res_mulr_q_h[] = {
- {0x7fff800100af7ffe, 0x1e13ea59fad35a75},
- {0x7fff80017ffe0001, 0x00000000ed5b03a8}};
+ {0x7FFF800100AF7FFE, 0x1E13EA59FAD35A75},
+ {0x7FFF80017FFE0001, 0x00000000ED5B03A8}};
const struct ExpectedResult_MSA3RF exp_res_maddr_q_h[] = {
- {0x7fff800080af7fff, 0x9e136a5819f37fff},
- {0x00000000fffe7c34, 0x54ac6bbcd2969039}};
+ {0x7FFF800080AF7FFF, 0x9E136A5819F37FFF},
+ {0x00000000FFFE7C34, 0x54AC6BBCD2969039}};
const struct ExpectedResult_MSA3RF exp_res_msubr_q_h[] = {
- {0xffffffff80000001, 0x80007fff244d18ef},
- {0x80007fff80007c32, 0x54ac6bbcf7e088e9}};
+ {0xFFFFFFFF80000001, 0x80007FFF244D18EF},
+ {0x80007FFF80007C32, 0x54AC6BBCF7E088E9}};
const struct ExpectedResult_MSA3RF exp_res_mul_q_w[] = {
- {0x7fffffff80000001, 0x00001ff27ffffffe},
- {0x1e12fcabea58f514, 0xfad3863a0de8dee1},
- {0x7fffffff80000001, 0x7ffffffe0000019f},
- {0xffffffff00004bab, 0x0234e1fbf6ca3ee0}};
+ {0x7FFFFFFF80000001, 0x00001FF27FFFFFFE},
+ {0x1E12FCABEA58F514, 0xFAD3863A0DE8DEE1},
+ {0x7FFFFFFF80000001, 0x7FFFFFFE0000019F},
+ {0xFFFFFFFF00004BAB, 0x0234E1FBF6CA3EE0}};
const struct ExpectedResult_MSA3RF exp_res_madd_q_w[] = {
- {0x7fffffff80000000, 0x80001ff27fffffff},
- {0x9e12fcab6a58f513, 0xcbab7a632d095245},
- {0x0000000000000000, 0xfffffffe7c33f49c},
- {0xb5deb624939e1fa4, 0xe8778ff5601bd476}};
+ {0x7FFFFFFF80000000, 0x80001FF27FFFFFFF},
+ {0x9E12FCAB6A58F513, 0xCBAB7A632D095245},
+ {0x0000000000000000, 0xFFFFFFFE7C33F49C},
+ {0xB5DEB624939E1FA4, 0xE8778FF5601BD476}};
const struct ExpectedResult_MSA3RF exp_res_msub_q_w[] = {
- {0xffffffffffffffff, 0x8000000000000000},
- {0x800000007fffffff, 0xd6046dee11379482},
- {0x800000007fffffff, 0x800000007c33f15d},
- {0xb5deb625939d884d, 0xe40dcbfe728756b5}};
+ {0xFFFFFFFFFFFFFFFF, 0x8000000000000000},
+ {0x800000007FFFFFFF, 0xD6046DEE11379482},
+ {0x800000007FFFFFFF, 0x800000007C33F15D},
+ {0xB5DEB625939D884D, 0xE40DCBFE728756B5}};
const struct ExpectedResult_MSA3RF exp_res_mulr_q_w[] = {
- {0x7fffffff80000001, 0x00001ff37ffffffe},
- {0x1e12fcabea58f514, 0xfad3863a0de8dee2},
- {0x7fffffff80000001, 0x7ffffffe0000019f},
- {0x0000000000004bac, 0x0234e1fcf6ca3ee1}};
+ {0x7FFFFFFF80000001, 0x00001FF37FFFFFFE},
+ {0x1E12FCABEA58F514, 0xFAD3863A0DE8DEE2},
+ {0x7FFFFFFF80000001, 0x7FFFFFFE0000019F},
+ {0x0000000000004BAC, 0x0234E1FCF6CA3EE1}};
const struct ExpectedResult_MSA3RF exp_res_maddr_q_w[] = {
- {0x7fffffff80000000, 0x80001ff37fffffff},
- {0x9e12fcab6a58f513, 0xcbab7a632d095246},
- {0x0000000000000000, 0xfffffffe7c33f49c},
- {0xb5deb625939e1fa5, 0xe8778ff6601bd477}};
+ {0x7FFFFFFF80000000, 0x80001FF37FFFFFFF},
+ {0x9E12FCAB6A58F513, 0xCBAB7A632D095246},
+ {0x0000000000000000, 0xFFFFFFFE7C33F49C},
+ {0xB5DEB625939E1FA5, 0xE8778FF6601BD477}};
const struct ExpectedResult_MSA3RF exp_res_msubr_q_w[] = {
- {0xffffffffffffffff, 0x8000000000000001},
- {0x800000007fffffff, 0xd6046def11379482},
- {0x800000007fffffff, 0x800000007c33f15e},
- {0xb5deb625939d884d, 0xe40dcbfe728756b5}};
+ {0xFFFFFFFFFFFFFFFF, 0x8000000000000001},
+ {0x800000007FFFFFFF, 0xD6046DEF11379482},
+ {0x800000007FFFFFFF, 0x800000007C33F15E},
+ {0xB5DEB625939D884D, 0xE40DCBFE728756B5}};
#define TEST_FIXED_POINT_DF_H(instruction, src, exp_res) \
run_msa_3rf((src), (exp_res), \
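
[The `mul_q`/`madd_q` expectations above are Q-format fixed-point products. A hedged sketch of the Q15 (`.h`) case, assuming truncating multiplication with saturation at the positive limit and an arithmetic right shift on negative values (true on mainstream compilers):

#include <cstdint>
#include <cassert>

// Q15 multiply: (a * b) >> 15, with the one overflowing case
// (-1.0 * -1.0) saturated to 0x7FFF.
static int16_t mul_q15(int16_t a, int16_t b) {
  int32_t p = (static_cast<int32_t>(a) * b) >> 15;
  return static_cast<int16_t>(p > INT16_MAX ? INT16_MAX : p);
}

int main() {
  assert(mul_q15(INT16_MIN, INT16_MIN) == INT16_MAX);  // saturating case
  assert(mul_q15(0x4000, 0x4000) == 0x2000);           // 0.5 * 0.5 == 0.25
  return 0;
}
]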
@@ -10383,31 +10348,31 @@ TEST(MSA_fexdo) {
const struct ExpRes_16I exp_res_fexdo_w[] = {
{static_cast<int16_t>(0x0410), static_cast<int16_t>(0x0347),
- static_cast<int16_t>(0xd00d), static_cast<int16_t>(0xfc00),
- static_cast<int16_t>(0x7c00), static_cast<int16_t>(0x7dff),
- static_cast<int16_t>(0x7c00), static_cast<int16_t>(0x7bff)},
+ static_cast<int16_t>(0xD00D), static_cast<int16_t>(0xFC00),
+ static_cast<int16_t>(0x7C00), static_cast<int16_t>(0x7DFF),
+ static_cast<int16_t>(0x7C00), static_cast<int16_t>(0x7BFF)},
{static_cast<int16_t>(0x8001), static_cast<int16_t>(0x0001),
static_cast<int16_t>(0x0002), static_cast<int16_t>(0x8000),
static_cast<int16_t>(0x8000), static_cast<int16_t>(0x0000),
- static_cast<int16_t>(0x57b9), static_cast<int16_t>(0xe1fb)},
+ static_cast<int16_t>(0x57B9), static_cast<int16_t>(0xE1FB)},
{static_cast<int16_t>(0x0001), static_cast<int16_t>(0x8000),
- static_cast<int16_t>(0xfc00), static_cast<int16_t>(0xfbff),
- static_cast<int16_t>(0x0000), static_cast<int16_t>(0x7c00),
- static_cast<int16_t>(0xfc00), static_cast<int16_t>(0x0000)}};
+ static_cast<int16_t>(0xFC00), static_cast<int16_t>(0xFBFF),
+ static_cast<int16_t>(0x0000), static_cast<int16_t>(0x7C00),
+ static_cast<int16_t>(0xFC00), static_cast<int16_t>(0x0000)}};
const struct ExpRes_32I exp_res_fexdo_d[] = {
- {bit_cast<int32_t>(0x7f800000), bit_cast<int32_t>(0x7f7fc99e),
- bit_cast<int32_t>(0x7f800000), bit_cast<int32_t>(0xc49a4000)},
- {bit_cast<int32_t>(0xc21bae14), bit_cast<int32_t>(0xff800000),
- bit_cast<int32_t>(0x0082ab1e), bit_cast<int32_t>(0x000bfa5a)},
- {bit_cast<int32_t>(0x7673b164), bit_cast<int32_t>(0xfb13653d),
+ {bit_cast<int32_t>(0x7F800000), bit_cast<int32_t>(0x7F7FC99E),
+ bit_cast<int32_t>(0x7F800000), bit_cast<int32_t>(0xC49A4000)},
+ {bit_cast<int32_t>(0xC21BAE14), bit_cast<int32_t>(0xFF800000),
+ bit_cast<int32_t>(0x0082AB1E), bit_cast<int32_t>(0x000BFA5A)},
+ {bit_cast<int32_t>(0x7673B164), bit_cast<int32_t>(0xFB13653D),
bit_cast<int32_t>(0x80000000), bit_cast<int32_t>(0x00000000)},
- {bit_cast<int32_t>(0x000002ca), bit_cast<int32_t>(0x80000000),
+ {bit_cast<int32_t>(0x000002CA), bit_cast<int32_t>(0x80000000),
bit_cast<int32_t>(0x80000001), bit_cast<int32_t>(0x00000001)},
- {bit_cast<int32_t>(0xff800000), bit_cast<int32_t>(0x56b5e621),
- bit_cast<int32_t>(0x00000000), bit_cast<int32_t>(0x7f800000)},
- {bit_cast<int32_t>(0xf673b164), bit_cast<int32_t>(0x7b13653d),
- bit_cast<int32_t>(0x0000042e), bit_cast<int32_t>(0x00000000)}};
+ {bit_cast<int32_t>(0xFF800000), bit_cast<int32_t>(0x56B5E621),
+ bit_cast<int32_t>(0x00000000), bit_cast<int32_t>(0x7F800000)},
+ {bit_cast<int32_t>(0xF673B164), bit_cast<int32_t>(0x7B13653D),
+ bit_cast<int32_t>(0x0000042E), bit_cast<int32_t>(0x00000000)}};
#define TEST_FEXDO_H(instruction, src, exp_res) \
run_msa_3rf(reinterpret_cast<const struct TestCaseMsa3RF*>(src), \
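
[The 16-bit values checked in exp_res_fexdo_w above are IEEE-754 binary16 encodings. For reference, the recurring landmark constants (a sketch, not part of the patch):

#include <cstdint>

// binary16 layout: 1 sign bit, 5 exponent bits, 10 mantissa bits.
const uint16_t kHalfPlusInf  = 0x7C00;  // exponent all ones, mantissa zero
const uint16_t kHalfMinusInf = 0xFC00;  // same, with the sign bit set
const uint16_t kHalfMaxNorm  = 0x7BFF;  // 65504.0, the largest finite half
]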
@@ -10457,31 +10422,31 @@ TEST(MSA_ftq) {
{-3e306, 2e-307, 9e307, 2e-307, 0, 0}};
const struct ExpRes_16I exp_res_ftq_w[] = {
- {static_cast<int16_t>(0x0000), static_cast<int16_t>(0xb375),
- static_cast<int16_t>(0x004b), static_cast<int16_t>(0x0000),
- static_cast<int16_t>(0x7fff), static_cast<int16_t>(0x8021),
- static_cast<int16_t>(0x7fff), static_cast<int16_t>(0xffff)},
+ {static_cast<int16_t>(0x0000), static_cast<int16_t>(0xB375),
+ static_cast<int16_t>(0x004B), static_cast<int16_t>(0x0000),
+ static_cast<int16_t>(0x7FFF), static_cast<int16_t>(0x8021),
+ static_cast<int16_t>(0x7FFF), static_cast<int16_t>(0xFFFF)},
{static_cast<int16_t>(0x0000), static_cast<int16_t>(0x8000),
- static_cast<int16_t>(0x7ffd), static_cast<int16_t>(0xfff5),
- static_cast<int16_t>(0x7fff), static_cast<int16_t>(0x8000),
- static_cast<int16_t>(0x8000), static_cast<int16_t>(0x7fff)},
+ static_cast<int16_t>(0x7FFD), static_cast<int16_t>(0xFFF5),
+ static_cast<int16_t>(0x7FFF), static_cast<int16_t>(0x8000),
+ static_cast<int16_t>(0x8000), static_cast<int16_t>(0x7FFF)},
{static_cast<int16_t>(0x0000), static_cast<int16_t>(0x0000),
- static_cast<int16_t>(0x7fff), static_cast<int16_t>(0xffff),
- static_cast<int16_t>(0x0000), static_cast<int16_t>(0x7fff),
+ static_cast<int16_t>(0x7FFF), static_cast<int16_t>(0xFFFF),
+ static_cast<int16_t>(0x0000), static_cast<int16_t>(0x7FFF),
static_cast<int16_t>(0x8000), static_cast<int16_t>(0x0000)}};
const struct ExpRes_32I exp_res_ftq_d[] = {
- {bit_cast<int32_t>(0x7fffffff), bit_cast<int32_t>(0xfffefbf4),
- bit_cast<int32_t>(0x7fffffff), bit_cast<int32_t>(0x8020c49c)},
- {bit_cast<int32_t>(0x004b5dcc), bit_cast<int32_t>(0x00000000),
- bit_cast<int32_t>(0x000000d7), bit_cast<int32_t>(0xb374bc6a)},
- {bit_cast<int32_t>(0x80000000), bit_cast<int32_t>(0x7fffffff),
- bit_cast<int32_t>(0x7fffffff), bit_cast<int32_t>(0x80000000)},
- {bit_cast<int32_t>(0x7ffcb900), bit_cast<int32_t>(0xfff572de),
+ {bit_cast<int32_t>(0x7FFFFFFF), bit_cast<int32_t>(0xFFFEFBF4),
+ bit_cast<int32_t>(0x7FFFFFFF), bit_cast<int32_t>(0x8020C49C)},
+ {bit_cast<int32_t>(0x004B5DCC), bit_cast<int32_t>(0x00000000),
+ bit_cast<int32_t>(0x000000D7), bit_cast<int32_t>(0xB374BC6A)},
+ {bit_cast<int32_t>(0x80000000), bit_cast<int32_t>(0x7FFFFFFF),
+ bit_cast<int32_t>(0x7FFFFFFF), bit_cast<int32_t>(0x80000000)},
+ {bit_cast<int32_t>(0x7FFCB900), bit_cast<int32_t>(0xFFF572DE),
bit_cast<int32_t>(0x00000000), bit_cast<int32_t>(0x80000000)},
{bit_cast<int32_t>(0x80000000), bit_cast<int32_t>(0x00000000),
- bit_cast<int32_t>(0x00000000), bit_cast<int32_t>(0x7fffffff)},
- {bit_cast<int32_t>(0x7fffffff), bit_cast<int32_t>(0x00000000),
+ bit_cast<int32_t>(0x00000000), bit_cast<int32_t>(0x7FFFFFFF)},
+ {bit_cast<int32_t>(0x7FFFFFFF), bit_cast<int32_t>(0x00000000),
bit_cast<int32_t>(0x80000000), bit_cast<int32_t>(0x00000000)}};
#define TEST_FTQ_H(instruction, src, exp_res) \
diff --git a/deps/v8/test/cctest/test-assembler-mips64.cc b/deps/v8/test/cctest/test-assembler-mips64.cc
index f809ea8f39..c59f2af2dc 100644
--- a/deps/v8/test/cctest/test-assembler-mips64.cc
+++ b/deps/v8/test/cctest/test-assembler-mips64.cc
@@ -35,7 +35,7 @@
#include "src/factory.h"
#include "src/macro-assembler.h"
#include "src/mips64/macro-assembler-mips64.h"
-#include "src/mips64/simulator-mips64.h"
+#include "src/simulator.h"
#include "test/cctest/cctest.h"
@@ -43,11 +43,12 @@ namespace v8 {
namespace internal {
// Define these function prototypes to match JSEntryFunction in execution.cc.
-typedef Object* (*F1)(int x, int p1, int p2, int p3, int p4);
-typedef Object* (*F2)(int x, int y, int p2, int p3, int p4);
-typedef Object* (*F3)(void* p, int p1, int p2, int p3, int p4);
-typedef Object* (*F4)(int64_t x, int64_t y, int64_t p2, int64_t p3, int64_t p4);
-typedef Object* (*F5)(void* p0, void* p1, int p2, int p3, int p4);
+// TODO(mips64): Refine these signatures per test case.
+typedef Object*(F1)(int x, int p1, int p2, int p3, int p4);
+typedef Object*(F2)(int x, int y, int p2, int p3, int p4);
+typedef Object*(F3)(void* p, int p1, int p2, int p3, int p4);
+typedef Object*(F4)(int64_t x, int64_t y, int64_t p2, int64_t p3, int64_t p4);
+typedef Object*(F5)(void* p0, void* p1, int p2, int p3, int p4);
#define __ assm.
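
[The dropped `(*)` in the new typedefs is what makes them usable with `GeneratedCode<Signature>`: the template is specialized on a plain function type, not a pointer-to-function type. A sketch of the distinction, with hypothetical names:

// OldStyle names a pointer-to-function type; NewStyle names a function type.
typedef int (*OldStyle)(int);
typedef int(NewStyle)(int);

// A partial specialization on Return(Args...) matches NewStyle but not
// OldStyle, which is Return (*)(Args...).
template <typename T> struct Sig;                          // primary, undefined
template <typename R, typename... A> struct Sig<R(A...)> {};
Sig<NewStyle> matches_fine;   // OK
// Sig<OldStyle> would_not;   // error: no specialization matches
]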
@@ -68,10 +69,9 @@ TEST(MIPS0) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F2 f = FUNCTION_CAST<F2>(code->entry());
- int64_t res = reinterpret_cast<int64_t>(
- CALL_GENERATED_CODE(isolate, f, 0xab0, 0xc, 0, 0, 0));
- CHECK_EQ(0xabcL, res);
+ auto f = GeneratedCode<F2>::FromCode(*code);
+ int64_t res = reinterpret_cast<int64_t>(f.Call(0xAB0, 0xC, 0, 0, 0));
+ CHECK_EQ(0xABCL, res);
}
@@ -105,9 +105,8 @@ TEST(MIPS1) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F1 f = FUNCTION_CAST<F1>(code->entry());
- int64_t res = reinterpret_cast<int64_t>(
- CALL_GENERATED_CODE(isolate, f, 50, 0, 0, 0, 0));
+ auto f = GeneratedCode<F1>::FromCode(*code);
+ int64_t res = reinterpret_cast<int64_t>(f.Call(50, 0, 0, 0, 0));
CHECK_EQ(1275L, res);
}
@@ -130,8 +129,8 @@ TEST(MIPS2) {
__ ori(a4, zero_reg, 0);
__ lui(a4, 0x1234);
__ ori(a4, a4, 0);
- __ ori(a4, a4, 0x0f0f);
- __ ori(a4, a4, 0xf0f0);
+ __ ori(a4, a4, 0x0F0F);
+ __ ori(a4, a4, 0xF0F0);
__ addiu(a5, a4, 1);
__ addiu(a6, a5, -0x10);
@@ -139,20 +138,20 @@ TEST(MIPS2) {
__ li(a4, 0x00000004);
__ li(a5, 0x00001234);
__ li(a6, 0x12345678);
- __ li(a7, 0x7fffffff);
- __ li(t0, 0xfffffffc);
- __ li(t1, 0xffffedcc);
- __ li(t2, 0xedcba988);
+ __ li(a7, 0x7FFFFFFF);
+ __ li(t0, 0xFFFFFFFC);
+ __ li(t1, 0xFFFFEDCC);
+ __ li(t2, 0xEDCBA988);
__ li(t3, 0x80000000);
// SPECIAL class.
__ srl(v0, a6, 8); // 0x00123456
- __ sll(v0, v0, 11); // 0x91a2b000
- __ sra(v0, v0, 3); // 0xf2345600
- __ srav(v0, v0, a4); // 0xff234560
- __ sllv(v0, v0, a4); // 0xf2345600
- __ srlv(v0, v0, a4); // 0x0f234560
- __ Branch(&error, ne, v0, Operand(0x0f234560));
+ __ sll(v0, v0, 11); // 0x91A2B000
+ __ sra(v0, v0, 3); // 0xF2345600
+ __ srav(v0, v0, a4); // 0xFF234560
+ __ sllv(v0, v0, a4); // 0xF2345600
+ __ srlv(v0, v0, a4); // 0x0F234560
+ __ Branch(&error, ne, v0, Operand(0x0F234560));
__ nop();
__ addu(v0, a4, a5); // 0x00001238
@@ -160,17 +159,17 @@ TEST(MIPS2) {
__ Branch(&error, ne, v0, Operand(0x00001234));
__ nop();
__ addu(v1, a7, a4); // 32bit addu result is sign-extended into 64bit reg.
- __ Branch(&error, ne, v1, Operand(0xffffffff80000003));
+ __ Branch(&error, ne, v1, Operand(0xFFFFFFFF80000003));
__ nop();
- __ subu(v1, t3, a4); // 0x7ffffffc
- __ Branch(&error, ne, v1, Operand(0x7ffffffc));
+ __ subu(v1, t3, a4); // 0x7FFFFFFC
+ __ Branch(&error, ne, v1, Operand(0x7FFFFFFC));
__ nop();
__ and_(v0, a5, a6); // 0x0000000000001230
__ or_(v0, v0, a5); // 0x0000000000001234
- __ xor_(v0, v0, a6); // 0x000000001234444c
- __ nor(v0, v0, a6); // 0xffffffffedcba987
- __ Branch(&error, ne, v0, Operand(0xffffffffedcba983));
+ __ xor_(v0, v0, a6); // 0x000000001234444C
+ __ nor(v0, v0, a6); // 0xFFFFFFFFEDCBA987
+ __ Branch(&error, ne, v0, Operand(0xFFFFFFFFEDCBA983));
__ nop();
// Shift both 32bit number to left, to preserve meaning of next comparison.
@@ -195,11 +194,11 @@ TEST(MIPS2) {
__ Branch(&error, ne, v0, Operand(0x00007400));
__ nop();
__ addiu(v1, a7, 0x1); // 0x80000000 - result is sign-extended.
- __ Branch(&error, ne, v1, Operand(0xffffffff80000000));
+ __ Branch(&error, ne, v1, Operand(0xFFFFFFFF80000000));
__ nop();
__ slti(v0, a5, 0x00002000); // 0x1
- __ slti(v0, v0, 0xffff8000); // 0x0
+ __ slti(v0, v0, 0xFFFF8000); // 0x0
__ Branch(&error, ne, v0, Operand(zero_reg));
__ nop();
__ sltiu(v0, a5, 0x00002000); // 0x1
@@ -207,13 +206,13 @@ TEST(MIPS2) {
__ Branch(&error, ne, v0, Operand(0x1));
__ nop();
- __ andi(v0, a5, 0xf0f0); // 0x00001030
- __ ori(v0, v0, 0x8a00); // 0x00009a30
- __ xori(v0, v0, 0x83cc); // 0x000019fc
- __ Branch(&error, ne, v0, Operand(0x000019fc));
+ __ andi(v0, a5, 0xF0F0); // 0x00001030
+ __ ori(v0, v0, 0x8A00); // 0x00009A30
+ __ xori(v0, v0, 0x83CC); // 0x000019FC
+ __ Branch(&error, ne, v0, Operand(0x000019FC));
__ nop();
__ lui(v1, 0x8123); // Result is sign-extended into 64bit register.
- __ Branch(&error, ne, v1, Operand(0xffffffff81230000));
+ __ Branch(&error, ne, v1, Operand(0xFFFFFFFF81230000));
__ nop();
// Bit twiddling instructions & conditional moves.
@@ -227,11 +226,11 @@ TEST(MIPS2) {
__ addu(v0, v0, v1); // 51
__ Branch(&error, ne, v0, Operand(51));
__ Movn(a0, a7, a4); // Move a0<-a7 (a4 is NOT 0).
- __ Ins(a0, a5, 12, 8); // 0x7ff34fff
- __ Branch(&error, ne, a0, Operand(0x7ff34fff));
+ __ Ins(a0, a5, 12, 8); // 0x7FF34FFF
+ __ Branch(&error, ne, a0, Operand(0x7FF34FFF));
__ Movz(a0, t2, t3); // a0 not updated (t3 is NOT 0).
- __ Ext(a1, a0, 8, 12); // 0x34f
- __ Branch(&error, ne, a1, Operand(0x34f));
+ __ Ext(a1, a0, 8, 12); // 0x34F
+ __ Branch(&error, ne, a1, Operand(0x34F));
__ Movz(a0, t2, v1); // a0<-t2, v0 is 0, from 8 instr back.
__ Branch(&error, ne, a0, Operand(t2));
@@ -252,9 +251,8 @@ TEST(MIPS2) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F2 f = FUNCTION_CAST<F2>(code->entry());
- int64_t res = reinterpret_cast<int64_t>(
- CALL_GENERATED_CODE(isolate, f, 0xab0, 0xc, 0, 0, 0));
+ auto f = GeneratedCode<F2>::FromCode(*code);
+ int64_t res = reinterpret_cast<int64_t>(f.Call(0xAB0, 0xC, 0, 0, 0));
CHECK_EQ(0x31415926L, res);
}
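
[The shift comments traced through this hunk rely on the arithmetic/logical distinction. A quick check of the `sra` step, assuming `>>` on a negative `int32_t` is an arithmetic shift (true on mainstream compilers, and guaranteed since C++20):

#include <cstdint>
#include <cassert>

int main() {
  int32_t v = static_cast<int32_t>(0x91A2B000);
  assert(static_cast<uint32_t>(v >> 3) == 0xF2345600u);    // sra: sign-fill
  assert((static_cast<uint32_t>(v) >> 3) == 0x12345600u);  // srl: zero-fill
  return 0;
}
]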
@@ -356,7 +354,7 @@ TEST(MIPS3) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
// Double test values.
t.a = 1.5e14;
t.b = 2.75e11;
@@ -373,8 +371,7 @@ TEST(MIPS3) {
t.fd = 0.0;
t.fe = 0.0;
t.ff = 0.0;
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
- USE(dummy);
+ f.Call(&t, 0, 0, 0, 0);
// Expected double results.
CHECK_EQ(1.5e14, t.a);
CHECK_EQ(1.5e14, t.b);
@@ -451,19 +448,18 @@ TEST(MIPS4) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
t.a = 1.5e22;
t.b = 2.75e11;
t.c = 17.17;
t.d = -2.75e11;
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
- USE(dummy);
+ f.Call(&t, 0, 0, 0, 0);
CHECK_EQ(2.75e11, t.a);
CHECK_EQ(2.75e11, t.b);
CHECK_EQ(1.5e22, t.c);
- CHECK_EQ(static_cast<int64_t>(0xffffffffc25001d1L), t.high);
- CHECK_EQ(static_cast<int64_t>(0xffffffffbf800000L), t.low);
+ CHECK_EQ(static_cast<int64_t>(0xFFFFFFFFC25001D1L), t.high);
+ CHECK_EQ(static_cast<int64_t>(0xFFFFFFFFBF800000L), t.low);
}
@@ -518,13 +514,12 @@ TEST(MIPS5) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
t.a = 1.5e4;
t.b = 2.75e8;
t.i = 12345678;
t.j = -100000;
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
- USE(dummy);
+ f.Call(&t, 0, 0, 0, 0);
CHECK_EQ(12345678.0, t.a);
CHECK_EQ(-100000.0, t.b);
@@ -589,25 +584,24 @@ TEST(MIPS6) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
t.ui = 0x11223344;
- t.si = 0x99aabbcc;
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
- USE(dummy);
+ t.si = 0x99AABBCC;
+ f.Call(&t, 0, 0, 0, 0);
CHECK_EQ(static_cast<int32_t>(0x11223344), t.r1);
if (kArchEndian == kLittle) {
CHECK_EQ(static_cast<int32_t>(0x3344), t.r2);
- CHECK_EQ(static_cast<int32_t>(0xffffbbcc), t.r3);
- CHECK_EQ(static_cast<int32_t>(0x0000bbcc), t.r4);
- CHECK_EQ(static_cast<int32_t>(0xffffffcc), t.r5);
- CHECK_EQ(static_cast<int32_t>(0x3333bbcc), t.r6);
+ CHECK_EQ(static_cast<int32_t>(0xFFFFBBCC), t.r3);
+ CHECK_EQ(static_cast<int32_t>(0x0000BBCC), t.r4);
+ CHECK_EQ(static_cast<int32_t>(0xFFFFFFCC), t.r5);
+ CHECK_EQ(static_cast<int32_t>(0x3333BBCC), t.r6);
} else {
CHECK_EQ(static_cast<int32_t>(0x1122), t.r2);
- CHECK_EQ(static_cast<int32_t>(0xffff99aa), t.r3);
- CHECK_EQ(static_cast<int32_t>(0x000099aa), t.r4);
- CHECK_EQ(static_cast<int32_t>(0xffffff99), t.r5);
- CHECK_EQ(static_cast<int32_t>(0x99aa3333), t.r6);
+ CHECK_EQ(static_cast<int32_t>(0xFFFF99AA), t.r3);
+ CHECK_EQ(static_cast<int32_t>(0x000099AA), t.r4);
+ CHECK_EQ(static_cast<int32_t>(0xFFFFFF99), t.r5);
+ CHECK_EQ(static_cast<int32_t>(0x99AA3333), t.r6);
}
}
@@ -677,7 +671,7 @@ TEST(MIPS7) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
t.a = 1.5e14;
t.b = 2.75e11;
t.c = 2.0;
@@ -685,8 +679,7 @@ TEST(MIPS7) {
t.e = 0.0;
t.f = 0.0;
t.result = 0;
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
- USE(dummy);
+ f.Call(&t, 0, 0, 0, 0);
CHECK_EQ(1.5e14, t.a);
CHECK_EQ(2.75e11, t.b);
CHECK_EQ(1, t.result);
@@ -728,11 +721,11 @@ TEST(MIPS8) {
// ROTR instruction (called through the Ror macro).
__ Ror(a5, a4, 0x0004);
__ Ror(a6, a4, 0x0008);
- __ Ror(a7, a4, 0x000c);
+ __ Ror(a7, a4, 0x000C);
__ Ror(t0, a4, 0x0010);
__ Ror(t1, a4, 0x0014);
__ Ror(t2, a4, 0x0018);
- __ Ror(t3, a4, 0x001c);
+ __ Ror(t3, a4, 0x001C);
// Basic word store.
__ Sw(a5, MemOperand(a0, offsetof(T, result_rotr_4)));
@@ -775,10 +768,9 @@ TEST(MIPS8) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
t.input = 0x12345678;
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0x0, 0, 0, 0);
- USE(dummy);
+ f.Call(&t, 0x0, 0, 0, 0);
CHECK_EQ(static_cast<int32_t>(0x81234567), t.result_rotr_4);
CHECK_EQ(static_cast<int32_t>(0x78123456), t.result_rotr_8);
CHECK_EQ(static_cast<int32_t>(0x67812345), t.result_rotr_12);
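
[The rotation results checked above follow from a plain 32-bit rotate right; a minimal standalone model:

#include <cstdint>
#include <cassert>

// Rotate right by n (valid for n in 1..31); mirrors the ROTR expectations.
static uint32_t rotr32(uint32_t x, unsigned n) {
  return (x >> n) | (x << (32 - n));
}

int main() {
  assert(rotr32(0x12345678u, 4) == 0x81234567u);
  assert(rotr32(0x12345678u, 8) == 0x78123456u);
  return 0;
}
]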
@@ -898,17 +890,16 @@ TEST(MIPS10) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
- t.a = 2.147483647e9; // 0x7fffffff -> 0x41DFFFFFFFC00000 as double.
- t.b_long_hi = 0x000000ff; // 0xFF00FF00FF -> 0x426FE01FE01FE000 as double.
- t.b_long_lo = 0x00ff00ff;
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
- USE(dummy);
+ auto f = GeneratedCode<F3>::FromCode(*code);
+ t.a = 2.147483647e9; // 0x7FFFFFFF -> 0x41DFFFFFFFC00000 as double.
+ t.b_long_hi = 0x000000FF; // 0xFF00FF00FF -> 0x426FE01FE01FE000 as double.
+ t.b_long_lo = 0x00FF00FF;
+ f.Call(&t, 0, 0, 0, 0);
CHECK_EQ(static_cast<int32_t>(0x41DFFFFF), t.dbl_exp);
CHECK_EQ(static_cast<int32_t>(0xFFC00000), t.dbl_mant);
CHECK_EQ(0, t.long_hi);
- CHECK_EQ(static_cast<int32_t>(0x7fffffff), t.long_lo);
+ CHECK_EQ(static_cast<int32_t>(0x7FFFFFFF), t.long_lo);
CHECK_EQ(2.147483647e9, t.a_converted);
// 0xFF00FF00FF -> 1.095233372415e12.
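
[The dbl_exp/dbl_mant expectations above come straight from the IEEE-754 encoding of 2^31 - 1; a quick standalone check:

#include <cstdint>
#include <cstring>
#include <cassert>

int main() {
  // 2147483647.0 encodes as 0x41DFFFFFFFC00000; the test reads it back as
  // the two 32-bit halves 0x41DFFFFF (dbl_exp) and 0xFFC00000 (dbl_mant).
  double d = 2147483647.0;
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  assert(bits == 0x41DFFFFFFFC00000ull);
  return 0;
}
]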
@@ -1034,53 +1025,52 @@ TEST(MIPS11) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
- t.reg_init = 0xaabbccdd;
+ auto f = GeneratedCode<F3>::FromCode(*code);
+ t.reg_init = 0xAABBCCDD;
t.mem_init = 0x11223344;
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
- USE(dummy);
+ f.Call(&t, 0, 0, 0, 0);
if (kArchEndian == kLittle) {
- CHECK_EQ(static_cast<int32_t>(0x44bbccdd), t.lwl_0);
- CHECK_EQ(static_cast<int32_t>(0x3344ccdd), t.lwl_1);
- CHECK_EQ(static_cast<int32_t>(0x223344dd), t.lwl_2);
+ CHECK_EQ(static_cast<int32_t>(0x44BBCCDD), t.lwl_0);
+ CHECK_EQ(static_cast<int32_t>(0x3344CCDD), t.lwl_1);
+ CHECK_EQ(static_cast<int32_t>(0x223344DD), t.lwl_2);
CHECK_EQ(static_cast<int32_t>(0x11223344), t.lwl_3);
CHECK_EQ(static_cast<int32_t>(0x11223344), t.lwr_0);
- CHECK_EQ(static_cast<int32_t>(0xaa112233), t.lwr_1);
- CHECK_EQ(static_cast<int32_t>(0xaabb1122), t.lwr_2);
- CHECK_EQ(static_cast<int32_t>(0xaabbcc11), t.lwr_3);
-
- CHECK_EQ(static_cast<int32_t>(0x112233aa), t.swl_0);
- CHECK_EQ(static_cast<int32_t>(0x1122aabb), t.swl_1);
- CHECK_EQ(static_cast<int32_t>(0x11aabbcc), t.swl_2);
- CHECK_EQ(static_cast<int32_t>(0xaabbccdd), t.swl_3);
-
- CHECK_EQ(static_cast<int32_t>(0xaabbccdd), t.swr_0);
- CHECK_EQ(static_cast<int32_t>(0xbbccdd44), t.swr_1);
- CHECK_EQ(static_cast<int32_t>(0xccdd3344), t.swr_2);
- CHECK_EQ(static_cast<int32_t>(0xdd223344), t.swr_3);
+ CHECK_EQ(static_cast<int32_t>(0xAA112233), t.lwr_1);
+ CHECK_EQ(static_cast<int32_t>(0xAABB1122), t.lwr_2);
+ CHECK_EQ(static_cast<int32_t>(0xAABBCC11), t.lwr_3);
+
+ CHECK_EQ(static_cast<int32_t>(0x112233AA), t.swl_0);
+ CHECK_EQ(static_cast<int32_t>(0x1122AABB), t.swl_1);
+ CHECK_EQ(static_cast<int32_t>(0x11AABBCC), t.swl_2);
+ CHECK_EQ(static_cast<int32_t>(0xAABBCCDD), t.swl_3);
+
+ CHECK_EQ(static_cast<int32_t>(0xAABBCCDD), t.swr_0);
+ CHECK_EQ(static_cast<int32_t>(0xBBCCDD44), t.swr_1);
+ CHECK_EQ(static_cast<int32_t>(0xCCDD3344), t.swr_2);
+ CHECK_EQ(static_cast<int32_t>(0xDD223344), t.swr_3);
} else {
CHECK_EQ(static_cast<int32_t>(0x11223344), t.lwl_0);
- CHECK_EQ(static_cast<int32_t>(0x223344dd), t.lwl_1);
- CHECK_EQ(static_cast<int32_t>(0x3344ccdd), t.lwl_2);
- CHECK_EQ(static_cast<int32_t>(0x44bbccdd), t.lwl_3);
+ CHECK_EQ(static_cast<int32_t>(0x223344DD), t.lwl_1);
+ CHECK_EQ(static_cast<int32_t>(0x3344CCDD), t.lwl_2);
+ CHECK_EQ(static_cast<int32_t>(0x44BBCCDD), t.lwl_3);
- CHECK_EQ(static_cast<int32_t>(0xaabbcc11), t.lwr_0);
- CHECK_EQ(static_cast<int32_t>(0xaabb1122), t.lwr_1);
- CHECK_EQ(static_cast<int32_t>(0xaa112233), t.lwr_2);
+ CHECK_EQ(static_cast<int32_t>(0xAABBCC11), t.lwr_0);
+ CHECK_EQ(static_cast<int32_t>(0xAABB1122), t.lwr_1);
+ CHECK_EQ(static_cast<int32_t>(0xAA112233), t.lwr_2);
CHECK_EQ(static_cast<int32_t>(0x11223344), t.lwr_3);
- CHECK_EQ(static_cast<int32_t>(0xaabbccdd), t.swl_0);
- CHECK_EQ(static_cast<int32_t>(0x11aabbcc), t.swl_1);
- CHECK_EQ(static_cast<int32_t>(0x1122aabb), t.swl_2);
- CHECK_EQ(static_cast<int32_t>(0x112233aa), t.swl_3);
+ CHECK_EQ(static_cast<int32_t>(0xAABBCCDD), t.swl_0);
+ CHECK_EQ(static_cast<int32_t>(0x11AABBCC), t.swl_1);
+ CHECK_EQ(static_cast<int32_t>(0x1122AABB), t.swl_2);
+ CHECK_EQ(static_cast<int32_t>(0x112233AA), t.swl_3);
- CHECK_EQ(static_cast<int32_t>(0xdd223344), t.swr_0);
- CHECK_EQ(static_cast<int32_t>(0xccdd3344), t.swr_1);
- CHECK_EQ(static_cast<int32_t>(0xbbccdd44), t.swr_2);
- CHECK_EQ(static_cast<int32_t>(0xaabbccdd), t.swr_3);
+ CHECK_EQ(static_cast<int32_t>(0xDD223344), t.swr_0);
+ CHECK_EQ(static_cast<int32_t>(0xCCDD3344), t.swr_1);
+ CHECK_EQ(static_cast<int32_t>(0xBBCCDD44), t.swr_2);
+ CHECK_EQ(static_cast<int32_t>(0xAABBCCDD), t.swr_3);
}
}
}
@@ -1160,7 +1150,7 @@ TEST(MIPS12) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
t.x = 1;
t.y = 2;
t.y1 = 3;
@@ -1168,8 +1158,7 @@ TEST(MIPS12) {
t.y3 = 0XBABA;
t.y4 = 0xDEDA;
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
- USE(dummy);
+ f.Call(&t, 0, 0, 0, 0);
CHECK_EQ(3, t.y1);
}
@@ -1215,13 +1204,12 @@ TEST(MIPS13) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
t.cvt_big_in = 0xFFFFFFFF;
t.cvt_small_in = 333;
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
- USE(dummy);
+ f.Call(&t, 0, 0, 0, 0);
CHECK_EQ(t.cvt_big_out, static_cast<double>(t.cvt_big_in));
CHECK_EQ(t.cvt_small_out, static_cast<double>(t.cvt_small_in));
@@ -1337,7 +1325,7 @@ TEST(MIPS14) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
t.round_up_in = 123.51;
t.round_down_in = 123.49;
@@ -1348,8 +1336,7 @@ TEST(MIPS14) {
t.err3_in = static_cast<double>(1) + 0xFFFFFFFF;
t.err4_in = NAN;
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
- USE(dummy);
+ f.Call(&t, 0, 0, 0, 0);
#define GET_FPU_ERR(x) (static_cast<int>(x & kFCSRFlagMask))
#define CHECK_NAN2008(x) (x & kFCSRNaN2008FlagMask)
@@ -1468,9 +1455,9 @@ TEST(MIPS16) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
t.ui = 0x44332211;
- t.si = 0x99aabbcc;
+ t.si = 0x99AABBCC;
t.r1 = 0x5555555555555555;
t.r2 = 0x5555555555555555;
t.r3 = 0x5555555555555555;
@@ -1484,8 +1471,7 @@ TEST(MIPS16) {
t.r11 = 0x5555555555555555;
t.r12 = 0x5555555555555555;
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
- USE(dummy);
+ f.Call(&t, 0, 0, 0, 0);
if (kArchEndian == kLittle) {
// Unsigned data, 32 & 64
@@ -1493,52 +1479,52 @@ TEST(MIPS16) {
CHECK_EQ(static_cast<int64_t>(0x0000000044332211L), t.r2); // sd.
// Signed data, 32 & 64.
- CHECK_EQ(static_cast<int64_t>(0x5555555599aabbccL), t.r3); // lw, sw.
- CHECK_EQ(static_cast<int64_t>(0xffffffff99aabbccL), t.r4); // sd.
+ CHECK_EQ(static_cast<int64_t>(0x5555555599AABBCCL), t.r3); // lw, sw.
+ CHECK_EQ(static_cast<int64_t>(0xFFFFFFFF99AABBCCL), t.r4); // sd.
// Signed data, 32 & 64.
- CHECK_EQ(static_cast<int64_t>(0x5555555599aabbccL), t.r5); // lwu, sw.
- CHECK_EQ(static_cast<int64_t>(0x0000000099aabbccL), t.r6); // sd.
+ CHECK_EQ(static_cast<int64_t>(0x5555555599AABBCCL), t.r5); // lwu, sw.
+ CHECK_EQ(static_cast<int64_t>(0x0000000099AABBCCL), t.r6); // sd.
// lh with unsigned and signed data.
CHECK_EQ(static_cast<int64_t>(0x5555555500002211L), t.r7); // lh, sw.
- CHECK_EQ(static_cast<int64_t>(0x55555555ffffbbccL), t.r8); // lh, sw.
+ CHECK_EQ(static_cast<int64_t>(0x55555555FFFFBBCCL), t.r8); // lh, sw.
// lhu with signed data.
- CHECK_EQ(static_cast<int64_t>(0x555555550000bbccL), t.r9); // lhu, sw.
+ CHECK_EQ(static_cast<int64_t>(0x555555550000BBCCL), t.r9); // lhu, sw.
// lb with signed data.
- CHECK_EQ(static_cast<int64_t>(0x55555555ffffffccL), t.r10); // lb, sw.
+ CHECK_EQ(static_cast<int64_t>(0x55555555FFFFFFCCL), t.r10); // lb, sw.
// sh with unsigned and signed data.
CHECK_EQ(static_cast<int64_t>(0x5555555555552211L), t.r11); // lw, sh.
- CHECK_EQ(static_cast<int64_t>(0x555555555555bbccL), t.r12); // lw, sh.
+ CHECK_EQ(static_cast<int64_t>(0x555555555555BBCCL), t.r12); // lw, sh.
} else {
// Unsigned data, 32 & 64
CHECK_EQ(static_cast<int64_t>(0x4433221155555555L), t.r1); // lw, sw.
CHECK_EQ(static_cast<int64_t>(0x0000000044332211L), t.r2); // sd.
// Signed data, 32 & 64.
- CHECK_EQ(static_cast<int64_t>(0x99aabbcc55555555L), t.r3); // lw, sw.
- CHECK_EQ(static_cast<int64_t>(0xffffffff99aabbccL), t.r4); // sd.
+ CHECK_EQ(static_cast<int64_t>(0x99AABBCC55555555L), t.r3); // lw, sw.
+ CHECK_EQ(static_cast<int64_t>(0xFFFFFFFF99AABBCCL), t.r4); // sd.
// Signed data, 32 & 64.
- CHECK_EQ(static_cast<int64_t>(0x99aabbcc55555555L), t.r5); // lwu, sw.
- CHECK_EQ(static_cast<int64_t>(0x0000000099aabbccL), t.r6); // sd.
+ CHECK_EQ(static_cast<int64_t>(0x99AABBCC55555555L), t.r5); // lwu, sw.
+ CHECK_EQ(static_cast<int64_t>(0x0000000099AABBCCL), t.r6); // sd.
// lh with unsigned and signed data.
CHECK_EQ(static_cast<int64_t>(0x0000443355555555L), t.r7); // lh, sw.
- CHECK_EQ(static_cast<int64_t>(0xffff99aa55555555L), t.r8); // lh, sw.
+ CHECK_EQ(static_cast<int64_t>(0xFFFF99AA55555555L), t.r8); // lh, sw.
// lhu with signed data.
- CHECK_EQ(static_cast<int64_t>(0x000099aa55555555L), t.r9); // lhu, sw.
+ CHECK_EQ(static_cast<int64_t>(0x000099AA55555555L), t.r9); // lhu, sw.
// lb with signed data.
- CHECK_EQ(static_cast<int64_t>(0xffffff9955555555L), t.r10); // lb, sw.
+ CHECK_EQ(static_cast<int64_t>(0xFFFFFF9955555555L), t.r10); // lb, sw.
// sh with unsigned and signed data.
CHECK_EQ(static_cast<int64_t>(0x2211555555555555L), t.r11); // lw, sh.
- CHECK_EQ(static_cast<int64_t>(0xbbcc555555555555L), t.r12); // lw, sh.
+ CHECK_EQ(static_cast<int64_t>(0xBBCC555555555555L), t.r12); // lw, sh.
}
}
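
[The r8/r9 expectations above hinge on `lh` sign-extending a halfword while `lhu` zero-extends it; the same arithmetic in portable C++ (little-endian case):

#include <cstdint>
#include <cassert>

int main() {
  int16_t half = static_cast<int16_t>(0xBBCC);
  assert(static_cast<uint32_t>(static_cast<int32_t>(half)) == 0xFFFFBBCCu);  // lh
  uint16_t uhalf = 0xBBCC;
  assert(static_cast<int32_t>(uhalf) == 0x0000BBCC);                         // lhu
  return 0;
}
]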
@@ -1597,9 +1583,9 @@ TEST(seleqz_selnez) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK_EQ(1, test.a);
CHECK_EQ(0, test.b);
@@ -1627,7 +1613,7 @@ TEST(seleqz_selnez) {
test.f = tests_D[j];
test.i = inputs_S[i];
test.j = tests_S[j];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK_EQ(outputs_D[i], test.g);
CHECK_EQ(0, test.h);
CHECK_EQ(outputs_S[i], test.k);
@@ -1635,7 +1621,7 @@ TEST(seleqz_selnez) {
test.f = tests_D[j+1];
test.j = tests_S[j+1];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK_EQ(0, test.g);
CHECK_EQ(outputs_D[i], test.h);
CHECK_EQ(0, test.k);
@@ -1713,14 +1699,14 @@ TEST(min_max) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 4; i < kTableLength; i++) {
test.a = inputsa[i];
test.b = inputsb[i];
test.e = inputse[i];
test.f = inputsf[i];
- CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0);
+ f.Call(&test, 0, 0, 0, 0);
CHECK_EQ(0, memcmp(&test.c, &outputsdmin[i], sizeof(test.c)));
CHECK_EQ(0, memcmp(&test.d, &outputsdmax[i], sizeof(test.d)));
@@ -1822,13 +1808,13 @@ TEST(rint_d) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
for (int j = 0; j < 4; j++) {
test.fcsr = fcsr_inputs[j];
for (int i = 0; i < kTableLength; i++) {
test.a = inputs[i];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK_EQ(test.b, outputs[j][i]);
}
}
@@ -1870,7 +1856,7 @@ TEST(sel) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
const int test_size = 3;
const int input_size = 5;
@@ -1895,13 +1881,13 @@ TEST(sel) {
test.ft = inputs_ft[i];
test.fd = tests_S[j];
test.fs = inputs_fs[i];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK_EQ(test.dd, inputs_ds[i]);
CHECK_EQ(test.fd, inputs_fs[i]);
test.dd = tests_D[j+1];
test.fd = tests_S[j+1];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK_EQ(test.dd, inputs_dt[i]);
CHECK_EQ(test.fd, inputs_ft[i]);
}
@@ -2003,13 +1989,13 @@ TEST(rint_s) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
for (int j = 0; j < 4; j++) {
test.fcsr = fcsr_inputs[j];
for (int i = 0; i < kTableLength; i++) {
test.a = inputs[i];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK_EQ(test.b, outputs[j][i]);
}
}
@@ -2089,13 +2075,13 @@ TEST(mina_maxa) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputsa[i];
test.b = inputsb[i];
test.c = inputsc[i];
test.d = inputsd[i];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
if (i < kTableLength - 1) {
CHECK_EQ(test.resd, resd[i]);
@@ -2171,11 +2157,11 @@ TEST(trunc_l) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
if ((test.isNaN2008 & kFCSRNaN2008FlagMask) &&
kArchVariant == kMips64r6) {
CHECK_EQ(test.c, outputsNaN2008[i]);
@@ -2252,20 +2238,20 @@ TEST(movz_movn) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.c = inputs_S[i];
test.rt = 1;
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK_EQ(test.b, test.bold);
CHECK_EQ(test.d, test.dold);
CHECK_EQ(test.b1, outputs_D[i]);
CHECK_EQ(test.d1, outputs_S[i]);
test.rt = 0;
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK_EQ(test.b, outputs_D[i]);
CHECK_EQ(test.d, outputs_S[i]);
CHECK_EQ(test.b1, test.bold1);
@@ -2353,15 +2339,15 @@ TEST(movt_movd) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK_EQ(test.dstf, outputs_S[i]);
CHECK_EQ(test.dstd, outputs_D[i]);
CHECK_EQ(test.dstf1, test.dstfold1);
CHECK_EQ(test.dstd1, test.dstdold1);
test.fcsr = 0;
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK_EQ(test.dstf, test.dstfold);
CHECK_EQ(test.dstd, test.dstdold);
CHECK_EQ(test.dstf1, outputs_S[i]);
@@ -2440,12 +2426,12 @@ TEST(cvt_w_d) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
for (int j = 0; j < 4; j++) {
test.fcsr = fcsr_inputs[j];
for (int i = 0; i < kTableLength; i++) {
test.a = inputs[i];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK_EQ(test.b, outputs[j][i]);
}
}
@@ -2508,11 +2494,11 @@ TEST(trunc_w) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
if ((test.isNaN2008 & kFCSRNaN2008FlagMask) && kArchVariant == kMips64r6) {
CHECK_EQ(test.c, outputsNaN2008[i]);
} else {
@@ -2578,11 +2564,11 @@ TEST(round_w) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
if ((test.isNaN2008 & kFCSRNaN2008FlagMask) && kArchVariant == kMips64r6) {
CHECK_EQ(test.c, outputsNaN2008[i]);
} else {
@@ -2649,11 +2635,11 @@ TEST(round_l) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
if ((test.isNaN2008 & kFCSRNaN2008FlagMask) &&
kArchVariant == kMips64r6) {
CHECK_EQ(test.c, outputsNaN2008[i]);
@@ -2722,13 +2708,13 @@ TEST(sub) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputfs_S[i];
test.b = inputft_S[i];
test.c = inputfs_D[i];
test.d = inputft_D[i];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK_EQ(test.resultS, outputs_S[i]);
CHECK_EQ(test.resultD, outputs_D[i]);
}
@@ -2795,7 +2781,7 @@ TEST(sqrt_rsqrt_recip) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
float f1;
@@ -2803,7 +2789,7 @@ TEST(sqrt_rsqrt_recip) {
test.a = inputs_S[i];
test.c = inputs_D[i];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK_EQ(test.resultS, outputs_S[i]);
CHECK_EQ(test.resultD, outputs_D[i]);
@@ -2874,11 +2860,11 @@ TEST(neg) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_S[i];
test.c = inputs_D[i];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK_EQ(test.resultS, outputs_S[i]);
CHECK_EQ(test.resultD, outputs_D[i]);
}
@@ -2933,13 +2919,13 @@ TEST(mul) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputfs_S[i];
test.b = inputft_S[i];
test.c = inputfs_D[i];
test.d = inputft_D[i];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK_EQ(test.resultS, inputfs_S[i]*inputft_S[i]);
CHECK_EQ(test.resultD, inputfs_D[i]*inputft_D[i]);
}
@@ -2989,12 +2975,12 @@ TEST(mov) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.c = inputs_S[i];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK_EQ(test.b, outputs_D[i]);
CHECK_EQ(test.d, outputs_S[i]);
}
@@ -3057,11 +3043,11 @@ TEST(floor_w) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
if ((test.isNaN2008 & kFCSRNaN2008FlagMask) && kArchVariant == kMips64r6) {
CHECK_EQ(test.c, outputsNaN2008[i]);
} else {
@@ -3128,11 +3114,11 @@ TEST(floor_l) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
if ((test.isNaN2008 & kFCSRNaN2008FlagMask) &&
kArchVariant == kMips64r6) {
CHECK_EQ(test.c, outputsNaN2008[i]);
@@ -3200,11 +3186,11 @@ TEST(ceil_w) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
if ((test.isNaN2008 & kFCSRNaN2008FlagMask) && kArchVariant == kMips64r6) {
CHECK_EQ(test.c, outputsNaN2008[i]);
} else {
@@ -3271,11 +3257,11 @@ TEST(ceil_l) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
if ((test.isNaN2008 & kFCSRNaN2008FlagMask) &&
kArchVariant == kMips64r6) {
CHECK_EQ(test.c, outputsNaN2008[i]);
@@ -3325,8 +3311,8 @@ TEST(jump_tables1) {
for (int i = 0; i < kNumCases; ++i) {
__ bind(&labels[i]);
- __ lui(v0, (values[i] >> 16) & 0xffff);
- __ ori(v0, v0, values[i] & 0xffff);
+ __ lui(v0, (values[i] >> 16) & 0xFFFF);
+ __ ori(v0, v0, values[i] & 0xFFFF);
__ b(&done);
__ nop();
}
@@ -3346,10 +3332,9 @@ TEST(jump_tables1) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F1 f = FUNCTION_CAST<F1>(code->entry());
+ auto f = GeneratedCode<F1>::FromCode(*code);
for (int i = 0; i < kNumCases; ++i) {
- int64_t res = reinterpret_cast<int64_t>(
- CALL_GENERATED_CODE(isolate, f, i, 0, 0, 0, 0));
+ int64_t res = reinterpret_cast<int64_t>(f.Call(i, 0, 0, 0, 0));
::printf("f(%d) = %" PRId64 "\n", i, res);
CHECK_EQ(values[i], static_cast<int>(res));
}
@@ -3378,8 +3363,8 @@ TEST(jump_tables2) {
for (int i = 0; i < kNumCases; ++i) {
__ bind(&labels[i]);
- __ lui(v0, (values[i] >> 16) & 0xffff);
- __ ori(v0, v0, values[i] & 0xffff);
+ __ lui(v0, (values[i] >> 16) & 0xFFFF);
+ __ ori(v0, v0, values[i] & 0xFFFF);
__ b(&done);
__ nop();
}
@@ -3417,10 +3402,9 @@ TEST(jump_tables2) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F1 f = FUNCTION_CAST<F1>(code->entry());
+ auto f = GeneratedCode<F1>::FromCode(*code);
for (int i = 0; i < kNumCases; ++i) {
- int64_t res = reinterpret_cast<int64_t>(
- CALL_GENERATED_CODE(isolate, f, i, 0, 0, 0, 0));
+ int64_t res = reinterpret_cast<int64_t>(f.Call(i, 0, 0, 0, 0));
::printf("f(%d) = %" PRId64 "\n", i, res);
CHECK_EQ(values[i], res);
}
@@ -3498,10 +3482,9 @@ TEST(jump_tables3) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F1 f = FUNCTION_CAST<F1>(code->entry());
+ auto f = GeneratedCode<F1>::FromCode(*code);
for (int i = 0; i < kNumCases; ++i) {
- Handle<Object> result(
- CALL_GENERATED_CODE(isolate, f, i, 0, 0, 0, 0), isolate);
+ Handle<Object> result(f.Call(i, 0, 0, 0, 0), isolate);
#ifdef OBJECT_PRINT
::printf("f(%d) = ", i);
result->Print(std::cout);
@@ -3569,15 +3552,14 @@ TEST(BITSWAP) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
t.r1 = 0x00102100781A15C3;
t.r2 = 0x001021008B71FCDE;
t.r3 = 0xFF8017FF781A15C3;
t.r4 = 0xFF8017FF8B71FCDE;
t.r5 = 0x10C021098B71FCDE;
t.r6 = 0xFB8017FF781A15C3;
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
- USE(dummy);
+ f.Call(&t, 0, 0, 0, 0);
CHECK_EQ(static_cast<int64_t>(0x000000001E58A8C3L), t.r1);
CHECK_EQ(static_cast<int64_t>(0xFFFFFFFFD18E3F7BL), t.r2);
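
[BITSWAP mirrors the bit order inside each byte; a standalone model reproducing the first expectation above:

#include <cstdint>
#include <cassert>

// Reverse the bit order within one byte.
static uint8_t reverse_bits(uint8_t b) {
  b = static_cast<uint8_t>((b & 0xF0) >> 4 | (b & 0x0F) << 4);
  b = static_cast<uint8_t>((b & 0xCC) >> 2 | (b & 0x33) << 2);
  b = static_cast<uint8_t>((b & 0xAA) >> 1 | (b & 0x55) << 1);
  return b;
}

int main() {
  uint32_t in = 0x781A15C3u, out = 0;
  for (int i = 0; i < 4; ++i)
    out |= static_cast<uint32_t>(reverse_bits((in >> (8 * i)) & 0xFFu)) << (8 * i);
  assert(out == 0x1E58A8C3u);  // matches t.r1's expected low word above
  return 0;
}
]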
@@ -3712,7 +3694,7 @@ TEST(class_fmt) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
// Double test values.
t.dSignalingNan = std::numeric_limits<double>::signaling_NaN();
@@ -3738,8 +3720,7 @@ TEST(class_fmt) {
t.fPosSubnorm = FLT_MIN / 20.0;
t.fPosZero = +0.0;
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
- USE(dummy);
+ f.Call(&t, 0, 0, 0, 0);
// Expected double results.
CHECK_EQ(bit_cast<int64_t>(t.dSignalingNan), 0x001);
CHECK_EQ(bit_cast<int64_t>(t.dQuietNan), 0x002);
@@ -3807,37 +3788,37 @@ TEST(ABS) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
test.a = -2.0;
test.b = -2.0;
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK_EQ(test.a, 2.0);
CHECK_EQ(test.b, 2.0);
test.a = 2.0;
test.b = 2.0;
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK_EQ(test.a, 2.0);
CHECK_EQ(test.b, 2.0);
// Testing biggest positive number
test.a = std::numeric_limits<double>::max();
test.b = std::numeric_limits<float>::max();
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK_EQ(test.a, std::numeric_limits<double>::max());
CHECK_EQ(test.b, std::numeric_limits<float>::max());
// Testing smallest negative number
test.a = -std::numeric_limits<double>::max(); // lowest()
test.b = -std::numeric_limits<float>::max(); // lowest()
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK_EQ(test.a, std::numeric_limits<double>::max());
CHECK_EQ(test.b, std::numeric_limits<float>::max());
// Testing smallest positive number
test.a = -std::numeric_limits<double>::min();
test.b = -std::numeric_limits<float>::min();
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK_EQ(test.a, std::numeric_limits<double>::min());
CHECK_EQ(test.b, std::numeric_limits<float>::min());
@@ -3846,7 +3827,7 @@ TEST(ABS) {
/ std::numeric_limits<double>::min();
test.b = -std::numeric_limits<float>::max()
/ std::numeric_limits<float>::min();
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK_EQ(test.a, std::numeric_limits<double>::max()
/ std::numeric_limits<double>::min());
CHECK_EQ(test.b, std::numeric_limits<float>::max()
@@ -3854,13 +3835,13 @@ TEST(ABS) {
test.a = std::numeric_limits<double>::quiet_NaN();
test.b = std::numeric_limits<float>::quiet_NaN();
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK(std::isnan(test.a));
CHECK(std::isnan(test.b));
test.a = std::numeric_limits<double>::signaling_NaN();
test.b = std::numeric_limits<float>::signaling_NaN();
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK(std::isnan(test.a));
CHECK(std::isnan(test.b));
}
@@ -3901,12 +3882,12 @@ TEST(ADD_FMT) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
test.a = 2.0;
test.b = 3.0;
test.fa = 2.0;
test.fb = 3.0;
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK_EQ(test.c, 5.0);
CHECK_EQ(test.fc, 5.0);
@@ -3914,7 +3895,7 @@ TEST(ADD_FMT) {
test.b = -std::numeric_limits<double>::max(); // lowest()
test.fa = std::numeric_limits<float>::max();
test.fb = -std::numeric_limits<float>::max(); // lowest()
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK_EQ(test.c, 0.0);
CHECK_EQ(test.fc, 0.0);
@@ -3922,7 +3903,7 @@ TEST(ADD_FMT) {
test.b = std::numeric_limits<double>::max();
test.fa = std::numeric_limits<float>::max();
test.fb = std::numeric_limits<float>::max();
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK(!std::isfinite(test.c));
CHECK(!std::isfinite(test.fc));
@@ -3930,7 +3911,7 @@ TEST(ADD_FMT) {
test.b = std::numeric_limits<double>::signaling_NaN();
test.fa = 5.0;
test.fb = std::numeric_limits<float>::signaling_NaN();
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK(std::isnan(test.c));
CHECK(std::isnan(test.fc));
}
@@ -4056,12 +4037,12 @@ TEST(C_COND_FMT) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
test.dOp1 = 2.0;
test.dOp2 = 3.0;
test.fOp1 = 2.0;
test.fOp2 = 3.0;
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK_EQ(test.dF, 0U);
CHECK_EQ(test.dUn, 0U);
CHECK_EQ(test.dEq, 0U);
@@ -4083,7 +4064,7 @@ TEST(C_COND_FMT) {
test.dOp2 = std::numeric_limits<double>::min();
test.fOp1 = std::numeric_limits<float>::min();
test.fOp2 = -std::numeric_limits<float>::max(); // lowest()
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK_EQ(test.dF, 0U);
CHECK_EQ(test.dUn, 0U);
CHECK_EQ(test.dEq, 0U);
@@ -4105,7 +4086,7 @@ TEST(C_COND_FMT) {
test.dOp2 = -std::numeric_limits<double>::max(); // lowest()
test.fOp1 = std::numeric_limits<float>::max();
test.fOp2 = std::numeric_limits<float>::max();
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK_EQ(test.dF, 0U);
CHECK_EQ(test.dUn, 0U);
CHECK_EQ(test.dEq, 1U);
@@ -4127,7 +4108,7 @@ TEST(C_COND_FMT) {
test.dOp2 = 0.0;
test.fOp1 = std::numeric_limits<float>::quiet_NaN();
test.fOp2 = 0.0;
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK_EQ(test.dF, 0U);
CHECK_EQ(test.dUn, 1U);
CHECK_EQ(test.dEq, 0U);
@@ -4257,7 +4238,7 @@ TEST(CMP_COND_FMT) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
uint64_t dTrue = 0xFFFFFFFFFFFFFFFF;
uint64_t dFalse = 0x0000000000000000;
uint32_t fTrue = 0xFFFFFFFF;
@@ -4267,7 +4248,7 @@ TEST(CMP_COND_FMT) {
test.dOp2 = 3.0;
test.fOp1 = 2.0;
test.fOp2 = 3.0;
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK_EQ(bit_cast<uint64_t>(test.dF), dFalse);
CHECK_EQ(bit_cast<uint64_t>(test.dUn), dFalse);
CHECK_EQ(bit_cast<uint64_t>(test.dEq), dFalse);
@@ -4292,7 +4273,7 @@ TEST(CMP_COND_FMT) {
test.dOp2 = std::numeric_limits<double>::min();
test.fOp1 = std::numeric_limits<float>::min();
test.fOp2 = -std::numeric_limits<float>::max(); // lowest()
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK_EQ(bit_cast<uint64_t>(test.dF), dFalse);
CHECK_EQ(bit_cast<uint64_t>(test.dUn), dFalse);
CHECK_EQ(bit_cast<uint64_t>(test.dEq), dFalse);
@@ -4317,7 +4298,7 @@ TEST(CMP_COND_FMT) {
test.dOp2 = -std::numeric_limits<double>::max(); // lowest()
test.fOp1 = std::numeric_limits<float>::max();
test.fOp2 = std::numeric_limits<float>::max();
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK_EQ(bit_cast<uint64_t>(test.dF), dFalse);
CHECK_EQ(bit_cast<uint64_t>(test.dUn), dFalse);
CHECK_EQ(bit_cast<uint64_t>(test.dEq), dTrue);
@@ -4342,7 +4323,7 @@ TEST(CMP_COND_FMT) {
test.dOp2 = 0.0;
test.fOp1 = std::numeric_limits<float>::quiet_NaN();
test.fOp2 = 0.0;
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK_EQ(bit_cast<uint64_t>(test.dF), dFalse);
CHECK_EQ(bit_cast<uint64_t>(test.dUn), dTrue);
CHECK_EQ(bit_cast<uint64_t>(test.dEq), dFalse);
@@ -4436,7 +4417,7 @@ TEST(CVT) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
test.cvt_d_s_in = -0.51;
test.cvt_d_w_in = -1;
@@ -4449,7 +4430,7 @@ TEST(CVT) {
test.cvt_w_s_in = -0.51;
test.cvt_w_d_in = -0.51;
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK_EQ(test.cvt_d_s_out, static_cast<double>(test.cvt_d_s_in));
CHECK_EQ(test.cvt_d_w_out, static_cast<double>(test.cvt_d_w_in));
CHECK_EQ(test.cvt_d_l_out, static_cast<double>(test.cvt_d_l_in));
@@ -4472,7 +4453,7 @@ TEST(CVT) {
test.cvt_w_s_in = 0.49;
test.cvt_w_d_in = 0.49;
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK_EQ(test.cvt_d_s_out, static_cast<double>(test.cvt_d_s_in));
CHECK_EQ(test.cvt_d_w_out, static_cast<double>(test.cvt_d_w_in));
CHECK_EQ(test.cvt_d_l_out, static_cast<double>(test.cvt_d_l_in));
@@ -4495,7 +4476,7 @@ TEST(CVT) {
test.cvt_w_s_in = std::numeric_limits<float>::max();
test.cvt_w_d_in = std::numeric_limits<double>::max();
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK_EQ(test.cvt_d_s_out, static_cast<double>(test.cvt_d_s_in));
CHECK_EQ(test.cvt_d_w_out, static_cast<double>(test.cvt_d_w_in));
CHECK_EQ(test.cvt_d_l_out, static_cast<double>(test.cvt_d_l_in));
@@ -4519,7 +4500,7 @@ TEST(CVT) {
test.cvt_w_s_in = -std::numeric_limits<float>::max(); // lowest()
test.cvt_w_d_in = -std::numeric_limits<double>::max(); // lowest()
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK_EQ(test.cvt_d_s_out, static_cast<double>(test.cvt_d_s_in));
CHECK_EQ(test.cvt_d_w_out, static_cast<double>(test.cvt_d_w_in));
CHECK_EQ(test.cvt_d_l_out, static_cast<double>(test.cvt_d_l_in));
@@ -4550,7 +4531,7 @@ TEST(CVT) {
test.cvt_w_s_in = std::numeric_limits<float>::min();
test.cvt_w_d_in = std::numeric_limits<double>::min();
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK_EQ(test.cvt_d_s_out, static_cast<double>(test.cvt_d_s_in));
CHECK_EQ(test.cvt_d_w_out, static_cast<double>(test.cvt_d_w_in));
CHECK_EQ(test.cvt_d_l_out, static_cast<double>(test.cvt_d_l_in));
@@ -4608,9 +4589,9 @@ TEST(DIV_FMT) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
const int test_size = 3;
@@ -4651,7 +4632,7 @@ TEST(DIV_FMT) {
test.fOp1 = fOp1[i];
test.fOp2 = fOp2[i];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK_EQ(test.dRes, dRes[i]);
CHECK_EQ(test.fRes, fRes[i]);
}
@@ -4661,7 +4642,7 @@ TEST(DIV_FMT) {
test.fOp1 = FLT_MAX;
test.fOp2 = -0.0;
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK(!std::isfinite(test.dRes));
CHECK(!std::isfinite(test.fRes));
@@ -4670,7 +4651,7 @@ TEST(DIV_FMT) {
test.fOp1 = 0.0;
test.fOp2 = -0.0;
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK(std::isnan(test.dRes));
CHECK(std::isnan(test.fRes));
@@ -4679,7 +4660,7 @@ TEST(DIV_FMT) {
test.fOp1 = std::numeric_limits<float>::quiet_NaN();
test.fOp2 = -5.0;
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ f.Call(&test, 0, 0, 0, 0);
CHECK(std::isnan(test.dRes));
CHECK(std::isnan(test.fRes));
}
@@ -4701,10 +4682,10 @@ uint64_t run_align(uint64_t rs_value, uint64_t rt_value, uint8_t bp) {
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F4 f = FUNCTION_CAST<F4>(code->entry());
+ auto f = GeneratedCode<F4>::FromCode(*code);
- uint64_t res = reinterpret_cast<uint64_t>(
- CALL_GENERATED_CODE(isolate, f, rs_value, rt_value, 0, 0, 0));
+ uint64_t res =
+ reinterpret_cast<uint64_t>(f.Call(rs_value, rt_value, 0, 0, 0));
return res;
}
@@ -4721,13 +4702,15 @@ TEST(r6_align) {
uint64_t expected_res;
};
+ // clang-format off
struct TestCaseAlign tc[] = {
// rs_value, rt_value, bp, expected_res
- { 0x11223344, 0xaabbccdd, 0, 0xffffffffaabbccdd },
- { 0x11223344, 0xaabbccdd, 1, 0xffffffffbbccdd11 },
- { 0x11223344, 0xaabbccdd, 2, 0xffffffffccdd1122 },
- { 0x11223344, 0xaabbccdd, 3, 0xffffffffdd112233 },
+ { 0x11223344, 0xAABBCCDD, 0, 0xFFFFFFFFAABBCCDD },
+ { 0x11223344, 0xAABBCCDD, 1, 0xFFFFFFFFBBCCDD11 },
+ { 0x11223344, 0xAABBCCDD, 2, 0xFFFFFFFFCCDD1122 },
+ { 0x11223344, 0xAABBCCDD, 3, 0xFFFFFFFFDD112233 },
};
+ // clang-format on
size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseAlign);
for (size_t i = 0; i < nr_test_cases; ++i) {
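
For readers following the table above, a minimal sketch of the ALIGN semantics the expected_res column encodes (inferred from the test data, not code from this patch):

#include <cstdint>

// align rd, rs, rt, bp fuses rt and rs at byte boundary bp and
// sign-extends the 32-bit result to 64 bits (assumed semantics).
uint64_t align_model(uint32_t rs, uint32_t rt, unsigned bp) {
  uint32_t fused =
      (bp == 0) ? rt : (rt << (8 * bp)) | (rs >> (32 - 8 * bp));
  return static_cast<uint64_t>(static_cast<int32_t>(fused));
}
// align_model(0x11223344, 0xAABBCCDD, 1) == 0xFFFFFFFFBBCCDD11
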
@@ -4755,9 +4738,9 @@ uint64_t run_dalign(uint64_t rs_value, uint64_t rt_value, uint8_t bp) {
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F4 f = FUNCTION_CAST<F4>(code->entry());
- uint64_t res = reinterpret_cast<uint64_t>(
- CALL_GENERATED_CODE(isolate, f, rs_value, rt_value, 0, 0, 0));
+ auto f = GeneratedCode<F4>::FromCode(*code);
+ uint64_t res =
+ reinterpret_cast<uint64_t>(f.Call(rs_value, rt_value, 0, 0, 0));
return res;
}
@@ -4774,17 +4757,19 @@ TEST(r6_dalign) {
uint64_t expected_res;
};
+ // clang-format off
struct TestCaseDalign tc[] = {
// rs_value, rt_value, bp, expected_res
- { 0x1122334455667700, 0xaabbccddeeff8899, 0, 0xaabbccddeeff8899 },
- { 0x1122334455667700, 0xaabbccddeeff8899, 1, 0xbbccddeeff889911 },
- { 0x1122334455667700, 0xaabbccddeeff8899, 2, 0xccddeeff88991122 },
- { 0x1122334455667700, 0xaabbccddeeff8899, 3, 0xddeeff8899112233 },
- { 0x1122334455667700, 0xaabbccddeeff8899, 4, 0xeeff889911223344 },
- { 0x1122334455667700, 0xaabbccddeeff8899, 5, 0xff88991122334455 },
- { 0x1122334455667700, 0xaabbccddeeff8899, 6, 0x8899112233445566 },
- { 0x1122334455667700, 0xaabbccddeeff8899, 7, 0x9911223344556677 }
+ { 0x1122334455667700, 0xAABBCCDDEEFF8899, 0, 0xAABBCCDDEEFF8899 },
+ { 0x1122334455667700, 0xAABBCCDDEEFF8899, 1, 0xBBCCDDEEFF889911 },
+ { 0x1122334455667700, 0xAABBCCDDEEFF8899, 2, 0xCCDDEEFF88991122 },
+ { 0x1122334455667700, 0xAABBCCDDEEFF8899, 3, 0xDDEEFF8899112233 },
+ { 0x1122334455667700, 0xAABBCCDDEEFF8899, 4, 0xEEFF889911223344 },
+ { 0x1122334455667700, 0xAABBCCDDEEFF8899, 5, 0xFF88991122334455 },
+ { 0x1122334455667700, 0xAABBCCDDEEFF8899, 6, 0x8899112233445566 },
+ { 0x1122334455667700, 0xAABBCCDDEEFF8899, 7, 0x9911223344556677 }
};
+ // clang-format on
size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseDalign);
for (size_t i = 0; i < nr_test_cases; ++i) {
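
The 64-bit variant differs only in width; a matching sketch under the same assumptions as the align model above:

#include <cstdint>

uint64_t dalign_model(uint64_t rs, uint64_t rt, unsigned bp) {
  return (bp == 0) ? rt : (rt << (8 * bp)) | (rs >> (64 - 8 * bp));
}
// dalign_model(0x1122334455667700, 0xAABBCCDDEEFF8899, 7)
//     == 0x9911223344556677
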
@@ -4814,11 +4799,10 @@ uint64_t run_aluipc(int16_t offset) {
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F2 f = FUNCTION_CAST<F2>(code->entry());
- PC = (uint64_t) f; // Set the program counter.
+ auto f = GeneratedCode<F2>::FromCode(*code);
+ PC = (uint64_t)code->entry(); // Set the program counter.
- uint64_t res = reinterpret_cast<uint64_t>(
- CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
return res;
}
@@ -4869,11 +4853,10 @@ uint64_t run_auipc(int16_t offset) {
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F2 f = FUNCTION_CAST<F2>(code->entry());
- PC = (uint64_t) f; // Set the program counter.
+ auto f = GeneratedCode<F2>::FromCode(*code);
+ PC = (uint64_t)code->entry(); // Set the program counter.
- uint64_t res = reinterpret_cast<uint64_t>(
- CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
return res;
}
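
Both helpers now capture the entry PC before the call; a sketch of the R6 semantics the checks presumably compare against (assumed from the MIPS R6 manual, not shown in this patch):

#include <cstdint>

uint64_t auipc_model(uint64_t pc, int16_t imm) {
  return pc + (static_cast<int64_t>(imm) << 16);  // PC + (sign_extend(imm) << 16)
}
uint64_t aluipc_model(uint64_t pc, int16_t imm) {
  return auipc_model(pc, imm) & ~uint64_t{0xFFFF};  // low 16 bits cleared
}
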
@@ -4925,11 +4908,9 @@ uint64_t run_aui(uint64_t rs, uint16_t offset) {
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F2 f = FUNCTION_CAST<F2>(code->entry());
+ auto f = GeneratedCode<F2>::FromCode(*code);
- uint64_t res =
- reinterpret_cast<uint64_t>
- (CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
return res;
}
@@ -4952,11 +4933,9 @@ uint64_t run_daui(uint64_t rs, uint16_t offset) {
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F2 f = FUNCTION_CAST<F2>(code->entry());
+ auto f = GeneratedCode<F2>::FromCode(*code);
- uint64_t res =
- reinterpret_cast<uint64_t>
- (CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
return res;
}
@@ -4979,11 +4958,9 @@ uint64_t run_dahi(uint64_t rs, uint16_t offset) {
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F2 f = FUNCTION_CAST<F2>(code->entry());
+ auto f = GeneratedCode<F2>::FromCode(*code);
- uint64_t res =
- reinterpret_cast<uint64_t>
- (CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
return res;
}
@@ -5006,11 +4983,9 @@ uint64_t run_dati(uint64_t rs, uint16_t offset) {
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F2 f = FUNCTION_CAST<F2>(code->entry());
+ auto f = GeneratedCode<F2>::FromCode(*code);
- uint64_t res =
- reinterpret_cast<uint64_t>
- (CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
return res;
}
@@ -5028,13 +5003,13 @@ TEST(r6_aui_family) {
// AUI test cases.
struct TestCaseAui aui_tc[] = {
- {0xfffeffff, 0x1, 0xffffffffffffffff},
- {0xffffffff, 0x0, 0xffffffffffffffff},
- {0, 0xffff, 0xffffffffffff0000},
- {0x0008ffff, 0xfff7, 0xffffffffffffffff},
- {32767, 32767, 0x000000007fff7fff},
- {0x00000000ffffffff, 0x1, 0x000000000000ffff},
- {0xffffffff, 0xffff, 0xfffffffffffeffff},
+ {0xFFFEFFFF, 0x1, 0xFFFFFFFFFFFFFFFF},
+ {0xFFFFFFFF, 0x0, 0xFFFFFFFFFFFFFFFF},
+ {0, 0xFFFF, 0xFFFFFFFFFFFF0000},
+ {0x0008FFFF, 0xFFF7, 0xFFFFFFFFFFFFFFFF},
+ {32767, 32767, 0x000000007FFF7FFF},
+ {0x00000000FFFFFFFF, 0x1, 0x000000000000FFFF},
+ {0xFFFFFFFF, 0xFFFF, 0xFFFFFFFFFFFEFFFF},
};
size_t nr_test_cases = sizeof(aui_tc) / sizeof(TestCaseAui);
@@ -5045,13 +5020,13 @@ TEST(r6_aui_family) {
// DAUI test cases.
struct TestCaseAui daui_tc[] = {
- {0xfffffffffffeffff, 0x1, 0xffffffffffffffff},
- {0xffffffffffffffff, 0x0, 0xffffffffffffffff},
- {0, 0xffff, 0xffffffffffff0000},
- {0x0008ffff, 0xfff7, 0xffffffffffffffff},
- {32767, 32767, 0x000000007fff7fff},
- {0x00000000ffffffff, 0x1, 0x000000010000ffff},
- {0xffffffff, 0xffff, 0x00000000fffeffff},
+ {0xFFFFFFFFFFFEFFFF, 0x1, 0xFFFFFFFFFFFFFFFF},
+ {0xFFFFFFFFFFFFFFFF, 0x0, 0xFFFFFFFFFFFFFFFF},
+ {0, 0xFFFF, 0xFFFFFFFFFFFF0000},
+ {0x0008FFFF, 0xFFF7, 0xFFFFFFFFFFFFFFFF},
+ {32767, 32767, 0x000000007FFF7FFF},
+ {0x00000000FFFFFFFF, 0x1, 0x000000010000FFFF},
+ {0xFFFFFFFF, 0xFFFF, 0x00000000FFFEFFFF},
};
nr_test_cases = sizeof(daui_tc) / sizeof(TestCaseAui);
@@ -5062,13 +5037,13 @@ TEST(r6_aui_family) {
// DATI test cases.
struct TestCaseAui dati_tc[] = {
- {0xfffffffffffeffff, 0x1, 0x0000fffffffeffff},
- {0xffffffffffffffff, 0x0, 0xffffffffffffffff},
- {0, 0xffff, 0xffff000000000000},
- {0x0008ffff, 0xfff7, 0xfff700000008ffff},
- {32767, 32767, 0x7fff000000007fff},
- {0x00000000ffffffff, 0x1, 0x00010000ffffffff},
- {0xffffffffffff, 0xffff, 0xffffffffffffffff},
+ {0xFFFFFFFFFFFEFFFF, 0x1, 0x0000FFFFFFFEFFFF},
+ {0xFFFFFFFFFFFFFFFF, 0x0, 0xFFFFFFFFFFFFFFFF},
+ {0, 0xFFFF, 0xFFFF000000000000},
+ {0x0008FFFF, 0xFFF7, 0xFFF700000008FFFF},
+ {32767, 32767, 0x7FFF000000007FFF},
+ {0x00000000FFFFFFFF, 0x1, 0x00010000FFFFFFFF},
+ {0xFFFFFFFFFFFF, 0xFFFF, 0xFFFFFFFFFFFFFFFF},
};
nr_test_cases = sizeof(dati_tc) / sizeof(TestCaseAui);
@@ -5079,9 +5054,9 @@ TEST(r6_aui_family) {
// DAHI test cases.
struct TestCaseAui dahi_tc[] = {
- {0xfffffffeffffffff, 0x1, 0xffffffffffffffff},
- {0xffffffffffffffff, 0x0, 0xffffffffffffffff},
- {0, 0xffff, 0xffffffff00000000},
+ {0xFFFFFFFEFFFFFFFF, 0x1, 0xFFFFFFFFFFFFFFFF},
+ {0xFFFFFFFFFFFFFFFF, 0x0, 0xFFFFFFFFFFFFFFFF},
+ {0, 0xFFFF, 0xFFFFFFFF00000000},
};
nr_test_cases = sizeof(dahi_tc) / sizeof(TestCaseAui);
@@ -5114,10 +5089,9 @@ uint64_t run_li_macro(uint64_t imm, LiFlags mode, int32_t num_instr = 0) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F2 f = FUNCTION_CAST<F2>(code->entry());
+ auto f = GeneratedCode<F2>::FromCode(*code);
- uint64_t res = reinterpret_cast<uint64_t>(
- CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
return res;
}
@@ -5137,13 +5111,13 @@ TEST(li_macro) {
// We call li(v0, imm) to test cases listed below.
struct TestCase_li tc[] = {
// imm, r2_num_instr, r6_num_instr
- {0xffffffffffff8000, 1, 1}, // min_int16
+ {0xFFFFFFFFFFFF8000, 1, 1}, // min_int16
// The test case above generates daddiu instruction.
// This is an int16 value and we can load it using just daddiu.
{0x8000, 1, 1}, // max_int16 + 1
// Generates ori
// max_int16 + 1 is not int16 but is uint16, just use ori.
- {0xffffffffffff7fff, 2, 2}, // min_int16 - 1
+ {0xFFFFFFFFFFFF7FFF, 2, 2}, // min_int16 - 1
// Generates lui + ori
// We load int32 value using lui + ori.
{0x8001, 1, 1}, // max_int16 + 2
@@ -5155,115 +5129,115 @@ TEST(li_macro) {
{0x00010001, 2, 2}, // max_uint16 + 2
// Generates lui + ori
// We have to generate two instructions in this case.
- {0x00000000ffffffff, 2, 2}, // max_uint32
+ {0x00000000FFFFFFFF, 2, 2}, // max_uint32
// r2 - daddiu + dsrl32
// r6 - daddiu + dahi
- {0x00000000fffffffe, 3, 2}, // max_uint32 - 1
+ {0x00000000FFFFFFFE, 3, 2}, // max_uint32 - 1
// r2 - lui + ori + dsll
// r6 - daddiu + dahi
- {0x00ffff000000fffe, 3, 3},
+ {0x00FFFF000000FFFE, 3, 3},
// ori + dsll32 + ori
- {0x00000001fffffffe, 4, 2}, // max_uint32 << 1
+ {0x00000001FFFFFFFE, 4, 2}, // max_uint32 << 1
// r2 - lui + ori + dsll + ori
// r6 - daddiu + dahi
- {0x0000fffffffffffe, 4, 2}, // max_uint48 - 1
+ {0x0000FFFFFFFFFFFE, 4, 2}, // max_uint48 - 1
// r2 - daddiu + dsll32 + ori + dsubu
// Loading imm directly would require ori + dsll + ori + dsll + ori.
// Optimized by loading -imm and using dsubu to get imm.
// r6 - daddiu + dati
- {0xffffffff00000000, 2, 2}, // max_uint32 << 32
+ {0xFFFFFFFF00000000, 2, 2}, // max_uint32 << 32
// r2 - daddiu + dsll32
// r6 - ori + dahi
// We need ori to clear register before loading value using dahi.
- {0xffffffff80000000, 1, 1}, // min_int32
+ {0xFFFFFFFF80000000, 1, 1}, // min_int32
// The test case above generates lui instruction.
{0x0000000080000000, 2, 2}, // max_int32 + 1
// r2 - ori + dsll
// r6 - lui + dahi
{0x0000800000000000, 2, 2},
// ori + dsll32
- {0xffff800000000000, 2, 2},
+ {0xFFFF800000000000, 2, 2},
// r2 - daddiu + dsll32
// r6 - ori + dahi
- {0xffff80000000ffff, 3, 2},
+ {0xFFFF80000000FFFF, 3, 2},
// r2 - daddiu + dsll32 + ori
// r6 - ori + dahi
- {0xffffff123000ffff, 3, 3},
+ {0xFFFFFF123000FFFF, 3, 3},
// daddiu + dsll + ori
- {0xffff00000000ffff, 3, 2},
+ {0xFFFF00000000FFFF, 3, 2},
// r2 - daddiu + dsll32 + ori
// r6 - ori + dati
- {0xffff8000ffff0000, 3, 2},
+ {0xFFFF8000FFFF0000, 3, 2},
// r2 - lui + ori + dsll
// r6 - lui + dahi
- {0x0000ffffffff0000, 4, 2},
+ {0x0000FFFFFFFF0000, 4, 2},
// r2 - ori + dsll + ori + dsll
// r6 - lui + dati
- {0x1234ffff80000000, 3, 2},
+ {0x1234FFFF80000000, 3, 2},
// r2 - lui + ori + dsll
// r6 - lui + dati
- {0x1234ffff80010000, 5, 2},
+ {0x1234FFFF80010000, 5, 2},
// r2 - lui + ori + dsll + ori + dsll
// r6 - lui + dati
- {0xffff8000ffff8000, 2, 2},
+ {0xFFFF8000FFFF8000, 2, 2},
// r2 - daddiu + dinsu
// r6 - daddiu + dahi
- {0xffff0000ffff8000, 4, 3},
+ {0xFFFF0000FFFF8000, 4, 3},
// r2 - ori + dsll32 + ori + dsubu
// Loading imm directly would require lui + dsll + ori + dsll + ori.
// Optimized by loading -imm and using dsubu to get imm.
// r6 - daddiu + dahi + dati
{0x8000000080000000, 2, 2},
// lui + dinsu
- {0xabcd0000abcd0000, 2, 2},
+ {0xABCD0000ABCD0000, 2, 2},
// lui + dinsu
{0x8000800080008000, 3, 3},
// lui + ori + dinsu
- {0xabcd1234abcd1234, 3, 3},
+ {0xABCD1234ABCD1234, 3, 3},
// The test case above generates lui + ori + dinsu instruction sequence.
- {0xffff800080008000, 4, 3},
+ {0xFFFF800080008000, 4, 3},
// r2 - lui + ori + dsll + ori
// r6 - lui + ori + dahi
- {0xffffabcd, 3, 2},
+ {0xFFFFABCD, 3, 2},
// r2 - ori + dsll + ori
// r6 - daddiu + dahi
- {0x1ffffabcd, 4, 2},
+ {0x1FFFFABCD, 4, 2},
// r2 - lui + ori + dsll + ori
// r6 - daddiu + dahi
- {0xffffffffabcd, 4, 2},
+ {0xFFFFFFFFABCD, 4, 2},
// r2 - daddiu + dsll32 + ori + dsubu
// Loading imm directly would require ori + dsll + ori + dsll + ori.
// Optimized by loading -imm and using dsubu to get imm.
// r6 - daddiu + dati
- {0x1ffffffffabcd, 4, 2},
+ {0x1FFFFFFFFABCD, 4, 2},
// r2 - daddiu + dsll32 + ori + dsubu
// Loading imm directly would require lui + ori + dsll + ori + dsll + ori.
// Optimized by loading -imm and using dsubu to get imm.
// r6 - daddiu + dati
- {0xffff7fff80010000, 5, 2},
+ {0xFFFF7FFF80010000, 5, 2},
// r2 - lui + ori + dsll + ori + dsll
// r6 - lui + dahi
// Here lui sets high 32 bits to 1 so dahi can be used to get target
// value.
- {0x00007fff7fff0000, 3, 2},
+ {0x00007FFF7FFF0000, 3, 2},
// r2 - lui + ori + dsll
// r6 - lui + dahi
// High 32 bits are not set so dahi can be used to get target value.
- {0xffff7fff7fff0000, 5, 3},
+ {0xFFFF7FFF7FFF0000, 5, 3},
// r2 - lui + ori + dsll + ori + dsll
// r6 - lui + dahi + dati
// High 32 bits are not set so just dahi can't be used to get target
// value.
- {0x00007fff80010000, 3, 3},
+ {0x00007FFF80010000, 3, 3},
// r2 - lui + ori + dsll
// r6 - lui + ori + dsll
// High 32 bits are set so can't just use lui + dahi to get target value.
- {0x1234abcd87654321, 6, 4},
+ {0x1234ABCD87654321, 6, 4},
// The test case above generates:
// r2 - lui + ori + dsll + ori + dsll + ori instruction sequence,
// r6 - lui + ori + dahi + dati.
// Load using full instruction sequence.
- {0xffff0000ffffffff, 3, 3},
+ {0xFFFF0000FFFFFFFF, 3, 3},
// r2 - ori + dsll32 + nor
// Loading imm directly would require lui + dsll + ori + dsll + ori.
// Optimized by loading ~imm and using nor to get imm. Loading -imm would
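
A worked example for the first row of the table: daddiu sign-extends its 16-bit immediate, which is why min_int16 loads in a single instruction. A sketch under that assumption:

#include <cstdint>

int64_t daddiu_model(int64_t rs, int16_t imm) { return rs + imm; }
// daddiu_model(0, -0x8000) == static_cast<int64_t>(0xFFFFFFFFFFFF8000)
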
@@ -5296,24 +5270,24 @@ uint64_t run_lwpc(int offset) {
v8::internal::CodeObjectRequired::kYes);
// 256k instructions; 2^8k
- // addiu t3, a4, 0xffff; (0x250fffff)
+ // addiu t3, a4, 0xFFFF; (0x250FFFFF)
// ...
- // addiu t0, a4, 0x0000; (0x250c0000)
+ // addiu t0, a4, 0x0000; (0x250C0000)
uint32_t addiu_start_1 = 0x25000000;
- for (int32_t i = 0xfffff; i >= 0xc0000; --i) {
+ for (int32_t i = 0xFFFFF; i >= 0xC0000; --i) {
uint32_t addiu_new = addiu_start_1 + i;
__ dd(addiu_new);
}
- __ lwpc(t8, offset); // offset 0; 0xef080000 (t8 register)
+ __ lwpc(t8, offset); // offset 0; 0xEF080000 (t8 register)
__ mov(v0, t8);
// 256k instructions; 2^8k
// addiu a4, a4, 0x0000; (0x25080000)
// ...
- // addiu a7, a4, 0xffff; (0x250bffff)
+ // addiu a7, a4, 0xFFFF; (0x250BFFFF)
uint32_t addiu_start_2 = 0x25000000;
- for (int32_t i = 0x80000; i <= 0xbffff; ++i) {
+ for (int32_t i = 0x80000; i <= 0xBFFFF; ++i) {
uint32_t addiu_new = addiu_start_2 + i;
__ dd(addiu_new);
}
@@ -5326,10 +5300,9 @@ uint64_t run_lwpc(int offset) {
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F2 f = FUNCTION_CAST<F2>(code->entry());
+ auto f = GeneratedCode<F2>::FromCode(*code);
- uint64_t res = reinterpret_cast<uint64_t>(
- CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
return res;
}
@@ -5344,17 +5317,19 @@ TEST(r6_lwpc) {
uint64_t expected_res;
};
+ // clang-format off
struct TestCaseLwpc tc[] = {
// offset, expected_res
- { -262144, 0x250fffff }, // offset 0x40000
- { -4, 0x250c0003 },
- { -1, 0x250c0000 },
- { 0, 0xffffffffef080000 },
+ { -262144, 0x250FFFFF }, // offset 0x40000
+ { -4, 0x250C0003 },
+ { -1, 0x250C0000 },
+ { 0, 0xFFFFFFFFEF080000 },
{ 1, 0x03001025 }, // mov(v0, t8)
{ 2, 0x25080000 },
{ 4, 0x25080002 },
- { 262143, 0x250bfffd }, // offset 0x3ffff
+ { 262143, 0x250BFFFD }, // offset 0x3FFFF
};
+ // clang-format on
size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseLwpc);
for (size_t i = 0; i < nr_test_cases; ++i) {
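
The expected values fall out of the instruction stream laid out above: lwpc reads the word at PC + 4 * offset and sign-extends it, so offset 0 returns the lwpc encoding itself with the sign bit set. A sketch of the assumed semantics:

#include <cstdint>

uint64_t lwpc_model(const uint32_t* pc, int offset) {
  return static_cast<uint64_t>(
      static_cast<int64_t>(static_cast<int32_t>(pc[offset])));
}
// With pc[0] == 0xEF080000 this yields 0xFFFFFFFFEF080000, as in the table.
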
@@ -5373,24 +5348,24 @@ uint64_t run_lwupc(int offset) {
v8::internal::CodeObjectRequired::kYes);
// 256k instructions; 2^8k
- // addiu t3, a4, 0xffff; (0x250fffff)
+ // addiu t3, a4, 0xFFFF; (0x250FFFFF)
// ...
- // addiu t0, a4, 0x0000; (0x250c0000)
+ // addiu t0, a4, 0x0000; (0x250C0000)
uint32_t addiu_start_1 = 0x25000000;
- for (int32_t i = 0xfffff; i >= 0xc0000; --i) {
+ for (int32_t i = 0xFFFFF; i >= 0xC0000; --i) {
uint32_t addiu_new = addiu_start_1 + i;
__ dd(addiu_new);
}
- __ lwupc(t8, offset); // offset 0; 0xef080000 (t8 register)
+ __ lwupc(t8, offset); // offset 0; 0xEF080000 (t8 register)
__ mov(v0, t8);
// 256k instructions; 2^8k
// addiu a4, a4, 0x0000; (0x25080000)
// ...
- // addiu a7, a4, 0xffff; (0x250bffff)
+ // addiu a7, a4, 0xFFFF; (0x250BFFFF)
uint32_t addiu_start_2 = 0x25000000;
- for (int32_t i = 0x80000; i <= 0xbffff; ++i) {
+ for (int32_t i = 0x80000; i <= 0xBFFFF; ++i) {
uint32_t addiu_new = addiu_start_2 + i;
__ dd(addiu_new);
}
@@ -5403,10 +5378,9 @@ uint64_t run_lwupc(int offset) {
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F2 f = FUNCTION_CAST<F2>(code->entry());
+ auto f = GeneratedCode<F2>::FromCode(*code);
- uint64_t res = reinterpret_cast<uint64_t>(
- CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
return res;
}
@@ -5421,17 +5395,19 @@ TEST(r6_lwupc) {
uint64_t expected_res;
};
+ // clang-format off
struct TestCaseLwupc tc[] = {
// offset, expected_res
- { -262144, 0x250fffff }, // offset 0x40000
- { -4, 0x250c0003 },
- { -1, 0x250c0000 },
- { 0, 0xef100000 },
+ { -262144, 0x250FFFFF }, // offset 0x40000
+ { -4, 0x250C0003 },
+ { -1, 0x250C0000 },
+ { 0, 0xEF100000 },
{ 1, 0x03001025 }, // mov(v0, t8)
{ 2, 0x25080000 },
{ 4, 0x25080002 },
- { 262143, 0x250bfffd }, // offset 0x3ffff
+ { 262143, 0x250BFFFD }, // offset 0x3FFFF
};
+ // clang-format on
size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseLwupc);
for (size_t i = 0; i < nr_test_cases; ++i) {
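
lwupc differs only in the extension: the loaded word is zero-extended, which is why offset 0 expects 0xEF100000 here rather than a sign-extended value. Sketch under the same assumptions as the lwpc model above:

#include <cstdint>

uint64_t lwupc_model(const uint32_t* pc, int offset) {
  return static_cast<uint64_t>(pc[offset]);  // zero-extend the loaded word
}
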
@@ -5488,10 +5464,9 @@ uint64_t run_jic(int16_t offset) {
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F2 f = FUNCTION_CAST<F2>(code->entry());
+ auto f = GeneratedCode<F2>::FromCode(*code);
- uint64_t res = reinterpret_cast<uint64_t>(
- CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
return res;
}
@@ -5561,10 +5536,9 @@ uint64_t run_beqzc(int32_t value, int32_t offset) {
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F2 f = FUNCTION_CAST<F2>(code->entry());
+ auto f = GeneratedCode<F2>::FromCode(*code);
- uint64_t res = reinterpret_cast<uint64_t>(
- CALL_GENERATED_CODE(isolate, f, value, 0, 0, 0, 0));
+ uint64_t res = reinterpret_cast<uint64_t>(f.Call(value, 0, 0, 0, 0));
return res;
}
@@ -5580,14 +5554,16 @@ TEST(r6_beqzc) {
uint32_t expected_res;
};
+ // clang-format off
struct TestCaseBeqzc tc[] = {
// value, offset, expected_res
{ 0x0, -8, 0x66 },
{ 0x0, 0, 0x3334 },
{ 0x0, 1, 0x3333 },
- { 0xabc, 1, 0x3334 },
+ { 0xABC, 1, 0x3334 },
{ 0x0, 4, 0x2033 },
};
+ // clang-format on
size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBeqzc);
for (size_t i = 0; i < nr_test_cases; ++i) {
@@ -5599,12 +5575,12 @@ TEST(r6_beqzc) {
void load_elements_of_vector(MacroAssembler& assm, const uint64_t elements[],
MSARegister w, Register t0, Register t1) {
- __ li(t0, static_cast<uint32_t>(elements[0] & 0xffffffff));
- __ li(t1, static_cast<uint32_t>((elements[0] >> 32) & 0xffffffff));
+ __ li(t0, static_cast<uint32_t>(elements[0] & 0xFFFFFFFF));
+ __ li(t1, static_cast<uint32_t>((elements[0] >> 32) & 0xFFFFFFFF));
__ insert_w(w, 0, t0);
__ insert_w(w, 1, t1);
- __ li(t0, static_cast<uint32_t>(elements[1] & 0xffffffff));
- __ li(t1, static_cast<uint32_t>((elements[1] >> 32) & 0xffffffff));
+ __ li(t0, static_cast<uint32_t>(elements[1] & 0xFFFFFFFF));
+ __ li(t1, static_cast<uint32_t>((elements[1] >> 32) & 0xFFFFFFFF));
__ insert_w(w, 2, t0);
__ insert_w(w, 3, t1);
}
@@ -5641,7 +5617,7 @@ void run_bz_bnz(TestCaseMsaBranch* input, Branch GenerateBranch,
uint64_t wd_lo;
uint64_t wd_hi;
} T;
- T t = {0x20b9cc4f1a83e0c5, 0xa27e1b5f2f5bb18a, 0x0000000000000000,
+ T t = {0x20B9CC4F1A83E0C5, 0xA27E1B5F2F5BB18A, 0x0000000000000000,
0x0000000000000000};
msa_reg_t res;
Label do_not_move_w0_to_w2;
@@ -5665,9 +5641,9 @@ void run_bz_bnz(TestCaseMsaBranch* input, Branch GenerateBranch,
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
- (CALL_GENERATED_CODE(isolate, f, &res, 0, 0, 0, 0));
+ f.Call(&res, 0, 0, 0, 0);
if (branched) {
CHECK_EQ(t.wd_lo, res.d[0]);
CHECK_EQ(t.wd_hi, res.d[1]);
@@ -5682,7 +5658,7 @@ TEST(MSA_bz_bnz) {
return;
TestCaseMsaBranch tz_v[] = {
- {0x0, 0x0}, {0xabc, 0x0}, {0x0, 0xabc}, {0xabc, 0xabc}};
+ {0x0, 0x0}, {0xABC, 0x0}, {0x0, 0xABC}, {0xABC, 0xABC}};
for (unsigned i = 0; i < arraysize(tz_v); ++i) {
run_bz_bnz(
&tz_v[i],
@@ -5706,32 +5682,32 @@ TEST(MSA_bz_bnz) {
j != lanes); \
}
TestCaseMsaBranch tz_b[] = {{0x0, 0x0},
- {0xbc0000, 0x0},
- {0x0, 0xab000000000000cd},
- {0x123456789abcdef0, 0xaaaaaaaaaaaaaaaa}};
+ {0xBC0000, 0x0},
+ {0x0, 0xAB000000000000CD},
+ {0x123456789ABCDEF0, 0xAAAAAAAAAAAAAAAA}};
TEST_BZ_DF(tz_b, kMSALanesByte, bz_b, int8_t)
TestCaseMsaBranch tz_h[] = {{0x0, 0x0},
- {0xbcde0000, 0x0},
- {0x0, 0xabcd00000000abcd},
- {0x123456789abcdef0, 0xaaaaaaaaaaaaaaaa}};
+ {0xBCDE0000, 0x0},
+ {0x0, 0xABCD00000000ABCD},
+ {0x123456789ABCDEF0, 0xAAAAAAAAAAAAAAAA}};
TEST_BZ_DF(tz_h, kMSALanesHalf, bz_h, int16_t)
TestCaseMsaBranch tz_w[] = {{0x0, 0x0},
- {0xbcde123400000000, 0x0},
- {0x0, 0x000000001234abcd},
- {0x123456789abcdef0, 0xaaaaaaaaaaaaaaaa}};
+ {0xBCDE123400000000, 0x0},
+ {0x0, 0x000000001234ABCD},
+ {0x123456789ABCDEF0, 0xAAAAAAAAAAAAAAAA}};
TEST_BZ_DF(tz_w, kMSALanesWord, bz_w, int32_t)
TestCaseMsaBranch tz_d[] = {{0x0, 0x0},
- {0xbcde0000, 0x0},
- {0x0, 0xabcd00000000abcd},
- {0x123456789abcdef0, 0xaaaaaaaaaaaaaaaa}};
+ {0xBCDE0000, 0x0},
+ {0x0, 0xABCD00000000ABCD},
+ {0x123456789ABCDEF0, 0xAAAAAAAAAAAAAAAA}};
TEST_BZ_DF(tz_d, kMSALanesDword, bz_d, int64_t)
#undef TEST_BZ_DF
TestCaseMsaBranch tnz_v[] = {
- {0x0, 0x0}, {0xabc, 0x0}, {0x0, 0xabc}, {0xabc, 0xabc}};
+ {0x0, 0x0}, {0xABC, 0x0}, {0x0, 0xABC}, {0xABC, 0xABC}};
for (unsigned i = 0; i < arraysize(tnz_v); ++i) {
run_bz_bnz(&tnz_v[i],
[](MacroAssembler& assm, Label& br_target) {
@@ -5756,27 +5732,27 @@ TEST(MSA_bz_bnz) {
j == lanes); \
}
TestCaseMsaBranch tnz_b[] = {{0x0, 0x0},
- {0xbc0000, 0x0},
- {0x0, 0xab000000000000cd},
- {0x123456789abcdef0, 0xaaaaaaaaaaaaaaaa}};
+ {0xBC0000, 0x0},
+ {0x0, 0xAB000000000000CD},
+ {0x123456789ABCDEF0, 0xAAAAAAAAAAAAAAAA}};
TEST_BNZ_DF(tnz_b, 16, bnz_b, int8_t)
TestCaseMsaBranch tnz_h[] = {{0x0, 0x0},
- {0xbcde0000, 0x0},
- {0x0, 0xabcd00000000abcd},
- {0x123456789abcdef0, 0xaaaaaaaaaaaaaaaa}};
+ {0xBCDE0000, 0x0},
+ {0x0, 0xABCD00000000ABCD},
+ {0x123456789ABCDEF0, 0xAAAAAAAAAAAAAAAA}};
TEST_BNZ_DF(tnz_h, 8, bnz_h, int16_t)
TestCaseMsaBranch tnz_w[] = {{0x0, 0x0},
- {0xbcde123400000000, 0x0},
- {0x0, 0x000000001234abcd},
- {0x123456789abcdef0, 0xaaaaaaaaaaaaaaaa}};
+ {0xBCDE123400000000, 0x0},
+ {0x0, 0x000000001234ABCD},
+ {0x123456789ABCDEF0, 0xAAAAAAAAAAAAAAAA}};
TEST_BNZ_DF(tnz_w, 4, bnz_w, int32_t)
TestCaseMsaBranch tnz_d[] = {{0x0, 0x0},
- {0xbcde0000, 0x0},
- {0x0, 0xabcd00000000abcd},
- {0x123456789abcdef0, 0xaaaaaaaaaaaaaaaa}};
+ {0xBCDE0000, 0x0},
+ {0x0, 0xABCD00000000ABCD},
+ {0x123456789ABCDEF0, 0xAAAAAAAAAAAAAAAA}};
TEST_BNZ_DF(tnz_d, 2, bnz_d, int64_t)
#undef TEST_BNZ_DF
}
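
The branch conditions these macros exercise, as assumed here from the test data (the .V forms test the whole register, the .df forms test per element):

#include <cstddef>
#include <cstdint>

// bz.df is taken when at least one element is zero; bnz.df when all
// elements are non-zero. bz.v / bnz.v test the register as one value.
bool bz_df_taken(const uint64_t* elems, size_t n) {
  for (size_t i = 0; i < n; ++i)
    if (elems[i] == 0) return true;
  return false;
}
bool bnz_df_taken(const uint64_t* elems, size_t n) {
  return !bz_df_taken(elems, n);
}
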
@@ -5839,10 +5815,9 @@ uint64_t run_jialc(int16_t offset) {
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F2 f = FUNCTION_CAST<F2>(code->entry());
+ auto f = GeneratedCode<F2>::FromCode(*code);
- uint64_t res = reinterpret_cast<uint64_t>(
- CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
return res;
}
@@ -5892,11 +5867,10 @@ uint64_t run_addiupc(int32_t imm19) {
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F2 f = FUNCTION_CAST<F2>(code->entry());
- PC = (uint64_t) f; // Set the program counter.
+ auto f = GeneratedCode<F2>::FromCode(*code);
+ PC = (uint64_t)code->entry(); // Set the program counter.
- uint64_t res = reinterpret_cast<uint64_t>(
- CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
return res;
}
@@ -5939,24 +5913,24 @@ uint64_t run_ldpc(int offset) {
v8::internal::CodeObjectRequired::kYes);
// 256k instructions; 2 * 2^7k = 2^8k
- // addiu t3, a4, 0xffff; (0x250fffff)
+ // addiu t3, a4, 0xFFFF; (0x250FFFFF)
// ...
- // addiu t0, a4, 0x0000; (0x250c0000)
+ // addiu t0, a4, 0x0000; (0x250C0000)
uint32_t addiu_start_1 = 0x25000000;
- for (int32_t i = 0xfffff; i >= 0xc0000; --i) {
+ for (int32_t i = 0xFFFFF; i >= 0xC0000; --i) {
uint32_t addiu_new = addiu_start_1 + i;
__ dd(addiu_new);
}
- __ ldpc(t8, offset); // offset 0; 0xef080000 (t8 register)
+ __ ldpc(t8, offset); // offset 0; 0xEF080000 (t8 register)
__ mov(v0, t8);
// 256k instructions; 2 * 2^7k = 2^8k
// addiu a4, a4, 0x0000; (0x25080000)
// ...
- // addiu a7, a4, 0xffff; (0x250bffff)
+ // addiu a7, a4, 0xFFFF; (0x250BFFFF)
uint32_t addiu_start_2 = 0x25000000;
- for (int32_t i = 0x80000; i <= 0xbffff; ++i) {
+ for (int32_t i = 0x80000; i <= 0xBFFFF; ++i) {
uint32_t addiu_new = addiu_start_2 + i;
__ dd(addiu_new);
}
@@ -5969,10 +5943,9 @@ uint64_t run_ldpc(int offset) {
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F2 f = FUNCTION_CAST<F2>(code->entry());
+ auto f = GeneratedCode<F2>::FromCode(*code);
- uint64_t res = reinterpret_cast<uint64_t>(
- CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
return res;
}
@@ -5996,13 +5969,13 @@ TEST(r6_ldpc) {
TestCaseLdpc tc[] = {
// offset, expected_res
- {-131072, doubleword(0x250ffffe, 0x250fffff)},
- {-4, doubleword(0x250c0006, 0x250c0007)},
- {-1, doubleword(0x250c0000, 0x250c0001)},
- {0, doubleword(0x03001025, 0xef180000)},
+ {-131072, doubleword(0x250FFFFE, 0x250FFFFF)},
+ {-4, doubleword(0x250C0006, 0x250C0007)},
+ {-1, doubleword(0x250C0000, 0x250C0001)},
+ {0, doubleword(0x03001025, 0xEF180000)},
{1, doubleword(0x25080001, 0x25080000)},
{4, doubleword(0x25080007, 0x25080006)},
- {131071, doubleword(0x250bfffd, 0x250bfffc)},
+ {131071, doubleword(0x250BFFFD, 0x250BFFFC)},
};
size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseLdpc);
@@ -6059,10 +6032,9 @@ int64_t run_bc(int32_t offset) {
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F2 f = FUNCTION_CAST<F2>(code->entry());
+ auto f = GeneratedCode<F2>::FromCode(*code);
- int64_t res = reinterpret_cast<int64_t>(
- CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ int64_t res = reinterpret_cast<int64_t>(f.Call(0, 0, 0, 0, 0));
return res;
}
@@ -6142,10 +6114,9 @@ int64_t run_balc(int32_t offset) {
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F2 f = FUNCTION_CAST<F2>(code->entry());
+ auto f = GeneratedCode<F2>::FromCode(*code);
- int64_t res = reinterpret_cast<int64_t>(
- CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ int64_t res = reinterpret_cast<int64_t>(f.Call(0, 0, 0, 0, 0));
return res;
}
@@ -6193,10 +6164,9 @@ uint64_t run_dsll(uint64_t rt_value, uint16_t sa_value) {
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F4 f = FUNCTION_CAST<F4>(code->entry());
+ auto f = GeneratedCode<F4>::FromCode(*code);
- uint64_t res = reinterpret_cast<uint64_t>(
- CALL_GENERATED_CODE(isolate, f, rt_value, 0, 0, 0, 0));
+ uint64_t res = reinterpret_cast<uint64_t>(f.Call(rt_value, 0, 0, 0, 0));
return res;
}
@@ -6211,12 +6181,14 @@ TEST(dsll) {
uint64_t expected_res;
};
+ // clang-format off
struct TestCaseDsll tc[] = {
// rt_value, sa_value, expected_res
- { 0xffffffffffffffff, 0, 0xffffffffffffffff },
- { 0xffffffffffffffff, 16, 0xffffffffffff0000 },
- { 0xffffffffffffffff, 31, 0xffffffff80000000 },
+ { 0xFFFFFFFFFFFFFFFF, 0, 0xFFFFFFFFFFFFFFFF },
+ { 0xFFFFFFFFFFFFFFFF, 16, 0xFFFFFFFFFFFF0000 },
+ { 0xFFFFFFFFFFFFFFFF, 31, 0xFFFFFFFF80000000 },
};
+ // clang-format on
size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseDsll);
for (size_t i = 0; i < nr_test_cases; ++i) {
@@ -6250,10 +6222,9 @@ uint64_t run_bal(int16_t offset) {
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F2 f = FUNCTION_CAST<F2>(code->entry());
+ auto f = GeneratedCode<F2>::FromCode(*code);
- uint64_t res = reinterpret_cast<uint64_t>(
- CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
return res;
}
@@ -6267,10 +6238,12 @@ TEST(bal) {
uint64_t expected_res;
};
+ // clang-format off
struct TestCaseBal tc[] = {
// offset, expected_res
- { 4, 1 },
+ { 4, 1 },
};
+ // clang-format on
size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBal);
for (size_t i = 0; i < nr_test_cases; ++i) {
@@ -6303,10 +6276,9 @@ TEST(Trampoline) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F2 f = FUNCTION_CAST<F2>(code->entry());
+ auto f = GeneratedCode<F2>::FromCode(*code);
- int64_t res = reinterpret_cast<int64_t>(
- CALL_GENERATED_CODE(isolate, f, 42, 42, 0, 0, 0));
+ int64_t res = reinterpret_cast<int64_t>(f.Call(42, 42, 0, 0, 0));
CHECK_EQ(0, res);
}
@@ -6371,7 +6343,7 @@ void helper_madd_msub_maddf_msubf(F func) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
const size_t kTableLength = sizeof(test_cases) / sizeof(TestCaseMaddMsub<T>);
TestCaseMaddMsub<T> tc;
@@ -6380,7 +6352,7 @@ void helper_madd_msub_maddf_msubf(F func) {
tc.fs = test_cases[i].fs;
tc.ft = test_cases[i].ft;
- (CALL_GENERATED_CODE(isolate, f, &tc, 0, 0, 0, 0));
+ f.Call(&tc, 0, 0, 0, 0);
T res_sub;
T res_add;
@@ -6458,10 +6430,9 @@ uint64_t run_Subu(uint64_t imm, int32_t num_instr) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F2 f = FUNCTION_CAST<F2>(code->entry());
+ auto f = GeneratedCode<F2>::FromCode(*code);
- uint64_t res = reinterpret_cast<uint64_t>(
- CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
return res;
}
@@ -6480,39 +6451,41 @@ TEST(Subu) {
// We call Subu(v0, zero_reg, imm) to test cases listed below.
// 0 - imm = expected_res
+ // clang-format off
struct TestCaseSubu tc[] = {
- // imm, expected_res, num_instr
- {0xffffffffffff8000, 0x8000, 2}, // min_int16
+ // imm, expected_res, num_instr
+ {0xFFFFFFFFFFFF8000, 0x8000, 2}, // min_int16
// The test case above generates ori + addu instruction sequence.
// We can't have just addiu because -min_int16 > max_int16 so use
// register. We can load min_int16 to at register with addiu and then
// subtract at with subu, but now we use ori + addu because -min_int16 can
// be loaded using ori.
- {0x8000, 0xffffffffffff8000, 1}, // max_int16 + 1
+ {0x8000, 0xFFFFFFFFFFFF8000, 1}, // max_int16 + 1
// Generates addiu
// max_int16 + 1 is not int16 but -(max_int16 + 1) is, just use addiu.
- {0xffffffffffff7fff, 0x8001, 2}, // min_int16 - 1
+ {0xFFFFFFFFFFFF7FFF, 0x8001, 2}, // min_int16 - 1
// Generates ori + addu
// To load this value to at we need two instructions and another one to
// subtract, lui + ori + subu. But we can load -value to at using just
// ori and then add at register with addu.
- {0x8001, 0xffffffffffff7fff, 2}, // max_int16 + 2
+ {0x8001, 0xFFFFFFFFFFFF7FFF, 2}, // max_int16 + 2
// Generates ori + subu
// Not int16 but is uint16, load value to at with ori and subtract with
// subu.
- {0x00010000, 0xffffffffffff0000, 2},
+ {0x00010000, 0xFFFFFFFFFFFF0000, 2},
// Generates lui + subu
// Load value using lui to at and subtract with subu.
- {0x00010001, 0xfffffffffffeffff, 3},
+ {0x00010001, 0xFFFFFFFFFFFEFFFF, 3},
// Generates lui + ori + subu
// We have to generate three instructions in this case.
- {0x7fffffff, 0xffffffff80000001, 3}, // max_int32
+ {0x7FFFFFFF, 0xFFFFFFFF80000001, 3}, // max_int32
// Generates lui + ori + subu
- {0xffffffff80000000, 0xffffffff80000000, 2}, // min_int32
+ {0xFFFFFFFF80000000, 0xFFFFFFFF80000000, 2}, // min_int32
// The test case above generates lui + subu instruction sequence.
// The result of 0 - min_int32 equals max_int32 + 1, which wraps around to
// min_int32 again.
};
+ // clang-format on
size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseSubu);
for (size_t i = 0; i < nr_test_cases; ++i) {
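
A worked example for the min_int32 row: the subtraction wraps in 32 bits and the result is then sign-extended. A sketch of that arithmetic:

#include <cstdint>

uint64_t subu_zero_minus(uint32_t imm) {
  uint32_t wrapped = 0u - imm;  // 32-bit wraparound
  return static_cast<uint64_t>(static_cast<int32_t>(wrapped));
}
// subu_zero_minus(0x80000000u) == 0xFFFFFFFF80000000, matching the last row.
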
@@ -6541,10 +6514,9 @@ uint64_t run_Dsubu(uint64_t imm, int32_t num_instr) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F2 f = FUNCTION_CAST<F2>(code->entry());
+ auto f = GeneratedCode<F2>::FromCode(*code);
- uint64_t res = reinterpret_cast<uint64_t>(
- CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
return res;
}
@@ -6563,38 +6535,39 @@ TEST(Dsubu) {
// We call Dsubu(v0, zero_reg, imm) to test cases listed below.
// 0 - imm = expected_res
+ // clang-format off
struct TestCaseDsubu tc[] = {
// imm, expected_res, num_instr
- {0xffffffffffff8000, 0x8000, 2}, // min_int16
+ {0xFFFFFFFFFFFF8000, 0x8000, 2}, // min_int16
// The test case above generates daddiu + dsubu instruction sequence.
// We can't have just daddiu because -min_int16 > max_int16 so use
// register, but we can load min_int16 to at register with daddiu and then
// subtract at with dsubu.
- {0x8000, 0xffffffffffff8000, 1}, // max_int16 + 1
+ {0x8000, 0xFFFFFFFFFFFF8000, 1}, // max_int16 + 1
// Generates daddiu
// max_int16 + 1 is not int16 but -(max_int16 + 1) is, just use daddiu.
- {0xffffffffffff7fff, 0x8001, 2}, // min_int16 - 1
+ {0xFFFFFFFFFFFF7FFF, 0x8001, 2}, // min_int16 - 1
// Generates ori + daddu
// To load this value to at we need two instructions and another one to
// subtract, lui + ori + dsubu. But we can load -value to at using just
// ori and then dadd at register with daddu.
- {0x8001, 0xffffffffffff7fff, 2}, // max_int16 + 2
+ {0x8001, 0xFFFFFFFFFFFF7FFF, 2}, // max_int16 + 2
// Generates ori + dsubu
// Not int16 but is uint16, load value to at with ori and subtract with
// dsubu.
- {0x00010000, 0xffffffffffff0000, 2},
+ {0x00010000, 0xFFFFFFFFFFFF0000, 2},
// Generates lui + dsubu
// Load value using lui to at and subtract with dsubu.
- {0x00010001, 0xfffffffffffeffff, 3},
+ {0x00010001, 0xFFFFFFFFFFFEFFFF, 3},
// Generates lui + ori + dsubu
// We have to generate three instructions in this case.
- {0x7fffffff, 0xffffffff80000001, 3}, // max_int32
+ {0x7FFFFFFF, 0xFFFFFFFF80000001, 3}, // max_int32
// Generates lui + ori + dsubu
- {0xffffffff80000000, 0x0000000080000000, 2}, // min_int32
+ {0xFFFFFFFF80000000, 0x0000000080000000, 2}, // min_int32
// Generates lui + dsubu
// The result of 0 - min_int32 equals max_int32 + 1, which fits into a
// 64-bit register, so Dsubu gives a different result here.
- {0x7fffffffffffffff, 0x8000000000000001, 3}, // max_int64
+ {0x7FFFFFFFFFFFFFFF, 0x8000000000000001, 3}, // max_int64
// r2 - Generates daddiu + dsrl + dsubu
// r6 - Generates daddiu + dati + dsubu
{0x8000000000000000, 0x8000000000000000, 3}, // min_int64
@@ -6603,13 +6576,14 @@ TEST(Dsubu) {
// r6 - ori + dati + dsubu.
// The result of 0 - min_int64 equals max_int64 + 1, which wraps around to
// min_int64 again.
- {0xffff0000ffffffff, 0x0000ffff00000001, 4},
+ {0xFFFF0000FFFFFFFF, 0x0000FFFF00000001, 4},
// The test case above generates:
// r2 - ori + dsll32 + ori + daddu instruction sequence,
// r6 - daddiu + dahi + dati + dsubu.
// For r2 loading imm would take more instructions than loading -imm so we
// can load -imm and add with daddu.
};
+ // clang-format on
size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseDsubu);
for (size_t i = 0; i < nr_test_cases; ++i) {
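
The min_int32 row is where Dsubu and Subu diverge: the negation no longer wraps at 32 bits. A one-line check of that claim:

// 0 - sign_extend(min_int32) fits in 64 bits, so no wraparound occurs here.
static_assert(0ull - 0xFFFFFFFF80000000ull == 0x0000000080000000ull,
              "Dsubu result for the min_int32 row");
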
@@ -6634,10 +6608,9 @@ uint64_t run_Dins(uint64_t imm, uint64_t source, uint16_t pos, uint16_t size) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F2 f = FUNCTION_CAST<F2>(code->entry());
+ auto f = GeneratedCode<F2>::FromCode(*code);
- uint64_t res = reinterpret_cast<uint64_t>(
- CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
return res;
}
@@ -6657,18 +6630,20 @@ TEST(Dins) {
// We load imm to v0 and source to t0 and then call
// Dins(v0, t0, pos, size) to test cases listed below.
+ // clang-format off
struct TestCaseDins tc[] = {
// imm, source, pos, size, expected_res
- {0x5555555555555555, 0x1abcdef01, 31, 1, 0x55555555d5555555},
- {0x5555555555555555, 0x1abcdef02, 30, 2, 0x5555555595555555},
- {0x201234567, 0x1fabcdeff, 0, 32, 0x2fabcdeff},
- {0x201234567, 0x7fabcdeff, 31, 2, 0x381234567},
- {0x800000000, 0x7fabcdeff, 0, 33, 0x9fabcdeff},
- {0x1234, 0xabcdabcdabcdabcd, 0, 64, 0xabcdabcdabcdabcd},
- {0xabcd, 0xabceabcf, 32, 1, 0x10000abcd},
- {0xabcd, 0xabceabcf, 63, 1, 0x800000000000abcd},
- {0x10000abcd, 0xabc1abc2abc3abc4, 32, 32, 0xabc3abc40000abcd},
+ {0x5555555555555555, 0x1ABCDEF01, 31, 1, 0x55555555D5555555},
+ {0x5555555555555555, 0x1ABCDEF02, 30, 2, 0x5555555595555555},
+ {0x201234567, 0x1FABCDEFF, 0, 32, 0x2FABCDEFF},
+ {0x201234567, 0x7FABCDEFF, 31, 2, 0x381234567},
+ {0x800000000, 0x7FABCDEFF, 0, 33, 0x9FABCDEFF},
+ {0x1234, 0xABCDABCDABCDABCD, 0, 64, 0xABCDABCDABCDABCD},
+ {0xABCD, 0xABCEABCF, 32, 1, 0x10000ABCD},
+ {0xABCD, 0xABCEABCF, 63, 1, 0x800000000000ABCD},
+ {0x10000ABCD, 0xABC1ABC2ABC3ABC4, 32, 32, 0xABC3ABC40000ABCD},
};
+ // clang-format on
size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseDins);
for (size_t i = 0; i < nr_test_cases; ++i) {
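
A reference model for what these rows expect from Dins (assumed: size bits of the source replace the field at pos; size == 64 needs a special case to avoid an undefined shift):

#include <cstdint>

uint64_t dins_model(uint64_t imm, uint64_t source, unsigned pos,
                    unsigned size) {
  uint64_t mask =
      (size == 64) ? ~uint64_t{0} : ((uint64_t{1} << size) - 1) << pos;
  return (imm & ~mask) | ((source << pos) & mask);
}
// dins_model(0x5555555555555555, 0x1ABCDEF01, 31, 1) == 0x55555555D5555555
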
@@ -6694,10 +6669,9 @@ uint64_t run_Ins(uint64_t imm, uint64_t source, uint16_t pos, uint16_t size) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F2 f = FUNCTION_CAST<F2>(code->entry());
+ auto f = GeneratedCode<F2>::FromCode(*code);
- uint64_t res = reinterpret_cast<uint64_t>(
- CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
return res;
}
@@ -6707,44 +6681,44 @@ TEST(Ins) {
// run_Ins(rt_value, rs_value, pos, size),
// expected_result
- CHECK_EQ(run_Ins(0x0000000055555555, 0xffffffffabcdef01, 31, 1),
- 0xffffffffd5555555);
- CHECK_EQ(run_Ins(0x0000000055555555, 0xffffffffabcdef02, 30, 2),
- 0xffffffff95555555);
- CHECK_EQ(run_Ins(0x0000000001234567, 0xfffffffffabcdeff, 0, 32),
- 0xfffffffffabcdeff);
+ CHECK_EQ(run_Ins(0x0000000055555555, 0xFFFFFFFFABCDEF01, 31, 1),
+ 0xFFFFFFFFD5555555);
+ CHECK_EQ(run_Ins(0x0000000055555555, 0xFFFFFFFFABCDEF02, 30, 2),
+ 0xFFFFFFFF95555555);
+ CHECK_EQ(run_Ins(0x0000000001234567, 0xFFFFFFFFFABCDEFF, 0, 32),
+ 0xFFFFFFFFFABCDEFF);
// Results with positive sign.
- CHECK_EQ(run_Ins(0x0000000055555550, 0xffffffff80000001, 0, 1),
+ CHECK_EQ(run_Ins(0x0000000055555550, 0xFFFFFFFF80000001, 0, 1),
0x0000000055555551);
CHECK_EQ(run_Ins(0x0000000055555555, 0x0000000040000001, 0, 32),
0x0000000040000001);
CHECK_EQ(run_Ins(0x0000000055555555, 0x0000000020000001, 1, 31),
0x0000000040000003);
- CHECK_EQ(run_Ins(0x0000000055555555, 0xffffffff80700001, 8, 24),
+ CHECK_EQ(run_Ins(0x0000000055555555, 0xFFFFFFFF80700001, 8, 24),
0x0000000070000155);
- CHECK_EQ(run_Ins(0x0000000055555555, 0xffffffff80007001, 16, 16),
+ CHECK_EQ(run_Ins(0x0000000055555555, 0xFFFFFFFF80007001, 16, 16),
0x0000000070015555);
- CHECK_EQ(run_Ins(0x0000000055555555, 0xffffffff80000071, 24, 8),
+ CHECK_EQ(run_Ins(0x0000000055555555, 0xFFFFFFFF80000071, 24, 8),
0x0000000071555555);
CHECK_EQ(run_Ins(0x0000000075555555, 0x0000000040000000, 31, 1),
0x0000000075555555);
// Results with negative sign.
- CHECK_EQ(run_Ins(0xffffffff85555550, 0xffffffff80000001, 0, 1),
- 0xffffffff85555551);
- CHECK_EQ(run_Ins(0x0000000055555555, 0xffffffff80000001, 0, 32),
- 0xffffffff80000001);
+ CHECK_EQ(run_Ins(0xFFFFFFFF85555550, 0xFFFFFFFF80000001, 0, 1),
+ 0xFFFFFFFF85555551);
+ CHECK_EQ(run_Ins(0x0000000055555555, 0xFFFFFFFF80000001, 0, 32),
+ 0xFFFFFFFF80000001);
CHECK_EQ(run_Ins(0x0000000055555555, 0x0000000040000001, 1, 31),
- 0xffffffff80000003);
- CHECK_EQ(run_Ins(0x0000000055555555, 0xffffffff80800001, 8, 24),
- 0xffffffff80000155);
- CHECK_EQ(run_Ins(0x0000000055555555, 0xffffffff80008001, 16, 16),
- 0xffffffff80015555);
- CHECK_EQ(run_Ins(0x0000000055555555, 0xffffffff80000081, 24, 8),
- 0xffffffff81555555);
+ 0xFFFFFFFF80000003);
+ CHECK_EQ(run_Ins(0x0000000055555555, 0xFFFFFFFF80800001, 8, 24),
+ 0xFFFFFFFF80000155);
+ CHECK_EQ(run_Ins(0x0000000055555555, 0xFFFFFFFF80008001, 16, 16),
+ 0xFFFFFFFF80015555);
+ CHECK_EQ(run_Ins(0x0000000055555555, 0xFFFFFFFF80000081, 24, 8),
+ 0xFFFFFFFF81555555);
CHECK_EQ(run_Ins(0x0000000075555555, 0x0000000000000001, 31, 1),
- 0xfffffffff5555555);
+ 0xFFFFFFFFF5555555);
}
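
Ins is the 32-bit counterpart: the field insert happens in the low word and the 32-bit result is sign-extended, which is why every expectation whose bit 31 ends up set carries the 0xFFFFFFFF prefix. A sketch under the same assumptions as the Dins model above:

#include <cstdint>

uint64_t ins_model(uint32_t imm, uint32_t source, unsigned pos,
                   unsigned size) {
  uint32_t mask =
      (size == 32) ? ~uint32_t{0} : ((uint32_t{1} << size) - 1) << pos;
  uint32_t r = (imm & ~mask) | ((source << pos) & mask);
  return static_cast<uint64_t>(static_cast<int32_t>(r));
}
// ins_model(0x55555555, 0xABCDEF01, 31, 1) == 0xFFFFFFFFD5555555
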
uint64_t run_Ext(uint64_t source, uint16_t pos, uint16_t size) {
@@ -6754,7 +6728,7 @@ uint64_t run_Ext(uint64_t source, uint16_t pos, uint16_t size) {
MacroAssembler assm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
- __ li(v0, 0xffffffffffffffff);
+ __ li(v0, 0xFFFFFFFFFFFFFFFF);
__ li(t0, source);
__ Ext(v0, t0, pos, size);
__ jr(ra);
@@ -6764,10 +6738,9 @@ uint64_t run_Ext(uint64_t source, uint16_t pos, uint16_t size) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F2 f = FUNCTION_CAST<F2>(code->entry());
+ auto f = GeneratedCode<F2>::FromCode(*code);
- uint64_t res = reinterpret_cast<uint64_t>(
- CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
return res;
}
@@ -6777,13 +6750,13 @@ TEST(Ext) {
// Source values with negative sign.
// run_Ext(rs_value, pos, size), expected_result
- CHECK_EQ(run_Ext(0xffffffff80000001, 0, 1), 0x0000000000000001);
- CHECK_EQ(run_Ext(0xffffffff80000001, 0, 32), 0xffffffff80000001);
- CHECK_EQ(run_Ext(0xffffffff80000002, 1, 31), 0x0000000040000001);
- CHECK_EQ(run_Ext(0xffffffff80000100, 8, 24), 0x0000000000800001);
- CHECK_EQ(run_Ext(0xffffffff80010000, 16, 16), 0x0000000000008001);
- CHECK_EQ(run_Ext(0xffffffff81000000, 24, 8), 0x0000000000000081);
- CHECK_EQ(run_Ext(0xffffffff80000000, 31, 1), 0x0000000000000001);
+ CHECK_EQ(run_Ext(0xFFFFFFFF80000001, 0, 1), 0x0000000000000001);
+ CHECK_EQ(run_Ext(0xFFFFFFFF80000001, 0, 32), 0xFFFFFFFF80000001);
+ CHECK_EQ(run_Ext(0xFFFFFFFF80000002, 1, 31), 0x0000000040000001);
+ CHECK_EQ(run_Ext(0xFFFFFFFF80000100, 8, 24), 0x0000000000800001);
+ CHECK_EQ(run_Ext(0xFFFFFFFF80010000, 16, 16), 0x0000000000008001);
+ CHECK_EQ(run_Ext(0xFFFFFFFF81000000, 24, 8), 0x0000000000000081);
+ CHECK_EQ(run_Ext(0xFFFFFFFF80000000, 31, 1), 0x0000000000000001);
// Source values with positive sign.
CHECK_EQ(run_Ext(0x0000000000000001, 0, 1), 0x0000000000000001);
@@ -6819,7 +6792,7 @@ TEST(MSA_fill_copy) {
{
CpuFeatureScope fscope(&assm, MIPS_SIMD);
- __ li(t0, 0x9e7689aca512b683);
+ __ li(t0, 0x9E7689ACA512B683);
__ fill_b(w0, t0);
__ fill_h(w2, t0);
@@ -6852,18 +6825,17 @@ TEST(MSA_fill_copy) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
- USE(dummy);
+ f.Call(&t, 0, 0, 0, 0);
CHECK_EQ(0x83u, t.u8);
- CHECK_EQ(0xb683u, t.u16);
- CHECK_EQ(0xa512b683u, t.u32);
- CHECK_EQ(0xffffffffffffff83u, t.s8);
- CHECK_EQ(0xffffffffffffb683u, t.s16);
- CHECK_EQ(0xffffffffa512b683u, t.s32);
- CHECK_EQ(0x9e7689aca512b683u, t.s64);
+ CHECK_EQ(0xB683u, t.u16);
+ CHECK_EQ(0xA512B683u, t.u32);
+ CHECK_EQ(0xFFFFFFFFFFFFFF83u, t.s8);
+ CHECK_EQ(0xFFFFFFFFFFFFB683u, t.s16);
+ CHECK_EQ(0xFFFFFFFFA512B683u, t.s32);
+ CHECK_EQ(0x9E7689ACA512B683u, t.s64);
}
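
The paired expectations contrast copy_u (zero-extend) with copy_s (sign-extend) on the same lane value; for the halfword lane of 0x...B683, for instance:

#include <cstdint>

uint64_t copy_u_h(uint16_t lane) { return lane; }  // zero-extend
uint64_t copy_s_h(uint16_t lane) {                 // sign-extend
  return static_cast<uint64_t>(static_cast<int16_t>(lane));
}
// copy_u_h(0xB683) == 0x000000000000B683
// copy_s_h(0xB683) == 0xFFFFFFFFFFFFB683
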
TEST(MSA_fill_copy_2) {
@@ -6887,7 +6859,7 @@ TEST(MSA_fill_copy_2) {
{
CpuFeatureScope fscope(&assm, MIPS_SIMD);
- __ li(t0, 0xaaaaaaaaaaaaaaaa);
+ __ li(t0, 0xAAAAAAAAAAAAAAAA);
__ li(t1, 0x5555555555555555);
__ fill_d(w0, t0);
@@ -6917,15 +6889,14 @@ TEST(MSA_fill_copy_2) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F5 f = FUNCTION_CAST<F5>(code->entry());
+ auto f = GeneratedCode<F5>::FromCode(*code);
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &t[0], &t[1], 0, 0, 0);
- USE(dummy);
+ f.Call(&t[0], &t[1], 0, 0, 0);
CHECK_EQ(0x5555555555555555, t[0].d0);
- CHECK_EQ(0xaaaaaaaaaaaaaaaa, t[0].d1);
+ CHECK_EQ(0xAAAAAAAAAAAAAAAA, t[0].d1);
CHECK_EQ(0x5555555555555555, t[1].d0);
- CHECK_EQ(0xaaaaaaaaaaaaaaaa, t[1].d1);
+ CHECK_EQ(0xAAAAAAAAAAAAAAAA, t[1].d1);
}
TEST(MSA_fill_copy_3) {
@@ -6949,7 +6920,7 @@ TEST(MSA_fill_copy_3) {
{
CpuFeatureScope fscope(&assm, MIPS_SIMD);
- __ li(t0, 0xaaaaaaaaaaaaaaaa);
+ __ li(t0, 0xAAAAAAAAAAAAAAAA);
__ li(t1, 0x5555555555555555);
__ Move(f0, t0);
@@ -6972,10 +6943,9 @@ TEST(MSA_fill_copy_3) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F5 f = FUNCTION_CAST<F5>(code->entry());
+ auto f = GeneratedCode<F5>::FromCode(*code);
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &t[0], &t[1], 0, 0, 0);
- USE(dummy);
+ f.Call(&t[0], &t[1], 0, 0, 0);
CHECK_EQ(0x5555555555555555, t[0].d0);
CHECK_EQ(0x5555555555555555, t[1].d0);
@@ -7023,9 +6993,9 @@ void run_msa_insert(int64_t rs_value, int n, msa_reg_t* w) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
- (CALL_GENERATED_CODE(isolate, f, w, 0, 0, 0, 0));
+ f.Call(w, 0, 0, 0, 0);
}
TEST(MSA_insert) {
@@ -7041,12 +7011,15 @@ TEST(MSA_insert) {
uint64_t exp_res_hi;
};
+ // clang-format off
struct TestCaseInsert tc_b[] = {
- // input, n, exp_res_lo, exp_res_hi
- {0xa2, 13, 0xffffffffffffffffu, 0xffffa2ffffffffffu},
- {0x73, 10, 0xffffffffffffffffu, 0xffffffffff73ffffu},
- {0x3494, 5, 0xffff94ffffffffffu, 0xffffffffffffffffu},
- {0xa6b8, 1, 0xffffffffffffb8ffu, 0xffffffffffffffffu}};
+ // input, n, exp_res_lo, exp_res_hi
+ { 0xA2, 13, 0xFFFFFFFFFFFFFFFFu, 0xFFFFA2FFFFFFFFFFu},
+ { 0x73, 10, 0xFFFFFFFFFFFFFFFFu, 0xFFFFFFFFFF73FFFFu},
+ {0x3494, 5, 0xFFFF94FFFFFFFFFFu, 0xFFFFFFFFFFFFFFFFu},
+ {0xA6B8, 1, 0xFFFFFFFFFFFFB8FFu, 0xFFFFFFFFFFFFFFFFu}
+ };
+ // clang-format on
for (size_t i = 0; i < sizeof(tc_b) / sizeof(TestCaseInsert); ++i) {
msa_reg_t res;
@@ -7055,12 +7028,15 @@ TEST(MSA_insert) {
CHECK_EQ(tc_b[i].exp_res_hi, res.d[1]);
}
+ // clang-format off
struct TestCaseInsert tc_h[] = {
- // input, n, exp_res_lo, exp_res_hi
- {0x85a2, 7, 0xffffffffffffffffu, 0x85a2ffffffffffffu},
- {0xe873, 5, 0xffffffffffffffffu, 0xffffffffe873ffffu},
- {0x3494, 3, 0x3494ffffffffffffu, 0xffffffffffffffffu},
- {0xa6b8, 1, 0xffffffffa6b8ffffu, 0xffffffffffffffffu}};
+ // input, n, exp_res_lo, exp_res_hi
+ {0x85A2, 7, 0xFFFFFFFFFFFFFFFFu, 0x85A2FFFFFFFFFFFFu},
+ {0xE873, 5, 0xFFFFFFFFFFFFFFFFu, 0xFFFFFFFFE873FFFFu},
+ {0x3494, 3, 0x3494FFFFFFFFFFFFu, 0xFFFFFFFFFFFFFFFFu},
+ {0xA6B8, 1, 0xFFFFFFFFA6B8FFFFu, 0xFFFFFFFFFFFFFFFFu}
+ };
+ // clang-format on
for (size_t i = 0; i < sizeof(tc_h) / sizeof(TestCaseInsert); ++i) {
msa_reg_t res;
@@ -7069,12 +7045,15 @@ TEST(MSA_insert) {
CHECK_EQ(tc_h[i].exp_res_hi, res.d[1]);
}
+ // clang-format off
struct TestCaseInsert tc_w[] = {
- // input, n, exp_res_lo, exp_res_hi
- {0xd2f085a2u, 3, 0xffffffffffffffffu, 0xd2f085a2ffffffffu},
- {0x4567e873u, 2, 0xffffffffffffffffu, 0xffffffff4567e873u},
- {0xacdb3494u, 1, 0xacdb3494ffffffffu, 0xffffffffffffffffu},
- {0x89aba6b8u, 0, 0xffffffff89aba6b8u, 0xffffffffffffffffu}};
+ // input, n, exp_res_lo, exp_res_hi
+ {0xD2F085A2u, 3, 0xFFFFFFFFFFFFFFFFu, 0xD2F085A2FFFFFFFFu},
+ {0x4567E873u, 2, 0xFFFFFFFFFFFFFFFFu, 0xFFFFFFFF4567E873u},
+ {0xACDB3494u, 1, 0xACDB3494FFFFFFFFu, 0xFFFFFFFFFFFFFFFFu},
+ {0x89ABA6B8u, 0, 0xFFFFFFFF89ABA6B8u, 0xFFFFFFFFFFFFFFFFu}
+ };
+ // clang-format on
for (size_t i = 0; i < sizeof(tc_w) / sizeof(TestCaseInsert); ++i) {
msa_reg_t res;
@@ -7083,10 +7062,13 @@ TEST(MSA_insert) {
CHECK_EQ(tc_w[i].exp_res_hi, res.d[1]);
}
+ // clang-format off
struct TestCaseInsert tc_d[] = {
- // input, n, exp_res_lo, exp_res_hi
- {0xf35862e13e38f8b0, 1, 0xffffffffffffffffu, 0xf35862e13e38f8b0},
- {0x4f41ffdef2bfe636, 0, 0x4f41ffdef2bfe636, 0xffffffffffffffffu}};
+ // input, n, exp_res_lo, exp_res_hi
+ {0xF35862E13E38F8B0, 1, 0xFFFFFFFFFFFFFFFFu, 0xF35862E13E38F8B0},
+ {0x4F41FFDEF2BFE636, 0, 0x4F41FFDEF2BFE636, 0xFFFFFFFFFFFFFFFFu}
+ };
+ // clang-format on
for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseInsert); ++i) {
msa_reg_t res;
@@ -7122,13 +7104,13 @@ void run_msa_ctc_cfc(uint64_t value) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
uint64_t res;
- (CALL_GENERATED_CODE(isolate, f, &res, 0, 0, 0, 0));
+ f.Call(&res, 0, 0, 0, 0);
CHECK_EQ(bit_cast<uint64_t>(static_cast<int64_t>(
- bit_cast<int32_t>(static_cast<uint32_t>(value & 0x0167ffff)))),
+ bit_cast<int32_t>(static_cast<uint32_t>(value & 0x0167FFFF)))),
res);
}
@@ -7145,12 +7127,12 @@ TEST(MSA_move_v) {
uint64_t wd_lo;
uint64_t wd_hi;
} T;
- T t[] = {{0x20b9cc4f1a83e0c5, 0xa27e1b5f2f5bb18a, 0x1e86678b52f8e1ff,
- 0x706e51290ac76fb9},
- {0x4414aed7883ffd18, 0x047d183a06b67016, 0x4ef258cf8d822870,
- 0x2686b73484c2e843},
- {0xd38ff9d048884ffc, 0x6dc63a57c0943ca7, 0x8520ca2f3e97c426,
- 0xa9913868fb819c59}};
+ T t[] = {{0x20B9CC4F1A83E0C5, 0xA27E1B5F2F5BB18A, 0x1E86678B52F8E1FF,
+ 0x706E51290AC76FB9},
+ {0x4414AED7883FFD18, 0x047D183A06B67016, 0x4EF258CF8D822870,
+ 0x2686B73484C2E843},
+ {0xD38FF9D048884FFC, 0x6DC63A57C0943CA7, 0x8520CA2F3E97C426,
+ 0xA9913868FB819C59}};
for (unsigned i = 0; i < arraysize(t); ++i) {
MacroAssembler assm(isolate, nullptr, 0,
@@ -7172,8 +7154,8 @@ TEST(MSA_move_v) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
- (CALL_GENERATED_CODE(isolate, f, &t[i].wd_lo, 0, 0, 0, 0));
+ auto f = GeneratedCode<F3>::FromCode(*code);
+ f.Call(&t[i].wd_lo, 0, 0, 0, 0);
CHECK_EQ(t[i].ws_lo, t[i].wd_lo);
CHECK_EQ(t[i].ws_hi, t[i].wd_hi);
}
@@ -7191,12 +7173,12 @@ void run_msa_sldi(OperFunc GenerateOperation,
uint64_t wd_lo;
uint64_t wd_hi;
} T;
- T t[] = {{0x20b9cc4f1a83e0c5, 0xa27e1b5f2f5bb18a, 0x1e86678b52f8e1ff,
- 0x706e51290ac76fb9},
- {0x4414aed7883ffd18, 0x047d183a06b67016, 0x4ef258cf8d822870,
- 0x2686b73484c2e843},
- {0xd38ff9d048884ffc, 0x6dc63a57c0943ca7, 0x8520ca2f3e97c426,
- 0xa9913868fb819c59}};
+ T t[] = {{0x20B9CC4F1A83E0C5, 0xA27E1B5F2F5BB18A, 0x1E86678B52F8E1FF,
+ 0x706E51290AC76FB9},
+ {0x4414AED7883FFD18, 0x047D183A06B67016, 0x4EF258CF8D822870,
+ 0x2686B73484C2E843},
+ {0xD38FF9D048884FFC, 0x6DC63A57C0943CA7, 0x8520CA2F3E97C426,
+ 0xA9913868FB819C59}};
uint64_t res[2];
for (unsigned i = 0; i < arraysize(t); ++i) {
@@ -7218,8 +7200,8 @@ void run_msa_sldi(OperFunc GenerateOperation,
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
- (CALL_GENERATED_CODE(isolate, f, &res[0], 0, 0, 0, 0));
+ auto f = GeneratedCode<F3>::FromCode(*code);
+ f.Call(&res[0], 0, 0, 0, 0);
GenerateExpectedResult(reinterpret_cast<uint8_t*>(&t[i].ws_lo),
reinterpret_cast<uint8_t*>(&t[i].wd_lo));
CHECK_EQ(res[0], t[i].wd_lo);
@@ -7284,14 +7266,14 @@ TEST(MSA_cfc_ctc) {
CcTest::InitializeVM();
- const uint64_t mask_without_cause = 0xffffffffff9c0fff;
- const uint64_t mask_always_zero = 0x0167ffff;
- const uint64_t mask_enables = 0x0000000000000f80;
- uint64_t test_case[] = {0x30c6f6352d5ede31, 0xefc9fed507955425,
- 0x64f2a3ff15b7dbe3, 0x6aa069352bf8bc37,
- 0x7ea7ab2ae6aae923, 0xa10f5d4c24d0f68d,
- 0x6dd14c9441afa84c, 0xc366373b2d6bf64f,
- 0x6b35fb04925014bd, 0x9e3ea39a4dba7e61};
+ const uint64_t mask_without_cause = 0xFFFFFFFFFF9C0FFF;
+ const uint64_t mask_always_zero = 0x0167FFFF;
+ const uint64_t mask_enables = 0x0000000000000F80;
+ uint64_t test_case[] = {0x30C6F6352D5EDE31, 0xEFC9FED507955425,
+ 0x64F2A3FF15B7DBE3, 0x6AA069352BF8BC37,
+ 0x7EA7AB2AE6AAE923, 0xA10F5D4C24D0F68D,
+ 0x6DD14C9441AFA84C, 0xC366373B2D6BF64F,
+ 0x6B35FB04925014BD, 0x9E3EA39A4DBA7E61};
for (unsigned i = 0; i < arraysize(test_case); i++) {
// Setting enable bits together with their corresponding cause bits could
// result in an exception being raised; masking them out prevents that.
@@ -7316,8 +7298,8 @@ void run_msa_i8(SecondaryField opcode, uint64_t ws_lo, uint64_t ws_hi,
v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
- uint64_t wd_lo = 0xf35862e13e38f8b0;
- uint64_t wd_hi = 0x4f41ffdef2bfe636;
+ uint64_t wd_lo = 0xF35862E13E38F8B0;
+ uint64_t wd_hi = 0x4F41FFDEF2BFE636;
#define LOAD_W_REG(lo, hi, w_reg) \
__ li(t0, lo); \
@@ -7379,9 +7361,9 @@ void run_msa_i8(SecondaryField opcode, uint64_t ws_lo, uint64_t ws_hi,
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
- (CALL_GENERATED_CODE(isolate, f, &res, 0, 0, 0, 0));
+ f.Call(&res, 0, 0, 0, 0);
uint64_t mask = i8 * 0x0101010101010101ull;
switch (opcode) {
@@ -7416,13 +7398,13 @@ void run_msa_i8(SecondaryField opcode, uint64_t ws_lo, uint64_t ws_hi,
case SHF_B: {
struct ExpResShf exp_b[] = {
// i8, exp_lo, exp_hi
- {0xffu, 0x11111111b9b9b9b9, 0xf7f7f7f7c8c8c8c8},
- {0x0u, 0x62626262dfdfdfdf, 0xd6d6d6d6c8c8c8c8},
- {0xe4u, 0xf35862e13e38f8b0, 0x4f41ffdef2bfe636},
- {0x1bu, 0x1b756911c3d9a7b9, 0xae94a5f79c8aefc8},
- {0xb1u, 0x662b6253e8c4df12, 0x0d3ad6803f8bc88b},
- {0x4eu, 0x62e1f358f8b03e38, 0xffde4f41e636f2bf},
- {0x27u, 0x1b697511c3a7d9b9, 0xaea594f79cef8ac8}};
+ {0xFFu, 0x11111111B9B9B9B9, 0xF7F7F7F7C8C8C8C8},
+ {0x0u, 0x62626262DFDFDFDF, 0xD6D6D6D6C8C8C8C8},
+ {0xE4u, 0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636},
+ {0x1Bu, 0x1B756911C3D9A7B9, 0xAE94A5F79C8AEFC8},
+ {0xB1u, 0x662B6253E8C4DF12, 0x0D3AD6803F8BC88B},
+ {0x4Eu, 0x62E1F358F8B03E38, 0xFFDE4F41E636F2BF},
+ {0x27u, 0x1B697511C3A7D9B9, 0xAEA594F79CEF8AC8}};
for (size_t i = 0; i < sizeof(exp_b) / sizeof(ExpResShf); ++i) {
if (exp_b[i].i8 == i8) {
CHECK_EQ(exp_b[i].lo, res.d[0]);
@@ -7433,13 +7415,13 @@ void run_msa_i8(SecondaryField opcode, uint64_t ws_lo, uint64_t ws_hi,
case SHF_H: {
struct ExpResShf exp_h[] = {
// i8, exp_lo, exp_hi
- {0xffu, 0x1169116911691169, 0xf7a5f7a5f7a5f7a5},
- {0x0u, 0x12df12df12df12df, 0x8bc88bc88bc88bc8},
- {0xe4u, 0xf35862e13e38f8b0, 0x4f41ffdef2bfe636},
- {0x1bu, 0xd9c3b9a7751b1169, 0x8a9cc8ef94aef7a5},
- {0xb1u, 0x53622b6612dfc4e8, 0x80d63a0d8bc88b3f},
- {0x4eu, 0x3e38f8b0f35862e1, 0xf2bfe6364f41ffde},
- {0x27u, 0xd9c3751bb9a71169, 0x8a9c94aec8eff7a5}};
+ {0xFFu, 0x1169116911691169, 0xF7A5F7A5F7A5F7A5},
+ {0x0u, 0x12DF12DF12DF12DF, 0x8BC88BC88BC88BC8},
+ {0xE4u, 0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636},
+ {0x1Bu, 0xD9C3B9A7751B1169, 0x8A9CC8EF94AEF7A5},
+ {0xB1u, 0x53622B6612DFC4E8, 0x80D63A0D8BC88B3F},
+ {0x4Eu, 0x3E38F8B0F35862E1, 0xF2BFE6364F41FFDE},
+ {0x27u, 0xD9C3751BB9A71169, 0x8A9C94AEC8EFF7A5}};
for (size_t i = 0; i < sizeof(exp_h) / sizeof(ExpResShf); ++i) {
if (exp_h[i].i8 == i8) {
CHECK_EQ(exp_h[i].lo, res.d[0]);
@@ -7450,13 +7432,13 @@ void run_msa_i8(SecondaryField opcode, uint64_t ws_lo, uint64_t ws_hi,
case SHF_W: {
struct ExpResShf exp_w[] = {
// i8, exp_lo, exp_hi
- {0xffu, 0xf7a594aef7a594ae, 0xf7a594aef7a594ae},
- {0x0u, 0xc4e812dfc4e812df, 0xc4e812dfc4e812df},
- {0xe4u, 0xf35862e13e38f8b0, 0x4f41ffdef2bfe636},
- {0x1bu, 0xc8ef8a9cf7a594ae, 0xb9a7d9c31169751b},
- {0xb1u, 0xc4e812df2b665362, 0x8b3f8bc83a0d80d6},
- {0x4eu, 0x4f41ffdef2bfe636, 0xf35862e13e38f8b0},
- {0x27u, 0x1169751bf7a594ae, 0xb9a7d9c3c8ef8a9c}};
+ {0xFFu, 0xF7A594AEF7A594AE, 0xF7A594AEF7A594AE},
+ {0x0u, 0xC4E812DFC4E812DF, 0xC4E812DFC4E812DF},
+ {0xE4u, 0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636},
+ {0x1Bu, 0xC8EF8A9CF7A594AE, 0xB9A7D9C31169751B},
+ {0xB1u, 0xC4E812DF2B665362, 0x8B3F8BC83A0D80D6},
+ {0x4Eu, 0x4F41FFDEF2BFE636, 0xF35862E13E38F8B0},
+ {0x27u, 0x1169751BF7A594AE, 0xB9A7D9C3C8EF8A9C}};
for (size_t i = 0; i < sizeof(exp_w) / sizeof(ExpResShf); ++i) {
if (exp_w[i].i8 == i8) {
CHECK_EQ(exp_w[i].lo, res.d[0]);
@@ -7481,11 +7463,15 @@ TEST(MSA_andi_ori_nori_xori) {
CcTest::InitializeVM();
- struct TestCaseMsaI8 tc[] = {// input_lo, input_hi, i8
- {0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c, 0xffu},
- {0x2b665362c4e812df, 0x3a0d80d68b3f8bc8, 0x0u},
- {0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c, 0x3bu},
- {0x2b665362c4e812df, 0x3a0d80d68b3f8bc8, 0xd9u}};
+ // clang-format off
+ struct TestCaseMsaI8 tc[] = {
+ // input_lo, input_hi, i8
+ {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0xFFu},
+ {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 0x0u},
+ {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0x3Bu},
+ {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 0xD9u}
+ };
+ // clang-format on
for (size_t i = 0; i < sizeof(tc) / sizeof(TestCaseMsaI8); ++i) {
run_msa_i8(ANDI_B, tc[i].input_lo, tc[i].input_hi, tc[i].i8);
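The clang-format off/on guards that this and the following hunks introduce protect the hand-aligned test-vector tables: without them, the formatter would reflow the rows and destroy the columnar layout of the constants. A minimal illustration of the idiom (one abbreviated row; not part of the patch):

    // clang-format off
    struct TestCaseMsaI8 tc[] = {
        // input_lo,          input_hi,           i8
        {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0xFFu},
    };
    // clang-format on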
@@ -7501,11 +7487,15 @@ TEST(MSA_bmnzi_bmzi_bseli) {
CcTest::InitializeVM();
- struct TestCaseMsaI8 tc[] = {// input_lo, input_hi, i8
- {0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c, 0xffu},
- {0x2b665362c4e812df, 0x3a0d80d68b3f8bc8, 0x0u},
- {0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c, 0x3bu},
- {0x2b665362c4e812df, 0x3a0d80d68b3f8bc8, 0xd9u}};
+ // clang-format off
+ struct TestCaseMsaI8 tc[] = {
+ // input_lo, input_hi, i8
+ {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0xFFu},
+ {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 0x0u},
+ {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0x3Bu},
+ {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 0xD9u}
+ };
+ // clang-format on
for (size_t i = 0; i < sizeof(tc) / sizeof(TestCaseMsaI8); ++i) {
run_msa_i8(BMNZI_B, tc[i].input_lo, tc[i].input_hi, tc[i].i8);
@@ -7520,16 +7510,18 @@ TEST(MSA_shf) {
CcTest::InitializeVM();
+ // clang-format off
struct TestCaseMsaI8 tc[] = {
// input_lo, input_hi, i8
- {0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c, 0xffu}, // 3333
- {0x2b665362c4e812df, 0x3a0d80d68b3f8bc8, 0x0u}, // 0000
- {0xf35862e13e38f8b0, 0x4f41ffdef2bfe636, 0xe4u}, // 3210
- {0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c, 0x1bu}, // 0123
- {0x2b665362c4e812df, 0x3a0d80d68b3f8bc8, 0xb1u}, // 2301
- {0xf35862e13e38f8b0, 0x4f41ffdef2bfe636, 0x4eu}, // 1032
- {0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c, 0x27u} // 0213
+ {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0xFFu}, // 3333
+ {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 0x0u}, // 0000
+ {0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636, 0xE4u}, // 3210
+ {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0x1Bu}, // 0123
+ {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 0xB1u}, // 2301
+ {0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636, 0x4Eu}, // 1032
+ {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0x27u} // 0213
};
+ // clang-format on
for (size_t i = 0; i < sizeof(tc) / sizeof(TestCaseMsaI8); ++i) {
run_msa_i8(SHF_B, tc[i].input_lo, tc[i].input_hi, tc[i].i8);
@@ -7574,9 +7566,9 @@ void run_msa_i5(struct TestCaseMsaI5* input, bool i5_sign_ext,
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
- (CALL_GENERATED_CODE(isolate, f, &res, 0, 0, 0, 0));
+ f.Call(&res, 0, 0, 0, 0);
CHECK_EQ(GenerateOperationFunc(input->ws_lo, input->i5), res.d[0]);
CHECK_EQ(GenerateOperationFunc(input->ws_hi, input->i5), res.d[1]);
@@ -7588,14 +7580,17 @@ TEST(MSA_addvi_subvi) {
CcTest::InitializeVM();
+ // clang-format off
struct TestCaseMsaI5 tc[] = {
- // ws_lo, ws_hi, i5
- {0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c, 0x0000001f},
- {0x2b665362c4e812df, 0x3a0d80d68b3f8bc8, 0x0000000f},
- {0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c, 0x00000005},
- {0x2b665362c4e812df, 0x3a0d80d68b3f8bc8, 0x00000010},
- {0xffab807f807fffcd, 0x7f23ff80ff567f80, 0x0000000f},
- {0x80ffefff7f12807f, 0x807f80ff7fdeff78, 0x00000010}};
+ // ws_lo, ws_hi, i5
+ {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0x0000001F},
+ {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 0x0000000F},
+ {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0x00000005},
+ {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 0x00000010},
+ {0xFFAB807F807FFFCD, 0x7F23FF80FF567F80, 0x0000000F},
+ {0x80FFEFFF7F12807F, 0x807F80FF7FDEFF78, 0x00000010}
+ };
+// clang-format on
#define ADDVI_DF(lanes, mask) \
uint64_t res = 0; \
@@ -7664,23 +7659,26 @@ TEST(MSA_maxi_mini) {
CcTest::InitializeVM();
+ // clang-format off
struct TestCaseMsaI5 tc[] = {
- // ws_lo, ws_hi, i5
- {0x7f80ff3480ff7f00, 0x8d7fff80ff7f6780, 0x0000001f},
- {0x7f80ff3480ff7f00, 0x8d7fff80ff7f6780, 0x0000000f},
- {0x7f80ff3480ff7f00, 0x8d7fff80ff7f6780, 0x00000010},
- {0x80007fff91daffff, 0x7fff8000ffff5678, 0x0000001f},
- {0x80007fff91daffff, 0x7fff8000ffff5678, 0x0000000f},
- {0x80007fff91daffff, 0x7fff8000ffff5678, 0x00000010},
- {0x7fffffff80000000, 0x12345678ffffffff, 0x0000001f},
- {0x7fffffff80000000, 0x12345678ffffffff, 0x0000000f},
- {0x7fffffff80000000, 0x12345678ffffffff, 0x00000010},
- {0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c, 0x0000001f},
- {0x2b665362c4e812df, 0x3a0d80d68b3f8bc8, 0x0000000f},
- {0xf35862e13e38f8b0, 0x4f41ffdef2bfe636, 0x00000010},
- {0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c, 0x00000015},
- {0x2b665362c4e812df, 0x3a0d80d68b3f8bc8, 0x00000009},
- {0xf35862e13e38f8b0, 0x4f41ffdef2bfe636, 0x00000003}};
+ // ws_lo, ws_hi, i5
+ {0x7F80FF3480FF7F00, 0x8D7FFF80FF7F6780, 0x0000001F},
+ {0x7F80FF3480FF7F00, 0x8D7FFF80FF7F6780, 0x0000000F},
+ {0x7F80FF3480FF7F00, 0x8D7FFF80FF7F6780, 0x00000010},
+ {0x80007FFF91DAFFFF, 0x7FFF8000FFFF5678, 0x0000001F},
+ {0x80007FFF91DAFFFF, 0x7FFF8000FFFF5678, 0x0000000F},
+ {0x80007FFF91DAFFFF, 0x7FFF8000FFFF5678, 0x00000010},
+ {0x7FFFFFFF80000000, 0x12345678FFFFFFFF, 0x0000001F},
+ {0x7FFFFFFF80000000, 0x12345678FFFFFFFF, 0x0000000F},
+ {0x7FFFFFFF80000000, 0x12345678FFFFFFFF, 0x00000010},
+ {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0x0000001F},
+ {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 0x0000000F},
+ {0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636, 0x00000010},
+ {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0x00000015},
+ {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 0x00000009},
+ {0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636, 0x00000003}
+ };
+// clang-format on
#define MAXI_MINI_S_DF(lanes, mask, func) \
[](uint64_t ws, uint32_t ui5) { \
@@ -7801,18 +7799,18 @@ TEST(MSA_ceqi_clti_clei) {
CcTest::InitializeVM();
struct TestCaseMsaI5 tc[] = {
- {0xff69751bb9a7d9c3, 0xf7a594aec8ff8a9c, 0x0000001f},
- {0xe669ffffb9a7d9c3, 0xf7a594aeffff8a9c, 0x0000001f},
- {0xffffffffb9a7d9c3, 0xf7a594aeffffffff, 0x0000001f},
- {0x2b0b5362c4e812df, 0x3a0d80d68b3f0bc8, 0x0000000b},
- {0x2b66000bc4e812df, 0x3a0d000b8b3f8bc8, 0x0000000b},
- {0x0000000bc4e812df, 0x3a0d80d60000000b, 0x0000000b},
- {0xf38062e13e38f8b0, 0x8041ffdef2bfe636, 0x00000010},
- {0xf35880003e38f8b0, 0x4f41ffdef2bf8000, 0x00000010},
- {0xf35862e180000000, 0x80000000f2bfe636, 0x00000010},
- {0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c, 0x00000015},
- {0x2b665362c4e812df, 0x3a0d80d68b3f8bc8, 0x00000009},
- {0xf30062e13e38f800, 0x4f00ffdef2bf0036, 0x00000000}};
+ {0xFF69751BB9A7D9C3, 0xF7A594AEC8FF8A9C, 0x0000001F},
+ {0xE669FFFFB9A7D9C3, 0xF7A594AEFFFF8A9C, 0x0000001F},
+ {0xFFFFFFFFB9A7D9C3, 0xF7A594AEFFFFFFFF, 0x0000001F},
+ {0x2B0B5362C4E812DF, 0x3A0D80D68B3F0BC8, 0x0000000B},
+ {0x2B66000BC4E812DF, 0x3A0D000B8B3F8BC8, 0x0000000B},
+ {0x0000000BC4E812DF, 0x3A0D80D60000000B, 0x0000000B},
+ {0xF38062E13E38F8B0, 0x8041FFDEF2BFE636, 0x00000010},
+ {0xF35880003E38F8B0, 0x4F41FFDEF2BF8000, 0x00000010},
+ {0xF35862E180000000, 0x80000000F2BFE636, 0x00000010},
+ {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0x00000015},
+ {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 0x00000009},
+ {0xF30062E13E38F800, 0x4F00FFDEF2BF0036, 0x00000000}};
#define CEQI_CLTI_CLEI_S_DF(lanes, mask, func) \
[](uint64_t ws, uint32_t ui5) { \
@@ -7995,9 +7993,9 @@ void run_msa_2r(const struct TestCaseMsa2R* input,
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
- (CALL_GENERATED_CODE(isolate, f, &res, 0, 0, 0, 0));
+ f.Call(&res, 0, 0, 0, 0);
CHECK_EQ(input->exp_res_lo, res.d[0]);
CHECK_EQ(input->exp_res_hi, res.d[1]);
@@ -8011,44 +8009,44 @@ TEST(MSA_pcnt) {
struct TestCaseMsa2R tc_b[] = {// ws_lo, ws_hi, exp_res_lo, exp_res_hi
{0x0000000000000000, 0x0000000000000000, 0, 0},
- {0xffffffffffffffff, 0xffffffffffffffff,
+ {0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF,
0x0808080808080808, 0x0808080808080808},
- {0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c,
+ {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C,
0x0204050405050504, 0x0704030503070304},
- {0x2b665362c4e812df, 0x3a0d80d68b3f8bc8,
+ {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8,
0x0404040303040207, 0x0403010504060403},
- {0xf35862e13e38f8b0, 0x4f41ffdef2bfe636,
+ {0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636,
0x0603030405030503, 0x0502080605070504}};
struct TestCaseMsa2R tc_h[] = {// ws_lo, ws_hi, exp_res_lo, exp_res_hi
{0x0000000000000000, 0x0000000000000000, 0, 0},
- {0xffffffffffffffff, 0xffffffffffffffff,
+ {0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF,
0x0010001000100010, 0x0010001000100010},
- {0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c,
- 0x00060009000a0009, 0x000b0008000a0007},
- {0x2b665362c4e812df, 0x3a0d80d68b3f8bc8,
- 0x0008000700070009, 0x00070006000a0007},
- {0xf35862e13e38f8b0, 0x4f41ffdef2bfe636,
- 0x0009000700080008, 0x0007000e000c0009}};
+ {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C,
+ 0x00060009000A0009, 0x000B0008000A0007},
+ {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8,
+ 0x0008000700070009, 0x00070006000A0007},
+ {0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636,
+ 0x0009000700080008, 0x0007000E000C0009}};
struct TestCaseMsa2R tc_w[] = {// ws_lo, ws_hi, exp_res_lo, exp_res_hi
{0x0000000000000000, 0x0000000000000000, 0, 0},
- {0xffffffffffffffff, 0xffffffffffffffff,
+ {0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF,
0x0000002000000020, 0x0000002000000020},
- {0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c,
- 0x0000000f00000013, 0x0000001300000011},
- {0x2b665362c4e812df, 0x3a0d80d68b3f8bc8,
- 0x0000000f00000010, 0x0000000d00000011},
- {0xf35862e13e38f8b0, 0x4f41ffdef2bfe636,
+ {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C,
+ 0x0000000F00000013, 0x0000001300000011},
+ {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8,
+ 0x0000000F00000010, 0x0000000D00000011},
+ {0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636,
0x0000001000000010, 0x0000001500000015}};
struct TestCaseMsa2R tc_d[] = {
// ws_lo, ws_hi, exp_res_lo, exp_res_hi
{0x0000000000000000, 0x0000000000000000, 0, 0},
- {0xffffffffffffffff, 0xffffffffffffffff, 0x40, 0x40},
- {0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c, 0x22, 0x24},
- {0x2b665362c4e812df, 0x3a0d80d68b3f8bc8, 0x1f, 0x1e},
- {0xf35862e13e38f8b0, 0x4f41ffdef2bfe636, 0x20, 0x2a}};
+ {0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0x40, 0x40},
+ {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0x22, 0x24},
+ {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 0x1F, 0x1E},
+ {0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636, 0x20, 0x2A}};
for (size_t i = 0; i < sizeof(tc_b) / sizeof(TestCaseMsa2R); ++i) {
run_msa_2r(&tc_b[i], [](MacroAssembler& assm) { __ pcnt_b(w2, w0); });
@@ -8067,43 +8065,43 @@ TEST(MSA_nlzc) {
struct TestCaseMsa2R tc_b[] = {// ws_lo, ws_hi, exp_res_lo, exp_res_hi
{0x0000000000000000, 0x0000000000000000,
0x0808080808080808, 0x0808080808080808},
- {0xffffffffffffffff, 0xffffffffffffffff, 0, 0},
- {0x1169350b07030100, 0x7f011402381f0a6c,
+ {0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0, 0},
+ {0x1169350B07030100, 0x7F011402381F0A6C,
0x0301020405060708, 0x0107030602030401},
- {0x010806003478121f, 0x03013016073f7b08,
+ {0x010806003478121F, 0x03013016073F7B08,
0x0704050802010303, 0x0607020305020104},
- {0x0168321100083803, 0x07113f03013f1676,
+ {0x0168321100083803, 0x07113F03013F1676,
0x0701020308040206, 0x0503020607020301}};
struct TestCaseMsa2R tc_h[] = {// ws_lo, ws_hi, exp_res_lo, exp_res_hi
{0x0000000000000000, 0x0000000000000000,
0x0010001000100010, 0x0010001000100010},
- {0xffffffffffffffff, 0xffffffffffffffff, 0, 0},
- {0x00010007000a003c, 0x37a5001e00010002,
- 0x000f000d000c000a, 0x0002000b000f000e},
- {0x0026066200780edf, 0x003d0003000f00c8,
- 0x000a000500090004, 0x000a000e000c0008},
- {0x335807e100480030, 0x01410fde12bf5636,
- 0x000200050009000a, 0x0007000400030001}};
+ {0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0, 0},
+ {0x00010007000A003C, 0x37A5001E00010002,
+ 0x000F000D000C000A, 0x0002000B000F000E},
+ {0x0026066200780EDF, 0x003D0003000F00C8,
+ 0x000A000500090004, 0x000A000E000C0008},
+ {0x335807E100480030, 0x01410FDE12BF5636,
+ 0x000200050009000A, 0x0007000400030001}};
struct TestCaseMsa2R tc_w[] = {// ws_lo, ws_hi, exp_res_lo, exp_res_hi
{0x0000000000000000, 0x0000000000000000,
0x0000002000000020, 0x0000002000000020},
- {0xffffffffffffffff, 0xffffffffffffffff, 0, 0},
- {0x00000005000007c3, 0x000014ae00006a9c,
- 0x0000001d00000015, 0x0000001300000011},
- {0x00009362000112df, 0x000380d6003f8bc8,
- 0x000000100000000f, 0x0000000e0000000a},
- {0x135862e17e38f8b0, 0x0061ffde03bfe636,
+ {0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0, 0},
+ {0x00000005000007C3, 0x000014AE00006A9C,
+ 0x0000001D00000015, 0x0000001300000011},
+ {0x00009362000112DF, 0x000380D6003F8BC8,
+ 0x000000100000000F, 0x0000000E0000000A},
+ {0x135862E17E38F8B0, 0x0061FFDE03BFE636,
0x0000000300000001, 0x0000000900000006}};
struct TestCaseMsa2R tc_d[] = {
// ws_lo, ws_hi, exp_res_lo, exp_res_hi
{0x0000000000000000, 0x0000000000000000, 0x40, 0x40},
- {0xffffffffffffffff, 0xffffffffffffffff, 0, 0},
- {0x000000000000014e, 0x00000000000176da, 0x37, 0x2f},
- {0x00000062c4e812df, 0x000065d68b3f8bc8, 0x19, 0x11},
- {0x00000000e338f8b0, 0x0754534acab32654, 0x20, 0x5}};
+ {0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0, 0},
+ {0x000000000000014E, 0x00000000000176DA, 0x37, 0x2F},
+ {0x00000062C4E812DF, 0x000065D68B3F8BC8, 0x19, 0x11},
+ {0x00000000E338F8B0, 0x0754534ACAB32654, 0x20, 0x5}};
for (size_t i = 0; i < sizeof(tc_b) / sizeof(TestCaseMsa2R); ++i) {
run_msa_2r(&tc_b[i], [](MacroAssembler& assm) { __ nlzc_b(w2, w0); });
@@ -8120,7 +8118,7 @@ TEST(MSA_nloc) {
CcTest::InitializeVM();
struct TestCaseMsa2R tc_b[] = {// ws_lo, ws_hi, exp_res_lo, exp_res_hi
- {0xffffffffffffffff, 0xffffffffffffffff,
+ {0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF,
0x0808080808080808, 0x0808080808080808},
{0x0000000000000000, 0x0000000000000000, 0, 0},
{0xEE96CAF4F8FCFEFF, 0x80FEEBFDC7E0F593,
@@ -8131,32 +8129,32 @@ TEST(MSA_nloc) {
0x0701020308040206, 0x0503020607020301}};
struct TestCaseMsa2R tc_h[] = {// ws_lo, ws_hi, exp_res_lo, exp_res_hi
- {0xffffffffffffffff, 0xffffffffffffffff,
+ {0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF,
0x0010001000100010, 0x0010001000100010},
{0x0000000000000000, 0x0000000000000000, 0, 0},
{0xFFFEFFF8FFF5FFC3, 0xC85AFFE1FFFEFFFD,
- 0x000f000d000c000a, 0x0002000b000f000e},
+ 0x000F000D000C000A, 0x0002000B000F000E},
{0xFFD9F99DFF87F120, 0xFFC2FFFCFFF0FF37,
- 0x000a000500090004, 0x000a000e000c0008},
+ 0x000A000500090004, 0x000A000E000C0008},
{0xCCA7F81EFFB7FFCF, 0xFEBEF021ED40A9C9,
- 0x000200050009000a, 0x0007000400030001}};
+ 0x000200050009000A, 0x0007000400030001}};
struct TestCaseMsa2R tc_w[] = {// ws_lo, ws_hi, exp_res_lo, exp_res_hi
- {0xffffffffffffffff, 0xffffffffffffffff,
+ {0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF,
0x0000002000000020, 0x0000002000000020},
{0x0000000000000000, 0x0000000000000000, 0, 0},
{0xFFFFFFFAFFFFF83C, 0xFFFFEB51FFFF9563,
- 0x0000001d00000015, 0x0000001300000011},
+ 0x0000001D00000015, 0x0000001300000011},
{0xFFFF6C9DFFFEED20, 0xFFFC7F29FFC07437,
- 0x000000100000000f, 0x0000000e0000000a},
+ 0x000000100000000F, 0x0000000E0000000A},
{0xECA79D1E81C7074F, 0xFF9E0021FC4019C9,
0x0000000300000001, 0x0000000900000006}};
struct TestCaseMsa2R tc_d[] = {
// ws_lo, ws_hi, exp_res_lo, exp_res_hi
- {0xffffffffffffffff, 0xffffffffffffffff, 0x40, 0x40},
+ {0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0x40, 0x40},
{0x0000000000000000, 0x0000000000000000, 0, 0},
- {0xFFFFFFFFFFFFFEB1, 0xFFFFFFFFFFFE8925, 0x37, 0x2f},
+ {0xFFFFFFFFFFFFFEB1, 0xFFFFFFFFFFFE8925, 0x37, 0x2F},
{0xFFFFFF9D3B17ED20, 0xFFFF9A2974C07437, 0x19, 0x11},
{0xFFFFFFFF1CC7074F, 0xF8ABACB5354CD9AB, 0x20, 0x5}};
@@ -8894,11 +8892,11 @@ TEST(MSA_fexupl) {
const double inf_double = std::numeric_limits<double>::infinity();
struct TestCaseMsa2RF_U16_F tc_s[] = {
- {1, 2, 0x7c00, 0x0c00, 0, 0x7c00, 0xfc00, 0x8000, 0.f, inf_float,
+ {1, 2, 0x7C00, 0x0C00, 0, 0x7C00, 0xFC00, 0x8000, 0.f, inf_float,
-inf_float, -0.f},
- {0xfc00, 0xffff, 0x00ff, 0x8000, 0x81fe, 0x8000, 0x0345, 0xaaaa,
+ {0xFC00, 0xFFFF, 0x00FF, 0x8000, 0x81FE, 0x8000, 0x0345, 0xAAAA,
-3.0398368835e-5f, -0.f, 4.9889088e-5f, -5.2062988281e-2f},
- {3, 4, 0x5555, 6, 0x2aaa, 0x8700, 0x7777, 0x6a8b, 5.2062988281e-2f,
+ {3, 4, 0x5555, 6, 0x2AAA, 0x8700, 0x7777, 0x6A8B, 5.2062988281e-2f,
-1.06811523458e-4f, 3.0576e4f, 3.35e3f}};
struct TestCaseMsa2RF_F_D tc_d[] = {
@@ -8927,11 +8925,11 @@ TEST(MSA_fexupr) {
const double inf_double = std::numeric_limits<double>::infinity();
struct TestCaseMsa2RF_U16_F tc_s[] = {
- {0, 0x7c00, 0xfc00, 0x8000, 1, 2, 0x7c00, 0x0c00, 0.f, inf_float,
+ {0, 0x7C00, 0xFC00, 0x8000, 1, 2, 0x7C00, 0x0C00, 0.f, inf_float,
-inf_float, -0.f},
- {0x81fe, 0x8000, 0x0345, 0xaaaa, 0xfc00, 0xffff, 0x00ff, 0x8000,
+ {0x81FE, 0x8000, 0x0345, 0xAAAA, 0xFC00, 0xFFFF, 0x00FF, 0x8000,
-3.0398368835e-5f, -0.f, 4.9889088e-5f, -5.2062988281e-2f},
- {0x2aaa, 0x8700, 0x7777, 0x6a8b, 3, 4, 0x5555, 6, 5.2062988281e-2f,
+ {0x2AAA, 0x8700, 0x7777, 0x6A8B, 3, 4, 0x5555, 6, 5.2062988281e-2f,
-1.06811523458e-4f, 3.0576e4f, 3.35e3f}};
struct TestCaseMsa2RF_F_D tc_d[] = {
@@ -8965,13 +8963,13 @@ TEST(MSA_ffql) {
CcTest::InitializeVM();
- struct TestCaseMsa2RF_U16_F tc_s[] = {{0, 3, 0xffff, 0x8000, 0x8000, 0xe000,
+ struct TestCaseMsa2RF_U16_F tc_s[] = {{0, 3, 0xFFFF, 0x8000, 0x8000, 0xE000,
0x0FF0, 0, -1.f, -0.25f,
0.12451171875f, 0.f}};
struct TestCaseMsa2RF_U32_D tc_d[] = {
- {0, 45, 0x80000000, 0xe0000000, -1., -0.25},
- {0x28379, 0xaaaa5555, 0x024903d3, 0, 17.853239085525274277e-3, 0.}};
+ {0, 45, 0x80000000, 0xE0000000, -1., -0.25},
+ {0x28379, 0xAAAA5555, 0x024903D3, 0, 17.853239085525274277e-3, 0.}};
for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_U16_F); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
@@ -8989,13 +8987,13 @@ TEST(MSA_ffqr) {
CcTest::InitializeVM();
- struct TestCaseMsa2RF_U16_F tc_s[] = {{0x8000, 0xe000, 0x0FF0, 0, 0, 3,
- 0xffff, 0x8000, -1.f, -0.25f,
+ struct TestCaseMsa2RF_U16_F tc_s[] = {{0x8000, 0xE000, 0x0FF0, 0, 0, 3,
+ 0xFFFF, 0x8000, -1.f, -0.25f,
0.12451171875f, 0.f}};
struct TestCaseMsa2RF_U32_D tc_d[] = {
- {0x80000000, 0xe0000000, 0, 45, -1., -0.25},
- {0x024903d3, 0, 0x28379, 0xaaaa5555, 17.853239085525274277e-3, 0.}};
+ {0x80000000, 0xE0000000, 0, 45, -1., -0.25},
+ {0x024903D3, 0, 0x28379, 0xAAAA5555, 17.853239085525274277e-3, 0.}};
for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_U16_F); ++i) {
run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
@@ -9046,9 +9044,9 @@ void run_msa_vector(struct TestCaseMsaVector* input,
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
- (CALL_GENERATED_CODE(isolate, f, &res, 0, 0, 0, 0));
+ f.Call(&res, 0, 0, 0, 0);
CHECK_EQ(GenerateOperationFunc(input->wd_lo, input->ws_lo, input->wt_lo),
res.d[0]);
@@ -9064,12 +9062,12 @@ TEST(MSA_vector) {
struct TestCaseMsaVector tc[] = {
// wd_lo, wd_hi, ws_lo, ws_hi, wt_lo, wt_hi
- {0xf35862e13e38f8b0, 0x4f41ffdef2bfe636, 0xdcd39d91f9057627,
- 0x64be4f6dbe9caa51, 0x6b23de1a687d9cb9, 0x49547aad691da4ca},
- {0xf35862e13e38f8b0, 0x4f41ffdef2bfe636, 0x401614523d830549,
- 0xd7c46d613f50eddd, 0x52284cbc60a1562b, 0x1756ed510d8849cd},
- {0xf35862e13e38f8b0, 0x4f41ffdef2bfe636, 0xd6e2d2ebcb40d72f,
- 0x13a619afce67b079, 0x36cce284343e40f9, 0xb4e8f44fd148bf7f}};
+ {0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636, 0xDCD39D91F9057627,
+ 0x64BE4F6DBE9CAA51, 0x6B23DE1A687D9CB9, 0x49547AAD691DA4CA},
+ {0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636, 0x401614523D830549,
+ 0xD7C46D613F50EDDD, 0x52284CBC60A1562B, 0x1756ED510D8849CD},
+ {0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636, 0xD6E2D2EBCB40D72F,
+ 0x13A619AFCE67B079, 0x36CCE284343E40F9, 0xB4E8F44FD148BF7F}};
for (size_t i = 0; i < sizeof(tc) / sizeof(TestCaseMsaVector); ++i) {
run_msa_vector(
@@ -9135,9 +9133,9 @@ void run_msa_bit(struct TestCaseMsaBit* input, InstFunc GenerateInstructionFunc,
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
- (CALL_GENERATED_CODE(isolate, f, &res, 0, 0, 0, 0));
+ f.Call(&res, 0, 0, 0, 0);
CHECK_EQ(GenerateOperationFunc(input->wd_lo, input->ws_lo, input->m),
res.d[0]);
@@ -9153,14 +9151,14 @@ TEST(MSA_slli_srai_srli) {
struct TestCaseMsaBit tc[] = {
// wd_lo, wd_hi ws_lo, ws_hi, m
- {0, 0, 0xf35862e13e38f8b0, 0x4f41ffdef2bfe636, 3},
- {0, 0, 0x64be4f6dbe9caa51, 0x6b23de1a687d9cb9, 5},
- {0, 0, 0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c, 9},
- {0, 0, 0x2b665362c4e812df, 0x3a0d80d68b3f8bc8, 13},
- {0, 0, 0x566be7ba4365b70a, 0x01ebbc1937d76cb4, 21},
- {0, 0, 0x380e2deb9d3f8aae, 0x017e0de0bcc6ca42, 30},
- {0, 0, 0xa46a3a9bcb43f4e5, 0x1c62c8473bdfcffb, 45},
- {0, 0, 0xf6759d85f23b5a2b, 0x5c042ae42c6d12c1, 61}};
+ {0, 0, 0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636, 3},
+ {0, 0, 0x64BE4F6DBE9CAA51, 0x6B23DE1A687D9CB9, 5},
+ {0, 0, 0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 9},
+ {0, 0, 0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 13},
+ {0, 0, 0x566BE7BA4365B70A, 0x01EBBC1937D76CB4, 21},
+ {0, 0, 0x380E2DEB9D3F8AAE, 0x017E0DE0BCC6CA42, 30},
+ {0, 0, 0xA46A3A9BCB43F4E5, 0x1C62C8473BDFCFFB, 45},
+ {0, 0, 0xF6759D85F23B5A2B, 0x5C042AE42C6D12C1, 61}};
#define SLLI_SRLI_DF(lanes, mask, func) \
[](uint64_t wd, uint64_t ws, uint32_t m) { \
@@ -9306,14 +9304,14 @@ TEST(MSA_bclri_bseti_bnegi) {
struct TestCaseMsaBit tc[] = {
// wd_lo, wd_hi, ws_lo, ws_hi, m
- {0, 0, 0xf35862e13e38f8b0, 0x4f41ffdef2bfe636, 3},
- {0, 0, 0x64be4f6dbe9caa51, 0x6b23de1a687d9cb9, 5},
- {0, 0, 0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c, 9},
- {0, 0, 0x2b665362c4e812df, 0x3a0d80d68b3f8bc8, 13},
- {0, 0, 0x566be7ba4365b70a, 0x01ebbc1937d76cb4, 21},
- {0, 0, 0x380e2deb9d3f8aae, 0x017e0de0bcc6ca42, 30},
- {0, 0, 0xa46a3a9bcb43f4e5, 0x1c62c8473bdfcffb, 45},
- {0, 0, 0xf6759d85f23b5a2b, 0x5c042ae42c6d12c1, 61}};
+ {0, 0, 0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636, 3},
+ {0, 0, 0x64BE4F6DBE9CAA51, 0x6B23DE1A687D9CB9, 5},
+ {0, 0, 0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 9},
+ {0, 0, 0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 13},
+ {0, 0, 0x566BE7BA4365B70A, 0x01EBBC1937D76CB4, 21},
+ {0, 0, 0x380E2DEB9D3F8AAE, 0x017E0DE0BCC6CA42, 30},
+ {0, 0, 0xA46A3A9BCB43F4E5, 0x1C62C8473BDFCFFB, 45},
+ {0, 0, 0xF6759D85F23B5A2B, 0x5C042AE42C6D12C1, 61}};
#define BCLRI_BSETI_BNEGI_DF(lanes, mask, func) \
[](uint64_t wd, uint64_t ws, uint32_t m) { \
@@ -9401,22 +9399,22 @@ TEST(MSA_binsli_binsri) {
CcTest::InitializeVM();
struct TestCaseMsaBit tc[] = {// wd_lo, wd_hi, ws_lo, ws_hi, m
- {0x53f4457553bbd5b4, 0x5fb8250eacc296b2,
- 0xf35862e13e38f8b0, 0x4f41ffdef2bfe636, 3},
- {0xf61bfdb0f312e6fc, 0xc9437568dd1ea925,
- 0x64be4f6dbe9caa51, 0x6b23de1a687d9cb9, 5},
- {0x53f4457553bbd5b4, 0x5fb8250eacc296b2,
- 0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c, 9},
- {0xf61bfdb0f312e6fc, 0xc9437568dd1ea925,
- 0x2b665362c4e812df, 0x3a0d80d68b3f8bc8, 13},
- {0x53f4457553bbd5b4, 0x5fb8250eacc296b2,
- 0x566be7ba4365b70a, 0x01ebbc1937d76cb4, 21},
- {0xf61bfdb0f312e6fc, 0xc9437568dd1ea925,
- 0x380e2deb9d3f8aae, 0x017e0de0bcc6ca42, 30},
- {0x53f4457553bbd5b4, 0x5fb8250eacc296b2,
- 0xa46a3a9bcb43f4e5, 0x1c62c8473bdfcffb, 45},
- {0xf61bfdb0f312e6fc, 0xc9437568dd1ea925,
- 0xf6759d85f23b5a2b, 0x5c042ae42c6d12c1, 61}};
+ {0x53F4457553BBD5B4, 0x5FB8250EACC296B2,
+ 0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636, 3},
+ {0xF61BFDB0F312E6FC, 0xC9437568DD1EA925,
+ 0x64BE4F6DBE9CAA51, 0x6B23DE1A687D9CB9, 5},
+ {0x53F4457553BBD5B4, 0x5FB8250EACC296B2,
+ 0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 9},
+ {0xF61BFDB0F312E6FC, 0xC9437568DD1EA925,
+ 0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 13},
+ {0x53F4457553BBD5B4, 0x5FB8250EACC296B2,
+ 0x566BE7BA4365B70A, 0x01EBBC1937D76CB4, 21},
+ {0xF61BFDB0F312E6FC, 0xC9437568DD1EA925,
+ 0x380E2DEB9D3F8AAE, 0x017E0DE0BCC6CA42, 30},
+ {0x53F4457553BBD5B4, 0x5FB8250EACC296B2,
+ 0xA46A3A9BCB43F4E5, 0x1C62C8473BDFCFFB, 45},
+ {0xF61BFDB0F312E6FC, 0xC9437568DD1EA925,
+ 0xF6759D85F23B5A2B, 0x5C042AE42C6D12C1, 61}};
#define BINSLI_BINSRI_DF(lanes, mask, func) \
[](uint64_t wd, uint64_t ws, uint32_t m) { \
@@ -9493,14 +9491,14 @@ TEST(MSA_sat_s_sat_u) {
struct TestCaseMsaBit tc[] = {
// wd_lo, wd_hi, ws_lo, ws_hi, m
- {0, 0, 0xf35862e13e3808b0, 0x4f41ffdef2bfe636, 3},
- {0, 0, 0x64be4f6dbe9caa51, 0x6b23de1a687d9cb9, 5},
- {0, 0, 0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c, 9},
- {0, 0, 0x2b665362c4e812df, 0x3a0d80d68b3f8bc8, 13},
- {0, 0, 0x566be7ba4365b70a, 0x01ebbc1937d76cb4, 21},
- {0, 0, 0x380e2deb9d3f8aae, 0x017e0de0bcc6ca42, 30},
- {0, 0, 0xa46a3a9bcb43f4e5, 0x1c62c8473bdfcffb, 45},
- {0, 0, 0xf6759d85f23b5a2b, 0x5c042ae42c6d12c1, 61}};
+ {0, 0, 0xF35862E13E3808B0, 0x4F41FFDEF2BFE636, 3},
+ {0, 0, 0x64BE4F6DBE9CAA51, 0x6B23DE1A687D9CB9, 5},
+ {0, 0, 0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 9},
+ {0, 0, 0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 13},
+ {0, 0, 0x566BE7BA4365B70A, 0x01EBBC1937D76CB4, 21},
+ {0, 0, 0x380E2DEB9D3F8AAE, 0x017E0DE0BCC6CA42, 30},
+ {0, 0, 0xA46A3A9BCB43F4E5, 0x1C62C8473BDFCFFB, 45},
+ {0, 0, 0xF6759D85F23B5A2B, 0x5C042AE42C6D12C1, 61}};
#define SAT_DF(lanes, mask, func) \
[](uint64_t wd, uint64_t ws, uint32_t m) { \
@@ -9609,9 +9607,9 @@ void run_msa_i10(int32_t input, InstFunc GenerateVectorInstructionFunc,
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
- (CALL_GENERATED_CODE(isolate, f, &res, 0, 0, 0, 0));
+ f.Call(&res, 0, 0, 0, 0);
CHECK_EQ(GenerateOperationFunc(input), res.d[0]);
CHECK_EQ(GenerateOperationFunc(input), res.d[1]);
@@ -9688,9 +9686,9 @@ void run_msa_mi10(InstFunc GenerateVectorInstructionFunc) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F5 f = FUNCTION_CAST<F5>(code->entry());
+ auto f = GeneratedCode<F5>::FromCode(*code);
- (CALL_GENERATED_CODE(isolate, f, in_array_middle, out_array_middle, 0, 0, 0));
+ f.Call(in_array_middle, out_array_middle, 0, 0, 0);
CHECK_EQ(memcmp(in_test_vector, out_test_vector, arraysize(in_test_vector)),
0);
@@ -9768,9 +9766,9 @@ void run_msa_3r(struct TestCaseMsa3R* input, InstFunc GenerateI5InstructionFunc,
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
- (CALL_GENERATED_CODE(isolate, f, &res, 0, 0, 0, 0));
+ f.Call(&res, 0, 0, 0, 0);
GenerateOperationFunc(&input->ws_lo, &input->wt_lo, &input->wd_lo);
if (input->wd_lo != Unpredictable) {
@@ -9787,32 +9785,32 @@ TEST(MSA_3R_instructions) {
CcTest::InitializeVM();
struct TestCaseMsa3R tc[] = {
- {0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c, 0x1169751bb9a7d9c3,
- 0xf7a594aec8ef8a9c, 0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c},
- {0x2b665362c4e812df, 0x3a0d80d68b3f8bc8, 0x2b665362c4e812df,
- 0x3a0d80d68b3f8bc8, 0x2b665362c4e812df, 0x3a0d80d68b3f8bc8},
- {0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c, 0x1169751bb9a7d9c3,
- 0xf7a594aec8ef8a9c, 0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c},
- {0x2b665362c4e812df, 0x3a0d80d68b3f8bc8, 0x2b665362c4e812df,
- 0x3a0d80d68b3f8bc8, 0x2b665362c4e812df, 0x3a0d80d68b3f8bc8},
- {0xffab807f807fffcd, 0x7f23ff80ff567f80, 0xffab807f807fffcd,
- 0x7f23ff80ff567f80, 0xffab807f807fffcd, 0x7f23ff80ff567f80},
- {0x80ffefff7f12807f, 0x807f80ff7fdeff78, 0x80ffefff7f12807f,
- 0x807f80ff7fdeff78, 0x80ffefff7f12807f, 0x807f80ff7fdeff78},
- {0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff,
- 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff},
- {0x0000000000000000, 0xffffffffffffffff, 0xffffffffffffffff,
- 0x0000000000000000, 0x0000000000000000, 0xffffffffffffffff},
- {0xffff0000ffff0000, 0xffff0000ffff0000, 0xffff0000ffff0000,
- 0xffff0000ffff0000, 0xffff0000ffff0000, 0xffff0000ffff0000},
- {0xff00ff00ff00ff00, 0xff00ff00ff00ff00, 0xff00ff00ff00ff00,
- 0xff00ff00ff00ff00, 0xff00ff00ff00ff00, 0xff00ff00ff00ff00},
- {0xf0f0f0f0f0f0f0f0, 0xf0f0f0f0f0f0f0f0, 0xf0f0f0f0f0f0f0f0,
- 0xf0f0f0f0f0f0f0f0, 0xf0f0f0f0f0f0f0f0, 0xf0f0f0f0f0f0f0f0},
- {0xff0000ffff0000ff, 0xff0000ffff0000ff, 0xff0000ffff0000ff,
- 0xff0000ffff0000ff, 0xff0000ffff0000ff, 0xff0000ffff0000ff},
- {0xffff00000000ffff, 0xffff00000000ffff, 0xffff00000000ffff,
- 0xffff00000000ffff, 0xffff00000000ffff, 0xffff00000000ffff}};
+ {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0x1169751BB9A7D9C3,
+ 0xF7A594AEC8EF8A9C, 0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C},
+ {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 0x2B665362C4E812DF,
+ 0x3A0D80D68B3F8BC8, 0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8},
+ {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0x1169751BB9A7D9C3,
+ 0xF7A594AEC8EF8A9C, 0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C},
+ {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 0x2B665362C4E812DF,
+ 0x3A0D80D68B3F8BC8, 0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8},
+ {0xFFAB807F807FFFCD, 0x7F23FF80FF567F80, 0xFFAB807F807FFFCD,
+ 0x7F23FF80FF567F80, 0xFFAB807F807FFFCD, 0x7F23FF80FF567F80},
+ {0x80FFEFFF7F12807F, 0x807F80FF7FDEFF78, 0x80FFEFFF7F12807F,
+ 0x807F80FF7FDEFF78, 0x80FFEFFF7F12807F, 0x807F80FF7FDEFF78},
+ {0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF,
+ 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF},
+ {0x0000000000000000, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF,
+ 0x0000000000000000, 0x0000000000000000, 0xFFFFFFFFFFFFFFFF},
+ {0xFFFF0000FFFF0000, 0xFFFF0000FFFF0000, 0xFFFF0000FFFF0000,
+ 0xFFFF0000FFFF0000, 0xFFFF0000FFFF0000, 0xFFFF0000FFFF0000},
+ {0xFF00FF00FF00FF00, 0xFF00FF00FF00FF00, 0xFF00FF00FF00FF00,
+ 0xFF00FF00FF00FF00, 0xFF00FF00FF00FF00, 0xFF00FF00FF00FF00},
+ {0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0,
+ 0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0},
+ {0xFF0000FFFF0000FF, 0xFF0000FFFF0000FF, 0xFF0000FFFF0000FF,
+ 0xFF0000FFFF0000FF, 0xFF0000FFFF0000FF, 0xFF0000FFFF0000FF},
+ {0xFFFF00000000FFFF, 0xFFFF00000000FFFF, 0xFFFF00000000FFFF,
+ 0xFFFF00000000FFFF, 0xFFFF00000000FFFF, 0xFFFF00000000FFFF}};
#define SLL_DF(T, lanes, mask) \
int size_in_bits = kMSARegSize / lanes; \
@@ -10406,8 +10404,8 @@ TEST(MSA_3R_instructions) {
T* ws_p = reinterpret_cast<T*>(ws); \
T* wt_p = reinterpret_cast<T*>(wt); \
T* wd_p = reinterpret_cast<T*>(wd); \
- const int mask_not_valid = 0xc0; \
- const int mask_6bits = 0x3f; \
+ const int mask_not_valid = 0xC0; \
+ const int mask_6bits = 0x3F; \
for (int i = 0; i < lanes; ++i) { \
if ((wd_p[i] & mask_not_valid)) { \
wd_p[i] = 0; \
@@ -10773,9 +10771,9 @@ void run_msa_3rf(const struct TestCaseMsa3RF* input,
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
- (CALL_GENERATED_CODE(isolate, f, &res, 0, 0, 0, 0));
+ f.Call(&res, 0, 0, 0, 0);
CHECK_EQ(output->exp_res_lo, res.d[0]);
CHECK_EQ(output->exp_res_hi, res.d[1]);
@@ -11157,70 +11155,70 @@ TEST(MSA_fixed_point_arithmetic) {
CcTest::InitializeVM();
const struct TestCaseMsa3RF tc_h[]{
- {0x800080007fff7fff, 0xe1ed8000fad3863a, 0x80007fff00af7fff,
- 0x800015a77fffa0eb, 0x7fff800080007fff, 0x80007fff1f207364},
- {0x800080007fff006a, 0x002affc4329ad87b, 0x80007fff7fff00f3,
- 0xffecffb4d0d7f429, 0x80007fff80007c33, 0x54ac6bbce53b8c91}};
+ {0x800080007FFF7FFF, 0xE1ED8000FAD3863A, 0x80007FFF00AF7FFF,
+ 0x800015A77FFFA0EB, 0x7FFF800080007FFF, 0x80007FFF1F207364},
+ {0x800080007FFF006A, 0x002AFFC4329AD87B, 0x80007FFF7FFF00F3,
+ 0xFFECFFB4D0D7F429, 0x80007FFF80007C33, 0x54AC6BBCE53B8C91}};
const struct TestCaseMsa3RF tc_w[]{
- {0x8000000080000000, 0x7fffffff7fffffff, 0x800000007fffffff,
- 0x00001ff37fffffff, 0x7fffffff80000000, 0x800000007fffffff},
- {0xe1ed035580000000, 0xfad3863aed462c0b, 0x8000000015a70aec,
- 0x7fffffffa0ebd354, 0x800000007fffffff, 0xd0d7f4291f207364},
- {0x8000000080000000, 0x7fffffff0000da1f, 0x800000007fffffff,
- 0x7fffffff00f39c3b, 0x800000007fffffff, 0x800000007c33f2fd},
- {0x0000ac33ffff329a, 0x54ac6bbce53bd87b, 0xffffe2b4d0d7f429,
- 0x0355ed462c0b1ff3, 0xb5deb625939dd3f9, 0xe642adfa69519596}};
+ {0x8000000080000000, 0x7FFFFFFF7FFFFFFF, 0x800000007FFFFFFF,
+ 0x00001FF37FFFFFFF, 0x7FFFFFFF80000000, 0x800000007FFFFFFF},
+ {0xE1ED035580000000, 0xFAD3863AED462C0B, 0x8000000015A70AEC,
+ 0x7FFFFFFFA0EBD354, 0x800000007FFFFFFF, 0xD0D7F4291F207364},
+ {0x8000000080000000, 0x7FFFFFFF0000DA1F, 0x800000007FFFFFFF,
+ 0x7FFFFFFF00F39C3B, 0x800000007FFFFFFF, 0x800000007C33F2FD},
+ {0x0000AC33FFFF329A, 0x54AC6BBCE53BD87B, 0xFFFFE2B4D0D7F429,
+ 0x0355ED462C0B1FF3, 0xB5DEB625939DD3F9, 0xE642ADFA69519596}};
const struct ExpectedResult_MSA3RF exp_res_mul_q_h[] = {
- {0x7fff800100ae7ffe, 0x1e13ea59fad35a74},
- {0x7fff80017ffe0000, 0xffff0000ed5b03a7}};
+ {0x7FFF800100AE7FFE, 0x1E13EA59FAD35A74},
+ {0x7FFF80017FFE0000, 0xFFFF0000ED5B03A7}};
const struct ExpectedResult_MSA3RF exp_res_madd_q_h[] = {
- {0x7fff800080ae7fff, 0x9e136a5819f37fff},
- {0x00000000fffe7c33, 0x54ab6bbcd2969038}};
+ {0x7FFF800080AE7FFF, 0x9E136A5819F37FFF},
+ {0x00000000FFFE7C33, 0x54AB6BBCD2969038}};
const struct ExpectedResult_MSA3RF exp_res_msub_q_h[] = {
- {0xffffffff80000000, 0x80007fff244c18ef},
- {0x80007fff80007c32, 0x54ac6bbbf7df88e9}};
+ {0xFFFFFFFF80000000, 0x80007FFF244C18EF},
+ {0x80007FFF80007C32, 0x54AC6BBBF7DF88E9}};
const struct ExpectedResult_MSA3RF exp_res_mulr_q_h[] = {
- {0x7fff800100af7ffe, 0x1e13ea59fad35a75},
- {0x7fff80017ffe0001, 0x00000000ed5b03a8}};
+ {0x7FFF800100AF7FFE, 0x1E13EA59FAD35A75},
+ {0x7FFF80017FFE0001, 0x00000000ED5B03A8}};
const struct ExpectedResult_MSA3RF exp_res_maddr_q_h[] = {
- {0x7fff800080af7fff, 0x9e136a5819f37fff},
- {0x00000000fffe7c34, 0x54ac6bbcd2969039}};
+ {0x7FFF800080AF7FFF, 0x9E136A5819F37FFF},
+ {0x00000000FFFE7C34, 0x54AC6BBCD2969039}};
const struct ExpectedResult_MSA3RF exp_res_msubr_q_h[] = {
- {0xffffffff80000001, 0x80007fff244d18ef},
- {0x80007fff80007c32, 0x54ac6bbcf7e088e9}};
+ {0xFFFFFFFF80000001, 0x80007FFF244D18EF},
+ {0x80007FFF80007C32, 0x54AC6BBCF7E088E9}};
const struct ExpectedResult_MSA3RF exp_res_mul_q_w[] = {
- {0x7fffffff80000001, 0x00001ff27ffffffe},
- {0x1e12fcabea58f514, 0xfad3863a0de8dee1},
- {0x7fffffff80000001, 0x7ffffffe0000019f},
- {0xffffffff00004bab, 0x0234e1fbf6ca3ee0}};
+ {0x7FFFFFFF80000001, 0x00001FF27FFFFFFE},
+ {0x1E12FCABEA58F514, 0xFAD3863A0DE8DEE1},
+ {0x7FFFFFFF80000001, 0x7FFFFFFE0000019F},
+ {0xFFFFFFFF00004BAB, 0x0234E1FBF6CA3EE0}};
const struct ExpectedResult_MSA3RF exp_res_madd_q_w[] = {
- {0x7fffffff80000000, 0x80001ff27fffffff},
- {0x9e12fcab6a58f513, 0xcbab7a632d095245},
- {0x0000000000000000, 0xfffffffe7c33f49c},
- {0xb5deb624939e1fa4, 0xe8778ff5601bd476}};
+ {0x7FFFFFFF80000000, 0x80001FF27FFFFFFF},
+ {0x9E12FCAB6A58F513, 0xCBAB7A632D095245},
+ {0x0000000000000000, 0xFFFFFFFE7C33F49C},
+ {0xB5DEB624939E1FA4, 0xE8778FF5601BD476}};
const struct ExpectedResult_MSA3RF exp_res_msub_q_w[] = {
- {0xffffffffffffffff, 0x8000000000000000},
- {0x800000007fffffff, 0xd6046dee11379482},
- {0x800000007fffffff, 0x800000007c33f15d},
- {0xb5deb625939d884d, 0xe40dcbfe728756b5}};
+ {0xFFFFFFFFFFFFFFFF, 0x8000000000000000},
+ {0x800000007FFFFFFF, 0xD6046DEE11379482},
+ {0x800000007FFFFFFF, 0x800000007C33F15D},
+ {0xB5DEB625939D884D, 0xE40DCBFE728756B5}};
const struct ExpectedResult_MSA3RF exp_res_mulr_q_w[] = {
- {0x7fffffff80000001, 0x00001ff37ffffffe},
- {0x1e12fcabea58f514, 0xfad3863a0de8dee2},
- {0x7fffffff80000001, 0x7ffffffe0000019f},
- {0x0000000000004bac, 0x0234e1fcf6ca3ee1}};
+ {0x7FFFFFFF80000001, 0x00001FF37FFFFFFE},
+ {0x1E12FCABEA58F514, 0xFAD3863A0DE8DEE2},
+ {0x7FFFFFFF80000001, 0x7FFFFFFE0000019F},
+ {0x0000000000004BAC, 0x0234E1FCF6CA3EE1}};
const struct ExpectedResult_MSA3RF exp_res_maddr_q_w[] = {
- {0x7fffffff80000000, 0x80001ff37fffffff},
- {0x9e12fcab6a58f513, 0xcbab7a632d095246},
- {0x0000000000000000, 0xfffffffe7c33f49c},
- {0xb5deb625939e1fa5, 0xe8778ff6601bd477}};
+ {0x7FFFFFFF80000000, 0x80001FF37FFFFFFF},
+ {0x9E12FCAB6A58F513, 0xCBAB7A632D095246},
+ {0x0000000000000000, 0xFFFFFFFE7C33F49C},
+ {0xB5DEB625939E1FA5, 0xE8778FF6601BD477}};
const struct ExpectedResult_MSA3RF exp_res_msubr_q_w[] = {
- {0xffffffffffffffff, 0x8000000000000001},
- {0x800000007fffffff, 0xd6046def11379482},
- {0x800000007fffffff, 0x800000007c33f15e},
- {0xb5deb625939d884d, 0xe40dcbfe728756b5}};
+ {0xFFFFFFFFFFFFFFFF, 0x8000000000000001},
+ {0x800000007FFFFFFF, 0xD6046DEF11379482},
+ {0x800000007FFFFFFF, 0x800000007C33F15E},
+ {0xB5DEB625939D884D, 0xE40DCBFE728756B5}};
#define TEST_FIXED_POINT_DF_H(instruction, src, exp_res) \
run_msa_3rf((src), (exp_res), \
@@ -11280,31 +11278,31 @@ TEST(MSA_fexdo) {
const struct ExpRes_16I exp_res_fexdo_w[] = {
{static_cast<int16_t>(0x0410), static_cast<int16_t>(0x0347),
- static_cast<int16_t>(0xd00d), static_cast<int16_t>(0xfc00),
- static_cast<int16_t>(0x7c00), static_cast<int16_t>(0x7dff),
- static_cast<int16_t>(0x7c00), static_cast<int16_t>(0x7bff)},
+ static_cast<int16_t>(0xD00D), static_cast<int16_t>(0xFC00),
+ static_cast<int16_t>(0x7C00), static_cast<int16_t>(0x7DFF),
+ static_cast<int16_t>(0x7C00), static_cast<int16_t>(0x7BFF)},
{static_cast<int16_t>(0x8001), static_cast<int16_t>(0x0001),
static_cast<int16_t>(0x0002), static_cast<int16_t>(0x8000),
static_cast<int16_t>(0x8000), static_cast<int16_t>(0x0000),
- static_cast<int16_t>(0x57b9), static_cast<int16_t>(0xe1fb)},
+ static_cast<int16_t>(0x57B9), static_cast<int16_t>(0xE1FB)},
{static_cast<int16_t>(0x0001), static_cast<int16_t>(0x8000),
- static_cast<int16_t>(0xfc00), static_cast<int16_t>(0xfbff),
- static_cast<int16_t>(0x0000), static_cast<int16_t>(0x7c00),
- static_cast<int16_t>(0xfc00), static_cast<int16_t>(0x0000)}};
+ static_cast<int16_t>(0xFC00), static_cast<int16_t>(0xFBFF),
+ static_cast<int16_t>(0x0000), static_cast<int16_t>(0x7C00),
+ static_cast<int16_t>(0xFC00), static_cast<int16_t>(0x0000)}};
const struct ExpRes_32I exp_res_fexdo_d[] = {
- {bit_cast<int32_t>(0x7f800000), bit_cast<int32_t>(0x7f7fc99e),
- bit_cast<int32_t>(0x7f800000), bit_cast<int32_t>(0xc49a4000)},
- {bit_cast<int32_t>(0xc21bae14), bit_cast<int32_t>(0xff800000),
- bit_cast<int32_t>(0x0082ab1e), bit_cast<int32_t>(0x000bfa5a)},
- {bit_cast<int32_t>(0x7673b164), bit_cast<int32_t>(0xfb13653d),
+ {bit_cast<int32_t>(0x7F800000), bit_cast<int32_t>(0x7F7FC99E),
+ bit_cast<int32_t>(0x7F800000), bit_cast<int32_t>(0xC49A4000)},
+ {bit_cast<int32_t>(0xC21BAE14), bit_cast<int32_t>(0xFF800000),
+ bit_cast<int32_t>(0x0082AB1E), bit_cast<int32_t>(0x000BFA5A)},
+ {bit_cast<int32_t>(0x7673B164), bit_cast<int32_t>(0xFB13653D),
bit_cast<int32_t>(0x80000000), bit_cast<int32_t>(0x00000000)},
- {bit_cast<int32_t>(0x000002ca), bit_cast<int32_t>(0x80000000),
+ {bit_cast<int32_t>(0x000002CA), bit_cast<int32_t>(0x80000000),
bit_cast<int32_t>(0x80000001), bit_cast<int32_t>(0x00000001)},
- {bit_cast<int32_t>(0xff800000), bit_cast<int32_t>(0x56b5e621),
- bit_cast<int32_t>(0x00000000), bit_cast<int32_t>(0x7f800000)},
- {bit_cast<int32_t>(0xf673b164), bit_cast<int32_t>(0x7b13653d),
- bit_cast<int32_t>(0x0000042e), bit_cast<int32_t>(0x00000000)}};
+ {bit_cast<int32_t>(0xFF800000), bit_cast<int32_t>(0x56B5E621),
+ bit_cast<int32_t>(0x00000000), bit_cast<int32_t>(0x7F800000)},
+ {bit_cast<int32_t>(0xF673B164), bit_cast<int32_t>(0x7B13653D),
+ bit_cast<int32_t>(0x0000042E), bit_cast<int32_t>(0x00000000)}};
#define TEST_FEXDO_H(instruction, src, exp_res) \
run_msa_3rf(reinterpret_cast<const struct TestCaseMsa3RF*>(src), \
@@ -11354,31 +11352,31 @@ TEST(MSA_ftq) {
{-3e306, 2e-307, 9e307, 2e-307, 0, 0}};
const struct ExpRes_16I exp_res_ftq_w[] = {
- {static_cast<int16_t>(0x0000), static_cast<int16_t>(0xb375),
- static_cast<int16_t>(0x004b), static_cast<int16_t>(0x0000),
- static_cast<int16_t>(0x7fff), static_cast<int16_t>(0x8021),
- static_cast<int16_t>(0x7fff), static_cast<int16_t>(0xffff)},
+ {static_cast<int16_t>(0x0000), static_cast<int16_t>(0xB375),
+ static_cast<int16_t>(0x004B), static_cast<int16_t>(0x0000),
+ static_cast<int16_t>(0x7FFF), static_cast<int16_t>(0x8021),
+ static_cast<int16_t>(0x7FFF), static_cast<int16_t>(0xFFFF)},
{static_cast<int16_t>(0x0000), static_cast<int16_t>(0x8000),
- static_cast<int16_t>(0x7ffd), static_cast<int16_t>(0xfff5),
- static_cast<int16_t>(0x7fff), static_cast<int16_t>(0x8000),
- static_cast<int16_t>(0x8000), static_cast<int16_t>(0x7fff)},
+ static_cast<int16_t>(0x7FFD), static_cast<int16_t>(0xFFF5),
+ static_cast<int16_t>(0x7FFF), static_cast<int16_t>(0x8000),
+ static_cast<int16_t>(0x8000), static_cast<int16_t>(0x7FFF)},
{static_cast<int16_t>(0x0000), static_cast<int16_t>(0x0000),
- static_cast<int16_t>(0x7fff), static_cast<int16_t>(0xffff),
- static_cast<int16_t>(0x0000), static_cast<int16_t>(0x7fff),
+ static_cast<int16_t>(0x7FFF), static_cast<int16_t>(0xFFFF),
+ static_cast<int16_t>(0x0000), static_cast<int16_t>(0x7FFF),
static_cast<int16_t>(0x8000), static_cast<int16_t>(0x0000)}};
const struct ExpRes_32I exp_res_ftq_d[] = {
- {bit_cast<int32_t>(0x7fffffff), bit_cast<int32_t>(0xfffefbf4),
- bit_cast<int32_t>(0x7fffffff), bit_cast<int32_t>(0x8020c49c)},
- {bit_cast<int32_t>(0x004b5dcc), bit_cast<int32_t>(0x00000000),
- bit_cast<int32_t>(0x000000d7), bit_cast<int32_t>(0xb374bc6a)},
- {bit_cast<int32_t>(0x80000000), bit_cast<int32_t>(0x7fffffff),
- bit_cast<int32_t>(0x7fffffff), bit_cast<int32_t>(0x80000000)},
- {bit_cast<int32_t>(0x7ffcb900), bit_cast<int32_t>(0xfff572de),
+ {bit_cast<int32_t>(0x7FFFFFFF), bit_cast<int32_t>(0xFFFEFBF4),
+ bit_cast<int32_t>(0x7FFFFFFF), bit_cast<int32_t>(0x8020C49C)},
+ {bit_cast<int32_t>(0x004B5DCC), bit_cast<int32_t>(0x00000000),
+ bit_cast<int32_t>(0x000000D7), bit_cast<int32_t>(0xB374BC6A)},
+ {bit_cast<int32_t>(0x80000000), bit_cast<int32_t>(0x7FFFFFFF),
+ bit_cast<int32_t>(0x7FFFFFFF), bit_cast<int32_t>(0x80000000)},
+ {bit_cast<int32_t>(0x7FFCB900), bit_cast<int32_t>(0xFFF572DE),
bit_cast<int32_t>(0x00000000), bit_cast<int32_t>(0x80000000)},
{bit_cast<int32_t>(0x80000000), bit_cast<int32_t>(0x00000000),
- bit_cast<int32_t>(0x00000000), bit_cast<int32_t>(0x7fffffff)},
- {bit_cast<int32_t>(0x7fffffff), bit_cast<int32_t>(0x00000000),
+ bit_cast<int32_t>(0x00000000), bit_cast<int32_t>(0x7FFFFFFF)},
+ {bit_cast<int32_t>(0x7FFFFFFF), bit_cast<int32_t>(0x00000000),
bit_cast<int32_t>(0x80000000), bit_cast<int32_t>(0x00000000)}};
#define TEST_FTQ_H(instruction, src, exp_res) \
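Aside from re-casing hexadecimal literals to uppercase (the values themselves are unchanged), the MIPS64 hunks above repeat one mechanical migration: a raw function pointer obtained via FUNCTION_CAST from the code object's entry point and invoked through the CALL_GENERATED_CODE macro becomes a typed GeneratedCode wrapper with an ordinary Call(). A minimal before/after sketch, with the wrapper and type names taken from the hunks above and the surrounding scaffolding illustrative only:

    // Before: cast the generated entry point and call through a macro.
    F3 f = FUNCTION_CAST<F3>(code->entry());
    (CALL_GENERATED_CODE(isolate, f, &res, 0, 0, 0, 0));

    // After: GeneratedCode<F3> binds the function type (and, in simulator
    // builds, routes the call through the simulator), so the call site is
    // a plain, type-checked Call().
    auto f = GeneratedCode<F3>::FromCode(*code);
    f.Call(&res, 0, 0, 0, 0);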
diff --git a/deps/v8/test/cctest/test-assembler-ppc.cc b/deps/v8/test/cctest/test-assembler-ppc.cc
index 1e150a0cb5..f965975ed9 100644
--- a/deps/v8/test/cctest/test-assembler-ppc.cc
+++ b/deps/v8/test/cctest/test-assembler-ppc.cc
@@ -30,18 +30,19 @@
#include "src/disassembler.h"
#include "src/factory.h"
#include "src/ppc/assembler-ppc-inl.h"
-#include "src/ppc/simulator-ppc.h"
+#include "src/simulator.h"
#include "test/cctest/cctest.h"
namespace v8 {
namespace internal {
-// Define these function prototypes to match JSEntryFunction in execution.cc.
-typedef Object* (*F1)(int x, int p1, int p2, int p3, int p4);
-typedef Object* (*F2)(int x, int y, int p2, int p3, int p4);
-typedef Object* (*F3)(void* p0, int p1, int p2, int p3, int p4);
-typedef Object* (*F4)(void* p0, void* p1, int p2, int p3, int p4);
-
+// TODO(ppc): Refine these signatures per test case; they can have arbitrary
+// return and argument types and an arbitrary number of arguments.
+using F_iiiii = Object*(int x, int p1, int p2, int p3, int p4);
+using F_piiii = Object*(void* p0, int p1, int p2, int p3, int p4);
+using F_ppiii = Object*(void* p0, void* p1, int p2, int p3, int p4);
+using F_pppii = Object*(void* p0, void* p1, void* p2, int p3, int p4);
+using F_ippii = Object*(int p0, void* p1, void* p2, int p3, int p4);
#define __ assm.
@@ -65,9 +66,8 @@ TEST(0) {
#ifdef DEBUG
code->Print();
#endif
- F2 f = FUNCTION_CAST<F2>(code->entry());
- intptr_t res = reinterpret_cast<intptr_t>(
- CALL_GENERATED_CODE(isolate, f, 3, 4, 0, 0, 0));
+ auto f = GeneratedCode<F_iiiii>::FromCode(*code);
+ intptr_t res = reinterpret_cast<intptr_t>(f.Call(3, 4, 0, 0, 0));
::printf("f() = %" V8PRIdPTR "\n", res);
CHECK_EQ(7, static_cast<int>(res));
}
@@ -104,9 +104,8 @@ TEST(1) {
#ifdef DEBUG
code->Print();
#endif
- F1 f = FUNCTION_CAST<F1>(code->entry());
- intptr_t res = reinterpret_cast<intptr_t>(
- CALL_GENERATED_CODE(isolate, f, 100, 0, 0, 0, 0));
+ auto f = GeneratedCode<F_iiiii>::FromCode(*code);
+ intptr_t res = reinterpret_cast<intptr_t>(f.Call(100, 0, 0, 0, 0));
::printf("f() = %" V8PRIdPTR "\n", res);
CHECK_EQ(5050, static_cast<int>(res));
}
@@ -155,9 +154,8 @@ TEST(2) {
#ifdef DEBUG
code->Print();
#endif
- F1 f = FUNCTION_CAST<F1>(code->entry());
- intptr_t res = reinterpret_cast<intptr_t>(
- CALL_GENERATED_CODE(isolate, f, 10, 0, 0, 0, 0));
+ auto f = GeneratedCode<F_iiiii>::FromCode(*code);
+ intptr_t res = reinterpret_cast<intptr_t>(f.Call(10, 0, 0, 0, 0));
::printf("f() = %" V8PRIdPTR "\n", res);
CHECK_EQ(3628800, static_cast<int>(res));
}
@@ -228,12 +226,11 @@ TEST(3) {
#ifdef DEBUG
code->Print();
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F_piiii>::FromCode(*code);
t.i = 100000;
t.c = 10;
t.s = 1000;
- intptr_t res = reinterpret_cast<intptr_t>(
- CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0));
+ intptr_t res = reinterpret_cast<intptr_t>(f.Call(&t, 0, 0, 0, 0));
::printf("f() = %" V8PRIdPTR "\n", res);
CHECK_EQ(101010, static_cast<int>(res));
CHECK_EQ(100000 / 2, t.i);
@@ -301,7 +298,7 @@ TEST(4) {
__ vstr(d4, r4, offsetof(T, e));
// Move a literal into a register that requires 64 bits to encode.
- // 0x3ff0000010000000 = 1.000000059604644775390625
+ // 0x3FF0000010000000 = 1.000000059604644775390625
__ vmov(d4, 1.000000059604644775390625);
__ vstr(d4, r4, offsetof(T, d));
@@ -344,7 +341,7 @@ TEST(4) {
#ifdef DEBUG
Code::cast(code)->Print();
#endif
- F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
+ auto f = GeneratedCode<F_piiii>::FromCode(*code);
t.a = 1.5;
t.b = 2.75;
t.c = 17.17;
@@ -358,8 +355,7 @@ TEST(4) {
t.n = 123.456;
t.x = 4.5;
t.y = 9.0;
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
- USE(dummy);
+ f.Call(&t, 0, 0, 0, 0);
CHECK_EQ(4.5, t.y);
CHECK_EQ(9.0, t.x);
CHECK_EQ(-123.456, t.n);
@@ -405,9 +401,8 @@ TEST(5) {
#ifdef DEBUG
Code::cast(code)->Print();
#endif
- F1 f = FUNCTION_CAST<F1>(Code::cast(code)->entry());
- int res = reinterpret_cast<int>(
- CALL_GENERATED_CODE(isolate, f, 0xAAAAAAAA, 0, 0, 0, 0));
+ auto f = GeneratedCode<F_iiiii>::FromCode(*code);
+ int res = reinterpret_cast<int>(f.Call(0xAAAAAAAA, 0, 0, 0, 0));
::printf("f() = %d\n", res);
CHECK_EQ(-7, res);
}
@@ -441,9 +436,8 @@ TEST(6) {
#ifdef DEBUG
Code::cast(code)->Print();
#endif
- F1 f = FUNCTION_CAST<F1>(Code::cast(code)->entry());
- int res = reinterpret_cast<int>(
- CALL_GENERATED_CODE(isolate, f, 0xFFFF, 0, 0, 0, 0));
+ auto f = GeneratedCode<F_iiiii>::FromCode(*code);
+ int res = reinterpret_cast<int>(f.Call(0xFFFF, 0, 0, 0, 0));
::printf("f() = %d\n", res);
CHECK_EQ(382, res);
}
@@ -517,9 +511,8 @@ static void TestRoundingMode(VCVTTypes types,
#ifdef DEBUG
Code::cast(code)->Print();
#endif
- F1 f = FUNCTION_CAST<F1>(Code::cast(code)->entry());
- int res = reinterpret_cast<int>(
- CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ auto f = GeneratedCode<F_iiiii>::FromCode(*code);
+ int res = reinterpret_cast<int>(f.Call(0, 0, 0, 0, 0));
::printf("res = %d\n", res);
CHECK_EQ(expected, res);
}
@@ -597,8 +590,8 @@ TEST(7) {
TestRoundingMode(u32_f64, RZ, kMinInt - 1.0, 0, true);
// Positive values.
- // kMaxInt is the maximum *signed* integer: 0x7fffffff.
- static const uint32_t kMaxUInt = 0xffffffffu;
+ // kMaxInt is the maximum *signed* integer: 0x7FFFFFFF.
+ static const uint32_t kMaxUInt = 0xFFFFFFFFu;
TestRoundingMode(u32_f64, RZ, 0, 0);
TestRoundingMode(u32_f64, RZ, 0.5, 0);
TestRoundingMode(u32_f64, RZ, 123.7, 123);
@@ -705,7 +698,7 @@ TEST(8) {
#ifdef DEBUG
Code::cast(code)->Print();
#endif
- F4 fn = FUNCTION_CAST<F4>(Code::cast(code)->entry());
+ auto fn = GeneratedCode<F_ppiii>::FromCode(*code);
d.a = 1.1;
d.b = 2.2;
d.c = 3.3;
@@ -724,8 +717,7 @@ TEST(8) {
f.g = 7.0;
f.h = 8.0;
- Object* dummy = CALL_GENERATED_CODE(isolate, fn, &d, &f, 0, 0, 0);
- USE(dummy);
+ fn.Call(&d, &f, 0, 0, 0);
CHECK_EQ(7.7, d.a);
CHECK_EQ(8.8, d.b);
@@ -821,7 +813,7 @@ TEST(9) {
#ifdef DEBUG
Code::cast(code)->Print();
#endif
- F4 fn = FUNCTION_CAST<F4>(Code::cast(code)->entry());
+ auto fn = GeneratedCode<F_ppiii>::FromCode(*code);
d.a = 1.1;
d.b = 2.2;
d.c = 3.3;
@@ -840,8 +832,7 @@ TEST(9) {
f.g = 7.0;
f.h = 8.0;
- Object* dummy = CALL_GENERATED_CODE(isolate, fn, &d, &f, 0, 0, 0);
- USE(dummy);
+ fn.Call(&d, &f, 0, 0, 0);
CHECK_EQ(7.7, d.a);
CHECK_EQ(8.8, d.b);
@@ -933,7 +924,7 @@ TEST(10) {
#ifdef DEBUG
Code::cast(code)->Print();
#endif
- F4 fn = FUNCTION_CAST<F4>(Code::cast(code)->entry());
+ auto fn = GeneratedCode<F_ppiii>::FromCode(*code);
d.a = 1.1;
d.b = 2.2;
d.c = 3.3;
@@ -952,8 +943,7 @@ TEST(10) {
f.g = 7.0;
f.h = 8.0;
- Object* dummy = CALL_GENERATED_CODE(isolate, fn, &d, &f, 0, 0, 0);
- USE(dummy);
+ fn.Call(&d, &f, 0, 0, 0);
CHECK_EQ(7.7, d.a);
CHECK_EQ(8.8, d.b);
@@ -990,8 +980,8 @@ TEST(11) {
} I;
I i;
- i.a = 0xabcd0001;
- i.b = 0xabcd0000;
+ i.a = 0xABCD0001;
+ i.b = 0xABCD0000;
Assembler assm(isolate, nullptr, 0);
@@ -1007,13 +997,13 @@ TEST(11) {
__ str(r2, MemOperand(r0, offsetof(I, b)));
// Test corner cases.
- __ mov(r1, Operand(0xffffffff));
+ __ mov(r1, Operand(0xFFFFFFFF));
__ mov(r2, Operand::Zero());
__ mov(r3, Operand(r1, ASR, 1), SetCC); // Set the carry.
__ adc(r3, r1, Operand(r2));
__ str(r3, MemOperand(r0, offsetof(I, c)));
- __ mov(r1, Operand(0xffffffff));
+ __ mov(r1, Operand(0xFFFFFFFF));
__ mov(r2, Operand::Zero());
__ mov(r3, Operand(r2, ASR, 1), SetCC); // Unset the carry.
__ adc(r3, r1, Operand(r2));
@@ -1031,14 +1021,13 @@ TEST(11) {
#ifdef DEBUG
Code::cast(code)->Print();
#endif
- F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &i, 0, 0, 0, 0);
- USE(dummy);
+ auto f = GeneratedCode<F_piiii>::FromCode(*code);
+ f.Call(&i, 0, 0, 0, 0);
- CHECK_EQ(0xabcd0001, i.a);
- CHECK_EQ(static_cast<int32_t>(0xabcd0000) >> 1, i.b);
+ CHECK_EQ(0xABCD0001, i.a);
+ CHECK_EQ(static_cast<int32_t>(0xABCD0000) >> 1, i.b);
CHECK_EQ(0x00000000, i.c);
- CHECK_EQ(0xffffffff, i.d);
+ CHECK_EQ(0xFFFFFFFF, i.d);
}
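The PPC file above also replaces its function-pointer typedefs with alias declarations that name plain function types, since GeneratedCode<F> is parameterized by the function type itself rather than by a pointer to it. A sketch of the two forms (signatures copied from the hunk above; the new alias names encode the parameter kinds, e.g. F_piiii = one pointer plus four ints):

    // Old: a pointer-to-function type, suitable for FUNCTION_CAST.
    typedef Object* (*F3)(void* p0, int p1, int p2, int p3, int p4);

    // New: a function type (note: no (*...)), which is the template
    // argument GeneratedCode<F> expects.
    using F_piiii = Object*(void* p0, int p1, int p2, int p3, int p4);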
diff --git a/deps/v8/test/cctest/test-assembler-s390.cc b/deps/v8/test/cctest/test-assembler-s390.cc
index df33b96752..d6bbe34e74 100644
--- a/deps/v8/test/cctest/test-assembler-s390.cc
+++ b/deps/v8/test/cctest/test-assembler-s390.cc
@@ -31,17 +31,18 @@
#include "src/factory.h"
#include "src/macro-assembler.h"
#include "src/s390/assembler-s390-inl.h"
-#include "src/s390/simulator-s390.h"
+#include "src/simulator.h"
#include "test/cctest/cctest.h"
namespace v8 {
namespace internal {
// Define these function prototypes to match JSEntryFunction in execution.cc.
-typedef Object* (*F1)(int x, int p1, int p2, int p3, int p4);
-typedef Object* (*F2)(int x, int y, int p2, int p3, int p4);
-typedef Object* (*F3)(void* p0, int p1, int p2, int p3, int p4);
-typedef Object* (*F4)(void* p0, void* p1, int p2, int p3, int p4);
+// TODO(s390): Refine these signatures per test case.
+using F1 = Object*(int x, int p1, int p2, int p3, int p4);
+using F2 = Object*(int x, int y, int p2, int p3, int p4);
+using F3 = Object*(void* p0, int p1, int p2, int p3, int p4);
+using F4 = Object*(void* p0, void* p1, int p2, int p3, int p4);
#define __ assm.
@@ -66,9 +67,8 @@ TEST(0) {
#ifdef DEBUG
code->Print();
#endif
- F2 f = FUNCTION_CAST<F2>(code->entry());
- intptr_t res = reinterpret_cast<intptr_t>(
- CALL_GENERATED_CODE(isolate, f, 3, 4, 0, 0, 0));
+ auto f = GeneratedCode<F2>::FromCode(*code);
+ intptr_t res = reinterpret_cast<intptr_t>(f.Call(3, 4, 0, 0, 0));
::printf("f() = %" V8PRIxPTR "\n", res);
CHECK_EQ(7, static_cast<int>(res));
}
@@ -106,9 +106,8 @@ TEST(1) {
#ifdef DEBUG
code->Print();
#endif
- F1 f = FUNCTION_CAST<F1>(code->entry());
- intptr_t res = reinterpret_cast<intptr_t>(
- CALL_GENERATED_CODE(isolate, f, 100, 0, 0, 0, 0));
+ auto f = GeneratedCode<F1>::FromCode(*code);
+ intptr_t res = reinterpret_cast<intptr_t>(f.Call(100, 0, 0, 0, 0));
::printf("f() = %" V8PRIxPTR "\n", res);
CHECK_EQ(5050, static_cast<int>(res));
}
@@ -158,9 +157,8 @@ TEST(2) {
#ifdef DEBUG
code->Print();
#endif
- F1 f = FUNCTION_CAST<F1>(code->entry());
- intptr_t res = reinterpret_cast<intptr_t>(
- CALL_GENERATED_CODE(isolate, f, 10, 0, 0, 0, 0));
+ auto f = GeneratedCode<F1>::FromCode(*code);
+ intptr_t res = reinterpret_cast<intptr_t>(f.Call(10, 0, 0, 0, 0));
::printf("f() = %" V8PRIxPTR "\n", res);
CHECK_EQ(3628800, static_cast<int>(res));
}
@@ -255,9 +253,9 @@ TEST(4) {
#ifdef DEBUG
code->Print();
#endif
- F2 f = FUNCTION_CAST<F2>(code->entry());
+ auto f = GeneratedCode<F2>::FromCode(*code);
intptr_t res = reinterpret_cast<intptr_t>(
- CALL_GENERATED_CODE(isolate, f, 3, 4, 3, 0, 0));
+ f.Call(3, 4, 3, 0, 0));
::printf("f() = %" V8PRIdPTR "\n", res);
CHECK_EQ(4, static_cast<int>(res));
}
@@ -283,9 +281,9 @@ TEST(5) {
#ifdef DEBUG
code->Print();
#endif
- F2 f = FUNCTION_CAST<F2>(code->entry());
+ auto f = GeneratedCode<F2>::FromCode(*code);
intptr_t res =
- reinterpret_cast<intptr_t>(CALL_GENERATED_CODE(isolate, f, 3, 4, 3, 0, 0));
+ reinterpret_cast<intptr_t>(f.Call(3, 4, 3, 0, 0));
::printf("f() = %" V8PRIdPTR "\n", res);
CHECK_EQ(2, static_cast<int>(res));
}
@@ -317,9 +315,9 @@ TEST(6) {
#ifdef DEBUG
code->Print();
#endif
- F2 f = FUNCTION_CAST<F2>(code->entry());
+ auto f = GeneratedCode<F2>::FromCode(*code);
intptr_t res =
- reinterpret_cast<intptr_t>(CALL_GENERATED_CODE(isolate, f, 3, 4, 3, 0, 0));
+ reinterpret_cast<intptr_t>(f.Call(3, 4, 3, 0, 0));
::printf("f() = %" V8PRIdPTR "\n", res);
CHECK_EQ(1, static_cast<int>(res));
}
@@ -349,9 +347,9 @@ TEST(7) {
#ifdef DEBUG
code->Print();
#endif
- F2 f = FUNCTION_CAST<F2>(code->entry());
+ auto f = GeneratedCode<F2>::FromCode(*code);
intptr_t res =
- reinterpret_cast<intptr_t>(CALL_GENERATED_CODE(isolate, f, 3, 4, 3, 0, 0));
+ reinterpret_cast<intptr_t>(f.Call(3, 4, 3, 0, 0));
::printf("f() = %" V8PRIdPTR "\n", res);
CHECK_EQ(0x2468, static_cast<int>(res));
}
@@ -380,9 +378,9 @@ TEST(8) {
#ifdef DEBUG
code->Print();
#endif
- F1 f = FUNCTION_CAST<F1>(code->entry());
+ auto f = GeneratedCode<F1>::FromCode(*code);
intptr_t res =
- reinterpret_cast<intptr_t>(CALL_GENERATED_CODE(isolate, f, 100, 0,
+ reinterpret_cast<intptr_t>(f.Call(100, 0,
0, 0, 0));
::printf("f() = %" V8PRIdPTR "\n", res);
CHECK_EQ(0, static_cast<int>(res));
@@ -407,9 +405,9 @@ TEST(9) {
#ifdef DEBUG
code->Print();
#endif
- F1 f = FUNCTION_CAST<F1>(code->entry());
+ auto f = GeneratedCode<F1>::FromCode(*code);
intptr_t res =
- reinterpret_cast<intptr_t>(CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ reinterpret_cast<intptr_t>(f.Call(0, 0, 0, 0, 0));
::printf("f() = %" V8PRIdPTR "\n", res);
}
#endif
@@ -492,9 +490,8 @@ TEST(10) {
#ifdef DEBUG
code->Print();
#endif
- F2 f = FUNCTION_CAST<F2>(code->entry());
- intptr_t res = reinterpret_cast<intptr_t>(
- CALL_GENERATED_CODE(isolate, f, 3, 4, 0, 0, 0));
+ auto f = GeneratedCode<F2>::FromCode(*code);
+ intptr_t res = reinterpret_cast<intptr_t>(f.Call(3, 4, 0, 0, 0));
::printf("f() = %" V8PRIxPTR "\n", res);
CHECK_EQ(0, static_cast<int>(res));
}
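
Editor's note: the arm hunks above all apply one mechanical change — the FUNCTION_CAST<F>(code->entry()) plus CALL_GENERATED_CODE(isolate, f, ...) pair becomes the GeneratedCode<F> wrapper, which captures the isolate and entry point once and forwards arguments through Call(). A minimal sketch of the migrated call site, assuming a Handle<Code> named `code` produced by GetCode() exactly as in the tests above (not a complete test):

    // Sketch of the new call pattern; `code` and the F2 signature are
    // taken from the hunks above.
    auto f = GeneratedCode<F2>::FromCode(*code);
    // Call() forwards the five integer arguments; the isolate no longer
    // needs to be threaded through a macro at each call site.
    intptr_t res = reinterpret_cast<intptr_t>(f.Call(3, 4, 0, 0, 0));
    CHECK_EQ(7, static_cast<int>(res));
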
diff --git a/deps/v8/test/cctest/test-assembler-x64.cc b/deps/v8/test/cctest/test-assembler-x64.cc
index e356fb2d82..043743b40a 100644
--- a/deps/v8/test/cctest/test-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-assembler-x64.cc
@@ -82,6 +82,7 @@ TEST(AssemblerX64ReturnOperation) {
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
// Call the function from C++.
int result = FUNCTION_CAST<F2>(buffer)(3, 2);
CHECK_EQ(2, result);
@@ -111,6 +112,7 @@ TEST(AssemblerX64StackOperations) {
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
// Call the function from C++.
int result = FUNCTION_CAST<F2>(buffer)(3, 2);
CHECK_EQ(2, result);
@@ -130,6 +132,7 @@ TEST(AssemblerX64ArithmeticOperations) {
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
// Call the function from C++.
int result = FUNCTION_CAST<F2>(buffer)(3, 2);
CHECK_EQ(5, result);
@@ -156,6 +159,7 @@ TEST(AssemblerX64CmpbOperation) {
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
// Call the function from C++.
int result = FUNCTION_CAST<F2>(buffer)(0x1002, 0x2002);
CHECK_EQ(1, result);
@@ -193,6 +197,7 @@ TEST(AssemblerX64ImulOperation) {
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
// Call the function from C++.
int result = FUNCTION_CAST<F2>(buffer)(3, 2);
CHECK_EQ(0, result);
@@ -362,6 +367,7 @@ TEST(AssemblerX64testbwqOperation) {
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
// Call the function from C++.
int result = FUNCTION_CAST<F2>(buffer)(0, 0);
CHECK_EQ(1, result);
@@ -382,6 +388,7 @@ TEST(AssemblerX64XchglOperations) {
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
// Call the function from C++.
uint64_t left = V8_2PART_UINT64_C(0x10000000, 20000000);
uint64_t right = V8_2PART_UINT64_C(0x30000000, 40000000);
@@ -404,6 +411,7 @@ TEST(AssemblerX64OrlOperations) {
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
// Call the function from C++.
uint64_t left = V8_2PART_UINT64_C(0x10000000, 20000000);
uint64_t right = V8_2PART_UINT64_C(0x30000000, 40000000);
@@ -425,6 +433,7 @@ TEST(AssemblerX64RollOperations) {
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
// Call the function from C++.
uint64_t src = V8_2PART_UINT64_C(0x10000000, C0000000);
uint64_t result = FUNCTION_CAST<F5>(buffer)(src);
@@ -444,11 +453,12 @@ TEST(AssemblerX64SublOperations) {
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
// Call the function from C++.
uint64_t left = V8_2PART_UINT64_C(0x10000000, 20000000);
uint64_t right = V8_2PART_UINT64_C(0x30000000, 40000000);
uint64_t result = FUNCTION_CAST<F4>(buffer)(&left, &right);
- CHECK_EQ(V8_2PART_UINT64_C(0x10000000, e0000000), left);
+ CHECK_EQ(V8_2PART_UINT64_C(0x10000000, E0000000), left);
USE(result);
}
@@ -471,6 +481,7 @@ TEST(AssemblerX64TestlOperations) {
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
// Call the function from C++.
uint64_t left = V8_2PART_UINT64_C(0x10000000, 20000000);
uint64_t right = V8_2PART_UINT64_C(0x30000000, 00000000);
@@ -488,7 +499,7 @@ TEST(AssemblerX64TestwOperations) {
// Set rax with the ZF flag of the testl instruction.
Label done;
__ movq(rax, Immediate(1));
- __ testw(Operand(arg1, 0), Immediate(0xf0f0));
+ __ testw(Operand(arg1, 0), Immediate(0xF0F0));
__ j(not_zero, &done, Label::kNear);
__ movq(rax, Immediate(0));
__ bind(&done);
@@ -496,6 +507,7 @@ TEST(AssemblerX64TestwOperations) {
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
// Call the function from C++.
uint16_t operand = 0x8000;
uint16_t result = FUNCTION_CAST<F>(buffer)(&operand);
@@ -514,6 +526,7 @@ TEST(AssemblerX64XorlOperations) {
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
// Call the function from C++.
uint64_t left = V8_2PART_UINT64_C(0x10000000, 20000000);
uint64_t right = V8_2PART_UINT64_C(0x30000000, 60000000);
@@ -548,6 +561,7 @@ TEST(AssemblerX64MemoryOperands) {
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
// Call the function from C++.
int result = FUNCTION_CAST<F2>(buffer)(3, 2);
CHECK_EQ(3, result);
@@ -574,6 +588,7 @@ TEST(AssemblerX64ControlFlow) {
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
// Call the function from C++.
int result = FUNCTION_CAST<F2>(buffer)(3, 2);
CHECK_EQ(3, result);
@@ -622,6 +637,7 @@ TEST(AssemblerX64LoopImmediates) {
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
// Call the function from C++.
int result = FUNCTION_CAST<F0>(buffer)();
CHECK_EQ(1, result);
@@ -629,7 +645,7 @@ TEST(AssemblerX64LoopImmediates) {
TEST(OperandRegisterDependency) {
- int offsets[4] = {0, 1, 0xfed, 0xbeefcad};
+ int offsets[4] = {0, 1, 0xFED, 0xBEEFCAD};
for (int i = 0; i < 4; i++) {
int offset = offsets[i];
CHECK(Operand(rax, offset).AddressUsesRegister(rax));
@@ -892,6 +908,7 @@ TEST(AssemblerX64SSE) {
__ subps(xmm2, xmm0);
__ divps(xmm2, xmm1);
__ cvttss2si(rax, xmm2);
+ __ haddps(xmm1, xmm0);
__ ret(0);
}
@@ -1452,7 +1469,7 @@ TEST(AssemblerX64AVX_ss) {
// arguments in xmm0, xmm1 and xmm2
__ subq(rsp, Immediate(kDoubleSize * 2)); // For memory operand
- __ movl(rdx, Immediate(0xc2f64000)); // -123.125
+ __ movl(rdx, Immediate(0xC2F64000)); // -123.125
__ vmovd(xmm4, rdx);
__ vmovss(Operand(rsp, 0), xmm4);
__ vmovss(xmm5, Operand(rsp, 0));
@@ -1546,7 +1563,7 @@ TEST(AssemblerX64AVX_sd) {
// Test vcvtss2sd & vcvtsd2ss
__ movl(rax, Immediate(9));
- __ movq(rdx, V8_INT64_C(0x426D1A0000000000));
+ __ movq(rdx, uint64_t{0x426D1A0000000000});
__ movq(Operand(rsp, 0), rdx);
__ vcvtsd2ss(xmm6, xmm6, Operand(rsp, 0));
__ vcvtss2sd(xmm7, xmm6, xmm6);
@@ -1572,10 +1589,10 @@ TEST(AssemblerX64AVX_sd) {
// Test vcvttsd2siq
__ movl(rax, Immediate(11));
- __ movq(rdx, V8_INT64_C(0x426D1A94A2000000)); // 1.0e12
+ __ movq(rdx, uint64_t{0x426D1A94A2000000}); // 1.0e12
__ vmovq(xmm6, rdx);
__ vcvttsd2siq(rcx, xmm6);
- __ movq(rdx, V8_INT64_C(1000000000000));
+ __ movq(rdx, uint64_t{1000000000000});
__ cmpq(rcx, rdx);
__ j(not_equal, &exit);
__ xorq(rcx, rcx);
@@ -1586,9 +1603,9 @@ TEST(AssemblerX64AVX_sd) {
// Test vmovmskpd
__ movl(rax, Immediate(12));
- __ movq(rdx, V8_INT64_C(0x426D1A94A2000000)); // 1.0e12
+ __ movq(rdx, uint64_t{0x426D1A94A2000000}); // 1.0e12
__ vmovq(xmm6, rdx);
- __ movq(rdx, V8_INT64_C(0xC26D1A94A2000000)); // -1.0e12
+ __ movq(rdx, uint64_t{0xC26D1A94A2000000}); // -1.0e12
__ vmovq(xmm7, rdx);
__ shufps(xmm6, xmm7, 0x44);
__ vmovmskpd(rdx, xmm6);
@@ -1596,54 +1613,54 @@ TEST(AssemblerX64AVX_sd) {
__ j(not_equal, &exit);
// Test vpcmpeqd
- __ movq(rdx, V8_UINT64_C(0x0123456789abcdef));
- __ movq(rcx, V8_UINT64_C(0x0123456788888888));
+ __ movq(rdx, uint64_t{0x0123456789ABCDEF});
+ __ movq(rcx, uint64_t{0x0123456788888888});
__ vmovq(xmm6, rdx);
__ vmovq(xmm7, rcx);
__ vpcmpeqd(xmm8, xmm6, xmm7);
__ vmovq(rdx, xmm8);
- __ movq(rcx, V8_UINT64_C(0xffffffff00000000));
+ __ movq(rcx, uint64_t{0xFFFFFFFF00000000});
__ cmpq(rcx, rdx);
__ movl(rax, Immediate(13));
__ j(not_equal, &exit);
// Test vpsllq, vpsrlq
__ movl(rax, Immediate(13));
- __ movq(rdx, V8_UINT64_C(0x0123456789abcdef));
+ __ movq(rdx, uint64_t{0x0123456789ABCDEF});
__ vmovq(xmm6, rdx);
__ vpsrlq(xmm7, xmm6, 4);
__ vmovq(rdx, xmm7);
- __ movq(rcx, V8_UINT64_C(0x00123456789abcde));
+ __ movq(rcx, uint64_t{0x00123456789ABCDE});
__ cmpq(rdx, rcx);
__ j(not_equal, &exit);
__ vpsllq(xmm7, xmm6, 12);
__ vmovq(rdx, xmm7);
- __ movq(rcx, V8_UINT64_C(0x3456789abcdef000));
+ __ movq(rcx, uint64_t{0x3456789ABCDEF000});
__ cmpq(rdx, rcx);
__ j(not_equal, &exit);
// Test vandpd, vorpd, vxorpd
__ movl(rax, Immediate(14));
- __ movl(rdx, Immediate(0x00ff00ff));
- __ movl(rcx, Immediate(0x0f0f0f0f));
+ __ movl(rdx, Immediate(0x00FF00FF));
+ __ movl(rcx, Immediate(0x0F0F0F0F));
__ vmovd(xmm4, rdx);
__ vmovd(xmm5, rcx);
__ vandpd(xmm6, xmm4, xmm5);
__ vmovd(rdx, xmm6);
- __ cmpl(rdx, Immediate(0x000f000f));
+ __ cmpl(rdx, Immediate(0x000F000F));
__ j(not_equal, &exit);
__ vorpd(xmm6, xmm4, xmm5);
__ vmovd(rdx, xmm6);
- __ cmpl(rdx, Immediate(0x0fff0fff));
+ __ cmpl(rdx, Immediate(0x0FFF0FFF));
__ j(not_equal, &exit);
__ vxorpd(xmm6, xmm4, xmm5);
__ vmovd(rdx, xmm6);
- __ cmpl(rdx, Immediate(0x0ff00ff0));
+ __ cmpl(rdx, Immediate(0x0FF00FF0));
__ j(not_equal, &exit);
// Test vsqrtsd
__ movl(rax, Immediate(15));
- __ movq(rdx, V8_UINT64_C(0x4004000000000000)); // 2.5
+ __ movq(rdx, uint64_t{0x4004000000000000}); // 2.5
__ vmovq(xmm4, rdx);
__ vmulsd(xmm5, xmm4, xmm4);
__ vmovsd(Operand(rsp, 0), xmm5);
@@ -1658,10 +1675,10 @@ TEST(AssemblerX64AVX_sd) {
// Test vroundsd
__ movl(rax, Immediate(16));
- __ movq(rdx, V8_UINT64_C(0x4002000000000000)); // 2.25
+ __ movq(rdx, uint64_t{0x4002000000000000}); // 2.25
__ vmovq(xmm4, rdx);
__ vroundsd(xmm5, xmm4, xmm4, kRoundUp);
- __ movq(rcx, V8_UINT64_C(0x4008000000000000)); // 3.0
+ __ movq(rcx, uint64_t{0x4008000000000000}); // 3.0
__ vmovq(xmm6, rcx);
__ vucomisd(xmm5, xmm6);
__ j(not_equal, &exit);
@@ -1669,7 +1686,7 @@ TEST(AssemblerX64AVX_sd) {
// Test vcvtlsi2sd
__ movl(rax, Immediate(17));
__ movl(rdx, Immediate(6));
- __ movq(rcx, V8_UINT64_C(0x4018000000000000)); // 6.0
+ __ movq(rcx, uint64_t{0x4018000000000000}); // 6.0
__ vmovq(xmm5, rcx);
__ vcvtlsi2sd(xmm6, xmm6, rdx);
__ vucomisd(xmm5, xmm6);
@@ -1681,8 +1698,8 @@ TEST(AssemblerX64AVX_sd) {
// Test vcvtqsi2sd
__ movl(rax, Immediate(18));
- __ movq(rdx, V8_UINT64_C(0x2000000000000000)); // 2 << 0x3c
- __ movq(rcx, V8_UINT64_C(0x43c0000000000000));
+ __ movq(rdx, uint64_t{0x2000000000000000}); // 2 << 0x3C
+ __ movq(rcx, uint64_t{0x43C0000000000000});
__ vmovq(xmm5, rcx);
__ vcvtqsi2sd(xmm6, xmm6, rdx);
__ vucomisd(xmm5, xmm6);
@@ -1690,13 +1707,13 @@ TEST(AssemblerX64AVX_sd) {
// Test vcvtsd2si
__ movl(rax, Immediate(19));
- __ movq(rdx, V8_UINT64_C(0x4018000000000000)); // 6.0
+ __ movq(rdx, uint64_t{0x4018000000000000}); // 6.0
__ vmovq(xmm5, rdx);
__ vcvtsd2si(rcx, xmm5);
__ cmpl(rcx, Immediate(6));
__ j(not_equal, &exit);
- __ movq(rdx, V8_INT64_C(0x3ff0000000000000)); // 1.0
+ __ movq(rdx, uint64_t{0x3FF0000000000000}); // 1.0
__ vmovq(xmm7, rdx);
__ vmulsd(xmm1, xmm1, xmm7);
__ movq(Operand(rsp, 0), rdx);
@@ -1775,160 +1792,160 @@ TEST(AssemblerX64BMI1) {
CpuFeatureScope fscope(&masm, BMI1);
Label exit;
- __ movq(rcx, V8_UINT64_C(0x1122334455667788)); // source operand
+ __ movq(rcx, uint64_t{0x1122334455667788}); // source operand
__ pushq(rcx); // For memory operand
// andn
- __ movq(rdx, V8_UINT64_C(0x1000000020000000));
+ __ movq(rdx, uint64_t{0x1000000020000000});
__ movl(rax, Immediate(1)); // Test number
__ andnq(r8, rdx, rcx);
- __ movq(r9, V8_UINT64_C(0x0122334455667788)); // expected result
+ __ movq(r9, uint64_t{0x0122334455667788}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
__ incq(rax);
__ andnq(r8, rdx, Operand(rsp, 0));
- __ movq(r9, V8_UINT64_C(0x0122334455667788)); // expected result
+ __ movq(r9, uint64_t{0x0122334455667788}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
__ incq(rax);
__ andnl(r8, rdx, rcx);
- __ movq(r9, V8_UINT64_C(0x0000000055667788)); // expected result
+ __ movq(r9, uint64_t{0x0000000055667788}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
__ incq(rax);
__ andnl(r8, rdx, Operand(rsp, 0));
- __ movq(r9, V8_UINT64_C(0x0000000055667788)); // expected result
+ __ movq(r9, uint64_t{0x0000000055667788}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
// bextr
- __ movq(rdx, V8_UINT64_C(0x0000000000002808));
+ __ movq(rdx, uint64_t{0x0000000000002808});
__ incq(rax);
__ bextrq(r8, rcx, rdx);
- __ movq(r9, V8_UINT64_C(0x0000003344556677)); // expected result
+ __ movq(r9, uint64_t{0x0000003344556677}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
__ incq(rax);
__ bextrq(r8, Operand(rsp, 0), rdx);
- __ movq(r9, V8_UINT64_C(0x0000003344556677)); // expected result
+ __ movq(r9, uint64_t{0x0000003344556677}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
__ incq(rax);
__ bextrl(r8, rcx, rdx);
- __ movq(r9, V8_UINT64_C(0x0000000000556677)); // expected result
+ __ movq(r9, uint64_t{0x0000000000556677}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
__ incq(rax);
__ bextrl(r8, Operand(rsp, 0), rdx);
- __ movq(r9, V8_UINT64_C(0x0000000000556677)); // expected result
+ __ movq(r9, uint64_t{0x0000000000556677}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
// blsi
__ incq(rax);
__ blsiq(r8, rcx);
- __ movq(r9, V8_UINT64_C(0x0000000000000008)); // expected result
+ __ movq(r9, uint64_t{0x0000000000000008}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
__ incq(rax);
__ blsiq(r8, Operand(rsp, 0));
- __ movq(r9, V8_UINT64_C(0x0000000000000008)); // expected result
+ __ movq(r9, uint64_t{0x0000000000000008}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
__ incq(rax);
__ blsil(r8, rcx);
- __ movq(r9, V8_UINT64_C(0x0000000000000008)); // expected result
+ __ movq(r9, uint64_t{0x0000000000000008}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
__ incq(rax);
__ blsil(r8, Operand(rsp, 0));
- __ movq(r9, V8_UINT64_C(0x0000000000000008)); // expected result
+ __ movq(r9, uint64_t{0x0000000000000008}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
// blsmsk
__ incq(rax);
__ blsmskq(r8, rcx);
- __ movq(r9, V8_UINT64_C(0x000000000000000f)); // expected result
+ __ movq(r9, uint64_t{0x000000000000000F}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
__ incq(rax);
__ blsmskq(r8, Operand(rsp, 0));
- __ movq(r9, V8_UINT64_C(0x000000000000000f)); // expected result
+ __ movq(r9, uint64_t{0x000000000000000F}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
__ incq(rax);
__ blsmskl(r8, rcx);
- __ movq(r9, V8_UINT64_C(0x000000000000000f)); // expected result
+ __ movq(r9, uint64_t{0x000000000000000F}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
__ incq(rax);
__ blsmskl(r8, Operand(rsp, 0));
- __ movq(r9, V8_UINT64_C(0x000000000000000f)); // expected result
+ __ movq(r9, uint64_t{0x000000000000000F}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
// blsr
__ incq(rax);
__ blsrq(r8, rcx);
- __ movq(r9, V8_UINT64_C(0x1122334455667780)); // expected result
+ __ movq(r9, uint64_t{0x1122334455667780}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
__ incq(rax);
__ blsrq(r8, Operand(rsp, 0));
- __ movq(r9, V8_UINT64_C(0x1122334455667780)); // expected result
+ __ movq(r9, uint64_t{0x1122334455667780}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
__ incq(rax);
__ blsrl(r8, rcx);
- __ movq(r9, V8_UINT64_C(0x0000000055667780)); // expected result
+ __ movq(r9, uint64_t{0x0000000055667780}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
__ incq(rax);
__ blsrl(r8, Operand(rsp, 0));
- __ movq(r9, V8_UINT64_C(0x0000000055667780)); // expected result
+ __ movq(r9, uint64_t{0x0000000055667780}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
// tzcnt
__ incq(rax);
__ tzcntq(r8, rcx);
- __ movq(r9, V8_UINT64_C(0x0000000000000003)); // expected result
+ __ movq(r9, uint64_t{0x0000000000000003}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
__ incq(rax);
__ tzcntq(r8, Operand(rsp, 0));
- __ movq(r9, V8_UINT64_C(0x0000000000000003)); // expected result
+ __ movq(r9, uint64_t{0x0000000000000003}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
__ incq(rax);
__ tzcntl(r8, rcx);
- __ movq(r9, V8_UINT64_C(0x0000000000000003)); // expected result
+ __ movq(r9, uint64_t{0x0000000000000003}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
__ incq(rax);
__ tzcntl(r8, Operand(rsp, 0));
- __ movq(r9, V8_UINT64_C(0x0000000000000003)); // expected result
+ __ movq(r9, uint64_t{0x0000000000000003}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
@@ -1965,30 +1982,30 @@ TEST(AssemblerX64LZCNT) {
CpuFeatureScope fscope(&masm, LZCNT);
Label exit;
- __ movq(rcx, V8_UINT64_C(0x1122334455667788)); // source operand
+ __ movq(rcx, uint64_t{0x1122334455667788}); // source operand
__ pushq(rcx); // For memory operand
__ movl(rax, Immediate(1)); // Test number
__ lzcntq(r8, rcx);
- __ movq(r9, V8_UINT64_C(0x0000000000000003)); // expected result
+ __ movq(r9, uint64_t{0x0000000000000003}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
__ incq(rax);
__ lzcntq(r8, Operand(rsp, 0));
- __ movq(r9, V8_UINT64_C(0x0000000000000003)); // expected result
+ __ movq(r9, uint64_t{0x0000000000000003}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
__ incq(rax);
__ lzcntl(r8, rcx);
- __ movq(r9, V8_UINT64_C(0x0000000000000001)); // expected result
+ __ movq(r9, uint64_t{0x0000000000000001}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
__ incq(rax);
__ lzcntl(r8, Operand(rsp, 0));
- __ movq(r9, V8_UINT64_C(0x0000000000000001)); // expected result
+ __ movq(r9, uint64_t{0x0000000000000001}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
@@ -2025,30 +2042,30 @@ TEST(AssemblerX64POPCNT) {
CpuFeatureScope fscope(&masm, POPCNT);
Label exit;
- __ movq(rcx, V8_UINT64_C(0x1111111111111100)); // source operand
+ __ movq(rcx, uint64_t{0x1111111111111100}); // source operand
__ pushq(rcx); // For memory operand
__ movl(rax, Immediate(1)); // Test number
__ popcntq(r8, rcx);
- __ movq(r9, V8_UINT64_C(0x000000000000000e)); // expected result
+ __ movq(r9, uint64_t{0x000000000000000E}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
__ incq(rax);
__ popcntq(r8, Operand(rsp, 0));
- __ movq(r9, V8_UINT64_C(0x000000000000000e)); // expected result
+ __ movq(r9, uint64_t{0x000000000000000E}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
__ incq(rax);
__ popcntl(r8, rcx);
- __ movq(r9, V8_UINT64_C(0x0000000000000006)); // expected result
+ __ movq(r9, uint64_t{0x0000000000000006}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
__ incq(rax);
__ popcntl(r8, Operand(rsp, 0));
- __ movq(r9, V8_UINT64_C(0x0000000000000006)); // expected result
+ __ movq(r9, uint64_t{0x0000000000000006}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
@@ -2085,232 +2102,232 @@ TEST(AssemblerX64BMI2) {
CpuFeatureScope fscope(&masm, BMI2);
Label exit;
__ pushq(rbx); // save rbx
- __ movq(rcx, V8_UINT64_C(0x1122334455667788)); // source operand
+ __ movq(rcx, uint64_t{0x1122334455667788}); // source operand
__ pushq(rcx); // For memory operand
// bzhi
- __ movq(rdx, V8_UINT64_C(0x0000000000000009));
+ __ movq(rdx, uint64_t{0x0000000000000009});
__ movl(rax, Immediate(1)); // Test number
__ bzhiq(r8, rcx, rdx);
- __ movq(r9, V8_UINT64_C(0x0000000000000188)); // expected result
+ __ movq(r9, uint64_t{0x0000000000000188}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
__ incq(rax);
__ bzhiq(r8, Operand(rsp, 0), rdx);
- __ movq(r9, V8_UINT64_C(0x0000000000000188)); // expected result
+ __ movq(r9, uint64_t{0x0000000000000188}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
__ incq(rax);
__ bzhil(r8, rcx, rdx);
- __ movq(r9, V8_UINT64_C(0x0000000000000188)); // expected result
+ __ movq(r9, uint64_t{0x0000000000000188}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
__ incq(rax);
__ bzhil(r8, Operand(rsp, 0), rdx);
- __ movq(r9, V8_UINT64_C(0x0000000000000188)); // expected result
+ __ movq(r9, uint64_t{0x0000000000000188}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
// mulx
- __ movq(rdx, V8_UINT64_C(0x0000000000001000));
+ __ movq(rdx, uint64_t{0x0000000000001000});
__ incq(rax);
__ mulxq(r8, r9, rcx);
- __ movq(rbx, V8_UINT64_C(0x0000000000000112)); // expected result
+ __ movq(rbx, uint64_t{0x0000000000000112}); // expected result
__ cmpq(r8, rbx);
__ j(not_equal, &exit);
- __ movq(rbx, V8_UINT64_C(0x2334455667788000)); // expected result
+ __ movq(rbx, uint64_t{0x2334455667788000}); // expected result
__ cmpq(r9, rbx);
__ j(not_equal, &exit);
__ incq(rax);
__ mulxq(r8, r9, Operand(rsp, 0));
- __ movq(rbx, V8_UINT64_C(0x0000000000000112)); // expected result
+ __ movq(rbx, uint64_t{0x0000000000000112}); // expected result
__ cmpq(r8, rbx);
__ j(not_equal, &exit);
- __ movq(rbx, V8_UINT64_C(0x2334455667788000)); // expected result
+ __ movq(rbx, uint64_t{0x2334455667788000}); // expected result
__ cmpq(r9, rbx);
__ j(not_equal, &exit);
__ incq(rax);
__ mulxl(r8, r9, rcx);
- __ movq(rbx, V8_UINT64_C(0x0000000000000556)); // expected result
+ __ movq(rbx, uint64_t{0x0000000000000556}); // expected result
__ cmpq(r8, rbx);
__ j(not_equal, &exit);
- __ movq(rbx, V8_UINT64_C(0x0000000067788000)); // expected result
+ __ movq(rbx, uint64_t{0x0000000067788000}); // expected result
__ cmpq(r9, rbx);
__ j(not_equal, &exit);
__ incq(rax);
__ mulxl(r8, r9, Operand(rsp, 0));
- __ movq(rbx, V8_UINT64_C(0x0000000000000556)); // expected result
+ __ movq(rbx, uint64_t{0x0000000000000556}); // expected result
__ cmpq(r8, rbx);
__ j(not_equal, &exit);
- __ movq(rbx, V8_UINT64_C(0x0000000067788000)); // expected result
+ __ movq(rbx, uint64_t{0x0000000067788000}); // expected result
__ cmpq(r9, rbx);
__ j(not_equal, &exit);
// pdep
- __ movq(rdx, V8_UINT64_C(0xfffffffffffffff0));
+ __ movq(rdx, uint64_t{0xFFFFFFFFFFFFFFF0});
__ incq(rax);
__ pdepq(r8, rdx, rcx);
- __ movq(r9, V8_UINT64_C(0x1122334455667400)); // expected result
+ __ movq(r9, uint64_t{0x1122334455667400}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
__ incq(rax);
__ pdepq(r8, rdx, Operand(rsp, 0));
- __ movq(r9, V8_UINT64_C(0x1122334455667400)); // expected result
+ __ movq(r9, uint64_t{0x1122334455667400}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
__ incq(rax);
__ pdepl(r8, rdx, rcx);
- __ movq(r9, V8_UINT64_C(0x0000000055667400)); // expected result
+ __ movq(r9, uint64_t{0x0000000055667400}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
__ incq(rax);
__ pdepl(r8, rdx, Operand(rsp, 0));
- __ movq(r9, V8_UINT64_C(0x0000000055667400)); // expected result
+ __ movq(r9, uint64_t{0x0000000055667400}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
// pext
- __ movq(rdx, V8_UINT64_C(0xfffffffffffffff0));
+ __ movq(rdx, uint64_t{0xFFFFFFFFFFFFFFF0});
__ incq(rax);
__ pextq(r8, rdx, rcx);
- __ movq(r9, V8_UINT64_C(0x0000000003fffffe)); // expected result
+ __ movq(r9, uint64_t{0x0000000003FFFFFE}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
__ incq(rax);
__ pextq(r8, rdx, Operand(rsp, 0));
- __ movq(r9, V8_UINT64_C(0x0000000003fffffe)); // expected result
+ __ movq(r9, uint64_t{0x0000000003FFFFFE}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
__ incq(rax);
__ pextl(r8, rdx, rcx);
- __ movq(r9, V8_UINT64_C(0x000000000000fffe)); // expected result
+ __ movq(r9, uint64_t{0x000000000000FFFE}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
__ incq(rax);
__ pextl(r8, rdx, Operand(rsp, 0));
- __ movq(r9, V8_UINT64_C(0x000000000000fffe)); // expected result
+ __ movq(r9, uint64_t{0x000000000000FFFE}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
// sarx
- __ movq(rdx, V8_UINT64_C(0x0000000000000004));
+ __ movq(rdx, uint64_t{0x0000000000000004});
__ incq(rax);
__ sarxq(r8, rcx, rdx);
- __ movq(r9, V8_UINT64_C(0x0112233445566778)); // expected result
+ __ movq(r9, uint64_t{0x0112233445566778}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
__ incq(rax);
__ sarxq(r8, Operand(rsp, 0), rdx);
- __ movq(r9, V8_UINT64_C(0x0112233445566778)); // expected result
+ __ movq(r9, uint64_t{0x0112233445566778}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
__ incq(rax);
__ sarxl(r8, rcx, rdx);
- __ movq(r9, V8_UINT64_C(0x0000000005566778)); // expected result
+ __ movq(r9, uint64_t{0x0000000005566778}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
__ incq(rax);
__ sarxl(r8, Operand(rsp, 0), rdx);
- __ movq(r9, V8_UINT64_C(0x0000000005566778)); // expected result
+ __ movq(r9, uint64_t{0x0000000005566778}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
// shlx
- __ movq(rdx, V8_UINT64_C(0x0000000000000004));
+ __ movq(rdx, uint64_t{0x0000000000000004});
__ incq(rax);
__ shlxq(r8, rcx, rdx);
- __ movq(r9, V8_UINT64_C(0x1223344556677880)); // expected result
+ __ movq(r9, uint64_t{0x1223344556677880}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
__ incq(rax);
__ shlxq(r8, Operand(rsp, 0), rdx);
- __ movq(r9, V8_UINT64_C(0x1223344556677880)); // expected result
+ __ movq(r9, uint64_t{0x1223344556677880}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
__ incq(rax);
__ shlxl(r8, rcx, rdx);
- __ movq(r9, V8_UINT64_C(0x0000000056677880)); // expected result
+ __ movq(r9, uint64_t{0x0000000056677880}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
__ incq(rax);
__ shlxl(r8, Operand(rsp, 0), rdx);
- __ movq(r9, V8_UINT64_C(0x0000000056677880)); // expected result
+ __ movq(r9, uint64_t{0x0000000056677880}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
// shrx
- __ movq(rdx, V8_UINT64_C(0x0000000000000004));
+ __ movq(rdx, uint64_t{0x0000000000000004});
__ incq(rax);
__ shrxq(r8, rcx, rdx);
- __ movq(r9, V8_UINT64_C(0x0112233445566778)); // expected result
+ __ movq(r9, uint64_t{0x0112233445566778}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
__ incq(rax);
__ shrxq(r8, Operand(rsp, 0), rdx);
- __ movq(r9, V8_UINT64_C(0x0112233445566778)); // expected result
+ __ movq(r9, uint64_t{0x0112233445566778}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
__ incq(rax);
__ shrxl(r8, rcx, rdx);
- __ movq(r9, V8_UINT64_C(0x0000000005566778)); // expected result
+ __ movq(r9, uint64_t{0x0000000005566778}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
__ incq(rax);
__ shrxl(r8, Operand(rsp, 0), rdx);
- __ movq(r9, V8_UINT64_C(0x0000000005566778)); // expected result
+ __ movq(r9, uint64_t{0x0000000005566778}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
// rorx
__ incq(rax);
__ rorxq(r8, rcx, 0x4);
- __ movq(r9, V8_UINT64_C(0x8112233445566778)); // expected result
+ __ movq(r9, uint64_t{0x8112233445566778}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
__ incq(rax);
__ rorxq(r8, Operand(rsp, 0), 0x4);
- __ movq(r9, V8_UINT64_C(0x8112233445566778)); // expected result
+ __ movq(r9, uint64_t{0x8112233445566778}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
__ incq(rax);
__ rorxl(r8, rcx, 0x4);
- __ movq(r9, V8_UINT64_C(0x0000000085566778)); // expected result
+ __ movq(r9, uint64_t{0x0000000085566778}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
__ incq(rax);
__ rorxl(r8, Operand(rsp, 0), 0x4);
- __ movq(r9, V8_UINT64_C(0x0000000085566778)); // expected result
+ __ movq(r9, uint64_t{0x0000000085566778}); // expected result
__ cmpq(r8, r9);
__ j(not_equal, &exit);
@@ -2444,8 +2461,9 @@ TEST(AssemblerX64PslldWithXmm15) {
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
- uint64_t result = FUNCTION_CAST<F5>(buffer)(V8_UINT64_C(0x1122334455667788));
- CHECK_EQ(V8_UINT64_C(0x22446688aaccef10), result);
+ MakeAssemblerBufferExecutable(buffer, allocated);
+ uint64_t result = FUNCTION_CAST<F5>(buffer)(uint64_t{0x1122334455667788});
+ CHECK_EQ(uint64_t{0x22446688AACCEF10}, result);
}
typedef float (*F9)(float x, float y);
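
Editor's note: each x64 hunk above inserts the same call between masm.GetCode() and the first jump into the buffer — the test buffer is now writable while code is being assembled, and MakeAssemblerBufferExecutable() flips its protection before execution. A condensed sketch of the resulting skeleton, assuming `buffer` and `allocated` come from the tests' existing buffer-allocation helper:

    // Sketch only; names follow the hunks above.
    CodeDesc desc;
    masm.GetCode(CcTest::i_isolate(), &desc);
    // The buffer was read-write during assembly; make it executable
    // before calling into it.
    MakeAssemblerBufferExecutable(buffer, allocated);
    int result = FUNCTION_CAST<F2>(buffer)(3, 2);

The same file also swaps the V8_UINT64_C/V8_INT64_C macros for brace-initialized uint64_t{...} literals and upper-cases the hex digits, with no change in value.
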
diff --git a/deps/v8/test/cctest/test-atomicops.cc b/deps/v8/test/cctest/test-atomicops.cc
index add819f771..92421138cb 100644
--- a/deps/v8/test/cctest/test-atomicops.cc
+++ b/deps/v8/test/cctest/test-atomicops.cc
@@ -168,12 +168,11 @@ static void TestAtomicIncrementBounds() {
CHECK_EQU(test_val - 1, value);
}
-
-// Return an AtomicType with the value 0xa5a5a5..
+// Return an AtomicType with the value 0xA5A5A5..
template <class AtomicType>
static AtomicType TestFillValue() {
AtomicType val = 0;
- memset(&val, 0xa5, sizeof(AtomicType));
+ memset(&val, 0xA5, sizeof(AtomicType));
return val;
}
diff --git a/deps/v8/test/cctest/test-bignum.cc b/deps/v8/test/cctest/test-bignum.cc
index d9721b06e1..966ee5b5d4 100644
--- a/deps/v8/test/cctest/test-bignum.cc
+++ b/deps/v8/test/cctest/test-bignum.cc
@@ -641,7 +641,7 @@ TEST(MultiplyUInt64) {
CHECK_EQ(0, strcmp("FFFEFFFFFFFFFFFF00010000000000000000000000000", buffer));
AssignDecimalString(&bignum, "15611230384529777");
- bignum.MultiplyByUInt64(V8_2PART_UINT64_C(0x8ac72304, 89e80000));
+ bignum.MultiplyByUInt64(V8_2PART_UINT64_C(0x8AC72304, 89E80000));
CHECK(bignum.ToHexString(buffer, kBufferSize));
CHECK_EQ(0, strcmp("1E10EE4B11D15A7F3DE7F3C7680000", buffer));
}
diff --git a/deps/v8/test/cctest/test-code-layout.cc b/deps/v8/test/cctest/test-code-layout.cc
index caeeaf3283..7f6fa58b17 100644
--- a/deps/v8/test/cctest/test-code-layout.cc
+++ b/deps/v8/test/cctest/test-code-layout.cc
@@ -15,8 +15,8 @@ TEST(CodeLayoutWithoutUnwindingInfo) {
HandleScope handle_scope(CcTest::i_isolate());
// "Hello, World!" in ASCII.
- byte buffer_array[13] = {0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x2c, 0x20,
- 0x57, 0x6f, 0x72, 0x6c, 0x64, 0x21};
+ byte buffer_array[13] = {0x48, 0x65, 0x6C, 0x6C, 0x6F, 0x2C, 0x20,
+ 0x57, 0x6F, 0x72, 0x6C, 0x64, 0x21};
byte* buffer = &buffer_array[0];
int buffer_size = sizeof(buffer_array);
@@ -46,11 +46,11 @@ TEST(CodeLayoutWithUnwindingInfo) {
HandleScope handle_scope(CcTest::i_isolate());
// "Hello, World!" in ASCII.
- byte buffer_array[13] = {0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x2c, 0x20,
- 0x57, 0x6f, 0x72, 0x6c, 0x64, 0x21};
+ byte buffer_array[13] = {0x48, 0x65, 0x6C, 0x6C, 0x6F, 0x2C, 0x20,
+ 0x57, 0x6F, 0x72, 0x6C, 0x64, 0x21};
// "JavaScript" in ASCII.
- byte unwinding_info_array[10] = {0x4a, 0x61, 0x76, 0x61, 0x53,
+ byte unwinding_info_array[10] = {0x4A, 0x61, 0x76, 0x61, 0x53,
0x63, 0x72, 0x69, 0x70, 0x74};
byte* buffer = &buffer_array[0];
diff --git a/deps/v8/test/cctest/test-code-stub-assembler.cc b/deps/v8/test/cctest/test-code-stub-assembler.cc
index 7a94d3a511..7889a61a77 100644
--- a/deps/v8/test/cctest/test-code-stub-assembler.cc
+++ b/deps/v8/test/cctest/test-code-stub-assembler.cc
@@ -237,8 +237,8 @@ TEST(DecodeWordFromWord32) {
CodeStubAssembler m(asm_tester.state());
class TestBitField : public BitField<unsigned, 3, 3> {};
- m.Return(
- m.SmiTag(m.DecodeWordFromWord32<TestBitField>(m.Int32Constant(0x2f))));
+ m.Return(m.SmiTag(
+ m.Signed(m.DecodeWordFromWord32<TestBitField>(m.Int32Constant(0x2F)))));
FunctionTester ft(asm_tester.GenerateCode());
MaybeHandle<Object> result = ft.Call();
// value = 00101111
@@ -914,7 +914,7 @@ TEST(TryHasOwnProperty) {
JSFunction::EnsureHasInitialMap(function);
function->initial_map()->set_instance_type(JS_GLOBAL_OBJECT_TYPE);
function->initial_map()->set_is_prototype_map(true);
- function->initial_map()->set_dictionary_map(true);
+ function->initial_map()->set_is_dictionary_map(true);
function->initial_map()->set_may_have_interesting_symbols(true);
Handle<JSObject> object = factory->NewJSGlobalObject(function);
AddProperties(object, names, arraysize(names));
@@ -1706,7 +1706,94 @@ TEST(Arguments) {
CSA_ASSERT(
&m, m.WordEqual(arguments.AtIndex(2), m.SmiConstant(Smi::FromInt(14))));
- m.Return(arguments.GetReceiver());
+ arguments.PopAndReturn(arguments.GetReceiver());
+
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
+ Handle<Object> result = ft.Call(isolate->factory()->undefined_value(),
+ Handle<Smi>(Smi::FromInt(12), isolate),
+ Handle<Smi>(Smi::FromInt(13), isolate),
+ Handle<Smi>(Smi::FromInt(14), isolate))
+ .ToHandleChecked();
+ CHECK_EQ(*isolate->factory()->undefined_value(), *result);
+}
+
+TEST(ArgumentsWithSmiConstantIndices) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+
+ const int kNumParams = 4;
+ CodeAssemblerTester asm_tester(isolate, kNumParams);
+ CodeStubAssembler m(asm_tester.state());
+
+ CodeStubArguments arguments(&m, m.SmiConstant(3), nullptr,
+ CodeStubAssembler::SMI_PARAMETERS);
+
+ CSA_ASSERT(&m,
+ m.WordEqual(arguments.AtIndex(m.SmiConstant(0),
+ CodeStubAssembler::SMI_PARAMETERS),
+ m.SmiConstant(Smi::FromInt(12))));
+ CSA_ASSERT(&m,
+ m.WordEqual(arguments.AtIndex(m.SmiConstant(1),
+ CodeStubAssembler::SMI_PARAMETERS),
+ m.SmiConstant(Smi::FromInt(13))));
+ CSA_ASSERT(&m,
+ m.WordEqual(arguments.AtIndex(m.SmiConstant(2),
+ CodeStubAssembler::SMI_PARAMETERS),
+ m.SmiConstant(Smi::FromInt(14))));
+
+ arguments.PopAndReturn(arguments.GetReceiver());
+
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
+ Handle<Object> result = ft.Call(isolate->factory()->undefined_value(),
+ Handle<Smi>(Smi::FromInt(12), isolate),
+ Handle<Smi>(Smi::FromInt(13), isolate),
+ Handle<Smi>(Smi::FromInt(14), isolate))
+ .ToHandleChecked();
+ CHECK_EQ(*isolate->factory()->undefined_value(), *result);
+}
+
+TNode<Smi> NonConstantSmi(CodeStubAssembler* m, int value) {
+ // Generate a SMI with the given value and feed it through a Phi so it can't
+ // be inferred to be constant.
+ Variable var(m, MachineRepresentation::kTagged, m->SmiConstant(value));
+ Label dummy_done(m);
+ // Even though the Goto always executes, it will taint the variable and thus
+ // make it appear non-constant when used later.
+ m->GotoIf(m->Int32Constant(1), &dummy_done);
+ var.Bind(m->SmiConstant(value));
+ m->Goto(&dummy_done);
+ m->BIND(&dummy_done);
+
+ // Ensure that the above hackery actually created a non-constant SMI.
+ Smi* smi_constant;
+ CHECK(!m->ToSmiConstant(var.value(), smi_constant));
+
+ return m->UncheckedCast<Smi>(var.value());
+}
+
+TEST(ArgumentsWithSmiIndices) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+
+ const int kNumParams = 4;
+ CodeAssemblerTester asm_tester(isolate, kNumParams);
+ CodeStubAssembler m(asm_tester.state());
+
+ CodeStubArguments arguments(&m, m.SmiConstant(3), nullptr,
+ CodeStubAssembler::SMI_PARAMETERS);
+
+ CSA_ASSERT(&m,
+ m.WordEqual(arguments.AtIndex(NonConstantSmi(&m, 0),
+ CodeStubAssembler::SMI_PARAMETERS),
+ m.SmiConstant(Smi::FromInt(12))));
+ CSA_ASSERT(&m,
+ m.WordEqual(arguments.AtIndex(NonConstantSmi(&m, 1),
+ CodeStubAssembler::SMI_PARAMETERS),
+ m.SmiConstant(Smi::FromInt(13))));
+ CSA_ASSERT(&m,
+ m.WordEqual(arguments.AtIndex(NonConstantSmi(&m, 2),
+ CodeStubAssembler::SMI_PARAMETERS),
+ m.SmiConstant(Smi::FromInt(14))));
+
+ arguments.PopAndReturn(arguments.GetReceiver());
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
Handle<Object> result = ft.Call(isolate->factory()->undefined_value(),
@@ -1734,7 +1821,7 @@ TEST(ArgumentsForEach) {
arguments.ForEach(
list, [&m, &sum](Node* arg) { sum.Bind(m.SmiAdd(sum.value(), arg)); });
- m.Return(sum.value());
+ arguments.PopAndReturn(sum.value());
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
Handle<Object> result = ft.Call(isolate->factory()->undefined_value(),
@@ -2663,7 +2750,7 @@ TEST(GotoIfNotWhiteSpaceOrLineTerminator) {
}
}
-TEST(BranchIfNumericRelationalComparison) {
+TEST(BranchIfNumberRelationalComparison) {
Isolate* isolate(CcTest::InitIsolateOnce());
Factory* f = isolate->factory();
const int kNumParams = 2;
@@ -2671,9 +2758,9 @@ TEST(BranchIfNumericRelationalComparison) {
{
CodeStubAssembler m(asm_tester.state());
Label return_true(&m), return_false(&m);
- m.BranchIfNumericRelationalComparison(Operation::kGreaterThanOrEqual,
- m.Parameter(0), m.Parameter(1),
- &return_true, &return_false);
+ m.BranchIfNumberRelationalComparison(Operation::kGreaterThanOrEqual,
+ m.Parameter(0), m.Parameter(1),
+ &return_true, &return_false);
m.BIND(&return_true);
m.Return(m.BooleanConstant(true));
m.BIND(&return_false);
diff --git a/deps/v8/test/cctest/test-code-stubs-arm.cc b/deps/v8/test/cctest/test-code-stubs-arm.cc
index 076c918906..d042ea617b 100644
--- a/deps/v8/test/cctest/test-code-stubs-arm.cc
+++ b/deps/v8/test/cctest/test-code-stubs-arm.cc
@@ -97,7 +97,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
if (reg != destination_reg) {
__ ldr(ip, MemOperand(sp, 0));
__ cmp(reg, ip);
- __ Assert(eq, kRegisterWasClobbered);
+ __ Assert(eq, AbortReason::kRegisterWasClobbered);
__ add(sp, sp, Operand(kPointerSize));
}
}
@@ -115,6 +115,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
CodeDesc desc;
masm.GetCode(isolate, &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
Assembler::FlushICache(isolate, buffer, allocated);
return (reinterpret_cast<ConvertDToIFunc>(
reinterpret_cast<intptr_t>(buffer)));
@@ -131,7 +132,8 @@ static Isolate* GetIsolateFrom(LocalContext* context) {
int32_t RunGeneratedCodeCallWrapper(ConvertDToIFunc func,
double from) {
#ifdef USE_SIMULATOR
- return CALL_GENERATED_FP_INT(CcTest::i_isolate(), func, from, 0);
+ return Simulator::current(CcTest::i_isolate())
+ ->CallFP<int32_t>(FUNCTION_ADDR(func), from, 0);
#else
return (*func)(from);
#endif
diff --git a/deps/v8/test/cctest/test-code-stubs-arm64.cc b/deps/v8/test/cctest/test-code-stubs-arm64.cc
index db175de5ec..1086bea7ce 100644
--- a/deps/v8/test/cctest/test-code-stubs-arm64.cc
+++ b/deps/v8/test/cctest/test-code-stubs-arm64.cc
@@ -57,64 +57,78 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
byte* start = stub.GetCode()->instruction_start();
- __ SetStackPointer(csp);
__ PushCalleeSavedRegisters();
- __ Mov(jssp, csp);
- __ SetStackPointer(jssp);
-
- // Push the double argument.
- __ Push(d0);
MacroAssembler::PushPopQueue queue(&masm);
// Save registers make sure they don't get clobbered.
int source_reg_offset = kDoubleSize;
int reg_num = 0;
+ queue.Queue(xzr); // Push xzr to maintain sp alignment.
for (; reg_num < Register::kNumRegisters; ++reg_num) {
if (RegisterConfiguration::Default()->IsAllocatableGeneralCode(reg_num)) {
Register reg = Register::from_code(reg_num);
- if (!reg.is(destination_reg)) {
- queue.Queue(reg);
- source_reg_offset += kPointerSize;
- }
+ queue.Queue(reg);
+ source_reg_offset += kPointerSize;
}
}
- // Re-push the double argument.
+ // Push the double argument. We push a second copy to maintain sp alignment.
+ queue.Queue(d0);
queue.Queue(d0);
queue.PushQueued();
- // Call through to the actual stub
+ // Call through to the actual stub.
__ Call(start, RelocInfo::EXTERNAL_REFERENCE);
- __ Drop(1, kDoubleSize);
+ __ Drop(2, kDoubleSize);
+
+ // Make sure no registers have been unexpectedly clobbered.
+ {
+ const RegisterConfiguration* config(RegisterConfiguration::Default());
+ int allocatable_register_count =
+ config->num_allocatable_general_registers();
+ UseScratchRegisterScope temps(&masm);
+ Register temp0 = temps.AcquireX();
+ Register temp1 = temps.AcquireX();
+ for (int i = allocatable_register_count - 1; i > 0; i -= 2) {
+ int code0 = config->GetAllocatableGeneralCode(i);
+ int code1 = config->GetAllocatableGeneralCode(i - 1);
+ Register reg0 = Register::from_code(code0);
+ Register reg1 = Register::from_code(code1);
+ __ Pop(temp0, temp1);
+ if (!reg0.is(destination_reg)) {
+ __ Cmp(reg0, temp0);
+ __ Assert(eq, AbortReason::kRegisterWasClobbered);
+ }
+ if (!reg1.is(destination_reg)) {
+ __ Cmp(reg1, temp1);
+ __ Assert(eq, AbortReason::kRegisterWasClobbered);
+ }
+ }
- // // Make sure no registers have been unexpectedly clobbered
- for (--reg_num; reg_num >= 0; --reg_num) {
- if (RegisterConfiguration::Default()->IsAllocatableGeneralCode(reg_num)) {
- Register reg = Register::from_code(reg_num);
+ if (allocatable_register_count % 2 != 0) {
+ int code = config->GetAllocatableGeneralCode(0);
+ Register reg = Register::from_code(code);
+ __ Pop(temp0, xzr);
if (!reg.is(destination_reg)) {
- __ Pop(ip0);
- __ cmp(reg, ip0);
- __ Assert(eq, kRegisterWasClobbered);
+ __ Cmp(reg, temp0);
+ __ Assert(eq, AbortReason::kRegisterWasClobbered);
}
}
}
- __ Drop(1, kDoubleSize);
-
if (!destination_reg.is(x0))
__ Mov(x0, destination_reg);
// Restore callee save registers.
- __ Mov(csp, jssp);
- __ SetStackPointer(csp);
__ PopCalleeSavedRegisters();
__ Ret();
CodeDesc desc;
masm.GetCode(isolate, &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
Assembler::FlushICache(isolate, buffer, allocated);
return (reinterpret_cast<ConvertDToIFunc>(
reinterpret_cast<intptr_t>(buffer)));
@@ -131,12 +145,8 @@ static Isolate* GetIsolateFrom(LocalContext* context) {
int32_t RunGeneratedCodeCallWrapper(ConvertDToIFunc func,
double from) {
#ifdef USE_SIMULATOR
- Simulator::CallArgument args[] = {
- Simulator::CallArgument(from),
- Simulator::CallArgument::End()
- };
- return static_cast<int32_t>(Simulator::current(CcTest::i_isolate())
- ->CallInt64(FUNCTION_ADDR(func), args));
+ return Simulator::current(CcTest::i_isolate())
+ ->Call<int32_t>(FUNCTION_ADDR(func), from);
#else
return (*func)(from);
#endif
diff --git a/deps/v8/test/cctest/test-code-stubs-ia32.cc b/deps/v8/test/cctest/test-code-stubs-ia32.cc
index 2fe7e26ddc..7e62ab85e8 100644
--- a/deps/v8/test/cctest/test-code-stubs-ia32.cc
+++ b/deps/v8/test/cctest/test-code-stubs-ia32.cc
@@ -90,7 +90,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
Register reg = Register::from_code(reg_num);
if (reg != esp && reg != ebp && reg != destination_reg) {
__ cmp(reg, MemOperand(esp, 0));
- __ Assert(equal, kRegisterWasClobbered);
+ __ Assert(equal, AbortReason::kRegisterWasClobbered);
__ add(esp, Immediate(kPointerSize));
}
}
@@ -108,6 +108,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
CodeDesc desc;
masm.GetCode(isolate, &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
return reinterpret_cast<ConvertDToIFunc>(
reinterpret_cast<intptr_t>(buffer));
}
diff --git a/deps/v8/test/cctest/test-code-stubs-mips.cc b/deps/v8/test/cctest/test-code-stubs-mips.cc
index 123089614b..b1df94feed 100644
--- a/deps/v8/test/cctest/test-code-stubs-mips.cc
+++ b/deps/v8/test/cctest/test-code-stubs-mips.cc
@@ -102,7 +102,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
Register reg = Register::from_code(reg_num);
if (reg != destination_reg) {
__ lw(at, MemOperand(sp, 0));
- __ Assert(eq, kRegisterWasClobbered, reg, Operand(at));
+ __ Assert(eq, AbortReason::kRegisterWasClobbered, reg, Operand(at));
__ Addu(sp, sp, Operand(kPointerSize));
}
}
@@ -128,6 +128,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
CodeDesc desc;
masm.GetCode(isolate, &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
Assembler::FlushICache(isolate, buffer, allocated);
return (reinterpret_cast<ConvertDToIFunc>(
reinterpret_cast<intptr_t>(buffer)));
diff --git a/deps/v8/test/cctest/test-code-stubs-mips64.cc b/deps/v8/test/cctest/test-code-stubs-mips64.cc
index ad4c49338a..c09dac24ea 100644
--- a/deps/v8/test/cctest/test-code-stubs-mips64.cc
+++ b/deps/v8/test/cctest/test-code-stubs-mips64.cc
@@ -100,7 +100,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
Register reg = Register::from_code(reg_num);
if (reg != destination_reg) {
__ Ld(at, MemOperand(sp, 0));
- __ Assert(eq, kRegisterWasClobbered, reg, Operand(at));
+ __ Assert(eq, AbortReason::kRegisterWasClobbered, reg, Operand(at));
__ Daddu(sp, sp, Operand(kPointerSize));
}
}
@@ -125,6 +125,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
CodeDesc desc;
masm.GetCode(isolate, &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
Assembler::FlushICache(isolate, buffer, allocated);
return (reinterpret_cast<ConvertDToIFunc>(
reinterpret_cast<intptr_t>(buffer)));
diff --git a/deps/v8/test/cctest/test-code-stubs-x64.cc b/deps/v8/test/cctest/test-code-stubs-x64.cc
index d69da6d0f6..a03cb4b658 100644
--- a/deps/v8/test/cctest/test-code-stubs-x64.cc
+++ b/deps/v8/test/cctest/test-code-stubs-x64.cc
@@ -89,7 +89,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
Register::from_code(config->GetAllocatableGeneralCode(reg_num));
if (reg != rsp && reg != rbp && reg != destination_reg) {
__ cmpq(reg, MemOperand(rsp, 0));
- __ Assert(equal, kRegisterWasClobbered);
+ __ Assert(equal, AbortReason::kRegisterWasClobbered);
__ addq(rsp, Immediate(kPointerSize));
}
}
@@ -106,6 +106,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
CodeDesc desc;
masm.GetCode(isolate, &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
return reinterpret_cast<ConvertDToIFunc>(
reinterpret_cast<intptr_t>(buffer));
}
diff --git a/deps/v8/test/cctest/test-code-stubs.cc b/deps/v8/test/cctest/test-code-stubs.cc
index 27f411c56c..61d3a81083 100644
--- a/deps/v8/test/cctest/test-code-stubs.cc
+++ b/deps/v8/test/cctest/test-code-stubs.cc
@@ -193,7 +193,7 @@ TEST(CodeStubMajorKeys) {
#define CHECK_STUB(NAME) \
{ \
HandleScope scope(isolate); \
- NAME##Stub stub_impl(0xabcd, isolate); \
+ NAME##Stub stub_impl(0xABCD, isolate); \
CodeStub* stub = &stub_impl; \
CHECK_EQ(stub->MajorKey(), CodeStub::NAME); \
}
diff --git a/deps/v8/test/cctest/test-compiler.cc b/deps/v8/test/cctest/test-compiler.cc
index 92ed988b06..f9195c57fa 100644
--- a/deps/v8/test/cctest/test-compiler.cc
+++ b/deps/v8/test/cctest/test-compiler.cc
@@ -405,6 +405,7 @@ TEST(OptimizedCodeSharing1) {
}
TEST(CompileFunctionInContext) {
+ if (i::FLAG_always_opt) return;
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
LocalContext env;
@@ -420,6 +421,8 @@ TEST(CompileFunctionInContext) {
0, nullptr, 1, &math)
.ToLocalChecked();
CHECK(!fun.IsEmpty());
+
+ i::DisallowCompilation no_compile(CcTest::i_isolate());
fun->Call(env.local(), env->Global(), 0, nullptr).ToLocalChecked();
CHECK(env->Global()->Has(env.local(), v8_str("a")).FromJust());
v8::Local<v8::Value> a =
@@ -483,7 +486,11 @@ TEST(CompileFunctionInContextArgs) {
v8::ScriptCompiler::CompileFunctionInContext(env.local(), &script_source,
1, &arg, 1, ext)
.ToLocalChecked();
- CHECK(!fun.IsEmpty());
+ CHECK_EQ(1, fun->Get(env.local(), v8_str("length"))
+ .ToLocalChecked()
+ ->ToInt32(env.local())
+ .ToLocalChecked()
+ ->Value());
v8::Local<v8::Value> b_value = v8::Number::New(CcTest::isolate(), 42.0);
fun->Call(env.local(), env->Global(), 1, &b_value).ToLocalChecked();
CHECK(env->Global()->Has(env.local(), v8_str("result")).FromJust());
@@ -531,6 +538,97 @@ TEST(CompileFunctionInContextNonIdentifierArgs) {
.IsEmpty());
}
+TEST(CompileFunctionInContextRenderCallSite) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ LocalContext env;
+ static const char* source1 =
+ "try {"
+ " var a = [];"
+ " a[0]();"
+ "} catch (e) {"
+ " return e.toString();"
+ "}";
+ static const char* expect1 = "TypeError: a[0] is not a function";
+ static const char* source2 =
+ "try {"
+ " (function() {"
+ " var a = [];"
+ " a[0]();"
+ " })()"
+ "} catch (e) {"
+ " return e.toString();"
+ "}";
+ static const char* expect2 = "TypeError: a[0] is not a function";
+ {
+ v8::ScriptCompiler::Source script_source(v8_str(source1));
+ v8::Local<v8::Function> fun =
+ v8::ScriptCompiler::CompileFunctionInContext(
+ env.local(), &script_source, 0, nullptr, 0, nullptr)
+ .ToLocalChecked();
+ CHECK(!fun.IsEmpty());
+ v8::Local<v8::Value> result =
+ fun->Call(env.local(), env->Global(), 0, nullptr).ToLocalChecked();
+ CHECK(result->IsString());
+ CHECK(v8::Local<v8::String>::Cast(result)
+ ->Equals(env.local(), v8_str(expect1))
+ .FromJust());
+ }
+ {
+ v8::ScriptCompiler::Source script_source(v8_str(source2));
+ v8::Local<v8::Function> fun =
+ v8::ScriptCompiler::CompileFunctionInContext(
+ env.local(), &script_source, 0, nullptr, 0, nullptr)
+ .ToLocalChecked();
+ v8::Local<v8::Value> result =
+ fun->Call(env.local(), env->Global(), 0, nullptr).ToLocalChecked();
+ CHECK(result->IsString());
+ CHECK(v8::Local<v8::String>::Cast(result)
+ ->Equals(env.local(), v8_str(expect2))
+ .FromJust());
+ }
+}
+
+TEST(CompileFunctionInContextQuirks) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ LocalContext env;
+ {
+ static const char* source =
+ "[x, y] = ['ab', 'cd'];"
+ "return x + y";
+ static const char* expect = "abcd";
+ v8::ScriptCompiler::Source script_source(v8_str(source));
+ v8::Local<v8::Function> fun =
+ v8::ScriptCompiler::CompileFunctionInContext(
+ env.local(), &script_source, 0, nullptr, 0, nullptr)
+ .ToLocalChecked();
+ v8::Local<v8::Value> result =
+ fun->Call(env.local(), env->Global(), 0, nullptr).ToLocalChecked();
+ CHECK(result->IsString());
+ CHECK(v8::Local<v8::String>::Cast(result)
+ ->Equals(env.local(), v8_str(expect))
+ .FromJust());
+ }
+ {
+ static const char* source = "'use strict'; var a = 077";
+ v8::ScriptCompiler::Source script_source(v8_str(source));
+ v8::TryCatch try_catch(CcTest::isolate());
+ CHECK(v8::ScriptCompiler::CompileFunctionInContext(
+ env.local(), &script_source, 0, nullptr, 0, nullptr)
+ .IsEmpty());
+ CHECK(try_catch.HasCaught());
+ }
+ {
+ static const char* source = "{ let x; { var x } }";
+ v8::ScriptCompiler::Source script_source(v8_str(source));
+ v8::TryCatch try_catch(CcTest::isolate());
+ CHECK(v8::ScriptCompiler::CompileFunctionInContext(
+ env.local(), &script_source, 0, nullptr, 0, nullptr)
+ .IsEmpty());
+ CHECK(try_catch.HasCaught());
+ }
+}
TEST(CompileFunctionInContextScriptOrigin) {
CcTest::InitializeVM();
@@ -559,7 +657,7 @@ TEST(CompileFunctionInContextScriptOrigin) {
CHECK_EQ(42 + strlen("throw "), static_cast<unsigned>(frame->GetColumn()));
}
-TEST(CompileFunctionInContextHarmonyFunctionToString) {
+void TestCompileFunctionInContextToStringImpl() {
#define CHECK_NOT_CAUGHT(__local_context__, try_catch, __op__) \
do { \
const char* op = (__op__); \
@@ -573,9 +671,7 @@ TEST(CompileFunctionInContextHarmonyFunctionToString) {
} \
} while (0)
- auto previous_flag = v8::internal::FLAG_harmony_function_tostring;
- v8::internal::FLAG_harmony_function_tostring = true;
- {
+ { // NOLINT
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
LocalContext env;
@@ -601,7 +697,8 @@ TEST(CompileFunctionInContextHarmonyFunctionToString) {
v8::Local<v8::String> result =
fun->ToString(env.local()).ToLocalChecked();
v8::Local<v8::String> expected = v8_str(
- "function(event){return event\n"
+ "function (event) {\n"
+ "return event\n"
"}");
CHECK(expected->Equals(env.local(), result).FromJust());
}
@@ -625,16 +722,52 @@ TEST(CompileFunctionInContextHarmonyFunctionToString) {
v8::Local<v8::String> result =
fun->ToString(env.local()).ToLocalChecked();
v8::Local<v8::String> expected = v8_str(
- "function(){return 0\n"
+ "function () {\n"
+ "return 0\n"
"}");
CHECK(expected->Equals(env.local(), result).FromJust());
}
- }
- v8::internal::FLAG_harmony_function_tostring = previous_flag;
+ // With a name:
+ {
+ v8::ScriptOrigin origin(v8_str("test"), v8_int(17), v8_int(31));
+ v8::ScriptCompiler::Source script_source(v8_str("return 0"), origin);
+
+ v8::TryCatch try_catch(CcTest::isolate());
+ v8::MaybeLocal<v8::Function> maybe_fun =
+ v8::ScriptCompiler::CompileFunctionInContext(
+ env.local(), &script_source, 0, nullptr, 0, nullptr);
+
+ CHECK_NOT_CAUGHT(env.local(), try_catch,
+ "v8::ScriptCompiler::CompileFunctionInContext");
+
+ v8::Local<v8::Function> fun = maybe_fun.ToLocalChecked();
+ CHECK(!fun.IsEmpty());
+ CHECK(!try_catch.HasCaught());
+
+ fun->SetName(v8_str("onclick"));
+
+ v8::Local<v8::String> result =
+ fun->ToString(env.local()).ToLocalChecked();
+ v8::Local<v8::String> expected = v8_str(
+ "function onclick() {\n"
+ "return 0\n"
+ "}");
+ CHECK(expected->Equals(env.local(), result).FromJust());
+ }
+ }
#undef CHECK_NOT_CAUGHT
}
+TEST(CompileFunctionInContextHarmonyFunctionToString) {
+ v8::internal::FLAG_harmony_function_tostring = true;
+ TestCompileFunctionInContextToStringImpl();
+}
+
+TEST(CompileFunctionInContextFunctionToString) {
+ TestCompileFunctionInContextToStringImpl();
+}
+
TEST(InvocationCount) {
FLAG_allow_natives_syntax = true;
FLAG_always_opt = false;
diff --git a/deps/v8/test/cctest/test-conversions.cc b/deps/v8/test/cctest/test-conversions.cc
index dc6e9fcb9d..e306eb9db1 100644
--- a/deps/v8/test/cctest/test-conversions.cc
+++ b/deps/v8/test/cctest/test-conversions.cc
@@ -45,8 +45,8 @@ TEST(Hex) {
CHECK_EQ(0.0, StringToDouble(&uc, "0X0", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
CHECK_EQ(1.0, StringToDouble(&uc, "0x1", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
CHECK_EQ(16.0, StringToDouble(&uc, "0x10", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
- CHECK_EQ(255.0, StringToDouble(&uc, "0xff",
- ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(255.0,
+ StringToDouble(&uc, "0xFF", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
CHECK_EQ(175.0, StringToDouble(&uc, "0xAF",
ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
@@ -54,7 +54,7 @@ TEST(Hex) {
CHECK_EQ(0.0, StringToDouble(&uc, "0X0", ALLOW_HEX));
CHECK_EQ(1.0, StringToDouble(&uc, "0x1", ALLOW_HEX));
CHECK_EQ(16.0, StringToDouble(&uc, "0x10", ALLOW_HEX));
- CHECK_EQ(255.0, StringToDouble(&uc, "0xff", ALLOW_HEX));
+ CHECK_EQ(255.0, StringToDouble(&uc, "0xFF", ALLOW_HEX));
CHECK_EQ(175.0, StringToDouble(&uc, "0xAF", ALLOW_HEX));
}
diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc
index 6dfd22e34a..cd847893b0 100644
--- a/deps/v8/test/cctest/test-cpu-profiler.cc
+++ b/deps/v8/test/cctest/test-cpu-profiler.cc
@@ -476,6 +476,13 @@ v8::CpuProfile* ProfilerHelper::Run(v8::Local<v8::Function> function,
return profile;
}
+static unsigned TotalHitCount(const v8::CpuProfileNode* node) {
+ unsigned hit_count = node->GetHitCount();
+ for (int i = 0, count = node->GetChildrenCount(); i < count; ++i)
+ hit_count += TotalHitCount(node->GetChild(i));
+ return hit_count;
+}
+
static const v8::CpuProfileNode* FindChild(v8::Local<v8::Context> context,
const v8::CpuProfileNode* node,
const char* name) {
@@ -490,16 +497,22 @@ static const v8::CpuProfileNode* FindChild(v8::Local<v8::Context> context,
return nullptr;
}
+static const v8::CpuProfileNode* FindChild(const v8::CpuProfileNode* node,
+ const char* name) {
+ for (int i = 0, count = node->GetChildrenCount(); i < count; ++i) {
+ const v8::CpuProfileNode* child = node->GetChild(i);
+ if (strcmp(child->GetFunctionNameStr(), name) == 0) {
+ return child;
+ }
+ }
+ return nullptr;
+}
static const v8::CpuProfileNode* GetChild(v8::Local<v8::Context> context,
const v8::CpuProfileNode* node,
const char* name) {
const v8::CpuProfileNode* result = FindChild(context, node, name);
- if (!result) {
- char buffer[100];
- i::SNPrintF(i::ArrayVector(buffer), "Failed to GetChild: %s", name);
- FATAL(buffer);
- }
+ if (!result) FATAL("Failed to GetChild: %s", name);
return result;
}
@@ -1941,7 +1954,7 @@ TEST(CollectDeoptEvents) {
GetBranchDeoptReason(env, iprofile, branch, arraysize(branch));
if (deopt_reason != reason(i::DeoptimizeReason::kNotAHeapNumber) &&
deopt_reason != reason(i::DeoptimizeReason::kNotASmi)) {
- FATAL(deopt_reason);
+ FATAL("%s", deopt_reason);
}
}
{
@@ -1951,7 +1964,7 @@ TEST(CollectDeoptEvents) {
if (deopt_reason != reason(i::DeoptimizeReason::kNaN) &&
deopt_reason != reason(i::DeoptimizeReason::kLostPrecisionOrNaN) &&
deopt_reason != reason(i::DeoptimizeReason::kNotASmi)) {
- FATAL(deopt_reason);
+ FATAL("%s", deopt_reason);
}
}
{
@@ -2374,6 +2387,53 @@ TEST(CodeEntriesMemoryLeak) {
CHECK_GE(10000ul, profiler_listener->entries_count_for_test());
}
+TEST(NativeFrameStackTrace) {
+ // A test for issue https://crbug.com/768540
+  // When a sample lands in a native function that has no EXIT frame, the
+  // stack frame iterator used to bail out and produce an empty stack trace.
+ // The source code below makes v8 call the
+ // v8::internal::StringTable::LookupStringIfExists_NoAllocate native function
+ // without producing an EXIT frame.
+ v8::HandleScope scope(CcTest::isolate());
+ v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
+ v8::Context::Scope context_scope(env);
+
+ const char* source = R"(
+ function jsFunction() {
+ var s = {};
+ for (var i = 0; i < 1e4; ++i) {
+ for (var j = 0; j < 100; j++) {
+ s['item' + j] = 'alph';
+ }
+ }
+ })";
+
+ CompileRun(source);
+ v8::Local<v8::Function> function = GetFunction(env, "jsFunction");
+
+ ProfilerHelper helper(env);
+
+ v8::CpuProfile* profile = helper.Run(function, nullptr, 0, 100, 0, true);
+
+ // Count the fraction of samples landing in 'jsFunction' (valid stack)
+ // vs '(program)' (no stack captured).
+ const v8::CpuProfileNode* root = profile->GetTopDownRoot();
+ const v8::CpuProfileNode* js_function = FindChild(root, "jsFunction");
+ const v8::CpuProfileNode* program = FindChild(root, "(program)");
+ if (program) {
+ unsigned js_function_samples = TotalHitCount(js_function);
+ unsigned program_samples = TotalHitCount(program);
+ double valid_samples_ratio =
+ 1. * js_function_samples / (js_function_samples + program_samples);
+ i::PrintF("Ratio: %f\n", valid_samples_ratio);
+ // TODO(alph): Investigate other causes of dropped frames. The ratio
+ // should be close to 99%.
+ CHECK_GE(valid_samples_ratio, 0.3);
+ }
+
+ profile->Delete();
+}
+
} // namespace test_cpu_profiler
} // namespace internal
} // namespace v8
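
The TotalHitCount and context-free FindChild helpers introduced above feed the ratio check at the end of NativeFrameStackTrace. A minimal sketch of how they compose, with null checks made explicit (the test itself only guards the "(program)" node):

  const v8::CpuProfileNode* root = profile->GetTopDownRoot();
  const v8::CpuProfileNode* js = FindChild(root, "jsFunction");
  const v8::CpuProfileNode* prog = FindChild(root, "(program)");
  if (js && prog) {
    unsigned valid = TotalHitCount(js);      // samples with a usable JS stack
    unsigned dropped = TotalHitCount(prog);  // samples with no stack captured
    double ratio = 1. * valid / (valid + dropped);
  }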
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index 3d6130549f..dd93a7eaf8 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -3951,6 +3951,72 @@ TEST(DebugBreak) {
CheckDebuggerUnloaded();
}
+static void DebugScopingListener(const v8::Debug::EventDetails& event_details) {
+ v8::DebugEvent event = event_details.GetEvent();
+ if (event != v8::Exception) return;
+
+ auto stack_traces = v8::debug::StackTraceIterator::Create(CcTest::isolate());
+ v8::debug::Location location = stack_traces->GetSourceLocation();
+ CHECK_EQ(26, location.GetColumnNumber());
+ CHECK_EQ(0, location.GetLineNumber());
+
+ auto scopes = stack_traces->GetScopeIterator();
+ CHECK_EQ(v8::debug::ScopeIterator::ScopeTypeWith, scopes->GetType());
+ CHECK_EQ(20, scopes->GetStartLocation().GetColumnNumber());
+ CHECK_EQ(31, scopes->GetEndLocation().GetColumnNumber());
+
+ scopes->Advance();
+ CHECK_EQ(v8::debug::ScopeIterator::ScopeTypeLocal, scopes->GetType());
+ CHECK_EQ(0, scopes->GetStartLocation().GetColumnNumber());
+ CHECK_EQ(68, scopes->GetEndLocation().GetColumnNumber());
+
+ scopes->Advance();
+ CHECK_EQ(v8::debug::ScopeIterator::ScopeTypeGlobal, scopes->GetType());
+ CHECK(scopes->GetFunction().IsEmpty());
+
+ scopes->Advance();
+ CHECK(scopes->Done());
+}
+
+TEST(DebugBreakInWrappedScript) {
+ i::FLAG_stress_compaction = false;
+#ifdef VERIFY_HEAP
+ i::FLAG_verify_heap = true;
+#endif
+ DebugLocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ // Register a debug event listener that inspects the scope chain on exception.
+ SetDebugEventListener(isolate, DebugScopingListener);
+
+ static const char* source =
+ // 0 1 2 3 4 5 6 7
+ "try { with({o : []}){ o[0](); } } catch (e) { return e.toString(); }";
+ static const char* expect = "TypeError: o[0] is not a function";
+
+ // For this test, we want to break on uncaught exceptions:
+ ChangeBreakOnException(true, true);
+
+ {
+ v8::ScriptCompiler::Source script_source(v8_str(source));
+ v8::Local<v8::Function> fun =
+ v8::ScriptCompiler::CompileFunctionInContext(
+ env.context(), &script_source, 0, nullptr, 0, nullptr)
+ .ToLocalChecked();
+ v8::Local<v8::Value> result =
+ fun->Call(env.context(), env->Global(), 0, nullptr).ToLocalChecked();
+ CHECK(result->IsString());
+ CHECK(v8::Local<v8::String>::Cast(result)
+ ->Equals(env.context(), v8_str(expect))
+ .FromJust());
+ }
+
+ // Get rid of the debug event listener.
+ SetDebugEventListener(isolate, nullptr);
+ CheckDebuggerUnloaded();
+}
+
TEST(DebugBreakWithoutJS) {
i::FLAG_stress_compaction = false;
#ifdef VERIFY_HEAP
@@ -4123,7 +4189,6 @@ static void NamedGetter(v8::Local<v8::Name> name,
info.GetReturnValue().SetUndefined();
return;
}
- info.GetReturnValue().Set(name);
}
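
The column assertions in DebugScopingListener line up with the wrapped source as follows (0-based columns; annotation only, not part of the test):

  // "try { with({o : []}){ o[0](); } } catch (e) { return e.toString(); }"
  //  column 26: the '(' of the o[0]() call that throws the TypeError
  //  columns 20..31: the with-block, reported as ScopeTypeWith
  //  columns 0..68: the whole wrapped body, reported as ScopeTypeLocal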
diff --git a/deps/v8/test/cctest/test-disasm-arm.cc b/deps/v8/test/cctest/test-disasm-arm.cc
index 253daefa6c..300309244e 100644
--- a/deps/v8/test/cctest/test-disasm-arm.cc
+++ b/deps/v8/test/cctest/test-disasm-arm.cc
@@ -274,7 +274,7 @@ TEST(Type0) {
"e3e03000 mvn r3, #0");
COMPARE(mov(r4, Operand(-2), SetCC, al),
"e3f04001 mvns r4, #1");
- COMPARE(mov(r5, Operand(0x0ffffff0), SetCC, ne),
+ COMPARE(mov(r5, Operand(0x0FFFFFF0), SetCC, ne),
"13f052ff mvnnes r5, #-268435441");
COMPARE(mov(r6, Operand(-1), LeaveCC, ne),
"13e06000 mvnne r6, #0");
@@ -284,7 +284,7 @@ TEST(Type0) {
"e3a03000 mov r3, #0");
COMPARE(mvn(r4, Operand(-2), SetCC, al),
"e3b04001 movs r4, #1");
- COMPARE(mvn(r5, Operand(0x0ffffff0), SetCC, ne),
+ COMPARE(mvn(r5, Operand(0x0FFFFFF0), SetCC, ne),
"13b052ff movnes r5, #-268435441");
COMPARE(mvn(r6, Operand(-1), LeaveCC, ne),
"13a06000 movne r6, #0");
@@ -312,20 +312,20 @@ TEST(Type0) {
COMPARE(movt(r5, 0x4321, ne),
"13445321 movtne r5, #17185");
- COMPARE(movw(r5, 0xabcd, eq),
+ COMPARE(movw(r5, 0xABCD, eq),
"030a5bcd movweq r5, #43981");
}
// Eor doesn't have an eor-negative variant, but we can do an mvn followed by
// an eor to get the same effect.
- COMPARE(eor(r5, r4, Operand(0xffffff34), SetCC, ne),
+ COMPARE(eor(r5, r4, Operand(0xFFFFFF34), SetCC, ne),
"13e050cb mvnne r5, #203",
"10345005 eornes r5, r4, r5");
// and <-> bic.
- COMPARE(and_(r3, r5, Operand(0xfc03ffff)),
+ COMPARE(and_(r3, r5, Operand(0xFC03FFFF)),
"e3c537ff bic r3, r5, #66846720");
- COMPARE(bic(r3, r5, Operand(0xfc03ffff)),
+ COMPARE(bic(r3, r5, Operand(0xFC03FFFF)),
"e20537ff and r3, r5, #66846720");
// sub <-> add.
@@ -345,7 +345,7 @@ TEST(Type0) {
"e12fff3c blx ip");
COMPARE(bkpt(0),
"e1200070 bkpt 0");
- COMPARE(bkpt(0xffff),
+ COMPARE(bkpt(0xFFFF),
"e12fff7f bkpt 65535");
COMPARE(clz(r6, r7),
"e16f6f17 clz r6, r7");
@@ -516,7 +516,7 @@ TEST(msr_mrs_disasm) {
"e169f007 msr SPSR_fc, r7");
// MSR with no mask is UNPREDICTABLE and is rejected by the assembler, but check
// that the disassembler does something sensible.
- COMPARE(dd(0xe120f008), "e120f008 msr CPSR_(none), r8");
+ COMPARE(dd(0xE120F008), "e120f008 msr CPSR_(none), r8");
COMPARE(mrs(r0, CPSR), "e10f0000 mrs r0, CPSR");
COMPARE(mrs(r1, SPSR), "e14f1000 mrs r1, SPSR");
@@ -1478,7 +1478,7 @@ static void TestLoadLiteral(byte* buffer, Assembler* assm, bool* failure,
int offset) {
int pc_offset = assm->pc_offset();
byte *progcounter = &buffer[pc_offset];
- assm->ldr(r0, MemOperand(pc, offset));
+ assm->ldr_pcrel(r0, offset);
const char *expected_string_template =
(offset >= 0) ?
diff --git a/deps/v8/test/cctest/test-disasm-arm64.cc b/deps/v8/test/cctest/test-disasm-arm64.cc
index a3823518fc..13deeb534b 100644
--- a/deps/v8/test/cctest/test-disasm-arm64.cc
+++ b/deps/v8/test/cctest/test-disasm-arm64.cc
@@ -399,7 +399,7 @@ TEST_(add_extended) {
COMPARE(add(x18, x19, Operand(x20, SXTB, 3)), "add x18, x19, w20, sxtb #3");
COMPARE(adds(w21, w22, Operand(w23, SXTH, 2)), "adds w21, w22, w23, sxth #2");
COMPARE(add(x24, x25, Operand(x26, SXTW, 1)), "add x24, x25, w26, sxtw #1");
- COMPARE(adds(cp, jssp, Operand(fp, SXTX)), "adds cp, jssp, fp, sxtx");
+ COMPARE(adds(cp, x28, Operand(fp, SXTX)), "adds cp, x28, fp, sxtx");
COMPARE(cmn(w0, Operand(w1, UXTB, 2)), "cmn w0, w1, uxtb #2");
COMPARE(cmn(x2, Operand(x3, SXTH, 4)), "cmn x2, w3, sxth #4");
@@ -425,7 +425,7 @@ TEST_(sub_extended) {
COMPARE(sub(x18, x19, Operand(x20, SXTB, 3)), "sub x18, x19, w20, sxtb #3");
COMPARE(subs(w21, w22, Operand(w23, SXTH, 2)), "subs w21, w22, w23, sxth #2");
COMPARE(sub(x24, x25, Operand(x26, SXTW, 1)), "sub x24, x25, w26, sxtw #1");
- COMPARE(subs(cp, jssp, Operand(fp, SXTX)), "subs cp, jssp, fp, sxtx");
+ COMPARE(subs(cp, x28, Operand(fp, SXTX)), "subs cp, x28, fp, sxtx");
COMPARE(cmp(w0, Operand(w1, SXTB, 1)), "cmp w0, w1, sxtb #1");
COMPARE(cmp(x2, Operand(x3, UXTH, 3)), "cmp x2, w3, uxth #3");
@@ -891,11 +891,10 @@ TEST_(load_store) {
COMPARE(str(x20, MemOperand(x21, 255, PostIndex)), "str x20, [x21], #255");
COMPARE(str(x22, MemOperand(x23, -256, PostIndex)), "str x22, [x23], #-256");
- // TODO(all): Fix this for jssp.
- COMPARE(ldr(w24, MemOperand(jssp)), "ldr w24, [jssp]");
- COMPARE(ldr(x25, MemOperand(jssp, 8)), "ldr x25, [jssp, #8]");
- COMPARE(str(w26, MemOperand(jssp, 4, PreIndex)), "str w26, [jssp, #4]!");
- COMPARE(str(cp, MemOperand(jssp, -8, PostIndex)), "str cp, [jssp], #-8");
+ COMPARE(ldr(w24, MemOperand(x28)), "ldr w24, [x28]");
+ COMPARE(ldr(x25, MemOperand(x28, 8)), "ldr x25, [x28, #8]");
+ COMPARE(str(w26, MemOperand(x28, 4, PreIndex)), "str w26, [x28, #4]!");
+ COMPARE(str(cp, MemOperand(x28, -8, PostIndex)), "str cp, [x28], #-8");
COMPARE(ldrsw(x0, MemOperand(x1)), "ldrsw x0, [x1]");
COMPARE(ldrsw(x2, MemOperand(x3, 8)), "ldrsw x2, [x3, #8]");
@@ -983,9 +982,8 @@ TEST_(load_store_regoffset) {
COMPARE(strh(w21, MemOperand(x22, x23, SXTX, 1)),
"strh w21, [x22, x23, sxtx #1]");
- // TODO(all): Fix this for jssp.
- COMPARE(ldr(x0, MemOperand(jssp, wzr, SXTW)), "ldr x0, [jssp, wzr, sxtw]");
- COMPARE(str(x1, MemOperand(jssp, xzr)), "str x1, [jssp, xzr]");
+ COMPARE(ldr(x0, MemOperand(x28, wzr, SXTW)), "ldr x0, [x28, wzr, sxtw]");
+ COMPARE(str(x1, MemOperand(x28, xzr)), "str x1, [x28, xzr]");
CLEANUP();
}
@@ -1011,9 +1009,8 @@ TEST_(load_store_byte) {
COMPARE(strb(w24, MemOperand(x25, 255, PostIndex)), "strb w24, [x25], #255");
COMPARE(strb(w26, MemOperand(cp, -256, PostIndex)),
"strb w26, [cp], #-256");
- // TODO(all): Fix this for jssp.
- COMPARE(ldrb(w28, MemOperand(jssp, 3, PostIndex)), "ldrb w28, [jssp], #3");
- COMPARE(strb(fp, MemOperand(jssp, -42, PreIndex)), "strb w29, [jssp, #-42]!");
+ COMPARE(ldrb(w28, MemOperand(x28, 3, PostIndex)), "ldrb w28, [x28], #3");
+ COMPARE(strb(fp, MemOperand(x28, -42, PreIndex)), "strb w29, [x28, #-42]!");
COMPARE(ldrsb(w0, MemOperand(x1)), "ldrsb w0, [x1]");
COMPARE(ldrsb(x2, MemOperand(x3, 8)), "ldrsb x2, [x3, #8]");
COMPARE(ldrsb(w4, MemOperand(x5, 42, PreIndex)), "ldrsb w4, [x5, #42]!");
@@ -1043,9 +1040,8 @@ TEST_(load_store_half) {
COMPARE(strh(w24, MemOperand(x25, 255, PostIndex)), "strh w24, [x25], #255");
COMPARE(strh(w26, MemOperand(cp, -256, PostIndex)),
"strh w26, [cp], #-256");
- // TODO(all): Fix this for jssp.
- COMPARE(ldrh(w28, MemOperand(jssp, 3, PostIndex)), "ldrh w28, [jssp], #3");
- COMPARE(strh(fp, MemOperand(jssp, -42, PreIndex)), "strh w29, [jssp, #-42]!");
+ COMPARE(ldrh(w28, MemOperand(x28, 3, PostIndex)), "ldrh w28, [x28], #3");
+ COMPARE(strh(fp, MemOperand(x28, -42, PreIndex)), "strh w29, [x28, #-42]!");
COMPARE(ldrh(w30, MemOperand(x0, 255)), "ldurh w30, [x0, #255]");
COMPARE(ldrh(x1, MemOperand(x2, -256)), "ldurh w1, [x2, #-256]");
COMPARE(strh(w3, MemOperand(x4, 255)), "sturh w3, [x4, #255]");
@@ -1306,7 +1302,7 @@ TEST_(load_store_unscaled) {
COMPARE(str(w22, MemOperand(x23, -256)), "stur w22, [x23, #-256]");
COMPARE(str(x24, MemOperand(x25, 1)), "stur x24, [x25, #1]");
COMPARE(str(x26, MemOperand(x27, -1)), "stur x26, [cp, #-1]");
- COMPARE(str(x28, MemOperand(x29, 255)), "stur jssp, [fp, #255]");
+ COMPARE(str(x28, MemOperand(x29, 255)), "stur x28, [fp, #255]");
COMPARE(str(x30, MemOperand(x0, -256)), "stur lr, [x0, #-256]");
COMPARE(ldr(w0, MemOperand(csp, 1)), "ldur w0, [csp, #1]");
COMPARE(str(x1, MemOperand(csp, -1)), "stur x1, [csp, #-1]");
@@ -1346,8 +1342,8 @@ TEST_(load_store_pair) {
COMPARE(ldp(x21, x22, MemOperand(x23, -512)), "ldp x21, x22, [x23, #-512]");
COMPARE(ldp(w24, w25, MemOperand(x26, 252, PreIndex)),
"ldp w24, w25, [x26, #252]!");
- COMPARE(ldp(cp, jssp, MemOperand(fp, 504, PreIndex)),
- "ldp cp, jssp, [fp, #504]!");
+ COMPARE(ldp(cp, x28, MemOperand(fp, 504, PreIndex)),
+ "ldp cp, x28, [fp, #504]!");
COMPARE(ldp(w30, w0, MemOperand(x1, -256, PreIndex)),
"ldp w30, w0, [x1, #-256]!");
COMPARE(ldp(x2, x3, MemOperand(x4, -512, PreIndex)),
@@ -1364,8 +1360,8 @@ TEST_(load_store_pair) {
COMPARE(ldp(s17, s18, MemOperand(x19)), "ldp s17, s18, [x19]");
COMPARE(ldp(s20, s21, MemOperand(x22, 252)), "ldp s20, s21, [x22, #252]");
COMPARE(ldp(s23, s24, MemOperand(x25, -256)), "ldp s23, s24, [x25, #-256]");
- COMPARE(ldp(s26, s27, MemOperand(jssp, 252, PreIndex)),
- "ldp s26, s27, [jssp, #252]!");
+ COMPARE(ldp(s26, s27, MemOperand(x28, 252, PreIndex)),
+ "ldp s26, s27, [x28, #252]!");
COMPARE(ldp(s29, s30, MemOperand(fp, -256, PreIndex)),
"ldp s29, s30, [fp, #-256]!");
COMPARE(ldp(s31, s0, MemOperand(x1, 252, PostIndex)),
@@ -1375,8 +1371,8 @@ TEST_(load_store_pair) {
COMPARE(ldp(d17, d18, MemOperand(x19)), "ldp d17, d18, [x19]");
COMPARE(ldp(d20, d21, MemOperand(x22, 504)), "ldp d20, d21, [x22, #504]");
COMPARE(ldp(d23, d24, MemOperand(x25, -512)), "ldp d23, d24, [x25, #-512]");
- COMPARE(ldp(d26, d27, MemOperand(jssp, 504, PreIndex)),
- "ldp d26, d27, [jssp, #504]!");
+ COMPARE(ldp(d26, d27, MemOperand(x28, 504, PreIndex)),
+ "ldp d26, d27, [x28, #504]!");
COMPARE(ldp(d29, d30, MemOperand(fp, -512, PreIndex)),
"ldp d29, d30, [fp, #-512]!");
COMPARE(ldp(d31, d0, MemOperand(x1, 504, PostIndex)),
@@ -1394,8 +1390,8 @@ TEST_(load_store_pair) {
COMPARE(stp(x21, x22, MemOperand(x23, -512)), "stp x21, x22, [x23, #-512]");
COMPARE(stp(w24, w25, MemOperand(x26, 252, PreIndex)),
"stp w24, w25, [x26, #252]!");
- COMPARE(stp(cp, jssp, MemOperand(fp, 504, PreIndex)),
- "stp cp, jssp, [fp, #504]!");
+ COMPARE(stp(cp, x28, MemOperand(fp, 504, PreIndex)),
+ "stp cp, x28, [fp, #504]!");
COMPARE(stp(w30, w0, MemOperand(x1, -256, PreIndex)),
"stp w30, w0, [x1, #-256]!");
COMPARE(stp(x2, x3, MemOperand(x4, -512, PreIndex)),
@@ -1412,8 +1408,8 @@ TEST_(load_store_pair) {
COMPARE(stp(s17, s18, MemOperand(x19)), "stp s17, s18, [x19]");
COMPARE(stp(s20, s21, MemOperand(x22, 252)), "stp s20, s21, [x22, #252]");
COMPARE(stp(s23, s24, MemOperand(x25, -256)), "stp s23, s24, [x25, #-256]");
- COMPARE(stp(s26, s27, MemOperand(jssp, 252, PreIndex)),
- "stp s26, s27, [jssp, #252]!");
+ COMPARE(stp(s26, s27, MemOperand(x28, 252, PreIndex)),
+ "stp s26, s27, [x28, #252]!");
COMPARE(stp(s29, s30, MemOperand(fp, -256, PreIndex)),
"stp s29, s30, [fp, #-256]!");
COMPARE(stp(s31, s0, MemOperand(x1, 252, PostIndex)),
@@ -1423,8 +1419,8 @@ TEST_(load_store_pair) {
COMPARE(stp(d17, d18, MemOperand(x19)), "stp d17, d18, [x19]");
COMPARE(stp(d20, d21, MemOperand(x22, 504)), "stp d20, d21, [x22, #504]");
COMPARE(stp(d23, d24, MemOperand(x25, -512)), "stp d23, d24, [x25, #-512]");
- COMPARE(stp(d26, d27, MemOperand(jssp, 504, PreIndex)),
- "stp d26, d27, [jssp, #504]!");
+ COMPARE(stp(d26, d27, MemOperand(x28, 504, PreIndex)),
+ "stp d26, d27, [x28, #504]!");
COMPARE(stp(d29, d30, MemOperand(fp, -512, PreIndex)),
"stp d29, d30, [fp, #-512]!");
COMPARE(stp(d31, d0, MemOperand(x1, 504, PostIndex)),
@@ -1444,16 +1440,15 @@ TEST_(load_store_pair) {
COMPARE(stp(q23, q24, MemOperand(x25, -1024, PostIndex)),
"stp q23, q24, [x25], #-1024");
- COMPARE(ldp(w16, w17, MemOperand(jssp, 4, PostIndex)),
- "ldp w16, w17, [jssp], #4");
- COMPARE(stp(x18, x19, MemOperand(jssp, -8, PreIndex)),
- "stp x18, x19, [jssp, #-8]!");
- COMPARE(ldp(s30, s31, MemOperand(jssp, 12, PostIndex)),
- "ldp s30, s31, [jssp], #12");
- COMPARE(stp(d30, d31, MemOperand(jssp, -16)),
- "stp d30, d31, [jssp, #-16]");
- COMPARE(ldp(q30, q31, MemOperand(jssp, 32, PostIndex)),
- "ldp q30, q31, [jssp], #32");
+ COMPARE(ldp(w16, w17, MemOperand(x28, 4, PostIndex)),
+ "ldp w16, w17, [x28], #4");
+ COMPARE(stp(x18, x19, MemOperand(x28, -8, PreIndex)),
+ "stp x18, x19, [x28, #-8]!");
+ COMPARE(ldp(s30, s31, MemOperand(x28, 12, PostIndex)),
+ "ldp s30, s31, [x28], #12");
+ COMPARE(stp(d30, d31, MemOperand(x28, -16)), "stp d30, d31, [x28, #-16]");
+ COMPARE(ldp(q30, q31, MemOperand(x28, 32, PostIndex)),
+ "ldp q30, q31, [x28], #32");
COMPARE(ldpsw(x0, x1, MemOperand(x2)), "ldpsw x0, x1, [x2]");
COMPARE(ldpsw(x3, x4, MemOperand(x5, 16)), "ldpsw x3, x4, [x5, #16]");
@@ -1804,7 +1799,7 @@ TEST_(fcvt_scvtf_ucvtf) {
COMPARE(fcvtzu(w6, s5, 32), "fcvtzu w6, s5, #32");
COMPARE(fcvtpu(x24, d25), "fcvtpu x24, d25");
COMPARE(fcvtpu(w26, d27), "fcvtpu w26, d27");
- COMPARE(fcvtps(x28, d29), "fcvtps jssp, d29");
+ COMPARE(fcvtps(x28, d29), "fcvtps x28, d29");
COMPARE(fcvtps(w30, d31), "fcvtps w30, d31");
COMPARE(fcvtpu(x0, s1), "fcvtpu x0, s1");
COMPARE(fcvtpu(w2, s3), "fcvtpu w2, s3");
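
All of the jssp rewrites in this file follow from a single change: jssp was the alias V8 gave x28 while it served as the dedicated JavaScript stack pointer, and with the alias gone the arm64 disassembler prints the architectural register name. A representative before/after pair:

  COMPARE(fcvtps(x28, d29), "fcvtps jssp, d29");  // old expectation
  COMPARE(fcvtps(x28, d29), "fcvtps x28, d29");   // new expectation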
diff --git a/deps/v8/test/cctest/test-disasm-ia32.cc b/deps/v8/test/cctest/test-disasm-ia32.cc
index 66716e9d44..35638c723a 100644
--- a/deps/v8/test/cctest/test-disasm-ia32.cc
+++ b/deps/v8/test/cctest/test-disasm-ia32.cc
@@ -481,6 +481,8 @@ TEST(DisasmIa320) {
__ maxsd(xmm1, Operand(ebx, ecx, times_4, 10000));
__ ucomisd(xmm0, xmm1);
__ cmpltsd(xmm0, xmm1);
+ __ haddps(xmm1, xmm0);
+ __ haddps(xmm1, Operand(ebx, ecx, times_4, 10000));
__ andpd(xmm0, xmm1);
@@ -550,6 +552,8 @@ TEST(DisasmIa320) {
__ pextrw(Operand(edx, 4), xmm0, 1);
__ pextrd(eax, xmm0, 1);
__ pextrd(Operand(edx, 4), xmm0, 1);
+ __ insertps(xmm1, xmm2, 0);
+ __ insertps(xmm1, Operand(edx, 4), 0);
__ pinsrb(xmm1, eax, 0);
__ pinsrb(xmm1, Operand(edx, 4), 0);
__ pinsrd(xmm1, eax, 0);
@@ -611,6 +615,9 @@ TEST(DisasmIa320) {
__ vrcpps(xmm1, Operand(ebx, ecx, times_4, 10000));
__ vrsqrtps(xmm1, xmm0);
__ vrsqrtps(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ vmovaps(xmm0, xmm1);
+ __ vshufps(xmm0, xmm1, xmm2, 3);
+ __ vshufps(xmm0, xmm1, Operand(edx, 4), 3);
__ vcmpeqps(xmm5, xmm4, xmm1);
__ vcmpeqps(xmm5, xmm4, Operand(ebx, ecx, times_4, 10000));
@@ -655,6 +662,8 @@ TEST(DisasmIa320) {
__ vpextrw(Operand(edx, 4), xmm0, 1);
__ vpextrd(eax, xmm0, 1);
__ vpextrd(Operand(edx, 4), xmm0, 1);
+ __ vinsertps(xmm0, xmm1, xmm2, 0);
+ __ vinsertps(xmm0, xmm1, Operand(edx, 4), 0);
__ vpinsrb(xmm0, xmm1, eax, 0);
__ vpinsrb(xmm0, xmm1, Operand(edx, 4), 0);
__ vpinsrw(xmm0, xmm1, eax, 0);
@@ -667,6 +676,8 @@ TEST(DisasmIa320) {
__ vcvttps2dq(xmm1, xmm0);
__ vcvttps2dq(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ vmovdqu(xmm0, Operand(ebx, ecx, times_4, 10000));
+ __ vmovdqu(Operand(ebx, ecx, times_4, 10000), xmm0);
__ vmovd(xmm0, edi);
__ vmovd(xmm0, Operand(ebx, ecx, times_4, 10000));
__ vmovd(eax, xmm1);
diff --git a/deps/v8/test/cctest/test-disasm-mips.cc b/deps/v8/test/cctest/test-disasm-mips.cc
index 04f007fcb3..e394f9428a 100644
--- a/deps/v8/test/cctest/test-disasm-mips.cc
+++ b/deps/v8/test/cctest/test-disasm-mips.cc
@@ -144,14 +144,14 @@ if (failure) { \
#define COMPARE_PC_JUMP(asm_, compare_string, target) \
{ \
int pc_offset = assm.pc_offset(); \
- byte *progcounter = &buffer[pc_offset]; \
+ byte* progcounter = &buffer[pc_offset]; \
char str_with_address[100]; \
int instr_index = (target >> 2) & kImm26Mask; \
snprintf( \
str_with_address, sizeof(str_with_address), "%s %p -> %p", \
- compare_string, reinterpret_cast<void *>(target), \
- reinterpret_cast<void *>(((uint32_t)(progcounter + 4) & ~0xfffffff) | \
- (instr_index << 2))); \
+ compare_string, reinterpret_cast<void*>(target), \
+ reinterpret_cast<void*>(((uint32_t)(progcounter + 4) & ~0xFFFFFFF) | \
+ (instr_index << 2))); \
assm.asm_; \
if (!DisassembleAndCompare(progcounter, str_with_address)) failure = true; \
}
@@ -159,11 +159,10 @@ if (failure) { \
#define GET_PC_REGION(pc_region) \
{ \
int pc_offset = assm.pc_offset(); \
- byte *progcounter = &buffer[pc_offset]; \
- pc_region = reinterpret_cast<int32_t>(progcounter + 4) & ~0xfffffff; \
+ byte* progcounter = &buffer[pc_offset]; \
+ pc_region = reinterpret_cast<int32_t>(progcounter + 4) & ~0xFFFFFFF; \
}
-
TEST(Type0) {
SET_UP();
@@ -504,12 +503,12 @@ TEST(Type0) {
int32_t target = pc_region | 0x4;
COMPARE_PC_JUMP(j(target), "08000001 j ", target);
- target = pc_region | 0xffffffc;
+ target = pc_region | 0xFFFFFFC;
COMPARE_PC_JUMP(j(target), "0bffffff j ", target);
target = pc_region | 0x4;
COMPARE_PC_JUMP(jal(target), "0c000001 jal ", target);
- target = pc_region | 0xffffffc;
+ target = pc_region | 0xFFFFFFC;
COMPARE_PC_JUMP(jal(target), "0fffffff jal ", target);
COMPARE(addiu(a0, a1, 0x0),
diff --git a/deps/v8/test/cctest/test-disasm-mips64.cc b/deps/v8/test/cctest/test-disasm-mips64.cc
index 0405a82e5d..0a16e860b3 100644
--- a/deps/v8/test/cctest/test-disasm-mips64.cc
+++ b/deps/v8/test/cctest/test-disasm-mips64.cc
@@ -144,14 +144,14 @@ if (failure) { \
#define COMPARE_PC_JUMP(asm_, compare_string, target) \
{ \
int pc_offset = assm.pc_offset(); \
- byte *progcounter = &buffer[pc_offset]; \
+ byte* progcounter = &buffer[pc_offset]; \
char str_with_address[100]; \
int instr_index = (target >> 2) & kImm26Mask; \
snprintf( \
str_with_address, sizeof(str_with_address), "%s %p -> %p", \
- compare_string, reinterpret_cast<void *>(target), \
- reinterpret_cast<void *>(((uint64_t)(progcounter + 1) & ~0xfffffff) | \
- (instr_index << 2))); \
+ compare_string, reinterpret_cast<void*>(target), \
+ reinterpret_cast<void*>(((uint64_t)(progcounter + 1) & ~0xFFFFFFF) | \
+ (instr_index << 2))); \
assm.asm_; \
if (!DisassembleAndCompare(progcounter, str_with_address)) failure = true; \
}
@@ -159,11 +159,10 @@ if (failure) { \
#define GET_PC_REGION(pc_region) \
{ \
int pc_offset = assm.pc_offset(); \
- byte *progcounter = &buffer[pc_offset]; \
- pc_region = reinterpret_cast<int64_t>(progcounter + 4) & ~0xfffffff; \
+ byte* progcounter = &buffer[pc_offset]; \
+ pc_region = reinterpret_cast<int64_t>(progcounter + 4) & ~0xFFFFFFF; \
}
-
TEST(Type0) {
SET_UP();
@@ -972,7 +971,7 @@ TEST(Type3) {
"60a48000 bnvc a1, a0, -32768", -32768);
COMPARE_PC_REL_COMPACT(beqzc(a0, 0), "d8800000 beqzc a0, 0", 0);
- COMPARE_PC_REL_COMPACT(beqzc(a0, 1048575), // 0x0fffff == 1048575.
+ COMPARE_PC_REL_COMPACT(beqzc(a0, 1048575), // 0x0FFFFF == 1048575.
"d88fffff beqzc a0, 1048575", 1048575);
COMPARE_PC_REL_COMPACT(beqzc(a0, -1048576),  // -0x100000 == -1048576.
"d8900000 beqzc a0, -1048576", -1048576);
@@ -1184,12 +1183,12 @@ TEST(Type3) {
int64_t target = pc_region | 0x4;
COMPARE_PC_JUMP(j(target), "08000001 j ", target);
- target = pc_region | 0xffffffc;
+ target = pc_region | 0xFFFFFFC;
COMPARE_PC_JUMP(j(target), "0bffffff j ", target);
target = pc_region | 0x4;
COMPARE_PC_JUMP(jal(target), "0c000001 jal ", target);
- target = pc_region | 0xffffffc;
+ target = pc_region | 0xFFFFFFC;
COMPARE_PC_JUMP(jal(target), "0fffffff jal ", target);
VERIFY_RUN();
diff --git a/deps/v8/test/cctest/test-disasm-x64.cc b/deps/v8/test/cctest/test-disasm-x64.cc
index 10608f1877..bbc1f89480 100644
--- a/deps/v8/test/cctest/test-disasm-x64.cc
+++ b/deps/v8/test/cctest/test-disasm-x64.cc
@@ -386,6 +386,10 @@ TEST(DisasmX64) {
__ cvtsd2ss(xmm0, xmm1);
__ cvtsd2ss(xmm0, Operand(rbx, rcx, times_4, 10000));
__ movaps(xmm0, xmm1);
+ __ movdqa(xmm0, Operand(rsp, 12));
+ __ movdqa(Operand(rsp, 12), xmm0);
+ __ movdqu(xmm0, Operand(rsp, 12));
+ __ movdqu(Operand(rsp, 12), xmm0);
__ shufps(xmm0, xmm9, 0x0);
// logic operation
@@ -451,6 +455,8 @@ TEST(DisasmX64) {
__ maxsd(xmm1, xmm0);
__ maxsd(xmm1, Operand(rbx, rcx, times_4, 10000));
__ ucomisd(xmm0, xmm1);
+ __ haddps(xmm1, xmm0);
+ __ haddps(xmm1, Operand(rbx, rcx, times_4, 10000));
__ andpd(xmm0, xmm1);
__ andpd(xmm0, Operand(rbx, rcx, times_4, 10000));
diff --git a/deps/v8/test/cctest/test-diy-fp.cc b/deps/v8/test/cctest/test-diy-fp.cc
index 4c597883f1..d46d0519d6 100644
--- a/deps/v8/test/cctest/test-diy-fp.cc
+++ b/deps/v8/test/cctest/test-diy-fp.cc
@@ -73,7 +73,7 @@ TEST(Multiply) {
CHECK_EQ(1, product.f());
CHECK_EQ(11 + 13 + 64, product.e());
- diy_fp1 = DiyFp(V8_2PART_UINT64_C(0x7fffffff, ffffffff), 11);
+ diy_fp1 = DiyFp(V8_2PART_UINT64_C(0x7FFFFFFF, FFFFFFFF), 11);
diy_fp2 = DiyFp(1, 13);
product = DiyFp::Times(diy_fp1, diy_fp2);
CHECK_EQ(0, product.f());
@@ -84,9 +84,9 @@ TEST(Multiply) {
// Big numbers.
diy_fp1 = DiyFp(V8_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFF), 11);
diy_fp2 = DiyFp(V8_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFF), 13);
- // 128bit result: 0xfffffffffffffffe0000000000000001
+ // 128bit result: 0xFFFFFFFFFFFFFFFE0000000000000001
product = DiyFp::Times(diy_fp1, diy_fp2);
- CHECK(V8_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFe) == product.f());
+ CHECK(V8_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFE) == product.f());
CHECK_EQ(11 + 13 + 64, product.e());
}
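
The expected values in the big-numbers case follow from 64x64 -> 128-bit arithmetic:

  (2^64 - 1) * (2^64 - 1) = 2^128 - 2^65 + 1
                          = 0xFFFFFFFFFFFFFFFE'0000000000000001

DiyFp::Times keeps the upper 64 bits of the product, and the low half 0x...0001 falls below the rounding threshold, so f() is 0xFFFFFFFFFFFFFFFE; the result exponent is the sum of the operand exponents plus 64 for the discarded low word, hence 11 + 13 + 64.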
diff --git a/deps/v8/test/cctest/test-double.cc b/deps/v8/test/cctest/test-double.cc
index 81a06bf997..46d6d55cc1 100644
--- a/deps/v8/test/cctest/test-double.cc
+++ b/deps/v8/test/cctest/test-double.cc
@@ -45,7 +45,7 @@ TEST(Uint64Conversions) {
uint64_t min_double64 = V8_2PART_UINT64_C(0x00000000, 00000001);
CHECK_EQ(5e-324, Double(min_double64).value());
- uint64_t max_double64 = V8_2PART_UINT64_C(0x7fefffff, ffffffff);
+ uint64_t max_double64 = V8_2PART_UINT64_C(0x7FEFFFFF, FFFFFFFF);
CHECK_EQ(1.7976931348623157e308, Double(max_double64).value());
}
@@ -63,10 +63,10 @@ TEST(AsDiyFp) {
// This is a denormal; so no hidden bit.
CHECK_EQ(1, diy_fp.f());
- uint64_t max_double64 = V8_2PART_UINT64_C(0x7fefffff, ffffffff);
+ uint64_t max_double64 = V8_2PART_UINT64_C(0x7FEFFFFF, FFFFFFFF);
diy_fp = Double(max_double64).AsDiyFp();
CHECK_EQ(0x7FE - 0x3FF - 52, diy_fp.e());
- CHECK(V8_2PART_UINT64_C(0x001fffff, ffffffff) == diy_fp.f()); // NOLINT
+ CHECK(V8_2PART_UINT64_C(0x001FFFFF, FFFFFFFF) == diy_fp.f()); // NOLINT
}
@@ -83,10 +83,10 @@ TEST(AsNormalizedDiyFp) {
// This is a denormal; so no hidden bit.
CHECK(V8_2PART_UINT64_C(0x80000000, 00000000) == diy_fp.f()); // NOLINT
- uint64_t max_double64 = V8_2PART_UINT64_C(0x7fefffff, ffffffff);
+ uint64_t max_double64 = V8_2PART_UINT64_C(0x7FEFFFFF, FFFFFFFF);
diy_fp = Double(max_double64).AsNormalizedDiyFp();
CHECK_EQ(0x7FE - 0x3FF - 52 - 11, diy_fp.e());
- CHECK((V8_2PART_UINT64_C(0x001fffff, ffffffff) << 11) ==
+ CHECK((V8_2PART_UINT64_C(0x001FFFFF, FFFFFFFF) << 11) ==
diy_fp.f()); // NOLINT
}
@@ -202,7 +202,7 @@ TEST(NormalizedBoundaries) {
CHECK(diy_fp.f() - boundary_minus.f() == boundary_plus.f() - diy_fp.f());
CHECK((1 << 11) == diy_fp.f() - boundary_minus.f()); // NOLINT
- uint64_t max_double64 = V8_2PART_UINT64_C(0x7fefffff, ffffffff);
+ uint64_t max_double64 = V8_2PART_UINT64_C(0x7FEFFFFF, FFFFFFFF);
diy_fp = Double(max_double64).AsNormalizedDiyFp();
Double(max_double64).NormalizedBoundaries(&boundary_minus, &boundary_plus);
CHECK_EQ(diy_fp.e(), boundary_minus.e());
@@ -226,7 +226,7 @@ TEST(NextDouble) {
CHECK_EQ(4e-324, d2.NextDouble());
CHECK_EQ(-1.7976931348623157e308, Double(-V8_INFINITY).NextDouble());
CHECK_EQ(V8_INFINITY,
- Double(V8_2PART_UINT64_C(0x7fefffff, ffffffff)).NextDouble());
+ Double(V8_2PART_UINT64_C(0x7FEFFFFF, FFFFFFFF)).NextDouble());
}
} // namespace internal
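
Both boundary constants decode directly as IEEE-754 binary64 bit patterns: 0x00000000'00000001 has a zero biased exponent and a fraction of 1, giving the smallest denormal 2^-1074 (printed as 5e-324), while 0x7FEFFFFF'FFFFFFFF has biased exponent 0x7FE (unbiased 1023) and an all-ones fraction, giving (2 - 2^-52) * 2^1023 = 1.7976931348623157e308, the largest finite double.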
diff --git a/deps/v8/test/cctest/test-feedback-vector.cc b/deps/v8/test/cctest/test-feedback-vector.cc
index 1dd99c5362..e590b60649 100644
--- a/deps/v8/test/cctest/test-feedback-vector.cc
+++ b/deps/v8/test/cctest/test-feedback-vector.cc
@@ -257,12 +257,12 @@ TEST(VectorCallCounts) {
CompileRun("f(foo); f(foo);");
CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
- CHECK_EQ(3, nexus.ExtractCallCount());
+ CHECK_EQ(3, nexus.GetCallCount());
// Send the IC megamorphic, but we should still have incrementing counts.
CompileRun("f(function() { return 12; });");
CHECK_EQ(GENERIC, nexus.StateFromFeedback());
- CHECK_EQ(4, nexus.ExtractCallCount());
+ CHECK_EQ(4, nexus.GetCallCount());
}
TEST(VectorConstructCounts) {
@@ -288,12 +288,42 @@ TEST(VectorConstructCounts) {
CompileRun("f(Foo); f(Foo);");
CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
- CHECK_EQ(3, nexus.ExtractCallCount());
+ CHECK_EQ(3, nexus.GetCallCount());
// Send the IC megamorphic, but we should still have incrementing counts.
CompileRun("f(function() {});");
CHECK_EQ(GENERIC, nexus.StateFromFeedback());
- CHECK_EQ(4, nexus.ExtractCallCount());
+ CHECK_EQ(4, nexus.GetCallCount());
+}
+
+TEST(VectorSpeculationMode) {
+ if (i::FLAG_always_opt) return;
+ CcTest::InitializeVM();
+ LocalContext context;
+ v8::HandleScope scope(context->GetIsolate());
+ Isolate* isolate = CcTest::i_isolate();
+
+ // Make sure function f has a call that uses a type feedback slot.
+ CompileRun(
+ "function Foo() {}"
+ "function f(a) { new a(); } f(Foo);");
+ Handle<JSFunction> f = GetFunction("f");
+ Handle<FeedbackVector> feedback_vector =
+ Handle<FeedbackVector>(f->feedback_vector(), isolate);
+
+ FeedbackSlot slot(0);
+ CallICNexus nexus(feedback_vector, slot);
+ CHECK_EQ(SpeculationMode::kAllowSpeculation, nexus.GetSpeculationMode());
+
+ CompileRun("f(Foo); f(Foo);");
+ CHECK_EQ(3, nexus.GetCallCount());
+ CHECK_EQ(SpeculationMode::kAllowSpeculation, nexus.GetSpeculationMode());
+
+ nexus.SetSpeculationMode(SpeculationMode::kAllowSpeculation);
+ nexus.SetSpeculationMode(SpeculationMode::kDisallowSpeculation);
+ CHECK_EQ(SpeculationMode::kDisallowSpeculation, nexus.GetSpeculationMode());
+ nexus.SetSpeculationMode(SpeculationMode::kAllowSpeculation);
+ CHECK_EQ(SpeculationMode::kAllowSpeculation, nexus.GetSpeculationMode());
}
TEST(VectorLoadICStates) {
diff --git a/deps/v8/test/cctest/test-field-type-tracking.cc b/deps/v8/test/cctest/test-field-type-tracking.cc
index 9622da53b8..8736c9c5db 100644
--- a/deps/v8/test/cctest/test-field-type-tracking.cc
+++ b/deps/v8/test/cctest/test-field-type-tracking.cc
@@ -2728,7 +2728,7 @@ TEST(HoleyMutableHeapNumber) {
CHECK_EQ(kHoleNanInt64, mhn->value_as_bits());
mhn = isolate->factory()->NewHeapNumber(0.0, MUTABLE);
- CHECK_EQ(V8_UINT64_C(0), mhn->value_as_bits());
+ CHECK_EQ(uint64_t{0}, mhn->value_as_bits());
mhn->set_value_as_bits(kHoleNanInt64);
CHECK_EQ(kHoleNanInt64, mhn->value_as_bits());
diff --git a/deps/v8/test/cctest/test-func-name-inference.cc b/deps/v8/test/cctest/test-func-name-inference.cc
index e5ccbc3275..783ab3da83 100644
--- a/deps/v8/test/cctest/test-func-name-inference.cc
+++ b/deps/v8/test/cctest/test-func-name-inference.cc
@@ -555,3 +555,31 @@ TEST(ReturnAnonymousFunction) {
script->Run(CcTest::isolate()->GetCurrentContext()).ToLocalChecked();
CheckFunctionName(script, "return 2012", "");
}
+
+TEST(IgnoreExtendsClause) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+
+ v8::Local<v8::Script> script =
+ Compile(CcTest::isolate(),
+ "(function() {\n"
+ " var foo = {};\n"
+ " foo.C = class {}\n"
+ " class D extends foo.C {}\n"
+ " foo.bar = function() { return 1; };\n"
+ "})()");
+ script->Run(CcTest::isolate()->GetCurrentContext()).ToLocalChecked();
+ CheckFunctionName(script, "return 1", "foo.bar");
+}
+
+TEST(ParameterAndArrow) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+
+ v8::Local<v8::Script> script = Compile(CcTest::isolate(),
+ "(function(param) {\n"
+ " (() => { return 2017 })();\n"
+ "})()");
+ script->Run(CcTest::isolate()->GetCurrentContext()).ToLocalChecked();
+ CheckFunctionName(script, "return 2017", "");
+}
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index 5a94708ba8..d9f919fbee 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -875,8 +875,7 @@ class TestJSONStream : public v8::OutputStream {
return kContinue;
}
virtual WriteResult WriteUint32Chunk(uint32_t* buffer, int chars_written) {
- CHECK(false);
- return kAbort;
+ UNREACHABLE();
}
void WriteTo(i::Vector<char> dest) { buffer_.WriteTo(dest); }
int eos_signaled() { return eos_signaled_; }
@@ -1064,8 +1063,7 @@ class TestStatsStream : public v8::OutputStream {
virtual ~TestStatsStream() {}
virtual void EndOfStream() { ++eos_signaled_; }
virtual WriteResult WriteAsciiChunk(char* buffer, int chars_written) {
- CHECK(false);
- return kAbort;
+ UNREACHABLE();
}
virtual WriteResult WriteHeapStatsChunk(v8::HeapStatsUpdate* buffer,
int updates_written) {
@@ -1460,8 +1458,7 @@ class TestRetainedObjectInfo : public v8::RetainedObjectInfo {
return new TestRetainedObjectInfo(2, "ccc-group", "ccc");
}
}
- CHECK(false);
- return nullptr;
+ UNREACHABLE();
}
static std::vector<TestRetainedObjectInfo*> instances;
@@ -2816,7 +2813,7 @@ TEST(AddressToTraceMap) {
// [0x100, 0x200) -> 1, [0x200, 0x300) -> 2
map.AddRange(ToAddress(0x200), 0x100, 2U);
- CHECK_EQ(2u, map.GetTraceNodeId(ToAddress(0x2a0)));
+ CHECK_EQ(2u, map.GetTraceNodeId(ToAddress(0x2A0)));
CHECK_EQ(2u, map.size());
// [0x100, 0x180) -> 1, [0x180, 0x280) -> 3, [0x280, 0x300) -> 2
@@ -3139,6 +3136,31 @@ TEST(SamplingHeapProfilerPretenuredInlineAllocations) {
CHECK_GE(count, 8000);
}
+TEST(SamplingHeapProfilerLargeInterval) {
+ v8::HandleScope scope(v8::Isolate::GetCurrent());
+ LocalContext env;
+ v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
+
+ // Suppress randomness to avoid flakiness in tests.
+ v8::internal::FLAG_sampling_heap_profiler_suppress_randomness = true;
+
+ heap_profiler->StartSamplingHeapProfiler(512 * 1024);
+
+ for (int i = 0; i < 8 * 1024; ++i) {
+ CcTest::i_isolate()->factory()->NewFixedArray(1024);
+ }
+
+ std::unique_ptr<v8::AllocationProfile> profile(
+ heap_profiler->GetAllocationProfile());
+ CHECK(profile);
+ const char* names[] = {"(EXTERNAL)"};
+ auto node = FindAllocationProfileNode(env->GetIsolate(), *profile,
+ ArrayVector(names));
+ CHECK(node);
+
+ heap_profiler->StopSamplingHeapProfiler();
+}
+
TEST(SamplingHeapProfilerSampleDuringDeopt) {
i::FLAG_allow_natives_syntax = true;
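
A rough volume estimate for SamplingHeapProfilerLargeInterval, with sizes that are assumptions rather than anything the test states: a FixedArray of 1024 tagged values is about 4 KiB on a 32-bit heap (twice that on 64-bit), so 8 * 1024 iterations allocate on the order of 32 MiB against the 512 KiB sampling interval, enough to guarantee samples even with randomness suppressed. The arrays are allocated from C++ through the factory rather than from JS, which is presumably why the profile is searched for the "(EXTERNAL)" node.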
diff --git a/deps/v8/test/cctest/test-inobject-slack-tracking.cc b/deps/v8/test/cctest/test-inobject-slack-tracking.cc
index 48ec9e18cd..be6a71b0e3 100644
--- a/deps/v8/test/cctest/test-inobject-slack-tracking.cc
+++ b/deps/v8/test/cctest/test-inobject-slack-tracking.cc
@@ -622,6 +622,12 @@ static void TestSubclassChain(const std::vector<int>& hierarchy_desc) {
TestClassHierarchy(hierarchy_desc, static_cast<int>(hierarchy_desc.size()));
}
+TEST(Subclasses) {
+ std::vector<int> hierarchy_desc;
+ hierarchy_desc.push_back(50);
+ hierarchy_desc.push_back(128);
+ TestSubclassChain(hierarchy_desc);
+}
TEST(LongSubclassChain1) {
std::vector<int> hierarchy_desc;
diff --git a/deps/v8/test/cctest/test-log-stack-tracer.cc b/deps/v8/test/cctest/test-log-stack-tracer.cc
index 9c25f3ce3e..24d6d9c8ba 100644
--- a/deps/v8/test/cctest/test-log-stack-tracer.cc
+++ b/deps/v8/test/cctest/test-log-stack-tracer.cc
@@ -86,7 +86,7 @@ static void construct_call(const v8::FunctionCallbackInfo<v8::Value>& args) {
.FromJust();
#elif defined(V8_HOST_ARCH_64_BIT)
uint64_t fp = reinterpret_cast<uint64_t>(calling_frame->fp());
- int32_t low_bits = static_cast<int32_t>(fp & 0xffffffff);
+ int32_t low_bits = static_cast<int32_t>(fp & 0xFFFFFFFF);
int32_t high_bits = static_cast<int32_t>(fp >> 32);
args.This()->Set(context, v8_str("low_bits"), v8_num(low_bits)).FromJust();
args.This()->Set(context, v8_str("high_bits"), v8_num(high_bits)).FromJust();
diff --git a/deps/v8/test/cctest/test-log.cc b/deps/v8/test/cctest/test-log.cc
index 05ae2e8fcd..0579010292 100644
--- a/deps/v8/test/cctest/test-log.cc
+++ b/deps/v8/test/cctest/test-log.cc
@@ -648,14 +648,12 @@ TEST(EquivalenceOfLoggingAndTraversal) {
v8::Local<v8::Script> script = CompileWithOrigin(source_str, "");
if (script.IsEmpty()) {
v8::String::Utf8Value exception(isolate, try_catch.Exception());
- printf("compile: %s\n", *exception);
- CHECK(false);
+ FATAL("compile: %s\n", *exception);
}
v8::Local<v8::Value> result;
if (!script->Run(logger.env()).ToLocal(&result)) {
v8::String::Utf8Value exception(isolate, try_catch.Exception());
- printf("run: %s\n", *exception);
- CHECK(false);
+ FATAL("run: %s\n", *exception);
}
// The result will either be the "true" literal or a problem description.
if (!result->IsTrue()) {
@@ -663,10 +661,7 @@ TEST(EquivalenceOfLoggingAndTraversal) {
i::ScopedVector<char> data(s->Utf8Length() + 1);
CHECK(data.start());
s->WriteUtf8(data.start());
- printf("%s\n", data.start());
- // Make sure that our output is written prior crash due to CHECK failure.
- fflush(stdout);
- CHECK(false);
+ FATAL("%s\n", data.start());
}
}
isolate->Dispose();
@@ -885,6 +880,9 @@ TEST(ConsoleTimeEvents) {
}
TEST(LogFunctionEvents) {
+ // Always-opt and stress-opt break the fine-grained log order.
+ if (i::FLAG_always_opt) return;
+
SETUP_FLAGS();
i::FLAG_log_function_events = true;
v8::Isolate::CreateParams create_params;
@@ -936,14 +934,21 @@ TEST(LogFunctionEvents) {
// - execute eager functions.
{"function,parse-function,", ",lazyFunction"},
{"function,compile-lazy,", ",lazyFunction"},
+ {"function,first-execution,", ",lazyFunction"},
{"function,parse-function,", ",lazyInnerFunction"},
{"function,compile-lazy,", ",lazyInnerFunction"},
+ {"function,first-execution,", ",lazyInnerFunction"},
+
+ {"function,first-execution,", ",eagerFunction"},
{"function,parse-function,", ",Foo"},
{"function,compile-lazy,", ",Foo"},
+ {"function,first-execution,", ",Foo"},
+
{"function,parse-function,", ",Foo.foo"},
{"function,compile-lazy,", ",Foo.foo"},
+ {"function,first-execution,", ",Foo.foo"},
};
logger.FindLogLines(pairs, arraysize(pairs), start);
}
diff --git a/deps/v8/test/cctest/test-macro-assembler-arm.cc b/deps/v8/test/cctest/test-macro-assembler-arm.cc
index 0becfa52ab..acef2731b9 100644
--- a/deps/v8/test/cctest/test-macro-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-arm.cc
@@ -27,10 +27,10 @@
#include <stdlib.h>
-#include "src/arm/simulator-arm.h"
#include "src/assembler-inl.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
+#include "src/simulator.h"
#include "src/v8.h"
#include "test/cctest/cctest.h"
@@ -38,12 +38,12 @@ namespace v8 {
namespace internal {
namespace test_macro_assembler_arm {
-typedef void* (*F)(int x, int y, int p2, int p3, int p4);
+using F = void*(int x, int y, int p2, int p3, int p4);
#define __ masm->
-typedef Object* (*F3)(void* p0, int p1, int p2, int p3, int p4);
-typedef int (*F5)(void*, void*, void*, void*, void*);
+using F3 = Object*(void* p0, int p1, int p2, int p3, int p4);
+using F5 = int(void*, void*, void*, void*, void*);
TEST(LoadAndStoreWithRepresentation) {
Isolate* isolate = CcTest::i_isolate();
@@ -129,8 +129,8 @@ TEST(LoadAndStoreWithRepresentation) {
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
// Call the function from C++.
- F5 f = FUNCTION_CAST<F5>(code->entry());
- CHECK(!CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ auto f = GeneratedCode<F5>::FromCode(*code);
+ CHECK(!f.Call(0, 0, 0, 0, 0));
}
TEST(ExtractLane) {
@@ -239,9 +239,8 @@ TEST(ExtractLane) {
OFStream os(stdout);
code->Print(os);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
- USE(dummy);
+ auto f = GeneratedCode<F3>::FromCode(*code);
+ f.Call(&t, 0, 0, 0, 0);
for (int i = 0; i < 4; i++) {
CHECK_EQ(i, t.i32x4_low[i]);
CHECK_EQ(i, t.f32x4_low[i]);
@@ -372,9 +371,8 @@ TEST(ReplaceLane) {
OFStream os(stdout);
code->Print(os);
#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
- USE(dummy);
+ auto f = GeneratedCode<F3>::FromCode(*code);
+ f.Call(&t, 0, 0, 0, 0);
for (int i = 0; i < 4; i++) {
CHECK_EQ(i, t.i32x4_low[i]);
CHECK_EQ(i, t.f32x4_low[i]);
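
The mechanical change repeated across the macro-assembler tests: the FUNCTION_CAST plus CALL_GENERATED_CODE pair becomes the GeneratedCode<F> wrapper from src/simulator.h (hence the include swap at the top of the file), and the signature typedefs become plain function types because that is what the template parameter expects. The before/after shape, lifted from the hunks above:

  // Before: cast the entry point and call through a macro.
  typedef Object* (*F3)(void* p0, int p1, int p2, int p3, int p4);
  F3 f = FUNCTION_CAST<F3>(code->entry());
  Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
  USE(dummy);

  // After: a typed wrapper; Call() runs natively or via the simulator.
  using F3 = Object*(void* p0, int p1, int p2, int p3, int p4);
  auto f = GeneratedCode<F3>::FromCode(*code);
  f.Call(&t, 0, 0, 0, 0);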
diff --git a/deps/v8/test/cctest/test-macro-assembler-mips.cc b/deps/v8/test/cctest/test-macro-assembler-mips.cc
index 7879ff2622..66d6c4bf9d 100644
--- a/deps/v8/test/cctest/test-macro-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-mips.cc
@@ -32,17 +32,18 @@
#include "src/base/utils/random-number-generator.h"
#include "src/macro-assembler.h"
#include "src/mips/macro-assembler-mips.h"
-#include "src/mips/simulator-mips.h"
#include "src/objects-inl.h"
+#include "src/simulator.h"
#include "src/v8.h"
#include "test/cctest/cctest.h"
namespace v8 {
namespace internal {
-typedef Object* (*F1)(int x, int p1, int p2, int p3, int p4);
-typedef Object* (*F3)(void* p, int p1, int p2, int p3, int p4);
-typedef Object* (*F4)(void* p0, void* p1, int p2, int p3, int p4);
+// TODO(mips): Refine these signatures per test case.
+using F1 = Object*(int x, int p1, int p2, int p3, int p4);
+using F3 = Object*(void* p, int p1, int p2, int p3, int p4);
+using F4 = Object*(void* p0, void* p1, int p2, int p3, int p4);
#define __ masm->
@@ -96,14 +97,13 @@ TEST(BYTESWAP) {
masm->GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
t.r1 = 0x781A15C3;
t.r2 = 0x2CDE;
t.r3 = 0x9F;
t.r4 = 0x9F;
t.r5 = 0x2CDE;
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
- USE(dummy);
+ f.Call(&t, 0, 0, 0, 0);
CHECK_EQ(static_cast<int32_t>(0xC3151A78), t.r1);
CHECK_EQ(static_cast<int32_t>(0xDE2C0000), t.r2);
@@ -208,10 +208,9 @@ TEST(jump_tables4) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F1 f = FUNCTION_CAST<F1>(code->entry());
+ auto f = GeneratedCode<F1>::FromCode(*code);
for (int i = 0; i < kNumCases; ++i) {
- int res =
- reinterpret_cast<int>(CALL_GENERATED_CODE(isolate, f, i, 0, 0, 0, 0));
+ int res = reinterpret_cast<int>(f.Call(i, 0, 0, 0, 0));
::printf("f(%d) = %d\n", i, res);
CHECK_EQ(values[i], res);
}
@@ -275,10 +274,9 @@ TEST(jump_tables5) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F1 f = FUNCTION_CAST<F1>(code->entry());
+ auto f = GeneratedCode<F1>::FromCode(*code);
for (int i = 0; i < kNumCases; ++i) {
- int32_t res = reinterpret_cast<int32_t>(
- CALL_GENERATED_CODE(isolate, f, i, 0, 0, 0, 0));
+ int32_t res = reinterpret_cast<int32_t>(f.Call(i, 0, 0, 0, 0));
::printf("f(%d) = %d\n", i, res);
CHECK_EQ(values[i], res);
}
@@ -366,10 +364,9 @@ TEST(jump_tables6) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F1 f = FUNCTION_CAST<F1>(code->entry());
+ auto f = GeneratedCode<F1>::FromCode(*code);
for (int i = 0; i < kSwitchTableCases; ++i) {
- int res =
- reinterpret_cast<int>(CALL_GENERATED_CODE(isolate, f, i, 0, 0, 0, 0));
+ int res = reinterpret_cast<int>(f.Call(i, 0, 0, 0, 0));
::printf("f(%d) = %d\n", i, res);
CHECK_EQ(values[i], res);
}
@@ -391,10 +388,9 @@ static uint32_t run_lsa(uint32_t rt, uint32_t rs, int8_t sa) {
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F1 f = FUNCTION_CAST<F1>(code->entry());
+ auto f = GeneratedCode<F1>::FromCode(*code);
- uint32_t res = reinterpret_cast<uint32_t>(
- CALL_GENERATED_CODE(isolate, f, rt, rs, 0, 0, 0));
+ uint32_t res = reinterpret_cast<uint32_t>(f.Call(rt, rs, 0, 0, 0));
return res;
}
@@ -412,7 +408,7 @@ TEST(Lsa) {
struct TestCaseLsa tc[] = {// rt, rs, sa, expected_res
{0x4, 0x1, 1, 0x6},
{0x4, 0x1, 2, 0x8},
- {0x4, 0x1, 3, 0xc},
+ {0x4, 0x1, 3, 0xC},
{0x4, 0x1, 4, 0x14},
{0x4, 0x1, 5, 0x24},
{0x0, 0x1, 1, 0x2},
@@ -429,9 +425,9 @@ TEST(Lsa) {
// Shift overflow.
{0x4, INT32_MAX, 1, 0x2},
{0x4, INT32_MAX >> 1, 2, 0x0},
- {0x4, INT32_MAX >> 2, 3, 0xfffffffc},
- {0x4, INT32_MAX >> 3, 4, 0xfffffff4},
- {0x4, INT32_MAX >> 4, 5, 0xffffffe4},
+ {0x4, INT32_MAX >> 2, 3, 0xFFFFFFFC},
+ {0x4, INT32_MAX >> 3, 4, 0xFFFFFFF4},
+ {0x4, INT32_MAX >> 4, 5, 0xFFFFFFE4},
// Signed addition overflow.
{INT32_MAX - 1, 0x1, 1, 0x80000000},
@@ -457,19 +453,19 @@ TEST(Lsa) {
}
static const std::vector<uint32_t> cvt_trunc_uint32_test_values() {
- static const uint32_t kValues[] = {0x00000000, 0x00000001, 0x00ffff00,
- 0x7fffffff, 0x80000000, 0x80000001,
- 0x80ffff00, 0x8fffffff, 0xffffffff};
+ static const uint32_t kValues[] = {0x00000000, 0x00000001, 0x00FFFF00,
+ 0x7FFFFFFF, 0x80000000, 0x80000001,
+ 0x80FFFF00, 0x8FFFFFFF, 0xFFFFFFFF};
return std::vector<uint32_t>(&kValues[0], &kValues[arraysize(kValues)]);
}
static const std::vector<int32_t> cvt_trunc_int32_test_values() {
static const int32_t kValues[] = {
static_cast<int32_t>(0x00000000), static_cast<int32_t>(0x00000001),
- static_cast<int32_t>(0x00ffff00), static_cast<int32_t>(0x7fffffff),
+ static_cast<int32_t>(0x00FFFF00), static_cast<int32_t>(0x7FFFFFFF),
static_cast<int32_t>(0x80000000), static_cast<int32_t>(0x80000001),
- static_cast<int32_t>(0x80ffff00), static_cast<int32_t>(0x8fffffff),
- static_cast<int32_t>(0xffffffff)};
+ static_cast<int32_t>(0x80FFFF00), static_cast<int32_t>(0x8FFFFFFF),
+ static_cast<int32_t>(0xFFFFFFFF)};
return std::vector<int32_t>(&kValues[0], &kValues[arraysize(kValues)]);
}
@@ -502,7 +498,7 @@ static const std::vector<int32_t> cvt_trunc_int32_test_values() {
template <typename RET_TYPE, typename IN_TYPE, typename Func>
RET_TYPE run_Cvt(IN_TYPE x, Func GenerateConvertInstructionFunc) {
- typedef RET_TYPE (*F_CVT)(IN_TYPE x0, int x1, int x2, int x3, int x4);
+ typedef RET_TYPE(F_CVT)(IN_TYPE x0, int x1, int x2, int x3, int x4);
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
@@ -521,10 +517,9 @@ RET_TYPE run_Cvt(IN_TYPE x, Func GenerateConvertInstructionFunc) {
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F_CVT f = FUNCTION_CAST<F_CVT>(code->entry());
+ auto f = GeneratedCode<F_CVT>::FromCode(*code);
- return reinterpret_cast<RET_TYPE>(
- CALL_GENERATED_CODE(isolate, f, x, 0, 0, 0, 0));
+ return reinterpret_cast<RET_TYPE>(f.Call(x, 0, 0, 0, 0));
}
TEST(cvt_s_w_Trunc_uw_s) {
@@ -553,11 +548,11 @@ TEST(cvt_d_w_Trunc_w_d) {
static const std::vector<int32_t> overflow_int32_test_values() {
static const int32_t kValues[] = {
- static_cast<int32_t>(0xf0000000), static_cast<int32_t>(0x00000001),
- static_cast<int32_t>(0xff000000), static_cast<int32_t>(0x0000f000),
- static_cast<int32_t>(0x0f000000), static_cast<int32_t>(0x991234ab),
- static_cast<int32_t>(0xb0ffff01), static_cast<int32_t>(0x00006fff),
- static_cast<int32_t>(0xffffffff)};
+ static_cast<int32_t>(0xF0000000), static_cast<int32_t>(0x00000001),
+ static_cast<int32_t>(0xFF000000), static_cast<int32_t>(0x0000F000),
+ static_cast<int32_t>(0x0F000000), static_cast<int32_t>(0x991234AB),
+ static_cast<int32_t>(0xB0FFFF01), static_cast<int32_t>(0x00006FFF),
+ static_cast<int32_t>(0xFFFFFFFF)};
return std::vector<int32_t>(&kValues[0], &kValues[arraysize(kValues)]);
}
@@ -609,7 +604,7 @@ static bool IsSubOverflow(T x, T y) {
template <typename IN_TYPE, typename Func>
static bool runOverflow(IN_TYPE valLeft, IN_TYPE valRight,
Func GenerateOverflowInstructions) {
- typedef int32_t (*F_CVT)(char* x0, int x1, int x2, int x3, int x4);
+ typedef int32_t(F_CVT)(char* x0, int x1, int x2, int x3, int x4);
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
@@ -626,10 +621,9 @@ static bool runOverflow(IN_TYPE valLeft, IN_TYPE valRight,
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F_CVT f = FUNCTION_CAST<F_CVT>(code->entry());
+ auto f = GeneratedCode<F_CVT>::FromCode(*code);
- int32_t r =
- reinterpret_cast<int32_t>(CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ int32_t r = reinterpret_cast<int32_t>(f.Call(0, 0, 0, 0, 0));
DCHECK(r == 0 || r == 1);
return r;
@@ -1054,14 +1048,14 @@ TEST(min_max_nan) {
masm->GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputsa[i];
test.b = inputsb[i];
test.e = inputse[i];
test.f = inputsf[i];
- CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0);
+ f.Call(&test, 0, 0, 0, 0);
CHECK_EQ(0, memcmp(&test.c, &outputsdmin[i], sizeof(test.c)));
CHECK_EQ(0, memcmp(&test.d, &outputsdmax[i], sizeof(test.d)));
@@ -1073,7 +1067,7 @@ TEST(min_max_nan) {
template <typename IN_TYPE, typename Func>
bool run_Unaligned(char* memory_buffer, int32_t in_offset, int32_t out_offset,
IN_TYPE value, Func GenerateUnalignedInstructionFunc) {
- typedef int32_t (*F_CVT)(char* x0, int x1, int x2, int x3, int x4);
+ typedef int32_t(F_CVT)(char* x0, int x1, int x2, int x3, int x4);
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
@@ -1091,10 +1085,10 @@ bool run_Unaligned(char* memory_buffer, int32_t in_offset, int32_t out_offset,
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F_CVT f = FUNCTION_CAST<F_CVT>(code->entry());
+ auto f = GeneratedCode<F_CVT>::FromCode(*code);
MemCopy(memory_buffer + in_offset, &value, sizeof(IN_TYPE));
- CALL_GENERATED_CODE(isolate, f, memory_buffer, 0, 0, 0, 0);
+ f.Call(memory_buffer, 0, 0, 0, 0);
MemCopy(&res, memory_buffer + out_offset, sizeof(IN_TYPE));
return res == value;
@@ -1102,8 +1096,8 @@ bool run_Unaligned(char* memory_buffer, int32_t in_offset, int32_t out_offset,
static const std::vector<uint64_t> unsigned_test_values() {
static const uint64_t kValues[] = {
- 0x2180f18a06384414, 0x000a714532102277, 0xbc1acccf180649f0,
- 0x8000000080008000, 0x0000000000000001, 0xffffffffffffffff,
+ 0x2180F18A06384414, 0x000A714532102277, 0xBC1ACCCF180649F0,
+ 0x8000000080008000, 0x0000000000000001, 0xFFFFFFFFFFFFFFFF,
};
return std::vector<uint64_t>(&kValues[0], &kValues[arraysize(kValues)]);
}
@@ -1313,16 +1307,16 @@ TEST(Uldc1) {
static const std::vector<uint32_t> sltu_test_values() {
static const uint32_t kValues[] = {
- 0, 1, 0x7ffe, 0x7fff, 0x8000,
- 0x8001, 0xfffe, 0xffff, 0xffff7ffe, 0xffff7fff,
- 0xffff8000, 0xffff8001, 0xfffffffe, 0xffffffff,
+ 0, 1, 0x7FFE, 0x7FFF, 0x8000,
+ 0x8001, 0xFFFE, 0xFFFF, 0xFFFF7FFE, 0xFFFF7FFF,
+ 0xFFFF8000, 0xFFFF8001, 0xFFFFFFFE, 0xFFFFFFFF,
};
return std::vector<uint32_t>(&kValues[0], &kValues[arraysize(kValues)]);
}
template <typename Func>
bool run_Sltu(uint32_t rs, uint32_t rd, Func GenerateSltuInstructionFunc) {
- typedef int32_t (*F_CVT)(uint32_t x0, uint32_t x1, int x2, int x3, int x4);
+ typedef int32_t(F_CVT)(uint32_t x0, uint32_t x1, int x2, int x3, int x4);
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
@@ -1339,9 +1333,8 @@ bool run_Sltu(uint32_t rs, uint32_t rd, Func GenerateSltuInstructionFunc) {
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F_CVT f = FUNCTION_CAST<F_CVT>(code->entry());
- int32_t res = reinterpret_cast<int32_t>(
- CALL_GENERATED_CODE(isolate, f, rs, rd, 0, 0, 0));
+ auto f = GeneratedCode<F_CVT>::FromCode(*code);
+ int32_t res = reinterpret_cast<int32_t>(f.Call(rs, rd, 0, 0, 0));
return res == 1;
}
@@ -1367,7 +1360,7 @@ TEST(Sltu) {
}
template <typename T, typename Inputs, typename Results>
-static F4 GenerateMacroFloat32MinMax(MacroAssembler* masm) {
+static GeneratedCode<F4> GenerateMacroFloat32MinMax(MacroAssembler* masm) {
T a = T::from_code(4); // f4
T b = T::from_code(6); // f6
T c = T::from_code(8); // f8
@@ -1437,7 +1430,7 @@ static F4 GenerateMacroFloat32MinMax(MacroAssembler* masm) {
OFStream os(stdout);
code->Print(os);
#endif
- return FUNCTION_CAST<F4>(code->entry());
+ return GeneratedCode<F4>::FromCode(*code);
}
TEST(macro_float_minmax_f32) {
@@ -1466,15 +1459,14 @@ TEST(macro_float_minmax_f32) {
float max_aba_;
};
- F4 f = GenerateMacroFloat32MinMax<FPURegister, Inputs, Results>(masm);
- Object* dummy = nullptr;
- USE(dummy);
+ GeneratedCode<F4> f =
+ GenerateMacroFloat32MinMax<FPURegister, Inputs, Results>(masm);
#define CHECK_MINMAX(src1, src2, min, max) \
do { \
Inputs inputs = {src1, src2}; \
Results results; \
- dummy = CALL_GENERATED_CODE(isolate, f, &inputs, &results, 0, 0, 0); \
+ f.Call(&inputs, &results, 0, 0, 0); \
CHECK_EQ(bit_cast<uint32_t>(min), bit_cast<uint32_t>(results.min_abc_)); \
CHECK_EQ(bit_cast<uint32_t>(min), bit_cast<uint32_t>(results.min_aab_)); \
CHECK_EQ(bit_cast<uint32_t>(min), bit_cast<uint32_t>(results.min_aba_)); \
@@ -1510,7 +1502,7 @@ TEST(macro_float_minmax_f32) {
}
template <typename T, typename Inputs, typename Results>
-static F4 GenerateMacroFloat64MinMax(MacroAssembler* masm) {
+static GeneratedCode<F4> GenerateMacroFloat64MinMax(MacroAssembler* masm) {
T a = T::from_code(4); // f4
T b = T::from_code(6); // f6
T c = T::from_code(8); // f8
@@ -1580,7 +1572,7 @@ static F4 GenerateMacroFloat64MinMax(MacroAssembler* masm) {
OFStream os(stdout);
code->Print(os);
#endif
- return FUNCTION_CAST<F4>(code->entry());
+ return GeneratedCode<F4>::FromCode(*code);
}
TEST(macro_float_minmax_f64) {
@@ -1609,15 +1601,14 @@ TEST(macro_float_minmax_f64) {
double max_aba_;
};
- F4 f = GenerateMacroFloat64MinMax<DoubleRegister, Inputs, Results>(masm);
- Object* dummy = nullptr;
- USE(dummy);
+ GeneratedCode<F4> f =
+ GenerateMacroFloat64MinMax<DoubleRegister, Inputs, Results>(masm);
#define CHECK_MINMAX(src1, src2, min, max) \
do { \
Inputs inputs = {src1, src2}; \
Results results; \
- dummy = CALL_GENERATED_CODE(isolate, f, &inputs, &results, 0, 0, 0); \
+ f.Call(&inputs, &results, 0, 0, 0); \
CHECK_EQ(bit_cast<uint64_t>(min), bit_cast<uint64_t>(results.min_abc_)); \
CHECK_EQ(bit_cast<uint64_t>(min), bit_cast<uint64_t>(results.min_aab_)); \
CHECK_EQ(bit_cast<uint64_t>(min), bit_cast<uint64_t>(results.min_aba_)); \
diff --git a/deps/v8/test/cctest/test-macro-assembler-mips64.cc b/deps/v8/test/cctest/test-macro-assembler-mips64.cc
index c695d29203..2b199cb9c7 100644
--- a/deps/v8/test/cctest/test-macro-assembler-mips64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-mips64.cc
@@ -34,16 +34,17 @@
#include "src/base/utils/random-number-generator.h"
#include "src/macro-assembler.h"
#include "src/mips64/macro-assembler-mips64.h"
-#include "src/mips64/simulator-mips64.h"
#include "src/objects-inl.h"
+#include "src/simulator.h"
namespace v8 {
namespace internal {
-typedef void* (*FV)(int64_t x, int64_t y, int p2, int p3, int p4);
-typedef Object* (*F1)(int x, int p1, int p2, int p3, int p4);
-typedef Object* (*F3)(void* p, int p1, int p2, int p3, int p4);
-typedef Object* (*F4)(void* p0, void* p1, int p2, int p3, int p4);
+// TODO(mips64): Refine these signatures per test case.
+using FV = void*(int64_t x, int64_t y, int p2, int p3, int p4);
+using F1 = Object*(int x, int p1, int p2, int p3, int p4);
+using F3 = Object*(void* p, int p1, int p2, int p3, int p4);
+using F4 = Object*(void* p0, void* p1, int p2, int p3, int p4);
#define __ masm->
@@ -111,7 +112,7 @@ TEST(BYTESWAP) {
masm->GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
t.r1 = 0x5612FFCD9D327ACC;
t.r2 = 0x781A15C3;
t.r3 = 0xFCDE;
@@ -119,8 +120,7 @@ TEST(BYTESWAP) {
t.r5 = 0x9F;
t.r6 = 0xFCDE;
t.r7 = 0xC81A15C3;
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
- USE(dummy);
+ f.Call(&t, 0, 0, 0, 0);
CHECK_EQ(static_cast<int64_t>(0xCC7A329DCDFF1256), t.r1);
CHECK_EQ(static_cast<int64_t>(0xC3151A7800000000), t.r2);
@@ -164,9 +164,8 @@ TEST(LoadConstants) {
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- FV f = FUNCTION_CAST<FV>(code->entry());
- (void)CALL_GENERATED_CODE(isolate, f, reinterpret_cast<int64_t>(result), 0, 0,
- 0, 0);
+ auto f = GeneratedCode<FV>::FromCode(*code);
+ (void)f.Call(reinterpret_cast<int64_t>(result), 0, 0, 0, 0);
// Check results.
for (int i = 0; i < 64; i++) {
CHECK(refConstants[i] == result[i]);
@@ -209,8 +208,8 @@ TEST(LoadAddress) {
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- FV f = FUNCTION_CAST<FV>(code->entry());
- (void)CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0);
+ auto f = GeneratedCode<FV>::FromCode(*code);
+ (void)f.Call(0, 0, 0, 0, 0);
// Check results.
}
@@ -269,10 +268,9 @@ TEST(jump_tables4) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F1 f = FUNCTION_CAST<F1>(code->entry());
+ auto f = GeneratedCode<F1>::FromCode(*code);
for (int i = 0; i < kNumCases; ++i) {
- int64_t res = reinterpret_cast<int64_t>(
- CALL_GENERATED_CODE(isolate, f, i, 0, 0, 0, 0));
+ int64_t res = reinterpret_cast<int64_t>(f.Call(i, 0, 0, 0, 0));
::printf("f(%d) = %" PRId64 "\n", i, res);
CHECK_EQ(values[i], res);
}
@@ -343,10 +341,9 @@ TEST(jump_tables5) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F1 f = FUNCTION_CAST<F1>(code->entry());
+ auto f = GeneratedCode<F1>::FromCode(*code);
for (int i = 0; i < kNumCases; ++i) {
- int64_t res = reinterpret_cast<int64_t>(
- CALL_GENERATED_CODE(isolate, f, i, 0, 0, 0, 0));
+ int64_t res = reinterpret_cast<int64_t>(f.Call(i, 0, 0, 0, 0));
::printf("f(%d) = %" PRId64 "\n", i, res);
CHECK_EQ(values[i], res);
}
@@ -435,10 +432,9 @@ TEST(jump_tables6) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- F1 f = FUNCTION_CAST<F1>(code->entry());
+ auto f = GeneratedCode<F1>::FromCode(*code);
for (int i = 0; i < kSwitchTableCases; ++i) {
- int64_t res = reinterpret_cast<int64_t>(
- CALL_GENERATED_CODE(isolate, f, i, 0, 0, 0, 0));
+ int64_t res = reinterpret_cast<int64_t>(f.Call(i, 0, 0, 0, 0));
::printf("f(%d) = %" PRId64 "\n", i, res);
CHECK_EQ(values[i], res);
}
@@ -460,10 +456,9 @@ static uint64_t run_lsa(uint32_t rt, uint32_t rs, int8_t sa) {
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F1 f = FUNCTION_CAST<F1>(code->entry());
+ auto f = GeneratedCode<F1>::FromCode(*code);
- uint64_t res = reinterpret_cast<uint64_t>(
- CALL_GENERATED_CODE(isolate, f, rt, rs, 0, 0, 0));
+ uint64_t res = reinterpret_cast<uint64_t>(f.Call(rt, rs, 0, 0, 0));
return res;
}
@@ -481,7 +476,7 @@ TEST(Lsa) {
struct TestCaseLsa tc[] = {// rt, rs, sa, expected_res
{0x4, 0x1, 1, 0x6},
{0x4, 0x1, 2, 0x8},
- {0x4, 0x1, 3, 0xc},
+ {0x4, 0x1, 3, 0xC},
{0x4, 0x1, 4, 0x14},
{0x4, 0x1, 5, 0x24},
{0x0, 0x1, 1, 0x2},
@@ -498,16 +493,16 @@ TEST(Lsa) {
// Shift overflow.
{0x4, INT32_MAX, 1, 0x2},
{0x4, INT32_MAX >> 1, 2, 0x0},
- {0x4, INT32_MAX >> 2, 3, 0xfffffffffffffffc},
- {0x4, INT32_MAX >> 3, 4, 0xfffffffffffffff4},
- {0x4, INT32_MAX >> 4, 5, 0xffffffffffffffe4},
+ {0x4, INT32_MAX >> 2, 3, 0xFFFFFFFFFFFFFFFC},
+ {0x4, INT32_MAX >> 3, 4, 0xFFFFFFFFFFFFFFF4},
+ {0x4, INT32_MAX >> 4, 5, 0xFFFFFFFFFFFFFFE4},
// Signed addition overflow.
- {INT32_MAX - 1, 0x1, 1, 0xffffffff80000000},
- {INT32_MAX - 3, 0x1, 2, 0xffffffff80000000},
- {INT32_MAX - 7, 0x1, 3, 0xffffffff80000000},
- {INT32_MAX - 15, 0x1, 4, 0xffffffff80000000},
- {INT32_MAX - 31, 0x1, 5, 0xffffffff80000000},
+ {INT32_MAX - 1, 0x1, 1, 0xFFFFFFFF80000000},
+ {INT32_MAX - 3, 0x1, 2, 0xFFFFFFFF80000000},
+ {INT32_MAX - 7, 0x1, 3, 0xFFFFFFFF80000000},
+ {INT32_MAX - 15, 0x1, 4, 0xFFFFFFFF80000000},
+ {INT32_MAX - 31, 0x1, 5, 0xFFFFFFFF80000000},
// Addition overflow.
{-2, 0x1, 1, 0x0},
@@ -542,10 +537,9 @@ static uint64_t run_dlsa(uint64_t rt, uint64_t rs, int8_t sa) {
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- FV f = FUNCTION_CAST<FV>(code->entry());
+ auto f = GeneratedCode<FV>::FromCode(*code);
- uint64_t res = reinterpret_cast<uint64_t>(
- CALL_GENERATED_CODE(isolate, f, rt, rs, 0, 0, 0));
+ uint64_t res = reinterpret_cast<uint64_t>(f.Call(rt, rs, 0, 0, 0));
return res;
}
@@ -563,7 +557,7 @@ TEST(Dlsa) {
struct TestCaseLsa tc[] = {// rt, rs, sa, expected_res
{0x4, 0x1, 1, 0x6},
{0x4, 0x1, 2, 0x8},
- {0x4, 0x1, 3, 0xc},
+ {0x4, 0x1, 3, 0xC},
{0x4, 0x1, 4, 0x14},
{0x4, 0x1, 5, 0x24},
{0x0, 0x1, 1, 0x2},
@@ -580,9 +574,9 @@ TEST(Dlsa) {
// Shift overflow.
{0x4, INT64_MAX, 1, 0x2},
{0x4, INT64_MAX >> 1, 2, 0x0},
- {0x4, INT64_MAX >> 2, 3, 0xfffffffffffffffc},
- {0x4, INT64_MAX >> 3, 4, 0xfffffffffffffff4},
- {0x4, INT64_MAX >> 4, 5, 0xffffffffffffffe4},
+ {0x4, INT64_MAX >> 2, 3, 0xFFFFFFFFFFFFFFFC},
+ {0x4, INT64_MAX >> 3, 4, 0xFFFFFFFFFFFFFFF4},
+ {0x4, INT64_MAX >> 4, 5, 0xFFFFFFFFFFFFFFE4},
// Signed addition overflow.
{INT64_MAX - 1, 0x1, 1, 0x8000000000000000},
@@ -609,40 +603,40 @@ TEST(Dlsa) {
}
static const std::vector<uint32_t> cvt_trunc_uint32_test_values() {
- static const uint32_t kValues[] = {0x00000000, 0x00000001, 0x00ffff00,
- 0x7fffffff, 0x80000000, 0x80000001,
- 0x80ffff00, 0x8fffffff, 0xffffffff};
+ static const uint32_t kValues[] = {0x00000000, 0x00000001, 0x00FFFF00,
+ 0x7FFFFFFF, 0x80000000, 0x80000001,
+ 0x80FFFF00, 0x8FFFFFFF, 0xFFFFFFFF};
return std::vector<uint32_t>(&kValues[0], &kValues[arraysize(kValues)]);
}
static const std::vector<int32_t> cvt_trunc_int32_test_values() {
static const int32_t kValues[] = {
static_cast<int32_t>(0x00000000), static_cast<int32_t>(0x00000001),
- static_cast<int32_t>(0x00ffff00), static_cast<int32_t>(0x7fffffff),
+ static_cast<int32_t>(0x00FFFF00), static_cast<int32_t>(0x7FFFFFFF),
static_cast<int32_t>(0x80000000), static_cast<int32_t>(0x80000001),
- static_cast<int32_t>(0x80ffff00), static_cast<int32_t>(0x8fffffff),
- static_cast<int32_t>(0xffffffff)};
+ static_cast<int32_t>(0x80FFFF00), static_cast<int32_t>(0x8FFFFFFF),
+ static_cast<int32_t>(0xFFFFFFFF)};
return std::vector<int32_t>(&kValues[0], &kValues[arraysize(kValues)]);
}
static const std::vector<uint64_t> cvt_trunc_uint64_test_values() {
static const uint64_t kValues[] = {
- 0x0000000000000000, 0x0000000000000001, 0x0000ffffffff0000,
- 0x7fffffffffffffff, 0x8000000000000000, 0x8000000000000001,
- 0x8000ffffffff0000, 0x8fffffffffffffff, 0xffffffffffffffff};
+ 0x0000000000000000, 0x0000000000000001, 0x0000FFFFFFFF0000,
+ 0x7FFFFFFFFFFFFFFF, 0x8000000000000000, 0x8000000000000001,
+ 0x8000FFFFFFFF0000, 0x8FFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF};
return std::vector<uint64_t>(&kValues[0], &kValues[arraysize(kValues)]);
}
static const std::vector<int64_t> cvt_trunc_int64_test_values() {
static const int64_t kValues[] = {static_cast<int64_t>(0x0000000000000000),
static_cast<int64_t>(0x0000000000000001),
- static_cast<int64_t>(0x0000ffffffff0000),
- static_cast<int64_t>(0x7fffffffffffffff),
+ static_cast<int64_t>(0x0000FFFFFFFF0000),
+ static_cast<int64_t>(0x7FFFFFFFFFFFFFFF),
static_cast<int64_t>(0x8000000000000000),
static_cast<int64_t>(0x8000000000000001),
- static_cast<int64_t>(0x8000ffffffff0000),
- static_cast<int64_t>(0x8fffffffffffffff),
- static_cast<int64_t>(0xffffffffffffffff)};
+ static_cast<int64_t>(0x8000FFFFFFFF0000),
+ static_cast<int64_t>(0x8FFFFFFFFFFFFFFF),
+ static_cast<int64_t>(0xFFFFFFFFFFFFFFFF)};
return std::vector<int64_t>(&kValues[0], &kValues[arraysize(kValues)]);
}
@@ -676,7 +670,7 @@ static const std::vector<int64_t> cvt_trunc_int64_test_values() {
template <typename RET_TYPE, typename IN_TYPE, typename Func>
RET_TYPE run_Cvt(IN_TYPE x, Func GenerateConvertInstructionFunc) {
- typedef RET_TYPE (*F_CVT)(IN_TYPE x0, int x1, int x2, int x3, int x4);
+ typedef RET_TYPE(F_CVT)(IN_TYPE x0, int x1, int x2, int x3, int x4);
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
@@ -694,10 +688,9 @@ RET_TYPE run_Cvt(IN_TYPE x, Func GenerateConvertInstructionFunc) {
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F_CVT f = FUNCTION_CAST<F_CVT>(code->entry());
+ auto f = GeneratedCode<F_CVT>::FromCode(*code);
- return reinterpret_cast<RET_TYPE>(
- CALL_GENERATED_CODE(isolate, f, x, 0, 0, 0, 0));
+ return reinterpret_cast<RET_TYPE>(f.Call(x, 0, 0, 0, 0));
}
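Note the typedef change above: GeneratedCode<F> is parameterized on a function type rather than a function-pointer type, so `typedef RET_TYPE (*F_CVT)(...)` becomes `typedef RET_TYPE(F_CVT)(...)`. A standalone illustration of the distinction (not V8 code):

    typedef int (*PtrType)(int);  // pointer-to-function type
    typedef int(FnType)(int);     // plain function type

    int DoubleIt(int x) { return 2 * x; }

    PtrType p = DoubleIt;    // OK: pointers can be assigned and called.
    // FnType f = DoubleIt;  // Error: no variables of raw function type.
    // A wrapper like GeneratedCode<FnType> keeps the signature as a type
    // and can form the pointer internally, e.g. with std::add_pointer_t.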
TEST(Cvt_s_uw_Trunc_uw_s) {
@@ -780,15 +773,15 @@ TEST(cvt_d_w_Trunc_w_d) {
}
static const std::vector<int64_t> overflow_int64_test_values() {
- static const int64_t kValues[] = {static_cast<int64_t>(0xf000000000000000),
+ static const int64_t kValues[] = {static_cast<int64_t>(0xF000000000000000),
static_cast<int64_t>(0x0000000000000001),
- static_cast<int64_t>(0xff00000000000000),
- static_cast<int64_t>(0x0000f00111111110),
- static_cast<int64_t>(0x0f00001000000000),
- static_cast<int64_t>(0x991234ab12a96731),
- static_cast<int64_t>(0xb0ffff0f0f0f0f01),
- static_cast<int64_t>(0x00006fffffffffff),
- static_cast<int64_t>(0xffffffffffffffff)};
+ static_cast<int64_t>(0xFF00000000000000),
+ static_cast<int64_t>(0x0000F00111111110),
+ static_cast<int64_t>(0x0F00001000000000),
+ static_cast<int64_t>(0x991234AB12A96731),
+ static_cast<int64_t>(0xB0FFFF0F0F0F0F01),
+ static_cast<int64_t>(0x00006FFFFFFFFFFF),
+ static_cast<int64_t>(0xFFFFFFFFFFFFFFFF)};
return std::vector<int64_t>(&kValues[0], &kValues[arraysize(kValues)]);
}
@@ -840,7 +833,7 @@ static bool IsSubOverflow(T x, T y) {
template <typename IN_TYPE, typename Func>
static bool runOverflow(IN_TYPE valLeft, IN_TYPE valRight,
Func GenerateOverflowInstructions) {
- typedef int64_t (*F_CVT)(char* x0, int x1, int x2, int x3, int x4);
+ typedef int64_t(F_CVT)(char* x0, int x1, int x2, int x3, int x4);
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
@@ -857,10 +850,9 @@ static bool runOverflow(IN_TYPE valLeft, IN_TYPE valRight,
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F_CVT f = FUNCTION_CAST<F_CVT>(code->entry());
+ auto f = GeneratedCode<F_CVT>::FromCode(*code);
- int64_t r =
- reinterpret_cast<int64_t>(CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ int64_t r = reinterpret_cast<int64_t>(f.Call(0, 0, 0, 0, 0));
DCHECK(r == 0 || r == 1);
return r;
@@ -1212,14 +1204,14 @@ TEST(min_max_nan) {
masm->GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
+ auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputsa[i];
test.b = inputsb[i];
test.e = inputse[i];
test.f = inputsf[i];
- CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0);
+ f.Call(&test, 0, 0, 0, 0);
CHECK_EQ(0, memcmp(&test.c, &outputsdmin[i], sizeof(test.c)));
CHECK_EQ(0, memcmp(&test.d, &outputsdmax[i], sizeof(test.d)));
@@ -1231,7 +1223,7 @@ TEST(min_max_nan) {
template <typename IN_TYPE, typename Func>
bool run_Unaligned(char* memory_buffer, int32_t in_offset, int32_t out_offset,
IN_TYPE value, Func GenerateUnalignedInstructionFunc) {
- typedef int32_t (*F_CVT)(char* x0, int x1, int x2, int x3, int x4);
+ typedef int32_t(F_CVT)(char* x0, int x1, int x2, int x3, int x4);
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
@@ -1249,10 +1241,10 @@ bool run_Unaligned(char* memory_buffer, int32_t in_offset, int32_t out_offset,
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F_CVT f = FUNCTION_CAST<F_CVT>(code->entry());
+ auto f = GeneratedCode<F_CVT>::FromCode(*code);
MemCopy(memory_buffer + in_offset, &value, sizeof(IN_TYPE));
- CALL_GENERATED_CODE(isolate, f, memory_buffer, 0, 0, 0, 0);
+ f.Call(memory_buffer, 0, 0, 0, 0);
MemCopy(&res, memory_buffer + out_offset, sizeof(IN_TYPE));
return res == value;
@@ -1260,8 +1252,8 @@ bool run_Unaligned(char* memory_buffer, int32_t in_offset, int32_t out_offset,
static const std::vector<uint64_t> unsigned_test_values() {
static const uint64_t kValues[] = {
- 0x2180f18a06384414, 0x000a714532102277, 0xbc1acccf180649f0,
- 0x8000000080008000, 0x0000000000000001, 0xffffffffffffffff,
+ 0x2180F18A06384414, 0x000A714532102277, 0xBC1ACCCF180649F0,
+ 0x8000000080008000, 0x0000000000000001, 0xFFFFFFFFFFFFFFFF,
};
return std::vector<uint64_t>(&kValues[0], &kValues[arraysize(kValues)]);
}
@@ -1579,25 +1571,25 @@ static const std::vector<uint64_t> sltu_test_values() {
static const uint64_t kValues[] = {
0,
1,
- 0x7ffe,
- 0x7fff,
+ 0x7FFE,
+ 0x7FFF,
0x8000,
0x8001,
- 0xfffe,
- 0xffff,
- 0xffffffffffff7ffe,
- 0xffffffffffff7fff,
- 0xffffffffffff8000,
- 0xffffffffffff8001,
- 0xfffffffffffffffe,
- 0xffffffffffffffff,
+ 0xFFFE,
+ 0xFFFF,
+ 0xFFFFFFFFFFFF7FFE,
+ 0xFFFFFFFFFFFF7FFF,
+ 0xFFFFFFFFFFFF8000,
+ 0xFFFFFFFFFFFF8001,
+ 0xFFFFFFFFFFFFFFFE,
+ 0xFFFFFFFFFFFFFFFF,
};
return std::vector<uint64_t>(&kValues[0], &kValues[arraysize(kValues)]);
}
template <typename Func>
bool run_Sltu(uint64_t rs, uint64_t rd, Func GenerateSltuInstructionFunc) {
- typedef int64_t (*F_CVT)(uint64_t x0, uint64_t x1, int x2, int x3, int x4);
+ typedef int64_t(F_CVT)(uint64_t x0, uint64_t x1, int x2, int x3, int x4);
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
@@ -1614,9 +1606,8 @@ bool run_Sltu(uint64_t rs, uint64_t rd, Func GenerateSltuInstructionFunc) {
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- F_CVT f = FUNCTION_CAST<F_CVT>(code->entry());
- int64_t res = reinterpret_cast<int64_t>(
- CALL_GENERATED_CODE(isolate, f, rs, rd, 0, 0, 0));
+ auto f = GeneratedCode<F_CVT>::FromCode(*code);
+ int64_t res = reinterpret_cast<int64_t>(f.Call(rs, rd, 0, 0, 0));
return res == 1;
}
@@ -1642,7 +1633,7 @@ TEST(Sltu) {
}
template <typename T, typename Inputs, typename Results>
-static F4 GenerateMacroFloat32MinMax(MacroAssembler* masm) {
+static GeneratedCode<F4> GenerateMacroFloat32MinMax(MacroAssembler* masm) {
T a = T::from_code(4); // f4
T b = T::from_code(6); // f6
T c = T::from_code(8); // f8
@@ -1712,7 +1703,7 @@ static F4 GenerateMacroFloat32MinMax(MacroAssembler* masm) {
OFStream os(stdout);
code->Print(os);
#endif
- return FUNCTION_CAST<F4>(code->entry());
+ return GeneratedCode<F4>::FromCode(*code);
}
TEST(macro_float_minmax_f32) {
@@ -1741,15 +1732,14 @@ TEST(macro_float_minmax_f32) {
float max_aba_;
};
- F4 f = GenerateMacroFloat32MinMax<FPURegister, Inputs, Results>(masm);
- Object* dummy = nullptr;
- USE(dummy);
+ GeneratedCode<F4> f =
+ GenerateMacroFloat32MinMax<FPURegister, Inputs, Results>(masm);
#define CHECK_MINMAX(src1, src2, min, max) \
do { \
Inputs inputs = {src1, src2}; \
Results results; \
- dummy = CALL_GENERATED_CODE(isolate, f, &inputs, &results, 0, 0, 0); \
+ f.Call(&inputs, &results, 0, 0, 0); \
CHECK_EQ(bit_cast<uint32_t>(min), bit_cast<uint32_t>(results.min_abc_)); \
CHECK_EQ(bit_cast<uint32_t>(min), bit_cast<uint32_t>(results.min_aab_)); \
CHECK_EQ(bit_cast<uint32_t>(min), bit_cast<uint32_t>(results.min_aba_)); \
@@ -1785,7 +1775,7 @@ TEST(macro_float_minmax_f32) {
}
template <typename T, typename Inputs, typename Results>
-static F4 GenerateMacroFloat64MinMax(MacroAssembler* masm) {
+static GeneratedCode<F4> GenerateMacroFloat64MinMax(MacroAssembler* masm) {
T a = T::from_code(4); // f4
T b = T::from_code(6); // f6
T c = T::from_code(8); // f8
@@ -1855,7 +1845,7 @@ static F4 GenerateMacroFloat64MinMax(MacroAssembler* masm) {
OFStream os(stdout);
code->Print(os);
#endif
- return FUNCTION_CAST<F4>(code->entry());
+ return GeneratedCode<F4>::FromCode(*code);
}
TEST(macro_float_minmax_f64) {
@@ -1884,15 +1874,14 @@ TEST(macro_float_minmax_f64) {
double max_aba_;
};
- F4 f = GenerateMacroFloat64MinMax<DoubleRegister, Inputs, Results>(masm);
- Object* dummy = nullptr;
- USE(dummy);
+ GeneratedCode<F4> f =
+ GenerateMacroFloat64MinMax<DoubleRegister, Inputs, Results>(masm);
#define CHECK_MINMAX(src1, src2, min, max) \
do { \
Inputs inputs = {src1, src2}; \
Results results; \
- dummy = CALL_GENERATED_CODE(isolate, f, &inputs, &results, 0, 0, 0); \
+ f.Call(&inputs, &results, 0, 0, 0); \
CHECK_EQ(bit_cast<uint64_t>(min), bit_cast<uint64_t>(results.min_abc_)); \
CHECK_EQ(bit_cast<uint64_t>(min), bit_cast<uint64_t>(results.min_aab_)); \
CHECK_EQ(bit_cast<uint64_t>(min), bit_cast<uint64_t>(results.min_aba_)); \
diff --git a/deps/v8/test/cctest/test-macro-assembler-x64.cc b/deps/v8/test/cctest/test-macro-assembler-x64.cc
index 5d94412d9b..6ace37c8b4 100644
--- a/deps/v8/test/cctest/test-macro-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-x64.cc
@@ -127,6 +127,7 @@ TEST(SmiMove) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
// Call the function from C++.
int result = FUNCTION_CAST<F0>(buffer)();
CHECK_EQ(0, result);
@@ -218,6 +219,7 @@ TEST(SmiCompare) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
// Call the function from C++.
int result = FUNCTION_CAST<F0>(buffer)();
CHECK_EQ(0, result);
@@ -317,6 +319,7 @@ TEST(Integer32ToSmi) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
// Call the function from C++.
int result = FUNCTION_CAST<F0>(buffer)();
CHECK_EQ(0, result);
@@ -391,6 +394,7 @@ TEST(SmiCheck) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
// Call the function from C++.
int result = FUNCTION_CAST<F0>(buffer)();
CHECK_EQ(0, result);
@@ -444,6 +448,7 @@ TEST(SmiIndex) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
// Call the function from C++.
int result = FUNCTION_CAST<F0>(buffer)();
CHECK_EQ(0, result);
@@ -794,6 +799,7 @@ TEST(OperandOffset) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
// Call the function from C++.
int result = FUNCTION_CAST<F0>(buffer)();
CHECK_EQ(0, result);
@@ -829,10 +835,10 @@ TEST(LoadAndStoreWithRepresentation) {
// Test 2.
__ movq(rax, Immediate(2)); // Test number.
__ movq(Operand(rsp, 0 * kPointerSize), Immediate(0));
- __ Set(rcx, V8_2PART_UINT64_C(0xdeadbeaf, 12345678));
+ __ Set(rcx, V8_2PART_UINT64_C(0xDEADBEAF, 12345678));
__ Store(Operand(rsp, 0 * kPointerSize), rcx, Representation::Smi());
__ movq(rcx, Operand(rsp, 0 * kPointerSize));
- __ Set(rdx, V8_2PART_UINT64_C(0xdeadbeaf, 12345678));
+ __ Set(rdx, V8_2PART_UINT64_C(0xDEADBEAF, 12345678));
__ cmpq(rcx, rdx);
__ j(not_equal, &exit);
__ Load(rdx, Operand(rsp, 0 * kPointerSize), Representation::Smi());
@@ -868,10 +874,10 @@ TEST(LoadAndStoreWithRepresentation) {
// Test 5.
__ movq(rax, Immediate(5)); // Test number.
__ movq(Operand(rsp, 0 * kPointerSize), Immediate(0));
- __ Set(rcx, V8_2PART_UINT64_C(0x12345678, deadbeaf));
+ __ Set(rcx, V8_2PART_UINT64_C(0x12345678, DEADBEAF));
__ Store(Operand(rsp, 0 * kPointerSize), rcx, Representation::Tagged());
__ movq(rcx, Operand(rsp, 0 * kPointerSize));
- __ Set(rdx, V8_2PART_UINT64_C(0x12345678, deadbeaf));
+ __ Set(rdx, V8_2PART_UINT64_C(0x12345678, DEADBEAF));
__ cmpq(rcx, rdx);
__ j(not_equal, &exit);
__ Load(rdx, Operand(rsp, 0 * kPointerSize), Representation::Tagged());
@@ -940,6 +946,7 @@ TEST(LoadAndStoreWithRepresentation) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
// Call the function from C++.
int result = FUNCTION_CAST<F0>(buffer)();
CHECK_EQ(0, result);
@@ -1092,6 +1099,7 @@ TEST(SIMDMacros) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
// Call the function from C++.
int result = FUNCTION_CAST<F0>(buffer)();
CHECK_EQ(0, result);
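Each x64 test now calls MakeAssemblerBufferExecutable(buffer, allocated) after GetCode and before the FUNCTION_CAST call, consistent with assembler buffers being allocated writable but not executable (a W^X discipline). A hedged POSIX sketch of the idea behind such a helper; V8's actual implementation is platform-specific and not shown in this diff:

    #include <sys/mman.h>
    #include <cassert>
    #include <cstddef>

    // Illustration only: flip a page-aligned code buffer from RW to RX
    // so the CPU will execute it but writes now fault.
    void MakeExecutable(void* buffer, size_t size) {
      int rc = mprotect(buffer, size, PROT_READ | PROT_EXEC);
      assert(rc == 0);  // mprotect returns 0 on success.
    }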
diff --git a/deps/v8/test/cctest/test-modules.cc b/deps/v8/test/cctest/test-modules.cc
index b61b10bcea..2523b83a16 100644
--- a/deps/v8/test/cctest/test-modules.cc
+++ b/deps/v8/test/cctest/test-modules.cc
@@ -27,87 +27,166 @@ ScriptOrigin ModuleOrigin(Local<v8::Value> resource_name, Isolate* isolate) {
return origin;
}
-MaybeLocal<Module> FailAlwaysResolveCallback(Local<Context> context,
- Local<String> specifier,
- Local<Module> referrer) {
- Isolate* isolate = context->GetIsolate();
- isolate->ThrowException(v8_str("boom"));
- return MaybeLocal<Module>();
-}
-
-static int g_count = 0;
-MaybeLocal<Module> FailOnSecondCallResolveCallback(Local<Context> context,
- Local<String> specifier,
- Local<Module> referrer) {
+static Local<Module> dep1;
+static Local<Module> dep2;
+MaybeLocal<Module> ResolveCallback(Local<Context> context,
+ Local<String> specifier,
+ Local<Module> referrer) {
Isolate* isolate = CcTest::isolate();
- if (g_count++ > 0) {
- isolate->ThrowException(v8_str("booom"));
+ if (specifier->StrictEquals(v8_str("./dep1.js"))) {
+ return dep1;
+ } else if (specifier->StrictEquals(v8_str("./dep2.js"))) {
+ return dep2;
+ } else {
+ isolate->ThrowException(v8_str("boom"));
return MaybeLocal<Module>();
}
- Local<String> source_text = v8_str("");
- ScriptOrigin origin = ModuleOrigin(v8_str("module.js"), isolate);
- ScriptCompiler::Source source(source_text, origin);
- return ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
}
-TEST(ModuleInstantiationFailures) {
+TEST(ModuleInstantiationFailures1) {
Isolate* isolate = CcTest::isolate();
HandleScope scope(isolate);
LocalContext env;
v8::TryCatch try_catch(isolate);
- Local<String> source_text = v8_str(
- "import './foo.js';\n"
- "export {} from './bar.js';");
- ScriptOrigin origin = ModuleOrigin(v8_str("file.js"), CcTest::isolate());
- ScriptCompiler::Source source(source_text, origin);
- Local<Module> module =
- ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
- CHECK_EQ(Module::kUninstantiated, module->GetStatus());
- CHECK_EQ(2, module->GetModuleRequestsLength());
- CHECK(v8_str("./foo.js")->StrictEquals(module->GetModuleRequest(0)));
- v8::Location loc = module->GetModuleRequestLocation(0);
- CHECK_EQ(0, loc.GetLineNumber());
- CHECK_EQ(7, loc.GetColumnNumber());
-
- CHECK(v8_str("./bar.js")->StrictEquals(module->GetModuleRequest(1)));
- loc = module->GetModuleRequestLocation(1);
- CHECK_EQ(1, loc.GetLineNumber());
- CHECK_EQ(15, loc.GetColumnNumber());
+ Local<Module> module;
+ {
+ Local<String> source_text = v8_str(
+ "import './foo.js';\n"
+ "export {} from './bar.js';");
+ ScriptOrigin origin = ModuleOrigin(v8_str("file.js"), CcTest::isolate());
+ ScriptCompiler::Source source(source_text, origin);
+ module = ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
+ CHECK_EQ(Module::kUninstantiated, module->GetStatus());
+ CHECK_EQ(2, module->GetModuleRequestsLength());
+ CHECK(v8_str("./foo.js")->StrictEquals(module->GetModuleRequest(0)));
+ v8::Location loc = module->GetModuleRequestLocation(0);
+ CHECK_EQ(0, loc.GetLineNumber());
+ CHECK_EQ(7, loc.GetColumnNumber());
+ CHECK(v8_str("./bar.js")->StrictEquals(module->GetModuleRequest(1)));
+ loc = module->GetModuleRequestLocation(1);
+ CHECK_EQ(1, loc.GetLineNumber());
+ CHECK_EQ(15, loc.GetColumnNumber());
+ }
// Instantiation should fail.
{
v8::TryCatch inner_try_catch(isolate);
- CHECK(module->InstantiateModule(env.local(), FailAlwaysResolveCallback)
- .IsNothing());
+ CHECK(module->InstantiateModule(env.local(), ResolveCallback).IsNothing());
CHECK(inner_try_catch.HasCaught());
CHECK(inner_try_catch.Exception()->StrictEquals(v8_str("boom")));
- CHECK_EQ(Module::kErrored, module->GetStatus());
- Local<Value> exception = module->GetException();
- CHECK(exception->StrictEquals(v8_str("boom")));
- // TODO(neis): Check object identity.
+ CHECK_EQ(Module::kUninstantiated, module->GetStatus());
}
// Start over again...
- module = ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
+ {
+ Local<String> source_text = v8_str(
+ "import './dep1.js';\n"
+ "export {} from './bar.js';");
+ ScriptOrigin origin = ModuleOrigin(v8_str("file.js"), CcTest::isolate());
+ ScriptCompiler::Source source(source_text, origin);
+ module = ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
+ }
+
+ // dep1.js
+ {
+ Local<String> source_text = v8_str("");
+ ScriptOrigin origin = ModuleOrigin(v8_str("dep1.js"), CcTest::isolate());
+ ScriptCompiler::Source source(source_text, origin);
+ dep1 = ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
+ }
- // Instantiation should fail if a sub-module fails to resolve.
- g_count = 0;
+ // Instantiation should fail because a sub-module fails to resolve.
{
v8::TryCatch inner_try_catch(isolate);
- CHECK(
- module->InstantiateModule(env.local(), FailOnSecondCallResolveCallback)
- .IsNothing());
+ CHECK(module->InstantiateModule(env.local(), ResolveCallback).IsNothing());
CHECK(inner_try_catch.HasCaught());
- CHECK(inner_try_catch.Exception()->StrictEquals(v8_str("booom")));
- CHECK_EQ(Module::kErrored, module->GetStatus());
- Local<Value> exception = module->GetException();
- CHECK(exception->StrictEquals(v8_str("booom")));
+ CHECK(inner_try_catch.Exception()->StrictEquals(v8_str("boom")));
+ CHECK_EQ(Module::kUninstantiated, module->GetStatus());
}
CHECK(!try_catch.HasCaught());
}
+TEST(ModuleInstantiationFailures2) {
+ Isolate* isolate = CcTest::isolate();
+ HandleScope scope(isolate);
+ LocalContext env;
+ v8::TryCatch try_catch(isolate);
+
+ // root1.js
+ Local<Module> root;
+ {
+ Local<String> source_text =
+ v8_str("import './dep1.js'; import './dep2.js'");
+ ScriptOrigin origin = ModuleOrigin(v8_str("root1.js"), CcTest::isolate());
+ ScriptCompiler::Source source(source_text, origin);
+ root = ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
+ }
+
+ // dep1.js
+ {
+ Local<String> source_text = v8_str("export let x = 42");
+ ScriptOrigin origin = ModuleOrigin(v8_str("dep1.js"), CcTest::isolate());
+ ScriptCompiler::Source source(source_text, origin);
+ dep1 = ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
+ }
+
+ // dep2.js
+ {
+ Local<String> source_text = v8_str("import {foo} from './dep3.js'");
+ ScriptOrigin origin = ModuleOrigin(v8_str("dep2.js"), CcTest::isolate());
+ ScriptCompiler::Source source(source_text, origin);
+ dep2 = ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
+ }
+
+ {
+ v8::TryCatch inner_try_catch(isolate);
+ CHECK(root->InstantiateModule(env.local(), ResolveCallback).IsNothing());
+ CHECK(inner_try_catch.HasCaught());
+ CHECK(inner_try_catch.Exception()->StrictEquals(v8_str("boom")));
+ CHECK_EQ(Module::kUninstantiated, root->GetStatus());
+ CHECK_EQ(Module::kUninstantiated, dep1->GetStatus());
+ CHECK_EQ(Module::kUninstantiated, dep2->GetStatus());
+ }
+
+ // Change dep2.js
+ {
+ Local<String> source_text = v8_str("import {foo} from './dep2.js'");
+ ScriptOrigin origin = ModuleOrigin(v8_str("dep2.js"), CcTest::isolate());
+ ScriptCompiler::Source source(source_text, origin);
+ dep2 = ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
+ }
+
+ {
+ v8::TryCatch inner_try_catch(isolate);
+ CHECK(root->InstantiateModule(env.local(), ResolveCallback).IsNothing());
+ CHECK(inner_try_catch.HasCaught());
+ CHECK(!inner_try_catch.Exception()->StrictEquals(v8_str("boom")));
+ CHECK_EQ(Module::kUninstantiated, root->GetStatus());
+ CHECK_EQ(Module::kInstantiated, dep1->GetStatus());
+ CHECK_EQ(Module::kUninstantiated, dep2->GetStatus());
+ }
+
+ // Change dep2.js again
+ {
+ Local<String> source_text = v8_str("import {foo} from './dep3.js'");
+ ScriptOrigin origin = ModuleOrigin(v8_str("dep2.js"), CcTest::isolate());
+ ScriptCompiler::Source source(source_text, origin);
+ dep2 = ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
+ }
+
+ {
+ v8::TryCatch inner_try_catch(isolate);
+ CHECK(root->InstantiateModule(env.local(), ResolveCallback).IsNothing());
+ CHECK(inner_try_catch.HasCaught());
+ CHECK(inner_try_catch.Exception()->StrictEquals(v8_str("boom")));
+ CHECK_EQ(Module::kUninstantiated, root->GetStatus());
+ CHECK_EQ(Module::kInstantiated, dep1->GetStatus());
+ CHECK_EQ(Module::kUninstantiated, dep2->GetStatus());
+ }
+}
+
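The rewritten module tests hinge on two things: a single ResolveCallback that maps './dep1.js' and './dep2.js' to pre-compiled modules (throwing "boom" for anything else), and the new semantics where a failed InstantiateModule leaves every affected module kUninstantiated rather than kErrored, so instantiation can be retried after the graph is repaired. A condensed sketch of that embedder flow (CompileModuleFromSource is a hypothetical helper; v8_str and ModuleOrigin are the test utilities used above):

    Local<Module> CompileModuleFromSource(Isolate* isolate, const char* name,
                                          const char* body) {
      ScriptOrigin origin = ModuleOrigin(v8_str(name), isolate);
      ScriptCompiler::Source source(v8_str(body), origin);
      return ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
    }

    // Usage, mirroring ModuleInstantiationFailures2:
    //   dep1 = CompileModuleFromSource(isolate, "dep1.js", "export let x = 42");
    //   root = CompileModuleFromSource(isolate, "root.js", "import './dep1.js'");
    //   if (root->InstantiateModule(env.local(), ResolveCallback).IsNothing()) {
    //     // Resolution threw; root stays Module::kUninstantiated, so the
    //     // dep modules can be recompiled and InstantiateModule retried.
    //   }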
static MaybeLocal<Module> CompileSpecifierAsModuleResolveCallback(
Local<Context> context, Local<String> specifier, Local<Module> referrer) {
ScriptOrigin origin = ModuleOrigin(v8_str("module.js"), CcTest::isolate());
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index 0ecdbf2dd6..52a7d3ff7a 100644
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -93,7 +93,7 @@ TEST(ScanKeywords) {
CHECK(static_cast<int>(sizeof(buffer)) >= length);
{
auto stream = i::ScannerStream::ForTesting(keyword, length);
- i::Scanner scanner(&unicode_cache, global_use_counts);
+ i::Scanner scanner(&unicode_cache);
scanner.Initialize(stream.get(), false);
CHECK_EQ(key_token.token, scanner.Next());
CHECK_EQ(i::Token::EOS, scanner.Next());
@@ -101,7 +101,7 @@ TEST(ScanKeywords) {
// Removing characters will make keyword matching fail.
{
auto stream = i::ScannerStream::ForTesting(keyword, length - 1);
- i::Scanner scanner(&unicode_cache, global_use_counts);
+ i::Scanner scanner(&unicode_cache);
scanner.Initialize(stream.get(), false);
CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
CHECK_EQ(i::Token::EOS, scanner.Next());
@@ -112,7 +112,7 @@ TEST(ScanKeywords) {
i::MemMove(buffer, keyword, length);
buffer[length] = chars_to_append[j];
auto stream = i::ScannerStream::ForTesting(buffer, length + 1);
- i::Scanner scanner(&unicode_cache, global_use_counts);
+ i::Scanner scanner(&unicode_cache);
scanner.Initialize(stream.get(), false);
CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
CHECK_EQ(i::Token::EOS, scanner.Next());
@@ -122,7 +122,7 @@ TEST(ScanKeywords) {
i::MemMove(buffer, keyword, length);
buffer[length - 1] = '_';
auto stream = i::ScannerStream::ForTesting(buffer, length);
- i::Scanner scanner(&unicode_cache, global_use_counts);
+ i::Scanner scanner(&unicode_cache);
scanner.Initialize(stream.get(), false);
CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
CHECK_EQ(i::Token::EOS, scanner.Next());
@@ -189,7 +189,7 @@ TEST(ScanHTMLEndComments) {
for (int i = 0; tests[i]; i++) {
const char* source = tests[i];
auto stream = i::ScannerStream::ForTesting(source);
- i::Scanner scanner(i_isolate->unicode_cache(), global_use_counts);
+ i::Scanner scanner(i_isolate->unicode_cache());
scanner.Initialize(stream.get(), false);
i::Zone zone(i_isolate->allocator(), ZONE_NAME);
i::AstValueFactory ast_value_factory(&zone,
@@ -208,7 +208,7 @@ TEST(ScanHTMLEndComments) {
for (int i = 0; fail_tests[i]; i++) {
const char* source = fail_tests[i];
auto stream = i::ScannerStream::ForTesting(source);
- i::Scanner scanner(i_isolate->unicode_cache(), global_use_counts);
+ i::Scanner scanner(i_isolate->unicode_cache());
scanner.Initialize(stream.get(), false);
i::Zone zone(i_isolate->allocator(), ZONE_NAME);
i::AstValueFactory ast_value_factory(&zone,
@@ -233,7 +233,7 @@ TEST(ScanHtmlComments) {
// Disallow HTML comments.
{
auto stream = i::ScannerStream::ForTesting(src);
- i::Scanner scanner(&unicode_cache, global_use_counts);
+ i::Scanner scanner(&unicode_cache);
scanner.Initialize(stream.get(), true);
CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
CHECK_EQ(i::Token::ILLEGAL, scanner.Next());
@@ -242,7 +242,7 @@ TEST(ScanHtmlComments) {
// Skip HTML comments:
{
auto stream = i::ScannerStream::ForTesting(src);
- i::Scanner scanner(&unicode_cache, global_use_counts);
+ i::Scanner scanner(&unicode_cache);
scanner.Initialize(stream.get(), false);
CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
CHECK_EQ(i::Token::EOS, scanner.Next());
@@ -400,7 +400,7 @@ TEST(StandAlonePreParser) {
uintptr_t stack_limit = i_isolate->stack_guard()->real_climit();
for (int i = 0; programs[i]; i++) {
auto stream = i::ScannerStream::ForTesting(programs[i]);
- i::Scanner scanner(i_isolate->unicode_cache(), global_use_counts);
+ i::Scanner scanner(i_isolate->unicode_cache());
scanner.Initialize(stream.get(), false);
i::Zone zone(i_isolate->allocator(), ZONE_NAME);
@@ -433,7 +433,7 @@ TEST(StandAlonePreParserNoNatives) {
uintptr_t stack_limit = isolate->stack_guard()->real_climit();
for (int i = 0; programs[i]; i++) {
auto stream = i::ScannerStream::ForTesting(programs[i]);
- i::Scanner scanner(isolate->unicode_cache(), global_use_counts);
+ i::Scanner scanner(isolate->unicode_cache());
scanner.Initialize(stream.get(), false);
// Preparser defaults to disallowing natives syntax.
@@ -504,7 +504,7 @@ TEST(RegressChromium62639) {
// failed in debug mode, and sometimes crashed in release mode.
auto stream = i::ScannerStream::ForTesting(program);
- i::Scanner scanner(CcTest::i_isolate()->unicode_cache(), global_use_counts);
+ i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
scanner.Initialize(stream.get(), false);
i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
i::AstValueFactory ast_value_factory(
@@ -579,7 +579,7 @@ TEST(PreParseOverflow) {
uintptr_t stack_limit = isolate->stack_guard()->real_climit();
auto stream = i::ScannerStream::ForTesting(program.get(), kProgramSize);
- i::Scanner scanner(isolate->unicode_cache(), global_use_counts);
+ i::Scanner scanner(isolate->unicode_cache());
scanner.Initialize(stream.get(), false);
i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
@@ -599,7 +599,7 @@ void TestStreamScanner(i::Utf16CharacterStream* stream,
i::Token::Value* expected_tokens,
int skip_pos = 0, // Zero means not skipping.
int skip_to = 0) {
- i::Scanner scanner(CcTest::i_isolate()->unicode_cache(), global_use_counts);
+ i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
scanner.Initialize(stream, false);
int i = 0;
@@ -677,7 +677,7 @@ TEST(StreamScanner) {
void TestScanRegExp(const char* re_source, const char* expected) {
auto stream = i::ScannerStream::ForTesting(re_source);
i::HandleScope scope(CcTest::i_isolate());
- i::Scanner scanner(CcTest::i_isolate()->unicode_cache(), global_use_counts);
+ i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
scanner.Initialize(stream.get(), false);
i::Token::Value start = scanner.peek();
@@ -1078,110 +1078,110 @@ TEST(ScopePositions) {
// Check that 6-byte and 4-byte encodings of UTF-8 strings do not throw
// the preparser off in terms of byte offsets.
// 2 surrogates, encode a character that doesn't need a surrogate.
- {" 'foo\355\240\201\355\260\211';\n"
+ {" 'foo\xED\xA0\x81\xED\xB0\x89';\n"
" (function fun",
"(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE,
i::LanguageMode::kSloppy},
- // 4 byte encoding.
- {" 'foo\360\220\220\212';\n"
+ // 4-byte encoding.
+ {" 'foo\xF0\x90\x90\x8A';\n"
" (function fun",
"(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE,
i::LanguageMode::kSloppy},
- // 3 byte encoding of \u0fff.
- {" 'foo\340\277\277';\n"
+ // 3-byte encoding of \u0FFF.
+ {" 'foo\xE0\xBF\xBF';\n"
" (function fun",
"(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE,
i::LanguageMode::kSloppy},
- // 3 byte surrogate, followed by broken 2-byte surrogate w/ impossible 2nd
+ // 3-byte surrogate, followed by broken 2-byte surrogate w/ impossible 2nd
// byte and last byte missing.
- {" 'foo\355\240\201\355\211';\n"
+ {" 'foo\xED\xA0\x81\xED\x89';\n"
" (function fun",
"(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE,
i::LanguageMode::kSloppy},
- // Broken 3 byte encoding of \u0fff with missing last byte.
- {" 'foo\340\277';\n"
+ // Broken 3-byte encoding of \u0FFF with missing last byte.
+ {" 'foo\xE0\xBF';\n"
" (function fun",
"(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE,
i::LanguageMode::kSloppy},
- // Broken 3 byte encoding of \u0fff with missing 2 last bytes.
- {" 'foo\340';\n"
+ // Broken 3-byte encoding of \u0FFF with missing 2 last bytes.
+ {" 'foo\xE0';\n"
" (function fun",
"(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE,
i::LanguageMode::kSloppy},
- // Broken 3 byte encoding of \u00ff should be a 2 byte encoding.
- {" 'foo\340\203\277';\n"
+ // Broken 3-byte encoding of \u00FF should be a 2-byte encoding.
+ {" 'foo\xE0\x83\xBF';\n"
" (function fun",
"(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE,
i::LanguageMode::kSloppy},
- // Broken 3 byte encoding of \u007f should be a 2 byte encoding.
- {" 'foo\340\201\277';\n"
+ // Broken 3-byte encoding of \u007F should be a 2-byte encoding.
+ {" 'foo\xE0\x81\xBF';\n"
" (function fun",
"(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE,
i::LanguageMode::kSloppy},
// Unpaired lead surrogate.
- {" 'foo\355\240\201';\n"
+ {" 'foo\xED\xA0\x81';\n"
" (function fun",
"(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE,
i::LanguageMode::kSloppy},
- // Unpaired lead surrogate where following code point is a 3 byte
+ // Unpaired lead surrogate where the following code point is a 3-byte
// sequence.
- {" 'foo\355\240\201\340\277\277';\n"
+ {" 'foo\xED\xA0\x81\xE0\xBF\xBF';\n"
" (function fun",
"(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE,
i::LanguageMode::kSloppy},
- // Unpaired lead surrogate where following code point is a 4 byte encoding
- // of a trail surrogate.
- {" 'foo\355\240\201\360\215\260\211';\n"
+ // Unpaired lead surrogate where the following code point is a 4-byte
+ // encoding of a trail surrogate.
+ {" 'foo\xED\xA0\x81\xF0\x8D\xB0\x89';\n"
" (function fun",
"(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE,
i::LanguageMode::kSloppy},
// Unpaired trail surrogate.
- {" 'foo\355\260\211';\n"
+ {" 'foo\xED\xB0\x89';\n"
" (function fun",
"(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE,
i::LanguageMode::kSloppy},
- // 2 byte encoding of \u00ff.
- {" 'foo\303\277';\n"
+ // 2-byte encoding of \u00FF.
+ {" 'foo\xC3\xBF';\n"
" (function fun",
"(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE,
i::LanguageMode::kSloppy},
- // Broken 2 byte encoding of \u00ff with missing last byte.
- {" 'foo\303';\n"
+ // Broken 2-byte encoding of \u00FF with missing last byte.
+ {" 'foo\xC3';\n"
" (function fun",
"(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE,
i::LanguageMode::kSloppy},
- // Broken 2 byte encoding of \u007f should be a 1 byte encoding.
- {" 'foo\301\277';\n"
+ // Broken 2-byte encoding of \u007F should be a 1-byte encoding.
+ {" 'foo\xC1\xBF';\n"
" (function fun",
"(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE,
i::LanguageMode::kSloppy},
- // Illegal 5 byte encoding.
- {" 'foo\370\277\277\277\277';\n"
+ // Illegal 5-byte encoding.
+ {" 'foo\xF8\xBF\xBF\xBF\xBF';\n"
" (function fun",
"(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE,
i::LanguageMode::kSloppy},
- // Illegal 6 byte encoding.
- {" 'foo\374\277\277\277\277\277';\n"
+ // Illegal 6-byte encoding.
+ {" 'foo\xFC\xBF\xBF\xBF\xBF\xBF';\n"
" (function fun",
"(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE,
i::LanguageMode::kSloppy},
- // Illegal 0xfe byte
- {" 'foo\376\277\277\277\277\277\277';\n"
+ // Illegal 0xFE byte
+ {" 'foo\xFE\xBF\xBF\xBF\xBF\xBF\xBF';\n"
" (function fun",
"(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE,
i::LanguageMode::kSloppy},
- // Illegal 0xff byte
- {" 'foo\377\277\277\277\277\277\277\277';\n"
+ // Illegal 0xFF byte
+ {" 'foo\xFF\xBF\xBF\xBF\xBF\xBF\xBF\xBF';\n"
" (function fun",
"(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE,
i::LanguageMode::kSloppy},
{" 'foo';\n"
" (function fun",
- "(a,b) { 'bar\355\240\201\355\260\213'; }", ")();", i::FUNCTION_SCOPE,
+ "(a,b) { 'bar\xED\xA0\x81\xED\xB0\x8B'; }", ")();", i::FUNCTION_SCOPE,
i::LanguageMode::kSloppy},
{" 'foo';\n"
" (function fun",
- "(a,b) { 'bar\360\220\220\214'; }", ")();", i::FUNCTION_SCOPE,
+ "(a,b) { 'bar\xF0\x90\x90\x8C'; }", ")();", i::FUNCTION_SCOPE,
i::LanguageMode::kSloppy},
{nullptr, nullptr, nullptr, i::EVAL_SCOPE, i::LanguageMode::kSloppy}};
@@ -1315,9 +1315,12 @@ enum ParserFlag {
kAllowNatives,
kAllowHarmonyFunctionSent,
kAllowHarmonyPublicFields,
+ kAllowHarmonyPrivateFields,
+ kAllowHarmonyStaticFields,
kAllowHarmonyDynamicImport,
- kAllowHarmonyAsyncIteration,
kAllowHarmonyImportMeta,
+ kAllowHarmonyDoExpressions,
+ kAllowHarmonyOptionalCatchBinding,
};
enum ParserSyncTestResult {
@@ -1330,9 +1333,13 @@ void SetGlobalFlags(i::EnumSet<ParserFlag> flags) {
i::FLAG_allow_natives_syntax = flags.Contains(kAllowNatives);
i::FLAG_harmony_function_sent = flags.Contains(kAllowHarmonyFunctionSent);
i::FLAG_harmony_public_fields = flags.Contains(kAllowHarmonyPublicFields);
+ i::FLAG_harmony_private_fields = flags.Contains(kAllowHarmonyPrivateFields);
+ i::FLAG_harmony_static_fields = flags.Contains(kAllowHarmonyStaticFields);
i::FLAG_harmony_dynamic_import = flags.Contains(kAllowHarmonyDynamicImport);
i::FLAG_harmony_import_meta = flags.Contains(kAllowHarmonyImportMeta);
- i::FLAG_harmony_async_iteration = flags.Contains(kAllowHarmonyAsyncIteration);
+ i::FLAG_harmony_do_expressions = flags.Contains(kAllowHarmonyDoExpressions);
+ i::FLAG_harmony_optional_catch_binding =
+ flags.Contains(kAllowHarmonyOptionalCatchBinding);
}
void SetParserFlags(i::PreParser* parser, i::EnumSet<ParserFlag> flags) {
@@ -1341,12 +1348,18 @@ void SetParserFlags(i::PreParser* parser, i::EnumSet<ParserFlag> flags) {
flags.Contains(kAllowHarmonyFunctionSent));
parser->set_allow_harmony_public_fields(
flags.Contains(kAllowHarmonyPublicFields));
+ parser->set_allow_harmony_private_fields(
+ flags.Contains(kAllowHarmonyPrivateFields));
+ parser->set_allow_harmony_static_fields(
+ flags.Contains(kAllowHarmonyStaticFields));
parser->set_allow_harmony_dynamic_import(
flags.Contains(kAllowHarmonyDynamicImport));
parser->set_allow_harmony_import_meta(
flags.Contains(kAllowHarmonyImportMeta));
- parser->set_allow_harmony_async_iteration(
- flags.Contains(kAllowHarmonyAsyncIteration));
+ parser->set_allow_harmony_do_expressions(
+ flags.Contains(kAllowHarmonyDoExpressions));
+ parser->set_allow_harmony_optional_catch_binding(
+ flags.Contains(kAllowHarmonyOptionalCatchBinding));
}
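Every ParserFlag is plumbed twice: SetGlobalFlags mirrors it into the matching i::FLAG_* global, which the full parser reads, and SetParserFlags forwards it to the preparser's set_allow_* setter, keeping both front ends in sync. A small usage sketch under that assumption (the harness normally does this inside RunParserSyncTest rather than in test bodies):

    // Sketch: enable two experimental-syntax flags for one sync run.
    i::EnumSet<ParserFlag> flags;
    flags.Add(kAllowHarmonyPublicFields);
    flags.Add(kAllowHarmonyStaticFields);
    SetGlobalFlags(flags);  // full parser sees i::FLAG_harmony_* = true
    // The preparser path then receives the same set:
    //   SetParserFlags(&preparser, flags);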
void TestParserSyncWithFlags(i::Handle<i::String> source,
@@ -1362,7 +1375,7 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
// Preparse the data.
i::PendingCompilationErrorHandler pending_error_handler;
if (test_preparser) {
- i::Scanner scanner(isolate->unicode_cache(), global_use_counts);
+ i::Scanner scanner(isolate->unicode_cache());
std::unique_ptr<i::Utf16CharacterStream> stream(
i::ScannerStream::For(source));
i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
@@ -1403,32 +1416,30 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
isolate->clear_pending_exception();
if (result == kSuccess) {
- v8::base::OS::Print(
+ FATAL(
"Parser failed on:\n"
"\t%s\n"
"with error:\n"
"\t%s\n"
"However, we expected no error.",
source->ToCString().get(), message_string->ToCString().get());
- CHECK(false);
}
if (test_preparser && !pending_error_handler.has_pending_error()) {
- v8::base::OS::Print(
+ FATAL(
"Parser failed on:\n"
"\t%s\n"
"with error:\n"
"\t%s\n"
"However, the preparser succeeded",
source->ToCString().get(), message_string->ToCString().get());
- CHECK(false);
}
// Check that preparser and parser produce the same error.
if (test_preparser && !ignore_error_msg) {
i::Handle<i::String> preparser_message =
pending_error_handler.FormatErrorMessageForTest(CcTest::i_isolate());
if (!i::String::Equals(message_string, preparser_message)) {
- v8::base::OS::Print(
+ FATAL(
"Expected parser and preparser to produce the same error on:\n"
"\t%s\n"
"However, found the following error messages\n"
@@ -1436,11 +1447,10 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
"\tpreparser: %s\n",
source->ToCString().get(), message_string->ToCString().get(),
preparser_message->ToCString().get());
- CHECK(false);
}
}
} else if (test_preparser && pending_error_handler.has_pending_error()) {
- v8::base::OS::Print(
+ FATAL(
"Preparser failed on:\n"
"\t%s\n"
"with error:\n"
@@ -1450,14 +1460,12 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
pending_error_handler.FormatErrorMessageForTest(CcTest::i_isolate())
->ToCString()
.get());
- CHECK(false);
} else if (result == kError) {
- v8::base::OS::Print(
+ FATAL(
"Expected error on:\n"
"\t%s\n"
"However, parser and preparser succeeded",
source->ToCString().get());
- CHECK(false);
}
}
@@ -2387,12 +2395,11 @@ TEST(DontRegressPreParserDataSizes) {
i::ParseData* pd = i::ParseData::FromCachedData(sd);
if (pd->FunctionCount() != test_cases[i].functions) {
- v8::base::OS::Print(
+ FATAL(
"Expected preparse data for program:\n"
"\t%s\n"
"to contain %d functions, however, received %d functions.\n",
program, test_cases[i].functions, pd->FunctionCount());
- CHECK(false);
}
delete sd;
delete pd;
@@ -2450,6 +2457,66 @@ TEST(NoErrorsTryCatchFinally) {
RunParserSyncTest(context_data, statement_data, kSuccess);
}
+TEST(OptionalCatchBinding) {
+ // clang-format off
+ const char* context_data[][2] = {
+ {"", ""},
+ {"'use strict';", ""},
+ {"try {", "} catch (e) { }"},
+ {"try {} catch (e) {", "}"},
+ {"try {", "} catch ({e}) { }"},
+ {"try {} catch ({e}) {", "}"},
+ {"function f() {", "}"},
+ { NULL, NULL }
+ };
+
+ const char* statement_data[] = {
+ "try { } catch { }",
+ "try { } catch { } finally { }",
+ "try { let e; } catch { let e; }",
+ "try { let e; } catch { let e; } finally { let e; }",
+ NULL
+ };
+ // clang-format on
+
+ // No error with flag
+ static const ParserFlag flags[] = {kAllowHarmonyOptionalCatchBinding};
+ RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0, flags,
+ arraysize(flags));
+
+ // Still an error without flag
+ RunParserSyncTest(context_data, statement_data, kError);
+}
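The new test covers the optional-catch-binding proposal: `catch { }` with no parameter parses only under kAllowHarmonyOptionalCatchBinding. Assuming the flag is also wired through to execution, the same syntax can be exercised end to end in a cctest (a hedged sketch using the CompileRun helper):

    // Sketch: run optional-catch-binding source with the flag enabled.
    i::FLAG_harmony_optional_catch_binding = true;
    v8::Local<v8::Value> result = CompileRun(
        "var caught = false;"
        "try { throw 1; } catch { caught = true; }"  // no (e) binding
        "caught");
    CHECK(result->IsTrue());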
+
+TEST(OptionalCatchBindingInDoExpression) {
+ // This is an edge case not otherwise hit: a catch scope in a parameter
+ // expression which needs its own scope.
+ // clang-format off
+ const char* context_data[][2] = {
+ {"((x = (eval(''), do {", "}))=>{})()"},
+ { NULL, NULL }
+ };
+
+ const char* statement_data[] = {
+ "try { } catch { }",
+ "try { } catch { } finally { }",
+ "try { let e; } catch { let e; }",
+ "try { let e; } catch { let e; } finally { let e; }",
+ NULL
+ };
+ // clang-format on
+
+ // No error with flag
+ static const ParserFlag do_and_catch_flags[] = {
+ kAllowHarmonyDoExpressions, kAllowHarmonyOptionalCatchBinding};
+ RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
+ do_and_catch_flags, arraysize(do_and_catch_flags));
+
+ // Still an error without flag
+ static const ParserFlag do_flag[] = {kAllowHarmonyDoExpressions};
+ RunParserSyncTest(context_data, statement_data, kError, NULL, 0, do_flag,
+ arraysize(do_flag));
+}
TEST(ErrorsRegexpLiteral) {
const char* context_data[][2] = {{"var r = ", ""}, {nullptr, nullptr}};
@@ -2566,7 +2633,6 @@ TEST(ErrorsObjectLiteralChecking) {
"static async get x : 0",
"async static x(){}",
"*async x(){}",
- "async *x(){}",
"async x*(){}",
"async x : 0",
"async 0 : 0",
@@ -2869,7 +2935,7 @@ TEST(FuncNameInferrerTwoByte) {
"%FunctionGetInferredName(obj1.oXj2.foo1)");
uint16_t* two_byte_name = AsciiToTwoByteString("obj1.oXj2.foo1");
// Make it really non-Latin1 (replace the Xs with a non-Latin1 character).
- two_byte_source[14] = two_byte_source[78] = two_byte_name[6] = 0x010d;
+ two_byte_source[14] = two_byte_source[78] = two_byte_name[6] = 0x010D;
v8::Local<v8::String> source =
v8::String::NewFromTwoByte(isolate, two_byte_source,
v8::NewStringType::kNormal)
@@ -2888,7 +2954,7 @@ TEST(FuncNameInferrerTwoByte) {
TEST(FuncNameInferrerEscaped) {
// The same as FuncNameInferrerTwoByte, except that we express the two-byte
- // character as a unicode escape.
+ // character as a Unicode escape.
i::FLAG_allow_natives_syntax = true;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
@@ -2898,7 +2964,7 @@ TEST(FuncNameInferrerEscaped) {
"%FunctionGetInferredName(obj1.o\\u010dj2.foo1)");
uint16_t* two_byte_name = AsciiToTwoByteString("obj1.oXj2.foo1");
// Fix to correspond to the non-ASCII name in two_byte_source.
- two_byte_name[6] = 0x010d;
+ two_byte_name[6] = 0x010D;
v8::Local<v8::String> source =
v8::String::NewFromTwoByte(isolate, two_byte_source,
v8::NewStringType::kNormal)
@@ -3435,81 +3501,81 @@ TEST(MaybeAssignedInsideLoop) {
{1, "for (j of x) { [foo] = [j] }", top},
{1, "for (j of x) { var foo = j }", top},
{1, "for (j of x) { var [foo] = [j] }", top},
- {0, "for (j of x) { let foo = j }", {2}},
- {0, "for (j of x) { let [foo] = [j] }", {2}},
- {0, "for (j of x) { const foo = j }", {2}},
- {0, "for (j of x) { const [foo] = [j] }", {2}},
- {0, "for (j of x) { function foo() {return j} }", {2}},
+ {0, "for (j of x) { let foo = j }", {1}},
+ {0, "for (j of x) { let [foo] = [j] }", {1}},
+ {0, "for (j of x) { const foo = j }", {1}},
+ {0, "for (j of x) { const [foo] = [j] }", {1}},
+ {0, "for (j of x) { function foo() {return j} }", {1}},
{1, "for ({j} of x) { foo = j }", top},
{1, "for ({j} of x) { [foo] = [j] }", top},
{1, "for ({j} of x) { var foo = j }", top},
{1, "for ({j} of x) { var [foo] = [j] }", top},
- {0, "for ({j} of x) { let foo = j }", {2}},
- {0, "for ({j} of x) { let [foo] = [j] }", {2}},
- {0, "for ({j} of x) { const foo = j }", {2}},
- {0, "for ({j} of x) { const [foo] = [j] }", {2}},
- {0, "for ({j} of x) { function foo() {return j} }", {2}},
+ {0, "for ({j} of x) { let foo = j }", {1}},
+ {0, "for ({j} of x) { let [foo] = [j] }", {1}},
+ {0, "for ({j} of x) { const foo = j }", {1}},
+ {0, "for ({j} of x) { const [foo] = [j] }", {1}},
+ {0, "for ({j} of x) { function foo() {return j} }", {1}},
{1, "for (var j of x) { foo = j }", top},
{1, "for (var j of x) { [foo] = [j] }", top},
{1, "for (var j of x) { var foo = j }", top},
{1, "for (var j of x) { var [foo] = [j] }", top},
- {0, "for (var j of x) { let foo = j }", {2}},
- {0, "for (var j of x) { let [foo] = [j] }", {2}},
- {0, "for (var j of x) { const foo = j }", {2}},
- {0, "for (var j of x) { const [foo] = [j] }", {2}},
- {0, "for (var j of x) { function foo() {return j} }", {2}},
+ {0, "for (var j of x) { let foo = j }", {1}},
+ {0, "for (var j of x) { let [foo] = [j] }", {1}},
+ {0, "for (var j of x) { const foo = j }", {1}},
+ {0, "for (var j of x) { const [foo] = [j] }", {1}},
+ {0, "for (var j of x) { function foo() {return j} }", {1}},
{1, "for (var {j} of x) { foo = j }", top},
{1, "for (var {j} of x) { [foo] = [j] }", top},
{1, "for (var {j} of x) { var foo = j }", top},
{1, "for (var {j} of x) { var [foo] = [j] }", top},
- {0, "for (var {j} of x) { let foo = j }", {2}},
- {0, "for (var {j} of x) { let [foo] = [j] }", {2}},
- {0, "for (var {j} of x) { const foo = j }", {2}},
- {0, "for (var {j} of x) { const [foo] = [j] }", {2}},
- {0, "for (var {j} of x) { function foo() {return j} }", {2}},
+ {0, "for (var {j} of x) { let foo = j }", {1}},
+ {0, "for (var {j} of x) { let [foo] = [j] }", {1}},
+ {0, "for (var {j} of x) { const foo = j }", {1}},
+ {0, "for (var {j} of x) { const [foo] = [j] }", {1}},
+ {0, "for (var {j} of x) { function foo() {return j} }", {1}},
{1, "for (let j of x) { foo = j }", top},
{1, "for (let j of x) { [foo] = [j] }", top},
{1, "for (let j of x) { var foo = j }", top},
{1, "for (let j of x) { var [foo] = [j] }", top},
- {0, "for (let j of x) { let foo = j }", {0, 2, 0}},
- {0, "for (let j of x) { let [foo] = [j] }", {0, 2, 0}},
- {0, "for (let j of x) { const foo = j }", {0, 2, 0}},
- {0, "for (let j of x) { const [foo] = [j] }", {0, 2, 0}},
- {0, "for (let j of x) { function foo() {return j} }", {0, 2, 0}},
+ {0, "for (let j of x) { let foo = j }", {0, 1, 0}},
+ {0, "for (let j of x) { let [foo] = [j] }", {0, 1, 0}},
+ {0, "for (let j of x) { const foo = j }", {0, 1, 0}},
+ {0, "for (let j of x) { const [foo] = [j] }", {0, 1, 0}},
+ {0, "for (let j of x) { function foo() {return j} }", {0, 1, 0}},
{1, "for (let {j} of x) { foo = j }", top},
{1, "for (let {j} of x) { [foo] = [j] }", top},
{1, "for (let {j} of x) { var foo = j }", top},
{1, "for (let {j} of x) { var [foo] = [j] }", top},
- {0, "for (let {j} of x) { let foo = j }", {0, 2, 0}},
- {0, "for (let {j} of x) { let [foo] = [j] }", {0, 2, 0}},
- {0, "for (let {j} of x) { const foo = j }", {0, 2, 0}},
- {0, "for (let {j} of x) { const [foo] = [j] }", {0, 2, 0}},
- {0, "for (let {j} of x) { function foo() {return j} }", {0, 2, 0}},
+ {0, "for (let {j} of x) { let foo = j }", {0, 1, 0}},
+ {0, "for (let {j} of x) { let [foo] = [j] }", {0, 1, 0}},
+ {0, "for (let {j} of x) { const foo = j }", {0, 1, 0}},
+ {0, "for (let {j} of x) { const [foo] = [j] }", {0, 1, 0}},
+ {0, "for (let {j} of x) { function foo() {return j} }", {0, 1, 0}},
{1, "for (const j of x) { foo = j }", top},
{1, "for (const j of x) { [foo] = [j] }", top},
{1, "for (const j of x) { var foo = j }", top},
{1, "for (const j of x) { var [foo] = [j] }", top},
- {0, "for (const j of x) { let foo = j }", {0, 2, 0}},
- {0, "for (const j of x) { let [foo] = [j] }", {0, 2, 0}},
- {0, "for (const j of x) { const foo = j }", {0, 2, 0}},
- {0, "for (const j of x) { const [foo] = [j] }", {0, 2, 0}},
- {0, "for (const j of x) { function foo() {return j} }", {0, 2, 0}},
+ {0, "for (const j of x) { let foo = j }", {0, 1, 0}},
+ {0, "for (const j of x) { let [foo] = [j] }", {0, 1, 0}},
+ {0, "for (const j of x) { const foo = j }", {0, 1, 0}},
+ {0, "for (const j of x) { const [foo] = [j] }", {0, 1, 0}},
+ {0, "for (const j of x) { function foo() {return j} }", {0, 1, 0}},
{1, "for (const {j} of x) { foo = j }", top},
{1, "for (const {j} of x) { [foo] = [j] }", top},
{1, "for (const {j} of x) { var foo = j }", top},
{1, "for (const {j} of x) { var [foo] = [j] }", top},
- {0, "for (const {j} of x) { let foo = j }", {0, 2, 0}},
- {0, "for (const {j} of x) { let [foo] = [j] }", {0, 2, 0}},
- {0, "for (const {j} of x) { const foo = j }", {0, 2, 0}},
- {0, "for (const {j} of x) { const [foo] = [j] }", {0, 2, 0}},
- {0, "for (const {j} of x) { function foo() {return j} }", {0, 2, 0}},
+ {0, "for (const {j} of x) { let foo = j }", {0, 1, 0}},
+ {0, "for (const {j} of x) { let [foo] = [j] }", {0, 1, 0}},
+ {0, "for (const {j} of x) { const foo = j }", {0, 1, 0}},
+ {0, "for (const {j} of x) { const [foo] = [j] }", {0, 1, 0}},
+ {0, "for (const {j} of x) { function foo() {return j} }", {0, 1, 0}},
{1, "for (j in x) { foo = j }", top},
{1, "for (j in x) { [foo] = [j] }", top},
@@ -3843,24 +3909,6 @@ TEST(LineOrParagraphSeparatorAsLineTerminator) {
RunParserSyncTest(context_data, statement_data, kError);
}
-TEST(LineOrParagraphSeparatorAsLineTerminatorUseCount) {
- i::Isolate* isolate = CcTest::i_isolate();
- i::HandleScope scope(isolate);
- LocalContext env;
- int use_counts[v8::Isolate::kUseCounterFeatureCount] = {};
- global_use_counts = use_counts;
- CcTest::isolate()->SetUseCounterCallback(MockUseCounterCallback);
- CompileRun("");
- CHECK_EQ(0, use_counts[v8::Isolate::UseCounterFeature::
- kLineOrParagraphSeparatorAsLineTerminator]);
- CompileRun("// Foo\xE2\x80\xA8"); // "// Foo<U+2028>"
- CHECK_LT(0, use_counts[v8::Isolate::UseCounterFeature::
- kLineOrParagraphSeparatorAsLineTerminator]);
- CompileRun("// Foo\xE2\x80\xA9"); // "// Foo<U+2029>"
- CHECK_LT(1, use_counts[v8::Isolate::UseCounterFeature::
- kLineOrParagraphSeparatorAsLineTerminator]);
-}
-
TEST(ErrorsArrowFormalParameters) {
const char* context_data[][2] = {
{ "()", "=>{}" },
@@ -4639,6 +4687,7 @@ TEST(ClassBodyNoErrors) {
"*g() {};",
"; *g() {}",
"*g() {}; *h(x) {}",
+ "async *x(){}",
"static() {}",
"get static() {}",
"set static(v) {}",
@@ -4659,6 +4708,7 @@ TEST(ClassBodyNoErrors) {
"*async(){}",
"static async(){}",
"static *async(){}",
+ "static async *x(){}",
// Escaped 'static' should be allowed anywhere
// static-as-PropertyName is.
@@ -4717,6 +4767,96 @@ TEST(ClassPropertyNameNoErrors) {
RunParserSyncTest(context_data, name_data, kSuccess);
}
+TEST(StaticClassFieldsNoErrors) {
+ // clang-format off
+ // Tests proposed class fields syntax.
+ const char* context_data[][2] = {{"(class {", "});"},
+ {"(class extends Base {", "});"},
+ {"class C {", "}"},
+ {"class C extends Base {", "}"},
+ {nullptr, nullptr}};
+ const char* class_body_data[] = {
+ // Basic syntax
+ "static a = 0;",
+ "static a = 0; b",
+ "static a = 0; b(){}",
+ "static a = 0; *b(){}",
+ "static a = 0; ['b'](){}",
+ "static a;",
+ "static a; b;",
+ "static a; b(){}",
+ "static a; *b(){}",
+ "static a; ['b'](){}",
+ "static ['a'] = 0;",
+ "static ['a'] = 0; b",
+ "static ['a'] = 0; b(){}",
+ "static ['a'] = 0; *b(){}",
+ "static ['a'] = 0; ['b'](){}",
+ "static ['a'];",
+ "static ['a']; b;",
+ "static ['a']; b(){}",
+ "static ['a']; *b(){}",
+ "static ['a']; ['b'](){}",
+
+ "static 0 = 0;",
+ "static 0;",
+ "static 'a' = 0;",
+ "static 'a';",
+
+ // ASI
+ "static a = 0\n",
+ "static a = 0\n b",
+ "static a = 0\n b(){}",
+ "static a\n",
+ "static a\n b\n",
+ "static a\n b(){}",
+ "static a\n *b(){}",
+ "static a\n ['b'](){}",
+ "static ['a'] = 0\n",
+ "static ['a'] = 0\n b",
+ "static ['a'] = 0\n b(){}",
+ "static ['a']\n",
+ "static ['a']\n b\n",
+ "static ['a']\n b(){}",
+ "static ['a']\n *b(){}",
+ "static ['a']\n ['b'](){}",
+
+ "static a = function t() { arguments; }",
+ "static a = () => function t() { arguments; }",
+
+ // ASI edge cases
+ "static a\n get",
+ "static get\n *a(){}",
+ "static a\n static",
+
+ // Misc edge cases
+ "static yield",
+ "static yield = 0",
+ "static yield\n a",
+ "static async;",
+ "static async = 0;",
+ "static async",
+ "static async = 0",
+ "static async\n a(){}", // a field named async, and a method named a.
+ "static async\n a",
+ "static await;",
+ "static await = 0;",
+ "static await\n a",
+ nullptr
+ };
+ // clang-format on
+
+ static const ParserFlag always_flags[] = {kAllowHarmonyPublicFields,
+ kAllowHarmonyStaticFields};
+ RunParserSyncTest(context_data, class_body_data, kSuccess, nullptr, 0,
+ always_flags, arraysize(always_flags));
+
+ // Without the static flag, all of these are errors
+ static const ParserFlag no_static_flags[] = {kAllowHarmonyPublicFields};
+ RunParserSyncTest(context_data, class_body_data, kError, nullptr, 0,
+ no_static_flags, arraysize(no_static_flags));
+}
+
TEST(ClassFieldsNoErrors) {
// clang-format off
// Tests proposed class fields syntax.
@@ -4753,15 +4893,6 @@ TEST(ClassFieldsNoErrors) {
"'a' = 0;",
"'a';",
- "static a = 0;",
- "static a;",
- "static ['a'] = 0",
- "static ['a']",
- "static 0 = 0;",
- "static 0;",
- "static 'a' = 0;",
- "static 'a';",
-
// ASI
"a = 0\n",
"a = 0\n b",
@@ -4785,16 +4916,17 @@ TEST(ClassFieldsNoErrors) {
"get\n *a(){}",
"a\n static",
+ "a = function t() { arguments; }",
+ "a = () => function() { arguments; }",
+
// Misc edge cases
"yield",
"yield = 0",
"yield\n a",
"async;",
"async = 0;",
- "static async;"
"async",
"async = 0",
- "static async",
"async\n a(){}", // a field named async, and a method named a.
"async\n a",
"await;",
@@ -4807,6 +4939,128 @@ TEST(ClassFieldsNoErrors) {
static const ParserFlag always_flags[] = {kAllowHarmonyPublicFields};
RunParserSyncTest(context_data, class_body_data, kSuccess, nullptr, 0,
always_flags, arraysize(always_flags));
+
+ static const ParserFlag static_flags[] = {kAllowHarmonyPublicFields,
+ kAllowHarmonyStaticFields};
+ RunParserSyncTest(context_data, class_body_data, kSuccess, nullptr, 0,
+ static_flags, arraysize(static_flags));
+}
+
+TEST(PrivateClassFieldsNoErrors) {
+ // clang-format off
+ // Tests proposed class fields syntax.
+ const char* context_data[][2] = {{"(class {", "});"},
+ {"(class extends Base {", "});"},
+ {"class C {", "}"},
+ {"class C extends Base {", "}"},
+ {nullptr, nullptr}};
+ const char* class_body_data[] = {
+ // Basic syntax
+ "#a = 0;",
+ "#a = 0; #b",
+ "#a = 0; b",
+ "#a = 0; b(){}",
+ "#a = 0; *b(){}",
+ "#a = 0; ['b'](){}",
+ "#a;",
+ "#a; #b;",
+ "#a; b;",
+ "#a; b(){}",
+ "#a; *b(){}",
+ "#a; ['b'](){}",
+
+ // ASI
+ "#a = 0\n",
+ "#a = 0\n #b",
+ "#a = 0\n b",
+ "#a = 0\n b(){}",
+ "#a\n",
+ "#a\n #b\n",
+ "#a\n b\n",
+ "#a\n b(){}",
+ "#a\n *b(){}",
+ "#a\n ['b'](){}",
+
+ // ASI edge cases
+ "#a\n get",
+ "#get\n *a(){}",
+ "#a\n static",
+
+ "#a = function t() { arguments; }",
+ "#a = () => function() { arguments; }",
+
+ // Misc edge cases
+ "#yield",
+ "#yield = 0",
+ "#yield\n a",
+ "#async;",
+ "#async = 0;",
+ "#async",
+ "#async = 0",
+ "#async\n a(){}", // a field named async, and a method named a.
+ "#async\n a",
+ "#await;",
+ "#await = 0;",
+ "#await\n a",
+ nullptr
+ };
+ // clang-format on
+
+ RunParserSyncTest(context_data, class_body_data, kError);
+
+ static const ParserFlag private_fields[] = {kAllowHarmonyPrivateFields};
+ RunParserSyncTest(context_data, class_body_data, kSuccess, nullptr, 0,
+ private_fields, arraysize(private_fields));
+}
+
+TEST(StaticClassFieldsErrors) {
+ // clang-format off
+ // Tests proposed class fields syntax.
+ const char* context_data[][2] = {{"(class {", "});"},
+ {"(class extends Base {", "});"},
+ {"class C {", "}"},
+ {"class C extends Base {", "}"},
+ {nullptr, nullptr}};
+ const char* class_body_data[] = {
+ "static a : 0",
+ "static a =",
+ "static constructor",
+ "static prototype",
+ "static *a = 0",
+ "static *a",
+ "static get a",
+ "static get\n a",
+ "static yield a",
+ "static async a = 0",
+ "static async a",
+
+ "static a = arguments",
+ "static a = () => arguments",
+ "static a = () => { arguments }",
+ "static a = arguments[0]",
+ "static a = delete arguments[0]",
+ "static a = f(arguments)",
+ "static a = () => () => arguments",
+
+ // ASI requires a linebreak
+ "static a b",
+ "static a = 0 b",
+
+ // ASI requires that the next token is not part of any legal production
+ "static a = 0\n *b(){}",
+ "static a = 0\n ['b'](){}",
+ nullptr
+ };
+ // clang-format on
+
+ static const ParserFlag no_static_flags[] = {kAllowHarmonyPublicFields};
+ RunParserSyncTest(context_data, class_body_data, kError, nullptr, 0,
+ no_static_flags, arraysize(no_static_flags));
+
+ static const ParserFlag always_flags[] = {kAllowHarmonyPublicFields,
+ kAllowHarmonyStaticFields};
+ RunParserSyncTest(context_data, class_body_data, kError, nullptr, 0,
+ always_flags, arraysize(always_flags));
}
TEST(ClassFieldsErrors) {
@@ -4820,22 +5074,22 @@ TEST(ClassFieldsErrors) {
const char* class_body_data[] = {
"a : 0",
"a =",
- "static constructor",
- "static prototype",
"constructor",
"*a = 0",
"*a",
"get a",
"yield a",
- "a : 0;",
- "a =;",
- "*a = 0;",
- "*a;",
- "get a;",
- "yield a;",
"async a = 0",
"async a",
+ "a = arguments",
+ "a = () => arguments",
+ "a = () => { arguments }",
+ "a = arguments[0]",
+ "a = delete arguments[0]",
+ "a = f(arguments)",
+ "a = () => () => arguments",
+
// ASI requires a linebreak
"a b",
"a = 0 b",
@@ -4851,6 +5105,279 @@ TEST(ClassFieldsErrors) {
static const ParserFlag always_flags[] = {kAllowHarmonyPublicFields};
RunParserSyncTest(context_data, class_body_data, kError, nullptr, 0,
always_flags, arraysize(always_flags));
+
+ static const ParserFlag static_flags[] = {kAllowHarmonyPublicFields,
+ kAllowHarmonyStaticFields};
+ RunParserSyncTest(context_data, class_body_data, kError, nullptr, 0,
+ static_flags, arraysize(static_flags));
+}
+
+TEST(PrivateClassFieldsErrors) {
+ // clang-format off
+ // Tests proposed class fields syntax.
+ const char* context_data[][2] = {{"(class {", "});"},
+ {"(class extends Base {", "});"},
+ {"class C {", "}"},
+ {"class C extends Base {", "}"},
+ {nullptr, nullptr}};
+ const char* class_body_data[] = {
+ "#a : 0",
+ "#a =",
+ "#*a = 0",
+ "#*a",
+ "#get a",
+ "#yield a",
+ "#async a = 0",
+ "#async a",
+
+ "# a = 0",
+ "#a() { }",
+ "get #a() { }",
+ "#get a() { }",
+ "set #a() { }",
+ "#set a() { }",
+ "*#a() { }",
+ "#*a() { }",
+ "async #a() { }",
+ "async *#a() { }",
+ "async #*a() { }",
+
+ "#0 = 0;",
+ "#0;",
+ "#'a' = 0;",
+ "#'a';",
+
+ "#['a']",
+ "#['a'] = 1",
+ "#[a]",
+ "#[a] = 1",
+
+ "#a = arguments",
+ "#a = () => arguments",
+ "#a = () => { arguments }",
+ "#a = arguments[0]",
+ "#a = delete arguments[0]",
+ "#a = f(arguments)",
+ "#a = () => () => arguments",
+
+ // ASI requires a linebreak
+ "#a b",
+ "#a = 0 b",
+
+ // ASI requires that the next token is not part of any legal production
+ "#a = 0\n *b(){}",
+ "#a = 0\n ['b'](){}",
+ nullptr
+ };
+ // clang-format on
+
+ RunParserSyncTest(context_data, class_body_data, kError);
+
+ static const ParserFlag private_fields[] = {kAllowHarmonyPrivateFields};
+ RunParserSyncTest(context_data, class_body_data, kError, nullptr, 0,
+ private_fields, arraysize(private_fields));
+}
+
+TEST(PrivateStaticClassFieldsErrors) {
+ // clang-format off
+ // Tests proposed class fields syntax.
+ const char* context_data[][2] = {{"(class {", "});"},
+ {"(class extends Base {", "});"},
+ {"class C {", "}"},
+ {"class C extends Base {", "}"},
+ {nullptr, nullptr}};
+ const char* class_body_data[] = {
+ // Basic syntax
+ "static #a = 0;",
+ "static #a = 0; b",
+ "static #a = 0; #b",
+ "static #a = 0; b(){}",
+ "static #a = 0; *b(){}",
+ "static #a = 0; ['b'](){}",
+ "static #a;",
+ "static #a; b;",
+ "static #a; b(){}",
+ "static #a; *b(){}",
+ "static #a; ['b'](){}",
+ "static #['a'] = 0;",
+ "static #['a'] = 0; b",
+ "static #['a'] = 0; #b",
+ "static #['a'] = 0; b(){}",
+ "static #['a'] = 0; *b(){}",
+ "static #['a'] = 0; ['b'](){}",
+ "static #['a'];",
+ "static #['a']; b;",
+ "static #['a']; #b;",
+ "static #['a']; b(){}",
+ "static #['a']; *b(){}",
+ "static #['a']; ['b'](){}",
+
+ "static #0 = 0;",
+ "static #0;",
+ "static #'a' = 0;",
+ "static #'a';",
+
+ "static # a = 0",
+ "static #a() { }",
+ "static get #a() { }",
+ "static #get a() { }",
+ "static set #a() { }",
+ "static #set a() { }",
+ "static *#a() { }",
+ "static #*a() { }",
+ "static async #a() { }",
+ "static async *#a() { }",
+ "static async #*a() { }",
+
+ // ASI
+ "static #a = 0\n",
+ "static #a = 0\n b",
+ "static #a = 0\n #b",
+ "static #a = 0\n b(){}",
+ "static #a\n",
+ "static #a\n b\n",
+ "static #a\n #b\n",
+ "static #a\n b(){}",
+ "static #a\n *b(){}",
+ "static #a\n ['b'](){}",
+ "static #['a'] = 0\n",
+ "static #['a'] = 0\n b",
+ "static #['a'] = 0\n #b",
+ "static #['a'] = 0\n b(){}",
+ "static #['a']\n",
+ "static #['a']\n b\n",
+ "static #['a']\n #b\n",
+ "static #['a']\n b(){}",
+ "static #['a']\n *b(){}",
+ "static #['a']\n ['b'](){}",
+
+ "static #a = function t() { arguments; }",
+ "static #a = () => function t() { arguments; }",
+
+ // ASI edge cases
+ "static #a\n get",
+ "static #get\n *a(){}",
+ "static #a\n static",
+
+ // Misc edge cases
+ "static #yield",
+ "static #yield = 0",
+ "static #yield\n a",
+ "static #async;",
+ "static #async = 0;",
+ "static #async",
+ "static #async = 0",
+ "static #async\n a(){}", // a field named async, and a method named a.
+ "static #async\n a",
+ "static #await;",
+ "static #await = 0;",
+ "static #await\n a",
+ nullptr
+ };
+ // clang-format on
+
+ RunParserSyncTest(context_data, class_body_data, kError);
+
+ static const ParserFlag public_static_fields[] = {kAllowHarmonyPublicFields,
+ kAllowHarmonyStaticFields};
+ RunParserSyncTest(context_data, class_body_data, kError, nullptr, 0,
+ public_static_fields, arraysize(public_static_fields));
+
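+  // Even with all three flags enabled, static private fields remain
+  // parse errors.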
+ static const ParserFlag private_static_fields[] = {
+ kAllowHarmonyPublicFields, kAllowHarmonyStaticFields,
+ kAllowHarmonyPrivateFields};
+ RunParserSyncTest(context_data, class_body_data, kError, nullptr, 0,
+ private_static_fields, arraysize(private_static_fields));
+}
+
+TEST(PrivateNameNoErrors) {
+ // clang-format off
+ const char* context_data[][2] = {
+ {"", ""},
+ {"\"use strict\";", ""},
+ {nullptr, nullptr}
+ };
+
+ const char* statement_data[] = {
+ "this.#a",
+ "this.#a()",
+ "this.#b.#a",
+ "this.#b.#a()",
+
+ "foo.#a",
+ "foo.#a()",
+ "foo.#b.#a",
+ "foo.#b.#a()",
+
+ "new foo.#a",
+ "new foo.#b.#a",
+ "new foo.#b.#a()",
+
+ "foo.#if;",
+ "foo.#yield;",
+ "foo.#super;",
+ "foo.#interface;",
+ "foo.#eval;",
+ "foo.#arguments;",
+
+ nullptr
+ };
+
+ // clang-format on
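+  // Without the private fields flag, all of these are errors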
+ RunParserSyncTest(context_data, statement_data, kError);
+
+ static const ParserFlag private_fields[] = {kAllowHarmonyPrivateFields};
+ RunParserSyncTest(context_data, statement_data, kSuccess, nullptr, 0,
+ private_fields, arraysize(private_fields));
+}
+
+TEST(PrivateNameErrors) {
+ // clang-format off
+ const char* context_data[][2] = {
+ {"", ""},
+ {"\"use strict\";", ""},
+ {nullptr, nullptr}
+ };
+
+ const char* statement_data[] = {
+ "#foo",
+ "#foo = 1",
+
+ "# a;",
+ "#\n a;",
+ "a, # b",
+ "a, #, b;",
+
+ "foo.#[a];",
+ "foo.#['a'];",
+
+ "foo()#a",
+ "foo()#[a]",
+ "foo()#['a']",
+
+ "super.#a;",
+ "super.#a = 1;",
+ "super.#['a']",
+ "super.#[a]",
+
+ "new.#a",
+ "new.#[a]",
+
+ "foo.#{;",
+ "foo.#};",
+ "foo.#=;",
+ "foo.#888;",
+ "foo.#-;",
+ "foo.#--;",
+ nullptr
+ };
+
+ // clang-format on
+ RunParserSyncTest(context_data, statement_data, kError);
+
+ static const ParserFlag private_fields[] = {kAllowHarmonyPrivateFields};
+ RunParserSyncTest(context_data, statement_data, kError, nullptr, 0,
+ private_fields, arraysize(private_fields));
}
TEST(ClassExpressionErrors) {
@@ -4916,7 +5443,6 @@ TEST(ClassAsyncErrors) {
const char* async_data[] = {
"*async x(){}",
"async *(){}",
- "async *x(){}",
"async get x(){}",
"async set x(y){}",
"async x : 0",
@@ -4926,7 +5452,6 @@ TEST(ClassAsyncErrors) {
"static *async x(){}",
"static async *(){}",
- "static async *x(){}",
"static async get x(){}",
"static async set x(y){}",
"static async x : 0",
@@ -5358,9 +5883,9 @@ TEST(InvalidUnicodeEscapes) {
// Braces gone wrong
"var foob\\u{c481r = 0;", "var foob\\uc481}r = 0;", "var \\u{0052oo = 0;",
"var \\u0052}oo = 0;", "\"foob\\u{c481r\"", "var foob\\u{}ar = 0;",
- // Too high value for the unicode escape
+ // Too high value for the Unicode code point escape
"\"\\u{110000}\"",
- // Not an unicode escape
+ // Not a Unicode code point escape
"var foob\\v1234r = 0;", "var foob\\U1234r = 0;",
"var foob\\v{1234}r = 0;", "var foob\\U{1234}r = 0;", nullptr};
RunParserSyncTest(context_data, data, kError);
@@ -5378,11 +5903,10 @@ TEST(UnicodeEscapes) {
"var foob\\uc481r = 0;", "var foob\\u{c481}r = 0;",
// String with an escape
"\"foob\\uc481r\"", "\"foob\\{uc481}r\"",
- // This character is a valid unicode character, representable as a
- // surrogate
- // pair, not representable as 4 hex digits.
+ // This character is a valid Unicode character, representable as a
+ // surrogate pair, not representable as 4 hex digits.
"\"foo\\u{10e6d}\"",
- // Max value for the unicode escape
+ // Max value for the Unicode code point escape
"\"\\u{10ffff}\"", nullptr};
RunParserSyncTest(context_data, data, kSuccess);
}
@@ -5837,14 +6361,13 @@ TEST(BasicImportExportParsing) {
.ToHandleChecked());
isolate->clear_pending_exception();
- v8::base::OS::Print(
+ FATAL(
"Parser failed on:\n"
"\t%s\n"
"with error:\n"
"\t%s\n"
"However, we expected no error.",
source->ToCString().get(), message_string->ToCString().get());
- CHECK(false);
}
}
@@ -6243,9 +6766,9 @@ TEST(ModuleParsingInternals) {
CHECK(declarations->AtForTest(8)->proxy()->raw_name()->IsOneByteEqualTo(
"nonexport"));
- CHECK(declarations->AtForTest(8)->proxy()->var()->binding_needs_init());
- CHECK(declarations->AtForTest(8)->proxy()->var()->location() !=
- i::VariableLocation::MODULE);
+ CHECK(!declarations->AtForTest(8)->proxy()->var()->binding_needs_init());
+ CHECK(declarations->AtForTest(8)->proxy()->var()->location() ==
+ i::VariableLocation::LOCAL);
CHECK(
declarations->AtForTest(9)->proxy()->raw_name()->IsOneByteEqualTo("mm"));
@@ -6296,7 +6819,7 @@ TEST(ModuleParsingInternals) {
CHECK_EQ(4, elem.second.index);
CHECK_EQ(370, elem.second.position);
} else {
- CHECK(false);
+ UNREACHABLE();
}
}
@@ -6942,19 +7465,12 @@ TEST(DestructuringNegativeTests) {
"{...[ x = 5 ] }",
"{...x.f }",
"{...x[0] }",
+ "async function* a() {}",
nullptr
};
- const char* async_gen_data[] = {
- "async function* a() {}",
- nullptr
- };
-
// clang-format on
RunParserSyncTest(context_data, data, kError);
- static const ParserFlag async_gen_flags[] = {kAllowHarmonyAsyncIteration};
- RunParserSyncTest(context_data, async_gen_data, kError, nullptr, 0,
- async_gen_flags, arraysize(async_gen_flags));
}
{ // All modes.
@@ -8210,6 +8726,8 @@ TEST(FunctionDeclarationError) {
"label: function* f() { }",
"if (true) async function f() { }",
"label: async function f() { }",
+ "if (true) async function* f() { }",
+ "label: async function* f() { }",
nullptr
};
// Valid only in sloppy mode.
@@ -8231,20 +8749,6 @@ TEST(FunctionDeclarationError) {
// In sloppy mode, sloppy_data is successful
RunParserSyncTest(sloppy_context, error_data, kError);
RunParserSyncTest(sloppy_context, sloppy_data, kSuccess);
-
- // No single statement async iterators
- // clang-format off
- const char* async_iterator_data[] = {
- "if (true) async function* f() { }",
- "label: async function* f() { }",
- nullptr,
- };
- // clang-format on
- static const ParserFlag flags[] = {kAllowHarmonyAsyncIteration};
- RunParserSyncTest(sloppy_context, async_iterator_data, kError, nullptr, 0,
- flags, arraysize(flags));
- RunParserSyncTest(strict_context, async_iterator_data, kError, nullptr, 0,
- flags, arraysize(flags));
}
TEST(ExponentiationOperator) {
@@ -8410,6 +8914,9 @@ TEST(AsyncAwait) {
"var O = { method(await) { return await; } };",
"var O = { *method() { var await = 1; return await; } };",
"var O = { *method(await) { return await; } };",
+ "var asyncFn = async function*() {}",
+ "async function* f() {}",
+ "var O = { async *method() {} };",
"(function await() {})",
nullptr
@@ -8448,10 +8955,7 @@ TEST(AsyncAwaitErrors) {
"var f = async() => await;",
- "var asyncFn = async function*() {}",
- "async function* f() {}",
"var O = { *async method() {} };",
- "var O = { async *method() {} };",
"var O = { async method*() {} };",
"var asyncFn = async function(x = await 1) { return x; }",
@@ -8571,6 +9075,64 @@ TEST(AsyncAwaitErrors) {
RunParserSyncTest(async_body_context_data, async_body_error_data, kError);
}
+TEST(Regress7173) {
+  // An await expression is an invalid destructuring target and should not
+  // crash.
+
+ // clang-format off
+ const char* error_context_data[][2] = {
+ { "'use strict'; async function f() {", "}" },
+ { "async function f() {", "}" },
+ { "'use strict'; function f() {", "}" },
+ { "function f() {", "}" },
+ { "let f = async() => {", "}" },
+ { "let f = () => {", "}" },
+ { "'use strict'; async function* f() {", "}" },
+ { "async function* f() {", "}" },
+ { "'use strict'; function* f() {", "}" },
+ { "function* f() {", "}" },
+ { nullptr, nullptr }
+ };
+
+ const char* error_data[] = {
+ "var [await f] = [];",
+ "let [await f] = [];",
+ "const [await f] = [];",
+
+ "var [...await f] = [];",
+ "let [...await f] = [];",
+ "const [...await f] = [];",
+
+ "var { await f } = {};",
+ "let { await f } = {};",
+ "const { await f } = {};",
+
+ "var { ...await f } = {};",
+ "let { ...await f } = {};",
+ "const { ...await f } = {};",
+
+ "var { f: await f } = {};",
+ "let { f: await f } = {};",
+ "const { f: await f } = {};"
+
+ "var { f: ...await f } = {};",
+ "let { f: ...await f } = {};",
+ "const { f: ...await f } = {};"
+
+ "var { [f]: await f } = {};",
+ "let { [f]: await f } = {};",
+ "const { [f]: await f } = {};",
+
+ "var { [f]: ...await f } = {};",
+ "let { [f]: ...await f } = {};",
+ "const { [f]: ...await f } = {};",
+
+ nullptr
+ };
+ // clang-format on
+
+ RunParserSyncTest(error_context_data, error_data, kError);
+}
+
TEST(AsyncAwaitFormalParameters) {
// clang-format off
const char* context_for_formal_parameters[][2] = {
@@ -9332,24 +9894,18 @@ TEST(ForAwaitOf) {
nullptr
};
// clang-format on
- static const ParserFlag always_flags[] = {kAllowHarmonyAsyncIteration};
- RunParserSyncTest(context_data, expr_data, kSuccess, nullptr, 0, always_flags,
- arraysize(always_flags));
- RunParserSyncTest(context_data2, expr_data, kSuccess, nullptr, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(context_data, expr_data, kSuccess);
+ RunParserSyncTest(context_data2, expr_data, kSuccess);
- RunParserSyncTest(context_data, var_data, kSuccess, nullptr, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(context_data, var_data, kSuccess);
// TODO(marja): PreParser doesn't report early errors.
// (https://bugs.chromium.org/p/v8/issues/detail?id=2728)
// RunParserSyncTest(context_data2, var_data, kError, nullptr, 0,
// always_flags,
// arraysize(always_flags));
- RunParserSyncTest(context_data, lexical_data, kSuccess, nullptr, 0,
- always_flags, arraysize(always_flags));
- RunParserSyncTest(context_data2, lexical_data, kSuccess, nullptr, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(context_data, lexical_data, kSuccess);
+ RunParserSyncTest(context_data2, lexical_data, kSuccess);
}
TEST(ForAwaitOfErrors) {
@@ -9510,9 +10066,7 @@ TEST(ForAwaitOfErrors) {
nullptr
};
// clang-format on
- static const ParserFlag always_flags[] = {kAllowHarmonyAsyncIteration};
- RunParserSyncTest(context_data, data, kError, nullptr, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(context_data, data, kError);
}
TEST(ForAwaitOfFunctionDeclaration) {
@@ -9535,9 +10089,7 @@ TEST(ForAwaitOfFunctionDeclaration) {
};
// clang-format on
- static const ParserFlag always_flags[] = {kAllowHarmonyAsyncIteration};
- RunParserSyncTest(context_data, data, kError, nullptr, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(context_data, data, kError);
}
TEST(AsyncGenerator) {
@@ -9634,9 +10186,7 @@ TEST(AsyncGenerator) {
};
// clang-format on
- static const ParserFlag always_flags[] = {kAllowHarmonyAsyncIteration};
- RunParserSyncTest(context_data, statement_data, kSuccess, nullptr, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(context_data, statement_data, kSuccess);
}
TEST(AsyncGeneratorErrors) {
@@ -9724,9 +10274,7 @@ TEST(AsyncGeneratorErrors) {
};
// clang-format on
- static const ParserFlag always_flags[] = {kAllowHarmonyAsyncIteration};
- RunParserSyncTest(context_data, statement_data, kError, nullptr, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(context_data, statement_data, kError);
}
TEST(LexicalLoopVariable) {
diff --git a/deps/v8/test/cctest/test-platform.cc b/deps/v8/test/cctest/test-platform.cc
index a50a08b35f..e7ad2d84f1 100644
--- a/deps/v8/test/cctest/test-platform.cc
+++ b/deps/v8/test/cctest/test-platform.cc
@@ -12,45 +12,6 @@ using OS = v8::base::OS;
namespace v8 {
namespace internal {
-TEST(OSAllocateAndFree) {
- size_t page_size = OS::AllocatePageSize();
- CHECK_NE(0, page_size);
-
- // A large allocation, aligned at native allocation granularity.
- const size_t kAllocationSize = 1 * MB;
- void* mem_addr = OS::Allocate(OS::GetRandomMmapAddr(), kAllocationSize,
- page_size, OS::MemoryPermission::kReadWrite);
- CHECK_NOT_NULL(mem_addr);
- CHECK(OS::Free(mem_addr, kAllocationSize));
-
- // A large allocation, aligned significantly beyond native granularity.
- const size_t kBigAlignment = 64 * MB;
- void* aligned_mem_addr =
- OS::Allocate(OS::GetRandomMmapAddr(), kAllocationSize, kBigAlignment,
- OS::MemoryPermission::kReadWrite);
- CHECK_NOT_NULL(aligned_mem_addr);
- CHECK_EQ(aligned_mem_addr, AlignedAddress(aligned_mem_addr, kBigAlignment));
- CHECK(OS::Free(aligned_mem_addr, kAllocationSize));
-}
-
-TEST(OSReserveMemory) {
- size_t page_size = OS::AllocatePageSize();
- const size_t kAllocationSize = 1 * MB;
- void* mem_addr = OS::Allocate(OS::GetRandomMmapAddr(), kAllocationSize,
- page_size, OS::MemoryPermission::kReadWrite);
- CHECK_NE(0, page_size);
- CHECK_NOT_NULL(mem_addr);
- size_t commit_size = OS::CommitPageSize();
- CHECK(OS::SetPermissions(mem_addr, commit_size,
- OS::MemoryPermission::kReadWrite));
- // Check whether we can write to memory.
- int* addr = static_cast<int*>(mem_addr);
- addr[KB - 1] = 2;
- CHECK(OS::SetPermissions(mem_addr, commit_size,
- OS::MemoryPermission::kNoAccess));
- CHECK(OS::Free(mem_addr, kAllocationSize));
-}
-
#ifdef V8_CC_GNU
static uintptr_t sp_addr = 0;
diff --git a/deps/v8/test/cctest/test-profile-generator.cc b/deps/v8/test/cctest/test-profile-generator.cc
index 46c0c4e132..9dda53a063 100644
--- a/deps/v8/test/cctest/test-profile-generator.cc
+++ b/deps/v8/test/cctest/test-profile-generator.cc
@@ -649,8 +649,7 @@ int GetFunctionLineNumber(CpuProfiler& profiler, LocalContext& env,
v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
env->Global()->Get(env.local(), v8_str(name)).ToLocalChecked())));
CodeEntry* func_entry = code_map->FindEntry(func->abstract_code()->address());
- if (!func_entry)
- FATAL(name);
+ if (!func_entry) FATAL("%s", name);
return func_entry->line_number();
}
diff --git a/deps/v8/test/cctest/test-regexp.cc b/deps/v8/test/cctest/test-regexp.cc
index c2e6526f40..5b4e42c26d 100644
--- a/deps/v8/test/cctest/test-regexp.cc
+++ b/deps/v8/test/cctest/test-regexp.cc
@@ -1707,35 +1707,35 @@ TEST(UnicodeRangeSplitter) {
base->Add(CharacterRange::Everything(), &zone);
UnicodeRangeSplitter splitter(&zone, base);
// BMP
- for (uc32 c = 0; c < 0xd800; c++) {
+ for (uc32 c = 0; c < 0xD800; c++) {
CHECK(InClass(c, splitter.bmp()));
CHECK(!InClass(c, splitter.lead_surrogates()));
CHECK(!InClass(c, splitter.trail_surrogates()));
CHECK(!InClass(c, splitter.non_bmp()));
}
// Lead surrogates
- for (uc32 c = 0xd800; c < 0xdbff; c++) {
+ for (uc32 c = 0xD800; c < 0xDBFF; c++) {
CHECK(!InClass(c, splitter.bmp()));
CHECK(InClass(c, splitter.lead_surrogates()));
CHECK(!InClass(c, splitter.trail_surrogates()));
CHECK(!InClass(c, splitter.non_bmp()));
}
// Trail surrogates
- for (uc32 c = 0xdc00; c < 0xdfff; c++) {
+ for (uc32 c = 0xDC00; c < 0xDFFF; c++) {
CHECK(!InClass(c, splitter.bmp()));
CHECK(!InClass(c, splitter.lead_surrogates()));
CHECK(InClass(c, splitter.trail_surrogates()));
CHECK(!InClass(c, splitter.non_bmp()));
}
// BMP
- for (uc32 c = 0xe000; c < 0xffff; c++) {
+ for (uc32 c = 0xE000; c < 0xFFFF; c++) {
CHECK(InClass(c, splitter.bmp()));
CHECK(!InClass(c, splitter.lead_surrogates()));
CHECK(!InClass(c, splitter.trail_surrogates()));
CHECK(!InClass(c, splitter.non_bmp()));
}
// Non-BMP
- for (uc32 c = 0x10000; c < 0x10ffff; c++) {
+ for (uc32 c = 0x10000; c < 0x10FFFF; c++) {
CHECK(!InClass(c, splitter.bmp()));
CHECK(!InClass(c, splitter.lead_surrogates()));
CHECK(!InClass(c, splitter.trail_surrogates()));
diff --git a/deps/v8/test/cctest/test-run-wasm-relocation-arm.cc b/deps/v8/test/cctest/test-run-wasm-relocation-arm.cc
index d1931d87dd..000cf34c87 100644
--- a/deps/v8/test/cctest/test-run-wasm-relocation-arm.cc
+++ b/deps/v8/test/cctest/test-run-wasm-relocation-arm.cc
@@ -40,7 +40,7 @@ TEST(WasmRelocationArmContextReference) {
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- compiler::CSignature0<int32_t> csig;
+ compiler::CSignatureOf<int32_t> csig;
compiler::CodeRunner<int32_t> runnable(isolate, code, &csig);
int32_t ret_value = runnable.Call();
CHECK_EQ(ret_value, imm);
diff --git a/deps/v8/test/cctest/test-run-wasm-relocation-arm64.cc b/deps/v8/test/cctest/test-run-wasm-relocation-arm64.cc
index 0e2b09e43a..59f38e1554 100644
--- a/deps/v8/test/cctest/test-run-wasm-relocation-arm64.cc
+++ b/deps/v8/test/cctest/test-run-wasm-relocation-arm64.cc
@@ -45,7 +45,7 @@ TEST(WasmRelocationArm64ContextReference) {
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- compiler::CSignature0<int64_t> csig;
+ compiler::CSignatureOf<int64_t> csig;
compiler::CodeRunner<int64_t> runnable(isolate, code, &csig);
int64_t ret_value = runnable.Call();
CHECK_EQ(ret_value, imm);
diff --git a/deps/v8/test/cctest/test-run-wasm-relocation-ia32.cc b/deps/v8/test/cctest/test-run-wasm-relocation-ia32.cc
index 829e0685a8..080da36a47 100644
--- a/deps/v8/test/cctest/test-run-wasm-relocation-ia32.cc
+++ b/deps/v8/test/cctest/test-run-wasm-relocation-ia32.cc
@@ -40,7 +40,7 @@ TEST(WasmRelocationIa32ContextReference) {
__ nop();
__ ret(0);
- compiler::CSignature0<int32_t> csig;
+ compiler::CSignatureOf<int32_t> csig;
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
diff --git a/deps/v8/test/cctest/test-run-wasm-relocation-x64.cc b/deps/v8/test/cctest/test-run-wasm-relocation-x64.cc
index d5c29604dd..0526ce25a2 100644
--- a/deps/v8/test/cctest/test-run-wasm-relocation-x64.cc
+++ b/deps/v8/test/cctest/test-run-wasm-relocation-x64.cc
@@ -43,7 +43,7 @@ TEST(WasmRelocationX64ContextReference) {
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
USE(code);
- CSignature0<int64_t> csig;
+ CSignatureOf<int64_t> csig;
CodeRunner<int64_t> runnable(isolate, code, &csig);
int64_t ret_value = runnable.Call();
CHECK_EQ(ret_value, imm);
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index 20aa3f008c..70fc8586eb 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -57,6 +57,8 @@
namespace v8 {
namespace internal {
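+// Controls how ProduceCache creates the cached data: produce it lazily or
+// eagerly at compile time, or build it from the script after execution.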
+enum CodeCacheType { kLazy, kEager, kAfterExecute };
+
void DisableLazyDeserialization() {
// UNINITIALIZED tests do not set up the isolate sufficiently for lazy
// deserialization to work.
@@ -120,9 +122,10 @@ struct StartupBlobs {
static StartupBlobs Serialize(v8::Isolate* isolate) {
// We have to create one context. One reason for this is so that the builtins
- // can be loaded from v8natives.js and their addresses can be processed. This
- // will clear the pending fixups array, which would otherwise contain GC roots
- // that would confuse the serialization/deserialization process.
+  // can be loaded from self-hosted JS builtins and their addresses can be
+ // processed. This will clear the pending fixups array, which would otherwise
+ // contain GC roots that would confuse the serialization/deserialization
+ // process.
v8::Isolate::Scope isolate_scope(isolate);
{
v8::HandleScope scope(isolate);
@@ -368,7 +371,7 @@ static void PartiallySerializeContext(Vector<const byte>* startup_blob_out,
v8::Local<v8::Context>::New(v8_isolate, env)->Exit();
}
- i::Object* raw_context = *v8::Utils::OpenPersistent(env);
+ i::Context* raw_context = i::Context::cast(*v8::Utils::OpenPersistent(env));
env.Reset();
@@ -493,7 +496,7 @@ static void PartiallySerializeCustomContext(
v8::Local<v8::Context>::New(v8_isolate, env)->Exit();
}
- i::Object* raw_context = *v8::Utils::OpenPersistent(env);
+ i::Context* raw_context = i::Context::cast(*v8::Utils::OpenPersistent(env));
env.Reset();
@@ -1379,10 +1382,11 @@ TEST(CodeSerializerLargeCodeObject) {
// code. Don't even bother generating optimized code to avoid timeouts.
FLAG_always_opt = false;
- Vector<const uint8_t> source =
- ConstructSource(STATIC_CHAR_VECTOR("var j=1; if (j == 0) {"),
- STATIC_CHAR_VECTOR("for (let i of Object.prototype);"),
- STATIC_CHAR_VECTOR("} j=7; j"), 1100);
+ Vector<const uint8_t> source = ConstructSource(
+ STATIC_CHAR_VECTOR("var j=1; if (j == 0) {"),
+ STATIC_CHAR_VECTOR(
+ "for (let i of Object.prototype) for (let k = 0; k < 0; ++k);"),
+ STATIC_CHAR_VECTOR("} j=7; j"), 1100);
Handle<String> source_str =
isolate->factory()->NewStringFromOneByte(source).ToHandleChecked();
@@ -1637,28 +1641,34 @@ class SerializerOneByteResource
: public v8::String::ExternalOneByteStringResource {
public:
SerializerOneByteResource(const char* data, size_t length)
- : data_(data), length_(length) {}
+ : data_(data), length_(length), dispose_count_(0) {}
virtual const char* data() const { return data_; }
virtual size_t length() const { return length_; }
+ virtual void Dispose() { dispose_count_++; }
+ int dispose_count() { return dispose_count_; }
private:
const char* data_;
size_t length_;
+ int dispose_count_;
};
class SerializerTwoByteResource : public v8::String::ExternalStringResource {
public:
SerializerTwoByteResource(const char* data, size_t length)
- : data_(AsciiToTwoByteString(data)), length_(length) {}
+ : data_(AsciiToTwoByteString(data)), length_(length), dispose_count_(0) {}
~SerializerTwoByteResource() { DeleteArray<const uint16_t>(data_); }
virtual const uint16_t* data() const { return data_; }
virtual size_t length() const { return length_; }
+ virtual void Dispose() { dispose_count_++; }
+ int dispose_count() { return dispose_count_; }
private:
const uint16_t* data_;
size_t length_;
+ int dispose_count_;
};
TEST(CodeSerializerExternalString) {
@@ -1838,8 +1848,8 @@ static void SerializerCodeEventListener(const v8::JitCodeEvent* event) {
}
}
-v8::ScriptCompiler::CachedData* ProduceCache(const char* source,
- bool eager = false) {
+v8::ScriptCompiler::CachedData* ProduceCache(
+ const char* source, CodeCacheType cacheType = CodeCacheType::kLazy) {
v8::ScriptCompiler::CachedData* cache;
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -1853,19 +1863,23 @@ v8::ScriptCompiler::CachedData* ProduceCache(const char* source,
v8::Local<v8::String> source_str = v8_str(source);
v8::ScriptOrigin origin(v8_str("test"));
v8::ScriptCompiler::Source source(source_str, origin);
- v8::ScriptCompiler::CompileOptions options =
- eager ? v8::ScriptCompiler::kProduceFullCodeCache
- : v8::ScriptCompiler::kProduceCodeCache;
+ v8::ScriptCompiler::CompileOptions options;
+ switch (cacheType) {
+ case CodeCacheType::kLazy:
+ options = v8::ScriptCompiler::kProduceCodeCache;
+ break;
+ case CodeCacheType::kEager:
+ options = v8::ScriptCompiler::kProduceFullCodeCache;
+ break;
+ case CodeCacheType::kAfterExecute:
+ options = v8::ScriptCompiler::kNoCompileOptions;
+ break;
+ default:
+ UNREACHABLE();
+ }
v8::Local<v8::UnboundScript> script =
v8::ScriptCompiler::CompileUnboundScript(isolate1, &source, options)
.ToLocalChecked();
- const v8::ScriptCompiler::CachedData* data = source.GetCachedData();
- CHECK(data);
- // Persist cached data.
- uint8_t* buffer = NewArray<uint8_t>(data->length);
- MemCopy(buffer, data->data, data->length);
- cache = new v8::ScriptCompiler::CachedData(
- buffer, data->length, v8::ScriptCompiler::CachedData::BufferOwned);
v8::Local<v8::Value> result = script->BindToCurrentContext()
->Run(isolate1->GetCurrentContext())
@@ -1874,6 +1888,18 @@ v8::ScriptCompiler::CachedData* ProduceCache(const char* source,
result->ToString(isolate1->GetCurrentContext()).ToLocalChecked();
CHECK(result_string->Equals(isolate1->GetCurrentContext(), v8_str("abcdef"))
.FromJust());
+
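+  // For kAfterExecute the cache is created from the already-run script;
+  // the other modes copy out the data produced at compile time.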
+ if (cacheType == CodeCacheType::kAfterExecute) {
+ cache = ScriptCompiler::CreateCodeCache(script, source_str);
+ } else {
+ const ScriptCompiler::CachedData* data = source.GetCachedData();
+ CHECK(data);
+ uint8_t* buffer = NewArray<uint8_t>(data->length);
+ MemCopy(buffer, data->data, data->length);
+ cache = new v8::ScriptCompiler::CachedData(
+ buffer, data->length, v8::ScriptCompiler::CachedData::BufferOwned);
+ }
+ CHECK(cache);
}
isolate1->Dispose();
return cache;
@@ -1936,7 +1962,8 @@ TEST(CodeSerializerIsolatesEager) {
" }"
"}"
"f()() + 'def'";
- v8::ScriptCompiler::CachedData* cache = ProduceCache(source, true);
+ v8::ScriptCompiler::CachedData* cache =
+ ProduceCache(source, CodeCacheType::kEager);
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -1974,6 +2001,68 @@ TEST(CodeSerializerIsolatesEager) {
isolate2->Dispose();
}
+TEST(CodeSerializerAfterExecute) {
+ // We test that no compilations happen when running this code. Forcing
+ // to always optimize breaks this test.
+ bool prev_opt_value = FLAG_opt;
+ bool prev_always_opt_value = FLAG_always_opt;
+ FLAG_always_opt = false;
+ FLAG_opt = false;
+ const char* source = "function f() { return 'abc'; }; f() + 'def'";
+ v8::ScriptCompiler::CachedData* cache =
+ ProduceCache(source, CodeCacheType::kAfterExecute);
+
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ v8::Isolate* isolate2 = v8::Isolate::New(create_params);
+
+ {
+ v8::Isolate::Scope iscope(isolate2);
+ v8::HandleScope scope(isolate2);
+ v8::Local<v8::Context> context = v8::Context::New(isolate2);
+ v8::Context::Scope context_scope(context);
+
+ v8::Local<v8::String> source_str = v8_str(source);
+ v8::ScriptOrigin origin(v8_str("test"));
+ v8::ScriptCompiler::Source source(source_str, origin, cache);
+ v8::Local<v8::UnboundScript> script;
+ {
+ DisallowCompilation no_compile_expected(
+ reinterpret_cast<Isolate*>(isolate2));
+ script = v8::ScriptCompiler::CompileUnboundScript(
+ isolate2, &source, v8::ScriptCompiler::kConsumeCodeCache)
+ .ToLocalChecked();
+ }
+ CHECK(!cache->rejected);
+ CheckDeserializedFlag(script);
+
+ Handle<SharedFunctionInfo> sfi = v8::Utils::OpenHandle(*script);
+ CHECK(sfi->HasBytecodeArray());
+ BytecodeArray* bytecode = sfi->bytecode_array();
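+    // A full interrupt budget and an OSR nesting level of zero indicate
+    // that the deserialized bytecode has not been executed.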
+ CHECK_EQ(bytecode->interrupt_budget(),
+ interpreter::Interpreter::kInterruptBudget);
+ CHECK_EQ(bytecode->osr_loop_nesting_level(), 0);
+
+ {
+ DisallowCompilation no_compile_expected(
+ reinterpret_cast<Isolate*>(isolate2));
+ v8::Local<v8::Value> result = script->BindToCurrentContext()
+ ->Run(isolate2->GetCurrentContext())
+ .ToLocalChecked();
+ v8::Local<v8::String> result_string =
+ result->ToString(isolate2->GetCurrentContext()).ToLocalChecked();
+ CHECK(
+ result_string->Equals(isolate2->GetCurrentContext(), v8_str("abcdef"))
+ .FromJust());
+ }
+ }
+ isolate2->Dispose();
+
+ // Restore the flags.
+ FLAG_always_opt = prev_always_opt_value;
+ FLAG_opt = prev_opt_value;
+}
+
TEST(CodeSerializerFlagChange) {
const char* source = "function f() { return 'abc'; }; f() + 'def'";
v8::ScriptCompiler::CachedData* cache = ProduceCache(source);
@@ -2241,6 +2330,9 @@ class SerializedExtension : public v8::Extension {
}
};
+static SerializerOneByteResource serializable_one_byte_resource("one_byte", 8);
+static SerializerTwoByteResource serializable_two_byte_resource("two_byte", 8);
+
intptr_t original_external_references[] = {
reinterpret_cast<intptr_t>(SerializedCallback),
reinterpret_cast<intptr_t>(&serialized_static_field),
@@ -2248,6 +2340,8 @@ intptr_t original_external_references[] = {
reinterpret_cast<intptr_t>(&AccessorForSerialization),
reinterpret_cast<intptr_t>(&SerializedExtension::FunctionCallback),
reinterpret_cast<intptr_t>(&serialized_static_field), // duplicate entry
+ reinterpret_cast<intptr_t>(&serializable_one_byte_resource),
+ reinterpret_cast<intptr_t>(&serializable_two_byte_resource),
0};
intptr_t replaced_external_references[] = {
@@ -2257,6 +2351,8 @@ intptr_t replaced_external_references[] = {
reinterpret_cast<intptr_t>(&AccessorForSerialization),
reinterpret_cast<intptr_t>(&SerializedExtension::FunctionCallback),
reinterpret_cast<intptr_t>(&serialized_static_field),
+ reinterpret_cast<intptr_t>(&serializable_one_byte_resource),
+ reinterpret_cast<intptr_t>(&serializable_two_byte_resource),
0};
intptr_t short_external_references[] = {
@@ -2277,13 +2373,32 @@ TEST(SnapshotCreatorExternalReferences) {
v8::Local<v8::Value> function =
callback->GetFunction(context).ToLocalChecked();
CHECK(context->Global()->Set(context, v8_str("f"), function).FromJust());
+
+ CHECK(context->Global()
+ ->Set(context, v8_str("one_byte"),
+ v8::String::NewExternalOneByte(
+ isolate, &serializable_one_byte_resource)
+ .ToLocalChecked())
+ .FromJust());
+ CHECK(context->Global()
+ ->Set(context, v8_str("two_byte"),
+ v8::String::NewExternalTwoByte(
+ isolate, &serializable_two_byte_resource)
+ .ToLocalChecked())
+ .FromJust());
+
ExpectInt32("f()", 42);
+ ExpectString("one_byte", "one_byte");
+ ExpectString("two_byte", "two_byte");
creator.SetDefaultContext(context);
}
blob =
creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);
}
+ CHECK_EQ(1, serializable_one_byte_resource.dispose_count());
+ CHECK_EQ(1, serializable_two_byte_resource.dispose_count());
+
// Deserialize with the original external reference.
{
v8::Isolate::CreateParams params;
@@ -2298,10 +2413,17 @@ TEST(SnapshotCreatorExternalReferences) {
v8::Local<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
ExpectInt32("f()", 42);
+ ExpectString("one_byte", "one_byte");
+ ExpectString("two_byte", "two_byte");
+ CHECK(CompileRun("one_byte").As<v8::String>()->IsExternalOneByte());
+ CHECK(CompileRun("two_byte").As<v8::String>()->IsExternal());
}
isolate->Dispose();
}
+ CHECK_EQ(2, serializable_one_byte_resource.dispose_count());
+ CHECK_EQ(2, serializable_two_byte_resource.dispose_count());
+
// Deserialize with some other external reference.
{
v8::Isolate::CreateParams params;
@@ -2319,6 +2441,10 @@ TEST(SnapshotCreatorExternalReferences) {
}
isolate->Dispose();
}
+
+ CHECK_EQ(3, serializable_one_byte_resource.dispose_count());
+ CHECK_EQ(3, serializable_two_byte_resource.dispose_count());
+
delete[] blob.data;
}
@@ -2650,6 +2776,231 @@ TEST(SnapshotCreatorTemplates) {
delete[] blob.data;
}
+TEST(SnapshotCreatorAddData) {
+ DisableAlwaysOpt();
+ v8::StartupData blob;
+
+ {
+ v8::SnapshotCreator creator;
+ v8::Isolate* isolate = creator.GetIsolate();
+ v8::Eternal<v8::Value> eternal_number;
+ v8::Persistent<v8::Value> persistent_number_1;
+ v8::Persistent<v8::Value> persistent_number_2;
+ v8::Persistent<v8::Context> persistent_context;
+ {
+ v8::HandleScope handle_scope(isolate);
+
+ eternal_number.Set(isolate, v8_num(2017));
+ persistent_number_1.Reset(isolate, v8_num(2018));
+ persistent_number_2.Reset(isolate, v8_num(2019));
+
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ CHECK_EQ(0u, creator.AddData(context, persistent_number_2.Get(isolate)));
+ creator.SetDefaultContext(context);
+ context = v8::Context::New(isolate);
+ persistent_context.Reset(isolate, context);
+
+ v8::Context::Scope context_scope(context);
+
+ v8::Local<v8::Object> object = CompileRun("({ p: 12 })").As<v8::Object>();
+
+ v8::Local<v8::ObjectTemplate> object_template =
+ v8::ObjectTemplate::New(isolate);
+ object_template->SetInternalFieldCount(3);
+
+ v8::Local<v8::Private> private_symbol =
+ v8::Private::ForApi(isolate, v8_str("private_symbol"));
+
+ v8::Local<v8::Signature> signature =
+ v8::Signature::New(isolate, v8::FunctionTemplate::New(isolate));
+
+ v8::Local<v8::AccessorSignature> accessor_signature =
+ v8::AccessorSignature::New(isolate,
+ v8::FunctionTemplate::New(isolate));
+
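+      // AddData hands out consecutive indices, counted separately for
+      // context-attached and isolate-attached data.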
+ CHECK_EQ(0u, creator.AddData(context, object));
+ CHECK_EQ(1u, creator.AddData(context, v8_str("context-dependent")));
+ CHECK_EQ(2u, creator.AddData(context, persistent_number_1.Get(isolate)));
+ CHECK_EQ(3u, creator.AddData(context, object_template));
+ CHECK_EQ(4u, creator.AddData(context, persistent_context.Get(isolate)));
+ creator.AddContext(context);
+
+ CHECK_EQ(0u, creator.AddData(v8_str("context-independent")));
+ CHECK_EQ(1u, creator.AddData(eternal_number.Get(isolate)));
+ CHECK_EQ(2u, creator.AddData(object_template));
+ CHECK_EQ(3u, creator.AddData(v8::FunctionTemplate::New(isolate)));
+ CHECK_EQ(4u, creator.AddData(private_symbol));
+ CHECK_EQ(5u, creator.AddData(signature));
+ CHECK_EQ(6u, creator.AddData(accessor_signature));
+ }
+
+ blob =
+ creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);
+ }
+
+ {
+ v8::Isolate::CreateParams params;
+ params.snapshot_blob = &blob;
+ params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ // Test-appropriate equivalent of v8::Isolate::New.
+ v8::Isolate* isolate = TestIsolate::New(params);
+ {
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope handle_scope(isolate);
+ v8::Local<v8::Context> context =
+ v8::Context::FromSnapshot(isolate, 0).ToLocalChecked();
+
+ // Check serialized data on the context.
+ v8::Local<v8::Object> object =
+ context->GetDataFromSnapshotOnce<v8::Object>(0).ToLocalChecked();
+ CHECK(context->GetDataFromSnapshotOnce<v8::Object>(0).IsEmpty());
+ CHECK_EQ(12, object->Get(context, v8_str("p"))
+ .ToLocalChecked()
+ ->Int32Value(context)
+ .FromJust());
+
+ v8::Local<v8::String> string =
+ context->GetDataFromSnapshotOnce<v8::String>(1).ToLocalChecked();
+ CHECK(context->GetDataFromSnapshotOnce<v8::String>(1).IsEmpty());
+ CHECK(string->Equals(context, v8_str("context-dependent")).FromJust());
+
+ v8::Local<v8::Number> number =
+ context->GetDataFromSnapshotOnce<v8::Number>(2).ToLocalChecked();
+ CHECK(context->GetDataFromSnapshotOnce<v8::Number>(2).IsEmpty());
+ CHECK_EQ(2018, number->Int32Value(context).FromJust());
+
+ v8::Local<v8::ObjectTemplate> templ =
+ context->GetDataFromSnapshotOnce<v8::ObjectTemplate>(3)
+ .ToLocalChecked();
+ CHECK(context->GetDataFromSnapshotOnce<v8::ObjectTemplate>(3).IsEmpty());
+ CHECK_EQ(3, templ->InternalFieldCount());
+
+ v8::Local<v8::Context> serialized_context =
+ context->GetDataFromSnapshotOnce<v8::Context>(4).ToLocalChecked();
+ CHECK(context->GetDataFromSnapshotOnce<v8::Context>(4).IsEmpty());
+ CHECK_EQ(*v8::Utils::OpenHandle(*serialized_context),
+ *v8::Utils::OpenHandle(*context));
+
+ CHECK(context->GetDataFromSnapshotOnce<v8::Value>(5).IsEmpty());
+
+ // Check serialized data on the isolate.
+ string = isolate->GetDataFromSnapshotOnce<v8::String>(0).ToLocalChecked();
+ CHECK(context->GetDataFromSnapshotOnce<v8::String>(0).IsEmpty());
+ CHECK(string->Equals(context, v8_str("context-independent")).FromJust());
+
+ number = isolate->GetDataFromSnapshotOnce<v8::Number>(1).ToLocalChecked();
+ CHECK(isolate->GetDataFromSnapshotOnce<v8::Number>(1).IsEmpty());
+ CHECK_EQ(2017, number->Int32Value(context).FromJust());
+
+ templ = isolate->GetDataFromSnapshotOnce<v8::ObjectTemplate>(2)
+ .ToLocalChecked();
+ CHECK(isolate->GetDataFromSnapshotOnce<v8::ObjectTemplate>(2).IsEmpty());
+ CHECK_EQ(3, templ->InternalFieldCount());
+
+ isolate->GetDataFromSnapshotOnce<v8::FunctionTemplate>(3)
+ .ToLocalChecked();
+ CHECK(
+ isolate->GetDataFromSnapshotOnce<v8::FunctionTemplate>(3).IsEmpty());
+
+ isolate->GetDataFromSnapshotOnce<v8::Private>(4).ToLocalChecked();
+    CHECK(isolate->GetDataFromSnapshotOnce<v8::Private>(4).IsEmpty());
+
+ isolate->GetDataFromSnapshotOnce<v8::Signature>(5).ToLocalChecked();
+ CHECK(isolate->GetDataFromSnapshotOnce<v8::Signature>(5).IsEmpty());
+
+ isolate->GetDataFromSnapshotOnce<v8::AccessorSignature>(6)
+ .ToLocalChecked();
+ CHECK(
+ isolate->GetDataFromSnapshotOnce<v8::AccessorSignature>(6).IsEmpty());
+
+ CHECK(isolate->GetDataFromSnapshotOnce<v8::Value>(7).IsEmpty());
+ }
+ isolate->Dispose();
+ }
+ {
+ SnapshotCreator creator(nullptr, &blob);
+ v8::Isolate* isolate = creator.GetIsolate();
+ {
+ // Adding data to a snapshot replaces the list of existing data.
+ v8::HandleScope hscope(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ creator.SetDefaultContext(context);
+ context = v8::Context::FromSnapshot(isolate, 0).ToLocalChecked();
+ v8::Local<v8::String> string =
+ context->GetDataFromSnapshotOnce<v8::String>(1).ToLocalChecked();
+ CHECK(context->GetDataFromSnapshotOnce<v8::String>(1).IsEmpty());
+ CHECK(string->Equals(context, v8_str("context-dependent")).FromJust());
+ v8::Local<v8::Number> number =
+ isolate->GetDataFromSnapshotOnce<v8::Number>(1).ToLocalChecked();
+ CHECK(isolate->GetDataFromSnapshotOnce<v8::Number>(1).IsEmpty());
+ CHECK_EQ(2017, number->Int32Value(context).FromJust());
+
+ CHECK_EQ(0u, creator.AddData(context, v8_num(2016)));
+ CHECK_EQ(0u, creator.AddContext(context));
+ CHECK_EQ(0u, creator.AddData(v8_str("stuff")));
+ }
+ delete[] blob.data;
+ blob =
+ creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);
+ }
+ {
+ v8::Isolate::CreateParams params;
+ params.snapshot_blob = &blob;
+ params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ // Test-appropriate equivalent of v8::Isolate::New.
+ v8::Isolate* isolate = TestIsolate::New(params);
+ {
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope handle_scope(isolate);
+
+ // Context where we did not re-add data no longer has data.
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ CHECK(context->GetDataFromSnapshotOnce<v8::Object>(0).IsEmpty());
+
+ // Context where we re-added data has completely new ones.
+ context = v8::Context::FromSnapshot(isolate, 0).ToLocalChecked();
+ v8::Local<v8::Value> value =
+ context->GetDataFromSnapshotOnce<v8::Value>(0).ToLocalChecked();
+ CHECK_EQ(2016, value->Int32Value(context).FromJust());
+ CHECK(context->GetDataFromSnapshotOnce<v8::Value>(1).IsEmpty());
+
+ // Ditto for the isolate.
+ v8::Local<v8::String> string =
+ isolate->GetDataFromSnapshotOnce<v8::String>(0).ToLocalChecked();
+ CHECK(string->Equals(context, v8_str("stuff")).FromJust());
+ CHECK(context->GetDataFromSnapshotOnce<v8::String>(1).IsEmpty());
+ }
+ isolate->Dispose();
+ }
+ delete[] blob.data;
+}
+
+TEST(SnapshotCreatorUnknownHandles) {
+ DisableAlwaysOpt();
+ v8::StartupData blob;
+
+ {
+ v8::SnapshotCreator creator;
+ v8::Isolate* isolate = creator.GetIsolate();
+ v8::Eternal<v8::Value> eternal_number;
+ v8::Persistent<v8::Value> persistent_number;
+ {
+ v8::HandleScope handle_scope(isolate);
+
+ eternal_number.Set(isolate, v8_num(2017));
+ persistent_number.Reset(isolate, v8_num(2018));
+
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ creator.SetDefaultContext(context);
+ }
+
+ blob =
+ creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);
+ }
+ delete[] blob.data;
+}
+
TEST(SnapshotCreatorIncludeGlobalProxy) {
DisableAlwaysOpt();
v8::StartupData blob;
diff --git a/deps/v8/test/cctest/test-strings.cc b/deps/v8/test/cctest/test-strings.cc
index 66a221f948..ba6186828d 100644
--- a/deps/v8/test/cctest/test-strings.cc
+++ b/deps/v8/test/cctest/test-strings.cc
@@ -50,8 +50,8 @@ class MyRandomNumberGenerator {
init();
}
- void init(uint32_t seed = 0x5688c73e) {
- static const uint32_t phi = 0x9e3779b9;
+ void init(uint32_t seed = 0x5688C73E) {
+ static const uint32_t phi = 0x9E3779B9;
c = 362436;
i = kQSize-1;
Q[0] = seed;
@@ -64,7 +64,7 @@ class MyRandomNumberGenerator {
uint32_t next() {
uint64_t a = 18782;
- uint32_t r = 0xfffffffe;
+ uint32_t r = 0xFFFFFFFE;
i = (i + 1) & (kQSize-1);
uint64_t t = a * Q[i] + c;
c = (t >> 32);
diff --git a/deps/v8/test/cctest/test-strtod.cc b/deps/v8/test/cctest/test-strtod.cc
index 2a9bf99723..68cf9783b1 100644
--- a/deps/v8/test/cctest/test-strtod.cc
+++ b/deps/v8/test/cctest/test-strtod.cc
@@ -436,8 +436,8 @@ static uint32_t DeterministicRandom() {
// Initialization values don't have any special meaning. (They are the result
// of two calls to rand().)
- if (hi == 0) hi = 0xbfe166e7;
- if (lo == 0) lo = 0x64d1c3c9;
+ if (hi == 0) hi = 0xBFE166E7;
+ if (lo == 0) lo = 0x64D1C3C9;
// Mix the bits.
hi = 36969 * (hi & 0xFFFF) + (hi >> 16);
diff --git a/deps/v8/test/cctest/test-sync-primitives-arm.cc b/deps/v8/test/cctest/test-sync-primitives-arm.cc
index 403d41ffe9..c99b462319 100644
--- a/deps/v8/test/cctest/test-sync-primitives-arm.cc
+++ b/deps/v8/test/cctest/test-sync-primitives-arm.cc
@@ -29,11 +29,11 @@
#include "test/cctest/assembler-helper-arm.h"
#include "test/cctest/cctest.h"
-#include "src/arm/simulator-arm.h"
#include "src/assembler-inl.h"
#include "src/disassembler.h"
#include "src/factory.h"
#include "src/macro-assembler.h"
+#include "src/simulator.h"
namespace v8 {
namespace internal {
@@ -198,16 +198,15 @@ void TestInvalidateExclusiveAccess(TestData initial_data, MemoryAccess access1,
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- F_piiii f = FUNCTION_CAST<F_piiii>(AssembleCode([&](Assembler& assm) {
+ auto f = AssembleCode<int(TestData*, int, int, int)>([&](Assembler& assm) {
AssembleLoadExcl(&assm, access1, r1, r1);
AssembleMemoryAccess(&assm, access2, r3, r2, r1);
AssembleStoreExcl(&assm, access3, r0, r3, r1);
- }));
+ });
TestData t = initial_data;
- int res =
- reinterpret_cast<int>(CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0));
+ int res = f.Call(&t, 0, 0, 0);
CHECK_EQ(expected_res, res);
switch (access3.size) {
case MemoryAccess::Size::Byte:
@@ -267,12 +266,11 @@ namespace {
int ExecuteMemoryAccess(Isolate* isolate, TestData* test_data,
MemoryAccess access) {
HandleScope scope(isolate);
- F_piiii f = FUNCTION_CAST<F_piiii>(AssembleCode([&](Assembler& assm) {
+ auto f = AssembleCode<int(TestData*, int, int)>([&](Assembler& assm) {
AssembleMemoryAccess(&assm, access, r0, r2, r1);
- }));
+ });
- return reinterpret_cast<int>(
- CALL_GENERATED_CODE(isolate, f, test_data, 0, 0, 0, 0));
+ return f.Call(test_data, 0, 0);
}
} // namespace
diff --git a/deps/v8/test/cctest/test-sync-primitives-arm64.cc b/deps/v8/test/cctest/test-sync-primitives-arm64.cc
index a4edee69fa..348faf81f7 100644
--- a/deps/v8/test/cctest/test-sync-primitives-arm64.cc
+++ b/deps/v8/test/cctest/test-sync-primitives-arm64.cc
@@ -209,10 +209,7 @@ void TestInvalidateExclusiveAccess(TestData initial_data, MemoryAccess access1,
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
TestData t = initial_data;
- Simulator::CallArgument args[] = {
- Simulator::CallArgument(reinterpret_cast<uintptr_t>(&t)),
- Simulator::CallArgument::End()};
- Simulator::current(isolate)->CallVoid(code->entry(), args);
+ Simulator::current(isolate)->Call<void>(code->entry(), &t);
int res = Simulator::current(isolate)->wreg(0);
CHECK_EQ(expected_res, res);
@@ -283,10 +280,7 @@ int ExecuteMemoryAccess(Isolate* isolate, TestData* test_data,
masm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- Simulator::CallArgument args[] = {
- Simulator::CallArgument(reinterpret_cast<uintptr_t>(test_data)),
- Simulator::CallArgument::End()};
- Simulator::current(isolate)->CallVoid(code->entry(), args);
+ Simulator::current(isolate)->Call<void>(code->entry(), test_data);
return Simulator::current(isolate)->wreg(0);
}
diff --git a/deps/v8/test/cctest/test-thread-termination.cc b/deps/v8/test/cctest/test-thread-termination.cc
index af8e41d2cf..21bdb645a5 100644
--- a/deps/v8/test/cctest/test-thread-termination.cc
+++ b/deps/v8/test/cctest/test-thread-termination.cc
@@ -45,11 +45,7 @@ void TerminateCurrentThread(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetIsolate()->TerminateExecution();
}
-
-void Fail(const v8::FunctionCallbackInfo<v8::Value>& args) {
- CHECK(false);
-}
-
+void Fail(const v8::FunctionCallbackInfo<v8::Value>& args) { UNREACHABLE(); }
void Loop(const v8::FunctionCallbackInfo<v8::Value>& args) {
CHECK(!args.GetIsolate()->IsExecutionTerminating());
@@ -406,7 +402,7 @@ TEST(TerminateCancelTerminateFromThreadItself) {
void MicrotaskShouldNotRun(const v8::FunctionCallbackInfo<v8::Value>& info) {
- CHECK(false);
+ UNREACHABLE();
}
diff --git a/deps/v8/test/cctest/test-traced-value.cc b/deps/v8/test/cctest/test-traced-value.cc
index 1c3e7ac252..3a33389a3e 100644
--- a/deps/v8/test/cctest/test-traced-value.cc
+++ b/deps/v8/test/cctest/test-traced-value.cc
@@ -114,13 +114,13 @@ TEST(Escaping) {
std::string json;
value->AppendAsTraceFormat(&json);
// Cannot use the expected value literal directly in CHECK_EQ
- // as it fails to process # character on Windows.
+ // as it fails to process the # character on Windows.
const char* expected =
"{\"a\":\"abc\\\"\'\\\\\\\\x\\\"y\'z\\n\\t\\u0017\",\"b\":"
"\"\\u0001\\u0002\\u0003\\u0004\\u0005\\u0006\\u0007\\u0008\\t\\n\\u000B"
"\\u000C\\u000D\\u000E\\u000F\\u0010\\u0011\\u0012\\u0013\\u0014\\u0015\\"
"u0016\\u0017\\u0018\\u0019\\u001A\\u001B\\u001C\\u001D\\u001E\\u001F "
"!\\\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]^_`"
- "abcdefghijklmnopqrstuvwxyz{|}~\177\"}";
+ "abcdefghijklmnopqrstuvwxyz{|}~\x7F\"}";
CHECK_EQ(expected, json);
}
diff --git a/deps/v8/test/cctest/test-typedarrays.cc b/deps/v8/test/cctest/test-typedarrays.cc
index 723a6f0680..c785b45022 100644
--- a/deps/v8/test/cctest/test-typedarrays.cc
+++ b/deps/v8/test/cctest/test-typedarrays.cc
@@ -85,5 +85,79 @@ TEST(AllocateNotExternal) {
CHECK_EQ(memory, buffer->GetContents().Data());
}
+void TestSpeciesProtector(char* code,
+ bool invalidates_species_protector = true) {
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
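+  // TYPED_ARRAYS invokes the macro once per typed array type, yielding
+  // names like "Uint8Array".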
+ std::string typed_array_constructors[] = {
+#define TYPED_ARRAY_CTOR(Type, type, TYPE, ctype, size) #Type "Array",
+
+ TYPED_ARRAYS(TYPED_ARRAY_CTOR)
+#undef TYPED_ARRAY_CTOR
+ };
+
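+  // Use a fresh isolate per constructor: once the species protector has
+  // been invalidated, it stays invalidated for that isolate.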
+ for (auto& constructor : typed_array_constructors) {
+ v8::Isolate* isolate = v8::Isolate::New(create_params);
+ isolate->Enter();
+ {
+ LocalContext context(isolate);
+ v8::HandleScope scope(isolate);
+ v8::TryCatch try_catch(isolate);
+
+ CompileRun(("let x = new " + constructor + "();").c_str());
+ CompileRun(("let constructor = " + constructor + ";").c_str());
+ v8::Local<v8::Value> constructor_obj = CompileRun(constructor.c_str());
+ CHECK_EQ(constructor_obj, CompileRun("x.slice().constructor"));
+ CHECK_EQ(constructor_obj, CompileRun("x.map(()=>{}).constructor"));
+ std::string decl = "class MyTypedArray extends " + constructor + " { }";
+ CompileRun(decl.c_str());
+
+ v8::internal::Isolate* i_isolate =
+ reinterpret_cast<v8::internal::Isolate*>(isolate);
+ CHECK(i_isolate->IsArraySpeciesLookupChainIntact());
+ CompileRun(code);
+ if (invalidates_species_protector) {
+ CHECK(!i_isolate->IsArraySpeciesLookupChainIntact());
+ } else {
+ CHECK(i_isolate->IsArraySpeciesLookupChainIntact());
+ }
+
+ v8::Local<v8::Value> my_typed_array = CompileRun("MyTypedArray");
+ CHECK_EQ(my_typed_array, CompileRun("x.slice().constructor"));
+ CHECK_EQ(my_typed_array, CompileRun("x.map(()=>{}).constructor"));
+ }
+ isolate->Exit();
+ isolate->Dispose();
+ }
+}
+
+UNINITIALIZED_TEST(SpeciesConstructor) {
+ char code[] = "x.constructor = MyTypedArray";
+ TestSpeciesProtector(code);
+}
+
+UNINITIALIZED_TEST(SpeciesConstructorAccessor) {
+ char code[] =
+ "Object.defineProperty(x, 'constructor',{get() {return MyTypedArray;}})";
+ TestSpeciesProtector(code);
+}
+
+UNINITIALIZED_TEST(SpeciesModified) {
+ char code[] =
+ "Object.defineProperty(constructor, Symbol.species, "
+ "{value:MyTypedArray})";
+ TestSpeciesProtector(code);
+}
+
+UNINITIALIZED_TEST(SpeciesParentConstructor) {
+ char code[] = "constructor.prototype.constructor = MyTypedArray";
+ TestSpeciesProtector(code);
+}
+
+UNINITIALIZED_TEST(SpeciesProto) {
+ char code[] = "x.__proto__ = MyTypedArray.prototype";
+ TestSpeciesProtector(code, false);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-types.cc b/deps/v8/test/cctest/test-types.cc
index fcd09eaee5..126e64c7de 100644
--- a/deps/v8/test/cctest/test-types.cc
+++ b/deps/v8/test/cctest/test-types.cc
@@ -111,7 +111,7 @@ struct Tests {
CHECK(this->IsBitset(T.Any));
CHECK(bitset(0) == this->AsBitset(T.None));
- CHECK(bitset(0xfffffffeu) == this->AsBitset(T.Any));
+ CHECK(bitset(0xFFFFFFFEu) == this->AsBitset(T.Any));
// Union(T1, T2) is bitset for bitsets T1,T2
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
@@ -224,38 +224,38 @@ struct Tests {
Factory* fac = isolate->factory();
CHECK(T.NewConstant(fac->NewNumber(0))->Is(T.UnsignedSmall));
CHECK(T.NewConstant(fac->NewNumber(1))->Is(T.UnsignedSmall));
- CHECK(T.NewConstant(fac->NewNumber(0x3fffffff))->Is(T.UnsignedSmall));
+ CHECK(T.NewConstant(fac->NewNumber(0x3FFFFFFF))->Is(T.UnsignedSmall));
CHECK(T.NewConstant(fac->NewNumber(-1))->Is(T.Negative31));
- CHECK(T.NewConstant(fac->NewNumber(-0x3fffffff))->Is(T.Negative31));
+ CHECK(T.NewConstant(fac->NewNumber(-0x3FFFFFFF))->Is(T.Negative31));
CHECK(T.NewConstant(fac->NewNumber(-0x40000000))->Is(T.Negative31));
CHECK(T.NewConstant(fac->NewNumber(0x40000000))->Is(T.Unsigned31));
CHECK(!T.NewConstant(fac->NewNumber(0x40000000))->Is(T.Unsigned30));
- CHECK(T.NewConstant(fac->NewNumber(0x7fffffff))->Is(T.Unsigned31));
- CHECK(!T.NewConstant(fac->NewNumber(0x7fffffff))->Is(T.Unsigned30));
+ CHECK(T.NewConstant(fac->NewNumber(0x7FFFFFFF))->Is(T.Unsigned31));
+ CHECK(!T.NewConstant(fac->NewNumber(0x7FFFFFFF))->Is(T.Unsigned30));
CHECK(T.NewConstant(fac->NewNumber(-0x40000001))->Is(T.Negative32));
CHECK(!T.NewConstant(fac->NewNumber(-0x40000001))->Is(T.Negative31));
- CHECK(T.NewConstant(fac->NewNumber(-0x7fffffff))->Is(T.Negative32));
- CHECK(!T.NewConstant(fac->NewNumber(-0x7fffffff - 1))->Is(T.Negative31));
+ CHECK(T.NewConstant(fac->NewNumber(-0x7FFFFFFF))->Is(T.Negative32));
+ CHECK(!T.NewConstant(fac->NewNumber(-0x7FFFFFFF - 1))->Is(T.Negative31));
if (SmiValuesAre31Bits()) {
CHECK(!T.NewConstant(fac->NewNumber(0x40000000))->Is(T.UnsignedSmall));
- CHECK(!T.NewConstant(fac->NewNumber(0x7fffffff))->Is(T.UnsignedSmall));
+ CHECK(!T.NewConstant(fac->NewNumber(0x7FFFFFFF))->Is(T.UnsignedSmall));
CHECK(!T.NewConstant(fac->NewNumber(-0x40000001))->Is(T.SignedSmall));
- CHECK(!T.NewConstant(fac->NewNumber(-0x7fffffff - 1))->Is(T.SignedSmall));
+ CHECK(!T.NewConstant(fac->NewNumber(-0x7FFFFFFF - 1))->Is(T.SignedSmall));
} else {
CHECK(SmiValuesAre32Bits());
CHECK(T.NewConstant(fac->NewNumber(0x40000000))->Is(T.UnsignedSmall));
- CHECK(T.NewConstant(fac->NewNumber(0x7fffffff))->Is(T.UnsignedSmall));
+ CHECK(T.NewConstant(fac->NewNumber(0x7FFFFFFF))->Is(T.UnsignedSmall));
CHECK(T.NewConstant(fac->NewNumber(-0x40000001))->Is(T.SignedSmall));
- CHECK(T.NewConstant(fac->NewNumber(-0x7fffffff - 1))->Is(T.SignedSmall));
+ CHECK(T.NewConstant(fac->NewNumber(-0x7FFFFFFF - 1))->Is(T.SignedSmall));
}
CHECK(T.NewConstant(fac->NewNumber(0x80000000u))->Is(T.Unsigned32));
CHECK(!T.NewConstant(fac->NewNumber(0x80000000u))->Is(T.Unsigned31));
- CHECK(T.NewConstant(fac->NewNumber(0xffffffffu))->Is(T.Unsigned32));
- CHECK(!T.NewConstant(fac->NewNumber(0xffffffffu))->Is(T.Unsigned31));
- CHECK(T.NewConstant(fac->NewNumber(0xffffffffu + 1.0))->Is(T.PlainNumber));
- CHECK(!T.NewConstant(fac->NewNumber(0xffffffffu + 1.0))->Is(T.Integral32));
- CHECK(T.NewConstant(fac->NewNumber(-0x7fffffff - 2.0))->Is(T.PlainNumber));
- CHECK(!T.NewConstant(fac->NewNumber(-0x7fffffff - 2.0))->Is(T.Integral32));
+ CHECK(T.NewConstant(fac->NewNumber(0xFFFFFFFFu))->Is(T.Unsigned32));
+ CHECK(!T.NewConstant(fac->NewNumber(0xFFFFFFFFu))->Is(T.Unsigned31));
+ CHECK(T.NewConstant(fac->NewNumber(0xFFFFFFFFu + 1.0))->Is(T.PlainNumber));
+ CHECK(!T.NewConstant(fac->NewNumber(0xFFFFFFFFu + 1.0))->Is(T.Integral32));
+ CHECK(T.NewConstant(fac->NewNumber(-0x7FFFFFFF - 2.0))->Is(T.PlainNumber));
+ CHECK(!T.NewConstant(fac->NewNumber(-0x7FFFFFFF - 2.0))->Is(T.Integral32));
CHECK(T.NewConstant(fac->NewNumber(0.1))->Is(T.PlainNumber));
CHECK(!T.NewConstant(fac->NewNumber(0.1))->Is(T.Integral32));
CHECK(T.NewConstant(fac->NewNumber(-10.1))->Is(T.PlainNumber));
diff --git a/deps/v8/test/cctest/test-unboxed-doubles.cc b/deps/v8/test/cctest/test-unboxed-doubles.cc
index 5f1584bc06..0245bf9e91 100644
--- a/deps/v8/test/cctest/test-unboxed-doubles.cc
+++ b/deps/v8/test/cctest/test-unboxed-doubles.cc
@@ -1519,7 +1519,7 @@ static void TestIncrementalWriteBarrier(Handle<Map> map, Handle<Map> new_map,
// barrier.
JSObject::MigrateToMap(obj, new_map);
- uint64_t boom_value = UINT64_C(0xbaad0176a37c28e1);
+ uint64_t boom_value = UINT64_C(0xBAAD0176A37C28E1);
FieldIndex double_field_index =
FieldIndex::ForDescriptor(*new_map, double_descriptor);
diff --git a/deps/v8/test/cctest/test-usecounters.cc b/deps/v8/test/cctest/test-usecounters.cc
index a4512ac21d..5e37991252 100644
--- a/deps/v8/test/cctest/test-usecounters.cc
+++ b/deps/v8/test/cctest/test-usecounters.cc
@@ -60,31 +60,6 @@ TEST(AssigmentExpressionLHSIsCall) {
use_counts[v8::Isolate::kAssigmentExpressionLHSIsCallInStrict] = 0;
}
-TEST(LabeledExpressionStatement) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- LocalContext env;
- int use_counts[v8::Isolate::kUseCounterFeatureCount] = {};
- global_use_counts = use_counts;
- CcTest::isolate()->SetUseCounterCallback(MockUseCounterCallback);
-
- CompileRun("typeof a");
- CHECK_EQ(0, use_counts[v8::Isolate::kLabeledExpressionStatement]);
-
- CompileRun("foo: null");
- CHECK_EQ(1, use_counts[v8::Isolate::kLabeledExpressionStatement]);
-
- CompileRun("foo: bar: baz: undefined");
- CHECK_EQ(2, use_counts[v8::Isolate::kLabeledExpressionStatement]);
-
- CompileRun(
- "foo: if (false);"
- "bar: { }"
- "baz: switch (false) { }"
- "bat: do { } while (false);");
- CHECK_EQ(2, use_counts[v8::Isolate::kLabeledExpressionStatement]);
-}
-
} // namespace test_usecounters
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-utils-arm64.cc b/deps/v8/test/cctest/test-utils-arm64.cc
index b65b4a765a..e8bc58ffff 100644
--- a/deps/v8/test/cctest/test-utils-arm64.cc
+++ b/deps/v8/test/cctest/test-utils-arm64.cc
@@ -114,7 +114,7 @@ bool Equal32(uint32_t expected, const RegisterDump* core, const Register& reg) {
// Retrieve the corresponding X register so we can check that the upper part
// was properly cleared.
int64_t result_x = core->xreg(reg.code());
- if ((result_x & 0xffffffff00000000L) != 0) {
+ if ((result_x & 0xFFFFFFFF00000000L) != 0) {
printf("Expected 0x%08" PRIx32 "\t Found 0x%016" PRIx64 "\n",
expected, result_x);
return false;
@@ -146,7 +146,7 @@ bool EqualFP32(float expected, const RegisterDump* core,
// Retrieve the corresponding D register so we can check that the upper part
// was properly cleared.
uint64_t result_64 = core->dreg_bits(fpreg.code());
- if ((result_64 & 0xffffffff00000000L) != 0) {
+ if ((result_64 & 0xFFFFFFFF00000000L) != 0) {
printf("Expected 0x%08" PRIx32 " (%f)\t Found 0x%016" PRIx64 "\n",
bit_cast<uint32_t>(expected), expected, result_64);
return false;
diff --git a/deps/v8/test/cctest/test-utils-arm64.h b/deps/v8/test/cctest/test-utils-arm64.h
index a709240662..78b266cb0b 100644
--- a/deps/v8/test/cctest/test-utils-arm64.h
+++ b/deps/v8/test/cctest/test-utils-arm64.h
@@ -244,7 +244,7 @@ RegList PopulateVRegisterArray(VRegister* s, VRegister* d, VRegister* v,
// top word anyway, so clobbering the full X registers should make tests more
// rigorous.
void Clobber(MacroAssembler* masm, RegList reg_list,
- uint64_t const value = 0xfedcba9876543210UL);
+ uint64_t const value = 0xFEDCBA9876543210UL);
// As Clobber, but for FP registers.
void ClobberFP(MacroAssembler* masm, RegList reg_list,
diff --git a/deps/v8/test/cctest/test-utils.cc b/deps/v8/test/cctest/test-utils.cc
index de2b16203b..c1c15873e9 100644
--- a/deps/v8/test/cctest/test-utils.cc
+++ b/deps/v8/test/cctest/test-utils.cc
@@ -164,7 +164,7 @@ void TestMemMove(byte* area1,
printf("diff at offset %d (%p): is %d, should be %d\n", i,
reinterpret_cast<void*>(area1 + i), area1[i], area2[i]);
}
- CHECK(false);
+ FATAL("memmove error");
}
}
@@ -197,7 +197,7 @@ TEST(Collector) {
const int kSequentialSize = 1000;
const int kBlockSize = 7;
for (int loop = 0; loop < kLoops; loop++) {
- Vector<int> block = collector.AddBlock(7, 0xbadcafe);
+ Vector<int> block = collector.AddBlock(7, 0xBADCAFE);
for (int i = 0; i < kSequentialSize; i++) {
collector.Add(i);
}
@@ -212,7 +212,7 @@ TEST(Collector) {
for (int j = 0; j < kBlockSize - 1; j++) {
CHECK_EQ(j * 7, result[offset + j]);
}
- CHECK_EQ(0xbadcafe, result[offset + kBlockSize - 1]);
+ CHECK_EQ(0xBADCAFE, result[offset + kBlockSize - 1]);
for (int j = 0; j < kSequentialSize; j++) {
CHECK_EQ(j, result[offset + kBlockSize + j]);
}
diff --git a/deps/v8/test/cctest/test-weakmaps.cc b/deps/v8/test/cctest/test-weakmaps.cc
index 8db1855cf5..546db6acf4 100644
--- a/deps/v8/test/cctest/test-weakmaps.cc
+++ b/deps/v8/test/cctest/test-weakmaps.cc
@@ -42,21 +42,6 @@ static Isolate* GetIsolateFrom(LocalContext* context) {
return reinterpret_cast<Isolate*>((*context)->GetIsolate());
}
-
-static Handle<JSWeakMap> AllocateJSWeakMap(Isolate* isolate) {
- Handle<Map> map =
- isolate->factory()->NewMap(JS_WEAK_MAP_TYPE, JSWeakMap::kSize);
- Handle<JSObject> weakmap_obj = isolate->factory()->NewJSObjectFromMap(map);
- Handle<JSWeakMap> weakmap(JSWeakMap::cast(*weakmap_obj));
- // Do not leak handles for the hash table, it would make entries strong.
- {
- HandleScope scope(isolate);
- Handle<ObjectHashTable> table = ObjectHashTable::New(isolate, 1);
- weakmap->set_table(*table);
- }
- return weakmap;
-}
-
static int NumberOfWeakCalls = 0;
static void WeakPointerCallback(const v8::WeakCallbackInfo<void>& data) {
std::pair<v8::Persistent<v8::Value>*, int>* p =
@@ -74,7 +59,7 @@ TEST(Weakness) {
Isolate* isolate = GetIsolateFrom(&context);
Factory* factory = isolate->factory();
HandleScope scope(isolate);
- Handle<JSWeakMap> weakmap = AllocateJSWeakMap(isolate);
+ Handle<JSWeakMap> weakmap = isolate->factory()->NewJSWeakMap();
GlobalHandles* global_handles = isolate->global_handles();
// Keep global reference to the key.
@@ -127,7 +112,7 @@ TEST(Shrinking) {
Isolate* isolate = GetIsolateFrom(&context);
Factory* factory = isolate->factory();
HandleScope scope(isolate);
- Handle<JSWeakMap> weakmap = AllocateJSWeakMap(isolate);
+ Handle<JSWeakMap> weakmap = isolate->factory()->NewJSWeakMap();
// Check initial capacity.
CHECK_EQ(32, ObjectHashTable::cast(weakmap->table())->Capacity());
@@ -174,7 +159,7 @@ TEST(Regress2060a) {
Handle<JSFunction> function =
factory->NewFunctionForTest(factory->function_string());
Handle<JSObject> key = factory->NewJSObject(function);
- Handle<JSWeakMap> weakmap = AllocateJSWeakMap(isolate);
+ Handle<JSWeakMap> weakmap = isolate->factory()->NewJSWeakMap();
// Start second old-space page so that values land on evacuation candidate.
Page* first_page = heap->old_space()->anchor()->next_page();
@@ -226,7 +211,7 @@ TEST(Regress2060b) {
CHECK(!heap->InNewSpace(*keys[i]));
CHECK(!first_page->Contains(keys[i]->address()));
}
- Handle<JSWeakMap> weakmap = AllocateJSWeakMap(isolate);
+ Handle<JSWeakMap> weakmap = isolate->factory()->NewJSWeakMap();
for (int i = 0; i < 32; i++) {
Handle<Smi> smi(Smi::FromInt(i), isolate);
int32_t hash = keys[i]->GetOrCreateHash(isolate)->value();
@@ -250,7 +235,7 @@ TEST(Regress399527) {
Heap* heap = isolate->heap();
{
HandleScope scope(isolate);
- AllocateJSWeakMap(isolate);
+ isolate->factory()->NewJSWeakMap();
heap::SimulateIncrementalMarking(heap);
}
// The weak map is marked black here but leaving the handle scope will make
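// The hunks above replace a hand-rolled test helper with a factory method.
// A minimal sketch of what Factory::NewJSWeakMap() presumably covers — it
// simply mirrors the deleted AllocateJSWeakMap() helper; the actual factory
// code may differ:
Handle<JSWeakMap> NewJSWeakMapSketch(Isolate* isolate) {
  Handle<Map> map =
      isolate->factory()->NewMap(JS_WEAK_MAP_TYPE, JSWeakMap::kSize);
  Handle<JSObject> weakmap_obj = isolate->factory()->NewJSObjectFromMap(map);
  Handle<JSWeakMap> weakmap(JSWeakMap::cast(*weakmap_obj));
  {
    // Keep table handles scoped; a leaked handle would make entries strong.
    HandleScope scope(isolate);
    Handle<ObjectHashTable> table = ObjectHashTable::New(isolate, 1);
    weakmap->set_table(*table);
  }
  return weakmap;
}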
diff --git a/deps/v8/test/cctest/testcfg.py b/deps/v8/test/cctest/testcfg.py
index d9c3c23609..28d1ab27f9 100644
--- a/deps/v8/test/cctest/testcfg.py
+++ b/deps/v8/test/cctest/testcfg.py
@@ -28,46 +28,44 @@
import os
import shutil
-from testrunner.local import commands
+from testrunner.local import command
from testrunner.local import testsuite
from testrunner.local import utils
from testrunner.objects import testcase
+SHELL = 'cctest'
-class CcTestSuite(testsuite.TestSuite):
- SHELL = 'cctest'
-
- def __init__(self, name, root):
- super(CcTestSuite, self).__init__(name, root)
- if utils.IsWindows():
- build_dir = "build"
- else:
- build_dir = "out"
+class TestSuite(testsuite.TestSuite):
def ListTests(self, context):
- shell = os.path.abspath(os.path.join(context.shell_dir, self.SHELL))
+ shell = os.path.abspath(os.path.join(context.shell_dir, SHELL))
if utils.IsWindows():
shell += ".exe"
- cmd = context.command_prefix + [shell, "--list"] + context.extra_flags
- output = commands.Execute(cmd)
+ cmd = command.Command(
+ cmd_prefix=context.command_prefix,
+ shell=shell,
+ args=["--list"] + context.extra_flags)
+ output = cmd.execute()
if output.exit_code != 0:
- print ' '.join(cmd)
+ print cmd
print output.stdout
print output.stderr
return []
- tests = []
- for test_desc in output.stdout.strip().split():
- test = testcase.TestCase(self, test_desc)
- tests.append(test)
+ tests = map(self._create_test, output.stdout.strip().split())
tests.sort(key=lambda t: t.path)
return tests
- def GetShellForTestCase(self, testcase):
- return self.SHELL
+ def _test_class(self):
+ return TestCase
+
+
+class TestCase(testcase.TestCase):
+ def get_shell(self):
+ return SHELL
- def GetParametersForTestCase(self, testcase, context):
- return [testcase.path], testcase.flags + context.mode_flags, {}
+ def _get_files_params(self, ctx):
+ return [self.path]
def GetSuite(name, root):
- return CcTestSuite(name, root)
+ return TestSuite(name, root)
diff --git a/deps/v8/test/cctest/trace-extension.cc b/deps/v8/test/cctest/trace-extension.cc
index f0cc3cc2cc..00f9946180 100644
--- a/deps/v8/test/cctest/trace-extension.cc
+++ b/deps/v8/test/cctest/trace-extension.cc
@@ -67,10 +67,8 @@ v8::Local<v8::FunctionTemplate> TraceExtension::GetNativeFunctionTemplate(
.ToLocalChecked())
.FromJust()) {
return v8::FunctionTemplate::New(isolate, TraceExtension::JSEntrySPLevel2);
- } else {
- CHECK(false);
- return v8::Local<v8::FunctionTemplate>();
}
+ UNREACHABLE();
}
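// Replacing the dead else-branch with UNREACHABLE() is the usual V8 idiom:
// UNREACHABLE() presumably expands to a noreturn fatal, so the compiler no
// longer demands a dummy return value on the impossible path. A hedged
// sketch of the pattern (Select is a made-up name for illustration):
v8::Local<v8::FunctionTemplate> Select(bool known_case, v8::Isolate* iso) {
  if (known_case) {
    return v8::FunctionTemplate::New(iso);
  }
  UNREACHABLE();  // noreturn; no placeholder Local needed after this
}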
diff --git a/deps/v8/test/cctest/unicode-helpers.h b/deps/v8/test/cctest/unicode-helpers.h
index a09a8cbb3e..ca75fb65d7 100644
--- a/deps/v8/test/cctest/unicode-helpers.h
+++ b/deps/v8/test/cctest/unicode-helpers.h
@@ -10,7 +10,7 @@
static int Ucs2CharLength(unibrow::uchar c) {
if (c == unibrow::Utf8::kIncomplete || c == unibrow::Utf8::kBufferEmpty) {
return 0;
- } else if (c < 0xffff) {
+ } else if (c < 0xFFFF) {
return 1;
} else {
return 2;
@@ -19,12 +19,16 @@ static int Ucs2CharLength(unibrow::uchar c) {
static int Utf8LengthHelper(const char* s) {
unibrow::Utf8::Utf8IncrementalBuffer buffer(unibrow::Utf8::kBufferEmpty);
+ unibrow::Utf8::State state = unibrow::Utf8::State::kAccept;
+
int length = 0;
- for (; *s != '\0'; s++) {
- unibrow::uchar tmp = unibrow::Utf8::ValueOfIncremental(*s, &buffer);
+ size_t i = 0;
+ while (s[i] != '\0') {
+ unibrow::uchar tmp =
+ unibrow::Utf8::ValueOfIncremental(s[i], &i, &state, &buffer);
length += Ucs2CharLength(tmp);
}
- unibrow::uchar tmp = unibrow::Utf8::ValueOfIncrementalFinish(&buffer);
+ unibrow::uchar tmp = unibrow::Utf8::ValueOfIncrementalFinish(&state);
length += Ucs2CharLength(tmp);
return length;
}
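// The incremental UTF-8 API now threads explicit decoder state: the caller
// owns a Utf8::State plus a byte index that ValueOfIncremental advances
// itself, and finalization needs only the state. Assuming prefix bytes of a
// multi-byte sequence yield kIncomplete (counted as zero UCS-2 units) and
// the final byte yields the scalar value, the helper above behaves like:
//   CHECK_EQ(1, Utf8LengthHelper("\xE2\x82\xAC"));    // U+20AC, one unit
//   CHECK_EQ(2, Utf8LengthHelper("\xF0\x9F\x98\x80")); // U+1F600, surrogate
//                                                      // pair, two units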
diff --git a/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc b/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc
index 6907b8381e..f5c09b519e 100644
--- a/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc
+++ b/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc
@@ -71,6 +71,9 @@ class CWasmEntryArgTester {
static_assert(
arraysize(call_args) == compiler::CWasmEntryParameters::kNumParameters,
"adapt this test");
+ if (FLAG_wasm_jit_to_native) {
+ wasm_code_.GetWasmCode()->owner()->SetExecutable(true);
+ }
MaybeHandle<Object> return_obj = Execution::Call(
isolate_, c_wasm_entry_fn_, receiver, arraysize(call_args), call_args);
CHECK(!return_obj.is_null());
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-64.cc b/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
index 3fe8b4ae99..3ded63730b 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
@@ -214,7 +214,7 @@ WASM_EXEC_TEST(I64ShlUseOnlyLowWord) {
WASM_I64_SHL(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
- int32_t expected = static_cast<int32_t>((*i) << (*j & 0x3f));
+ int32_t expected = static_cast<int32_t>((*i) << (*j & 0x3F));
CHECK_EQ(expected, r.Call(*i, *j));
}
}
@@ -228,7 +228,7 @@ WASM_EXEC_TEST(I64ShrUseOnlyLowWord) {
WASM_I64_SHR(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))));
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) {
- int32_t expected = static_cast<int32_t>((*i) >> (*j & 0x3f));
+ int32_t expected = static_cast<int32_t>((*i) >> (*j & 0x3F));
CHECK_EQ(expected, r.Call(*i, *j));
}
}
@@ -242,7 +242,7 @@ WASM_EXEC_TEST(I64SarUseOnlyLowWord) {
WASM_I64_SAR(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
- int32_t expected = static_cast<int32_t>((*i) >> (*j & 0x3f));
+ int32_t expected = static_cast<int32_t>((*i) >> (*j & 0x3F));
CHECK_EQ(expected, r.Call(*i, *j));
}
}
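// These "UseOnlyLowWord" tests pin down wasm's shift semantics: per the
// wasm spec the shift count is taken modulo the operand width, hence the
// "& 0x3F" (low six bits) in every 64-bit expected value. A sketch of the
// reference computation the tests mirror (helper name is illustrative):
int64_t WasmI64SarRef(int64_t value, int64_t count) {
  return value >> (count & 0x3F);  // only the low 6 bits of count matter
}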
@@ -318,11 +318,11 @@ WASM_EXEC_TEST(I64DivU_Trap) {
WASM_EXEC_TEST(I64DivU_Byzero_Const) {
REQUIRE(I64DivU);
- for (uint64_t denom = 0xfffffffffffffffe; denom < 8; denom++) {
+ for (uint64_t denom = 0xFFFFFFFFFFFFFFFE; denom < 8; denom++) {
WasmRunner<uint64_t, uint64_t> r(execution_mode);
BUILD(r, WASM_I64_DIVU(WASM_GET_LOCAL(0), WASM_I64V_1(denom)));
- for (uint64_t val = 0xfffffffffffffff0; val < 8; val++) {
+ for (uint64_t val = 0xFFFFFFFFFFFFFFF0; val < 8; val++) {
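// Both unsigned loops above rely on wraparound: denom starts at
// 0xFFFFFFFFFFFFFFFE (2^64 - 2), increments through UINT64_MAX, wraps to 0,
// and stops before 8, so the near-overflow values and the small
// denominators (including the trapping 0) are all visited, e.g.
//   for (uint64_t d = ~uint64_t{1}; d < 8; d++)  // visits -2, -1, 0..7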
if (denom == 0) {
CHECK_TRAP64(r.Call(val));
} else {
@@ -418,7 +418,7 @@ WASM_EXEC_TEST(I64Shl) {
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) {
- uint64_t expected = (*i) << (*j & 0x3f);
+ uint64_t expected = (*i) << (*j & 0x3F);
CHECK_EQ(expected, r.Call(*i, *j));
}
}
@@ -453,7 +453,7 @@ WASM_EXEC_TEST(I64ShrU) {
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) {
- uint64_t expected = (*i) >> (*j & 0x3f);
+ uint64_t expected = (*i) >> (*j & 0x3F);
CHECK_EQ(expected, r.Call(*i, *j));
}
}
@@ -488,7 +488,7 @@ WASM_EXEC_TEST(I64ShrS) {
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
- int64_t expected = (*i) >> (*j & 0x3f);
+ int64_t expected = (*i) >> (*j & 0x3F);
CHECK_EQ(expected, r.Call(*i, *j));
}
}
@@ -632,11 +632,11 @@ WASM_EXEC_TEST(I64Popcnt) {
struct {
int64_t expected;
uint64_t input;
- } values[] = {{64, 0xffffffffffffffff},
+ } values[] = {{64, 0xFFFFFFFFFFFFFFFF},
{0, 0x0000000000000000},
{2, 0x0000080000008000},
{26, 0x1123456782345678},
- {38, 0xffedcba09edcba09}};
+ {38, 0xFFEDCBA09EDCBA09}};
WasmRunner<int64_t, uint64_t> r(execution_mode);
BUILD(r, WASM_I64_POPCNT(WASM_GET_LOCAL(0)));
@@ -658,81 +658,81 @@ WASM_EXEC_TEST(F32UConvertI64) {
uint64_t input;
uint32_t expected;
} values[] = {{0x0, 0x0},
- {0x1, 0x3f800000},
- {0xffffffff, 0x4f800000},
- {0x1b09788b, 0x4dd84bc4},
- {0x4c5fce8, 0x4c98bf9d},
- {0xcc0de5bf, 0x4f4c0de6},
+ {0x1, 0x3F800000},
+ {0xFFFFFFFF, 0x4F800000},
+ {0x1B09788B, 0x4DD84BC4},
+ {0x4C5FCE8, 0x4C98BF9D},
+ {0xCC0DE5BF, 0x4F4C0DE6},
{0x2, 0x40000000},
{0x3, 0x40400000},
{0x4, 0x40800000},
- {0x5, 0x40a00000},
+ {0x5, 0x40A00000},
{0x8, 0x41000000},
{0x9, 0x41100000},
- {0xffffffffffffffff, 0x5f800000},
- {0xfffffffffffffffe, 0x5f800000},
- {0xfffffffffffffffd, 0x5f800000},
+ {0xFFFFFFFFFFFFFFFF, 0x5F800000},
+ {0xFFFFFFFFFFFFFFFE, 0x5F800000},
+ {0xFFFFFFFFFFFFFFFD, 0x5F800000},
{0x0, 0x0},
- {0x100000000, 0x4f800000},
- {0xffffffff00000000, 0x5f800000},
- {0x1b09788b00000000, 0x5dd84bc4},
- {0x4c5fce800000000, 0x5c98bf9d},
- {0xcc0de5bf00000000, 0x5f4c0de6},
+ {0x100000000, 0x4F800000},
+ {0xFFFFFFFF00000000, 0x5F800000},
+ {0x1B09788B00000000, 0x5DD84BC4},
+ {0x4C5FCE800000000, 0x5C98BF9D},
+ {0xCC0DE5BF00000000, 0x5F4C0DE6},
{0x200000000, 0x50000000},
{0x300000000, 0x50400000},
{0x400000000, 0x50800000},
- {0x500000000, 0x50a00000},
+ {0x500000000, 0x50A00000},
{0x800000000, 0x51000000},
{0x900000000, 0x51100000},
- {0x273a798e187937a3, 0x5e1ce9e6},
- {0xece3af835495a16b, 0x5f6ce3b0},
- {0xb668ecc11223344, 0x5d3668ed},
- {0x9e, 0x431e0000},
+ {0x273A798E187937A3, 0x5E1CE9E6},
+ {0xECE3AF835495A16B, 0x5F6CE3B0},
+ {0xB668ECC11223344, 0x5D3668ED},
+ {0x9E, 0x431E0000},
{0x43, 0x42860000},
- {0xaf73, 0x472f7300},
- {0x116b, 0x458b5800},
- {0x658ecc, 0x4acb1d98},
- {0x2b3b4c, 0x4a2ced30},
- {0x88776655, 0x4f087766},
- {0x70000000, 0x4ee00000},
- {0x7200000, 0x4ce40000},
- {0x7fffffff, 0x4f000000},
- {0x56123761, 0x4eac246f},
- {0x7fffff00, 0x4efffffe},
- {0x761c4761eeeeeeee, 0x5eec388f},
- {0x80000000eeeeeeee, 0x5f000000},
- {0x88888888dddddddd, 0x5f088889},
- {0xa0000000dddddddd, 0x5f200000},
- {0xddddddddaaaaaaaa, 0x5f5dddde},
- {0xe0000000aaaaaaaa, 0x5f600000},
- {0xeeeeeeeeeeeeeeee, 0x5f6eeeef},
- {0xfffffffdeeeeeeee, 0x5f800000},
- {0xf0000000dddddddd, 0x5f700000},
- {0x7fffffdddddddd, 0x5b000000},
- {0x3fffffaaaaaaaa, 0x5a7fffff},
- {0x1fffffaaaaaaaa, 0x59fffffd},
- {0xfffff, 0x497ffff0},
- {0x7ffff, 0x48ffffe0},
- {0x3ffff, 0x487fffc0},
- {0x1ffff, 0x47ffff80},
- {0xffff, 0x477fff00},
- {0x7fff, 0x46fffe00},
- {0x3fff, 0x467ffc00},
- {0x1fff, 0x45fff800},
- {0xfff, 0x457ff000},
- {0x7ff, 0x44ffe000},
- {0x3ff, 0x447fc000},
- {0x1ff, 0x43ff8000},
- {0x3fffffffffff, 0x56800000},
- {0x1fffffffffff, 0x56000000},
- {0xfffffffffff, 0x55800000},
- {0x7ffffffffff, 0x55000000},
- {0x3ffffffffff, 0x54800000},
- {0x1ffffffffff, 0x54000000},
- {0x8000008000000000, 0x5f000000},
- {0x8000008000000001, 0x5f000001},
- {0x8000000000000400, 0x5f000000},
- {0x8000000000000401, 0x5f000000}};
+ {0xAF73, 0x472F7300},
+ {0x116B, 0x458B5800},
+ {0x658ECC, 0x4ACB1D98},
+ {0x2B3B4C, 0x4A2CED30},
+ {0x88776655, 0x4F087766},
+ {0x70000000, 0x4EE00000},
+ {0x7200000, 0x4CE40000},
+ {0x7FFFFFFF, 0x4F000000},
+ {0x56123761, 0x4EAC246F},
+ {0x7FFFFF00, 0x4EFFFFFE},
+ {0x761C4761EEEEEEEE, 0x5EEC388F},
+ {0x80000000EEEEEEEE, 0x5F000000},
+ {0x88888888DDDDDDDD, 0x5F088889},
+ {0xA0000000DDDDDDDD, 0x5F200000},
+ {0xDDDDDDDDAAAAAAAA, 0x5F5DDDDE},
+ {0xE0000000AAAAAAAA, 0x5F600000},
+ {0xEEEEEEEEEEEEEEEE, 0x5F6EEEEF},
+ {0xFFFFFFFDEEEEEEEE, 0x5F800000},
+ {0xF0000000DDDDDDDD, 0x5F700000},
+ {0x7FFFFFDDDDDDDD, 0x5B000000},
+ {0x3FFFFFAAAAAAAA, 0x5A7FFFFF},
+ {0x1FFFFFAAAAAAAA, 0x59FFFFFD},
+ {0xFFFFF, 0x497FFFF0},
+ {0x7FFFF, 0x48FFFFE0},
+ {0x3FFFF, 0x487FFFC0},
+ {0x1FFFF, 0x47FFFF80},
+ {0xFFFF, 0x477FFF00},
+ {0x7FFF, 0x46FFFE00},
+ {0x3FFF, 0x467FFC00},
+ {0x1FFF, 0x45FFF800},
+ {0xFFF, 0x457FF000},
+ {0x7FF, 0x44FFE000},
+ {0x3FF, 0x447FC000},
+ {0x1FF, 0x43FF8000},
+ {0x3FFFFFFFFFFF, 0x56800000},
+ {0x1FFFFFFFFFFF, 0x56000000},
+ {0xFFFFFFFFFFF, 0x55800000},
+ {0x7FFFFFFFFFF, 0x55000000},
+ {0x3FFFFFFFFFF, 0x54800000},
+ {0x1FFFFFFFFFF, 0x54000000},
+ {0x8000008000000000, 0x5F000000},
+ {0x8000008000000001, 0x5F000001},
+ {0x8000000000000400, 0x5F000000},
+ {0x8000000000000401, 0x5F000000}};
WasmRunner<float, uint64_t> r(execution_mode);
BUILD(r, WASM_F32_UCONVERT_I64(WASM_GET_LOCAL(0)));
for (size_t i = 0; i < arraysize(values); i++) {
@@ -753,80 +753,80 @@ WASM_EXEC_TEST(F64UConvertI64) {
uint64_t input;
uint64_t expected;
} values[] = {{0x0, 0x0},
- {0x1, 0x3ff0000000000000},
- {0xffffffff, 0x41efffffffe00000},
- {0x1b09788b, 0x41bb09788b000000},
- {0x4c5fce8, 0x419317f3a0000000},
- {0xcc0de5bf, 0x41e981bcb7e00000},
+ {0x1, 0x3FF0000000000000},
+ {0xFFFFFFFF, 0x41EFFFFFFFE00000},
+ {0x1B09788B, 0x41BB09788B000000},
+ {0x4C5FCE8, 0x419317F3A0000000},
+ {0xCC0DE5BF, 0x41E981BCB7E00000},
{0x2, 0x4000000000000000},
{0x3, 0x4008000000000000},
{0x4, 0x4010000000000000},
{0x5, 0x4014000000000000},
{0x8, 0x4020000000000000},
{0x9, 0x4022000000000000},
- {0xffffffffffffffff, 0x43f0000000000000},
- {0xfffffffffffffffe, 0x43f0000000000000},
- {0xfffffffffffffffd, 0x43f0000000000000},
- {0x100000000, 0x41f0000000000000},
- {0xffffffff00000000, 0x43efffffffe00000},
- {0x1b09788b00000000, 0x43bb09788b000000},
- {0x4c5fce800000000, 0x439317f3a0000000},
- {0xcc0de5bf00000000, 0x43e981bcb7e00000},
+ {0xFFFFFFFFFFFFFFFF, 0x43F0000000000000},
+ {0xFFFFFFFFFFFFFFFE, 0x43F0000000000000},
+ {0xFFFFFFFFFFFFFFFD, 0x43F0000000000000},
+ {0x100000000, 0x41F0000000000000},
+ {0xFFFFFFFF00000000, 0x43EFFFFFFFE00000},
+ {0x1B09788B00000000, 0x43BB09788B000000},
+ {0x4C5FCE800000000, 0x439317F3A0000000},
+ {0xCC0DE5BF00000000, 0x43E981BCB7E00000},
{0x200000000, 0x4200000000000000},
{0x300000000, 0x4208000000000000},
{0x400000000, 0x4210000000000000},
{0x500000000, 0x4214000000000000},
{0x800000000, 0x4220000000000000},
{0x900000000, 0x4222000000000000},
- {0x273a798e187937a3, 0x43c39d3cc70c3c9c},
- {0xece3af835495a16b, 0x43ed9c75f06a92b4},
- {0xb668ecc11223344, 0x43a6cd1d98224467},
- {0x9e, 0x4063c00000000000},
- {0x43, 0x4050c00000000000},
- {0xaf73, 0x40e5ee6000000000},
- {0x116b, 0x40b16b0000000000},
- {0x658ecc, 0x415963b300000000},
- {0x2b3b4c, 0x41459da600000000},
- {0x88776655, 0x41e10eeccaa00000},
- {0x70000000, 0x41dc000000000000},
- {0x7200000, 0x419c800000000000},
- {0x7fffffff, 0x41dfffffffc00000},
- {0x56123761, 0x41d5848dd8400000},
- {0x7fffff00, 0x41dfffffc0000000},
- {0x761c4761eeeeeeee, 0x43dd8711d87bbbbc},
- {0x80000000eeeeeeee, 0x43e00000001dddde},
- {0x88888888dddddddd, 0x43e11111111bbbbc},
- {0xa0000000dddddddd, 0x43e40000001bbbbc},
- {0xddddddddaaaaaaaa, 0x43ebbbbbbbb55555},
- {0xe0000000aaaaaaaa, 0x43ec000000155555},
- {0xeeeeeeeeeeeeeeee, 0x43edddddddddddde},
- {0xfffffffdeeeeeeee, 0x43efffffffbdddde},
- {0xf0000000dddddddd, 0x43ee0000001bbbbc},
- {0x7fffffdddddddd, 0x435ffffff7777777},
- {0x3fffffaaaaaaaa, 0x434fffffd5555555},
- {0x1fffffaaaaaaaa, 0x433fffffaaaaaaaa},
- {0xfffff, 0x412ffffe00000000},
- {0x7ffff, 0x411ffffc00000000},
- {0x3ffff, 0x410ffff800000000},
- {0x1ffff, 0x40fffff000000000},
- {0xffff, 0x40efffe000000000},
- {0x7fff, 0x40dfffc000000000},
- {0x3fff, 0x40cfff8000000000},
- {0x1fff, 0x40bfff0000000000},
- {0xfff, 0x40affe0000000000},
- {0x7ff, 0x409ffc0000000000},
- {0x3ff, 0x408ff80000000000},
- {0x1ff, 0x407ff00000000000},
- {0x3fffffffffff, 0x42cfffffffffff80},
- {0x1fffffffffff, 0x42bfffffffffff00},
- {0xfffffffffff, 0x42affffffffffe00},
- {0x7ffffffffff, 0x429ffffffffffc00},
- {0x3ffffffffff, 0x428ffffffffff800},
- {0x1ffffffffff, 0x427ffffffffff000},
- {0x8000008000000000, 0x43e0000010000000},
- {0x8000008000000001, 0x43e0000010000000},
- {0x8000000000000400, 0x43e0000000000000},
- {0x8000000000000401, 0x43e0000000000001}};
+ {0x273A798E187937A3, 0x43C39D3CC70C3C9C},
+ {0xECE3AF835495A16B, 0x43ED9C75F06A92B4},
+ {0xB668ECC11223344, 0x43A6CD1D98224467},
+ {0x9E, 0x4063C00000000000},
+ {0x43, 0x4050C00000000000},
+ {0xAF73, 0x40E5EE6000000000},
+ {0x116B, 0x40B16B0000000000},
+ {0x658ECC, 0x415963B300000000},
+ {0x2B3B4C, 0x41459DA600000000},
+ {0x88776655, 0x41E10EECCAA00000},
+ {0x70000000, 0x41DC000000000000},
+ {0x7200000, 0x419C800000000000},
+ {0x7FFFFFFF, 0x41DFFFFFFFC00000},
+ {0x56123761, 0x41D5848DD8400000},
+ {0x7FFFFF00, 0x41DFFFFFC0000000},
+ {0x761C4761EEEEEEEE, 0x43DD8711D87BBBBC},
+ {0x80000000EEEEEEEE, 0x43E00000001DDDDE},
+ {0x88888888DDDDDDDD, 0x43E11111111BBBBC},
+ {0xA0000000DDDDDDDD, 0x43E40000001BBBBC},
+ {0xDDDDDDDDAAAAAAAA, 0x43EBBBBBBBB55555},
+ {0xE0000000AAAAAAAA, 0x43EC000000155555},
+ {0xEEEEEEEEEEEEEEEE, 0x43EDDDDDDDDDDDDE},
+ {0xFFFFFFFDEEEEEEEE, 0x43EFFFFFFFBDDDDE},
+ {0xF0000000DDDDDDDD, 0x43EE0000001BBBBC},
+ {0x7FFFFFDDDDDDDD, 0x435FFFFFF7777777},
+ {0x3FFFFFAAAAAAAA, 0x434FFFFFD5555555},
+ {0x1FFFFFAAAAAAAA, 0x433FFFFFAAAAAAAA},
+ {0xFFFFF, 0x412FFFFE00000000},
+ {0x7FFFF, 0x411FFFFC00000000},
+ {0x3FFFF, 0x410FFFF800000000},
+ {0x1FFFF, 0x40FFFFF000000000},
+ {0xFFFF, 0x40EFFFE000000000},
+ {0x7FFF, 0x40DFFFC000000000},
+ {0x3FFF, 0x40CFFF8000000000},
+ {0x1FFF, 0x40BFFF0000000000},
+ {0xFFF, 0x40AFFE0000000000},
+ {0x7FF, 0x409FFC0000000000},
+ {0x3FF, 0x408FF80000000000},
+ {0x1FF, 0x407FF00000000000},
+ {0x3FFFFFFFFFFF, 0x42CFFFFFFFFFFF80},
+ {0x1FFFFFFFFFFF, 0x42BFFFFFFFFFFF00},
+ {0xFFFFFFFFFFF, 0x42AFFFFFFFFFFE00},
+ {0x7FFFFFFFFFF, 0x429FFFFFFFFFFC00},
+ {0x3FFFFFFFFFF, 0x428FFFFFFFFFF800},
+ {0x1FFFFFFFFFF, 0x427FFFFFFFFFF000},
+ {0x8000008000000000, 0x43E0000010000000},
+ {0x8000008000000001, 0x43E0000010000000},
+ {0x8000000000000400, 0x43E0000000000000},
+ {0x8000000000000401, 0x43E0000000000001}};
WasmRunner<double, uint64_t> r(execution_mode);
BUILD(r, WASM_F64_UCONVERT_I64(WASM_GET_LOCAL(0)));
for (size_t i = 0; i < arraysize(values); i++) {
@@ -907,19 +907,19 @@ WASM_EXEC_TEST(CallI64Parameter) {
BUILD(
r,
WASM_I32_CONVERT_I64(WASM_CALL_FUNCTION(
- t.function_index(), WASM_I64V_9(0xbcd12340000000b),
- WASM_I64V_9(0xbcd12340000000c), WASM_I32V_1(0xd),
- WASM_I32_CONVERT_I64(WASM_I64V_9(0xbcd12340000000e)),
- WASM_I64V_9(0xbcd12340000000f), WASM_I64V_10(0xbcd1234000000010),
- WASM_I64V_10(0xbcd1234000000011), WASM_I64V_10(0xbcd1234000000012),
- WASM_I64V_10(0xbcd1234000000013), WASM_I64V_10(0xbcd1234000000014),
- WASM_I64V_10(0xbcd1234000000015), WASM_I64V_10(0xbcd1234000000016),
- WASM_I64V_10(0xbcd1234000000017), WASM_I64V_10(0xbcd1234000000018),
- WASM_I64V_10(0xbcd1234000000019), WASM_I64V_10(0xbcd123400000001a),
- WASM_I64V_10(0xbcd123400000001b), WASM_I64V_10(0xbcd123400000001c),
- WASM_I64V_10(0xbcd123400000001d))));
+ t.function_index(), WASM_I64V_9(0xBCD12340000000B),
+ WASM_I64V_9(0xBCD12340000000C), WASM_I32V_1(0xD),
+ WASM_I32_CONVERT_I64(WASM_I64V_9(0xBCD12340000000E)),
+ WASM_I64V_9(0xBCD12340000000F), WASM_I64V_10(0xBCD1234000000010),
+ WASM_I64V_10(0xBCD1234000000011), WASM_I64V_10(0xBCD1234000000012),
+ WASM_I64V_10(0xBCD1234000000013), WASM_I64V_10(0xBCD1234000000014),
+ WASM_I64V_10(0xBCD1234000000015), WASM_I64V_10(0xBCD1234000000016),
+ WASM_I64V_10(0xBCD1234000000017), WASM_I64V_10(0xBCD1234000000018),
+ WASM_I64V_10(0xBCD1234000000019), WASM_I64V_10(0xBCD123400000001A),
+ WASM_I64V_10(0xBCD123400000001B), WASM_I64V_10(0xBCD123400000001C),
+ WASM_I64V_10(0xBCD123400000001D))));
- CHECK_EQ(i + 0xb, r.Call());
+ CHECK_EQ(i + 0xB, r.Call());
}
}
@@ -935,11 +935,10 @@ WASM_EXEC_TEST(CallI64Return) {
BUILD(t, WASM_GET_LOCAL(0), WASM_I32V(7));
// Build the first calling function.
- BUILD(r,
- WASM_CALL_FUNCTION(
- t.function_index(), WASM_I64V(0xbcd12340000000b)), WASM_DROP);
+ BUILD(r, WASM_CALL_FUNCTION(t.function_index(), WASM_I64V(0xBCD12340000000B)),
+ WASM_DROP);
- CHECK_EQ(0xbcd12340000000b, r.Call());
+ CHECK_EQ(0xBCD12340000000B, r.Call());
}
void TestI64Binop(WasmExecutionMode execution_mode, WasmOpcode opcode,
@@ -981,35 +980,35 @@ void TestI64Cmp(WasmExecutionMode execution_mode, WasmOpcode opcode,
} while (false)
WASM_EXEC_TEST(I64Binops) {
- TEST_I64_BINOP(I64Add, -5586332274295447011, 0x501b72ebabc26847,
- 0x625de9793d8f79d6);
- TEST_I64_BINOP(I64Sub, 9001903251710731490, 0xf24fe6474640002e,
- 0x7562b6f711991b4c);
- TEST_I64_BINOP(I64Mul, -4569547818546064176, 0x231a263c2cbc6451,
- 0xead44de6bd3e23d0);
- TEST_I64_BINOP(I64Mul, -25963122347507043, 0x4da1fa47c9352b73,
- 0x91fe82317aa035af);
- TEST_I64_BINOP(I64Mul, 7640290486138131960, 0x185731abe8eea47c,
- 0x714ec59f1380d4c2);
- TEST_I64_BINOP(I64DivS, -91517, 0x93b1190a34de56a0, 0x00004d8f68863948);
- TEST_I64_BINOP(I64DivU, 149016, 0xe15b3727e8a2080a, 0x0000631bfa72db8b);
- TEST_I64_BINOP(I64RemS, -664128064149968, 0x9a78b4e4fe708692,
- 0x0003e0b6b3be7609);
- TEST_I64_BINOP(I64RemU, 1742040017332765, 0x0ce84708c6258c81,
- 0x000a6fde82016697);
- TEST_I64_BINOP(I64And, 2531040582801836054, 0xaf257d1602644a16,
- 0x33b290a91a10d997);
- TEST_I64_BINOP(I64Ior, 8556201506536114940, 0x169d9be7bd3f0a5c,
- 0x66bca28d77af40e8);
- TEST_I64_BINOP(I64Xor, -4605655183785456377, 0xb6ea20a5d48e85b8,
- 0x76ff4da6c80688bf);
- TEST_I64_BINOP(I64Shl, -7240704056088331264, 0xef4dc1ed030e8ffe, 9);
- TEST_I64_BINOP(I64ShrU, 12500673744059159, 0xb1a52fa7deec5d14, 10);
- TEST_I64_BINOP(I64ShrS, 1725103446999874, 0x3107c791461a112b, 11);
- TEST_I64_BINOP(I64Ror, -8960135652432576946, 0x73418d1717e4e83a, 12);
- TEST_I64_BINOP(I64Ror, 7617662827409989779, 0xebff67cf0c126d36, 13);
- TEST_I64_BINOP(I64Rol, -2097714064174346012, 0x43938b8db0b0f230, 14);
- TEST_I64_BINOP(I64Rol, 8728493013947314237, 0xe07af243ac4d219d, 15);
+ TEST_I64_BINOP(I64Add, -5586332274295447011, 0x501B72EBABC26847,
+ 0x625DE9793D8F79D6);
+ TEST_I64_BINOP(I64Sub, 9001903251710731490, 0xF24FE6474640002E,
+ 0x7562B6F711991B4C);
+ TEST_I64_BINOP(I64Mul, -4569547818546064176, 0x231A263C2CBC6451,
+ 0xEAD44DE6BD3E23D0);
+ TEST_I64_BINOP(I64Mul, -25963122347507043, 0x4DA1FA47C9352B73,
+ 0x91FE82317AA035AF);
+ TEST_I64_BINOP(I64Mul, 7640290486138131960, 0x185731ABE8EEA47C,
+ 0x714EC59F1380D4C2);
+ TEST_I64_BINOP(I64DivS, -91517, 0x93B1190A34DE56A0, 0x00004D8F68863948);
+ TEST_I64_BINOP(I64DivU, 149016, 0xE15B3727E8A2080A, 0x0000631BFA72DB8B);
+ TEST_I64_BINOP(I64RemS, -664128064149968, 0x9A78B4E4FE708692,
+ 0x0003E0B6B3BE7609);
+ TEST_I64_BINOP(I64RemU, 1742040017332765, 0x0CE84708C6258C81,
+ 0x000A6FDE82016697);
+ TEST_I64_BINOP(I64And, 2531040582801836054, 0xAF257D1602644A16,
+ 0x33B290A91A10D997);
+ TEST_I64_BINOP(I64Ior, 8556201506536114940, 0x169D9BE7BD3F0A5C,
+ 0x66BCA28D77AF40E8);
+ TEST_I64_BINOP(I64Xor, -4605655183785456377, 0xB6EA20A5D48E85B8,
+ 0x76FF4DA6C80688BF);
+ TEST_I64_BINOP(I64Shl, -7240704056088331264, 0xEF4DC1ED030E8FFE, 9);
+ TEST_I64_BINOP(I64ShrU, 12500673744059159, 0xB1A52FA7DEEC5D14, 10);
+ TEST_I64_BINOP(I64ShrS, 1725103446999874, 0x3107C791461A112B, 11);
+ TEST_I64_BINOP(I64Ror, -8960135652432576946, 0x73418D1717E4E83A, 12);
+ TEST_I64_BINOP(I64Ror, 7617662827409989779, 0xEBFF67CF0C126D36, 13);
+ TEST_I64_BINOP(I64Rol, -2097714064174346012, 0x43938B8DB0B0F230, 14);
+ TEST_I64_BINOP(I64Rol, 8728493013947314237, 0xE07AF243AC4D219D, 15);
}
#undef TEST_I64_BINOP
@@ -1043,9 +1042,9 @@ WASM_EXEC_TEST(I64Clz) {
} values[] = {{0, 0x8000100000000000}, {1, 0x4000050000000000},
{2, 0x2000030000000000}, {3, 0x1000000300000000},
{4, 0x0805000000000000}, {5, 0x0400600000000000},
- {6, 0x0200000000000000}, {7, 0x010000a000000000},
- {8, 0x00800c0000000000}, {9, 0x0040000000000000},
- {10, 0x0020000d00000000}, {11, 0x00100f0000000000},
+ {6, 0x0200000000000000}, {7, 0x010000A000000000},
+ {8, 0x00800C0000000000}, {9, 0x0040000000000000},
+ {10, 0x0020000D00000000}, {11, 0x00100F0000000000},
{12, 0x0008000000000000}, {13, 0x0004100000000000},
{14, 0x0002002000000000}, {15, 0x0001030000000000},
{16, 0x0000804000000000}, {17, 0x0000400500000000},
@@ -1059,9 +1058,9 @@ WASM_EXEC_TEST(I64Clz) {
{32, 0x0000000080001000}, {33, 0x0000000040000500},
{34, 0x0000000020000300}, {35, 0x0000000010000003},
{36, 0x0000000008050000}, {37, 0x0000000004006000},
- {38, 0x0000000002000000}, {39, 0x00000000010000a0},
- {40, 0x0000000000800c00}, {41, 0x0000000000400000},
- {42, 0x000000000020000d}, {43, 0x0000000000100f00},
+ {38, 0x0000000002000000}, {39, 0x00000000010000A0},
+ {40, 0x0000000000800C00}, {41, 0x0000000000400000},
+ {42, 0x000000000020000D}, {43, 0x0000000000100F00},
{44, 0x0000000000080000}, {45, 0x0000000000041000},
{46, 0x0000000000020020}, {47, 0x0000000000010300},
{48, 0x0000000000008040}, {49, 0x0000000000004005},
@@ -1088,37 +1087,37 @@ WASM_EXEC_TEST(I64Ctz) {
uint64_t input;
} values[] = {{64, 0x0000000000000000}, {63, 0x8000000000000000},
{62, 0x4000000000000000}, {61, 0x2000000000000000},
- {60, 0x1000000000000000}, {59, 0xa800000000000000},
- {58, 0xf400000000000000}, {57, 0x6200000000000000},
- {56, 0x9100000000000000}, {55, 0xcd80000000000000},
- {54, 0x0940000000000000}, {53, 0xaf20000000000000},
- {52, 0xac10000000000000}, {51, 0xe0b8000000000000},
- {50, 0x9ce4000000000000}, {49, 0xc792000000000000},
- {48, 0xb8f1000000000000}, {47, 0x3b9f800000000000},
- {46, 0xdb4c400000000000}, {45, 0xe9a3200000000000},
- {44, 0xfca6100000000000}, {43, 0x6c8a780000000000},
- {42, 0x8ce5a40000000000}, {41, 0xcb7d020000000000},
- {40, 0xcb4dc10000000000}, {39, 0xdfbec58000000000},
- {38, 0x27a9db4000000000}, {37, 0xde3bcb2000000000},
- {36, 0xd7e8a61000000000}, {35, 0x9afdbc8800000000},
- {34, 0x9afdbc8400000000}, {33, 0x9afdbc8200000000},
- {32, 0x9afdbc8100000000}, {31, 0x0000000080000000},
+ {60, 0x1000000000000000}, {59, 0xA800000000000000},
+ {58, 0xF400000000000000}, {57, 0x6200000000000000},
+ {56, 0x9100000000000000}, {55, 0xCD80000000000000},
+ {54, 0x0940000000000000}, {53, 0xAF20000000000000},
+ {52, 0xAC10000000000000}, {51, 0xE0B8000000000000},
+ {50, 0x9CE4000000000000}, {49, 0xC792000000000000},
+ {48, 0xB8F1000000000000}, {47, 0x3B9F800000000000},
+ {46, 0xDB4C400000000000}, {45, 0xE9A3200000000000},
+ {44, 0xFCA6100000000000}, {43, 0x6C8A780000000000},
+ {42, 0x8CE5A40000000000}, {41, 0xCB7D020000000000},
+ {40, 0xCB4DC10000000000}, {39, 0xDFBEC58000000000},
+ {38, 0x27A9DB4000000000}, {37, 0xDE3BCB2000000000},
+ {36, 0xD7E8A61000000000}, {35, 0x9AFDBC8800000000},
+ {34, 0x9AFDBC8400000000}, {33, 0x9AFDBC8200000000},
+ {32, 0x9AFDBC8100000000}, {31, 0x0000000080000000},
{30, 0x0000000040000000}, {29, 0x0000000020000000},
- {28, 0x0000000010000000}, {27, 0x00000000a8000000},
- {26, 0x00000000f4000000}, {25, 0x0000000062000000},
- {24, 0x0000000091000000}, {23, 0x00000000cd800000},
- {22, 0x0000000009400000}, {21, 0x00000000af200000},
- {20, 0x00000000ac100000}, {19, 0x00000000e0b80000},
- {18, 0x000000009ce40000}, {17, 0x00000000c7920000},
- {16, 0x00000000b8f10000}, {15, 0x000000003b9f8000},
- {14, 0x00000000db4c4000}, {13, 0x00000000e9a32000},
- {12, 0x00000000fca61000}, {11, 0x000000006c8a7800},
- {10, 0x000000008ce5a400}, {9, 0x00000000cb7d0200},
- {8, 0x00000000cb4dc100}, {7, 0x00000000dfbec580},
- {6, 0x0000000027a9db40}, {5, 0x00000000de3bcb20},
- {4, 0x00000000d7e8a610}, {3, 0x000000009afdbc88},
- {2, 0x000000009afdbc84}, {1, 0x000000009afdbc82},
- {0, 0x000000009afdbc81}};
+ {28, 0x0000000010000000}, {27, 0x00000000A8000000},
+ {26, 0x00000000F4000000}, {25, 0x0000000062000000},
+ {24, 0x0000000091000000}, {23, 0x00000000CD800000},
+ {22, 0x0000000009400000}, {21, 0x00000000AF200000},
+ {20, 0x00000000AC100000}, {19, 0x00000000E0B80000},
+ {18, 0x000000009CE40000}, {17, 0x00000000C7920000},
+ {16, 0x00000000B8F10000}, {15, 0x000000003B9F8000},
+ {14, 0x00000000DB4C4000}, {13, 0x00000000E9A32000},
+ {12, 0x00000000FCA61000}, {11, 0x000000006C8A7800},
+ {10, 0x000000008CE5A400}, {9, 0x00000000CB7D0200},
+ {8, 0x00000000CB4DC100}, {7, 0x00000000DFBEC580},
+ {6, 0x0000000027A9DB40}, {5, 0x00000000DE3BCB20},
+ {4, 0x00000000D7E8A610}, {3, 0x000000009AFDBC88},
+ {2, 0x000000009AFDBC84}, {1, 0x000000009AFDBC82},
+ {0, 0x000000009AFDBC81}};
WasmRunner<int64_t, uint64_t> r(execution_mode);
BUILD(r, WASM_I64_CTZ(WASM_GET_LOCAL(0)));
@@ -1132,11 +1131,11 @@ WASM_EXEC_TEST(I64Popcnt2) {
struct {
int64_t expected;
uint64_t input;
- } values[] = {{64, 0xffffffffffffffff},
+ } values[] = {{64, 0xFFFFFFFFFFFFFFFF},
{0, 0x0000000000000000},
{2, 0x0000080000008000},
{26, 0x1123456782345678},
- {38, 0xffedcba09edcba09}};
+ {38, 0xFFEDCBA09EDCBA09}};
WasmRunner<int64_t, uint64_t> r(execution_mode);
BUILD(r, WASM_I64_POPCNT(WASM_GET_LOCAL(0)));
@@ -1343,10 +1342,10 @@ WASM_EXEC_TEST(SignallingNanSurvivesI64ReinterpretF64) {
REQUIRE(I64ReinterpretF64);
WasmRunner<int64_t> r(execution_mode);
BUILD(r, WASM_I64_REINTERPRET_F64(WASM_SEQ(kExprF64Const, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0xf4, 0x7f)));
+ 0x00, 0x00, 0x00, 0xF4, 0x7F)));
// This is a signalling NaN.
- CHECK_EQ(0x7ff4000000000000, r.Call());
+ CHECK_EQ(0x7FF4000000000000, r.Call());
}
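// 0x7FF4000000000000 has the all-ones exponent, a nonzero mantissa, and the
// mantissa MSB (the quiet bit) clear — i.e. a signalling NaN — and the test
// checks that i64.reinterpret/f64 moves the exact bit pattern instead of
// quietening it. Layout, for reference:
//   sign(1) | exponent(11) = 0x7FF | quiet bit = 0 | payload != 0
// Ordinary float arithmetic would not do here: given
//   double snan = bit_cast<double>(uint64_t{0x7FF4000000000000});
// an operation such as snan + 0 may quieten it to 0x7FFC000000000000, which
// is why the test routes the value through the reinterpret opcode.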
WASM_EXEC_TEST(F64ReinterpretI64) {
@@ -1373,11 +1372,11 @@ WASM_EXEC_TEST(LoadMemI64) {
BUILD(r, WASM_LOAD_MEM(MachineType::Int64(), WASM_ZERO));
- r.builder().WriteMemory<int64_t>(&memory[0], 0x1abbccdd00112233LL);
- CHECK_EQ(0x1abbccdd00112233LL, r.Call());
+ r.builder().WriteMemory<int64_t>(&memory[0], 0x1ABBCCDD00112233LL);
+ CHECK_EQ(0x1ABBCCDD00112233LL, r.Call());
- r.builder().WriteMemory<int64_t>(&memory[0], 0x33aabbccdd001122LL);
- CHECK_EQ(0x33aabbccdd001122LL, r.Call());
+ r.builder().WriteMemory<int64_t>(&memory[0], 0x33AABBCCDD001122LL);
+ CHECK_EQ(0x33AABBCCDD001122LL, r.Call());
r.builder().WriteMemory<int64_t>(&memory[0], 77777777);
CHECK_EQ(77777777, r.Call());
@@ -1393,11 +1392,11 @@ WASM_EXEC_TEST(LoadMemI64_alignment) {
BUILD(r,
WASM_LOAD_MEM_ALIGNMENT(MachineType::Int64(), WASM_ZERO, alignment));
- r.builder().WriteMemory<int64_t>(&memory[0], 0x1abbccdd00112233LL);
- CHECK_EQ(0x1abbccdd00112233LL, r.Call());
+ r.builder().WriteMemory<int64_t>(&memory[0], 0x1ABBCCDD00112233LL);
+ CHECK_EQ(0x1ABBCCDD00112233LL, r.Call());
- r.builder().WriteMemory<int64_t>(&memory[0], 0x33aabbccdd001122LL);
- CHECK_EQ(0x33aabbccdd001122LL, r.Call());
+ r.builder().WriteMemory<int64_t>(&memory[0], 0x33AABBCCDD001122LL);
+ CHECK_EQ(0x33AABBCCDD001122LL, r.Call());
r.builder().WriteMemory<int64_t>(&memory[0], 77777777);
CHECK_EQ(77777777, r.Call());
@@ -1438,7 +1437,7 @@ WASM_EXEC_TEST(MemI64_Sum) {
}
WASM_EXEC_TEST(StoreMemI64_alignment) {
- const int64_t kWritten = 0x12345678abcd0011ll;
+ const int64_t kWritten = 0x12345678ABCD0011ll;
for (byte i = 0; i <= 3; i++) {
WasmRunner<int64_t, int64_t> r(execution_mode);
@@ -1494,7 +1493,7 @@ WASM_EXEC_TEST(I64Ror) {
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) {
- int64_t expected = base::bits::RotateRight64(*i, *j & 0x3f);
+ int64_t expected = base::bits::RotateRight64(*i, *j & 0x3F);
CHECK_EQ(expected, r.Call(*i, *j));
}
}
@@ -1507,7 +1506,7 @@ WASM_EXEC_TEST(I64Rol) {
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) {
- int64_t expected = base::bits::RotateLeft64(*i, *j & 0x3f);
+ int64_t expected = base::bits::RotateLeft64(*i, *j & 0x3F);
CHECK_EQ(expected, r.Call(*i, *j));
}
}
@@ -1515,7 +1514,7 @@ WASM_EXEC_TEST(I64Rol) {
WASM_EXEC_TEST(StoreMem_offset_oob_i64) {
// TODO(eholk): Fix this test for the trap handler.
- if (trap_handler::UseTrapHandler()) return;
+ if (trap_handler::IsTrapHandlerEnabled()) return;
static const MachineType machineTypes[] = {
MachineType::Int8(), MachineType::Uint8(), MachineType::Int16(),
MachineType::Uint16(), MachineType::Int32(), MachineType::Uint32(),
@@ -1542,6 +1541,36 @@ WASM_EXEC_TEST(StoreMem_offset_oob_i64) {
}
}
+WASM_EXEC_TEST(Store_i64_narrowed) {
+ constexpr byte kOpcodes[] = {kExprI64StoreMem8, kExprI64StoreMem16,
+ kExprI64StoreMem32, kExprI64StoreMem};
+ int stored_size_in_bytes = 0;
+ for (auto opcode : kOpcodes) {
+ stored_size_in_bytes = std::max(1, stored_size_in_bytes * 2);
+ constexpr int kBytes = 24;
+ uint8_t expected_memory[kBytes] = {0};
+ WasmRunner<int32_t, int32_t, int64_t> r(execution_mode);
+ uint8_t* memory = r.builder().AddMemoryElems<uint8_t>(kBytes);
+ constexpr uint64_t kPattern = 0x0123456789abcdef;
+
+ BUILD(r, WASM_GET_LOCAL(0), // index
+ WASM_GET_LOCAL(1), // value
+ opcode, ZERO_ALIGNMENT, ZERO_OFFSET, // store
+ WASM_ZERO); // return value
+
+ for (int i = 0; i <= kBytes - stored_size_in_bytes; ++i) {
+ uint64_t pattern = base::bits::RotateLeft64(kPattern, i % 64);
+ r.Call(i, pattern);
+ for (int b = 0; b < stored_size_in_bytes; ++b) {
+ expected_memory[i + b] = static_cast<uint8_t>(pattern >> (b * 8));
+ }
+ for (int w = 0; w < kBytes; ++w) {
+ CHECK_EQ(expected_memory[w], memory[w]);
+ }
+ }
+ }
+}
+
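// The new test above exercises the narrowed i64 store opcodes:
// i64.store8/16/32 write only the low 1/2/4 bytes of the value in
// little-endian order, and the full i64.store writes all 8. The
// expected-memory model in the inner loop is exactly
//   byte[i + b] = (value >> (8 * b)) & 0xFF, for b < stored_size_in_bytes
// so storing 0x0123456789ABCDEF with i64.store16 at offset 0 leaves
// memory[0] == 0xEF and memory[1] == 0xCD, with every other byte untouched.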
WASM_EXEC_TEST(UnalignedInt64Load) {
WasmRunner<uint64_t> r(execution_mode);
r.builder().AddMemoryElems<int64_t>(8);
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc b/deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc
index b7b200984d..3b0e319bb4 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc
@@ -234,7 +234,7 @@ WASM_COMPILED_EXEC_TEST(I32AtomicCompareExchange8U) {
}
}
-WASM_COMPILED_EXEC_TEST(I32AtomicLoad) {
+WASM_EXEC_TEST(I32AtomicLoad) {
EXPERIMENTAL_FLAG_SCOPE(threads);
WasmRunner<uint32_t> r(execution_mode);
r.builder().SetHasSharedMemory();
@@ -249,7 +249,7 @@ WASM_COMPILED_EXEC_TEST(I32AtomicLoad) {
}
}
-WASM_COMPILED_EXEC_TEST(I32AtomicLoad16U) {
+WASM_EXEC_TEST(I32AtomicLoad16U) {
EXPERIMENTAL_FLAG_SCOPE(threads);
WasmRunner<uint32_t> r(execution_mode);
r.builder().SetHasSharedMemory();
@@ -264,7 +264,7 @@ WASM_COMPILED_EXEC_TEST(I32AtomicLoad16U) {
}
}
-WASM_COMPILED_EXEC_TEST(I32AtomicLoad8U) {
+WASM_EXEC_TEST(I32AtomicLoad8U) {
EXPERIMENTAL_FLAG_SCOPE(threads);
WasmRunner<uint32_t> r(execution_mode);
r.builder().SetHasSharedMemory();
@@ -279,7 +279,7 @@ WASM_COMPILED_EXEC_TEST(I32AtomicLoad8U) {
}
}
-WASM_COMPILED_EXEC_TEST(I32AtomicStoreLoad) {
+WASM_EXEC_TEST(I32AtomicStoreLoad) {
EXPERIMENTAL_FLAG_SCOPE(threads);
WasmRunner<uint32_t, uint32_t> r(execution_mode);
r.builder().SetHasSharedMemory();
@@ -298,7 +298,7 @@ WASM_COMPILED_EXEC_TEST(I32AtomicStoreLoad) {
}
}
-WASM_COMPILED_EXEC_TEST(I32AtomicStoreLoad16U) {
+WASM_EXEC_TEST(I32AtomicStoreLoad16U) {
EXPERIMENTAL_FLAG_SCOPE(threads);
WasmRunner<uint32_t, uint32_t> r(execution_mode);
r.builder().SetHasSharedMemory();
@@ -318,7 +318,7 @@ WASM_COMPILED_EXEC_TEST(I32AtomicStoreLoad16U) {
}
}
-WASM_COMPILED_EXEC_TEST(I32AtomicStoreLoad8U) {
+WASM_EXEC_TEST(I32AtomicStoreLoad8U) {
EXPERIMENTAL_FLAG_SCOPE(threads);
WasmRunner<uint32_t, uint32_t> r(execution_mode);
r.builder().SetHasSharedMemory();
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc b/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
index f67ce2d121..76ca00cb3b 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
@@ -294,14 +294,14 @@ TEST(Breakpoint_I32And_disable) {
TEST(GrowMemory) {
{
WasmRunner<int32_t, uint32_t> r(kExecuteInterpreter);
- r.builder().AddMemory(WasmModule::kPageSize);
+ r.builder().AddMemory(kWasmPageSize);
r.builder().SetMaxMemPages(10);
BUILD(r, WASM_GROW_MEMORY(WASM_GET_LOCAL(0)));
CHECK_EQ(1, r.Call(1));
}
{
WasmRunner<int32_t, uint32_t> r(kExecuteInterpreter);
- r.builder().AddMemory(WasmModule::kPageSize);
+ r.builder().AddMemory(kWasmPageSize);
r.builder().SetMaxMemPages(10);
BUILD(r, WASM_GROW_MEMORY(WASM_GET_LOCAL(0)));
CHECK_EQ(-1, r.Call(11));
@@ -312,7 +312,7 @@ TEST(GrowMemoryPreservesData) {
int32_t index = 16;
int32_t value = 2335;
WasmRunner<int32_t, uint32_t> r(kExecuteInterpreter);
- r.builder().AddMemory(WasmModule::kPageSize);
+ r.builder().AddMemory(kWasmPageSize);
BUILD(r, WASM_STORE_MEM(MachineType::Int32(), WASM_I32V(index),
WASM_I32V(value)),
WASM_GROW_MEMORY(WASM_GET_LOCAL(0)), WASM_DROP,
@@ -323,7 +323,7 @@ TEST(GrowMemoryPreservesData) {
TEST(GrowMemoryInvalidSize) {
// Grow memory by an invalid amount without initial memory.
WasmRunner<int32_t, uint32_t> r(kExecuteInterpreter);
- r.builder().AddMemory(WasmModule::kPageSize);
+ r.builder().AddMemory(kWasmPageSize);
BUILD(r, WASM_GROW_MEMORY(WASM_GET_LOCAL(0)));
CHECK_EQ(-1, r.Call(1048575));
}
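// grow_memory returns the previous size in pages on success and -1 on
// failure, which is what these interpreter tests pin down: growing a
// one-page memory by 1 under a 10-page maximum yields 1, growing by 11
// yields -1, and growing by 1048575 pages overflows the 65536-page (4 GiB)
// wasm address space. A sketch of the host-side contract, under those
// assumptions:
int32_t GrowResultSketch(uint32_t old_pages, uint32_t delta,
                         uint32_t max_pages) {
  uint64_t new_pages = uint64_t{old_pages} + delta;  // no 32-bit overflow
  if (new_pages > max_pages) return -1;
  return static_cast<int32_t>(old_pages);  // previous size in pages
}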
@@ -364,7 +364,7 @@ TEST(TestPossibleNondeterminism) {
{
int32_t index = 16;
WasmRunner<int32_t, float> r(kExecuteInterpreter);
- r.builder().AddMemory(WasmModule::kPageSize);
+ r.builder().AddMemory(kWasmPageSize);
BUILD(r, WASM_STORE_MEM(MachineType::Float32(), WASM_I32V(index),
WASM_GET_LOCAL(0)),
WASM_I32V(index));
@@ -376,7 +376,7 @@ TEST(TestPossibleNondeterminism) {
{
int32_t index = 16;
WasmRunner<int32_t, double> r(kExecuteInterpreter);
- r.builder().AddMemory(WasmModule::kPageSize);
+ r.builder().AddMemory(kWasmPageSize);
BUILD(r, WASM_STORE_MEM(MachineType::Float64(), WASM_I32V(index),
WASM_GET_LOCAL(0)),
WASM_I32V(index));
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
index b4e0298a72..ab40a6366d 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
@@ -133,9 +133,9 @@ TEST(Run_WasmModule_ReadLoadedDataSegment) {
byte code[] = {
WASM_LOAD_MEM(MachineType::Int32(), WASM_I32V_1(kDataSegmentDest0))};
EMIT_CODE_WITH_END(f, code);
- byte data[] = {0xaa, 0xbb, 0xcc, 0xdd};
+ byte data[] = {0xAA, 0xBB, 0xCC, 0xDD};
builder->AddDataSegment(data, sizeof(data), kDataSegmentDest0);
- TestModule(&zone, builder, 0xddccbbaa);
+ TestModule(&zone, builder, 0xDDCCBBAA);
}
Cleanup();
}
@@ -256,7 +256,7 @@ class WasmSerializationTest {
uint32_t* slot = reinterpret_cast<uint32_t*>(
const_cast<uint8_t*>(serialized_bytes_.first) +
SerializedCodeData::kPayloadLengthOffset);
- *slot = FLAG_wasm_jit_to_native ? 0u : 0xfefefefeu;
+ *slot = FLAG_wasm_jit_to_native ? 0u : 0xFEFEFEFEu;
}
v8::MaybeLocal<v8::WasmCompiledModule> Deserialize() {
@@ -277,9 +277,10 @@ class WasmSerializationTest {
DisallowHeapAllocation assume_no_gc;
Handle<WasmCompiledModule> compiled_part(module_object->compiled_module(),
current_isolate());
- CHECK_EQ(memcmp(compiled_part->module_bytes()->GetCharsAddress(),
- wire_bytes().first, wire_bytes().second),
- 0);
+ CHECK_EQ(
+ memcmp(compiled_part->shared()->module_bytes()->GetCharsAddress(),
+ wire_bytes().first, wire_bytes().second),
+ 0);
}
Handle<WasmInstanceObject> instance =
SyncInstantiate(current_isolate(), &thrower, module_object,
@@ -724,7 +725,7 @@ TEST(Run_WasmModule_GrowMemOobOffset) {
static const int kPageSize = 0x10000;
// Initial memory size = 16 + GrowMemory(10)
static const int index = kPageSize * 17 + 4;
- int value = 0xaced;
+ int value = 0xACED;
TestSignatures sigs;
v8::internal::AccountingAllocator allocator;
Zone zone(&allocator, ZONE_NAME);
@@ -746,7 +747,7 @@ TEST(Run_WasmModule_GrowMemOobFixedIndex) {
static const int kPageSize = 0x10000;
// Initial memory size = 16 + GrowMemory(10)
static const int index = kPageSize * 26 + 4;
- int value = 0xaced;
+ int value = 0xACED;
TestSignatures sigs;
Isolate* isolate = CcTest::InitIsolateOnce();
Zone zone(isolate->allocator(), ZONE_NAME);
@@ -785,7 +786,7 @@ TEST(Run_WasmModule_GrowMemOobFixedIndex) {
Handle<Object> params[1] = {Handle<Object>(Smi::FromInt(1), isolate)};
int32_t result =
testing::RunWasmModuleForTesting(isolate, instance, 1, params);
- CHECK_EQ(0xaced, result);
+ CHECK_EQ(0xACED, result);
}
Cleanup();
}
@@ -793,7 +794,7 @@ TEST(Run_WasmModule_GrowMemOobFixedIndex) {
TEST(Run_WasmModule_GrowMemOobVariableIndex) {
{
static const int kPageSize = 0x10000;
- int value = 0xaced;
+ int value = 0xACED;
TestSignatures sigs;
Isolate* isolate = CcTest::InitIsolateOnce();
v8::internal::AccountingAllocator allocator;
@@ -836,7 +837,7 @@ TEST(Run_WasmModule_GrowMemOobVariableIndex) {
Handle<Object>(Smi::FromInt((20 + i) * kPageSize - 4), isolate)};
int32_t result =
testing::RunWasmModuleForTesting(isolate, instance, 1, params);
- CHECK_EQ(0xaced, result);
+ CHECK_EQ(0xACED, result);
}
v8::TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
@@ -938,7 +939,7 @@ TEST(InitDataAtTheUpperLimit) {
U32V_1(9), // section size
ENTRY_COUNT(1), // --
0, // linear memory index
- WASM_I32V_3(0xffff), // destination offset
+ WASM_I32V_3(0xFFFF), // destination offset
kExprEnd,
U32V_1(1), // source size
'c' // data bytes
@@ -949,7 +950,7 @@ TEST(InitDataAtTheUpperLimit) {
{});
if (thrower.error()) {
thrower.Reify()->Print();
- CHECK(false);
+ FATAL("compile or instantiate error");
}
}
Cleanup();
@@ -1044,7 +1045,7 @@ TEST(MemoryWithOOBEmptyDataSegment) {
U32V_1(9), // section size
ENTRY_COUNT(1), // --
0, // linear memory index
- WASM_I32V_4(0x2468ace), // destination offset
+ WASM_I32V_4(0x2468ACE), // destination offset
kExprEnd,
U32V_1(0), // source size
};
@@ -1058,12 +1059,35 @@ TEST(MemoryWithOOBEmptyDataSegment) {
Cleanup();
}
+// Utility to free the allocated memory for a buffer that is manually
+// externalized in a test.
+struct ManuallyExternalizedBuffer {
+ Isolate* isolate_;
+ Handle<JSArrayBuffer> buffer_;
+ void* allocation_base_;
+ size_t allocation_length_;
+
+ ManuallyExternalizedBuffer(JSArrayBuffer* buffer, Isolate* isolate)
+ : isolate_(isolate),
+ buffer_(buffer, isolate),
+ allocation_base_(buffer->allocation_base()),
+ allocation_length_(buffer->allocation_length()) {
+ if (!buffer->has_guard_region()) {
+ v8::Utils::ToLocal(buffer_)->Externalize();
+ }
+ }
+ ~ManuallyExternalizedBuffer() {
+ if (!buffer_->has_guard_region()) {
+ isolate_->array_buffer_allocator()->Free(
+ allocation_base_, allocation_length_, buffer_->allocation_mode());
+ }
+ }
+};
+
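// This is an RAII wrapper: externalizing tells V8 that the embedder now
// owns the backing store, so the destructor frees it through the isolate's
// array_buffer_allocator — except for guard-region (trap-handler) buffers,
// whose allocation presumably stays engine-managed and is skipped on both
// sides. The tests below lean on it to fake the embedder flow around
// WasmMemoryObject::Grow. Typical use:
//   {
//     ManuallyExternalizedBuffer external(memory_object->array_buffer(),
//                                         isolate);
//     // ... grow the memory; the old buffer is neutered ...
//   }  // backing store freed here unless it had a guard region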
TEST(Run_WasmModule_Buffer_Externalized_GrowMem) {
{
Isolate* isolate = CcTest::InitIsolateOnce();
HandleScope scope(isolate);
- // Initial memory size = 16 + GrowWebAssemblyMemory(4) + GrowMemory(6)
- static const int kExpectedValue = 26;
TestSignatures sigs;
v8::internal::AccountingAllocator allocator;
Zone zone(&allocator, ZONE_NAME);
@@ -1084,47 +1108,28 @@ TEST(Run_WasmModule_Buffer_Externalized_GrowMem) {
ModuleWireBytes(buffer.begin(), buffer.end()),
{}, {})
.ToHandleChecked();
- Handle<JSArrayBuffer> memory(instance->memory_object()->array_buffer(),
- isolate);
- Handle<WasmMemoryObject> mem_obj(instance->memory_object(), isolate);
- void* const old_allocation_base = memory->allocation_base();
- size_t const old_allocation_length = memory->allocation_length();
-
- // Fake the Embedder flow by externalizing the memory object, and grow.
- v8::Utils::ToLocal(memory)->Externalize();
-
- uint32_t result = WasmMemoryObject::Grow(isolate, mem_obj, 4);
- bool free_memory = !memory->has_guard_region();
- if (!free_memory) {
- // current_pages = Initial memory size(16) + GrowWebAssemblyMemory(4)
- const uint32_t current_pages = 20;
- i::WasmMemoryObject::SetupNewBufferWithSameBackingStore(isolate, mem_obj,
- current_pages);
- }
- wasm::DetachMemoryBuffer(isolate, memory, free_memory);
+ Handle<WasmMemoryObject> memory_object(instance->memory_object(), isolate);
+
+ // Fake the Embedder flow by externalizing the array buffer.
+ ManuallyExternalizedBuffer buffer1(memory_object->array_buffer(), isolate);
+
+ // Grow using the API.
+ uint32_t result = WasmMemoryObject::Grow(isolate, memory_object, 4);
CHECK_EQ(16, result);
- memory = handle(mem_obj->array_buffer());
- instance->memory_object()->set_array_buffer(*memory);
- // Externalize should make no difference without the JS API as in this case
- // the buffer is not detached.
- v8::Utils::ToLocal(memory)->Externalize();
+ CHECK(buffer1.buffer_->was_neutered()); // growing always neuters
+ CHECK_EQ(0, buffer1.buffer_->byte_length()->Number());
+
+ CHECK_NE(*buffer1.buffer_, memory_object->array_buffer());
+
+ // Fake the Embedder flow by externalizing the array buffer.
+ ManuallyExternalizedBuffer buffer2(memory_object->array_buffer(), isolate);
+
+ // Grow using an internal WASM bytecode.
result = testing::RunWasmModuleForTesting(isolate, instance, 0, nullptr);
- CHECK_EQ(kExpectedValue, result);
- // Free the buffer as the tracker does not know about it.
- const v8::ArrayBuffer::Allocator::AllocationMode allocation_mode =
- memory->allocation_mode();
- CHECK_NOT_NULL(memory->allocation_base());
- isolate->array_buffer_allocator()->Free(memory->allocation_base(),
- memory->allocation_length(),
- allocation_mode);
- if (free_memory) {
- // GrowMemory without guard pages enabled allocates an extra buffer,
- // that needs to be freed as well
- isolate->array_buffer_allocator()->Free(
- old_allocation_base, old_allocation_length, allocation_mode);
- }
- memory->set_allocation_base(nullptr);
- memory->set_allocation_length(0);
+ CHECK_EQ(26, result);
+ CHECK(buffer2.buffer_->was_neutered()); // growing always neuters
+ CHECK_EQ(0, buffer2.buffer_->byte_length()->Number());
+ CHECK_NE(*buffer2.buffer_, memory_object->array_buffer());
}
Cleanup();
}
@@ -1134,19 +1139,17 @@ TEST(Run_WasmModule_Buffer_Externalized_GrowMemMemSize) {
Isolate* isolate = CcTest::InitIsolateOnce();
HandleScope scope(isolate);
void* backing_store =
- isolate->array_buffer_allocator()->Allocate(16 * WasmModule::kPageSize);
- Handle<JSArrayBuffer> buffer = wasm::SetupArrayBuffer(
- isolate, backing_store, 16 * WasmModule::kPageSize, backing_store,
- 16 * WasmModule::kPageSize, false, false);
+ isolate->array_buffer_allocator()->Allocate(16 * kWasmPageSize);
+ Handle<JSArrayBuffer> buffer =
+ wasm::SetupArrayBuffer(isolate, backing_store, 16 * kWasmPageSize,
+ backing_store, 16 * kWasmPageSize, false, false);
Handle<WasmMemoryObject> mem_obj =
WasmMemoryObject::New(isolate, buffer, 100);
v8::Utils::ToLocal(buffer)->Externalize();
int32_t result = WasmMemoryObject::Grow(isolate, mem_obj, 0);
- wasm::DetachMemoryBuffer(isolate, buffer, false);
CHECK_EQ(16, result);
- isolate->array_buffer_allocator()->Free(backing_store,
- 16 * WasmModule::kPageSize);
+ isolate->array_buffer_allocator()->Free(backing_store, 16 * kWasmPageSize);
}
Cleanup();
}
@@ -1158,14 +1161,13 @@ TEST(Run_WasmModule_Buffer_Externalized_Detach) {
Isolate* isolate = CcTest::InitIsolateOnce();
HandleScope scope(isolate);
void* backing_store =
- isolate->array_buffer_allocator()->Allocate(16 * WasmModule::kPageSize);
- Handle<JSArrayBuffer> buffer = wasm::SetupArrayBuffer(
- isolate, backing_store, 16 * WasmModule::kPageSize, backing_store,
- 16 * WasmModule::kPageSize, false, false);
+ isolate->array_buffer_allocator()->Allocate(16 * kWasmPageSize);
+ Handle<JSArrayBuffer> buffer =
+ wasm::SetupArrayBuffer(isolate, backing_store, 16 * kWasmPageSize,
+ backing_store, 16 * kWasmPageSize, false, false);
v8::Utils::ToLocal(buffer)->Externalize();
wasm::DetachMemoryBuffer(isolate, buffer, true);
- isolate->array_buffer_allocator()->Free(backing_store,
- 16 * WasmModule::kPageSize);
+ isolate->array_buffer_allocator()->Free(backing_store, 16 * kWasmPageSize);
}
Cleanup();
}
@@ -1199,10 +1201,9 @@ TEST(AtomicOpDisassembly) {
MaybeHandle<WasmModuleObject> module_object = SyncCompile(
isolate, &thrower, ModuleWireBytes(buffer.begin(), buffer.end()));
- MaybeHandle<WasmCompiledModule> compiled_module(
+ Handle<WasmCompiledModule> compiled_module(
module_object.ToHandleChecked()->compiled_module(), isolate);
- CHECK(!compiled_module.is_null());
- compiled_module.ToHandleChecked()->DisassembleFunction(0);
+ compiled_module->shared()->DisassembleFunction(0);
}
Cleanup();
}
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
index 93895d7f3c..1a97cdc122 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
@@ -31,25 +31,25 @@ typedef int8_t (*Int8BinOp)(int8_t, int8_t);
typedef int (*Int8CompareOp)(int8_t, int8_t);
typedef int8_t (*Int8ShiftOp)(int8_t, int);
-#define WASM_SIMD_TEST(name) \
- void RunWasm_##name##_Impl(WasmExecutionMode execution_mode); \
- TEST(RunWasm_##name##_compiled) { \
- EXPERIMENTAL_FLAG_SCOPE(simd); \
- RunWasm_##name##_Impl(kExecuteTurbofan); \
- } \
- TEST(RunWasm_##name##_simd_lowered) { \
- EXPERIMENTAL_FLAG_SCOPE(simd); \
- RunWasm_##name##_Impl(kExecuteSimdLowered); \
- } \
- void RunWasm_##name##_Impl(WasmExecutionMode execution_mode)
-
-#define WASM_SIMD_COMPILED_TEST(name) \
- void RunWasm_##name##_Impl(WasmExecutionMode execution_mode); \
- TEST(RunWasm_##name##_compiled) { \
- EXPERIMENTAL_FLAG_SCOPE(simd); \
- RunWasm_##name##_Impl(kExecuteTurbofan); \
- } \
- void RunWasm_##name##_Impl(WasmExecutionMode execution_mode)
+#define WASM_SIMD_TEST(name) \
+ void RunWasm_##name##_Impl(LowerSimd lower_simd); \
+ TEST(RunWasm_##name##_compiled) { \
+ EXPERIMENTAL_FLAG_SCOPE(simd); \
+ RunWasm_##name##_Impl(kNoLowerSimd); \
+ } \
+ TEST(RunWasm_##name##_simd_lowered) { \
+ EXPERIMENTAL_FLAG_SCOPE(simd); \
+ RunWasm_##name##_Impl(kLowerSimd); \
+ } \
+ void RunWasm_##name##_Impl(LowerSimd lower_simd)
+
+#define WASM_SIMD_COMPILED_TEST(name) \
+ void RunWasm_##name##_Impl(LowerSimd lower_simd); \
+ TEST(RunWasm_##name##_compiled) { \
+ EXPERIMENTAL_FLAG_SCOPE(simd); \
+ RunWasm_##name##_Impl(kNoLowerSimd); \
+ } \
+ void RunWasm_##name##_Impl(LowerSimd lower_simd)
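// The SIMD tests are now parameterized on lowering rather than execution
// mode: every test body receives a LowerSimd flag and always runs under
// Turbofan. WASM_SIMD_TEST registers both a native-SIMD variant
// (kNoLowerSimd) and a scalar-lowered one (kLowerSimd), while
// WASM_SIMD_COMPILED_TEST registers only the former. Expansion of one
// instance, for illustration:
//   WASM_SIMD_TEST(F32x4Splat) =>
//     TEST(RunWasm_F32x4Splat_compiled) {
//       RunWasm_F32x4Splat_Impl(kNoLowerSimd);
//     }
//     TEST(RunWasm_F32x4Splat_simd_lowered) {
//       RunWasm_F32x4Splat_Impl(kLowerSimd);
//     }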
// Generic expected value functions.
template <typename T>
@@ -195,7 +195,7 @@ template <typename T>
T UnsignedNarrow(int64_t value) {
static_assert(sizeof(int64_t) > sizeof(T), "T must be int32_t or smaller");
using UnsignedT = typename std::make_unsigned<T>::type;
- return static_cast<T>(Clamp<UnsignedT>(value & 0xffffffffu));
+ return static_cast<T>(Clamp<UnsignedT>(value & 0xFFFFFFFFu));
}
template <typename T>
@@ -405,10 +405,8 @@ bool SkipFPValue(float x) {
// doesn't handle NaNs. Also skip extreme values.
bool SkipFPExpectedValue(float x) { return std::isnan(x) || SkipFPValue(x); }
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
- V8_TARGET_ARCH_MIPS64
WASM_SIMD_TEST(F32x4Splat) {
- WasmRunner<int32_t, float> r(execution_mode);
+ WasmRunner<int32_t, float> r(kExecuteTurbofan, lower_simd);
byte lane_val = 0;
byte simd = r.AllocateLocal(kWasmS128);
BUILD(r,
@@ -422,7 +420,7 @@ WASM_SIMD_TEST(F32x4Splat) {
}
WASM_SIMD_TEST(F32x4ReplaceLane) {
- WasmRunner<int32_t, float, float> r(execution_mode);
+ WasmRunner<int32_t, float, float> r(kExecuteTurbofan, lower_simd);
byte old_val = 0;
byte new_val = 1;
byte simd = r.AllocateLocal(kWasmS128);
@@ -447,9 +445,11 @@ WASM_SIMD_TEST(F32x4ReplaceLane) {
CHECK_EQ(1, r.Call(3.14159f, -1.5f));
}
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
+ V8_TARGET_ARCH_MIPS64
// Tests both signed and unsigned conversion.
WASM_SIMD_TEST(F32x4ConvertI32x4) {
- WasmRunner<int32_t, int32_t, float, float> r(execution_mode);
+ WasmRunner<int32_t, int32_t, float, float> r(kExecuteTurbofan, lower_simd);
byte a = 0;
byte expected_signed = 1;
byte expected_unsigned = 2;
@@ -470,10 +470,12 @@ WASM_SIMD_TEST(F32x4ConvertI32x4) {
static_cast<float>(static_cast<uint32_t>(*i))));
}
}
+#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
+ // V8_TARGET_ARCH_MIPS64
-void RunF32x4UnOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
+void RunF32x4UnOpTest(LowerSimd lower_simd, WasmOpcode simd_op,
FloatUnOp expected_op, float error = 0.0f) {
- WasmRunner<int32_t, float, float, float> r(execution_mode);
+ WasmRunner<int32_t, float, float, float> r(kExecuteTurbofan, lower_simd);
byte a = 0;
byte low = 1;
byte high = 2;
@@ -492,27 +494,35 @@ void RunF32x4UnOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
}
}
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
+ V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
WASM_SIMD_TEST(F32x4Abs) {
- RunF32x4UnOpTest(execution_mode, kExprF32x4Abs, std::abs);
+ RunF32x4UnOpTest(lower_simd, kExprF32x4Abs, std::abs);
}
WASM_SIMD_TEST(F32x4Neg) {
- RunF32x4UnOpTest(execution_mode, kExprF32x4Neg, Negate);
+ RunF32x4UnOpTest(lower_simd, kExprF32x4Neg, Negate);
}
+#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
+ // V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
+ V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_X64
static const float kApproxError = 0.01f;
WASM_SIMD_COMPILED_TEST(F32x4RecipApprox) {
- RunF32x4UnOpTest(execution_mode, kExprF32x4RecipApprox, Recip, kApproxError);
+ RunF32x4UnOpTest(lower_simd, kExprF32x4RecipApprox, Recip, kApproxError);
}
WASM_SIMD_COMPILED_TEST(F32x4RecipSqrtApprox) {
- RunF32x4UnOpTest(execution_mode, kExprF32x4RecipSqrtApprox, RecipSqrt,
+ RunF32x4UnOpTest(lower_simd, kExprF32x4RecipSqrtApprox, RecipSqrt,
kApproxError);
}
+#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
+ // V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_X64
-void RunF32x4BinOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
+void RunF32x4BinOpTest(LowerSimd lower_simd, WasmOpcode simd_op,
FloatBinOp expected_op) {
- WasmRunner<int32_t, float, float, float> r(execution_mode);
+ WasmRunner<int32_t, float, float, float> r(kExecuteTurbofan, lower_simd);
byte a = 0;
byte b = 1;
byte expected = 2;
@@ -535,25 +545,19 @@ void RunF32x4BinOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
}
}
-WASM_SIMD_TEST(F32x4Add) {
- RunF32x4BinOpTest(execution_mode, kExprF32x4Add, Add);
-}
-WASM_SIMD_TEST(F32x4Sub) {
- RunF32x4BinOpTest(execution_mode, kExprF32x4Sub, Sub);
-}
-WASM_SIMD_TEST(F32x4Mul) {
- RunF32x4BinOpTest(execution_mode, kExprF32x4Mul, Mul);
-}
+WASM_SIMD_TEST(F32x4Add) { RunF32x4BinOpTest(lower_simd, kExprF32x4Add, Add); }
+WASM_SIMD_TEST(F32x4Sub) { RunF32x4BinOpTest(lower_simd, kExprF32x4Sub, Sub); }
+WASM_SIMD_TEST(F32x4Mul) { RunF32x4BinOpTest(lower_simd, kExprF32x4Mul, Mul); }
WASM_SIMD_TEST(F32x4_Min) {
- RunF32x4BinOpTest(execution_mode, kExprF32x4Min, JSMin);
+ RunF32x4BinOpTest(lower_simd, kExprF32x4Min, JSMin);
}
WASM_SIMD_TEST(F32x4_Max) {
- RunF32x4BinOpTest(execution_mode, kExprF32x4Max, JSMax);
+ RunF32x4BinOpTest(lower_simd, kExprF32x4Max, JSMax);
}
-void RunF32x4CompareOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
+void RunF32x4CompareOpTest(LowerSimd lower_simd, WasmOpcode simd_op,
FloatCompareOp expected_op) {
- WasmRunner<int32_t, float, float, int32_t> r(execution_mode);
+ WasmRunner<int32_t, float, float, int32_t> r(kExecuteTurbofan, lower_simd);
byte a = 0;
byte b = 1;
byte expected = 2;
@@ -577,30 +581,28 @@ void RunF32x4CompareOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
}
WASM_SIMD_TEST(F32x4Eq) {
- RunF32x4CompareOpTest(execution_mode, kExprF32x4Eq, Equal);
+ RunF32x4CompareOpTest(lower_simd, kExprF32x4Eq, Equal);
}
WASM_SIMD_TEST(F32x4Ne) {
- RunF32x4CompareOpTest(execution_mode, kExprF32x4Ne, NotEqual);
+ RunF32x4CompareOpTest(lower_simd, kExprF32x4Ne, NotEqual);
}
WASM_SIMD_TEST(F32x4Gt) {
- RunF32x4CompareOpTest(execution_mode, kExprF32x4Gt, Greater);
+ RunF32x4CompareOpTest(lower_simd, kExprF32x4Gt, Greater);
}
WASM_SIMD_TEST(F32x4Ge) {
- RunF32x4CompareOpTest(execution_mode, kExprF32x4Ge, GreaterEqual);
+ RunF32x4CompareOpTest(lower_simd, kExprF32x4Ge, GreaterEqual);
}
WASM_SIMD_TEST(F32x4Lt) {
- RunF32x4CompareOpTest(execution_mode, kExprF32x4Lt, Less);
+ RunF32x4CompareOpTest(lower_simd, kExprF32x4Lt, Less);
}
WASM_SIMD_TEST(F32x4Le) {
- RunF32x4CompareOpTest(execution_mode, kExprF32x4Le, LessEqual);
+ RunF32x4CompareOpTest(lower_simd, kExprF32x4Le, LessEqual);
}
-#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
- // V8_TARGET_ARCH_MIPS64
WASM_SIMD_TEST(I32x4Splat) {
// Store SIMD value in a local variable, use extract lane to check lane values
@@ -613,7 +615,7 @@ WASM_SIMD_TEST(I32x4Splat) {
// return 0
//
// return 1
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(kExecuteTurbofan, lower_simd);
byte lane_val = 0;
byte simd = r.AllocateLocal(kWasmS128);
BUILD(r,
@@ -624,7 +626,7 @@ WASM_SIMD_TEST(I32x4Splat) {
}
WASM_SIMD_TEST(I32x4ReplaceLane) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t, int32_t> r(kExecuteTurbofan, lower_simd);
byte old_val = 0;
byte new_val = 1;
byte simd = r.AllocateLocal(kWasmS128);
@@ -650,7 +652,7 @@ WASM_SIMD_TEST(I32x4ReplaceLane) {
}
WASM_SIMD_TEST(I16x8Splat) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(kExecuteTurbofan, lower_simd);
byte lane_val = 0;
byte simd = r.AllocateLocal(kWasmS128);
BUILD(r,
@@ -661,7 +663,7 @@ WASM_SIMD_TEST(I16x8Splat) {
}
WASM_SIMD_TEST(I16x8ReplaceLane) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t, int32_t> r(kExecuteTurbofan, lower_simd);
byte old_val = 0;
byte new_val = 1;
byte simd = r.AllocateLocal(kWasmS128);
@@ -710,7 +712,7 @@ WASM_SIMD_TEST(I16x8ReplaceLane) {
}
WASM_SIMD_TEST(I8x16Splat) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(kExecuteTurbofan, lower_simd);
byte lane_val = 0;
byte simd = r.AllocateLocal(kWasmS128);
BUILD(r,
@@ -721,7 +723,7 @@ WASM_SIMD_TEST(I8x16Splat) {
}
WASM_SIMD_TEST(I8x16ReplaceLane) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t, int32_t> r(kExecuteTurbofan, lower_simd);
byte old_val = 0;
byte new_val = 1;
byte simd = r.AllocateLocal(kWasmS128);
@@ -828,7 +830,7 @@ WASM_SIMD_TEST(I8x16ReplaceLane) {
V8_TARGET_ARCH_MIPS64
// Determines if conversion from float to int will be valid.
bool CanRoundToZeroAndConvert(double val, bool unsigned_integer) {
- const double max_uint = static_cast<double>(0xffffffffu);
+ const double max_uint = static_cast<double>(0xFFFFFFFFu);
const double max_int = static_cast<double>(kMaxInt);
const double min_int = static_cast<double>(kMinInt);
@@ -849,7 +851,7 @@ int ConvertInvalidValue(double val, bool unsigned_integer) {
return 0;
} else {
if (unsigned_integer) {
- return (val < 0) ? 0 : 0xffffffffu;
+ return (val < 0) ? 0 : 0xFFFFFFFFu;
} else {
return (val < 0) ? kMinInt : kMaxInt;
}
@@ -868,7 +870,7 @@ int32_t ConvertToInt(double val, bool unsigned_integer) {
// Tests both signed and unsigned conversion.
WASM_SIMD_TEST(I32x4ConvertF32x4) {
- WasmRunner<int32_t, float, int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, float, int32_t, int32_t> r(kExecuteTurbofan, lower_simd);
byte a = 0;
byte expected_signed = 1;
byte expected_unsigned = 2;
@@ -893,7 +895,8 @@ WASM_SIMD_TEST(I32x4ConvertF32x4) {
// Tests both signed and unsigned conversion from I16x8 (unpacking).
WASM_SIMD_COMPILED_TEST(I32x4ConvertI16x8) {
- WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t, int32_t, int32_t> r(kExecuteTurbofan,
+ lower_simd);
byte a = 0;
byte unpacked_signed = 1;
byte unpacked_unsigned = 2;
@@ -918,9 +921,9 @@ WASM_SIMD_COMPILED_TEST(I32x4ConvertI16x8) {
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
// V8_TARGET_ARCH_MIPS64
-void RunI32x4UnOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
+void RunI32x4UnOpTest(LowerSimd lower_simd, WasmOpcode simd_op,
Int32UnOp expected_op) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t, int32_t> r(kExecuteTurbofan, lower_simd);
byte a = 0;
byte expected = 1;
byte simd = r.AllocateLocal(kWasmS128);
@@ -932,16 +935,19 @@ void RunI32x4UnOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
}
WASM_SIMD_TEST(I32x4Neg) {
- RunI32x4UnOpTest(execution_mode, kExprI32x4Neg, Negate);
+ RunI32x4UnOpTest(lower_simd, kExprI32x4Neg, Negate);
}
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64
-WASM_SIMD_TEST(S128Not) { RunI32x4UnOpTest(execution_mode, kExprS128Not, Not); }
-#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 || \
+ V8_TARGET_ARCH_IA32
+WASM_SIMD_TEST(S128Not) { RunI32x4UnOpTest(lower_simd, kExprS128Not, Not); }
+#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 ||
+ // V8_TARGET_ARCH_IA32
-void RunI32x4BinOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
+void RunI32x4BinOpTest(LowerSimd lower_simd, WasmOpcode simd_op,
Int32BinOp expected_op) {
- WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t, int32_t, int32_t> r(kExecuteTurbofan,
+ lower_simd);
byte a = 0;
byte b = 1;
byte expected = 2;
@@ -958,51 +964,38 @@ void RunI32x4BinOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
}
}
-WASM_SIMD_TEST(I32x4Add) {
- RunI32x4BinOpTest(execution_mode, kExprI32x4Add, Add);
-}
+WASM_SIMD_TEST(I32x4Add) { RunI32x4BinOpTest(lower_simd, kExprI32x4Add, Add); }
-WASM_SIMD_TEST(I32x4Sub) {
- RunI32x4BinOpTest(execution_mode, kExprI32x4Sub, Sub);
-}
+WASM_SIMD_TEST(I32x4Sub) { RunI32x4BinOpTest(lower_simd, kExprI32x4Sub, Sub); }
-WASM_SIMD_TEST(I32x4Mul) {
- RunI32x4BinOpTest(execution_mode, kExprI32x4Mul, Mul);
-}
+WASM_SIMD_TEST(I32x4Mul) { RunI32x4BinOpTest(lower_simd, kExprI32x4Mul, Mul); }
WASM_SIMD_TEST(I32x4MinS) {
- RunI32x4BinOpTest(execution_mode, kExprI32x4MinS, Minimum);
+ RunI32x4BinOpTest(lower_simd, kExprI32x4MinS, Minimum);
}
WASM_SIMD_TEST(I32x4MaxS) {
- RunI32x4BinOpTest(execution_mode, kExprI32x4MaxS, Maximum);
+ RunI32x4BinOpTest(lower_simd, kExprI32x4MaxS, Maximum);
}
WASM_SIMD_TEST(I32x4MinU) {
- RunI32x4BinOpTest(execution_mode, kExprI32x4MinU, UnsignedMinimum);
+ RunI32x4BinOpTest(lower_simd, kExprI32x4MinU, UnsignedMinimum);
}
WASM_SIMD_TEST(I32x4MaxU) {
- RunI32x4BinOpTest(execution_mode, kExprI32x4MaxU, UnsignedMaximum);
+ RunI32x4BinOpTest(lower_simd, kExprI32x4MaxU, UnsignedMaximum);
}
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 || \
- V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
-WASM_SIMD_TEST(S128And) {
- RunI32x4BinOpTest(execution_mode, kExprS128And, And);
-}
+WASM_SIMD_TEST(S128And) { RunI32x4BinOpTest(lower_simd, kExprS128And, And); }
-WASM_SIMD_TEST(S128Or) { RunI32x4BinOpTest(execution_mode, kExprS128Or, Or); }
+WASM_SIMD_TEST(S128Or) { RunI32x4BinOpTest(lower_simd, kExprS128Or, Or); }
-WASM_SIMD_TEST(S128Xor) {
- RunI32x4BinOpTest(execution_mode, kExprS128Xor, Xor);
-}
-#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 ||
- // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+WASM_SIMD_TEST(S128Xor) { RunI32x4BinOpTest(lower_simd, kExprS128Xor, Xor); }
-void RunI32x4CompareOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
+void RunI32x4CompareOpTest(LowerSimd lower_simd, WasmOpcode simd_op,
Int32CompareOp expected_op) {
- WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t, int32_t, int32_t> r(kExecuteTurbofan,
+ lower_simd);
byte a = 0;
byte b = 1;
byte expected = 2;
@@ -1020,48 +1013,48 @@ void RunI32x4CompareOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
}
WASM_SIMD_TEST(I32x4Eq) {
- RunI32x4CompareOpTest(execution_mode, kExprI32x4Eq, Equal);
+ RunI32x4CompareOpTest(lower_simd, kExprI32x4Eq, Equal);
}
WASM_SIMD_TEST(I32x4Ne) {
- RunI32x4CompareOpTest(execution_mode, kExprI32x4Ne, NotEqual);
+ RunI32x4CompareOpTest(lower_simd, kExprI32x4Ne, NotEqual);
}
WASM_SIMD_TEST(I32x4LtS) {
- RunI32x4CompareOpTest(execution_mode, kExprI32x4LtS, Less);
+ RunI32x4CompareOpTest(lower_simd, kExprI32x4LtS, Less);
}
WASM_SIMD_TEST(I32x4LeS) {
- RunI32x4CompareOpTest(execution_mode, kExprI32x4LeS, LessEqual);
+ RunI32x4CompareOpTest(lower_simd, kExprI32x4LeS, LessEqual);
}
WASM_SIMD_TEST(I32x4GtS) {
- RunI32x4CompareOpTest(execution_mode, kExprI32x4GtS, Greater);
+ RunI32x4CompareOpTest(lower_simd, kExprI32x4GtS, Greater);
}
WASM_SIMD_TEST(I32x4GeS) {
- RunI32x4CompareOpTest(execution_mode, kExprI32x4GeS, GreaterEqual);
+ RunI32x4CompareOpTest(lower_simd, kExprI32x4GeS, GreaterEqual);
}
WASM_SIMD_TEST(I32x4LtU) {
- RunI32x4CompareOpTest(execution_mode, kExprI32x4LtU, UnsignedLess);
+ RunI32x4CompareOpTest(lower_simd, kExprI32x4LtU, UnsignedLess);
}
WASM_SIMD_TEST(I32x4LeU) {
- RunI32x4CompareOpTest(execution_mode, kExprI32x4LeU, UnsignedLessEqual);
+ RunI32x4CompareOpTest(lower_simd, kExprI32x4LeU, UnsignedLessEqual);
}
WASM_SIMD_TEST(I32x4GtU) {
- RunI32x4CompareOpTest(execution_mode, kExprI32x4GtU, UnsignedGreater);
+ RunI32x4CompareOpTest(lower_simd, kExprI32x4GtU, UnsignedGreater);
}
WASM_SIMD_TEST(I32x4GeU) {
- RunI32x4CompareOpTest(execution_mode, kExprI32x4GeU, UnsignedGreaterEqual);
+ RunI32x4CompareOpTest(lower_simd, kExprI32x4GeU, UnsignedGreaterEqual);
}
-void RunI32x4ShiftOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
+void RunI32x4ShiftOpTest(LowerSimd lower_simd, WasmOpcode simd_op,
Int32ShiftOp expected_op, int shift) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t, int32_t> r(kExecuteTurbofan, lower_simd);
byte a = 0;
byte expected = 1;
byte simd = r.AllocateLocal(kWasmS128);
@@ -1074,22 +1067,23 @@ void RunI32x4ShiftOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
}
WASM_SIMD_TEST(I32x4Shl) {
- RunI32x4ShiftOpTest(execution_mode, kExprI32x4Shl, LogicalShiftLeft, 1);
+ RunI32x4ShiftOpTest(lower_simd, kExprI32x4Shl, LogicalShiftLeft, 1);
}
WASM_SIMD_TEST(I32x4ShrS) {
- RunI32x4ShiftOpTest(execution_mode, kExprI32x4ShrS, ArithmeticShiftRight, 1);
+ RunI32x4ShiftOpTest(lower_simd, kExprI32x4ShrS, ArithmeticShiftRight, 1);
}
WASM_SIMD_TEST(I32x4ShrU) {
- RunI32x4ShiftOpTest(execution_mode, kExprI32x4ShrU, LogicalShiftRight, 1);
+ RunI32x4ShiftOpTest(lower_simd, kExprI32x4ShrU, LogicalShiftRight, 1);
}
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
V8_TARGET_ARCH_MIPS64
// Tests both signed and unsigned conversion from I8x16 (unpacking).
WASM_SIMD_COMPILED_TEST(I16x8ConvertI8x16) {
- WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t, int32_t, int32_t> r(kExecuteTurbofan,
+ lower_simd);
byte a = 0;
byte unpacked_signed = 1;
byte unpacked_unsigned = 2;
@@ -1113,9 +1107,9 @@ WASM_SIMD_COMPILED_TEST(I16x8ConvertI8x16) {
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
// V8_TARGET_ARCH_MIPS64
-void RunI16x8UnOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
+void RunI16x8UnOpTest(LowerSimd lower_simd, WasmOpcode simd_op,
Int16UnOp expected_op) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t, int32_t> r(kExecuteTurbofan, lower_simd);
byte a = 0;
byte expected = 1;
byte simd = r.AllocateLocal(kWasmS128);
@@ -1127,14 +1121,15 @@ void RunI16x8UnOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
}
WASM_SIMD_TEST(I16x8Neg) {
- RunI16x8UnOpTest(execution_mode, kExprI16x8Neg, Negate);
+ RunI16x8UnOpTest(lower_simd, kExprI16x8Neg, Negate);
}
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
V8_TARGET_ARCH_MIPS64
// Tests both signed and unsigned conversion from I32x4 (packing).
WASM_SIMD_COMPILED_TEST(I16x8ConvertI32x4) {
- WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t, int32_t, int32_t> r(kExecuteTurbofan,
+ lower_simd);
byte a = 0;
byte packed_signed = 1;
byte packed_unsigned = 2;
@@ -1155,16 +1150,17 @@ WASM_SIMD_COMPILED_TEST(I16x8ConvertI32x4) {
int32_t packed_signed = Narrow<int16_t>(*i);
int32_t packed_unsigned = UnsignedNarrow<int16_t>(*i);
// Sign-extend here, since ExtractLane sign extends.
- if (packed_unsigned & 0x8000) packed_unsigned |= 0xffff0000;
+ if (packed_unsigned & 0x8000) packed_unsigned |= 0xFFFF0000;
CHECK_EQ(1, r.Call(*i, packed_signed, packed_unsigned));
}
}
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
// V8_TARGET_ARCH_MIPS64
-void RunI16x8BinOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
+void RunI16x8BinOpTest(LowerSimd lower_simd, WasmOpcode simd_op,
Int16BinOp expected_op) {
- WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t, int32_t, int32_t> r(kExecuteTurbofan,
+ lower_simd);
byte a = 0;
byte b = 1;
byte expected = 2;
@@ -1181,55 +1177,48 @@ void RunI16x8BinOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
}
}
-WASM_SIMD_TEST(I16x8Add) {
- RunI16x8BinOpTest(execution_mode, kExprI16x8Add, Add);
-}
+WASM_SIMD_TEST(I16x8Add) { RunI16x8BinOpTest(lower_simd, kExprI16x8Add, Add); }
WASM_SIMD_TEST(I16x8AddSaturateS) {
- RunI16x8BinOpTest(execution_mode, kExprI16x8AddSaturateS, AddSaturate);
+ RunI16x8BinOpTest(lower_simd, kExprI16x8AddSaturateS, AddSaturate);
}
-WASM_SIMD_TEST(I16x8Sub) {
- RunI16x8BinOpTest(execution_mode, kExprI16x8Sub, Sub);
-}
+WASM_SIMD_TEST(I16x8Sub) { RunI16x8BinOpTest(lower_simd, kExprI16x8Sub, Sub); }
WASM_SIMD_TEST(I16x8SubSaturateS) {
- RunI16x8BinOpTest(execution_mode, kExprI16x8SubSaturateS, SubSaturate);
+ RunI16x8BinOpTest(lower_simd, kExprI16x8SubSaturateS, SubSaturate);
}
-WASM_SIMD_TEST(I16x8Mul) {
- RunI16x8BinOpTest(execution_mode, kExprI16x8Mul, Mul);
-}
+WASM_SIMD_TEST(I16x8Mul) { RunI16x8BinOpTest(lower_simd, kExprI16x8Mul, Mul); }
WASM_SIMD_TEST(I16x8MinS) {
- RunI16x8BinOpTest(execution_mode, kExprI16x8MinS, Minimum);
+ RunI16x8BinOpTest(lower_simd, kExprI16x8MinS, Minimum);
}
WASM_SIMD_TEST(I16x8MaxS) {
- RunI16x8BinOpTest(execution_mode, kExprI16x8MaxS, Maximum);
+ RunI16x8BinOpTest(lower_simd, kExprI16x8MaxS, Maximum);
}
WASM_SIMD_TEST(I16x8AddSaturateU) {
- RunI16x8BinOpTest(execution_mode, kExprI16x8AddSaturateU,
- UnsignedAddSaturate);
+ RunI16x8BinOpTest(lower_simd, kExprI16x8AddSaturateU, UnsignedAddSaturate);
}
WASM_SIMD_TEST(I16x8SubSaturateU) {
- RunI16x8BinOpTest(execution_mode, kExprI16x8SubSaturateU,
- UnsignedSubSaturate);
+ RunI16x8BinOpTest(lower_simd, kExprI16x8SubSaturateU, UnsignedSubSaturate);
}
WASM_SIMD_TEST(I16x8MinU) {
- RunI16x8BinOpTest(execution_mode, kExprI16x8MinU, UnsignedMinimum);
+ RunI16x8BinOpTest(lower_simd, kExprI16x8MinU, UnsignedMinimum);
}
WASM_SIMD_TEST(I16x8MaxU) {
- RunI16x8BinOpTest(execution_mode, kExprI16x8MaxU, UnsignedMaximum);
+ RunI16x8BinOpTest(lower_simd, kExprI16x8MaxU, UnsignedMaximum);
}
-void RunI16x8CompareOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
+void RunI16x8CompareOpTest(LowerSimd lower_simd, WasmOpcode simd_op,
Int16CompareOp expected_op) {
- WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t, int32_t, int32_t> r(kExecuteTurbofan,
+ lower_simd);
byte a = 0;
byte b = 1;
byte expected = 2;
@@ -1247,48 +1236,48 @@ void RunI16x8CompareOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
}
WASM_SIMD_TEST(I16x8Eq) {
- RunI16x8CompareOpTest(execution_mode, kExprI16x8Eq, Equal);
+ RunI16x8CompareOpTest(lower_simd, kExprI16x8Eq, Equal);
}
WASM_SIMD_TEST(I16x8Ne) {
- RunI16x8CompareOpTest(execution_mode, kExprI16x8Ne, NotEqual);
+ RunI16x8CompareOpTest(lower_simd, kExprI16x8Ne, NotEqual);
}
WASM_SIMD_TEST(I16x8LtS) {
- RunI16x8CompareOpTest(execution_mode, kExprI16x8LtS, Less);
+ RunI16x8CompareOpTest(lower_simd, kExprI16x8LtS, Less);
}
WASM_SIMD_TEST(I16x8LeS) {
- RunI16x8CompareOpTest(execution_mode, kExprI16x8LeS, LessEqual);
+ RunI16x8CompareOpTest(lower_simd, kExprI16x8LeS, LessEqual);
}
WASM_SIMD_TEST(I16x8GtS) {
- RunI16x8CompareOpTest(execution_mode, kExprI16x8GtS, Greater);
+ RunI16x8CompareOpTest(lower_simd, kExprI16x8GtS, Greater);
}
WASM_SIMD_TEST(I16x8GeS) {
- RunI16x8CompareOpTest(execution_mode, kExprI16x8GeS, GreaterEqual);
+ RunI16x8CompareOpTest(lower_simd, kExprI16x8GeS, GreaterEqual);
}
WASM_SIMD_TEST(I16x8GtU) {
- RunI16x8CompareOpTest(execution_mode, kExprI16x8GtU, UnsignedGreater);
+ RunI16x8CompareOpTest(lower_simd, kExprI16x8GtU, UnsignedGreater);
}
WASM_SIMD_TEST(I16x8GeU) {
- RunI16x8CompareOpTest(execution_mode, kExprI16x8GeU, UnsignedGreaterEqual);
+ RunI16x8CompareOpTest(lower_simd, kExprI16x8GeU, UnsignedGreaterEqual);
}
WASM_SIMD_TEST(I16x8LtU) {
- RunI16x8CompareOpTest(execution_mode, kExprI16x8LtU, UnsignedLess);
+ RunI16x8CompareOpTest(lower_simd, kExprI16x8LtU, UnsignedLess);
}
WASM_SIMD_TEST(I16x8LeU) {
- RunI16x8CompareOpTest(execution_mode, kExprI16x8LeU, UnsignedLessEqual);
+ RunI16x8CompareOpTest(lower_simd, kExprI16x8LeU, UnsignedLessEqual);
}
-void RunI16x8ShiftOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
+void RunI16x8ShiftOpTest(LowerSimd lower_simd, WasmOpcode simd_op,
Int16ShiftOp expected_op, int shift) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t, int32_t> r(kExecuteTurbofan, lower_simd);
byte a = 0;
byte expected = 1;
byte simd = r.AllocateLocal(kWasmS128);
@@ -1301,20 +1290,20 @@ void RunI16x8ShiftOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
}
WASM_SIMD_TEST(I16x8Shl) {
- RunI16x8ShiftOpTest(execution_mode, kExprI16x8Shl, LogicalShiftLeft, 1);
+ RunI16x8ShiftOpTest(lower_simd, kExprI16x8Shl, LogicalShiftLeft, 1);
}
WASM_SIMD_TEST(I16x8ShrS) {
- RunI16x8ShiftOpTest(execution_mode, kExprI16x8ShrS, ArithmeticShiftRight, 1);
+ RunI16x8ShiftOpTest(lower_simd, kExprI16x8ShrS, ArithmeticShiftRight, 1);
}
WASM_SIMD_TEST(I16x8ShrU) {
- RunI16x8ShiftOpTest(execution_mode, kExprI16x8ShrU, LogicalShiftRight, 1);
+ RunI16x8ShiftOpTest(lower_simd, kExprI16x8ShrU, LogicalShiftRight, 1);
}
-void RunI8x16UnOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
+void RunI8x16UnOpTest(LowerSimd lower_simd, WasmOpcode simd_op,
Int8UnOp expected_op) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t, int32_t> r(kExecuteTurbofan, lower_simd);
byte a = 0;
byte expected = 1;
byte simd = r.AllocateLocal(kWasmS128);
@@ -1326,14 +1315,15 @@ void RunI8x16UnOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
}
WASM_SIMD_TEST(I8x16Neg) {
- RunI8x16UnOpTest(execution_mode, kExprI8x16Neg, Negate);
+ RunI8x16UnOpTest(lower_simd, kExprI8x16Neg, Negate);
}
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
V8_TARGET_ARCH_MIPS64
// Tests both signed and unsigned conversion from I16x8 (packing).
WASM_SIMD_COMPILED_TEST(I8x16ConvertI16x8) {
- WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t, int32_t, int32_t> r(kExecuteTurbofan,
+ lower_simd);
byte a = 0;
byte packed_signed = 1;
byte packed_unsigned = 2;
@@ -1354,16 +1344,17 @@ WASM_SIMD_COMPILED_TEST(I8x16ConvertI16x8) {
int32_t packed_signed = Narrow<int8_t>(*i);
int32_t packed_unsigned = UnsignedNarrow<int8_t>(*i);
// Sign-extend here, since ExtractLane sign extends.
- if (packed_unsigned & 0x80) packed_unsigned |= 0xffffff00;
+ if (packed_unsigned & 0x80) packed_unsigned |= 0xFFFFFF00;
CHECK_EQ(1, r.Call(*i, packed_signed, packed_unsigned));
}
}
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
// V8_TARGET_ARCH_MIPS64
-void RunI8x16BinOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
+void RunI8x16BinOpTest(LowerSimd lower_simd, WasmOpcode simd_op,
Int8BinOp expected_op) {
- WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t, int32_t, int32_t> r(kExecuteTurbofan,
+ lower_simd);
byte a = 0;
byte b = 1;
byte expected = 2;
@@ -1380,51 +1371,46 @@ void RunI8x16BinOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
}
}
-WASM_SIMD_TEST(I8x16Add) {
- RunI8x16BinOpTest(execution_mode, kExprI8x16Add, Add);
-}
+WASM_SIMD_TEST(I8x16Add) { RunI8x16BinOpTest(lower_simd, kExprI8x16Add, Add); }
WASM_SIMD_TEST(I8x16AddSaturateS) {
- RunI8x16BinOpTest(execution_mode, kExprI8x16AddSaturateS, AddSaturate);
+ RunI8x16BinOpTest(lower_simd, kExprI8x16AddSaturateS, AddSaturate);
}
-WASM_SIMD_TEST(I8x16Sub) {
- RunI8x16BinOpTest(execution_mode, kExprI8x16Sub, Sub);
-}
+WASM_SIMD_TEST(I8x16Sub) { RunI8x16BinOpTest(lower_simd, kExprI8x16Sub, Sub); }
WASM_SIMD_TEST(I8x16SubSaturateS) {
- RunI8x16BinOpTest(execution_mode, kExprI8x16SubSaturateS, SubSaturate);
+ RunI8x16BinOpTest(lower_simd, kExprI8x16SubSaturateS, SubSaturate);
}
WASM_SIMD_TEST(I8x16MinS) {
- RunI8x16BinOpTest(execution_mode, kExprI8x16MinS, Minimum);
+ RunI8x16BinOpTest(lower_simd, kExprI8x16MinS, Minimum);
}
WASM_SIMD_TEST(I8x16MaxS) {
- RunI8x16BinOpTest(execution_mode, kExprI8x16MaxS, Maximum);
+ RunI8x16BinOpTest(lower_simd, kExprI8x16MaxS, Maximum);
}
WASM_SIMD_TEST(I8x16AddSaturateU) {
- RunI8x16BinOpTest(execution_mode, kExprI8x16AddSaturateU,
- UnsignedAddSaturate);
+ RunI8x16BinOpTest(lower_simd, kExprI8x16AddSaturateU, UnsignedAddSaturate);
}
WASM_SIMD_TEST(I8x16SubSaturateU) {
- RunI8x16BinOpTest(execution_mode, kExprI8x16SubSaturateU,
- UnsignedSubSaturate);
+ RunI8x16BinOpTest(lower_simd, kExprI8x16SubSaturateU, UnsignedSubSaturate);
}
WASM_SIMD_TEST(I8x16MinU) {
- RunI8x16BinOpTest(execution_mode, kExprI8x16MinU, UnsignedMinimum);
+ RunI8x16BinOpTest(lower_simd, kExprI8x16MinU, UnsignedMinimum);
}
WASM_SIMD_TEST(I8x16MaxU) {
- RunI8x16BinOpTest(execution_mode, kExprI8x16MaxU, UnsignedMaximum);
+ RunI8x16BinOpTest(lower_simd, kExprI8x16MaxU, UnsignedMaximum);
}
-void RunI8x16CompareOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
+void RunI8x16CompareOpTest(LowerSimd lower_simd, WasmOpcode simd_op,
Int8CompareOp expected_op) {
- WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t, int32_t, int32_t> r(kExecuteTurbofan,
+ lower_simd);
byte a = 0;
byte b = 1;
byte expected = 2;
@@ -1442,56 +1428,54 @@ void RunI8x16CompareOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
}
WASM_SIMD_TEST(I8x16Eq) {
- RunI8x16CompareOpTest(execution_mode, kExprI8x16Eq, Equal);
+ RunI8x16CompareOpTest(lower_simd, kExprI8x16Eq, Equal);
}
WASM_SIMD_TEST(I8x16Ne) {
- RunI8x16CompareOpTest(execution_mode, kExprI8x16Ne, NotEqual);
+ RunI8x16CompareOpTest(lower_simd, kExprI8x16Ne, NotEqual);
}
WASM_SIMD_TEST(I8x16GtS) {
- RunI8x16CompareOpTest(execution_mode, kExprI8x16GtS, Greater);
+ RunI8x16CompareOpTest(lower_simd, kExprI8x16GtS, Greater);
}
WASM_SIMD_TEST(I8x16GeS) {
- RunI8x16CompareOpTest(execution_mode, kExprI8x16GeS, GreaterEqual);
+ RunI8x16CompareOpTest(lower_simd, kExprI8x16GeS, GreaterEqual);
}
WASM_SIMD_TEST(I8x16LtS) {
- RunI8x16CompareOpTest(execution_mode, kExprI8x16LtS, Less);
+ RunI8x16CompareOpTest(lower_simd, kExprI8x16LtS, Less);
}
WASM_SIMD_TEST(I8x16LeS) {
- RunI8x16CompareOpTest(execution_mode, kExprI8x16LeS, LessEqual);
+ RunI8x16CompareOpTest(lower_simd, kExprI8x16LeS, LessEqual);
}
WASM_SIMD_TEST(I8x16GtU) {
- RunI8x16CompareOpTest(execution_mode, kExprI8x16GtU, UnsignedGreater);
+ RunI8x16CompareOpTest(lower_simd, kExprI8x16GtU, UnsignedGreater);
}
WASM_SIMD_TEST(I8x16GeU) {
- RunI8x16CompareOpTest(execution_mode, kExprI8x16GeU, UnsignedGreaterEqual);
+ RunI8x16CompareOpTest(lower_simd, kExprI8x16GeU, UnsignedGreaterEqual);
}
WASM_SIMD_TEST(I8x16LtU) {
- RunI8x16CompareOpTest(execution_mode, kExprI8x16LtU, UnsignedLess);
+ RunI8x16CompareOpTest(lower_simd, kExprI8x16LtU, UnsignedLess);
}
WASM_SIMD_TEST(I8x16LeU) {
- RunI8x16CompareOpTest(execution_mode, kExprI8x16LeU, UnsignedLessEqual);
+ RunI8x16CompareOpTest(lower_simd, kExprI8x16LeU, UnsignedLessEqual);
}
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
V8_TARGET_ARCH_MIPS64
-WASM_SIMD_TEST(I8x16Mul) {
- RunI8x16BinOpTest(execution_mode, kExprI8x16Mul, Mul);
-}
+WASM_SIMD_TEST(I8x16Mul) { RunI8x16BinOpTest(lower_simd, kExprI8x16Mul, Mul); }
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
// V8_TARGET_ARCH_MIPS64
-void RunI8x16ShiftOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
+void RunI8x16ShiftOpTest(LowerSimd lower_simd, WasmOpcode simd_op,
Int8ShiftOp expected_op, int shift) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t, int32_t> r(kExecuteTurbofan, lower_simd);
byte a = 0;
byte expected = 1;
byte simd = r.AllocateLocal(kWasmS128);
@@ -1506,15 +1490,15 @@ void RunI8x16ShiftOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
V8_TARGET_ARCH_MIPS64
WASM_SIMD_TEST(I8x16Shl) {
- RunI8x16ShiftOpTest(execution_mode, kExprI8x16Shl, LogicalShiftLeft, 1);
+ RunI8x16ShiftOpTest(lower_simd, kExprI8x16Shl, LogicalShiftLeft, 1);
}
WASM_SIMD_TEST(I8x16ShrS) {
- RunI8x16ShiftOpTest(execution_mode, kExprI8x16ShrS, ArithmeticShiftRight, 1);
+ RunI8x16ShiftOpTest(lower_simd, kExprI8x16ShrS, ArithmeticShiftRight, 1);
}
WASM_SIMD_TEST(I8x16ShrU) {
- RunI8x16ShiftOpTest(execution_mode, kExprI8x16ShrU, LogicalShiftRight, 1);
+ RunI8x16ShiftOpTest(lower_simd, kExprI8x16ShrU, LogicalShiftRight, 1);
}
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
// V8_TARGET_ARCH_MIPS64
@@ -1526,7 +1510,7 @@ WASM_SIMD_TEST(I8x16ShrU) {
// vector.
#define WASM_SIMD_SELECT_TEST(format) \
WASM_SIMD_COMPILED_TEST(S##format##Select) { \
- WasmRunner<int32_t, int32_t, int32_t> r(execution_mode); \
+ WasmRunner<int32_t, int32_t, int32_t> r(kExecuteTurbofan, lower_simd); \
byte val1 = 0; \
byte val2 = 1; \
byte src1 = r.AllocateLocal(kWasmS128); \
@@ -1566,7 +1550,8 @@ WASM_SIMD_SELECT_TEST(8x16)
// rest 0. The mask is not the result of a comparison op.
#define WASM_SIMD_NON_CANONICAL_SELECT_TEST(format) \
WASM_SIMD_COMPILED_TEST(S##format##NonCanonicalSelect) { \
- WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_mode); \
+ WasmRunner<int32_t, int32_t, int32_t, int32_t> r(kExecuteTurbofan, \
+ lower_simd); \
byte val1 = 0; \
byte val2 = 1; \
byte combined = 2; \
@@ -1602,9 +1587,9 @@ WASM_SIMD_NON_CANONICAL_SELECT_TEST(8x16)
// Test binary ops with two lane test patterns, all lanes distinct.
template <typename T>
void RunBinaryLaneOpTest(
- WasmExecutionMode execution_mode, WasmOpcode simd_op,
+ LowerSimd lower_simd, WasmOpcode simd_op,
const std::array<T, kSimd128Size / sizeof(T)>& expected) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(kExecuteTurbofan, lower_simd);
// Set up two test patterns as globals, e.g. [0, 1, 2, 3] and [4, 5, 6, 7].
T* src0 = r.builder().AddGlobal<T>(kWasmS128);
T* src1 = r.builder().AddGlobal<T>(kWasmS128);
@@ -1633,12 +1618,11 @@ void RunBinaryLaneOpTest(
}
WASM_SIMD_COMPILED_TEST(I32x4AddHoriz) {
- RunBinaryLaneOpTest<int32_t>(execution_mode, kExprI32x4AddHoriz,
- {{1, 5, 9, 13}});
+ RunBinaryLaneOpTest<int32_t>(lower_simd, kExprI32x4AddHoriz, {{1, 5, 9, 13}});
}
WASM_SIMD_COMPILED_TEST(I16x8AddHoriz) {
- RunBinaryLaneOpTest<int16_t>(execution_mode, kExprI16x8AddHoriz,
+ RunBinaryLaneOpTest<int16_t>(lower_simd, kExprI16x8AddHoriz,
{{1, 5, 9, 13, 17, 21, 25, 29}});
}
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 ||
@@ -1647,7 +1631,7 @@ WASM_SIMD_COMPILED_TEST(I16x8AddHoriz) {
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
V8_TARGET_ARCH_MIPS64
WASM_SIMD_COMPILED_TEST(F32x4AddHoriz) {
- RunBinaryLaneOpTest<float>(execution_mode, kExprF32x4AddHoriz,
+ RunBinaryLaneOpTest<float>(lower_simd, kExprF32x4AddHoriz,
{{1.0f, 5.0f, 9.0f, 13.0f}});
}
@@ -1655,255 +1639,255 @@ WASM_SIMD_COMPILED_TEST(F32x4AddHoriz) {
// Test normal and unary versions (where the second operand isn't used).
WASM_SIMD_COMPILED_TEST(S32x4Dup) {
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{16, 17, 18, 19, 16, 17, 18, 19, 16, 17, 18, 19, 16, 17, 18, 19}});
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7}});
}
WASM_SIMD_COMPILED_TEST(S32x4ZipLeft) {
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23}});
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7, 4, 5, 6, 7}});
}
WASM_SIMD_COMPILED_TEST(S32x4ZipRight) {
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31}});
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{8, 9, 10, 11, 8, 9, 10, 11, 12, 13, 14, 15, 12, 13, 14, 15}});
}
WASM_SIMD_COMPILED_TEST(S32x4UnzipLeft) {
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27}});
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{0, 1, 2, 3, 8, 9, 10, 11, 0, 1, 2, 3, 8, 9, 10, 11}});
}
WASM_SIMD_COMPILED_TEST(S32x4UnzipRight) {
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31}});
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{4, 5, 6, 7, 12, 13, 14, 15, 4, 5, 6, 7, 12, 13, 14, 15}});
}
WASM_SIMD_COMPILED_TEST(S32x4TransposeLeft) {
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27}});
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 8, 9, 10, 11}});
}
WASM_SIMD_COMPILED_TEST(S32x4TransposeRight) {
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31}});
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{4, 5, 6, 7, 4, 5, 6, 7, 12, 13, 14, 15, 12, 13, 14, 15}});
}
// Reverses are only unary.
WASM_SIMD_COMPILED_TEST(S32x2Reverse) {
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11}});
}
// Test irregular shuffle.
WASM_SIMD_COMPILED_TEST(S32x4Irregular) {
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{0, 1, 2, 3, 16, 17, 18, 19, 16, 17, 18, 19, 20, 21, 22, 23}});
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7}});
}
WASM_SIMD_COMPILED_TEST(S16x8Dup) {
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{18, 19, 18, 19, 18, 19, 18, 19, 18, 19, 18, 19, 18, 19, 18, 19}});
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7}});
}
WASM_SIMD_COMPILED_TEST(S16x8ZipLeft) {
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23}});
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{0, 1, 0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6, 7, 6, 7}});
}
WASM_SIMD_COMPILED_TEST(S16x8ZipRight) {
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31}});
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{8, 9, 8, 9, 10, 11, 10, 11, 12, 13, 12, 13, 14, 15, 14, 15}});
}
WASM_SIMD_COMPILED_TEST(S16x8UnzipLeft) {
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29}});
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{0, 1, 4, 5, 8, 9, 12, 13, 0, 1, 4, 5, 8, 9, 12, 13}});
}
WASM_SIMD_COMPILED_TEST(S16x8UnzipRight) {
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31}});
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{2, 3, 6, 7, 10, 11, 14, 15, 2, 3, 6, 7, 10, 11, 14, 15}});
}
WASM_SIMD_COMPILED_TEST(S16x8TransposeLeft) {
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29}});
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{0, 1, 0, 1, 4, 5, 4, 5, 8, 9, 8, 9, 12, 13, 12, 13}});
}
WASM_SIMD_COMPILED_TEST(S16x8TransposeRight) {
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31}});
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{2, 3, 2, 3, 6, 7, 6, 7, 10, 11, 10, 11, 14, 15, 14, 15}});
}
WASM_SIMD_COMPILED_TEST(S16x4Reverse) {
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9}});
}
WASM_SIMD_COMPILED_TEST(S16x2Reverse) {
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13}});
}
WASM_SIMD_COMPILED_TEST(S16x8Irregular) {
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{0, 1, 16, 17, 16, 17, 0, 1, 4, 5, 20, 21, 6, 7, 22, 23}});
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{0, 1, 0, 1, 0, 1, 0, 1, 4, 5, 4, 5, 6, 7, 6, 7}});
}
WASM_SIMD_COMPILED_TEST(S8x16Dup) {
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19}});
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7}});
}
WASM_SIMD_COMPILED_TEST(S8x16ZipLeft) {
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23}});
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7}});
}
WASM_SIMD_COMPILED_TEST(S8x16ZipRight) {
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31}});
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15}});
}
WASM_SIMD_COMPILED_TEST(S8x16UnzipLeft) {
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30}});
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{0, 2, 4, 6, 8, 10, 12, 14, 0, 2, 4, 6, 8, 10, 12, 14}});
}
WASM_SIMD_COMPILED_TEST(S8x16UnzipRight) {
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31}});
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{1, 3, 5, 7, 9, 11, 13, 15, 1, 3, 5, 7, 9, 11, 13, 15}});
}
WASM_SIMD_COMPILED_TEST(S8x16TransposeLeft) {
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30}});
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14}});
}
WASM_SIMD_COMPILED_TEST(S8x16TransposeRight) {
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31}});
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15}});
}
WASM_SIMD_COMPILED_TEST(S8x8Reverse) {
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8}});
}
WASM_SIMD_COMPILED_TEST(S8x4Reverse) {
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12}});
}
WASM_SIMD_COMPILED_TEST(S8x2Reverse) {
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14}});
}
WASM_SIMD_COMPILED_TEST(S8x16Irregular) {
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{0, 16, 0, 16, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23}});
RunBinaryLaneOpTest<int8_t>(
- execution_mode, kExprS8x16Shuffle,
+ lower_simd, kExprS8x16Shuffle,
{{0, 0, 0, 0, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7}});
}
@@ -1922,7 +1906,7 @@ WASM_SIMD_COMPILED_TEST(S8x16Concat) {
for (int j = 0; j < bias; j++) {
expected[i++] = j + kLanes;
}
- RunBinaryLaneOpTest(execution_mode, kExprS8x16Shuffle, expected);
+ RunBinaryLaneOpTest(lower_simd, kExprS8x16Shuffle, expected);
}
}
@@ -1931,7 +1915,7 @@ WASM_SIMD_COMPILED_TEST(S8x16Concat) {
// test inputs. Test inputs with all true, all false, one true, and one false.
#define WASM_SIMD_BOOL_REDUCTION_TEST(format, lanes) \
WASM_SIMD_COMPILED_TEST(ReductionTest##lanes) { \
- WasmRunner<int32_t> r(execution_mode); \
+ WasmRunner<int32_t> r(kExecuteTurbofan, lower_simd); \
byte zero = r.AllocateLocal(kWasmS128); \
byte one_one = r.AllocateLocal(kWasmS128); \
byte reduced = r.AllocateLocal(kWasmI32); \
@@ -2004,7 +1988,7 @@ WASM_SIMD_BOOL_REDUCTION_TEST(16x8, 8)
WASM_SIMD_BOOL_REDUCTION_TEST(8x16, 16)
WASM_SIMD_TEST(SimdI32x4ExtractWithF32x4) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(kExecuteTurbofan, lower_simd);
BUILD(r, WASM_IF_ELSE_I(
WASM_I32_EQ(WASM_SIMD_I32x4_EXTRACT_LANE(
0, WASM_SIMD_F32x4_SPLAT(WASM_F32(30.5))),
@@ -2014,7 +1998,7 @@ WASM_SIMD_TEST(SimdI32x4ExtractWithF32x4) {
}
WASM_SIMD_TEST(SimdF32x4ExtractWithI32x4) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(kExecuteTurbofan, lower_simd);
BUILD(r,
WASM_IF_ELSE_I(WASM_F32_EQ(WASM_SIMD_F32x4_EXTRACT_LANE(
0, WASM_SIMD_I32x4_SPLAT(WASM_I32V(15))),
@@ -2026,9 +2010,9 @@ WASM_SIMD_TEST(SimdF32x4ExtractWithI32x4) {
WASM_SIMD_TEST(SimdF32x4AddWithI32x4) {
// Choose two floating point values whose sum is normal and exactly
// representable as a float.
- const int kOne = 0x3f800000;
+ const int kOne = 0x3F800000;
const int kTwo = 0x40000000;
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(kExecuteTurbofan, lower_simd);
BUILD(r,
WASM_IF_ELSE_I(
WASM_F32_EQ(
@@ -2043,7 +2027,7 @@ WASM_SIMD_TEST(SimdF32x4AddWithI32x4) {
}
WASM_SIMD_TEST(SimdI32x4AddWithF32x4) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(kExecuteTurbofan, lower_simd);
BUILD(r,
WASM_IF_ELSE_I(
WASM_I32_EQ(
@@ -2059,10 +2043,8 @@ WASM_SIMD_TEST(SimdI32x4AddWithF32x4) {
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
// V8_TARGET_ARCH_MIPS64
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 || \
- V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
WASM_SIMD_TEST(SimdI32x4Local) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(kExecuteTurbofan, lower_simd);
r.AllocateLocal(kWasmS128);
BUILD(r, WASM_SET_LOCAL(0, WASM_SIMD_I32x4_SPLAT(WASM_I32V(31))),
@@ -2071,7 +2053,7 @@ WASM_SIMD_TEST(SimdI32x4Local) {
}
WASM_SIMD_TEST(SimdI32x4SplatFromExtract) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(kExecuteTurbofan, lower_simd);
r.AllocateLocal(kWasmI32);
r.AllocateLocal(kWasmS128);
BUILD(r, WASM_SET_LOCAL(0, WASM_SIMD_I32x4_EXTRACT_LANE(
@@ -2082,7 +2064,7 @@ WASM_SIMD_TEST(SimdI32x4SplatFromExtract) {
}
WASM_SIMD_TEST(SimdI32x4For) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(kExecuteTurbofan, lower_simd);
r.AllocateLocal(kWasmI32);
r.AllocateLocal(kWasmS128);
BUILD(r,
@@ -2114,13 +2096,11 @@ WASM_SIMD_TEST(SimdI32x4For) {
WASM_GET_LOCAL(0));
CHECK_EQ(1, r.Call());
}
-#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 ||
- // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
V8_TARGET_ARCH_MIPS64
WASM_SIMD_TEST(SimdF32x4For) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(kExecuteTurbofan, lower_simd);
r.AllocateLocal(kWasmI32);
r.AllocateLocal(kWasmS128);
BUILD(r, WASM_SET_LOCAL(1, WASM_SIMD_F32x4_SPLAT(WASM_F32(21.25))),
@@ -2145,9 +2125,6 @@ WASM_SIMD_TEST(SimdF32x4For) {
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
// V8_TARGET_ARCH_MIPS64
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 || \
- V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
-
template <typename T, int numLanes = 4>
void SetVectorByLanes(T* v, const std::array<T, numLanes>& arr) {
for (int lane = 0; lane < numLanes; lane++) {
@@ -2174,7 +2151,7 @@ const T& GetScalar(T* v, int lane) {
}
WASM_SIMD_TEST(SimdI32x4GetGlobal) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(kExecuteTurbofan, lower_simd);
// Pad the globals with a few unused slots to get a non-zero offset.
r.builder().AddGlobal<int32_t>(kWasmI32); // purposefully unused
r.builder().AddGlobal<int32_t>(kWasmI32); // purposefully unused
@@ -2202,7 +2179,7 @@ WASM_SIMD_TEST(SimdI32x4GetGlobal) {
}
WASM_SIMD_TEST(SimdI32x4SetGlobal) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(kExecuteTurbofan, lower_simd);
// Pad the globals with a few unused slots to get a non-zero offset.
r.builder().AddGlobal<int32_t>(kWasmI32); // purposefully unused
r.builder().AddGlobal<int32_t>(kWasmI32); // purposefully unused
@@ -2223,13 +2200,11 @@ WASM_SIMD_TEST(SimdI32x4SetGlobal) {
CHECK_EQ(GetScalar(global, 2), 45);
CHECK_EQ(GetScalar(global, 3), 56);
}
-#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 ||
- // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
- V8_TARGET_ARCH_MIPS64
+ V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
WASM_SIMD_TEST(SimdF32x4GetGlobal) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(kExecuteTurbofan, lower_simd);
float* global = r.builder().AddGlobal<float>(kWasmS128);
SetVectorByLanes<float>(global, {{0.0, 1.5, 2.25, 3.5}});
r.AllocateLocal(kWasmI32);
@@ -2252,7 +2227,7 @@ WASM_SIMD_TEST(SimdF32x4GetGlobal) {
}
WASM_SIMD_TEST(SimdF32x4SetGlobal) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(kExecuteTurbofan, lower_simd);
float* global = r.builder().AddGlobal<float>(kWasmS128);
BUILD(r, WASM_SET_GLOBAL(0, WASM_SIMD_F32x4_SPLAT(WASM_F32(13.5))),
WASM_SET_GLOBAL(0, WASM_SIMD_F32x4_REPLACE_LANE(1, WASM_GET_GLOBAL(0),
@@ -2269,12 +2244,10 @@ WASM_SIMD_TEST(SimdF32x4SetGlobal) {
CHECK_EQ(GetScalar(global, 3), 65.0f);
}
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
- // V8_TARGET_ARCH_MIPS64
+ // V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 || \
- V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
WASM_SIMD_COMPILED_TEST(SimdLoadStoreLoad) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(kExecuteTurbofan, lower_simd);
int32_t* memory = r.builder().AddMemoryElems<int32_t>(8);
// Load memory, store it, then reload it and extract the first lane. Use a
// non-zero offset into the memory of 1 lane (4 bytes) to test indexing.
@@ -2287,8 +2260,6 @@ WASM_SIMD_COMPILED_TEST(SimdLoadStoreLoad) {
CHECK_EQ(expected, r.Call());
}
}
-#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 ||
- // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
#undef WASM_SIMD_TEST
#undef WASM_SIMD_COMPILED_TEST
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm.cc b/deps/v8/test/cctest/wasm/test-run-wasm.cc
index f928904e9c..6a7fde6401 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm.cc
@@ -195,19 +195,19 @@ WASM_I32_BINOP_TEST(Sub, int32_t, a - b)
WASM_I32_BINOP_TEST(Mul, int32_t, a* b)
WASM_I32_BINOP_TEST(DivS, int32_t,
(a == kMinInt && b == -1) || b == 0
- ? static_cast<int32_t>(0xdeadbeef)
+ ? static_cast<int32_t>(0xDEADBEEF)
: a / b)
-WASM_I32_BINOP_TEST(DivU, uint32_t, b == 0 ? 0xdeadbeef : a / b)
-WASM_I32_BINOP_TEST(RemS, int32_t, b == 0 ? 0xdeadbeef : b == -1 ? 0 : a % b)
-WASM_I32_BINOP_TEST(RemU, uint32_t, b == 0 ? 0xdeadbeef : a % b)
+WASM_I32_BINOP_TEST(DivU, uint32_t, b == 0 ? 0xDEADBEEF : a / b)
+WASM_I32_BINOP_TEST(RemS, int32_t, b == 0 ? 0xDEADBEEF : b == -1 ? 0 : a % b)
+WASM_I32_BINOP_TEST(RemU, uint32_t, b == 0 ? 0xDEADBEEF : a % b)
WASM_I32_BINOP_TEST(And, int32_t, a& b)
WASM_I32_BINOP_TEST(Ior, int32_t, a | b)
WASM_I32_BINOP_TEST(Xor, int32_t, a ^ b)
-WASM_I32_BINOP_TEST(Shl, int32_t, a << (b & 0x1f))
-WASM_I32_BINOP_TEST(ShrU, uint32_t, a >> (b & 0x1f))
-WASM_I32_BINOP_TEST(ShrS, int32_t, a >> (b & 0x1f))
-WASM_I32_BINOP_TEST(Ror, uint32_t, (a >> (b & 0x1f)) | (a << (32 - (b & 0x1f))))
-WASM_I32_BINOP_TEST(Rol, uint32_t, (a << (b & 0x1f)) | (a >> (32 - (b & 0x1f))))
+WASM_I32_BINOP_TEST(Shl, int32_t, a << (b & 0x1F))
+WASM_I32_BINOP_TEST(ShrU, uint32_t, a >> (b & 0x1F))
+WASM_I32_BINOP_TEST(ShrS, int32_t, a >> (b & 0x1F))
+WASM_I32_BINOP_TEST(Ror, uint32_t, (a >> (b & 0x1F)) | (a << (32 - (b & 0x1F))))
+WASM_I32_BINOP_TEST(Rol, uint32_t, (a << (b & 0x1F)) | (a >> (32 - (b & 0x1F))))
WASM_I32_BINOP_TEST(Eq, int32_t, a == b)
WASM_I32_BINOP_TEST(Ne, int32_t, a != b)
WASM_I32_BINOP_TEST(LtS, int32_t, a < b)
@@ -245,11 +245,11 @@ WASM_EXEC_TEST(Int32Clz) {
TestInt32Unop(execution_mode, kExprI32Clz, 4, 0x08050000);
TestInt32Unop(execution_mode, kExprI32Clz, 5, 0x04006000);
TestInt32Unop(execution_mode, kExprI32Clz, 6, 0x02000000);
- TestInt32Unop(execution_mode, kExprI32Clz, 7, 0x010000a0);
- TestInt32Unop(execution_mode, kExprI32Clz, 8, 0x00800c00);
+ TestInt32Unop(execution_mode, kExprI32Clz, 7, 0x010000A0);
+ TestInt32Unop(execution_mode, kExprI32Clz, 8, 0x00800C00);
TestInt32Unop(execution_mode, kExprI32Clz, 9, 0x00400000);
- TestInt32Unop(execution_mode, kExprI32Clz, 10, 0x0020000d);
- TestInt32Unop(execution_mode, kExprI32Clz, 11, 0x00100f00);
+ TestInt32Unop(execution_mode, kExprI32Clz, 10, 0x0020000D);
+ TestInt32Unop(execution_mode, kExprI32Clz, 11, 0x00100F00);
TestInt32Unop(execution_mode, kExprI32Clz, 12, 0x00080000);
TestInt32Unop(execution_mode, kExprI32Clz, 13, 0x00041000);
TestInt32Unop(execution_mode, kExprI32Clz, 14, 0x00020020);
@@ -279,42 +279,42 @@ WASM_EXEC_TEST(Int32Ctz) {
TestInt32Unop(execution_mode, kExprI32Ctz, 30, 0x40000000);
TestInt32Unop(execution_mode, kExprI32Ctz, 29, 0x20000000);
TestInt32Unop(execution_mode, kExprI32Ctz, 28, 0x10000000);
- TestInt32Unop(execution_mode, kExprI32Ctz, 27, 0xa8000000);
- TestInt32Unop(execution_mode, kExprI32Ctz, 26, 0xf4000000);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 27, 0xA8000000);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 26, 0xF4000000);
TestInt32Unop(execution_mode, kExprI32Ctz, 25, 0x62000000);
TestInt32Unop(execution_mode, kExprI32Ctz, 24, 0x91000000);
- TestInt32Unop(execution_mode, kExprI32Ctz, 23, 0xcd800000);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 23, 0xCD800000);
TestInt32Unop(execution_mode, kExprI32Ctz, 22, 0x09400000);
- TestInt32Unop(execution_mode, kExprI32Ctz, 21, 0xaf200000);
- TestInt32Unop(execution_mode, kExprI32Ctz, 20, 0xac100000);
- TestInt32Unop(execution_mode, kExprI32Ctz, 19, 0xe0b80000);
- TestInt32Unop(execution_mode, kExprI32Ctz, 18, 0x9ce40000);
- TestInt32Unop(execution_mode, kExprI32Ctz, 17, 0xc7920000);
- TestInt32Unop(execution_mode, kExprI32Ctz, 16, 0xb8f10000);
- TestInt32Unop(execution_mode, kExprI32Ctz, 15, 0x3b9f8000);
- TestInt32Unop(execution_mode, kExprI32Ctz, 14, 0xdb4c4000);
- TestInt32Unop(execution_mode, kExprI32Ctz, 13, 0xe9a32000);
- TestInt32Unop(execution_mode, kExprI32Ctz, 12, 0xfca61000);
- TestInt32Unop(execution_mode, kExprI32Ctz, 11, 0x6c8a7800);
- TestInt32Unop(execution_mode, kExprI32Ctz, 10, 0x8ce5a400);
- TestInt32Unop(execution_mode, kExprI32Ctz, 9, 0xcb7d0200);
- TestInt32Unop(execution_mode, kExprI32Ctz, 8, 0xcb4dc100);
- TestInt32Unop(execution_mode, kExprI32Ctz, 7, 0xdfbec580);
- TestInt32Unop(execution_mode, kExprI32Ctz, 6, 0x27a9db40);
- TestInt32Unop(execution_mode, kExprI32Ctz, 5, 0xde3bcb20);
- TestInt32Unop(execution_mode, kExprI32Ctz, 4, 0xd7e8a610);
- TestInt32Unop(execution_mode, kExprI32Ctz, 3, 0x9afdbc88);
- TestInt32Unop(execution_mode, kExprI32Ctz, 2, 0x9afdbc84);
- TestInt32Unop(execution_mode, kExprI32Ctz, 1, 0x9afdbc82);
- TestInt32Unop(execution_mode, kExprI32Ctz, 0, 0x9afdbc81);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 21, 0xAF200000);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 20, 0xAC100000);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 19, 0xE0B80000);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 18, 0x9CE40000);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 17, 0xC7920000);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 16, 0xB8F10000);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 15, 0x3B9F8000);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 14, 0xDB4C4000);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 13, 0xE9A32000);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 12, 0xFCA61000);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 11, 0x6C8A7800);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 10, 0x8CE5A400);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 9, 0xCB7D0200);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 8, 0xCB4DC100);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 7, 0xDFBEC580);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 6, 0x27A9DB40);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 5, 0xDE3BCB20);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 4, 0xD7E8A610);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 3, 0x9AFDBC88);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 2, 0x9AFDBC84);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 1, 0x9AFDBC82);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 0, 0x9AFDBC81);
}
WASM_EXEC_TEST(Int32Popcnt) {
- TestInt32Unop(execution_mode, kExprI32Popcnt, 32, 0xffffffff);
+ TestInt32Unop(execution_mode, kExprI32Popcnt, 32, 0xFFFFFFFF);
TestInt32Unop(execution_mode, kExprI32Popcnt, 0, 0x00000000);
TestInt32Unop(execution_mode, kExprI32Popcnt, 1, 0x00008000);
TestInt32Unop(execution_mode, kExprI32Popcnt, 13, 0x12345678);
- TestInt32Unop(execution_mode, kExprI32Popcnt, 19, 0xfedcba09);
+ TestInt32Unop(execution_mode, kExprI32Popcnt, 19, 0xFEDCBA09);
}
WASM_EXEC_TEST(I32Eqz) {
@@ -419,11 +419,11 @@ WASM_EXEC_TEST(Int32AsmjsRemS_byzero_const) {
}
WASM_EXEC_TEST(Int32DivU_byzero_const) {
- for (uint32_t denom = 0xfffffffe; denom < 8; ++denom) {
+ for (uint32_t denom = 0xFFFFFFFE; denom < 8; ++denom) {
WasmRunner<uint32_t, uint32_t> r(execution_mode);
BUILD(r, WASM_I32_DIVU(WASM_GET_LOCAL(0), WASM_I32V_1(denom)));
- for (uint32_t val = 0xfffffff0; val < 8; ++val) {
+ for (uint32_t val = 0xFFFFFFF0; val < 8; ++val) {
if (denom == 0) {
CHECK_TRAP(r.Call(val));
} else {
@@ -830,7 +830,7 @@ WASM_EXEC_TEST(Regression_660262) {
WasmRunner<int32_t> r(execution_mode);
r.builder().AddMemoryElems<int32_t>(8);
BUILD(r, kExprI32Const, 0x00, kExprI32Const, 0x00, kExprI32LoadMem, 0x00,
- 0x0f, kExprBrTable, 0x00, 0x80, 0x00); // entries=0
+ 0x0F, kExprBrTable, 0x00, 0x80, 0x00); // entries=0
r.Call();
}
@@ -1047,22 +1047,22 @@ WASM_EXEC_TEST(SignallingNanSurvivesI32ReinterpretF32) {
WasmRunner<int32_t> r(execution_mode);
BUILD(r, WASM_I32_REINTERPRET_F32(
- WASM_SEQ(kExprF32Const, 0x00, 0x00, 0xa0, 0x7f)));
+ WASM_SEQ(kExprF32Const, 0x00, 0x00, 0xA0, 0x7F)));
// This is a signalling NaN.
- CHECK_EQ(0x7fa00000, r.Call());
+ CHECK_EQ(0x7FA00000, r.Call());
}
#endif
WASM_EXEC_TEST(LoadMaxUint32Offset) {
// TODO(eholk): Fix this test for the trap handler.
- if (trap_handler::UseTrapHandler()) return;
+ if (trap_handler::IsTrapHandlerEnabled()) return;
WasmRunner<int32_t> r(execution_mode);
r.builder().AddMemoryElems<int32_t>(8);
BUILD(r, WASM_LOAD_MEM_OFFSET(MachineType::Int32(), // type
- U32V_5(0xffffffff), // offset
+ U32V_5(0xFFFFFFFF), // offset
WASM_ZERO)); // index
CHECK_TRAP32(r.Call());
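
The trap here follows from wasm's addressing rule: the effective address is the index plus the static offset, computed without 32-bit wrap-around, and must fit inside the (32-byte) test memory. A minimal sketch of that bounds check, assuming the engine checks in 64-bit arithmetic:

```cpp
#include <cassert>
#include <cstdint>

// Sketch of the bounds check a wasm load must pass: the effective address
// is index + offset in full precision, never wrapped to 32 bits.
bool InBounds(uint64_t mem_size, uint32_t index, uint32_t offset,
              uint32_t access_size) {
  uint64_t effective = uint64_t{index} + offset;  // cannot overflow in 64 bits
  return effective + access_size <= mem_size;
}

int main() {
  const uint64_t kMemSize = 8 * sizeof(int32_t);  // 32 bytes, as in the test
  assert(!InBounds(kMemSize, 0, 0xFFFFFFFF, 4));  // offset alone is OOB: trap
  assert(InBounds(kMemSize, 0, 0, 4));            // first element is fine
  return 0;
}
```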
@@ -1485,20 +1485,20 @@ WASM_EXEC_TEST(LoadMemI32_alignment) {
BUILD(r,
WASM_LOAD_MEM_ALIGNMENT(MachineType::Int32(), WASM_ZERO, alignment));
- r.builder().WriteMemory(&memory[0], 0x1a2b3c4d);
- CHECK_EQ(0x1a2b3c4d, r.Call(0));
+ r.builder().WriteMemory(&memory[0], 0x1A2B3C4D);
+ CHECK_EQ(0x1A2B3C4D, r.Call(0));
- r.builder().WriteMemory(&memory[0], 0x5e6f7a8b);
- CHECK_EQ(0x5e6f7a8b, r.Call(0));
+ r.builder().WriteMemory(&memory[0], 0x5E6F7A8B);
+ CHECK_EQ(0x5E6F7A8B, r.Call(0));
- r.builder().WriteMemory(&memory[0], 0x7ca0b1c2);
- CHECK_EQ(0x7ca0b1c2, r.Call(0));
+ r.builder().WriteMemory(&memory[0], 0x7CA0B1C2);
+ CHECK_EQ(0x7CA0B1C2, r.Call(0));
}
}
WASM_EXEC_TEST(LoadMemI32_oob) {
// TODO(eholk): Fix this test for the trap handler.
- if (trap_handler::UseTrapHandler()) return;
+ if (trap_handler::IsTrapHandlerEnabled()) return;
WasmRunner<int32_t, uint32_t> r(execution_mode);
int32_t* memory = r.builder().AddMemoryElems<int32_t>(8);
r.builder().RandomizeMemory(1111);
@@ -1518,7 +1518,7 @@ WASM_EXEC_TEST(LoadMemI32_oob) {
WASM_EXEC_TEST(LoadMem_offset_oob) {
// TODO(eholk): Fix this test for the trap handler.
- if (trap_handler::UseTrapHandler()) return;
+ if (trap_handler::IsTrapHandlerEnabled()) return;
static const MachineType machineTypes[] = {
MachineType::Int8(), MachineType::Uint8(), MachineType::Int16(),
MachineType::Uint16(), MachineType::Int32(), MachineType::Uint32(),
@@ -1569,7 +1569,7 @@ WASM_EXEC_TEST(LoadMemI32_offset) {
WASM_EXEC_TEST(LoadMemI32_const_oob_misaligned) {
// TODO(eholk): Fix this test for the trap handler.
- if (trap_handler::UseTrapHandler()) return;
+ if (trap_handler::IsTrapHandlerEnabled()) return;
constexpr byte kMemSize = 12;
// TODO(titzer): Fix misaligned accesses on MIPS and re-enable.
for (byte offset = 0; offset < kMemSize + 5; ++offset) {
@@ -1592,7 +1592,7 @@ WASM_EXEC_TEST(LoadMemI32_const_oob_misaligned) {
WASM_EXEC_TEST(LoadMemI32_const_oob) {
// TODO(eholk): Fix this test for the trap handler.
- if (trap_handler::UseTrapHandler()) return;
+ if (trap_handler::IsTrapHandlerEnabled()) return;
constexpr byte kMemSize = 24;
for (byte offset = 0; offset < kMemSize + 5; offset += 4) {
for (byte index = 0; index < kMemSize + 5; index += 4) {
@@ -1632,7 +1632,7 @@ WASM_EXEC_TEST(StoreMemI32_alignment) {
WASM_EXEC_TEST(StoreMemI32_offset) {
WasmRunner<int32_t, int32_t> r(execution_mode);
int32_t* memory = r.builder().AddMemoryElems<int32_t>(4);
- const int32_t kWritten = 0xaabbccdd;
+ const int32_t kWritten = 0xAABBCCDD;
BUILD(r, WASM_STORE_MEM_OFFSET(MachineType::Int32(), 4, WASM_GET_LOCAL(0),
WASM_I32V_5(kWritten)),
@@ -1654,7 +1654,7 @@ WASM_EXEC_TEST(StoreMemI32_offset) {
WASM_EXEC_TEST(StoreMem_offset_oob) {
// TODO(eholk): Fix this test for the trap handler.
- if (trap_handler::UseTrapHandler()) return;
+ if (trap_handler::IsTrapHandlerEnabled()) return;
// 64-bit cases are handled in test-run-wasm-64.cc
static const MachineType machineTypes[] = {
MachineType::Int8(), MachineType::Uint8(), MachineType::Int16(),
@@ -1682,6 +1682,36 @@ WASM_EXEC_TEST(StoreMem_offset_oob) {
}
}
+WASM_EXEC_TEST(Store_i32_narrowed) {
+ constexpr byte kOpcodes[] = {kExprI32StoreMem8, kExprI32StoreMem16,
+ kExprI32StoreMem};
+ int stored_size_in_bytes = 0;
+ for (auto opcode : kOpcodes) {
+ stored_size_in_bytes = std::max(1, stored_size_in_bytes * 2);
+ constexpr int kBytes = 24;
+ uint8_t expected_memory[kBytes] = {0};
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
+ uint8_t* memory = r.builder().AddMemoryElems<uint8_t>(kBytes);
+ constexpr uint32_t kPattern = 0x12345678;
+
+ BUILD(r, WASM_GET_LOCAL(0), // index
+ WASM_GET_LOCAL(1), // value
+ opcode, ZERO_ALIGNMENT, ZERO_OFFSET, // store
+ WASM_ZERO); // return value
+
+ for (int i = 0; i <= kBytes - stored_size_in_bytes; ++i) {
+ uint32_t pattern = base::bits::RotateLeft32(kPattern, i % 32);
+ r.Call(i, pattern);
+ for (int b = 0; b < stored_size_in_bytes; ++b) {
+ expected_memory[i + b] = static_cast<uint8_t>(pattern >> (b * 8));
+ }
+ for (int w = 0; w < kBytes; ++w) {
+ CHECK_EQ(expected_memory[w], memory[w]);
+ }
+ }
+ }
+}
+
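
The new test exercises the narrowed i32 stores, which write only the low one or two bytes of the operand in little-endian order — exactly the model behind the expected_memory update above. A standalone sketch of that byte image:

```cpp
#include <cassert>
#include <cstdint>

// Little-endian image of storing the low `n` bytes of a 32-bit value,
// mirroring the expected_memory update in the test above.
void StoreNarrowed(uint8_t* mem, uint32_t value, int n) {
  for (int b = 0; b < n; ++b) mem[b] = static_cast<uint8_t>(value >> (b * 8));
}

int main() {
  uint8_t mem[4] = {0, 0, 0, 0};
  StoreNarrowed(mem, 0x12345678, 2);         // i32.store16
  assert(mem[0] == 0x78 && mem[1] == 0x56);  // low half, low byte first
  assert(mem[2] == 0 && mem[3] == 0);        // untouched bytes stay zero
  return 0;
}
```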
WASM_EXEC_TEST(LoadMemI32_P) {
const int kNumElems = 8;
WasmRunner<int32_t, int32_t> r(execution_mode);
@@ -2110,20 +2140,20 @@ WASM_EXEC_TEST(MixedGlobals) {
WASM_SET_GLOBAL(4, WASM_LOAD_MEM(MachineType::Float64(), WASM_ZERO)),
WASM_ZERO);
- memory[0] = 0xaa;
- memory[1] = 0xcc;
+ memory[0] = 0xAA;
+ memory[1] = 0xCC;
memory[2] = 0x55;
- memory[3] = 0xee;
+ memory[3] = 0xEE;
memory[4] = 0x33;
memory[5] = 0x22;
memory[6] = 0x11;
memory[7] = 0x99;
r.Call(1);
- CHECK(static_cast<int32_t>(0xee55ccaa) == *var_int32);
- CHECK(static_cast<uint32_t>(0xee55ccaa) == *var_uint32);
- CHECK(bit_cast<float>(0xee55ccaa) == *var_float);
- CHECK(bit_cast<double>(0x99112233ee55ccaaULL) == *var_double);
+ CHECK(static_cast<int32_t>(0xEE55CCAA) == *var_int32);
+ CHECK(static_cast<uint32_t>(0xEE55CCAA) == *var_uint32);
+ CHECK(bit_cast<float>(0xEE55CCAA) == *var_float);
+ CHECK(bit_cast<double>(0x99112233EE55CCAAULL) == *var_double);
USE(unused);
}
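
The expected values follow from little-endian reassembly of the bytes written above: the first four bytes {0xAA, 0xCC, 0x55, 0xEE} read back as 0xEE55CCAA, and all eight as 0x99112233EE55CCAA. A quick check, assuming a little-endian host as on the test bots:

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  // Same byte image as the test writes into wasm memory.
  const uint8_t mem[8] = {0xAA, 0xCC, 0x55, 0xEE, 0x33, 0x22, 0x11, 0x99};
  uint32_t u32;
  uint64_t u64;
  memcpy(&u32, mem, 4);  // little-endian host assumed
  memcpy(&u64, mem, 8);
  assert(u32 == 0xEE55CCAA);
  assert(u64 == 0x99112233EE55CCAAULL);
  return 0;
}
```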
@@ -2370,10 +2400,11 @@ WASM_EXEC_TEST(AddCall) {
byte local = r.AllocateLocal(kWasmI32);
BUILD(r, WASM_SET_LOCAL(local, WASM_I32V_2(99)),
- WASM_I32_ADD(WASM_CALL_FUNCTION(t1.function_index(), WASM_GET_LOCAL(0),
- WASM_GET_LOCAL(0)),
- WASM_CALL_FUNCTION(t1.function_index(), WASM_GET_LOCAL(1),
- WASM_GET_LOCAL(local))));
+ WASM_I32_ADD(
+ WASM_CALL_FUNCTION(t1.function_index(), WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(0)),
+ WASM_CALL_FUNCTION(t1.function_index(), WASM_GET_LOCAL(local),
+ WASM_GET_LOCAL(local))));
CHECK_EQ(198, r.Call(0));
CHECK_EQ(200, r.Call(1));
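
The rewrite replaces WASM_GET_LOCAL(1) with WASM_GET_LOCAL(local); with a single parameter the freshly allocated local has index 1, so behavior is unchanged and the context-line expectations still hold. Assuming t1 (defined outside this hunk) adds its two arguments, the checked values fall out directly:

```cpp
#include <cassert>

int main() {
  // Assumption: t1 is a two-argument add. The body computes
  // t1(p0, p0) + t1(local, local) with local preset to 99.
  auto t1 = [](int a, int b) { return a + b; };
  const int local = 99;
  assert(t1(0, 0) + t1(local, local) == 198);  // r.Call(0)
  assert(t1(1, 1) + t1(local, local) == 200);  // r.Call(1)
  return 0;
}
```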
@@ -2823,15 +2854,8 @@ WASM_EXEC_TEST(I32SConvertF32) {
WasmRunner<int32_t, float> r(execution_mode);
BUILD(r, WASM_I32_SCONVERT_F32(WASM_GET_LOCAL(0)));
- // The upper bound is (INT32_MAX + 1), which is the lowest float-representable
- // number above INT32_MAX which cannot be represented as int32.
- float upper_bound = 2147483648.0f;
- // We use INT32_MIN as a lower bound because (INT32_MIN - 1) is not
- // representable as float, and no number between (INT32_MIN - 1) and INT32_MIN
- // is.
- float lower_bound = static_cast<float>(INT32_MIN);
FOR_FLOAT32_INPUTS(i) {
- if (*i < upper_bound && *i >= lower_bound) {
+ if (is_inbounds<int32_t>(*i)) {
CHECK_EQ(static_cast<int32_t>(*i), r.Call(*i));
} else {
CHECK_TRAP32(r.Call(*i));
@@ -2839,18 +2863,29 @@ WASM_EXEC_TEST(I32SConvertF32) {
}
}
+WASM_EXEC_TEST(I32SConvertSatF32) {
+ EXPERIMENTAL_FLAG_SCOPE(sat_f2i_conversions);
+ WasmRunner<int32_t, float> r(execution_mode);
+ BUILD(r, WASM_I32_SCONVERT_SAT_F32(WASM_GET_LOCAL(0)));
+
+ FOR_FLOAT32_INPUTS(i) {
+ int32_t expected =
+ is_inbounds<int32_t>(*i)
+ ? static_cast<int32_t>(*i)
+ : std::isnan(*i) ? 0
+ : *i < 0.0 ? std::numeric_limits<int32_t>::min()
+ : std::numeric_limits<int32_t>::max();
+ int32_t found = r.Call(*i);
+ CHECK_EQ(expected, found);
+ }
+}
+
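
This test, and the three saturating variants that follow, pin down the non-trapping conversion semantics behind the sat_f2i_conversions flag: NaN maps to 0 and out-of-range inputs clamp to the integer type's extremes. The expected-value ternary can be read as this small helper, shown here for the signed f64 case:

```cpp
#include <cassert>
#include <cmath>
#include <cstdint>
#include <limits>

// Saturating f64 -> i32 with the semantics the tests expect: NaN -> 0,
// out-of-range inputs clamp to INT32_MIN / INT32_MAX, in-range truncates.
int32_t SatConvertS32(double x) {
  if (std::isnan(x)) return 0;
  if (x < -2147483648.0) return std::numeric_limits<int32_t>::min();
  if (x >= 2147483648.0) return std::numeric_limits<int32_t>::max();
  return static_cast<int32_t>(x);
}

int main() {
  assert(SatConvertS32(1.9) == 1);            // truncation toward zero
  assert(SatConvertS32(-1e99) == INT32_MIN);  // clamps low instead of trapping
  assert(SatConvertS32(1e99) == INT32_MAX);   // clamps high
  assert(SatConvertS32(std::nan("")) == 0);   // NaN -> 0
  return 0;
}
```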
WASM_EXEC_TEST(I32SConvertF64) {
WasmRunner<int32_t, double> r(execution_mode);
BUILD(r, WASM_I32_SCONVERT_F64(WASM_GET_LOCAL(0)));
- // The upper bound is (INT32_MAX + 1), which is the lowest double-
- // representable number above INT32_MAX which cannot be represented as int32.
- double upper_bound = 2147483648.0;
- // The lower bound is (INT32_MIN - 1), which is the greatest double-
- // representable number below INT32_MIN which cannot be represented as int32.
- double lower_bound = -2147483649.0;
FOR_FLOAT64_INPUTS(i) {
- if (*i<upper_bound&& * i> lower_bound) {
+ if (is_inbounds<int32_t>(*i)) {
CHECK_EQ(static_cast<int32_t>(*i), r.Call(*i));
} else {
CHECK_TRAP32(r.Call(*i));
@@ -2858,16 +2893,27 @@ WASM_EXEC_TEST(I32SConvertF64) {
}
}
+WASM_EXEC_TEST(I32SConvertSatF64) {
+ EXPERIMENTAL_FLAG_SCOPE(sat_f2i_conversions);
+ WasmRunner<int32_t, double> r(execution_mode);
+ BUILD(r, WASM_I32_SCONVERT_SAT_F64(WASM_GET_LOCAL(0)));
+ FOR_FLOAT64_INPUTS(i) {
+ int32_t expected =
+ is_inbounds<int32_t>(*i)
+ ? static_cast<int32_t>(*i)
+ : std::isnan(*i) ? 0
+ : *i < 0.0 ? std::numeric_limits<int32_t>::min()
+ : std::numeric_limits<int32_t>::max();
+ int32_t found = r.Call(*i);
+ CHECK_EQ(expected, found);
+ }
+}
+
WASM_EXEC_TEST(I32UConvertF32) {
WasmRunner<uint32_t, float> r(execution_mode);
BUILD(r, WASM_I32_UCONVERT_F32(WASM_GET_LOCAL(0)));
- // The upper bound is (UINT32_MAX + 1), which is the lowest
- // float-representable number above UINT32_MAX which cannot be represented as
- // uint32.
- double upper_bound = 4294967296.0f;
- double lower_bound = -1.0f;
FOR_FLOAT32_INPUTS(i) {
- if (*i<upper_bound&& * i> lower_bound) {
+ if (is_inbounds<uint32_t>(*i)) {
CHECK_EQ(static_cast<uint32_t>(*i), r.Call(*i));
} else {
CHECK_TRAP32(r.Call(*i));
@@ -2875,16 +2921,27 @@ WASM_EXEC_TEST(I32UConvertF32) {
}
}
+WASM_EXEC_TEST(I32UConvertSatF32) {
+ EXPERIMENTAL_FLAG_SCOPE(sat_f2i_conversions);
+ WasmRunner<uint32_t, float> r(execution_mode);
+ BUILD(r, WASM_I32_UCONVERT_SAT_F32(WASM_GET_LOCAL(0)));
+ FOR_FLOAT32_INPUTS(i) {
+ int32_t expected =
+ is_inbounds<uint32_t>(*i)
+ ? static_cast<uint32_t>(*i)
+ : std::isnan(*i) ? 0
+ : *i < 0.0 ? std::numeric_limits<uint32_t>::min()
+ : std::numeric_limits<uint32_t>::max();
+ int32_t found = r.Call(*i);
+ CHECK_EQ(expected, found);
+ }
+}
+
WASM_EXEC_TEST(I32UConvertF64) {
WasmRunner<uint32_t, double> r(execution_mode);
BUILD(r, WASM_I32_UCONVERT_F64(WASM_GET_LOCAL(0)));
- // The upper bound is (UINT32_MAX + 1), which is the lowest
- // double-representable number above UINT32_MAX which cannot be represented as
- // uint32.
- double upper_bound = 4294967296.0;
- double lower_bound = -1.0;
FOR_FLOAT64_INPUTS(i) {
- if (*i<upper_bound&& * i> lower_bound) {
+ if (is_inbounds<uint32_t>(*i)) {
CHECK_EQ(static_cast<uint32_t>(*i), r.Call(*i));
} else {
CHECK_TRAP32(r.Call(*i));
@@ -2892,6 +2949,22 @@ WASM_EXEC_TEST(I32UConvertF64) {
}
}
+WASM_EXEC_TEST(I32UConvertSatF64) {
+ EXPERIMENTAL_FLAG_SCOPE(sat_f2i_conversions);
+ WasmRunner<uint32_t, double> r(execution_mode);
+ BUILD(r, WASM_I32_UCONVERT_SAT_F64(WASM_GET_LOCAL(0)));
+ FOR_FLOAT64_INPUTS(i) {
+ int32_t expected =
+ is_inbounds<uint32_t>(*i)
+ ? static_cast<uint32_t>(*i)
+ : std::isnan(*i) ? 0
+ : *i < 0.0 ? std::numeric_limits<uint32_t>::min()
+ : std::numeric_limits<uint32_t>::max();
+ int32_t found = r.Call(*i);
+ CHECK_EQ(expected, found);
+ }
+}
+
WASM_EXEC_TEST(F64CopySign) {
WasmRunner<double, double, double> r(execution_mode);
BUILD(r, WASM_F64_COPYSIGN(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
diff --git a/deps/v8/test/cctest/wasm/test-streaming-compilation.cc b/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
index a2c352bb4d..ef77708267 100644
--- a/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
+++ b/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
@@ -10,6 +10,7 @@
#include "src/wasm/compilation-manager.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/streaming-decoder.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-module-builder.h"
#include "src/wasm/wasm-module.h"
@@ -103,8 +104,10 @@ class StreamTester {
i::Handle<i::JSPromise> i_promise = v8::Utils::OpenHandle(*promise_);
- stream_ = i_isolate->wasm_compilation_manager()->StartStreamingCompilation(
- i_isolate, v8::Utils::OpenHandle(*context), i_promise);
+ stream_ = i_isolate->wasm_engine()
+ ->compilation_manager()
+ ->StartStreamingCompilation(
+ i_isolate, v8::Utils::OpenHandle(*context), i_promise);
}
std::shared_ptr<StreamingDecoder> stream() { return stream_; }
@@ -327,12 +330,12 @@ ZoneBuffer GetModuleWithInvalidSectionSize(Zone* zone) {
ZoneBuffer buffer = GetValidModuleBytes(zone);
// 9 == 4 (wasm magic) + 4 (version) + 1 (section code)
uint8_t* section_size_address = const_cast<uint8_t*>(buffer.begin()) + 9;
- // 0x808080800f is an invalid module size in leb encoding.
+ // 0x808080800F is an invalid module size in LEB encoding.
section_size_address[0] = 0x80;
section_size_address[1] = 0x80;
section_size_address[2] = 0x80;
section_size_address[3] = 0x80;
- section_size_address[4] = 0x0f;
+ section_size_address[4] = 0x0F;
return buffer;
}
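
Unsigned LEB128 packs seven payload bits per byte, least-significant group first, with the top bit as a continuation flag; 0x80 0x80 0x80 0x80 0x0F therefore decodes to 0xF << 28 = 0xF0000000, a section size no real module could have. A minimal decoder makes this concrete:

```cpp
#include <cassert>
#include <cstdint>

// Minimal unsigned LEB128 decoder (up to the 5 bytes a u32 needs).
uint32_t DecodeU32LEB(const uint8_t* p) {
  uint32_t result = 0;
  for (int shift = 0; shift <= 28; shift += 7) {
    uint8_t byte = *p++;
    result |= static_cast<uint32_t>(byte & 0x7F) << shift;
    if ((byte & 0x80) == 0) break;  // continuation bit clear: done
  }
  return result;
}

int main() {
  const uint8_t bytes[] = {0x80, 0x80, 0x80, 0x80, 0x0F};
  assert(DecodeU32LEB(bytes) == 0xF0000000u);  // absurdly large section size
  return 0;
}
```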
@@ -871,6 +874,84 @@ STREAM_TEST(TestModuleWithZeroFunctions) {
CHECK(tester.IsPromiseFulfilled());
}
+STREAM_TEST(TestModuleWithMultipleFunctions) {
+ StreamTester tester;
+
+ uint8_t code[] = {
+ U32V_1(4), // body size
+ U32V_1(0), // locals count
+ kExprGetLocal, 0, kExprEnd // body
+ };
+
+ const uint8_t bytes[] = {
+ WASM_MODULE_HEADER, // module header
+ kTypeSectionCode, // section code
+ U32V_1(1 + SIZEOF_SIG_ENTRY_x_x), // section size
+ U32V_1(1), // type count
+ SIG_ENTRY_x_x(kLocalI32, kLocalI32), // signature entry
+ kFunctionSectionCode, // section code
+ U32V_1(1 + 3), // section size
+ U32V_1(3), // functions count
+ 0, // signature index
+ 0, // signature index
+ 0, // signature index
+ kCodeSectionCode, // section code
+ U32V_1(1 + arraysize(code) * 3), // section size
+ U32V_1(3), // functions count
+ };
+
+ tester.OnBytesReceived(bytes, arraysize(bytes));
+ tester.OnBytesReceived(code, arraysize(code));
+ tester.OnBytesReceived(code, arraysize(code));
+ tester.RunCompilerTasks();
+ tester.OnBytesReceived(code, arraysize(code));
+ tester.FinishStream();
+ tester.RunCompilerTasks();
+ CHECK(tester.IsPromiseFulfilled());
+}
+
+STREAM_TEST(TestModuleWithDataSection) {
+ StreamTester tester;
+
+ uint8_t code[] = {
+ U32V_1(4), // body size
+ U32V_1(0), // locals count
+ kExprGetLocal, 0, kExprEnd // body
+ };
+
+ const uint8_t bytes[] = {
+ WASM_MODULE_HEADER, // module header
+ kTypeSectionCode, // section code
+ U32V_1(1 + SIZEOF_SIG_ENTRY_x_x), // section size
+ U32V_1(1), // type count
+ SIG_ENTRY_x_x(kLocalI32, kLocalI32), // signature entry
+ kFunctionSectionCode, // section code
+ U32V_1(1 + 3), // section size
+ U32V_1(3), // functions count
+ 0, // signature index
+ 0, // signature index
+ 0, // signature index
+ kCodeSectionCode, // section code
+ U32V_1(1 + arraysize(code) * 3), // section size
+ U32V_1(3), // functions count
+ };
+
+ const uint8_t data_section[] = {
+ kDataSectionCode, // section code
+ U32V_1(1), // section size
+ U32V_1(0), // data segment count
+ };
+ tester.OnBytesReceived(bytes, arraysize(bytes));
+ tester.OnBytesReceived(code, arraysize(code));
+ tester.OnBytesReceived(code, arraysize(code));
+ tester.OnBytesReceived(code, arraysize(code));
+ tester.RunCompilerTasks();
+ tester.OnBytesReceived(data_section, arraysize(data_section));
+ tester.RunCompilerTasks();
+ tester.FinishStream();
+ tester.RunCompilerTasks();
+ CHECK(tester.IsPromiseFulfilled());
+}
// Test that all bytes arrive before doing any compilation. FinishStream is
// called immediately.
STREAM_TEST(TestModuleWithImportedFunction) {
@@ -893,6 +974,40 @@ STREAM_TEST(TestModuleWithImportedFunction) {
CHECK(tester.IsPromiseFulfilled());
}
+
+STREAM_TEST(TestModuleWithErrorAfterDataSection) {
+ StreamTester tester;
+
+ const uint8_t bytes[] = {
+ WASM_MODULE_HEADER, // module header
+ kTypeSectionCode, // section code
+ U32V_1(1 + SIZEOF_SIG_ENTRY_x_x), // section size
+ U32V_1(1), // type count
+ SIG_ENTRY_x_x(kLocalI32, kLocalI32), // signature entry
+ kFunctionSectionCode, // section code
+ U32V_1(1 + 1), // section size
+ U32V_1(1), // functions count
+ 0, // signature index
+ kCodeSectionCode, // section code
+ U32V_1(6), // section size
+ U32V_1(1), // functions count
+ U32V_1(4), // body size
+ U32V_1(0), // locals count
+ kExprGetLocal, // some code
+ 0, // some code
+ kExprEnd, // some code
+ kDataSectionCode, // section code
+ U32V_1(1), // section size
+ U32V_1(0), // data segment count
+ kUnknownSectionCode, // section code
+ U32V_1(1), // invalid section size
+ };
+
+ tester.OnBytesReceived(bytes, arraysize(bytes));
+ tester.FinishStream();
+ tester.RunCompilerTasks();
+ CHECK(tester.IsPromiseRejected());
+}
#undef STREAM_TEST
} // namespace wasm
diff --git a/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc b/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
index 22b51bfae5..48640ef1e5 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
@@ -22,12 +22,10 @@ namespace wasm {
namespace {
void CheckLocations(
- WasmCompiledModule *compiled_module, debug::Location start,
- debug::Location end,
+ WasmSharedModuleData* shared, debug::Location start, debug::Location end,
std::initializer_list<debug::Location> expected_locations_init) {
std::vector<debug::BreakLocation> locations;
- bool success =
- compiled_module->GetPossibleBreakpoints(start, end, &locations);
+ bool success = shared->GetPossibleBreakpoints(start, end, &locations);
CHECK(success);
printf("got %d locations: ", static_cast<int>(locations.size()));
@@ -46,11 +44,11 @@ void CheckLocations(
locations[i].GetColumnNumber());
}
}
-void CheckLocationsFail(WasmCompiledModule *compiled_module,
- debug::Location start, debug::Location end) {
+
+void CheckLocationsFail(WasmSharedModuleData* shared, debug::Location start,
+ debug::Location end) {
std::vector<debug::BreakLocation> locations;
- bool success =
- compiled_module->GetPossibleBreakpoints(start, end, &locations);
+ bool success = shared->GetPossibleBreakpoints(start, end, &locations);
CHECK(!success);
}
@@ -269,24 +267,26 @@ WASM_COMPILED_EXEC_TEST(WasmCollectPossibleBreakpoints) {
BUILD(runner, WASM_NOP, WASM_I32_ADD(WASM_ZERO, WASM_ONE));
- Handle<WasmInstanceObject> instance = runner.builder().instance_object();
+ WasmInstanceObject* instance = *runner.builder().instance_object();
+ WasmSharedModuleData* shared = instance->compiled_module()->shared();
+
std::vector<debug::Location> locations;
// Check all locations for function 0.
- CheckLocations(instance->compiled_module(), {0, 0}, {1, 0},
+ CheckLocations(shared, {0, 0}, {1, 0},
{{0, 1}, {0, 2}, {0, 4}, {0, 6}, {0, 7}});
// Check a range ending at an instruction.
- CheckLocations(instance->compiled_module(), {0, 2}, {0, 4}, {{0, 2}});
+ CheckLocations(shared, {0, 2}, {0, 4}, {{0, 2}});
// Check a range ending one behind an instruction.
- CheckLocations(instance->compiled_module(), {0, 2}, {0, 5}, {{0, 2}, {0, 4}});
+ CheckLocations(shared, {0, 2}, {0, 5}, {{0, 2}, {0, 4}});
// Check a range starting at an instruction.
- CheckLocations(instance->compiled_module(), {0, 7}, {0, 8}, {{0, 7}});
+ CheckLocations(shared, {0, 7}, {0, 8}, {{0, 7}});
// Check from an instruction to beginning of next function.
- CheckLocations(instance->compiled_module(), {0, 7}, {1, 0}, {{0, 7}});
+ CheckLocations(shared, {0, 7}, {1, 0}, {{0, 7}});
// Check from end of one function (no valid instruction position) to beginning
// of next function. Must be empty, but not fail.
- CheckLocations(instance->compiled_module(), {0, 8}, {1, 0}, {});
+ CheckLocations(shared, {0, 8}, {1, 0}, {});
// Check from one after the end of the function. Must fail.
- CheckLocationsFail(instance->compiled_module(), {0, 9}, {1, 0});
+ CheckLocationsFail(shared, {0, 9}, {1, 0});
}
WASM_COMPILED_EXEC_TEST(WasmSimpleBreak) {
diff --git a/deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc b/deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc
index 818433bd57..16c525945f 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc
@@ -120,7 +120,7 @@ TEST(TestArgumentPassing_double_int64) {
WASM_I64V_1(32))),
WASM_CALL_FUNCTION0(f2.function_index())},
[](int32_t a, int32_t b) {
- int64_t a64 = static_cast<int64_t>(a) & 0xffffffff;
+ int64_t a64 = static_cast<int64_t>(a) & 0xFFFFFFFF;
int64_t b64 = static_cast<int64_t>(b) << 32;
return static_cast<double>(a64 | b64);
});
@@ -223,8 +223,8 @@ TEST(TestArgumentPassing_AllTypes) {
WASM_GET_LOCAL(4), // fourth arg
WASM_CALL_FUNCTION0(f2.function_index())},
[](int32_t a, int32_t b, int32_t c, float d, double e) {
- return 0. + a + (static_cast<int64_t>(b) & 0xffffffff) +
- ((static_cast<int64_t>(c) & 0xffffffff) << 32) + d + e;
+ return 0. + a + (static_cast<int64_t>(b) & 0xFFFFFFFF) +
+ ((static_cast<int64_t>(c) & 0xFFFFFFFF) << 32) + d + e;
});
auto CheckCall = [&](int32_t a, int64_t b, float c, double d) {
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.cc b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
index 33090cfb2a..4fa1fb1c7a 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.cc
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
@@ -15,7 +15,7 @@ namespace wasm {
TestingModuleBuilder::TestingModuleBuilder(
Zone* zone, WasmExecutionMode mode,
- compiler::RuntimeExceptionSupport exception_support)
+ compiler::RuntimeExceptionSupport exception_support, LowerSimd lower_simd)
: test_module_ptr_(&test_module_),
isolate_(CcTest::InitIsolateOnce()),
global_offset(0),
@@ -24,7 +24,7 @@ TestingModuleBuilder::TestingModuleBuilder(
interpreter_(nullptr),
execution_mode_(mode),
runtime_exception_support_(exception_support),
- lower_simd_(mode == kExecuteSimdLowered) {
+ lower_simd_(lower_simd) {
WasmJs::Install(isolate_, true);
test_module_.globals_size = kMaxGlobalsSize;
memset(globals_data_, 0, sizeof(globals_data_));
@@ -41,9 +41,9 @@ byte* TestingModuleBuilder::AddMemory(uint32_t size) {
DCHECK(!instance_object_->has_memory_object());
test_module_.has_memory = true;
const bool enable_guard_regions =
- trap_handler::UseTrapHandler() && test_module_.is_wasm();
+ trap_handler::IsTrapHandlerEnabled() && test_module_.is_wasm();
uint32_t alloc_size =
- enable_guard_regions ? RoundUp(size, base::OS::CommitPageSize()) : size;
+ enable_guard_regions ? RoundUp(size, CommitPageSize()) : size;
Handle<JSArrayBuffer> new_buffer =
wasm::NewArrayBuffer(isolate_, alloc_size, enable_guard_regions);
CHECK(!new_buffer.is_null());
@@ -96,17 +96,15 @@ uint32_t TestingModuleBuilder::AddJsFunction(
*v8::Local<v8::Function>::Cast(CompileRun(source))));
uint32_t index = AddFunction(sig, nullptr);
js_imports_table->set(0, *isolate_->native_context());
+ // TODO(6792): No longer needed once WebAssembly code is off heap.
+ CodeSpaceMemoryModificationScope modification_scope(isolate_->heap());
+ Handle<Code> code = compiler::CompileWasmToJSWrapper(
+ isolate_, jsfunc, sig, index, test_module_.origin(),
+ trap_handler::IsTrapHandlerEnabled(), js_imports_table);
if (FLAG_wasm_jit_to_native) {
native_module_->ResizeCodeTableForTest(index);
- Handle<Code> wrapper = compiler::CompileWasmToJSWrapper(
- isolate_, jsfunc, sig, index, test_module_.origin(), js_imports_table);
- native_module_->AddCodeCopy(wrapper, wasm::WasmCode::WasmToJsWrapper,
- index);
+ native_module_->AddCodeCopy(code, wasm::WasmCode::kWasmToJsWrapper, index);
} else {
- // TODO(6792): No longer needed once WebAssembly code is off heap.
- CodeSpaceMemoryModificationScope modification_scope(isolate_->heap());
- Handle<Code> code = compiler::CompileWasmToJSWrapper(
- isolate_, jsfunc, sig, index, test_module_.origin(), js_imports_table);
function_code_[index] = code;
}
return index;
@@ -120,10 +118,11 @@ Handle<JSFunction> TestingModuleBuilder::WrapCode(uint32_t index) {
: WasmCodeWrapper(function_code_[index]);
byte* context_address =
test_module_.has_memory
- ? reinterpret_cast<byte*>(instance_object_->wasm_context())
+ ? reinterpret_cast<byte*>(instance_object_->wasm_context()->get())
: nullptr;
Handle<Code> ret_code = compiler::CompileJSToWasmWrapper(
- isolate_, &test_module_, code, index, context_address);
+ isolate_, &test_module_, code, index, context_address,
+ trap_handler::IsTrapHandlerEnabled());
Handle<JSFunction> ret = WasmExportedFunction::New(
isolate_, instance_object(), MaybeHandle<String>(),
static_cast<int>(index),
@@ -133,19 +132,20 @@ Handle<JSFunction> TestingModuleBuilder::WrapCode(uint32_t index) {
// Add weak reference to exported functions.
Handle<WasmCompiledModule> compiled_module(
instance_object()->compiled_module(), isolate_);
- Handle<FixedArray> old_arr = compiled_module->weak_exported_functions();
+ Handle<FixedArray> old_arr(compiled_module->weak_exported_functions(),
+ isolate_);
Handle<FixedArray> new_arr =
isolate_->factory()->NewFixedArray(old_arr->length() + 1);
old_arr->CopyTo(0, *new_arr, 0, old_arr->length());
Handle<WeakCell> weak_fn = isolate_->factory()->NewWeakCell(ret);
new_arr->set(old_arr->length(), *weak_fn);
- compiled_module->set_weak_exported_functions(new_arr);
+ compiled_module->set_weak_exported_functions(*new_arr);
return ret;
}
-void TestingModuleBuilder::AddIndirectFunctionTable(uint16_t* function_indexes,
- uint32_t table_size) {
+void TestingModuleBuilder::AddIndirectFunctionTable(
+ const uint16_t* function_indexes, uint32_t table_size) {
test_module_.function_tables.emplace_back();
WasmIndirectFunctionTable& table = test_module_.function_tables.back();
table.initial_size = table_size;
@@ -155,14 +155,10 @@ void TestingModuleBuilder::AddIndirectFunctionTable(uint16_t* function_indexes,
table.values.push_back(function_indexes[i]);
}
+ FixedArray* func_table = *isolate_->factory()->NewFixedArray(
+ table_size * compiler::kFunctionTableEntrySize);
function_tables_.push_back(
- isolate_->global_handles()
- ->Create(*isolate_->factory()->NewFixedArray(table_size))
- .address());
- signature_tables_.push_back(
- isolate_->global_handles()
- ->Create(*isolate_->factory()->NewFixedArray(table_size))
- .address());
+ isolate_->global_handles()->Create(func_table).address());
}
void TestingModuleBuilder::PopulateIndirectFunctionTable() {
@@ -172,30 +168,32 @@ void TestingModuleBuilder::PopulateIndirectFunctionTable() {
WasmIndirectFunctionTable& table = test_module_.function_tables[i];
Handle<FixedArray> function_table(
reinterpret_cast<FixedArray**>(function_tables_[i]));
- Handle<FixedArray> signature_table(
- reinterpret_cast<FixedArray**>(signature_tables_[i]));
int table_size = static_cast<int>(table.values.size());
for (int j = 0; j < table_size; j++) {
WasmFunction& function = test_module_.functions[table.values[j]];
- signature_table->set(
- j, Smi::FromInt(test_module_.signature_map.Find(function.sig)));
+ function_table->set(
+ compiler::FunctionTableSigOffset(j),
+ Smi::FromInt(test_module_.signature_map.Find(function.sig)));
if (FLAG_wasm_jit_to_native) {
Handle<Foreign> foreign_holder = isolate_->factory()->NewForeign(
native_module_->GetCode(function.func_index)
->instructions()
.start(),
TENURED);
- function_table->set(j, *foreign_holder);
+ function_table->set(compiler::FunctionTableCodeOffset(j),
+ *foreign_holder);
} else {
- function_table->set(j, *function_code_[function.func_index]);
+ function_table->set(compiler::FunctionTableCodeOffset(j),
+ *function_code_[function.func_index]);
}
}
}
}
uint32_t TestingModuleBuilder::AddBytes(Vector<const byte> bytes) {
- Handle<SeqOneByteString> old_bytes(
- instance_object_->compiled_module()->module_bytes(), isolate_);
+ Handle<WasmSharedModuleData> shared(
+ instance_object_->compiled_module()->shared(), isolate_);
+ Handle<SeqOneByteString> old_bytes(shared->module_bytes(), isolate_);
uint32_t old_size = static_cast<uint32_t>(old_bytes->length());
// Avoid placing strings at offset 0, this might be interpreted as "not
// set", e.g. for function names.
@@ -205,14 +203,13 @@ uint32_t TestingModuleBuilder::AddBytes(Vector<const byte> bytes) {
memcpy(new_bytes.start() + bytes_offset, bytes.start(), bytes.length());
Handle<SeqOneByteString> new_bytes_str = Handle<SeqOneByteString>::cast(
isolate_->factory()->NewStringFromOneByte(new_bytes).ToHandleChecked());
- instance_object_->compiled_module()->shared()->set_module_bytes(
- *new_bytes_str);
+ shared->set_module_bytes(*new_bytes_str);
return bytes_offset;
}
compiler::ModuleEnv TestingModuleBuilder::CreateModuleEnv() {
- return {&test_module_, function_tables_, signature_tables_, function_code_,
- Handle<Code>::null()};
+ return {&test_module_, function_tables_, function_code_, Handle<Code>::null(),
+ trap_handler::IsTrapHandlerEnabled()};
}
const WasmGlobal* TestingModuleBuilder::AddGlobal(ValueType type) {
@@ -243,7 +240,7 @@ Handle<WasmInstanceObject> TestingModuleBuilder::InitInstanceObject() {
Handle<FixedArray> export_wrappers = isolate_->factory()->NewFixedArray(0);
Handle<WasmCompiledModule> compiled_module = WasmCompiledModule::New(
isolate_, test_module_ptr_, code_table, export_wrappers, function_tables_,
- signature_tables_);
+ trap_handler::IsTrapHandlerEnabled());
compiled_module->OnWasmModuleDecodingComplete(shared_module_data);
// This method is called when we initialize TestEnvironment. We don't
// have a memory yet, so we won't create it here. We'll update the
@@ -251,43 +248,55 @@ Handle<WasmInstanceObject> TestingModuleBuilder::InitInstanceObject() {
native_module_ = compiled_module->GetNativeModule();
Handle<FixedArray> weak_exported = isolate_->factory()->NewFixedArray(0);
- compiled_module->set_weak_exported_functions(weak_exported);
+ compiled_module->set_weak_exported_functions(*weak_exported);
DCHECK(WasmCompiledModule::IsWasmCompiledModule(*compiled_module));
script->set_wasm_compiled_module(*compiled_module);
auto instance = WasmInstanceObject::New(isolate_, compiled_module);
instance->wasm_context()->get()->globals_start = globals_data_;
Handle<WeakCell> weak_instance = isolate()->factory()->NewWeakCell(instance);
- compiled_module->set_weak_owning_instance(weak_instance);
+ compiled_module->set_weak_owning_instance(*weak_instance);
return instance;
}
-void TestBuildingGraph(
- Zone* zone, compiler::JSGraph* jsgraph, compiler::ModuleEnv* module,
- FunctionSig* sig, compiler::SourcePositionTable* source_position_table,
- const byte* start, const byte* end,
- compiler::RuntimeExceptionSupport runtime_exception_support) {
- compiler::WasmGraphBuilder builder(
- module, zone, jsgraph, CEntryStub(jsgraph->isolate(), 1).GetCode(), sig,
- source_position_table, runtime_exception_support);
-
+void TestBuildingGraphWithBuilder(compiler::WasmGraphBuilder* builder,
+ Zone* zone, FunctionSig* sig,
+ const byte* start, const byte* end) {
DecodeResult result =
- BuildTFGraph(zone->allocator(), &builder, sig, start, end);
+ BuildTFGraph(zone->allocator(), builder, sig, start, end);
if (result.failed()) {
+#ifdef DEBUG
if (!FLAG_trace_wasm_decoder) {
// Retry the compilation with the tracing flag on, to help in debugging.
FLAG_trace_wasm_decoder = true;
- result = BuildTFGraph(zone->allocator(), &builder, sig, start, end);
+ result = BuildTFGraph(zone->allocator(), builder, sig, start, end);
}
+#endif
uint32_t pc = result.error_offset();
- std::ostringstream str;
- str << "Verification failed; pc = +" << pc
- << ", msg = " << result.error_msg().c_str();
- FATAL(str.str().c_str());
+ FATAL("Verification failed; pc = +%x, msg = %s", pc,
+ result.error_msg().c_str());
}
- builder.LowerInt64();
+ builder->LowerInt64();
if (!CpuFeatures::SupportsWasmSimd128()) {
- builder.SimdScalarLoweringForTesting();
+ builder->SimdScalarLoweringForTesting();
+ }
+}
+
+void TestBuildingGraph(
+ Zone* zone, compiler::JSGraph* jsgraph, compiler::ModuleEnv* module,
+ FunctionSig* sig, compiler::SourcePositionTable* source_position_table,
+ const byte* start, const byte* end,
+ compiler::RuntimeExceptionSupport runtime_exception_support) {
+ if (module) {
+ compiler::WasmGraphBuilder builder(
+ module, zone, jsgraph, CEntryStub(jsgraph->isolate(), 1).GetCode(), sig,
+ source_position_table, runtime_exception_support);
+ TestBuildingGraphWithBuilder(&builder, zone, sig, start, end);
+ } else {
+ compiler::WasmGraphBuilder builder(
+ nullptr, zone, jsgraph, CEntryStub(jsgraph->isolate(), 1).GetCode(),
+ sig, source_position_table, runtime_exception_support);
+ TestBuildingGraphWithBuilder(&builder, zone, sig, start, end);
}
}
@@ -428,7 +437,7 @@ void WasmFunctionCompiler::Build(const byte* start, const byte* end) {
if (FLAG_wasm_jit_to_native) {
native_module->ResizeCodeTableForTest(function_->func_index);
}
- Handle<SeqOneByteString> wire_bytes(compiled_module->module_bytes(),
+ Handle<SeqOneByteString> wire_bytes(compiled_module->shared()->module_bytes(),
isolate());
compiler::ModuleEnv module_env = builder_->CreateModuleEnv();
@@ -476,7 +485,7 @@ void WasmFunctionCompiler::Build(const byte* start, const byte* end) {
builder_->SetFunctionCode(function_index(), code);
// Add to code table.
- Handle<FixedArray> code_table = compiled_module->code_table();
+ Handle<FixedArray> code_table(compiled_module->code_table(), isolate());
if (static_cast<int>(function_index()) >= code_table->length()) {
Handle<FixedArray> new_arr = isolate()->factory()->NewFixedArray(
static_cast<int>(function_index()) + 1);
@@ -487,11 +496,11 @@ void WasmFunctionCompiler::Build(const byte* start, const byte* end) {
DCHECK(code_table->get(static_cast<int>(function_index()))
->IsUndefined(isolate()));
code_table->set(static_cast<int>(function_index()), *code);
- if (trap_handler::UseTrapHandler()) {
+ if (trap_handler::IsTrapHandlerEnabled()) {
UnpackAndRegisterProtectedInstructionsGC(isolate(), code_table);
}
} else {
- if (trap_handler::UseTrapHandler()) {
+ if (trap_handler::IsTrapHandlerEnabled()) {
UnpackAndRegisterProtectedInstructions(isolate(), native_module);
}
}
@@ -516,7 +525,7 @@ WasmFunctionCompiler::WasmFunctionCompiler(Zone* zone, FunctionSig* sig,
WasmFunctionCompiler::~WasmFunctionCompiler() {
if (!FLAG_wasm_jit_to_native) {
- if (trap_handler::UseTrapHandler() &&
+ if (trap_handler::IsTrapHandlerEnabled() &&
!builder_->GetFunctionCode(function_index()).is_null()) {
const int handler_index = builder_->GetFunctionCode(function_index())
.GetCode()
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.h b/deps/v8/test/cctest/wasm/wasm-run-utils.h
index f46d1e3d61..c0ce21533f 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.h
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.h
@@ -25,6 +25,7 @@
#include "src/trap-handler/trap-handler.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/local-decl-encoder.h"
+#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-external-refs.h"
#include "src/wasm/wasm-interpreter.h"
#include "src/wasm/wasm-js.h"
@@ -50,23 +51,23 @@ constexpr uint32_t kMaxGlobalsSize = 128;
enum WasmExecutionMode {
kExecuteInterpreter,
kExecuteTurbofan,
- kExecuteLiftoff,
- // TODO(bug:7028): Introduce another enum for simd lowering.
- kExecuteSimdLowered
+ kExecuteLiftoff
};
+enum LowerSimd : bool { kLowerSimd = true, kNoLowerSimd = false };
+
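
Threading LowerSimd through as a bool-backed enum rather than a raw bool keeps call sites self-documenting and rejects accidental literals, since integral types do not implicitly convert to an unscoped enum. A sketch of the idiom (the Configure function is hypothetical, not V8 code):

```cpp
enum LowerSimd : bool { kLowerSimd = true, kNoLowerSimd = false };

void Configure(LowerSimd lower_simd) { (void)lower_simd; }

int main() {
  Configure(kNoLowerSimd);  // intent readable at the call site
  // Configure(true);       // would not compile: no implicit bool -> enum
  return 0;
}
```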
using compiler::CallDescriptor;
using compiler::MachineTypeForC;
using compiler::Node;
// TODO(titzer): check traps more robustly in tests.
-// Currently, in tests, we just return 0xdeadbeef from the function in which
+// Currently, in tests, we just return 0xDEADBEEF from the function in which
// the trap occurs if the runtime context is not available to throw a JavaScript
// exception.
#define CHECK_TRAP32(x) \
- CHECK_EQ(0xdeadbeef, (bit_cast<uint32_t>(x)) & 0xFFFFFFFF)
+ CHECK_EQ(0xDEADBEEF, (bit_cast<uint32_t>(x)) & 0xFFFFFFFF)
#define CHECK_TRAP64(x) \
- CHECK_EQ(0xdeadbeefdeadbeef, (bit_cast<uint64_t>(x)) & 0xFFFFFFFFFFFFFFFF)
+ CHECK_EQ(0xDEADBEEFDEADBEEF, (bit_cast<uint64_t>(x)) & 0xFFFFFFFFFFFFFFFF)
#define CHECK_TRAP(x) CHECK_TRAP32(x)
#define WASM_WRAPPER_RETURN_VALUE 8754
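
The 0xDEADBEEF marker reaches these checks as a bit pattern, which is why CHECK_TRAP32 goes through bit_cast: a trapped float-returning call carries the marker in its bits, not its numeric value. A small illustration using memcpy in place of bit_cast:

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  const uint32_t kTrapMarker = 0xDEADBEEF;
  float trapped;
  memcpy(&trapped, &kTrapMarker, sizeof(trapped));  // the "returned" float
  uint32_t bits;
  memcpy(&bits, &trapped, sizeof(bits));  // what bit_cast<uint32_t> sees
  assert(bits == kTrapMarker);
  return 0;
}
```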
@@ -84,7 +85,7 @@ using compiler::Node;
class TestingModuleBuilder {
public:
TestingModuleBuilder(Zone*, WasmExecutionMode,
- compiler::RuntimeExceptionSupport);
+ compiler::RuntimeExceptionSupport, LowerSimd);
void ChangeOriginToAsmjs() { test_module_.set_origin(kAsmJsOrigin); }
@@ -190,7 +191,7 @@ class TestingModuleBuilder {
function_code_[index] = code;
}
- void AddIndirectFunctionTable(uint16_t* function_indexes,
+ void AddIndirectFunctionTable(const uint16_t* function_indexes,
uint32_t table_size);
void PopulateIndirectFunctionTable();
@@ -203,7 +204,7 @@ class TestingModuleBuilder {
WasmInterpreter* interpreter() { return interpreter_; }
bool interpret() { return interpreter_ != nullptr; }
- bool lower_simd() { return lower_simd_; }
+ LowerSimd lower_simd() { return lower_simd_; }
Isolate* isolate() { return isolate_; }
Handle<WasmInstanceObject> instance_object() { return instance_object_; }
WasmCodeWrapper GetFunctionCode(uint32_t index) {
@@ -222,6 +223,7 @@ class TestingModuleBuilder {
if (!linked_) {
native_module_->LinkAll();
linked_ = true;
+ native_module_->SetExecutable(true);
}
}
@@ -242,7 +244,6 @@ class TestingModuleBuilder {
uint32_t mem_size_;
std::vector<Handle<Code>> function_code_;
std::vector<GlobalHandleAddress> function_tables_;
- std::vector<GlobalHandleAddress> signature_tables_;
V8_ALIGNED(16) byte globals_data_[kMaxGlobalsSize];
WasmInterpreter* interpreter_;
WasmExecutionMode execution_mode_;
@@ -250,7 +251,7 @@ class TestingModuleBuilder {
NativeModule* native_module_;
bool linked_ = false;
compiler::RuntimeExceptionSupport runtime_exception_support_;
- bool lower_simd_;
+ LowerSimd lower_simd_;
const WasmGlobal* AddGlobal(ValueType type);
@@ -371,9 +372,10 @@ class WasmFunctionCompiler : public compiler::GraphAndBuilders {
class WasmRunnerBase : public HandleAndZoneScope {
public:
WasmRunnerBase(WasmExecutionMode execution_mode, int num_params,
- compiler::RuntimeExceptionSupport runtime_exception_support)
+ compiler::RuntimeExceptionSupport runtime_exception_support,
+ LowerSimd lower_simd)
: zone_(&allocator_, ZONE_NAME),
- builder_(&zone_, execution_mode, runtime_exception_support),
+ builder_(&zone_, execution_mode, runtime_exception_support, lower_simd),
wrapper_(&zone_, num_params) {}
// Builds a graph from the given Wasm code and generates the machine
@@ -452,20 +454,25 @@ class WasmRunner : public WasmRunnerBase {
WasmRunner(WasmExecutionMode execution_mode,
const char* main_fn_name = "main",
compiler::RuntimeExceptionSupport runtime_exception_support =
- compiler::kNoRuntimeExceptionSupport)
+ compiler::kNoRuntimeExceptionSupport,
+ LowerSimd lower_simd = kNoLowerSimd)
: WasmRunnerBase(execution_mode, sizeof...(ParamTypes),
- runtime_exception_support) {
+ runtime_exception_support, lower_simd) {
NewFunction<ReturnType, ParamTypes...>(main_fn_name);
if (!interpret()) {
wrapper_.Init<ReturnType, ParamTypes...>(functions_[0]->descriptor());
}
}
+ WasmRunner(WasmExecutionMode execution_mode, LowerSimd lower_simd)
+ : WasmRunner(execution_mode, "main", compiler::kNoRuntimeExceptionSupport,
+ lower_simd) {}
+
ReturnType Call(ParamTypes... p) {
DCHECK(compiled_);
if (interpret()) return CallInterpreter(p...);
- ReturnType return_value = static_cast<ReturnType>(0xdeadbeefdeadbeef);
+ ReturnType return_value = static_cast<ReturnType>(0xDEADBEEFDEADBEEF);
WasmRunnerBase::trap_happened = false;
auto trap_callback = []() -> void {
WasmRunnerBase::trap_happened = true;
@@ -485,7 +492,7 @@ class WasmRunner : public WasmRunnerBase {
static_cast<void*>(&return_value));
CHECK_EQ(WASM_WRAPPER_RETURN_VALUE, result);
return WasmRunnerBase::trap_happened
- ? static_cast<ReturnType>(0xdeadbeefdeadbeef)
+ ? static_cast<ReturnType>(0xDEADBEEFDEADBEEF)
: return_value;
}
@@ -502,7 +509,7 @@ class WasmRunner : public WasmRunnerBase {
return val.to<ReturnType>();
} else if (thread->state() == WasmInterpreter::TRAPPED) {
// TODO(titzer): return the correct trap code
- int64_t result = 0xdeadbeefdeadbeef;
+ int64_t result = 0xDEADBEEFDEADBEEF;
return static_cast<ReturnType>(result);
} else {
// TODO(titzer): falling off end
diff --git a/deps/v8/test/common/wasm/wasm-macro-gen.h b/deps/v8/test/common/wasm/wasm-macro-gen.h
index 40718e79aa..83ddaa6b72 100644
--- a/deps/v8/test/common/wasm/wasm-macro-gen.h
+++ b/deps/v8/test/common/wasm/wasm-macro-gen.h
@@ -557,25 +557,34 @@ inline WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
#define WASM_I64_REINTERPRET_F64(x) x, kExprI64ReinterpretF64
//------------------------------------------------------------------------------
+// Numeric operations
+//------------------------------------------------------------------------------
+#define WASM_NUMERIC_OP(op) kNumericPrefix, static_cast<byte>(op)
+#define WASM_I32_SCONVERT_SAT_F32(x) x, WASM_NUMERIC_OP(kExprI32SConvertSatF32)
+#define WASM_I32_UCONVERT_SAT_F32(x) x, WASM_NUMERIC_OP(kExprI32UConvertSatF32)
+#define WASM_I32_SCONVERT_SAT_F64(x) x, WASM_NUMERIC_OP(kExprI32SConvertSatF64)
+#define WASM_I32_UCONVERT_SAT_F64(x) x, WASM_NUMERIC_OP(kExprI32UConvertSatF64)
+
+//------------------------------------------------------------------------------
// Memory Operations.
//------------------------------------------------------------------------------
#define WASM_GROW_MEMORY(x) x, kExprGrowMemory, 0
#define WASM_MEMORY_SIZE kExprMemorySize, 0
-#define SIG_ENTRY_v_v kWasmFunctionTypeForm, 0, 0
+#define SIG_ENTRY_v_v kWasmFunctionTypeCode, 0, 0
#define SIZEOF_SIG_ENTRY_v_v 3
-#define SIG_ENTRY_v_x(a) kWasmFunctionTypeForm, 1, a, 0
-#define SIG_ENTRY_v_xx(a, b) kWasmFunctionTypeForm, 2, a, b, 0
-#define SIG_ENTRY_v_xxx(a, b, c) kWasmFunctionTypeForm, 3, a, b, c, 0
+#define SIG_ENTRY_v_x(a) kWasmFunctionTypeCode, 1, a, 0
+#define SIG_ENTRY_v_xx(a, b) kWasmFunctionTypeCode, 2, a, b, 0
+#define SIG_ENTRY_v_xxx(a, b, c) kWasmFunctionTypeCode, 3, a, b, c, 0
#define SIZEOF_SIG_ENTRY_v_x 4
#define SIZEOF_SIG_ENTRY_v_xx 5
#define SIZEOF_SIG_ENTRY_v_xxx 6
-#define SIG_ENTRY_x(r) kWasmFunctionTypeForm, 0, 1, r
-#define SIG_ENTRY_x_x(r, a) kWasmFunctionTypeForm, 1, a, 1, r
-#define SIG_ENTRY_x_xx(r, a, b) kWasmFunctionTypeForm, 2, a, b, 1, r
-#define SIG_ENTRY_x_xxx(r, a, b, c) kWasmFunctionTypeForm, 3, a, b, c, 1, r
+#define SIG_ENTRY_x(r) kWasmFunctionTypeCode, 0, 1, r
+#define SIG_ENTRY_x_x(r, a) kWasmFunctionTypeCode, 1, a, 1, r
+#define SIG_ENTRY_x_xx(r, a, b) kWasmFunctionTypeCode, 2, a, b, 1, r
+#define SIG_ENTRY_x_xxx(r, a, b, c) kWasmFunctionTypeCode, 3, a, b, c, 1, r
#define SIZEOF_SIG_ENTRY_x 4
#define SIZEOF_SIG_ENTRY_x_x 5
#define SIZEOF_SIG_ENTRY_x_xx 6
diff --git a/deps/v8/test/common/wasm/wasm-module-runner.cc b/deps/v8/test/common/wasm/wasm-module-runner.cc
index 8150fc08a8..d89d87005e 100644
--- a/deps/v8/test/common/wasm/wasm-module-runner.cc
+++ b/deps/v8/test/common/wasm/wasm-module-runner.cc
@@ -23,7 +23,7 @@ namespace wasm {
namespace testing {
uint32_t GetInitialMemSize(const WasmModule* module) {
- return WasmModule::kPageSize * module->initial_pages;
+ return kWasmPageSize * module->initial_pages;
}
std::unique_ptr<WasmModule> DecodeWasmModuleForTesting(
@@ -173,9 +173,9 @@ int32_t InterpretWasmModule(Isolate* isolate,
isolate->clear_pending_exception();
*possible_nondeterminism = thread->PossibleNondeterminism();
- if (stack_overflow) return 0xdeadbeef;
+ if (stack_overflow) return 0xDEADBEEF;
- if (thread->state() == WasmInterpreter::TRAPPED) return 0xdeadbeef;
+ if (thread->state() == WasmInterpreter::TRAPPED) return 0xDEADBEEF;
if (interpreter_result == WasmInterpreter::FINISHED)
return thread->GetReturnValue().to<int32_t>();
diff --git a/deps/v8/test/d8_default.gyp b/deps/v8/test/d8_default.gyp
new file mode 100644
index 0000000000..399623d30a
--- /dev/null
+++ b/deps/v8/test/d8_default.gyp
@@ -0,0 +1,31 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'conditions': [
+ ['test_isolation_mode != "noop"', {
+ 'targets': [
+ {
+ 'target_name': 'd8_default_run',
+ 'type': 'none',
+ 'dependencies': [
+ 'debugger/debugger.gyp:debugger_run',
+ 'intl/intl.gyp:intl_run',
+ 'message/message.gyp:message_run',
+ 'mjsunit/mjsunit.gyp:mjsunit_run',
+ 'preparser/preparser.gyp:preparser_run',
+ 'webkit/webkit.gyp:webkit_run',
+ ],
+ 'includes': [
+ '../gypfiles/features.gypi',
+ '../gypfiles/isolate.gypi',
+ ],
+ 'sources': [
+ 'd8_default.isolate',
+ ],
+ },
+ ],
+ }],
+ ],
+}
diff --git a/deps/v8/test/d8_default.isolate b/deps/v8/test/d8_default.isolate
new file mode 100644
index 0000000000..efeae64cae
--- /dev/null
+++ b/deps/v8/test/d8_default.isolate
@@ -0,0 +1,18 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'variables': {
+ 'command': [
+ '../tools/run-tests.py',
+ ],
+ },
+ 'includes': [
+ 'debugger/debugger.isolate',
+ 'intl/intl.isolate',
+ 'message/message.isolate',
+ 'mjsunit/mjsunit.isolate',
+ 'preparser/preparser.isolate',
+ 'webkit/webkit.isolate',
+ ],
+}
diff --git a/deps/v8/test/debugger/debug/debug-modules-set-variable-value.js b/deps/v8/test/debugger/debug/debug-modules-set-variable-value.js
index 61c032f026..1d1c6d5d13 100644
--- a/deps/v8/test/debugger/debug/debug-modules-set-variable-value.js
+++ b/deps/v8/test/debugger/debug/debug-modules-set-variable-value.js
@@ -271,7 +271,7 @@ let salad = 12;
function listener(event, exec_state) {
if (event == Debug.DebugEvent.Break) {
let scope_count = exec_state.frame().scopeCount();
- let module_scope = exec_state.frame().scope(2);
+ let module_scope = exec_state.frame().scope(1);
assertEquals(debug.ScopeType.Module, module_scope.scopeType());
module_scope.setVariableValue('salad', 42);
}
@@ -311,7 +311,7 @@ export let ham = 1;
function listener(event, exec_state) {
if (event == Debug.DebugEvent.Break) {
let scope_count = exec_state.frame().scopeCount();
- let module_scope = exec_state.frame().scope(2);
+ let module_scope = exec_state.frame().scope(1);
assertEquals(debug.ScopeType.Module, module_scope.scopeType());
module_scope.setVariableValue('ham', 2);
}
diff --git a/deps/v8/test/debugger/debug/harmony/modules-debug-scopes2.js b/deps/v8/test/debugger/debug/harmony/modules-debug-scopes2.js
index 8b9b9e8aee..cc1091e73f 100644
--- a/deps/v8/test/debugger/debug/harmony/modules-debug-scopes2.js
+++ b/deps/v8/test/debugger/debug/harmony/modules-debug-scopes2.js
@@ -139,10 +139,10 @@ listener_delegate = function(exec_state) {
debug.ScopeType.Script,
debug.ScopeType.Global], exec_state);
CheckScopeContent(
- {local_var: undefined, exported_var: undefined, imported_var: undefined},
+ {exported_var: undefined, imported_var: undefined},
0, exec_state);
CheckScopeDoesNotHave(
- ["doesnotexist", "local_let", "exported_let", "imported_let"],
+ ["local_var", "doesntexist", "local_let", "exported_let", "imported_let"],
0, exec_state);
};
debugger;
@@ -161,8 +161,9 @@ listener_delegate = function(exec_state) {
debug.ScopeType.Script,
debug.ScopeType.Global], exec_state);
CheckScopeContent(
- {local_let: 1, local_var: 2, exported_let: 3, exported_var: 4,
+ {exported_let: 3, exported_var: 4,
imported_let: 3, imported_var: 4}, 0, exec_state);
+ CheckScopeDoesNotHave(["local_var", "local_let"], 0, exec_state);
};
debugger;
EndTest();
@@ -178,8 +179,9 @@ listener_delegate = function(exec_state) {
debug.ScopeType.Script,
debug.ScopeType.Global], exec_state);
CheckScopeContent(
- {local_let: 11, local_var: 12, exported_let: 13, exported_var: 14,
+ {exported_let: 13, exported_var: 14,
imported_let: 13, imported_var: 14}, 0, exec_state);
+ CheckScopeDoesNotHave(["local_var", "local_let"], 0, exec_state);
};
debugger;
EndTest();
diff --git a/deps/v8/test/debugger/debug/regress/regress-1853.js b/deps/v8/test/debugger/debug/regress/regress-1853.js
index 8c6e9404a7..4cd069f77c 100644
--- a/deps/v8/test/debugger/debug/regress/regress-1853.js
+++ b/deps/v8/test/debugger/debug/regress/regress-1853.js
@@ -25,10 +25,11 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
+// Flags: --expose-gc
// Test whether scripts compiled after setting the break point are
// updated correctly.
+gc();
Debug = debug.Debug;
var break_count = 0;
diff --git a/deps/v8/test/debugger/debug/regress/regress-crbug-481896.js b/deps/v8/test/debugger/debug/regress/regress-crbug-481896.js
index d12398070b..751b62a7b6 100644
--- a/deps/v8/test/debugger/debug/regress/regress-crbug-481896.js
+++ b/deps/v8/test/debugger/debug/regress/regress-crbug-481896.js
@@ -1,8 +1,8 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
-
+// Flags: --expose-gc
+gc();
function static() {
print("> static"); // Break
}
diff --git a/deps/v8/test/debugger/testcfg.py b/deps/v8/test/debugger/testcfg.py
index 71b19d2159..e287077ec3 100644
--- a/deps/v8/test/debugger/testcfg.py
+++ b/deps/v8/test/debugger/testcfg.py
@@ -9,14 +9,9 @@ from testrunner.local import testsuite
from testrunner.objects import testcase
FILES_PATTERN = re.compile(r"//\s+Files:(.*)")
-FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")
MODULE_PATTERN = re.compile(r"^// MODULE$", flags=re.MULTILINE)
-class DebuggerTestSuite(testsuite.TestSuite):
-
- def __init__(self, name, root):
- super(DebuggerTestSuite, self).__init__(name, root)
-
+class TestSuite(testsuite.TestSuite):
def ListTests(self, context):
tests = []
for dirname, dirs, files in os.walk(self.root):
@@ -29,21 +24,23 @@ class DebuggerTestSuite(testsuite.TestSuite):
fullpath = os.path.join(dirname, filename)
relpath = fullpath[len(self.root) + 1 : -3]
testname = relpath.replace(os.path.sep, "/")
- test = testcase.TestCase(self, testname)
+ test = self._create_test(testname)
tests.append(test)
return tests
- def GetParametersForTestCase(self, testcase, context):
- flags = (
- testcase.flags +
- ["--enable-inspector", "--allow-natives-syntax"] +
- context.mode_flags
- )
- source = self.GetSourceForTest(testcase)
- flags_match = re.findall(FLAGS_PATTERN, source)
- for match in flags_match:
- flags += match.strip().split()
+ def _test_class(self):
+ return TestCase
+
+
+class TestCase(testcase.TestCase):
+ def __init__(self, *args, **kwargs):
+ super(TestCase, self).__init__(*args, **kwargs)
+ source = self.get_source()
+ self._source_files = self._parse_source_files(source)
+ self._source_flags = self._parse_source_flags(source)
+
+ def _parse_source_files(self, source):
files_list = [] # List of file names to append to command arguments.
files_match = FILES_PATTERN.search(source);
# Accept several lines of 'Files:'.
@@ -55,24 +52,31 @@ class DebuggerTestSuite(testsuite.TestSuite):
break
files = []
- files.append(os.path.normpath(os.path.join(self.root, "..", "mjsunit", "mjsunit.js")))
- files.append(os.path.join(self.root, "test-api.js"))
- files.extend([ os.path.normpath(os.path.join(self.root, '..', '..', f))
- for f in files_list ])
+ files.append(os.path.normpath(os.path.join(
+ self.suite.root, "..", "mjsunit", "mjsunit.js")))
+ files.append(os.path.join(self.suite.root, "test-api.js"))
+ files.extend([os.path.normpath(os.path.join(self.suite.root, '..', '..', f))
+ for f in files_list])
if MODULE_PATTERN.search(source):
files.append("--module")
- files.append(os.path.join(self.root, testcase.path + self.suffix()))
+ files.append(os.path.join(self.suite.root, self.path + self._get_suffix()))
+ return files
+
+ def _get_files_params(self, ctx):
+ files = self._source_files
+ if ctx.isolates:
+ files = files + ['--isolate'] + files
+ return files
+
+ def _get_source_flags(self):
+ return self._source_flags
- all_files = list(files)
- if context.isolates:
- all_files += ["--isolate"] + files
+ def _get_suite_flags(self, ctx):
+ return ['--enable-inspector', '--allow-natives-syntax']
- return all_files, flags, {}
+ def _get_source_path(self):
+ return os.path.join(self.suite.root, self.path + self._get_suffix())
- def GetSourceForTest(self, testcase):
- filename = os.path.join(self.root, testcase.path + self.suffix())
- with open(filename) as f:
- return f.read()
def GetSuite(name, root):
- return DebuggerTestSuite(name, root)
+ return TestSuite(name, root)
diff --git a/deps/v8/test/default.gyp b/deps/v8/test/default.gyp
index 2c6429bada..d1007b8a96 100644
--- a/deps/v8/test/default.gyp
+++ b/deps/v8/test/default.gyp
@@ -11,6 +11,7 @@
'type': 'none',
'dependencies': [
'cctest/cctest.gyp:cctest_run',
+ 'debugger/debugger.gyp:debugger_run',
'fuzzer/fuzzer.gyp:fuzzer_run',
'inspector/inspector.gyp:inspector-test_run',
'intl/intl.gyp:intl_run',
diff --git a/deps/v8/test/fuzzer/fuzzer-support.cc b/deps/v8/test/fuzzer/fuzzer-support.cc
index beda4899c1..d6cff118bf 100644
--- a/deps/v8/test/fuzzer/fuzzer-support.cc
+++ b/deps/v8/test/fuzzer/fuzzer-support.cc
@@ -89,7 +89,14 @@ bool FuzzerSupport::PumpMessageLoop(
} // namespace v8_fuzzer
-extern "C" int LLVMFuzzerInitialize(int* argc, char*** argv) {
+// Explicitly specify some attributes to avoid issues with the linker dead-
+// stripping the following function on macOS, as it is not called directly
+// by the fuzz target. The libFuzzer runtime uses dlsym() to resolve it.
+#if V8_OS_MACOSX
+__attribute__((used)) __attribute__((visibility("default")))
+#endif // V8_OS_MACOSX
+extern "C" int
+LLVMFuzzerInitialize(int* argc, char*** argv) {
v8_fuzzer::FuzzerSupport::InitializeFuzzerSupport(argc, argv);
return 0;
}
diff --git a/deps/v8/test/fuzzer/fuzzer.gyp b/deps/v8/test/fuzzer/fuzzer.gyp
index 3d76018d55..0c54211290 100644
--- a/deps/v8/test/fuzzer/fuzzer.gyp
+++ b/deps/v8/test/fuzzer/fuzzer.gyp
@@ -90,6 +90,36 @@
],
},
{
+ 'target_name': 'v8_simple_multi_return_fuzzer',
+ 'type': 'executable',
+ 'dependencies': [
+ 'multi_return_fuzzer_lib',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'sources': [
+ 'fuzzer.cc',
+ ],
+ },
+ {
+ 'target_name': 'multi_return_fuzzer_lib',
+ 'type': 'static_library',
+ 'dependencies': [
+ '../../src/v8.gyp:v8_libplatform',
+ 'fuzzer_support',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'sources': [ ### gcmole(all) ###
+ '../compiler/c-signature.h',
+ '../compiler/call-helper.h',
+ '../compiler/raw-machine-assembler-tester.h',
+ 'multi-return.cc',
+ ],
+ },
+ {
'target_name': 'v8_simple_wasm_fuzzer',
'type': 'executable',
'dependencies': [
diff --git a/deps/v8/test/fuzzer/fuzzer.isolate b/deps/v8/test/fuzzer/fuzzer.isolate
index 097d55885d..9391dcc7c0 100644
--- a/deps/v8/test/fuzzer/fuzzer.isolate
+++ b/deps/v8/test/fuzzer/fuzzer.isolate
@@ -8,6 +8,7 @@
'<(PRODUCT_DIR)/v8_simple_json_fuzzer<(EXECUTABLE_SUFFIX)',
'<(PRODUCT_DIR)/v8_simple_parser_fuzzer<(EXECUTABLE_SUFFIX)',
'<(PRODUCT_DIR)/v8_simple_regexp_fuzzer<(EXECUTABLE_SUFFIX)',
+ '<(PRODUCT_DIR)/v8_simple_multi_return_fuzzer<(EXECUTABLE_SUFFIX)',
'<(PRODUCT_DIR)/v8_simple_wasm_fuzzer<(EXECUTABLE_SUFFIX)',
'<(PRODUCT_DIR)/v8_simple_wasm_async_fuzzer<(EXECUTABLE_SUFFIX)',
'<(PRODUCT_DIR)/v8_simple_wasm_call_fuzzer<(EXECUTABLE_SUFFIX)',
@@ -25,6 +26,7 @@
'./json/',
'./parser/',
'./regexp/',
+ './multi_return/',
'./wasm/',
'./wasm_async/',
'./wasm_call/',
diff --git a/deps/v8/test/fuzzer/multi-return.cc b/deps/v8/test/fuzzer/multi-return.cc
new file mode 100644
index 0000000000..4766774005
--- /dev/null
+++ b/deps/v8/test/fuzzer/multi-return.cc
@@ -0,0 +1,346 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <cstddef>
+#include <cstdint>
+
+#include "src/compilation-info.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/instruction-selector.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/node.h"
+#include "src/compiler/operator.h"
+#include "src/compiler/pipeline.h"
+#include "src/compiler/raw-machine-assembler.h"
+#include "src/machine-type.h"
+#include "src/objects-inl.h"
+#include "src/objects.h"
+#include "src/simulator.h"
+#include "src/zone/accounting-allocator.h"
+#include "src/zone/zone.h"
+#include "test/fuzzer/fuzzer-support.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+namespace fuzzer {
+
+constexpr MachineType kTypes[] = {
+ // The first entry is just a placeholder, because '0' is a separator.
+ MachineType(),
+#if !V8_TARGET_ARCH_32_BIT
+ MachineType::Int64(),
+#endif
+ MachineType::Int32(), MachineType::Float32(), MachineType::Float64()};
+
+static constexpr int kNumTypes = arraysize(kTypes);
+
+class InputProvider {
+ public:
+ InputProvider(const uint8_t* data, size_t size)
+ : current_(data), end_(data + size) {}
+
+ size_t NumNonZeroBytes(size_t offset, int limit) {
+ DCHECK_LE(limit, std::numeric_limits<uint8_t>::max());
+ DCHECK_GE(current_ + offset, current_);
+ const uint8_t* p;
+ for (p = current_ + offset; p < end_; ++p) {
+ if (*p % limit == 0) break;
+ }
+ return p - current_ - offset;
+ }
+
+ int NextInt8(int limit) {
+ DCHECK_LE(limit, std::numeric_limits<uint8_t>::max());
+ if (current_ == end_) return 0;
+ uint8_t result = *current_;
+ current_++;
+ return static_cast<int>(result) % limit;
+ }
+
+ int NextInt32(int limit) {
+ if (current_ + sizeof(uint32_t) > end_) return 0;
+ int result = ReadLittleEndianValue<int>(current_);
+ current_ += sizeof(uint32_t);
+ return result % limit;
+ }
+
+ private:
+ const uint8_t* current_;
+ const uint8_t* end_;
+};
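// Example: given a hypothetical 6-byte input {3, 1, 0, 2, 0, 7} and
// limit 4 (kNumTypes on a 32-bit target),
//   NumNonZeroBytes(0, 4) == 2  (scans {3, 1}, stops at the 0 separator)
//   NumNonZeroBytes(3, 4) == 1  (scans {2} after that separator)
// so LLVMFuzzerTestOneInput below derives param_count == 2 and
// return_count == 1 without consuming input; NextInt8/NextInt32 then
// consume bytes sequentially and return 0 once the input is exhausted.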
+
+MachineType RandomType(InputProvider* input) {
+ return kTypes[input->NextInt8(kNumTypes)];
+}
+
+int num_registers(MachineType type) {
+ const RegisterConfiguration* config = RegisterConfiguration::Default();
+ switch (type.representation()) {
+ case MachineRepresentation::kWord32:
+ case MachineRepresentation::kWord64:
+ return config->num_allocatable_general_registers();
+ case MachineRepresentation::kFloat32:
+ return config->num_allocatable_float_registers();
+ case MachineRepresentation::kFloat64:
+ return config->num_allocatable_double_registers();
+ default:
+ UNREACHABLE();
+ }
+}
+
+int size(MachineType type) {
+ return 1 << ElementSizeLog2Of(type.representation());
+}
+
+int index(MachineType type) { return static_cast<int>(type.representation()); }
+
+const int* codes(MachineType type) {
+ const RegisterConfiguration* config = RegisterConfiguration::Default();
+ switch (type.representation()) {
+ case MachineRepresentation::kWord32:
+ case MachineRepresentation::kWord64:
+ return config->allocatable_general_codes();
+ case MachineRepresentation::kFloat32:
+ return config->allocatable_float_codes();
+ case MachineRepresentation::kFloat64:
+ return config->allocatable_double_codes();
+ default:
+ UNREACHABLE();
+ }
+}
+
+LinkageLocation AllocateLocation(MachineType type, int* int_count,
+ int* float_count, int* stack_slots) {
+ int* count = IsFloatingPoint(type.representation()) ? float_count : int_count;
+ int reg_code = *count;
+#if V8_TARGET_ARCH_ARM
+ // Allocate floats using a double register, but modify the code to
+ // reflect how ARM FP registers alias.
+ if (type == MachineType::Float32()) {
+ reg_code *= 2;
+ }
+#endif
+ LinkageLocation location = LinkageLocation::ForAnyRegister(); // Dummy.
+ if (reg_code < num_registers(type)) {
+ location = LinkageLocation::ForRegister(codes(type)[reg_code], type);
+ } else {
+ location = LinkageLocation::ForCallerFrameSlot(-*stack_slots - 1, type);
+ *stack_slots += std::max(1, size(type) / kPointerSize);
+ }
+ ++*count;
+ return location;
+}
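// Example of the scheme above: int and float counts advance independently,
// and on ARM a float count of n maps to S-register code 2n because s(2n)
// and s(2n+1) alias d(n), so each float gets its own double register. Once
// a count exceeds the allocatable registers of that kind, values spill to
// caller frame slots (-1, -2, ...), with 8-byte types taking two slots on
// 32-bit targets.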
+
+Node* Constant(RawMachineAssembler& m, MachineType type, int value) {
+ switch (type.representation()) {
+ case MachineRepresentation::kWord32:
+ return m.Int32Constant(static_cast<int32_t>(value));
+ case MachineRepresentation::kWord64:
+ return m.Int64Constant(static_cast<int64_t>(value));
+ case MachineRepresentation::kFloat32:
+ return m.Float32Constant(static_cast<float>(value));
+ case MachineRepresentation::kFloat64:
+ return m.Float64Constant(static_cast<double>(value));
+ default:
+ UNREACHABLE();
+ }
+}
+
+Node* ToInt32(RawMachineAssembler& m, MachineType type, Node* a) {
+ switch (type.representation()) {
+ case MachineRepresentation::kWord32:
+ return a;
+ case MachineRepresentation::kWord64:
+ return m.TruncateInt64ToInt32(a);
+ case MachineRepresentation::kFloat32:
+ return m.TruncateFloat32ToInt32(a);
+ case MachineRepresentation::kFloat64:
+ return m.RoundFloat64ToInt32(a);
+ default:
+ UNREACHABLE();
+ }
+}
+
+CallDescriptor* CreateRandomCallDescriptor(Zone* zone, size_t return_count,
+ size_t param_count,
+ InputProvider* input) {
+ LocationSignature::Builder locations(zone, return_count, param_count);
+
+ int stack_slots = 0;
+ int int_params = 0;
+ int float_params = 0;
+ for (size_t i = 0; i < param_count; i++) {
+ MachineType type = RandomType(input);
+ LinkageLocation location =
+ AllocateLocation(type, &int_params, &float_params, &stack_slots);
+ locations.AddParam(location);
+ }
+ // Read the end byte of the parameters.
+ input->NextInt8(1);
+
+ int stack_params = stack_slots;
+#if V8_TARGET_ARCH_ARM64
+ // Align the stack slots.
+ stack_slots = stack_slots + (stack_slots % 2);
+#endif
+ int aligned_stack_params = stack_slots;
+ int int_returns = 0;
+ int float_returns = 0;
+ for (size_t i = 0; i < return_count; i++) {
+ MachineType type = RandomType(input);
+ LinkageLocation location =
+ AllocateLocation(type, &int_returns, &float_returns, &stack_slots);
+ locations.AddReturn(location);
+ }
+ int stack_returns = stack_slots - aligned_stack_params;
+
+ MachineType target_type = MachineType::AnyTagged();
+ LinkageLocation target_loc = LinkageLocation::ForAnyRegister(target_type);
+ return new (zone) CallDescriptor( // --
+ CallDescriptor::kCallCodeObject, // kind
+ target_type, // target MachineType
+ target_loc, // target location
+ locations.Build(), // location_sig
+ stack_params, // on-stack parameter count
+ compiler::Operator::kNoProperties, // properties
+ 0, // callee-saved registers
+ 0, // callee-saved fp regs
+ CallDescriptor::kNoFlags, // flags
+ "c-call", // debug name
+ 0, // allocatable registers
+ stack_returns); // on-stack return count
+}
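// A note on the arm64 padding above: with, say, 3 parameter stack slots,
// stack_slots is rounded up to 4 before return locations are allocated, so
// stack_params stays 3 and stack_returns counts only slots past the
// aligned parameter area (arm64 keeps the stack pointer 16-byte aligned,
// hence the even slot count).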
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+ v8_fuzzer::FuzzerSupport* support = v8_fuzzer::FuzzerSupport::Get();
+ v8::Isolate* isolate = support->GetIsolate();
+ i::Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope handle_scope(isolate);
+ v8::Context::Scope context_scope(support->GetContext());
+ v8::TryCatch try_catch(isolate);
+ v8::internal::AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+
+ InputProvider input(data, size);
+ // Create randomized descriptor.
+ size_t param_count = input.NumNonZeroBytes(0, kNumTypes);
+ size_t return_count = input.NumNonZeroBytes(param_count + 1, kNumTypes);
+ CallDescriptor* desc =
+ CreateRandomCallDescriptor(&zone, return_count, param_count, &input);
+
+ if (FLAG_wasm_fuzzer_gen_test) {
+ // Print some debugging output which describes the produced signature.
+ printf("[");
+ for (size_t j = 0; j < desc->ParameterCount(); ++j) {
+ printf(" %s",
+ MachineReprToString(desc->GetParameterType(j).representation()));
+ }
+ printf(" ] -> [");
+ for (size_t j = 0; j < desc->ReturnCount(); ++j) {
+ printf(" %s",
+ MachineReprToString(desc->GetReturnType(j).representation()));
+ }
+ printf(" ]\n\n");
+ }
+
+ // Count parameters of each type.
+ constexpr size_t kNumMachineRepresentations =
+ static_cast<size_t>(MachineRepresentation::kLastRepresentation) + 1;
+
+ // Trivial hash table for the number of occurrences of parameter types. The
+ // MachineRepresentation of the parameter types is used as hash code.
+ int counts[kNumMachineRepresentations] = {0};
+ for (size_t i = 0; i < desc->ParameterCount(); ++i) {
+ ++counts[index(desc->GetParameterType(i))];
+ }
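// Example: a signature (i32, f64, i32) -> (...) leaves 2 at the kWord32
// index and 1 at the kFloat64 index; the return-generation loop below uses
// these counts to pick a random same-representation parameter, when one
// exists.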
+
+ // Generate random inputs.
+ std::unique_ptr<int[]> inputs(new int[desc->ParameterCount()]);
+ std::unique_ptr<int[]> outputs(new int[desc->ReturnCount()]);
+ for (size_t i = 0; i < desc->ParameterCount(); ++i) {
+ inputs[i] = input.NextInt32(10000);
+ }
+
+ RawMachineAssembler callee(
+ i_isolate, new (&zone) Graph(&zone), desc,
+ MachineType::PointerRepresentation(),
+ InstructionSelector::SupportedMachineOperatorFlags());
+
+ // Generate callee, returning random picks of its parameters.
+ std::unique_ptr<Node* []> params(new Node*[desc->ParameterCount() + 1]);
+ std::unique_ptr<Node* []> returns(new Node*[desc->ReturnCount()]);
+ for (size_t i = 0; i < desc->ParameterCount(); ++i) {
+ params[i] = callee.Parameter(i);
+ }
+ for (size_t i = 0; i < desc->ReturnCount(); ++i) {
+ MachineType type = desc->GetReturnType(i);
+ // Find a random same-type parameter to return. Use a constant if none.
+ if (counts[index(type)] == 0) {
+ returns[i] = Constant(callee, type, 42);
+ outputs[i] = 42;
+ } else {
+ int n = input.NextInt8(counts[index(type)]);
+ int k = 0;
+ while (desc->GetParameterType(k) != desc->GetReturnType(i) || --n > 0) {
+ ++k;
+ }
+ returns[i] = params[k];
+ outputs[i] = inputs[k];
+ }
+ }
+ callee.Return(static_cast<int>(desc->ReturnCount()), returns.get());
+
+ CompilationInfo info(ArrayVector("testing"), &zone, Code::STUB);
+ Handle<Code> code = Pipeline::GenerateCodeForTesting(
+ &info, i_isolate, desc, callee.graph(), callee.Export());
+
+ // Generate wrapper.
+ int expect = 0;
+
+ MachineSignature::Builder sig_builder(&zone, 1, 0);
+ sig_builder.AddReturn(MachineType::Int32());
+
+ CallDescriptor* wrapper_desc =
+ Linkage::GetSimplifiedCDescriptor(&zone, sig_builder.Build());
+ RawMachineAssembler caller(
+ i_isolate, new (&zone) Graph(&zone), wrapper_desc,
+ MachineType::PointerRepresentation(),
+ InstructionSelector::SupportedMachineOperatorFlags());
+
+ params[0] = caller.HeapConstant(code);
+ for (size_t i = 0; i < desc->ParameterCount(); ++i) {
+ params[i + 1] = Constant(caller, desc->GetParameterType(i), inputs[i]);
+ }
+ Node* call = caller.AddNode(caller.common()->Call(desc),
+ static_cast<int>(desc->ParameterCount() + 1),
+ params.get());
+ Node* ret = Constant(caller, MachineType::Int32(), 0);
+ for (size_t i = 0; i < desc->ReturnCount(); ++i) {
+ // Skip roughly one third of the outputs.
+ if (input.NextInt8(3) == 0) continue;
+ Node* ret_i = (desc->ReturnCount() == 1)
+ ? call
+ : caller.AddNode(caller.common()->Projection(i), call);
+ ret = caller.Int32Add(ret, ToInt32(caller, desc->GetReturnType(i), ret_i));
+ expect += outputs[i];
+ }
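// With more than one return value, each result must be extracted from the
// call node through a Projection(i) node; with exactly one return the call
// node itself carries the value, which is why the single-return case above
// bypasses the projection.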
+ caller.Return(ret);
+
+ // Call the wrapper.
+ CompilationInfo wrapper_info(ArrayVector("wrapper"), &zone, Code::STUB);
+ Handle<Code> wrapper_code = Pipeline::GenerateCodeForTesting(
+ &wrapper_info, i_isolate, wrapper_desc, caller.graph(), caller.Export());
+ auto fn = GeneratedCode<int32_t>::FromCode(*wrapper_code);
+ int result = fn.Call();
+
+ CHECK_EQ(expect, result);
+ return 0;
+}
+
+} // namespace fuzzer
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/fuzzer/multi_return/README.md b/deps/v8/test/fuzzer/multi_return/README.md
new file mode 100644
index 0000000000..a3764e8a7c
--- /dev/null
+++ b/deps/v8/test/fuzzer/multi_return/README.md
@@ -0,0 +1,4 @@
+All files in this directory are used by the trybots to check that the fuzzer
+executes correctly; see
+https://github.com/v8/v8/blob/master/test/fuzzer/README.md. There should be at
+least one file in this directory, e.g. this README file.
diff --git a/deps/v8/test/fuzzer/regexp.cc b/deps/v8/test/fuzzer/regexp.cc
index c73901b0e0..b652bd7e3f 100644
--- a/deps/v8/test/fuzzer/regexp.cc
+++ b/deps/v8/test/fuzzer/regexp.cc
@@ -49,7 +49,7 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
static const int kAllFlags = i::JSRegExp::kGlobal | i::JSRegExp::kIgnoreCase |
i::JSRegExp::kMultiline | i::JSRegExp::kSticky |
- i::JSRegExp::kUnicode;
+ i::JSRegExp::kUnicode | i::JSRegExp::kDotAll;
const uint8_t one_byte_array[6] = {'f', 'o', 'o', 'b', 'a', 'r'};
const i::uc16 two_byte_array[6] = {'f', 0xD83D, 0xDCA9, 'b', 'a', 0x2603};
diff --git a/deps/v8/test/fuzzer/testcfg.py b/deps/v8/test/fuzzer/testcfg.py
index 17cb0ef588..b3fe174d95 100644
--- a/deps/v8/test/fuzzer/testcfg.py
+++ b/deps/v8/test/fuzzer/testcfg.py
@@ -8,47 +8,57 @@ from testrunner.local import testsuite
from testrunner.objects import testcase
-class FuzzerVariantGenerator(testsuite.VariantGenerator):
- # Only run the fuzzer with standard variant.
- def FilterVariantsByTest(self, testcase):
- return self.standard_variant
+class VariantsGenerator(testsuite.VariantsGenerator):
+ def _get_variants(self, test):
+ return self._standard_variant
- def GetFlagSets(self, testcase, variant):
- return testsuite.FAST_VARIANT_FLAGS[variant]
-
-class FuzzerTestSuite(testsuite.TestSuite):
- SUB_TESTS = ( 'json', 'parser', 'regexp', 'wasm', 'wasm_async',
- 'wasm_call', 'wasm_code', 'wasm_compile', 'wasm_data_section',
- 'wasm_function_sigs_section', 'wasm_globals_section',
- 'wasm_imports_section', 'wasm_memory_section', 'wasm_names_section',
- 'wasm_types_section' )
-
- def __init__(self, name, root):
- super(FuzzerTestSuite, self).__init__(name, root)
+class TestSuite(testsuite.TestSuite):
+ SUB_TESTS = ( 'json', 'parser', 'regexp', 'multi_return', 'wasm',
+ 'wasm_async', 'wasm_call', 'wasm_code', 'wasm_compile',
+ 'wasm_data_section', 'wasm_function_sigs_section',
+ 'wasm_globals_section', 'wasm_imports_section', 'wasm_memory_section',
+ 'wasm_names_section', 'wasm_types_section' )
def ListTests(self, context):
tests = []
- for subtest in FuzzerTestSuite.SUB_TESTS:
+ for subtest in TestSuite.SUB_TESTS:
for fname in os.listdir(os.path.join(self.root, subtest)):
if not os.path.isfile(os.path.join(self.root, subtest, fname)):
continue
- test = testcase.TestCase(self, '%s/%s' % (subtest, fname))
+ test = self._create_test('%s/%s' % (subtest, fname))
tests.append(test)
tests.sort()
return tests
- def GetShellForTestCase(self, testcase):
- group, _ = testcase.path.split('/', 1)
- return 'v8_simple_%s_fuzzer' % group
+ def _test_class(self):
+ return TestCase
+
+ def _variants_gen_class(self):
+ return VariantsGenerator
+
+ def _LegacyVariantsGeneratorFactory(self):
+ return testsuite.StandardLegacyVariantsGenerator
- def GetParametersForTestCase(self, testcase, context):
- suite, name = testcase.path.split('/')
- return [os.path.join(self.root, suite, name)], [], {}
- def _VariantGeneratorFactory(self):
- return FuzzerVariantGenerator
+class TestCase(testcase.TestCase):
+ def _get_files_params(self, ctx):
+ suite, name = self.path.split('/')
+ return [os.path.join(self.suite.root, suite, name)]
+
+ def _get_variant_flags(self):
+ return []
+
+ def _get_statusfile_flags(self):
+ return []
+
+ def _get_mode_flags(self, ctx):
+ return []
+
+ def get_shell(self):
+ group, _ = self.path.split('/', 1)
+ return 'v8_simple_%s_fuzzer' % group
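  # Example: a test path 'multi_return/foo' yields the shell
  # 'v8_simple_multi_return_fuzzer', matching the executables declared in
  # fuzzer.gyp above.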
def GetSuite(name, root):
- return FuzzerTestSuite(name, root)
+ return TestSuite(name, root)
diff --git a/deps/v8/test/fuzzer/wasm-async.cc b/deps/v8/test/fuzzer/wasm-async.cc
index 13b15a9d70..4718601b0f 100644
--- a/deps/v8/test/fuzzer/wasm-async.cc
+++ b/deps/v8/test/fuzzer/wasm-async.cc
@@ -94,7 +94,7 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
Local<Promise> promise = resolver->GetPromise();
AsyncCompile(i_isolate, Utils::OpenHandle(*promise),
- ModuleWireBytes(data, data + size));
+ ModuleWireBytes(data, data + size), false);
ASSIGN(Function, instantiate_impl,
Function::New(support->GetContext(), &InstantiateCallback,
diff --git a/deps/v8/test/fuzzer/wasm-compile.cc b/deps/v8/test/fuzzer/wasm-compile.cc
index ded3a101f2..4192a938e8 100644
--- a/deps/v8/test/fuzzer/wasm-compile.cc
+++ b/deps/v8/test/fuzzer/wasm-compile.cc
@@ -30,6 +30,8 @@ namespace fuzzer {
namespace {
+constexpr int kMaxFunctions = 4;
+
class DataRange {
const uint8_t* data_;
size_t size_;
@@ -37,46 +39,67 @@ class DataRange {
public:
DataRange(const uint8_t* data, size_t size) : data_(data), size_(size) {}
- size_t size() const { return size_; }
-
- std::pair<DataRange, DataRange> split(uint32_t index) const {
- return std::make_pair(DataRange(data_, index),
- DataRange(data_ + index, size() - index));
+  // Don't accidentally pass DataRange by value. A copy would re-consume the
+  // same bytes, so the input might never be exhausted, which can lead to OOM.
+ // Define move constructor and move assignment, disallow copy constructor and
+ // copy assignment (below).
+ DataRange(DataRange&& other) : DataRange(other.data_, other.size_) {
+ other.data_ = nullptr;
+ other.size_ = 0;
+ }
+ DataRange& operator=(DataRange&& other) {
+ data_ = other.data_;
+ size_ = other.size_;
+ other.data_ = nullptr;
+ other.size_ = 0;
+ return *this;
}
- std::pair<DataRange, DataRange> split() {
- uint16_t index = get<uint16_t>();
- if (size() > 0) {
- index = index % size();
- } else {
- index = 0;
- }
- return split(index);
+ size_t size() const { return size_; }
+
+ DataRange split() {
+ uint16_t num_bytes = get<uint16_t>() % std::max(size_t{1}, size_);
+ DataRange split(data_, num_bytes);
+ data_ += num_bytes;
+ size_ -= num_bytes;
+ return split;
}
template <typename T>
T get() {
- if (size() == 0) {
- return T();
- } else {
- // We want to support the case where we have less than sizeof(T) bytes
- // remaining in the slice. For example, if we emit an i32 constant, it's
- // okay if we don't have a full four bytes available, we'll just use what
- // we have. We aren't concerned about endianness because we are generating
- // arbitrary expressions.
- const size_t num_bytes = std::min(sizeof(T), size());
- T result = T();
- memcpy(&result, data_, num_bytes);
- data_ += num_bytes;
- size_ -= num_bytes;
- return result;
- }
+ // We want to support the case where we have less than sizeof(T) bytes
+ // remaining in the slice. For example, if we emit an i32 constant, it's
+ // okay if we don't have a full four bytes available, we'll just use what
+ // we have. We aren't concerned about endianness because we are generating
+ // arbitrary expressions.
+ const size_t num_bytes = std::min(sizeof(T), size_);
+ T result = T();
+ memcpy(&result, data_, num_bytes);
+ data_ += num_bytes;
+ size_ -= num_bytes;
+ return result;
}
+
+ DISALLOW_COPY_AND_ASSIGN(DataRange);
};
+ValueType GetValueType(DataRange& data) {
+ switch (data.get<uint8_t>() % 4) {
+ case 0:
+ return kWasmI32;
+ case 1:
+ return kWasmI64;
+ case 2:
+ return kWasmF32;
+ case 3:
+ return kWasmF64;
+ }
+ UNREACHABLE();
+}
+
class WasmGenerator {
template <WasmOpcode Op, ValueType... Args>
- void op(DataRange data) {
+ void op(DataRange& data) {
Generate<Args...>(data);
builder_->Emit(Op);
}
@@ -101,20 +124,20 @@ class WasmGenerator {
};
template <ValueType T>
- void block(DataRange data) {
+ void block(DataRange& data) {
BlockScope block_scope(this, kExprBlock, T, T);
Generate<T>(data);
}
template <ValueType T>
- void loop(DataRange data) {
+ void loop(DataRange& data) {
// When breaking to a loop header, don't provide any input value (hence
// kWasmStmt).
BlockScope block_scope(this, kExprLoop, T, kWasmStmt);
Generate<T>(data);
}
- void br(DataRange data) {
+ void br(DataRange& data) {
// There is always at least the block representing the function body.
DCHECK(!blocks_.empty());
const uint32_t target_block = data.get<uint32_t>() % blocks_.size();
@@ -161,7 +184,7 @@ class WasmGenerator {
}
template <WasmOpcode memory_op, ValueType... arg_types>
- void memop(DataRange data) {
+ void memop(DataRange& data) {
const uint8_t align = data.get<uint8_t>() % (max_alignment(memory_op) + 1);
const uint32_t offset = data.get<uint32_t>();
@@ -173,21 +196,131 @@ class WasmGenerator {
builder_->EmitU32V(offset);
}
+ void drop(DataRange& data) {
+ Generate(GetValueType(data), data);
+ builder_->Emit(kExprDrop);
+ }
+
+ template <ValueType wanted_type>
+ void call(DataRange& data) {
+ call(data, wanted_type);
+ }
+
+ void Convert(ValueType src, ValueType dst) {
+ auto idx = [](ValueType t) -> int {
+ switch (t) {
+ case kWasmI32:
+ return 0;
+ case kWasmI64:
+ return 1;
+ case kWasmF32:
+ return 2;
+ case kWasmF64:
+ return 3;
+ default:
+ UNREACHABLE();
+ }
+ };
+ static constexpr WasmOpcode kConvertOpcodes[] = {
+ // {i32, i64, f32, f64} -> i32
+ kExprNop, kExprI32ConvertI64, kExprI32SConvertF32, kExprI32SConvertF64,
+ // {i32, i64, f32, f64} -> i64
+ kExprI64SConvertI32, kExprNop, kExprI64SConvertF32, kExprI64SConvertF64,
+ // {i32, i64, f32, f64} -> f32
+ kExprF32SConvertI32, kExprF32SConvertI64, kExprNop, kExprF32ConvertF64,
+ // {i32, i64, f32, f64} -> f64
+ kExprF64SConvertI32, kExprF64SConvertI64, kExprF64ConvertF32, kExprNop};
+ int arr_idx = idx(dst) << 2 | idx(src);
+ builder_->Emit(kConvertOpcodes[arr_idx]);
+ }
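// Worked example of the lookup above: converting i64 to f32 gives
// idx(src) == 1 and idx(dst) == 2, so arr_idx == (2 << 2) | 1 == 9, which
// selects kExprF32SConvertI64 from the row-major 4x4 table (rows are
// destination types, columns source types, and the diagonal is kExprNop).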
+
+ void call(DataRange& data, ValueType wanted_type) {
+ int func_index = data.get<uint8_t>() % functions_.size();
+ FunctionSig* sig = functions_[func_index];
+ // Generate arguments.
+ for (size_t i = 0; i < sig->parameter_count(); ++i) {
+ Generate(sig->GetParam(i), data);
+ }
+ // Emit call.
+ builder_->EmitWithU32V(kExprCallFunction, func_index);
+ // Convert the return value to the wanted type.
+ ValueType return_type =
+ sig->return_count() == 0 ? kWasmStmt : sig->GetReturn(0);
+ if (return_type == kWasmStmt && wanted_type != kWasmStmt) {
+      // The call did not generate a value, so generate the wanted one here.
+ Generate(wanted_type, data);
+ } else if (return_type != kWasmStmt && wanted_type == kWasmStmt) {
+ // The call did generate a value, but we did not want one.
+ builder_->Emit(kExprDrop);
+ } else if (return_type != wanted_type) {
+ // If the returned type does not match the wanted type, convert it.
+ Convert(return_type, wanted_type);
+ }
+ }
+
+ struct Local {
+ uint32_t index;
+ ValueType type = kWasmStmt;
+ Local() = default;
+ Local(uint32_t index, ValueType type) : index(index), type(type) {}
+ bool is_valid() const { return type != kWasmStmt; }
+ };
+
+ Local GetRandomLocal(DataRange& data) {
+ uint32_t num_params =
+ static_cast<uint32_t>(builder_->signature()->parameter_count());
+ uint32_t num_locals = static_cast<uint32_t>(locals_.size());
+ if (num_params + num_locals == 0) return {};
+ uint32_t index = data.get<uint8_t>() % (num_params + num_locals);
+ ValueType type = index < num_params ? builder_->signature()->GetParam(index)
+ : locals_[index - num_params];
+ return {index, type};
+ }
+
+ template <ValueType wanted_type>
+ void local_op(DataRange& data, WasmOpcode opcode) {
+ Local local = GetRandomLocal(data);
+ // If there are no locals and no parameters, just generate any value (if a
+ // value is needed), or do nothing.
+ if (!local.is_valid()) {
+ if (wanted_type == kWasmStmt) return;
+ return Generate<wanted_type>(data);
+ }
+
+ if (opcode != kExprGetLocal) Generate(local.type, data);
+ builder_->EmitWithU32V(opcode, local.index);
+ if (wanted_type != kWasmStmt && local.type != wanted_type) {
+ Convert(local.type, wanted_type);
+ }
+ }
+
+ template <ValueType wanted_type>
+ void get_local(DataRange& data) {
+ local_op<wanted_type>(data, kExprGetLocal);
+ }
+
+ void set_local(DataRange& data) { local_op<kWasmStmt>(data, kExprSetLocal); }
+
+ template <ValueType wanted_type>
+ void tee_local(DataRange& data) {
+ local_op<wanted_type>(data, kExprTeeLocal);
+ }
+
template <ValueType T1, ValueType T2>
- void sequence(DataRange data) {
+ void sequence(DataRange& data) {
Generate<T1, T2>(data);
}
- void current_memory(DataRange data) {
+ void current_memory(DataRange& data) {
builder_->EmitWithU8(kExprMemorySize, 0);
}
- void grow_memory(DataRange data);
+ void grow_memory(DataRange& data);
- using generate_fn = void (WasmGenerator::*const)(DataRange);
+ using generate_fn = void (WasmGenerator::*const)(DataRange&);
template <size_t N>
- void GenerateOneOf(generate_fn (&alternates)[N], DataRange data) {
+ void GenerateOneOf(generate_fn (&alternates)[N], DataRange& data) {
static_assert(N < std::numeric_limits<uint8_t>::max(),
"Too many alternates. Replace with a bigger type if needed.");
const auto which = data.get<uint8_t>();
@@ -209,26 +342,39 @@ class WasmGenerator {
};
public:
- explicit WasmGenerator(WasmFunctionBuilder* fn) : builder_(fn) {
- DCHECK_EQ(1, fn->signature()->return_count());
- blocks_.push_back(fn->signature()->GetReturn(0));
+ WasmGenerator(WasmFunctionBuilder* fn,
+ const std::vector<FunctionSig*>& functions, DataRange& data)
+ : builder_(fn), functions_(functions) {
+ FunctionSig* sig = fn->signature();
+ DCHECK_GE(1, sig->return_count());
+ blocks_.push_back(sig->return_count() == 0 ? kWasmStmt : sig->GetReturn(0));
+
+ constexpr uint32_t kMaxLocals = 32;
+ locals_.resize(data.get<uint8_t>() % kMaxLocals);
+ for (ValueType& local : locals_) {
+ local = GetValueType(data);
+ fn->AddLocal(local);
+ }
}
- void Generate(ValueType type, DataRange data);
+ void Generate(ValueType type, DataRange& data);
template <ValueType T>
- void Generate(DataRange data);
+ void Generate(DataRange& data);
template <ValueType T1, ValueType T2, ValueType... Ts>
- void Generate(DataRange data) {
- const auto parts = data.split();
- Generate<T1>(parts.first);
- Generate<T2, Ts...>(parts.second);
+ void Generate(DataRange& data) {
+ // TODO(clemensh): Implement a more even split.
+ auto first_data = data.split();
+ Generate<T1>(first_data);
+ Generate<T2, Ts...>(data);
}
private:
WasmFunctionBuilder* builder_;
std::vector<ValueType> blocks_;
+ const std::vector<FunctionSig*>& functions_;
+ std::vector<ValueType> locals_;
uint32_t recursion_depth = 0;
static constexpr uint32_t kMaxRecursionDepth = 64;
@@ -239,7 +385,7 @@ class WasmGenerator {
};
template <>
-void WasmGenerator::Generate<kWasmStmt>(DataRange data) {
+void WasmGenerator::Generate<kWasmStmt>(DataRange& data) {
GeneratorRecursionScope rec_scope(this);
if (recursion_limit_reached() || data.size() == 0) return;
@@ -257,13 +403,18 @@ void WasmGenerator::Generate<kWasmStmt>(DataRange data) {
&WasmGenerator::memop<kExprI64StoreMem32, kWasmI64>,
&WasmGenerator::memop<kExprF32StoreMem, kWasmF32>,
&WasmGenerator::memop<kExprF64StoreMem, kWasmF64>,
- };
+
+ &WasmGenerator::drop,
+
+ &WasmGenerator::call<kWasmStmt>,
+
+ &WasmGenerator::set_local};
GenerateOneOf(alternates, data);
}
template <>
-void WasmGenerator::Generate<kWasmI32>(DataRange data) {
+void WasmGenerator::Generate<kWasmI32>(DataRange& data) {
GeneratorRecursionScope rec_scope(this);
if (recursion_limit_reached() || data.size() <= sizeof(uint32_t)) {
builder_->EmitI32Const(data.get<uint32_t>());
@@ -338,13 +489,18 @@ void WasmGenerator::Generate<kWasmI32>(DataRange data) {
&WasmGenerator::memop<kExprI32LoadMem16U>,
&WasmGenerator::current_memory,
- &WasmGenerator::grow_memory};
+ &WasmGenerator::grow_memory,
+
+ &WasmGenerator::get_local<kWasmI32>,
+ &WasmGenerator::tee_local<kWasmI32>,
+
+ &WasmGenerator::call<kWasmI32>};
GenerateOneOf(alternates, data);
}
template <>
-void WasmGenerator::Generate<kWasmI64>(DataRange data) {
+void WasmGenerator::Generate<kWasmI64>(DataRange& data) {
GeneratorRecursionScope rec_scope(this);
if (recursion_limit_reached() || data.size() <= sizeof(uint64_t)) {
builder_->EmitI64Const(data.get<int64_t>());
@@ -385,13 +541,18 @@ void WasmGenerator::Generate<kWasmI64>(DataRange data) {
&WasmGenerator::memop<kExprI64LoadMem16S>,
&WasmGenerator::memop<kExprI64LoadMem16U>,
&WasmGenerator::memop<kExprI64LoadMem32S>,
- &WasmGenerator::memop<kExprI64LoadMem32U>};
+ &WasmGenerator::memop<kExprI64LoadMem32U>,
+
+ &WasmGenerator::get_local<kWasmI64>,
+ &WasmGenerator::tee_local<kWasmI64>,
+
+ &WasmGenerator::call<kWasmI64>};
GenerateOneOf(alternates, data);
}
template <>
-void WasmGenerator::Generate<kWasmF32>(DataRange data) {
+void WasmGenerator::Generate<kWasmF32>(DataRange& data) {
GeneratorRecursionScope rec_scope(this);
if (recursion_limit_reached() || data.size() <= sizeof(float)) {
builder_->EmitF32Const(data.get<float>());
@@ -408,13 +569,18 @@ void WasmGenerator::Generate<kWasmF32>(DataRange data) {
&WasmGenerator::block<kWasmF32>,
&WasmGenerator::loop<kWasmF32>,
- &WasmGenerator::memop<kExprF32LoadMem>};
+ &WasmGenerator::memop<kExprF32LoadMem>,
+
+ &WasmGenerator::get_local<kWasmF32>,
+ &WasmGenerator::tee_local<kWasmF32>,
+
+ &WasmGenerator::call<kWasmF32>};
GenerateOneOf(alternates, data);
}
template <>
-void WasmGenerator::Generate<kWasmF64>(DataRange data) {
+void WasmGenerator::Generate<kWasmF64>(DataRange& data) {
GeneratorRecursionScope rec_scope(this);
if (recursion_limit_reached() || data.size() <= sizeof(double)) {
builder_->EmitF64Const(data.get<double>());
@@ -431,17 +597,22 @@ void WasmGenerator::Generate<kWasmF64>(DataRange data) {
&WasmGenerator::block<kWasmF64>,
&WasmGenerator::loop<kWasmF64>,
- &WasmGenerator::memop<kExprF64LoadMem>};
+ &WasmGenerator::memop<kExprF64LoadMem>,
+
+ &WasmGenerator::get_local<kWasmF64>,
+ &WasmGenerator::tee_local<kWasmF64>,
+
+ &WasmGenerator::call<kWasmF64>};
GenerateOneOf(alternates, data);
}
-void WasmGenerator::grow_memory(DataRange data) {
+void WasmGenerator::grow_memory(DataRange& data) {
Generate<kWasmI32>(data);
builder_->EmitWithU8(kExprGrowMemory, 0);
}
-void WasmGenerator::Generate(ValueType type, DataRange data) {
+void WasmGenerator::Generate(ValueType type, DataRange& data) {
switch (type) {
case kWasmStmt:
return Generate<kWasmStmt>(data);
@@ -457,6 +628,19 @@ void WasmGenerator::Generate(ValueType type, DataRange data) {
UNREACHABLE();
}
}
+
+FunctionSig* GenerateSig(Zone* zone, DataRange& data) {
+ // Generate enough parameters to spill some to the stack.
+ constexpr int kMaxParameters = 15;
+ int num_params = int{data.get<uint8_t>()} % (kMaxParameters + 1);
+ bool has_return = data.get<bool>();
+
+ FunctionSig::Builder builder(zone, has_return ? 1 : 0, num_params);
+ if (has_return) builder.AddReturn(GetValueType(data));
+ for (int i = 0; i < num_params; ++i) builder.AddParam(GetValueType(data));
+ return builder.Build();
+}
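// Example: with leading data bytes {0x12, 0x01, ...}, num_params ==
// 0x12 % 16 == 2 and has_return == true, so the resulting signature has
// the shape (t0, t1) -> r0, with all three value types drawn from the
// following bytes via GetValueType().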
+
} // namespace
class WasmCompileFuzzer : public WasmExecutionFuzzer {
@@ -469,13 +653,32 @@ class WasmCompileFuzzer : public WasmExecutionFuzzer {
WasmModuleBuilder builder(zone);
- WasmFunctionBuilder* f = builder.AddFunction(sigs.i_iii());
+ DataRange range(data, static_cast<uint32_t>(size));
+ std::vector<FunctionSig*> function_signatures;
+ function_signatures.push_back(sigs.i_iii());
+
+ static_assert(kMaxFunctions >= 1, "need min. 1 function");
+ int num_functions = 1 + (range.get<uint8_t>() % kMaxFunctions);
+
+ for (int i = 1; i < num_functions; ++i) {
+ function_signatures.push_back(GenerateSig(zone, range));
+ }
- WasmGenerator gen(f);
- gen.Generate<kWasmI32>(DataRange(data, static_cast<uint32_t>(size)));
+ for (int i = 0; i < num_functions; ++i) {
+ DataRange function_range =
+ i == num_functions - 1 ? std::move(range) : range.split();
- f->Emit(kExprEnd);
- builder.AddExport(CStrVector("main"), f);
+ FunctionSig* sig = function_signatures[i];
+ WasmFunctionBuilder* f = builder.AddFunction(sig);
+
+ WasmGenerator gen(f, function_signatures, function_range);
+ ValueType return_type =
+ sig->return_count() == 0 ? kWasmStmt : sig->GetReturn(0);
+ gen.Generate(return_type, function_range);
+
+ f->Emit(kExprEnd);
+ if (i == 0) builder.AddExport(CStrVector("main"), f);
+ }
builder.SetMaxMemorySize(32);
builder.WriteTo(buffer);
@@ -485,8 +688,8 @@ class WasmCompileFuzzer : public WasmExecutionFuzzer {
new WasmValue[3]{WasmValue(1), WasmValue(2), WasmValue(3)});
compiler_args.reset(new Handle<Object>[3]{
- handle(Smi::FromInt(1), isolate), handle(Smi::FromInt(1), isolate),
- handle(Smi::FromInt(1), isolate)});
+ handle(Smi::FromInt(1), isolate), handle(Smi::FromInt(2), isolate),
+ handle(Smi::FromInt(3), isolate)});
return true;
}
};
diff --git a/deps/v8/test/fuzzer/wasm-fuzzer-common.cc b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
index 4e6aed1a25..46f5133486 100644
--- a/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
+++ b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
@@ -9,6 +9,7 @@
#include "src/objects-inl.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/wasm-api.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-module-builder.h"
#include "src/wasm/wasm-module.h"
#include "src/zone/accounting-allocator.h"
@@ -89,29 +90,123 @@ void InterpretAndExecuteModule(i::Isolate* isolate,
testing::RunWasmModuleForTesting(isolate, instance, 0, nullptr);
}
+namespace {
+struct PrintSig {
+ const size_t num;
+ const std::function<ValueType(size_t)> getter;
+};
+PrintSig PrintParameters(const FunctionSig* sig) {
+ return {sig->parameter_count(), [=](size_t i) { return sig->GetParam(i); }};
+}
+PrintSig PrintReturns(const FunctionSig* sig) {
+ return {sig->return_count(), [=](size_t i) { return sig->GetReturn(i); }};
+}
+const char* ValueTypeToConstantName(ValueType type) {
+ switch (type) {
+ case kWasmI32:
+ return "kWasmI32";
+ case kWasmI64:
+ return "kWasmI64";
+ case kWasmF32:
+ return "kWasmF32";
+ case kWasmF64:
+ return "kWasmF64";
+ default:
+ UNREACHABLE();
+ }
+}
+std::ostream& operator<<(std::ostream& os, const PrintSig& print) {
+ os << "[";
+ for (size_t i = 0; i < print.num; ++i) {
+ os << (i == 0 ? "" : ", ") << ValueTypeToConstantName(print.getter(i));
+ }
+ return os << "]";
+}
+
+void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
+ bool compiles) {
+ constexpr bool kVerifyFunctions = false;
+ ModuleResult module_res =
+ SyncDecodeWasmModule(isolate, wire_bytes.start(), wire_bytes.end(),
+ kVerifyFunctions, ModuleOrigin::kWasmOrigin);
+ CHECK(module_res.ok());
+ WasmModule* module = module_res.val.get();
+ CHECK_NOT_NULL(module);
+
+ OFStream os(stdout);
+
+ os << "// Copyright 2018 the V8 project authors. All rights reserved.\n"
+ "// Use of this source code is governed by a BSD-style license that "
+ "can be\n"
+ "// found in the LICENSE file.\n"
+ "\n"
+ "load('test/mjsunit/wasm/wasm-constants.js');\n"
+ "load('test/mjsunit/wasm/wasm-module-builder.js');\n"
+ "\n"
+ "(function() {\n"
+ " var builder = new WasmModuleBuilder();\n";
+
+ if (module->has_memory) {
+ os << " builder.addMemory(" << module->initial_pages;
+ if (module->has_maximum_pages) {
+ os << ", " << module->maximum_pages << ");\n";
+ } else {
+ os << ");\n";
+ }
+ }
+
+ Zone tmp_zone(isolate->allocator(), ZONE_NAME);
+
+ for (const WasmFunction& func : module->functions) {
+ Vector<const uint8_t> func_code = wire_bytes.GetFunctionBytes(&func);
+ os << " // Generate function " << func.func_index + 1 << " of "
+ << module->functions.size() << ".\n";
+ // Generate signature.
+ os << " sig" << func.func_index << " = makeSig("
+ << PrintParameters(func.sig) << ", " << PrintReturns(func.sig) << ");\n";
+
+ // Add function.
+ os << " builder.addFunction(undefined, sig" << func.func_index << ")\n";
+
+ // Add locals.
+ BodyLocalDecls decls(&tmp_zone);
+ DecodeLocalDecls(&decls, func_code.start(), func_code.end());
+ if (!decls.type_list.empty()) {
+ os << " ";
+ for (size_t pos = 0, count = 1, locals = decls.type_list.size();
+ pos < locals; pos += count, count = 1) {
+ ValueType type = decls.type_list[pos];
+ while (pos + count < locals && decls.type_list[pos + count] == type)
+ ++count;
+ os << ".addLocals({" << WasmOpcodes::TypeName(type)
+ << "_count: " << count << "})";
+ }
+ os << "\n";
+ }
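// Example: a decoded local list {i32, i32, f64} is run-length encoded by
// the loop above into ".addLocals({i32_count: 2}).addLocals({f64_count: 1})".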
+
+ // Add body.
+ os << " .addBodyWithEnd([\n";
+
+ FunctionBody func_body(func.sig, func.code.offset(), func_code.start(),
+ func_code.end());
+ PrintRawWasmCode(isolate->allocator(), func_body, module, kOmitLocals);
+ os << " ])";
+ if (func.func_index == 0) os << "\n .exportAs('main')";
+ os << ";\n ";
+ }
+
+ if (compiles) {
+ os << " var module = builder.instantiate();\n"
+ " module.exports.main(1, 2, 3);\n";
+ } else {
+ os << " assertThrows(function() { builder.instantiate(); });\n";
+ }
+ os << "})();\n";
+}
+} // namespace
+
int WasmExecutionFuzzer::FuzzWasmModule(const uint8_t* data, size_t size,
bool require_valid) {
- // Save the flag so that we can change it and restore it later.
- bool generate_test = FLAG_wasm_code_fuzzer_gen_test;
- if (generate_test) {
- OFStream os(stdout);
-
- os << "// Copyright 2017 the V8 project authors. All rights reserved."
- << std::endl;
- os << "// Use of this source code is governed by a BSD-style license that "
- "can be"
- << std::endl;
- os << "// found in the LICENSE file." << std::endl;
- os << std::endl;
- os << "load(\"test/mjsunit/wasm/wasm-constants.js\");" << std::endl;
- os << "load(\"test/mjsunit/wasm/wasm-module-builder.js\");" << std::endl;
- os << std::endl;
- os << "(function() {" << std::endl;
- os << " var builder = new WasmModuleBuilder();" << std::endl;
- os << " builder.addMemory(16, 32, false);" << std::endl;
- os << " builder.addFunction(\"test\", kSig_i_iii)" << std::endl;
- os << " .addBodyWithEnd([" << std::endl;
- }
v8_fuzzer::FuzzerSupport* support = v8_fuzzer::FuzzerSupport::Get();
v8::Isolate* isolate = support->GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
@@ -148,26 +243,14 @@ int WasmExecutionFuzzer::FuzzWasmModule(const uint8_t* data, size_t size,
FlagScope<bool> no_liftoff(&FLAG_liftoff, false);
compiled_module = SyncCompile(i_isolate, &interpreter_thrower, wire_bytes);
}
- // Clear the flag so that the WebAssembly code is not printed twice.
- FLAG_wasm_code_fuzzer_gen_test = false;
bool compiles = !compiled_module.is_null();
- if (generate_test) {
- OFStream os(stdout);
- os << " ])" << std::endl
- << " .exportFunc();" << std::endl;
- if (compiles) {
- os << " var module = builder.instantiate();" << std::endl
- << " module.exports.test(1, 2, 3);" << std::endl;
- } else {
- OFStream os(stdout);
- os << " assertThrows(function() { builder.instantiate(); });"
- << std::endl;
- }
- os << "})();" << std::endl;
+ if (FLAG_wasm_fuzzer_gen_test) {
+ GenerateTestCase(i_isolate, wire_bytes, compiles);
}
- bool validates = SyncValidate(i_isolate, wire_bytes);
+ bool validates =
+ i_isolate->wasm_engine()->SyncValidate(i_isolate, wire_bytes);
CHECK_EQ(compiles, validates);
CHECK_IMPLIES(require_valid, validates);
@@ -198,7 +281,7 @@ int WasmExecutionFuzzer::FuzzWasmModule(const uint8_t* data, size_t size,
}
bool expect_exception =
- result_interpreter == static_cast<int32_t>(0xdeadbeef);
+ result_interpreter == static_cast<int32_t>(0xDEADBEEF);
int32_t result_turbofan;
{
diff --git a/deps/v8/test/inspector/debugger/async-chains-expected.txt b/deps/v8/test/inspector/debugger/async-chains-expected.txt
new file mode 100644
index 0000000000..e5a57fdb61
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/async-chains-expected.txt
@@ -0,0 +1,152 @@
+Tests different combinations of async stacks in chains.
+Regular
+userFunction (test.js:1:36)
+-- inner async --
+runWithRegular (utils.js:2:12)
+inner (test.js:2:28)
+runWithNone (utils.js:27:2)
+(anonymous) (test.js:3:10)
+
+Regular - Regular
+userFunction (test.js:1:36)
+-- inner async --
+runWithRegular (utils.js:2:12)
+inner (test.js:2:28)
+-- outer async --
+runWithRegular (utils.js:2:12)
+(anonymous) (test.js:3:10)
+
+Regular - EmptyName
+userFunction (test.js:1:36)
+-- inner async --
+runWithRegular (utils.js:2:12)
+inner (test.js:2:28)
+-- <empty> --
+runWithEmptyName (utils.js:6:12)
+(anonymous) (test.js:3:10)
+
+Regular - EmptyStack
+userFunction (test.js:1:36)
+-- inner async --
+runWithRegular (utils.js:2:12)
+inner (test.js:2:28)
+
+Regular - EmptyNameEmptyStack
+userFunction (test.js:1:36)
+-- inner async --
+runWithRegular (utils.js:2:12)
+inner (test.js:2:28)
+
+Regular - External
+userFunction (test.js:1:36)
+-- inner async --
+runWithRegular (utils.js:2:12)
+inner (test.js:2:28)
+runWithRegular (utils.js:21:4)
+ <external stack>
+
+EmptyName
+userFunction (test.js:1:36)
+-- <empty> --
+runWithEmptyName (utils.js:6:12)
+inner (test.js:2:28)
+runWithNone (utils.js:27:2)
+(anonymous) (test.js:3:10)
+
+EmptyName - Regular
+userFunction (test.js:1:36)
+-- <empty> --
+runWithEmptyName (utils.js:6:12)
+inner (test.js:2:28)
+-- outer async --
+runWithRegular (utils.js:2:12)
+(anonymous) (test.js:3:10)
+
+EmptyName - EmptyName
+userFunction (test.js:1:36)
+-- <empty> --
+runWithEmptyName (utils.js:6:12)
+inner (test.js:2:28)
+-- <empty> --
+runWithEmptyName (utils.js:6:12)
+(anonymous) (test.js:3:10)
+
+EmptyName - EmptyStack
+userFunction (test.js:1:36)
+-- <empty> --
+runWithEmptyName (utils.js:6:12)
+inner (test.js:2:28)
+
+EmptyName - EmptyNameEmptyStack
+userFunction (test.js:1:36)
+-- <empty> --
+runWithEmptyName (utils.js:6:12)
+inner (test.js:2:28)
+
+EmptyName - External
+userFunction (test.js:1:36)
+-- <empty> --
+runWithEmptyName (utils.js:6:12)
+inner (test.js:2:28)
+runWithRegular (utils.js:21:4)
+ <external stack>
+
+EmptyStack
+userFunction (test.js:1:36)
+
+EmptyStack - Regular
+userFunction (test.js:1:36)
+-- inner async --
+-- outer async --
+runWithRegular (utils.js:2:12)
+(anonymous) (test.js:3:10)
+
+EmptyStack - EmptyName
+userFunction (test.js:1:36)
+-- inner async --
+-- <empty> --
+runWithEmptyName (utils.js:6:12)
+(anonymous) (test.js:3:10)
+
+EmptyStack - EmptyStack
+userFunction (test.js:1:36)
+
+EmptyStack - EmptyNameEmptyStack
+userFunction (test.js:1:36)
+
+EmptyStack - External
+userFunction (test.js:1:36)
+-- inner async --
+ <external stack>
+
+EmptyNameEmptyStack
+userFunction (test.js:1:36)
+
+EmptyNameEmptyStack - Regular
+userFunction (test.js:1:36)
+-- outer async --
+runWithRegular (utils.js:2:12)
+(anonymous) (test.js:3:10)
+
+EmptyNameEmptyStack - EmptyName
+userFunction (test.js:1:36)
+-- <empty> --
+runWithEmptyName (utils.js:6:12)
+(anonymous) (test.js:3:10)
+
+EmptyNameEmptyStack - EmptyStack
+userFunction (test.js:1:36)
+
+EmptyNameEmptyStack - EmptyNameEmptyStack
+userFunction (test.js:1:36)
+
+EmptyNameEmptyStack - External
+userFunction (test.js:1:36)
+-- <empty> --
+ <external stack>
+
+External
+userFunction (test.js:1:36)
+runWithRegular (utils.js:21:4)
+ <external stack>
+
diff --git a/deps/v8/test/inspector/debugger/async-chains.js b/deps/v8/test/inspector/debugger/async-chains.js
new file mode 100644
index 0000000000..766003719e
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/async-chains.js
@@ -0,0 +1,77 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} = InspectorTest.start(
+ 'Tests different combinations of async stacks in chains.');
+
+contextGroup.addScript(`
+function runWithRegular(f, name) {
+ inspector.scheduleWithAsyncStack(f, name, false);
+}
+
+function runWithEmptyName(f) {
+ inspector.scheduleWithAsyncStack(f, '', false);
+}
+
+function runWithEmptyStack(f, name) {
+ inspector.scheduleWithAsyncStack(f, name, true);
+}
+
+function runWithEmptyNameEmptyStack(f) {
+ inspector.scheduleWithAsyncStack(f, '', true);
+}
+
+function runWithExternal(f) {
+ const id = inspector.storeCurrentStackTrace('external');
+ runWithRegular(() => {
+ inspector.externalAsyncTaskStarted(id);
+ f();
+ inspector.externalAsyncTaskFinished(id);
+ }, 'not-used-async');
+}
+
+function runWithNone(f) {
+ f();
+}
+//# sourceURL=utils.js`);
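// The runWithExternal helper above drives the inspector's external async
// task API: storeCurrentStackTrace() captures the current stack under a
// label and returns an id, while externalAsyncTaskStarted(id) and
// externalAsyncTaskFinished(id) bracket the code that should report the
// stored stack as its async parent (rendered as "<external stack>" in the
// expected output).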
+
+session.setupScriptMap();
+(async function test() {
+ Protocol.Debugger.enable();
+ Protocol.Debugger.setAsyncCallStackDepth({maxDepth: 128});
+
+ const first = ['Regular', 'EmptyName', 'EmptyStack', 'EmptyNameEmptyStack', 'External']
+ const second = ['None', 'Regular', 'EmptyName', 'EmptyStack', 'EmptyNameEmptyStack', 'External']
+
+ for (const stack1 of first) {
+ for (const stack2 of second) {
+ if (stack1 === 'External' && stack2 !== 'None') continue;
+
+ InspectorTest.log(stack2 === 'None' ? stack1 : `${stack1} - ${stack2}`);
+ Protocol.Runtime.evaluate({
+ expression: `
+ var userFunction = () => {debugger};
+ var inner = () => runWith${stack1}(userFunction, 'inner async');
+ runWith${stack2}(inner, 'outer async');
+ //# sourceURL=test.js`
+ });
+ await pauseAndDumpStack();
+ }
+ }
+
+ await Protocol.Debugger.disable();
+ InspectorTest.completeTest();
+})();
+
+async function pauseAndDumpStack() {
+ const {params:{callFrames, asyncStackTrace, asyncStackTraceId}}
+ = await Protocol.Debugger.oncePaused();
+ session.logCallFrames(callFrames);
+ if (asyncStackTrace)
+ session.logAsyncStackTrace(asyncStackTrace);
+ if (asyncStackTraceId)
+ InspectorTest.log(' <external stack>');
+ InspectorTest.log('');
+ return Protocol.Debugger.resume();
+}
diff --git a/deps/v8/test/inspector/debugger/async-for-await-of-promise-stack.js b/deps/v8/test/inspector/debugger/async-for-await-of-promise-stack.js
index 6a2f4ce972..c9df677cbd 100644
--- a/deps/v8/test/inspector/debugger/async-for-await-of-promise-stack.js
+++ b/deps/v8/test/inspector/debugger/async-for-await-of-promise-stack.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-async-iteration
-
let {session, contextGroup, Protocol} = InspectorTest.start('Checks that async chains for for-await-of are correct.');
contextGroup.addScript(`
diff --git a/deps/v8/test/inspector/debugger/evaluate-on-call-frame-in-module-expected.txt b/deps/v8/test/inspector/debugger/evaluate-on-call-frame-in-module-expected.txt
index e4819bd6d1..d2b36e3489 100644
--- a/deps/v8/test/inspector/debugger/evaluate-on-call-frame-in-module-expected.txt
+++ b/deps/v8/test/inspector/debugger/evaluate-on-call-frame-in-module-expected.txt
@@ -12,9 +12,8 @@ local:foo1
module
[
[0] : a1 = 10
- [1] : g1 = 1
- [2] : b1 = 11
- [3] : foo1 = function foo1() { let c1 = 12; let g1 = 2; debugger; return a1 + b1 + c1 + g1; }
+ [1] : b1 = 11
+ [2] : foo1 = function foo1() { let c1 = 12; let g1 = 2; debugger; return a1 + b1 + c1 + g1; }
]
global
[
@@ -180,9 +179,8 @@ foo2 =
}
module
[
- [0] : a3 = 30
- [1] : foo2 = function foo2() { let c2 = 22; return foo1() + a2 + b2 + c2; }
- [2] : b3 = 31
+ [0] : foo2 = function foo2() { let c2 = 22; return foo1() + a2 + b2 + c2; }
+ [1] : b3 = 31
]
global
[
@@ -200,20 +198,6 @@ Array =
objectId : <objectId>
type : function
}
-a3 =
-{
- description : 30
- type : number
- value : 30
-}
-Evaluating: ++a3
-updated a3 =
-{
- description : 31
- type : number
- value : 31
-}
-Evaluating: --a3
foo2 =
{
className : Function
@@ -247,12 +231,6 @@ closure:bar
[
[0] : a = 0
]
-module
-[
- [0] : a = 1
- [1] : b = 2
- [2] : bar = function bar() { let a = 0; (() => {a; debugger;})(); }
-]
global
[
...
@@ -283,37 +261,10 @@ updated a =
value : 1
}
Evaluating: --a
-b =
-{
- description : 2
- type : number
- value : 2
-}
-Evaluating: ++b
-updated b =
-{
- description : 3
- type : number
- value : 3
-}
-Evaluating: --b
-bar =
-{
- className : Function
- description : function bar() { let a = 0; (() => {a; debugger;})(); }
- objectId : <objectId>
- type : function
-}
local:bar
[
[0] : a = 0
]
-module
-[
- [0] : a = 1
- [1] : b = 2
- [2] : bar = function bar() { let a = 0; (() => {a; debugger;})(); }
-]
global
[
...
@@ -344,33 +295,6 @@ updated a =
value : 1
}
Evaluating: --a
-b =
-{
- description : 2
- type : number
- value : 2
-}
-Evaluating: ++b
-updated b =
-{
- description : 3
- type : number
- value : 3
-}
-Evaluating: --b
-bar =
-{
- className : Function
- description : function bar() { let a = 0; (() => {a; debugger;})(); }
- objectId : <objectId>
- type : function
-}
-module
-[
- [0] : a = 1
- [1] : b = 2
- [2] : bar = function bar() { let a = 0; (() => {a; debugger;})(); }
-]
global
[
...
@@ -386,41 +310,6 @@ Array =
objectId : <objectId>
type : function
}
-a =
-{
- description : 1
- type : number
- value : 1
-}
-Evaluating: ++a
-updated a =
-{
- description : 2
- type : number
- value : 2
-}
-Evaluating: --a
-b =
-{
- description : 2
- type : number
- value : 2
-}
-Evaluating: ++b
-updated b =
-{
- description : 3
- type : number
- value : 3
-}
-Evaluating: --b
-bar =
-{
- className : Function
- description : function bar() { let a = 0; (() => {a; debugger;})(); }
- objectId : <objectId>
- type : function
-}
Running test: testDifferentModuleVariables
(anonymous) (module5:5:0)
@@ -503,3 +392,112 @@ updated c =
value : 1
}
Evaluating: --c
+
+Running test: testCapturedLocalVariable
+(anonymous) (module6:2:25)
+(anonymous) (module6:2:37)
+local
+[
+ [0] : y = 5
+]
+module
+[
+ [0] : x = 5
+]
+global
+[
+ ...
+]
+Check variables in frame#0
+let x = 5;
+(function() { let y = x; #debugger; })()
+
+
+Array =
+{
+ className : Function
+ description : function Array() { [native code] }
+ objectId : <objectId>
+ type : function
+}
+y =
+{
+ description : 5
+ type : number
+ value : 5
+}
+Evaluating: ++y
+updated y =
+{
+ description : 6
+ type : number
+ value : 6
+}
+Evaluating: --y
+x =
+{
+ description : 5
+ type : number
+ value : 5
+}
+Evaluating: ++x
+updated x =
+{
+ description : 6
+ type : number
+ value : 6
+}
+Evaluating: --x
+module
+[
+ [0] : x = 5
+]
+global
+[
+ ...
+]
+Check variables in frame#1
+let x = 5;
+(function() { let y = x; debugger; })#()
+
+
+Array =
+{
+ className : Function
+ description : function Array() { [native code] }
+ objectId : <objectId>
+ type : function
+}
+x =
+{
+ description : 5
+ type : number
+ value : 5
+}
+Evaluating: ++x
+updated x =
+{
+ description : 6
+ type : number
+ value : 6
+}
+Evaluating: --x
+
+Running test: testLocalVariableToplevel
+(anonymous) (module7:2:0)
+global
+[
+ ...
+]
+Check variables in frame#0
+let x = 5;
+#debugger;
+
+
+Array =
+{
+ className : Function
+ description : function Array() { [native code] }
+ objectId : <objectId>
+ type : function
+}
diff --git a/deps/v8/test/inspector/debugger/evaluate-on-call-frame-in-module.js b/deps/v8/test/inspector/debugger/evaluate-on-call-frame-in-module.js
index ef9050ec48..e656646cda 100644
--- a/deps/v8/test/inspector/debugger/evaluate-on-call-frame-in-module.js
+++ b/deps/v8/test/inspector/debugger/evaluate-on-call-frame-in-module.js
@@ -49,6 +49,16 @@ export var c = 0;
debugger;
`;
+var module6 = `
+let x = 5;
+(function() { let y = x; debugger; })()
+`;
+
+var module7 = `
+let x = 5;
+debugger;
+`;
+
InspectorTest.runAsyncTestSuite([
async function testTotal() {
session.setupScriptMap();
@@ -82,6 +92,26 @@ InspectorTest.runAsyncTestSuite([
await checkFrame(callFrames[i], i);
}
await Protocol.Debugger.resume();
+ },
+
+ async function testCapturedLocalVariable() {
+ contextGroup.addModule(module6, 'module6');
+ let {params:{callFrames}} = (await Protocol.Debugger.oncePaused());
+ session.logCallFrames(callFrames);
+ for (let i = 0; i < callFrames.length; ++i) {
+ await checkFrame(callFrames[i], i);
+ }
+ await Protocol.Debugger.resume();
+ },
+
+ async function testLocalVariableToplevel() {
+ contextGroup.addModule(module7, 'module7');
+ let {params:{callFrames}} = (await Protocol.Debugger.oncePaused());
+ session.logCallFrames(callFrames);
+ for (let i = 0; i < callFrames.length; ++i) {
+ await checkFrame(callFrames[i], i);
+ }
+ await Protocol.Debugger.resume();
}
]);
diff --git a/deps/v8/test/inspector/debugger/external-stack-trace.js b/deps/v8/test/inspector/debugger/external-stack-trace.js
index c8392e28c7..0b5c084e02 100644
--- a/deps/v8/test/inspector/debugger/external-stack-trace.js
+++ b/deps/v8/test/inspector/debugger/external-stack-trace.js
@@ -119,7 +119,6 @@ InspectorTest.runAsyncTestSuite([
},
async function testExternalStacks() {
-
let debuggerId1 = (await Protocol1.Debugger.enable()).result.debuggerId;
let debuggerId2 = (await Protocol2.Debugger.enable()).result.debuggerId;
Protocol1.Debugger.setAsyncCallStackDepth({maxDepth: 32});
diff --git a/deps/v8/test/inspector/debugger/for-of-loops-expected.txt b/deps/v8/test/inspector/debugger/for-of-loops-expected.txt
index 6d23c941cb..bc1bc5ad6a 100644
--- a/deps/v8/test/inspector/debugger/for-of-loops-expected.txt
+++ b/deps/v8/test/inspector/debugger/for-of-loops-expected.txt
@@ -9,10 +9,10 @@ function testFunction() {
var arr = |_|[1];
var all = |_|[];
for (var |_|k in |_|arr) { all.|C|push(k); }
- for (var |_|k of |_|arr) { all.|C|push(k); }
+ for (var |C|k of |_|arr) { all.|C|push(k); }
for (var |_|k in |_|obj) { all.|C|push(k); }
for (let |_|k in |_|arr) { all.|C|push(k); }
- for (let |_|k of |_|arr) { all.|C|push(k); }
+ for (let |C|k of |_|arr) { all.|C|push(k); }
for (let |_|k in |_|obj) { all.|C|push(k); }
var iterable = |_|{
@@ -28,9 +28,9 @@ function testFunction() {
};|R|
}
};
- for (var |_|k of |_|iterable) { all.|C|push(k); }
+ for (var |C|k of |_|iterable) { all.|C|push(k); }
|_|iterable.i = 0;
- for (let |_|k of |_|iterable) { all.|C|push(k); }
+ for (let |C|k of |_|iterable) { all.|C|push(k); }
|R|}
(anonymous) (expr.js:0:0)
diff --git a/deps/v8/test/inspector/debugger/get-possible-breakpoints-master-expected.txt b/deps/v8/test/inspector/debugger/get-possible-breakpoints-master-expected.txt
index 7ea0920182..c304fe677b 100644
--- a/deps/v8/test/inspector/debugger/get-possible-breakpoints-master-expected.txt
+++ b/deps/v8/test/inspector/debugger/get-possible-breakpoints-master-expected.txt
@@ -95,10 +95,10 @@ function testForLoop() {
|R|}
function testForOfLoop() {
- for (var |_|k of |_|[]) {}
- for (var |_|k of |_|[1]) |_|k;
+ for (var |C|k of |_|[]) {}
+ for (var |C|k of |_|[1]) |_|k;
var a = |_|[];
- for (var |_|k of |_|a) {}
+ for (var |C|k of |_|a) {}
|R|}
function testForInLoop() {
@@ -116,7 +116,7 @@ function testSimpleExpressions() {
|_|a--;
|R|}
-Object.defineProperty(this, 'getterFoo', {
+|_|Object.|C|defineProperty(this, 'getterFoo', {
get: () => |_|return42|R|
});
@@ -124,7 +124,7 @@ function testGetter() {
|C|getterFoo();
|R|}
-var obj = {
+var obj = |_|{
foo: () => (|_|{
boo: () => |_|return42|R|
})|R|
@@ -260,7 +260,7 @@ async function testPromiseComplex() {
|R|}
function twiceDefined() {
- return a + b;
+ |_|return a + b;|R|
}
function twiceDefined() {
diff --git a/deps/v8/test/inspector/debugger/pause-on-async-call-expected.txt b/deps/v8/test/inspector/debugger/pause-on-async-call-expected.txt
index a835810e7a..709eea895a 100644
--- a/deps/v8/test/inspector/debugger/pause-on-async-call-expected.txt
+++ b/deps/v8/test/inspector/debugger/pause-on-async-call-expected.txt
@@ -206,3 +206,81 @@ paused at:
createPromise().then(#v => v * 2);
}
+
+Running test: testWithBlackboxedCode
+paused at:
+function testBlackboxedCreatePromise() {
+ #debugger;
+ createPromise().then(v => v * 2);
+
+paused at:
+ debugger;
+ #createPromise().then(v => v * 2);
+}
+
+paused at:
+ debugger;
+ createPromise().#then(v => v * 2);
+}
+
+asyncCallStackTraceId is set
+
+paused at:
+ debugger;
+ createPromise().then(#v => v * 2);
+}
+
+
+Running test: testAsyncFunction
+paused at:
+async function testAsyncFunction() {
+ #debugger;
+ foo();
+
+paused at:
+ debugger;
+ #foo();
+ await foo();
+
+paused at:
+ async function foo() {
+ #return 42;
+ }
+
+paused at:
+ foo();
+ await #foo();
+ foo().then(boo);
+
+paused at:
+ async function foo() {
+ #return 42;
+ }
+
+paused at:
+ await foo();
+ #foo().then(boo);
+
+
+paused at:
+ async function foo() {
+ #return 42;
+ }
+
+paused at:
+ await foo();
+ foo().#then(boo);
+
+
+paused at:
+ await foo();
+ foo().#then(boo);
+
+
+asyncCallStackTraceId is set
+
+paused at:
+
+ function boo#() {
+ }
+
diff --git a/deps/v8/test/inspector/debugger/pause-on-async-call.js b/deps/v8/test/inspector/debugger/pause-on-async-call.js
index ef29905849..7245824f66 100644
--- a/deps/v8/test/inspector/debugger/pause-on-async-call.js
+++ b/deps/v8/test/inspector/debugger/pause-on-async-call.js
@@ -45,6 +45,20 @@ function testBlackboxedCreatePromise() {
debugger;
createPromise().then(v => v * 2);
}
+
+async function testAsyncFunction() {
+ debugger;
+ foo();
+ await foo();
+ foo().then(boo);
+
+ async function foo() {
+ return 42;
+ }
+
+ function boo() {
+ }
+}
//# sourceURL=test.js`);
contextGroup.addScript(`
@@ -168,6 +182,52 @@ InspectorTest.runAsyncTestSuite([
Protocol.Debugger.resume();
await waitPauseAndDumpLocation();
await Protocol.Debugger.resume();
+ },
+
+ async function testWithBlackboxedCode() {
+ Protocol.Runtime.evaluate({expression: 'testBlackboxedCreatePromise()'});
+ await waitPauseAndDumpLocation();
+ Protocol.Debugger.stepOver();
+ await waitPauseAndDumpLocation();
+ await Protocol.Debugger.setBlackboxPatterns({patterns: ['framework\.js']});
+ Protocol.Debugger.stepInto({breakOnAsyncCall: true});
+ let parentStackTraceId = await waitPauseAndDumpLocation();
+ Protocol.Debugger.pauseOnAsyncCall({parentStackTraceId});
+ Protocol.Debugger.resume();
+ await waitPauseAndDumpLocation();
+ await Protocol.Debugger.resume();
+ },
+
+ async function testAsyncFunction() {
+ Protocol.Runtime.evaluate({expression: 'testAsyncFunction()'});
+ await waitPauseAndDumpLocation();
+ Protocol.Debugger.stepOver();
+ await waitPauseAndDumpLocation();
+ Protocol.Debugger.stepInto({breakOnAsyncCall: true});
+ let parentStackTraceId = await waitPauseAndDumpLocation();
+ if (parentStackTraceId)
+ InspectorTest.log(
+ 'ERROR: we should not report parent stack trace id on async call');
+ Protocol.Debugger.stepOut();
+ await waitPauseAndDumpLocation();
+ Protocol.Debugger.stepInto({breakOnAsyncCall: true});
+ parentStackTraceId = await waitPauseAndDumpLocation();
+ if (parentStackTraceId)
+ InspectorTest.log(
+ 'ERROR: we should not report parent stack trace id on async call');
+ Protocol.Debugger.stepOut();
+ await waitPauseAndDumpLocation();
+ Protocol.Debugger.stepInto({breakOnAsyncCall: true});
+ await waitPauseAndDumpLocation();
+ Protocol.Debugger.stepOut();
+ await waitPauseAndDumpLocation();
+ Protocol.Debugger.stepInto({breakOnAsyncCall: true});
+ parentStackTraceId = await waitPauseAndDumpLocation();
+ Protocol.Debugger.pauseOnAsyncCall({parentStackTraceId});
+ Protocol.Debugger.resume();
+ await waitPauseAndDumpLocation();
+
+ await Protocol.Debugger.resume();
}
]);
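For reference, the break-on-async-call handshake these tests drive reduces to the following minimal sketch (inside an async function, with the Protocol wrapper from protocol-test.js):

Protocol.Runtime.evaluate({expression: 'testAsyncFunction()'});
await Protocol.Debugger.oncePaused();                  // paused at `debugger;`
Protocol.Debugger.stepInto({breakOnAsyncCall: true});  // step that may schedule a task
const {params} = await Protocol.Debugger.oncePaused();
// params.asyncCallStackTraceId is present only when the step actually
// scheduled an async task (e.g. `.then(...)`); a plain `foo()` or the
// synchronous half of `await foo()` reports none, which is what the
// ERROR branches above check.
Protocol.Debugger.pauseOnAsyncCall({parentStackTraceId: params.asyncCallStackTraceId});
Protocol.Debugger.resume();            // run until the scheduled task starts
await Protocol.Debugger.oncePaused();  // now paused inside the async callee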
diff --git a/deps/v8/test/inspector/debugger/return-break-locations-expected.txt b/deps/v8/test/inspector/debugger/return-break-locations-expected.txt
index 688faf89c3..4b7d4872a6 100644
--- a/deps/v8/test/inspector/debugger/return-break-locations-expected.txt
+++ b/deps/v8/test/inspector/debugger/return-break-locations-expected.txt
@@ -26,4 +26,10 @@ Running test: testTailCall
scriptId : <scriptId>
type : return
}
+ [4] : {
+ columnNumber : 1
+ lineNumber : 6
+ scriptId : <scriptId>
+ type : return
+ }
]
diff --git a/deps/v8/test/inspector/debugger/set-breakpoint-ignore-hint-when-no-location-expected.txt b/deps/v8/test/inspector/debugger/set-breakpoint-ignore-hint-when-no-location-expected.txt
new file mode 100644
index 0000000000..ad51391e13
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/set-breakpoint-ignore-hint-when-no-location-expected.txt
@@ -0,0 +1,11 @@
+Tests setting a breakpoint when two scripts have the same URL.
+{
+ breakpointId : <breakpointId>
+ locations : [
+ [0] : {
+ columnNumber : 2
+ lineNumber : 2
+ scriptId : <scriptId>
+ }
+ ]
+}
diff --git a/deps/v8/test/inspector/debugger/set-breakpoint-ignore-hint-when-no-location.js b/deps/v8/test/inspector/debugger/set-breakpoint-ignore-hint-when-no-location.js
new file mode 100644
index 0000000000..d72d31b799
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/set-breakpoint-ignore-hint-when-no-location.js
@@ -0,0 +1,27 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} = InspectorTest.start(
+  'Tests setting a breakpoint when two scripts have the same URL.');
+
+// Order of addScript is important!
+contextGroup.addScript(`
+function boo() {
+ return 42;
+}
+function foo() {}
+`, 0, 0, 'test.js');
+
+contextGroup.addScript(`function foo() {}`, 15, 0, 'test.js');
+
+(async function test() {
+ await Protocol.Debugger.enable();
+ let {result} = await Protocol.Debugger.setBreakpointByUrl({
+ url: 'test.js',
+ lineNumber: 2,
+ columnNumber: 2
+ });
+ InspectorTest.logMessage(result);
+ InspectorTest.completeTest();
+})();
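The point of this test is breakpoint resolution across duplicate URLs: both scripts are registered as 'test.js', but only the first contains the requested position, so the hint from the other script must be ignored. In short:

// Only the first 'test.js' script has a statement at line 2, column 2,
// so the breakpoint resolves to exactly one location there (see the
// expected output above: a single entry in `locations`).
const {result} = await Protocol.Debugger.setBreakpointByUrl(
    {url: 'test.js', lineNumber: 2, columnNumber: 2});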
diff --git a/deps/v8/test/inspector/debugger/step-into-external-async-task-same-context-expected.txt b/deps/v8/test/inspector/debugger/step-into-external-async-task-same-context-expected.txt
new file mode 100644
index 0000000000..e6ab816810
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/step-into-external-async-task-same-context-expected.txt
@@ -0,0 +1,14 @@
+Test for step-into remote async task.
+Setup debugger agents..
+Pause before stack trace is captured..
+Run stepInto with breakOnAsyncCall flag
+Call pauseOnAsyncCall
+Trigger external async task on the same context group
+Dump stack trace
+boo (target.js:1:18)
+call (framework.js:3:2)
+(anonymous) (target.js:0:0)
+-- remote-task --
+store (utils.js:2:25)
+foo (source.js:1:13)
+(anonymous) (source.js:2:6)
diff --git a/deps/v8/test/inspector/debugger/step-into-external-async-task-same-context.js b/deps/v8/test/inspector/debugger/step-into-external-async-task-same-context.js
new file mode 100644
index 0000000000..fec786422e
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/step-into-external-async-task-same-context.js
@@ -0,0 +1,81 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} =
+ InspectorTest.start('Test for step-into remote async task.');
+
+contextGroup.addScript(`
+function store(description) {
+ let buffer = inspector.storeCurrentStackTrace(description);
+ return '[' + new Int32Array(buffer).join(',') + ']';
+}
+//# sourceURL=utils.js`);
+
+contextGroup.addScript(`
+function call(id, f) {
+ inspector.externalAsyncTaskStarted(Int32Array.from(JSON.parse(id)).buffer);
+ f();
+ inspector.externalAsyncTaskFinished(Int32Array.from(JSON.parse(id)).buffer);
+}
+//# sourceURL=framework.js`);
+
+session.setupScriptMap();
+
+(async function test() {
+ InspectorTest.log('Setup debugger agents..');
+ let debuggerId = (await Protocol.Debugger.enable()).result.debuggerId;
+
+ Protocol.Debugger.setAsyncCallStackDepth({maxDepth: 128});
+ Protocol.Debugger.setBlackboxPatterns({patterns: ['framework\.js']});
+
+ InspectorTest.log('Pause before stack trace is captured..');
+ Protocol.Debugger.setBreakpointByUrl(
+ {lineNumber: 2, columnNumber: 25, url: 'utils.js'});
+ let evaluatePromise = Protocol.Runtime.evaluate({
+ expression: `(function foo() {
+ return store('remote-task');
+ })()
+ //# sourceURL=source.js`
+ });
+ await Protocol.Debugger.oncePaused();
+
+ InspectorTest.log('Run stepInto with breakOnAsyncCall flag');
+ Protocol.Debugger.stepInto({breakOnAsyncCall: true});
+ let {params: {asyncCallStackTraceId}} = await Protocol.Debugger.oncePaused();
+
+ InspectorTest.log('Call pauseOnAsyncCall');
+ Protocol.Debugger.pauseOnAsyncCall({
+ parentStackTraceId: asyncCallStackTraceId,
+ });
+ Protocol.Debugger.resume();
+
+  InspectorTest.log('Trigger external async task on the same context group');
+ let stackTraceId = (await evaluatePromise).result.result.value;
+ Protocol.Runtime.evaluate({
+ expression: `call('${stackTraceId}',
+ function boo() {})
+ //# sourceURL=target.js`
+ });
+
+ InspectorTest.log('Dump stack trace');
+ let {params: {callFrames, asyncStackTraceId}} =
+ await Protocol.Debugger.oncePaused();
+ while (true) {
+ session.logCallFrames(callFrames);
+ if (asyncStackTraceId) {
+ let {result: {stackTrace}} = await Protocol.Debugger.getStackTrace(
+ {stackTraceId: asyncStackTraceId});
+ InspectorTest.log(`-- ${stackTrace.description} --`);
+ callFrames = stackTrace.callFrames;
+ asyncStackTraceId = stackTrace.parentId;
+ } else {
+ break;
+ }
+ }
+
+ Protocol.Debugger.setAsyncCallStackDepth({maxDepth: 0});
+ await Protocol.Debugger.disable();
+
+ InspectorTest.completeTest();
+})()
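The id round-trip above deserves spelling out. The `inspector.*` functions are test-harness bindings (see inspector-test.cc below), not production API; the stack-trace id is an opaque ArrayBuffer that survives serialization to a string:

// Harness-only bindings; `runTask` is a hypothetical task body.
const buffer = inspector.storeCurrentStackTrace('remote-task');  // capture parent
const id = '[' + new Int32Array(buffer).join(',') + ']';         // buffer -> string
const raw = Int32Array.from(JSON.parse(id)).buffer;              // string -> buffer
inspector.externalAsyncTaskStarted(raw);   // frames below get 'remote-task'
runTask();                                 // ...as their external async parent
inspector.externalAsyncTaskFinished(raw);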
diff --git a/deps/v8/test/inspector/debugger/wasm-scripts-expected.txt b/deps/v8/test/inspector/debugger/wasm-scripts-expected.txt
index 5d23605566..0afcc861c4 100644
--- a/deps/v8/test/inspector/debugger/wasm-scripts-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-scripts-expected.txt
@@ -1,9 +1,10 @@
Tests how wasm scripts are reported
Check that inspector gets two wasm scripts at module creation time.
Script #0 parsed. URL: v8://test/testFunction
-Script #1 parsed. URL: v8://test/runTestRunction
-Script #2 parsed. URL: wasm://wasm/wasm-7b04570e/wasm-7b04570e-0
-Script #3 parsed. URL: wasm://wasm/wasm-7b04570e/wasm-7b04570e-1
+Script #1 parsed. URL:
+Script #2 parsed. URL: v8://test/runTestRunction
+Script #3 parsed. URL: wasm://wasm/wasm-7b04570e/wasm-7b04570e-0
+Script #4 parsed. URL: wasm://wasm/wasm-7b04570e/wasm-7b04570e-1
Source for wasm://wasm/wasm-7b04570e/wasm-7b04570e-0:
func $nopFunction
nop
diff --git a/deps/v8/test/inspector/inspector-test.cc b/deps/v8/test/inspector/inspector-test.cc
index 56c7431af6..04a23df528 100644
--- a/deps/v8/test/inspector/inspector-test.cc
+++ b/deps/v8/test/inspector/inspector-test.cc
@@ -175,7 +175,8 @@ class SendMessageToBackendTask : public TaskRunner::Task {
v8::internal::Vector<uint16_t> message_;
};
-void RunAsyncTask(TaskRunner* task_runner, const char* task_name,
+void RunAsyncTask(TaskRunner* task_runner,
+ const v8_inspector::StringView& task_name,
TaskRunner::Task* task) {
class AsyncTask : public TaskRunner::Task {
public:
@@ -193,10 +194,7 @@ void RunAsyncTask(TaskRunner* task_runner, const char* task_name,
DISALLOW_COPY_AND_ASSIGN(AsyncTask);
};
- task_runner->data()->AsyncTaskScheduled(
- v8_inspector::StringView(reinterpret_cast<const uint8_t*>(task_name),
- strlen(task_name)),
- task, false);
+ task_runner->data()->AsyncTaskScheduled(task_name, task, false);
task_runner->Append(new AsyncTask(task));
}
@@ -626,13 +624,16 @@ class SetTimeoutExtension : public IsolateData::SetupGlobalTask {
v8::Local<v8::Context> context = isolate->GetCurrentContext();
IsolateData* data = IsolateData::FromContext(context);
int context_group_id = data->GetContextGroupId(context);
+ const char* task_name = "setTimeout";
+ v8_inspector::StringView task_name_view(
+ reinterpret_cast<const uint8_t*>(task_name), strlen(task_name));
if (args[0]->IsFunction()) {
- RunAsyncTask(data->task_runner(), "setTimeout",
+ RunAsyncTask(data->task_runner(), task_name_view,
new SetTimeoutTask(context_group_id, isolate,
v8::Local<v8::Function>::Cast(args[0])));
} else {
RunAsyncTask(
- data->task_runner(), "setTimeout",
+ data->task_runner(), task_name_view,
new ExecuteStringTask(
context_group_id, ToVector(args[0].As<v8::String>()),
v8::String::Empty(isolate), v8::Integer::New(isolate, 0),
@@ -703,6 +704,9 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
ToV8String(isolate, "externalAsyncTaskFinished"),
v8::FunctionTemplate::New(
isolate, &InspectorExtension::ExternalAsyncTaskFinished));
+ inspector->Set(ToV8String(isolate, "scheduleWithAsyncStack"),
+ v8::FunctionTemplate::New(
+ isolate, &InspectorExtension::ScheduleWithAsyncStack));
global->Set(ToV8String(isolate, "inspector"), inspector);
}
@@ -926,6 +930,33 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
args[0].As<v8::ArrayBuffer>()->GetContents().Data());
data->ExternalAsyncTaskFinished(*id);
}
+
+ static void ScheduleWithAsyncStack(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 3 || !args[0]->IsFunction() || !args[1]->IsString() ||
+ !args[2]->IsBoolean()) {
+ fprintf(stderr,
+ "Internal error: scheduleWithAsyncStack(function, "
+ "'task-name', with_empty_stack).");
+ Exit();
+ }
+ v8::Isolate* isolate = args.GetIsolate();
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ IsolateData* data = IsolateData::FromContext(context);
+ int context_group_id = data->GetContextGroupId(context);
+ bool with_empty_stack = args[2].As<v8::Boolean>()->Value();
+ if (with_empty_stack) context->Exit();
+
+ v8::internal::Vector<uint16_t> task_name =
+ ToVector(args[1].As<v8::String>());
+ v8_inspector::StringView task_name_view(task_name.start(),
+ task_name.length());
+
+ RunAsyncTask(data->task_runner(), task_name_view,
+ new SetTimeoutTask(context_group_id, isolate,
+ v8::Local<v8::Function>::Cast(args[0])));
+ if (with_empty_stack) context->Enter();
+ }
};
} // namespace
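The new `scheduleWithAsyncStack` binding gives tests a one-call way to schedule an async task with a recorded async parent, optionally from an empty synchronous stack. From JavaScript (harness only, signature per the error message above):

inspector.scheduleWithAsyncStack(
    function task() { /* runs later as the named async task */ },
    'task-name',
    /* with_empty_stack */ true);  // true: exit the context so the sync stack is empty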
diff --git a/deps/v8/test/inspector/protocol-test.js b/deps/v8/test/inspector/protocol-test.js
index 749aa3fecc..91f55e442a 100644
--- a/deps/v8/test/inspector/protocol-test.js
+++ b/deps/v8/test/inspector/protocol-test.js
@@ -301,13 +301,9 @@ InspectorTest.Session = class {
logAsyncStackTrace(asyncStackTrace) {
while (asyncStackTrace) {
- if (asyncStackTrace.promiseCreationFrame) {
- var frame = asyncStackTrace.promiseCreationFrame;
- InspectorTest.log(`-- ${asyncStackTrace.description} (${frame.url}:${frame.lineNumber}:${frame.columnNumber})--`);
- } else {
- InspectorTest.log(`-- ${asyncStackTrace.description} --`);
- }
+ InspectorTest.log(`-- ${asyncStackTrace.description || '<empty>'} --`);
this.logCallFrames(asyncStackTrace.callFrames);
+ if (asyncStackTrace.parentId) InspectorTest.log(' <external stack>');
asyncStackTrace = asyncStackTrace.parent;
}
}
diff --git a/deps/v8/test/inspector/runtime/console-methods-expected.txt b/deps/v8/test/inspector/runtime/console-methods-expected.txt
index 81c3c76813..fea4ca69e0 100644
--- a/deps/v8/test/inspector/runtime/console-methods-expected.txt
+++ b/deps/v8/test/inspector/runtime/console-methods-expected.txt
@@ -653,7 +653,7 @@ Checks console methods
args : [
[0] : {
type : string
- value : 1
+ value : default: 1
}
]
executionContextId : <executionContextId>
@@ -692,7 +692,7 @@ Checks console methods
args : [
[0] : {
type : string
- value : 2
+ value : default: 2
}
]
executionContextId : <executionContextId>
@@ -725,3 +725,99 @@ Checks console methods
type : count
}
}
+{
+ method : Runtime.consoleAPICalled
+ params : {
+ args : [
+ [0] : {
+ type : string
+ value : default: 3
+ }
+ ]
+ executionContextId : <executionContextId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 10
+ functionName : testFunction
+ lineNumber : 31
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [1] : {
+ columnNumber : 0
+ functionName :
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ }
+ timestamp : <timestamp>
+ type : count
+ }
+}
+{
+ method : Runtime.consoleAPICalled
+ params : {
+ args : [
+ [0] : {
+ type : string
+ value : default: 4
+ }
+ ]
+ executionContextId : <executionContextId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 10
+ functionName : testFunction
+ lineNumber : 32
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [1] : {
+ columnNumber : 0
+ functionName :
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ }
+ timestamp : <timestamp>
+ type : count
+ }
+}
+{
+ method : Runtime.consoleAPICalled
+ params : {
+ args : [
+ [0] : {
+ type : string
+ value : default: 5
+ }
+ ]
+ executionContextId : <executionContextId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 10
+ functionName : testFunction
+ lineNumber : 33
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [1] : {
+ columnNumber : 0
+ functionName :
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ }
+ timestamp : <timestamp>
+ type : count
+ }
+}
diff --git a/deps/v8/test/inspector/runtime/console-methods.js b/deps/v8/test/inspector/runtime/console-methods.js
index c9e0e1d89b..f2919a6bae 100644
--- a/deps/v8/test/inspector/runtime/console-methods.js
+++ b/deps/v8/test/inspector/runtime/console-methods.js
@@ -29,6 +29,9 @@ function testFunction() {
}
foo();
foo();
+ console.count();
+ console.count(undefined);
+ console.count('default');
}
//# sourceURL=test.js`, 7, 26);
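The expected-output change ("1" -> "default: 1") and the three new calls follow the Console spec's labeling rule: console.count() with no argument, or with undefined, counts under the label "default", so all five calls share one counter:

console.count();           // logs "default: 3" (foo() already counted 1 and 2)
console.count(undefined);  // logs "default: 4" (undefined maps to "default")
console.count('default');  // logs "default: 5" (explicit label, same counter)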
diff --git a/deps/v8/test/inspector/runtime/console-time-repeat-expected.txt b/deps/v8/test/inspector/runtime/console-time-repeat-expected.txt
new file mode 100644
index 0000000000..b675532adb
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/console-time-repeat-expected.txt
@@ -0,0 +1,76 @@
+Checks that repeated console.time calls do not reset the timer
+{
+ method : Runtime.consoleAPICalled
+ params : {
+ args : [
+ [0] : {
+ type : string
+ value : Timer 'a' already exists
+ }
+ ]
+ executionContextId : <executionContextId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 8
+ functionName :
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ }
+ timestamp : <timestamp>
+ type : warning
+ }
+}
+{
+ method : Runtime.consoleAPICalled
+ params : {
+ args : [
+ [0] : {
+ type : string
+ value : a: 2ms
+ }
+ ]
+ executionContextId : <executionContextId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 8
+ functionName :
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ }
+ timestamp : <timestamp>
+ type : timeEnd
+ }
+}
+{
+ method : Runtime.consoleAPICalled
+ params : {
+ args : [
+ [0] : {
+ type : string
+ value : Timer 'a' does not exist
+ }
+ ]
+ executionContextId : <executionContextId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 8
+ functionName :
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ }
+ timestamp : <timestamp>
+ type : warning
+ }
+}
diff --git a/deps/v8/test/inspector/runtime/console-time-repeat.js b/deps/v8/test/inspector/runtime/console-time-repeat.js
new file mode 100644
index 0000000000..f9a5f6ddf8
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/console-time-repeat.js
@@ -0,0 +1,20 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} = InspectorTest.start('Checks that repeated console.time calls do not reset the timer');
+
+Protocol.Runtime.onConsoleAPICalled(InspectorTest.logMessage);
+Protocol.Runtime.enable();
+(async function() {
+ utils.setCurrentTimeMSForTest(0.0);
+ await Protocol.Runtime.evaluate({expression: `console.time('a')`});
+ utils.setCurrentTimeMSForTest(1.0);
+ await Protocol.Runtime.evaluate({expression: `console.time('a')`});
+ utils.setCurrentTimeMSForTest(2.0);
+ await Protocol.Runtime.evaluate({expression: `console.timeEnd('a')`});
+ utils.setCurrentTimeMSForTest(5.0);
+ await Protocol.Runtime.evaluate({expression: `console.timeEnd('a')`});
+
+ InspectorTest.completeTest();
+})();
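With the injected clock, the expected output above decodes as:

utils.setCurrentTimeMSForTest(0.0);  // harness clock override, test-only
console.time('a');                   // starts timer 'a' at t=0
utils.setCurrentTimeMSForTest(1.0);
console.time('a');                   // warning: Timer 'a' already exists (start not reset)
utils.setCurrentTimeMSForTest(2.0);
console.timeEnd('a');                // logs "a: 2ms" -- measured from t=0, not t=1
utils.setCurrentTimeMSForTest(5.0);
console.timeEnd('a');                // warning: Timer 'a' does not exist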
diff --git a/deps/v8/test/inspector/runtime/evaluate-async-expected.txt b/deps/v8/test/inspector/runtime/evaluate-async-expected.txt
index 763b4295c7..ea47520ef8 100644
--- a/deps/v8/test/inspector/runtime/evaluate-async-expected.txt
+++ b/deps/v8/test/inspector/runtime/evaluate-async-expected.txt
@@ -43,7 +43,7 @@ Running test: testRejectedPromiseWithError
columnNumber : 11
exception : {
className : Error
- description : Error: MyError at foo (<anonymous>:13:11) at throwError (<anonymous>:15:3) at <anonymous>
+ description : Error: MyError at foo (<anonymous>:13:11) at throwError (<anonymous>:15:3)
objectId : <objectId>
subtype : error
type : object
@@ -58,14 +58,14 @@ Running test: testRejectedPromiseWithError
functionName : foo
lineNumber : 12
scriptId : <scriptId>
- url :
+ url :
}
[1] : {
columnNumber : 2
functionName : throwError
lineNumber : 14
scriptId : <scriptId>
- url :
+ url :
}
]
}
@@ -73,7 +73,7 @@ Running test: testRejectedPromiseWithError
}
result : {
className : Error
- description : Error: MyError at foo (<anonymous>:13:11) at throwError (<anonymous>:15:3) at <anonymous>
+ description : Error: MyError at foo (<anonymous>:13:11) at throwError (<anonymous>:15:3)
objectId : <objectId>
subtype : error
type : object
@@ -89,7 +89,7 @@ Running test: testRejectedPromiseWithSyntaxError
columnNumber : 5
exception : {
className : SyntaxError
- description : SyntaxError: Unexpected token } at foo (<anonymous>:21:5) at throwSyntaxError (<anonymous>:23:3) at <anonymous>
+ description : SyntaxError: Unexpected token } at foo (<anonymous>:21:5) at throwSyntaxError (<anonymous>:23:3)
objectId : <objectId>
subtype : error
type : object
@@ -104,14 +104,14 @@ Running test: testRejectedPromiseWithSyntaxError
functionName : foo
lineNumber : 20
scriptId : <scriptId>
- url :
+ url :
}
[1] : {
columnNumber : 2
functionName : throwSyntaxError
lineNumber : 22
scriptId : <scriptId>
- url :
+ url :
}
]
}
@@ -119,7 +119,7 @@ Running test: testRejectedPromiseWithSyntaxError
}
result : {
className : SyntaxError
- description : SyntaxError: Unexpected token } at foo (<anonymous>:21:5) at throwSyntaxError (<anonymous>:23:3) at <anonymous>
+ description : SyntaxError: Unexpected token } at foo (<anonymous>:21:5) at throwSyntaxError (<anonymous>:23:3)
objectId : <objectId>
subtype : error
type : object
diff --git a/deps/v8/test/inspector/runtime/get-properties-expected.txt b/deps/v8/test/inspector/runtime/get-properties-expected.txt
index 1f7aa507e5..340a51c4a5 100644
--- a/deps/v8/test/inspector/runtime/get-properties-expected.txt
+++ b/deps/v8/test/inspector/runtime/get-properties-expected.txt
@@ -50,3 +50,40 @@ Running test: testObjectThrowsLength
Running test: testTypedArrayWithoutLength
__proto__ own object undefined
+
+Running test: testArrayBuffer
+[[Int8Array]]
+ 0 own number 1
+ 1 own number 2
+ 2 own number 3
+ 3 own number 4
+ 4 own number 5
+ 5 own number 6
+ 6 own number 7
+ 7 own number 8
+ __proto__ own object undefined
+[[Uint8Array]]
+ 0 own number 1
+ 1 own number 2
+ 2 own number 3
+ 3 own number 4
+ 4 own number 5
+ 5 own number 6
+ 6 own number 7
+ 7 own number 8
+ __proto__ own object undefined
+[[Int16Array]]
+ 0 own number 513
+ 1 own number 1027
+ 2 own number 1541
+ 3 own number 2055
+ __proto__ own object undefined
+[[Int32Array]]
+ 0 own number 67305985
+ 1 own number 134678021
+ __proto__ own object undefined
+
+Running test: testArrayBufferWithBrokenUintCtor
+ [[Int8Array]] own object undefined
+ [[Uint8Array]] own object undefined
+ __proto__ own object undefined
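The values in the new expectation are the same eight bytes reinterpreted at different widths (little-endian, as on the test bots):

const buf = new Uint8Array([1, 2, 3, 4, 5, 6, 7, 8]).buffer;
new Int16Array(buf);  // [513, 1027, 1541, 2055]   e.g. 513 = 0x0201 = 2*256 + 1
new Int32Array(buf);  // [67305985, 134678021]     i.e. 0x04030201 and 0x08070605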
diff --git a/deps/v8/test/inspector/runtime/get-properties-on-proxy-expected.txt b/deps/v8/test/inspector/runtime/get-properties-on-proxy-expected.txt
index 339595608a..a0437f4af6 100644
--- a/deps/v8/test/inspector/runtime/get-properties-on-proxy-expected.txt
+++ b/deps/v8/test/inspector/runtime/get-properties-on-proxy-expected.txt
@@ -1,4 +1,177 @@
Check that while Runtime.getProperties call on proxy object no user defined trap will be executed.
+Testing regular Proxy
+{
+ id : <messageId>
+ result : {
+ result : {
+ className : Object
+ description : Proxy
+ objectId : <objectId>
+ preview : {
+ description : Proxy
+ overflow : false
+ properties : [
+ [0] : {
+ name : a
+ type : number
+ value : 1
+ }
+ ]
+ subtype : proxy
+ type : object
+ }
+ subtype : proxy
+ type : object
+ }
+ }
+}
+{
+ id : <messageId>
+ result : {
+ internalProperties : [
+ [0] : {
+ name : [[Handler]]
+ value : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ }
+ [1] : {
+ name : [[Target]]
+ value : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ }
+ [2] : {
+ name : [[IsRevoked]]
+ value : {
+ type : boolean
+ value : false
+ }
+ }
+ ]
+ result : [
+ ]
+ }
+}
+Testing revocable Proxy
+{
+ id : <messageId>
+ result : {
+ result : {
+ className : Object
+ description : Proxy
+ objectId : <objectId>
+ preview : {
+ description : Proxy
+ overflow : false
+ properties : [
+ [0] : {
+ name : a
+ type : number
+ value : 1
+ }
+ ]
+ subtype : proxy
+ type : object
+ }
+ subtype : proxy
+ type : object
+ }
+ }
+}
+{
+ id : <messageId>
+ result : {
+ internalProperties : [
+ [0] : {
+ name : [[Handler]]
+ value : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ }
+ [1] : {
+ name : [[Target]]
+ value : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ }
+ [2] : {
+ name : [[IsRevoked]]
+ value : {
+ type : boolean
+ value : false
+ }
+ }
+ ]
+ result : [
+ ]
+ }
+}
+{
+ id : <messageId>
+ result : {
+ result : {
+ className : Object
+ description : Proxy
+ objectId : <objectId>
+ preview : {
+ description : Proxy
+ overflow : false
+ properties : [
+ ]
+ subtype : proxy
+ type : object
+ }
+ subtype : proxy
+ type : object
+ }
+ }
+}
+{
+ id : <messageId>
+ result : {
+ internalProperties : [
+ [0] : {
+ name : [[Handler]]
+ value : {
+ subtype : null
+ type : object
+ value : null
+ }
+ }
+ [1] : {
+ name : [[Target]]
+ value : {
+ subtype : null
+ type : object
+ value : null
+ }
+ }
+ [2] : {
+ name : [[IsRevoked]]
+ value : {
+ type : boolean
+ value : true
+ }
+ }
+ ]
+ result : [
+ ]
+ }
+}
+Checking counter
{
id : <messageId>
result : {
diff --git a/deps/v8/test/inspector/runtime/get-properties-on-proxy.js b/deps/v8/test/inspector/runtime/get-properties-on-proxy.js
index b2981df290..0c96a985ac 100644
--- a/deps/v8/test/inspector/runtime/get-properties-on-proxy.js
+++ b/deps/v8/test/inspector/runtime/get-properties-on-proxy.js
@@ -6,96 +6,139 @@ let {session, contextGroup, Protocol} = InspectorTest.start("Check that while Ru
contextGroup.addScript(`
var self = this;
-function testFunction()
+function testFunction(revocable)
{
- self.counter = 0;
- var handler = {
- get: function(target, name){
- self.counter++;
- return Reflect.get.apply(this, arguments);
- },
- set: function(target, name){
- self.counter++;
- return Reflect.set.apply(this, arguments);
- },
- getPrototypeOf: function(target) {
- self.counter++;
- return Reflect.getPrototypeOf.apply(this, arguments);
- },
- setPrototypeOf: function(target) {
- self.counter++;
- return Reflect.setPrototypeOf.apply(this, arguments);
- },
- isExtensible: function(target) {
- self.counter++;
- return Reflect.isExtensible.apply(this, arguments);
- },
- isExtensible: function(target) {
- self.counter++;
- return Reflect.isExtensible.apply(this, arguments);
- },
- isExtensible: function(target) {
- self.counter++;
- return Reflect.isExtensible.apply(this, arguments);
- },
- preventExtensions: function() {
- self.counter++;
- return Reflect.preventExtensions.apply(this, arguments);
- },
- getOwnPropertyDescriptor: function() {
- self.counter++;
- return Reflect.getOwnPropertyDescriptor.apply(this, arguments);
- },
- defineProperty: function() {
- self.counter++;
- return Reflect.defineProperty.apply(this, arguments);
- },
- has: function() {
- self.counter++;
- return Reflect.has.apply(this, arguments);
- },
- get: function() {
- self.counter++;
- return Reflect.get.apply(this, arguments);
- },
- set: function() {
- self.counter++;
- return Reflect.set.apply(this, arguments);
- },
- deleteProperty: function() {
- self.counter++;
- return Reflect.deleteProperty.apply(this, arguments);
- },
- ownKeys: function() {
- self.counter++;
- return Reflect.ownKeys.apply(this, arguments);
- },
- apply: function() {
- self.counter++;
- return Reflect.apply.apply(this, arguments);
- },
- construct: function() {
- self.counter++;
- return Reflect.construct.apply(this, arguments);
- }
- };
- return new Proxy({ a : 1}, handler);
+ self.counter = 0;
+ var handler = {
+ get: function(target, name){
+ self.counter++;
+ return Reflect.get.apply(this, arguments);
+ },
+ set: function(target, name){
+ self.counter++;
+ return Reflect.set.apply(this, arguments);
+ },
+ getPrototypeOf: function(target) {
+ self.counter++;
+ return Reflect.getPrototypeOf.apply(this, arguments);
+ },
+ setPrototypeOf: function(target) {
+ self.counter++;
+ return Reflect.setPrototypeOf.apply(this, arguments);
+ },
+ isExtensible: function(target) {
+ self.counter++;
+ return Reflect.isExtensible.apply(this, arguments);
+ },
+ isExtensible: function(target) {
+ self.counter++;
+ return Reflect.isExtensible.apply(this, arguments);
+ },
+ isExtensible: function(target) {
+ self.counter++;
+ return Reflect.isExtensible.apply(this, arguments);
+ },
+ preventExtensions: function() {
+ self.counter++;
+ return Reflect.preventExtensions.apply(this, arguments);
+ },
+ getOwnPropertyDescriptor: function() {
+ self.counter++;
+ return Reflect.getOwnPropertyDescriptor.apply(this, arguments);
+ },
+ defineProperty: function() {
+ self.counter++;
+ return Reflect.defineProperty.apply(this, arguments);
+ },
+ has: function() {
+ self.counter++;
+ return Reflect.has.apply(this, arguments);
+ },
+ get: function() {
+ self.counter++;
+ return Reflect.get.apply(this, arguments);
+ },
+ set: function() {
+ self.counter++;
+ return Reflect.set.apply(this, arguments);
+ },
+ deleteProperty: function() {
+ self.counter++;
+ return Reflect.deleteProperty.apply(this, arguments);
+ },
+ ownKeys: function() {
+ self.counter++;
+ return Reflect.ownKeys.apply(this, arguments);
+ },
+ apply: function() {
+ self.counter++;
+ return Reflect.apply.apply(this, arguments);
+ },
+ construct: function() {
+ self.counter++;
+ return Reflect.construct.apply(this, arguments);
+ }
+ };
+ var obj = { a : 1 };
+ if (revocable) {
+ var revocableProxy = Proxy.revocable(obj, handler);
+ return [revocableProxy.proxy, revocableProxy.revoke]
+ } else {
+ return new Proxy(obj, handler);
+ }
}`);
-Protocol.Runtime.evaluate({ expression: "testFunction()"}).then(requestProperties);
+function getArrayElement(arrayObjectId, idx) {
+ return Protocol.Runtime.callFunctionOn({
+ functionDeclaration: `function() { return this[${idx}]; }`,
+ objectId: arrayObjectId
+ });
+}
-function requestProperties(result)
-{
- Protocol.Runtime.getProperties({ objectId: result.result.objectId, generatePreview: true }).then(checkCounter);
+async function testRegular() {
+ InspectorTest.logMessage("Testing regular Proxy");
+
+ var result = await Protocol.Runtime.evaluate({ expression: "testFunction(false)", generatePreview: true });
+ InspectorTest.logMessage(result);
+ var proxyId = result.result.result.objectId;
+ InspectorTest.logMessage(await Protocol.Runtime.getProperties({ objectId: proxyId, generatePreview: true }));
}
-function checkCounter(result)
-{
- Protocol.Runtime.evaluate({ expression: "self.counter" }).then(dumpCounter);
+async function testRevocable() {
+ InspectorTest.logMessage("Testing revocable Proxy");
+
+ var result = await Protocol.Runtime.evaluate({ expression: "testFunction(true)" });
+ var proxyInfo = await getArrayElement(result.result.result.objectId, 0);
+ var revokeInfo = await getArrayElement(result.result.result.objectId, 1);
+ var proxyId = proxyInfo.result.result.objectId;
+ InspectorTest.logMessage(await Protocol.Runtime.callFunctionOn({
+ functionDeclaration: `function() { return this; }`,
+ objectId: proxyId,
+ generatePreview: true
+ }))
+ InspectorTest.logMessage(await Protocol.Runtime.getProperties({ objectId: proxyId, generatePreview: true }));
+ await Protocol.Runtime.callFunctionOn({
+ functionDeclaration: `function() { this(); }`,
+ objectId: revokeInfo.result.result.objectId
+ });
+ InspectorTest.logMessage(await Protocol.Runtime.callFunctionOn({
+ functionDeclaration: `function() { return this; }`,
+ objectId: proxyId,
+ generatePreview: true
+ }))
+ InspectorTest.logMessage(await Protocol.Runtime.getProperties({ objectId: proxyId, generatePreview: true }));
}
-function dumpCounter(result)
-{
+async function checkCounter() {
+ InspectorTest.logMessage("Checking counter");
+
+ var result = await Protocol.Runtime.evaluate({ expression: "self.counter" });
InspectorTest.logMessage(result);
- InspectorTest.completeTest();
}
+
+(async function test() {
+ await testRegular();
+ await testRevocable();
+ await checkCounter();
+ InspectorTest.completeTest();
+})();
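For orientation, the revocable case in plain JS (handler as built in testFunction above):

const {proxy, revoke} = Proxy.revocable({a: 1}, handler);
// Before revoke(): [[Handler]]/[[Target]] are objects, [[IsRevoked]] is false.
revoke();
// After revoke(): [[Handler]]/[[Target]] are null, [[IsRevoked]] is true.
// Throughout, Runtime.getProperties never invokes the user-defined traps;
// the final "Checking counter" step verifies that.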
diff --git a/deps/v8/test/inspector/runtime/get-properties.js b/deps/v8/test/inspector/runtime/get-properties.js
index 56a4c4be0b..d8132bb678 100644
--- a/deps/v8/test/inspector/runtime/get-properties.js
+++ b/deps/v8/test/inspector/runtime/get-properties.js
@@ -5,71 +5,77 @@
let {session, contextGroup, Protocol} = InspectorTest.start('Checks Runtime.getProperties method');
InspectorTest.runAsyncTestSuite([
- async function testObject5() {
- let objectId = (await Protocol.Runtime.evaluate({
- expression: '(function(){var r = Object(5); r.foo = \'cat\';return r;})()'
- })).result.result.objectId;
- let props = await Protocol.Runtime.getProperties({ objectId, ownProperties: true });
- logGetPropertiesResult(props.result);
+ function testObject5() {
+ return logExpressionProperties('(function(){var r = Object(5); r.foo = \'cat\';return r;})()');
},
- async function testNotOwn() {
- let objectId = (await Protocol.Runtime.evaluate({
- expression: '({ a: 2, set b(_) {}, get b() {return 5;}, __proto__: { a: 3, c: 4, get d() {return 6;} }})'
- })).result.result.objectId;
- let props = await Protocol.Runtime.getProperties({ objectId, ownProperties: false });
- logGetPropertiesResult(props.result);
+ function testNotOwn() {
+ return logExpressionProperties('({ a: 2, set b(_) {}, get b() {return 5;}, __proto__: { a: 3, c: 4, get d() {return 6;} }})', { ownProperties: false });
},
- async function testAccessorsOnly() {
- let objectId = (await Protocol.Runtime.evaluate({
- expression: '({ a: 2, set b(_) {}, get b() {return 5;}, c: \'c\', set d(_){} })'
- })).result.result.objectId;
- let props = await Protocol.Runtime.getProperties({ objectId, ownProperties: true, accessorPropertiesOnly: true });
- logGetPropertiesResult(props.result);
+ function testAccessorsOnly() {
+ return logExpressionProperties('({ a: 2, set b(_) {}, get b() {return 5;}, c: \'c\', set d(_){} })', { ownProperties: true, accessorPropertiesOnly: true});
},
- async function testArray() {
- let objectId = (await Protocol.Runtime.evaluate({
- expression: '[\'red\', \'green\', \'blue\']'
- })).result.result.objectId;
- let props = await Protocol.Runtime.getProperties({ objectId, ownProperties: true });
- logGetPropertiesResult(props.result);
+ function testArray() {
+ return logExpressionProperties('[\'red\', \'green\', \'blue\']');
},
- async function testBound() {
- let objectId = (await Protocol.Runtime.evaluate({
- expression: 'Number.bind({}, 5)'
- })).result.result.objectId;
- let props = await Protocol.Runtime.getProperties({ objectId, ownProperties: true });
- logGetPropertiesResult(props.result);
+ function testBound() {
+ return logExpressionProperties('Number.bind({}, 5)');
},
- async function testObjectThrowsLength() {
- let objectId = (await Protocol.Runtime.evaluate({
- expression: '({get length() { throw \'Length called\'; }})'
- })).result.result.objectId;
- let props = await Protocol.Runtime.getProperties({ objectId, ownProperties: true });
- logGetPropertiesResult(props.result);
+ function testObjectThrowsLength() {
+ return logExpressionProperties('({get length() { throw \'Length called\'; }})');
},
- async function testTypedArrayWithoutLength() {
- let objectId = (await Protocol.Runtime.evaluate({
- expression: '({__proto__: Uint8Array.prototype})'
- })).result.result.objectId;
+ function testTypedArrayWithoutLength() {
+ return logExpressionProperties('({__proto__: Uint8Array.prototype})');
+ },
+
+ async function testArrayBuffer() {
+ let objectId = await evaluateToObjectId('new Uint8Array([1, 2, 3, 4, 5, 6, 7, 8]).buffer');
let props = await Protocol.Runtime.getProperties({ objectId, ownProperties: true });
- logGetPropertiesResult(props.result);
+ for (let prop of props.result.result) {
+ if (prop.name === '__proto__')
+ continue;
+ InspectorTest.log(prop.name);
+ await logGetPropertiesResult(prop.value.objectId);
+ }
},
+
+ async function testArrayBufferWithBrokenUintCtor() {
+ await evaluateToObjectId(`(function() {
+ this.uint8array_old = this.Uint8Array;
+ this.Uint8Array = 42;
+ })()`);
+ await logExpressionProperties('new Int8Array([1, 2, 3, 4, 5, 6, 7]).buffer');
+ await evaluateToObjectId(`(function() {
+ this.Uint8Array = this.uint8array_old;
+ delete this.uint8array_old;
+ })()`);
+ }
]);
-function logGetPropertiesResult(protocolResult) {
+async function logExpressionProperties(expression, flags) {
+ const objectId = await evaluateToObjectId(expression);
+ return await logGetPropertiesResult(objectId, flags);
+}
+
+async function evaluateToObjectId(expression) {
+ return (await Protocol.Runtime.evaluate({ expression })).result.result.objectId;
+}
+
+async function logGetPropertiesResult(objectId, flags = { ownProperties: true }) {
function hasGetterSetter(property, fieldName) {
var v = property[fieldName];
if (!v) return false;
return v.type !== "undefined"
}
- var propertyArray = protocolResult.result;
+ flags.objectId = objectId;
+ let props = await Protocol.Runtime.getProperties(flags);
+ var propertyArray = props.result.result;
propertyArray.sort(NamedThingComparator);
for (var i = 0; i < propertyArray.length; i++) {
var p = propertyArray[i];
@@ -81,7 +87,7 @@ function logGetPropertiesResult(protocolResult) {
InspectorTest.log(" " + p.name + " " + own + " no value" +
(hasGetterSetter(p, "get") ? ", getter" : "") + (hasGetterSetter(p, "set") ? ", setter" : ""));
}
- var internalPropertyArray = protocolResult.internalProperties;
+ var internalPropertyArray = props.result.internalProperties;
if (internalPropertyArray) {
InspectorTest.log("Internal properties");
internalPropertyArray.sort(NamedThingComparator);
diff --git a/deps/v8/test/inspector/runtime/regression-736302-expected.txt b/deps/v8/test/inspector/runtime/regression-736302-expected.txt
index 17243f2745..2174bc54a2 100644
--- a/deps/v8/test/inspector/runtime/regression-736302-expected.txt
+++ b/deps/v8/test/inspector/runtime/regression-736302-expected.txt
@@ -7,7 +7,7 @@ Running test: testThrowException
args : [
[0] : {
type : string
- value : 1
+ value : default: 1
}
]
executionContextId : <executionContextId>
diff --git a/deps/v8/test/inspector/runtime/runtime-evaluate-null-property-expected.txt b/deps/v8/test/inspector/runtime/runtime-evaluate-null-property-expected.txt
new file mode 100644
index 0000000000..a001cb80ef
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/runtime-evaluate-null-property-expected.txt
@@ -0,0 +1,18 @@
+Tests that Runtime.evaluate returns an object with an undefined property.
+{
+ id : <messageId>
+ result : {
+ result : {
+ type : object
+ value : {
+ b : null
+ c : [
+ [0] : 1
+ [1] : null
+ [2] : null
+ [3] : 4
+ ]
+ }
+ }
+ }
+}
diff --git a/deps/v8/test/inspector/runtime/runtime-evaluate-null-property.js b/deps/v8/test/inspector/runtime/runtime-evaluate-null-property.js
new file mode 100644
index 0000000000..665eed8ebd
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/runtime-evaluate-null-property.js
@@ -0,0 +1,14 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} = InspectorTest.start(
+  'Tests that Runtime.evaluate returns an object with an undefined property.');
+
+(async function test() {
+ InspectorTest.logMessage(await Protocol.Runtime.evaluate({
+ expression: '({a:undefined,b:null,c:[1, null, undefined, 4]})',
+ returnByValue: true
+ }));
+ InspectorTest.completeTest();
+})();
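The expected output above (no `a`, nulls inside the array) follows from the JSON-style serialization that returnByValue performs:

JSON.stringify({a: undefined, b: null, c: [1, null, undefined, 4]});
// -> '{"b":null,"c":[1,null,null,4]}'
// undefined object properties are dropped; undefined array elements become null.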
diff --git a/deps/v8/test/inspector/testcfg.py b/deps/v8/test/inspector/testcfg.py
index f33384e1cf..64fa163a5e 100644
--- a/deps/v8/test/inspector/testcfg.py
+++ b/deps/v8/test/inspector/testcfg.py
@@ -2,27 +2,22 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-import itertools
import os
-import re
-import shlex
from testrunner.local import testsuite
from testrunner.local import utils
from testrunner.objects import testcase
+from testrunner.outproc import base as outproc
-FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")
PROTOCOL_TEST_JS = "protocol-test.js"
EXPECTED_SUFFIX = "-expected.txt"
RESOURCES_FOLDER = "resources"
-class InspectorProtocolTestSuite(testsuite.TestSuite):
- def __init__(self, name, root):
- super(InspectorProtocolTestSuite, self).__init__(name, root)
-
+class TestSuite(testsuite.TestSuite):
def ListTests(self, context):
tests = []
- for dirname, dirs, files in os.walk(os.path.join(self.root), followlinks=True):
+ for dirname, dirs, files in os.walk(
+ os.path.join(self.root), followlinks=True):
for dotted in [x for x in dirs if x.startswith('.')]:
dirs.remove(dotted)
if dirname.endswith(os.path.sep + RESOURCES_FOLDER):
@@ -34,80 +29,41 @@ class InspectorProtocolTestSuite(testsuite.TestSuite):
fullpath = os.path.join(dirname, filename)
relpath = fullpath[len(self.root) + 1 : -3]
testname = relpath.replace(os.path.sep, "/")
- test = testcase.TestCase(self, testname)
+ test = self._create_test(testname)
tests.append(test)
return tests
- def GetShellForTestCase(self, testcase):
- return 'inspector-test'
+ def _test_class(self):
+ return TestCase
- def GetParametersForTestCase(self, testcase, context):
- source = self.GetSourceForTest(testcase)
- flags = testcase.flags + context.mode_flags
- flags_match = re.findall(FLAGS_PATTERN, source)
- for match in flags_match:
- flags += shlex.split(match.strip())
- files = [
- os.path.join(self.root, PROTOCOL_TEST_JS),
- os.path.join(self.root, testcase.path + self.suffix()),
- ]
- return files, flags, {}
- def GetSourceForTest(self, testcase):
- filename = os.path.join(self.root, testcase.path + self.suffix())
- with open(filename) as f:
- return f.read()
+class TestCase(testcase.TestCase):
+ def __init__(self, *args, **kwargs):
+ super(TestCase, self).__init__(*args, **kwargs)
- def _IgnoreLine(self, string):
- """Ignore empty lines, valgrind output and Android output."""
- if not string: return True
- return (string.startswith("==") or string.startswith("**") or
- string.startswith("ANDROID") or
- # FIXME(machenbach): The test driver shouldn't try to use slow
- # asserts if they weren't compiled. This fails in optdebug=2.
- string == "Warning: unknown flag --enable-slow-asserts." or
- string == "Try --help for options")
+ self._source_flags = self._parse_source_flags()
- def IsFailureOutput(self, testcase):
- file_name = os.path.join(self.root, testcase.path) + EXPECTED_SUFFIX
- with file(file_name, "r") as expected:
- expected_lines = expected.readlines()
+ def _get_files_params(self, ctx):
+ return [
+ os.path.join(self.suite.root, PROTOCOL_TEST_JS),
+ os.path.join(self.suite.root, self.path + self._get_suffix()),
+ ]
+
+ def _get_source_flags(self):
+ return self._source_flags
- def ExpIterator():
- for line in expected_lines:
- if not line.strip(): continue
- yield line.strip()
+ def _get_source_path(self):
+ return os.path.join(self.suite.root, self.path + self._get_suffix())
- def ActIterator(lines):
- for line in lines:
- if self._IgnoreLine(line.strip()): continue
- yield line.strip()
+ def get_shell(self):
+ return 'inspector-test'
- def ActBlockIterator():
- """Iterates over blocks of actual output lines."""
- lines = testcase.output.stdout.splitlines()
- start_index = 0
- found_eqeq = False
- for index, line in enumerate(lines):
- # If a stress test separator is found:
- if line.startswith("=="):
- # Iterate over all lines before a separator except the first.
- if not found_eqeq:
- found_eqeq = True
- else:
- yield ActIterator(lines[start_index:index])
- # The next block of output lines starts after the separator.
- start_index = index + 1
- # Iterate over complete output if no separator was found.
- if not found_eqeq:
- yield ActIterator(lines)
+ @property
+ def output_proc(self):
+ return outproc.ExpectedOutProc(
+ self.expected_outcomes,
+ os.path.join(self.suite.root, self.path) + EXPECTED_SUFFIX)
- for act_iterator in ActBlockIterator():
- for (expected, actual) in itertools.izip_longest(
- ExpIterator(), act_iterator, fillvalue=''):
- if expected != actual:
- return True
- return False
def GetSuite(name, root):
- return InspectorProtocolTestSuite(name, root)
+ return TestSuite(name, root)
diff --git a/deps/v8/test/intl/testcfg.py b/deps/v8/test/intl/testcfg.py
index 977dc11e2e..87aece3da6 100644
--- a/deps/v8/test/intl/testcfg.py
+++ b/deps/v8/test/intl/testcfg.py
@@ -26,18 +26,11 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
-import re
from testrunner.local import testsuite
from testrunner.objects import testcase
-FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")
-
-class IntlTestSuite(testsuite.TestSuite):
-
- def __init__(self, name, root):
- super(IntlTestSuite, self).__init__(name, root)
-
+class TestSuite(testsuite.TestSuite):
def ListTests(self, context):
tests = []
for dirname, dirs, files in os.walk(self.root):
@@ -52,34 +45,42 @@ class IntlTestSuite(testsuite.TestSuite):
fullpath = os.path.join(dirname, filename)
relpath = fullpath[len(self.root) + 1 : -3]
testname = relpath.replace(os.path.sep, "/")
- test = testcase.TestCase(self, testname)
+ test = self._create_test(testname)
tests.append(test)
return tests
- def GetParametersForTestCase(self, testcase, context):
- source = self.GetSourceForTest(testcase)
- flags = testcase.flags + ["--allow-natives-syntax"] + context.mode_flags
- flags_match = re.findall(FLAGS_PATTERN, source)
- for match in flags_match:
- flags += match.strip().split()
+ def _test_class(self):
+ return TestCase
+
+
+class TestCase(testcase.TestCase):
+ def __init__(self, *args, **kwargs):
+ super(TestCase, self).__init__(*args, **kwargs)
+
+ self._source_flags = self._parse_source_flags()
+
+ def _get_files_params(self, ctx):
+ files = map(lambda f: os.path.join(self.suite.root, f), [
+ 'assert.js',
+ 'utils.js',
+ 'regexp-prepare.js',
+ self.path + self._get_suffix(),
+ 'regexp-assert.js',
+ ])
+
+ if ctx.isolates:
+ files += ['--isolate'] + files
+ return files
- files = []
- files.append(os.path.join(self.root, "assert.js"))
- files.append(os.path.join(self.root, "utils.js"))
- files.append(os.path.join(self.root, "regexp-prepare.js"))
- files.append(os.path.join(self.root, testcase.path + self.suffix()))
- files.append(os.path.join(self.root, "regexp-assert.js"))
+ def _get_source_flags(self):
+ return self._source_flags
- all_files = list(files)
- if context.isolates:
- all_files += ["--isolate"] + files
+ def _get_suite_flags(self, ctx):
+ return ['--allow-natives-syntax']
- return all_files, flags, {}
+ def _get_source_path(self):
+ return os.path.join(self.suite.root, self.path + self._get_suffix())
- def GetSourceForTest(self, testcase):
- filename = os.path.join(self.root, testcase.path + self.suffix())
- with open(filename) as f:
- return f.read()
def GetSuite(name, root):
- return IntlTestSuite(name, root)
+ return TestSuite(name, root)
diff --git a/deps/v8/test/js-perf-test/Array/every.js b/deps/v8/test/js-perf-test/Array/every.js
index 76b68b7a45..5a29f44e41 100644
--- a/deps/v8/test/js-perf-test/Array/every.js
+++ b/deps/v8/test/js-perf-test/Array/every.js
@@ -1,38 +1,7 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
-function benchy(name, test, testSetup) {
- new BenchmarkSuite(name, [1000],
- [
- new Benchmark(name, false, false, 0, test, testSetup, ()=>{})
- ]);
-}
-
-benchy('DoubleEvery', DoubleEvery, DoubleEverySetup);
-benchy('SmiEvery', SmiEvery, SmiEverySetup);
-benchy('FastEvery', FastEvery, FastEverySetup);
-benchy('OptFastEvery', OptFastEvery, FastEverySetup);
-
-var array;
-// Initialize func variable to ensure the first test doesn't benefit from
-// global object property tracking.
-var func = 0;
-var this_arg;
-var result;
-var array_size = 100;
-
-// Although these functions have the same code, they are separated for
-// clean IC feedback.
-function DoubleEvery() {
- result = array.every(func, this_arg);
-}
-function SmiEvery() {
- result = array.every(func, this_arg);
-}
-function FastEvery() {
- result = array.every(func, this_arg);
-}
+(() => {
// Make sure we inline the callback, pick up all possible TurboFan
// optimizations.
@@ -50,20 +19,19 @@ function RunOptFastEvery(multiple) {
%NeverOptimizeFunction(OptFastEvery);
function OptFastEvery() { RunOptFastEvery(3); }
-function SmiEverySetup() {
- array = new Array();
- for (var i = 0; i < array_size; i++) array[i] = i;
- func = (value, index, object) => { return value != 34343; };
+function side_effect(a) { return a; }
+%NeverOptimizeFunction(side_effect);
+function OptUnreliableEvery() {
+ result = array.every(func, side_effect(array));
}
-function DoubleEverySetup() {
- array = new Array();
- for (var i = 0; i < array_size; i++) array[i] = (i + 0.5);
- func = (value, index, object) => { return value > 0.0; };
-}
+DefineHigherOrderTests([
+ // name, test function, setup function, user callback
+ "DoubleEvery", mc("every"), DoubleSetup, v => v > 0.0,
+ "SmiEvery", mc("every"), SmiSetup, v => v != 34343,
+ "FastEvery", mc("every"), FastSetup, v => v !== 'hi',
+ "OptFastEvery", OptFastEvery, FastSetup, v => true,
+ "OptUnreliableEvery", OptUnreliableEvery, FastSetup, v => true
+]);
-function FastEverySetup() {
- array = new Array();
- for (var i = 0; i < array_size; i++) array[i] = 'value ' + i;
- func = (value, index, object) => { return value !== 'hi'; };
-}
+})();
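DefineHigherOrderTests and mc come from the suite's shared setup, which is not part of this diff. A plausible sketch of mc -- it must hand each benchmark a distinct function object so IC feedback stays per-test, replacing the hand-copied DoubleEvery/SmiEvery/FastEvery bodies:

// Assumed shape only; the real helper lives in the shared benchmark files.
function mc(name, generic = false) {
  return generic
      ? new Function('result = Array.prototype.' + name + '.call(array, func, this_arg);')
      : new Function('result = array.' + name + '(func, this_arg);');
}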
diff --git a/deps/v8/test/js-perf-test/Array/filter.js b/deps/v8/test/js-perf-test/Array/filter.js
index 94ba2651ec..e0d4327dd6 100644
--- a/deps/v8/test/js-perf-test/Array/filter.js
+++ b/deps/v8/test/js-perf-test/Array/filter.js
@@ -1,61 +1,7 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
-function benchy(name, test, testSetup) {
- new BenchmarkSuite(name, [1000],
- [
- new Benchmark(name, false, false, 0, test, testSetup, ()=>{})
- ]);
-}
-
-benchy('NaiveFilterReplacement', NaiveFilter, NaiveFilterSetup);
-benchy('DoubleFilter', DoubleFilter, DoubleFilterSetup);
-benchy('SmiFilter', SmiFilter, SmiFilterSetup);
-benchy('FastFilter', FastFilter, FastFilterSetup);
-benchy('GenericFilter', GenericFilter, ObjectFilterSetup);
-benchy('OptFastFilter', OptFastFilter, FastFilterSetup);
-
-var array;
-// Initialize func variable to ensure the first test doesn't benefit from
-// global object property tracking.
-var func = 0;
-var this_arg;
-var result;
-var array_size = 100;
-
-// Although these functions have the same code, they are separated for
-// clean IC feedback.
-function DoubleFilter() {
- result = array.filter(func, this_arg);
-}
-function SmiFilter() {
- result = array.filter(func, this_arg);
-}
-function FastFilter() {
- result = array.filter(func, this_arg);
-}
-
-// Make sure we inline the callback, pick up all possible TurboFan
-// optimizations.
-function RunOptFastFilter(multiple) {
- // Use of variable multiple in the callback function forces
- // context creation without escape analysis.
- //
- // Also, the arrow function requires inlining based on
- // SharedFunctionInfo.
- result = array.filter((v, i, a) => multiple === 3);
-}
-
-// Don't optimize because I want to optimize RunOptFastMap with a parameter
-// to be used in the callback.
-%NeverOptimizeFunction(OptFastFilter);
-function OptFastFilter() { RunOptFastFilter(3); }
-
-
-function GenericFilter() {
- result = Array.prototype.filter.call(array, func, this_arg);
-}
+(() => {
// From the lodash implementation.
function NaiveFilter() {
@@ -76,37 +22,45 @@ function NaiveFilter() {
function NaiveFilterSetup() {
// Prime NaiveFilter with polymorphic cases.
array = [1, 2, 3];
- func = ()=>true;
NaiveFilter();
NaiveFilter();
array = [3.4]; NaiveFilter();
array = new Array(10); array[0] = 'hello'; NaiveFilter();
- SmiFilterSetup();
+ SmiSetup();
delete array[1];
}
-function SmiFilterSetup() {
- array = new Array();
- for (var i = 0; i < array_size; i++) array[i] = i;
- func = (value, index, object) => { return value % 2 === 0; };
+// Make sure we inline the callback, pick up all possible TurboFan
+// optimizations.
+function RunOptFastFilter(multiple) {
+ // Use of variable multiple in the callback function forces
+ // context creation without escape analysis.
+ //
+ // Also, the arrow function requires inlining based on
+ // SharedFunctionInfo.
+ result = array.filter((v, i, a) => multiple === 3);
}
-function DoubleFilterSetup() {
- array = new Array();
- for (var i = 0; i < array_size; i++) array[i] = (i + 0.5);
- func = (value, index, object) => { return Math.floor(value) % 2 === 0; };
-}
+// Don't optimize because I want to optimize RunOptFastMap with a parameter
+// to be used in the callback.
+%NeverOptimizeFunction(OptFastFilter);
+function OptFastFilter() { RunOptFastFilter(3); }
-function FastFilterSetup() {
- array = new Array();
- for (var i = 0; i < array_size; i++) array[i] = 'value ' + i;
- func = (value, index, object) => { return index % 2 === 0; };
+function side_effect(a) { return a; }
+%NeverOptimizeFunction(side_effect);
+function OptUnreliableFilter() {
+ result = array.filter(func, side_effect(array));
}
-function ObjectFilterSetup() {
- array = { length: array_size };
- for (var i = 0; i < array_size; i++) {
- array[i] = i;
- }
- func = (value, index, object) => { return index % 2 === 0; };
-}
+DefineHigherOrderTests([
+ // name, test function, setup function, user callback
+ "NaiveFilterReplacement", NaiveFilter, NaiveFilterSetup, v => true,
+ "DoubleFilter", mc("filter"), DoubleSetup, v => Math.floor(v) % 2 === 0,
+ "SmiFilter", mc("filter"), SmiSetup, v => v % 2 === 0,
+ "FastFilter", mc("filter"), FastSetup, (_, i) => i % 2 === 0,
+ "GenericFilter", mc("filter", true), ObjectSetup, (_, i) => i % 2 === 0,
+ "OptFastFilter", OptFastFilter, FastSetup, undefined,
+ "OptUnreliableFilter", OptUnreliableFilter, FastSetup, v => true
+]);
+
+})();
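The new OptUnreliable* benchmarks share one trick: an argument is routed through side_effect, which %NeverOptimizeFunction keeps opaque to TurboFan. The naming suggests the intent is to destroy compile-time knowledge about the array between its load and the builtin call:

function side_effect(a) { return a; }  // identity, but opaque to the optimizer
%NeverOptimizeFunction(side_effect);   // natives syntax; needs --allow-natives-syntax
result = array.filter(func, side_effect(array));  // receiver no longer provably stable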
diff --git a/deps/v8/test/js-perf-test/Array/find-index.js b/deps/v8/test/js-perf-test/Array/find-index.js
new file mode 100644
index 0000000000..716aa710bb
--- /dev/null
+++ b/deps/v8/test/js-perf-test/Array/find-index.js
@@ -0,0 +1,63 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+(() => {
+
+// Make sure we inline the callback, pick up all possible TurboFan
+// optimizations.
+function RunOptFast(multiple) {
+ // Use of variable multiple in the callback function forces
+ // context creation without escape analysis.
+ //
+ // Also, the arrow function requires inlining based on
+ // SharedFunctionInfo.
+ result = array.findIndex((v, i, a) => v === `value ${multiple}`);
+}
+
+// Don't optimize because I want to optimize RunOptFast with a parameter
+// to be used in the callback.
+%NeverOptimizeFunction(OptFast);
+function OptFast() { RunOptFast(max_index); }
+
+function side_effect(a) { return a; }
+%NeverOptimizeFunction(side_effect);
+function OptUnreliable() {
+ result = array.findIndex(func, side_effect(array));
+}
+
+function Naive() {
+ let index = -1;
+ const length = array == null ? 0 : array.length;
+
+ for (let index = 0; index < length; index++) {
+ const value = array[index];
+ if (func(value, index, array)) {
+ result = value;
+ break;
+ }
+ }
+}
+
+function NaiveSetup() {
+ // Prime Naive with polymorphic cases.
+ array = [1, 2, 3];
+ Naive();
+ Naive();
+ array = [3.4]; Naive();
+ array = new Array(10); array[0] = 'hello'; Naive();
+ SmiSetup();
+ delete array[1];
+}
+
+DefineHigherOrderTests([
+ // name, test function, setup function, user callback
+ "NaiveFindIndexReplacement", Naive, NaiveSetup, v => v === max_index,
+ "DoubleFindIndex", mc("findIndex"), DoubleSetup, v => v === max_index + 0.5,
+ "SmiFindIndex", mc("findIndex"), SmiSetup, v => v === max_index,
+ "FastFindIndex", mc("findIndex"), FastSetup, v => v === `value ${max_index}`,
+ "GenericFindIndex", mc("findIndex", true), ObjectSetup, v => v === max_index,
+ "OptFastFindIndex", OptFast, FastSetup, undefined,
+ "OptUnreliableFindIndex", OptUnreliable, FastSetup, v => v === max_index
+]);
+
+})();
diff --git a/deps/v8/test/js-perf-test/Array/find.js b/deps/v8/test/js-perf-test/Array/find.js
new file mode 100644
index 0000000000..9b9a19f1c4
--- /dev/null
+++ b/deps/v8/test/js-perf-test/Array/find.js
@@ -0,0 +1,63 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+(() => {
+
+// Make sure we inline the callback, pick up all possible TurboFan
+// optimizations.
+function RunOptFast(multiple) {
+ // Use of variable multiple in the callback function forces
+ // context creation without escape analysis.
+ //
+ // Also, the arrow function requires inlining based on
+ // SharedFunctionInfo.
+ result = array.find((v, i, a) => v === `value ${multiple}`);
+}
+
+// Don't optimize because I want to optimize RunOptFast with a parameter
+// to be used in the callback.
+%NeverOptimizeFunction(OptFast);
+function OptFast() { RunOptFast(max_index); }
+
+function side_effect(a) { return a; }
+%NeverOptimizeFunction(side_effect);
+function OptUnreliable() {
+ result = array.find(func, side_effect(array));
+}
+
+function Naive() {
+ let index = -1;
+ const length = array == null ? 0 : array.length;
+
+ for (let index = 0; index < length; index++) {
+ const value = array[index];
+ if (func(value, index, array)) {
+ result = value;
+ break;
+ }
+ }
+}
+
+function NaiveSetup() {
+ // Prime Naive with polymorphic cases.
+ array = [1, 2, 3];
+ Naive();
+ Naive();
+ array = [3.4]; Naive();
+ array = new Array(10); array[0] = 'hello'; Naive();
+ SmiSetup();
+ delete array[1];
+}
+
+DefineHigherOrderTests([
+ // name, test function, setup function, user callback
+ "NaiveFindReplacement", Naive, NaiveSetup, v => v === max_index,
+ "DoubleFind", mc("find"), DoubleSetup, v => v === max_index + 0.5,
+ "SmiFind", mc("find"), SmiSetup, v => v === max_index,
+ "FastFind", mc("find"), FastSetup, v => v === `value ${max_index}`,
+ "GenericFind", mc("find", true), ObjectSetup, v => v === max_index,
+ "OptFastFind", OptFast, FastSetup, undefined,
+ "OptUnreliableFind", OptUnreliable, FastSetup, v => v === max_index
+]);
+
+})();
diff --git a/deps/v8/test/js-perf-test/Array/for-each.js b/deps/v8/test/js-perf-test/Array/for-each.js
new file mode 100644
index 0000000000..79d279894b
--- /dev/null
+++ b/deps/v8/test/js-perf-test/Array/for-each.js
@@ -0,0 +1,62 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+(() => {
+
+function Naive() {
+ let index = -1;
+ const length = array == null ? 0 : array.length;
+
+ for (let index = 0; index < length; index++) {
+ const value = array[index];
+ if (func(value, index, array)) {
+ result = value;
+ break;
+ }
+ }
+}
+
+function NaiveSetup() {
+ // Prime Naive with polymorphic cases.
+ array = [1, 2, 3];
+ Naive();
+ Naive();
+ array = [3.4]; Naive();
+ array = new Array(10); array[0] = 'hello'; Naive();
+ SmiSetup();
+ delete array[1];
+}
+
+// Make sure we inline the callback, pick up all possible TurboFan
+// optimizations.
+function RunOptFast(multiple) {
+ // Use of variable multiple in the callback function forces
+ // context creation without escape analysis.
+ //
+ // Also, the arrow function requires inlining based on
+ // SharedFunctionInfo.
+ result = array.forEach((v, i, a) => v === `value ${multiple}`);
+}
+
+// Don't optimize OptFast itself: we want RunOptFast to be optimized with
+// a parameter that is used in the callback.
+%NeverOptimizeFunction(OptFast);
+function OptFast() { RunOptFast(max_index); }
+
+function side_effect(a) { return a; }
+%NeverOptimizeFunction(side_effect);
+function OptUnreliable() {
+ result = array.forEach(func, side_effect(array));
+}
+
+DefineHigherOrderTests([
+ "NaiveForEachReplacement", Naive, NaiveSetup, v => v === max_index,
+ "DoubleForEach", mc("forEach"), DoubleSetup, v => v === max_index + 0.5,
+ "SmiForEach", mc("forEach"), SmiSetup, v => v === max_index,
+ "FastForEach", mc("forEach"), FastSetup, v => v === `value ${max_index}`,
+ "GenericForEach", mc("forEach", true), ObjectSetup, v => v === max_index,
+ "OptFastForEach", OptFast, FastSetup, undefined,
+ "OptUnreliableForEach", OptUnreliable, FastSetup, v => v === `value ${max_index}`
+]);
+
+})();
diff --git a/deps/v8/test/js-perf-test/Array/join.js b/deps/v8/test/js-perf-test/Array/join.js
index 7330ae0459..ef9d298421 100644
--- a/deps/v8/test/js-perf-test/Array/join.js
+++ b/deps/v8/test/js-perf-test/Array/join.js
@@ -1,39 +1,21 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
-function benchy(name, test, testSetup) {
- new BenchmarkSuite(name, [1000],
- [
- new Benchmark(name, false, false, 0, test, testSetup, ()=>{})
- ]);
-}
-
-benchy('SmiJoin', SmiJoin, SmiJoinSetup);
-benchy('StringJoin', StringJoin, StringJoinSetup);
-benchy('SparseSmiJoin', SparseSmiJoin, SparseSmiJoinSetup);
-benchy('SparseStringJoin', SparseStringJoin, SparseStringJoinSetup);
+(() => {
var array;
var result;
var array_size = 1000;
-
-// Although these functions have the same code, they are separated for
-// clean IC feedback.
-function SmiJoin() {
- result = array.join();
-}
-function StringJoin() {
- result = array.join();
-}
-function SparseSmiJoin() {
- result = array.join();
-}
-function SparseStringJoin() {
- result = array.join();
+function make_join() {
+ return new Function('result = array.join();');
}
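+// Each make_join() call compiles a distinct function object, so every
+// suite below gets its own array.join() call site and thus its own IC
+// feedback. One returned function is roughly equivalent to (illustration):
+//
+//   function anonymous() { result = array.join(); }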
+benchy('SmiJoin', make_join(), SmiJoinSetup);
+benchy('StringJoin', make_join(), StringJoinSetup);
+benchy('SparseSmiJoin', make_join(), SparseSmiJoinSetup);
+benchy('SparseStringJoin', make_join(), SparseStringJoinSetup);
+
function SmiJoinSetup() {
array = new Array();
for (var i = 0; i < array_size; ++i) array[i] = i;
@@ -50,3 +32,5 @@ function SparseStringJoinSetup() {
StringJoinSetup();
array.length = array.length * 2;
}
+
+})();
diff --git a/deps/v8/test/js-perf-test/Array/map.js b/deps/v8/test/js-perf-test/Array/map.js
index f4ab95b065..9179aa3c88 100644
--- a/deps/v8/test/js-perf-test/Array/map.js
+++ b/deps/v8/test/js-perf-test/Array/map.js
@@ -1,58 +1,7 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
-function benchy(name, test, testSetup) {
- new BenchmarkSuite(name, [1000],
- [
- new Benchmark(name, false, false, 0, test, testSetup, ()=>{})
- ]);
-}
-
-benchy('NaiveMapReplacement', NaiveMap, NaiveMapSetup);
-benchy('DoubleMap', DoubleMap, DoubleMapSetup);
-benchy('SmallSmiToDoubleMap', SmiMap, SmiToDoubleMapSetup);
-benchy('SmallSmiToFastMap', SmiMap, SmiToFastMapSetup);
-benchy('SmiMap', SmiMap, SmiMapSetup);
-benchy('FastMap', FastMap, FastMapSetup);
-benchy('GenericMap', GenericMap, ObjectMapSetup);
-benchy('OptFastMap', OptFastMap, FastMapSetup);
-
-var array;
-// Initialize func variable to ensure the first test doesn't benefit from
-// global object property tracking.
-var func = 0;
-var this_arg;
-var result;
-var array_size = 100;
-
-// Although these functions have the same code, they are separated for
-// clean IC feedback.
-function DoubleMap() {
- result = array.map(func, this_arg);
-}
-function SmiMap() {
- result = array.map(func, this_arg);
-}
-function FastMap() {
- result = array.map(func, this_arg);
-}
-
-// Make sure we inline the callback, pick up all possible TurboFan
-// optimizations.
-function RunOptFastMap(multiple) {
- // Use of variable multiple in the callback function forces
- // context creation without escape analysis.
- //
- // Also, the arrow function requires inlining based on
- // SharedFunctionInfo.
- result = array.map((v, i, a) => v + ' ' + multiple);
-}
-
-// Don't optimize because I want to optimize RunOptFastMap with a parameter
-// to be used in the callback.
-%NeverOptimizeFunction(OptFastMap);
-function OptFastMap() { RunOptFastMap(3); }
+(() => {
function NaiveMap() {
let index = -1
@@ -65,57 +14,50 @@ function NaiveMap() {
return result
}
-
-function GenericMap() {
- result = Array.prototype.map.call(array, func, this_arg);
-}
-
function NaiveMapSetup() {
// Prime NaiveMap with polymorphic cases.
array = [1, 2, 3];
- func = (v, i, a) => v;
NaiveMap();
NaiveMap();
array = [3.4]; NaiveMap();
array = new Array(10); array[0] = 'hello'; NaiveMap();
- SmiMapSetup();
+ SmiSetup();
delete array[1];
}
-function SmiMapSetup() {
- array = new Array();
- for (var i = 0; i < array_size; i++) array[i] = i;
- func = (value, index, object) => { return value; };
-}
-
-function SmiToDoubleMapSetup() {
- array = new Array();
- for (var i = 0; i < 1; i++) array[i] = i;
- func = (value, index, object) => { return value + 0.5; };
-}
-
-function SmiToFastMapSetup() {
- array = new Array();
- for (var i = 0; i < 1; i++) array[i] = i;
- func = (value, index, object) => { return "hi" + value; };
-}
-
-function DoubleMapSetup() {
- array = new Array();
- for (var i = 0; i < array_size; i++) array[i] = (i + 0.5);
- func = (value, index, object) => { return value; };
+// Make sure we inline the callback and pick up all possible TurboFan
+// optimizations.
+function RunOptFastMap(multiple) {
+ // Use of variable multiple in the callback function forces
+ // context creation without escape analysis.
+ //
+ // Also, the arrow function requires inlining based on
+ // SharedFunctionInfo.
+ result = array.map((v, i, a) => v + ' ' + multiple);
}
-function FastMapSetup() {
- array = new Array();
- for (var i = 0; i < array_size; i++) array[i] = 'value ' + i;
- func = (value, index, object) => { return value; };
-}
+// Don't optimize OptFastMap itself: we want RunOptFastMap to be optimized
+// with a parameter that is used in the callback.
+%NeverOptimizeFunction(OptFastMap);
+function OptFastMap() { RunOptFastMap(3); }
-function ObjectMapSetup() {
- array = { length: array_size };
- for (var i = 0; i < array_size; i++) {
- array[i] = i;
- }
- func = (value, index, object) => { return value; };
-}
+function side_effect(a) { return a; }
+%NeverOptimizeFunction(side_effect);
+function OptUnreliableMap() {
+ result = array.map(func, side_effect(array));
+}
+
+DefineHigherOrderTests([
+ // name, test function, setup function, user callback
+ "NaiveMapReplacement", NaiveMap, NaiveMapSetup, v => v,
+ "SmiMap", mc("map"), SmiSetup, v => v,
+ "DoubleMap", mc("map"), DoubleSetup, v => v,
+ "FastMap", mc("map"), FastSetup, v => v,
+ "SmallSmiToDoubleMap", mc("map"), SmiSetup, v => v + 0.5,
+ "SmallSmiToFastMap", mc("map"), SmiSetup, v => "hi" + v,
+ "GenericMap", mc("map", true), ObjectSetup, v => v,
+ "OptFastMap", OptFastMap, FastSetup, undefined,
+ "OptUnreliableMap", OptUnreliableMap, FastSetup, v => v
+]);
+
+})();
diff --git a/deps/v8/test/js-perf-test/Array/of.js b/deps/v8/test/js-perf-test/Array/of.js
new file mode 100644
index 0000000000..bfd471245c
--- /dev/null
+++ b/deps/v8/test/js-perf-test/Array/of.js
@@ -0,0 +1,117 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+(() => {
+
+benchy('EmptyArrayOf', EmptyArrayOf, EmptyArrayOfSetup);
+benchy('SmallTransplantedArrayOf', SmallTransplantedArrayOf,
+ SmallTransplantedArrayOfSetup);
+benchy('SmallSmiArrayOf', SmallSmiArrayOf, SmallSmiArrayOfSetup);
+benchy('LargeSmiArrayOf', LargeSmiArrayOf, LargeSmiArrayOfSetup);
+benchy('SmallDoubleArrayOf', SmallDoubleArrayOf, SmallDoubleArrayOfSetup);
+benchy('SmallStringArrayOf', SmallStringArrayOf, SmallStringArrayOfSetup);
+benchy('SmallMixedArrayOf', SmallMixedArrayOf, SmallMixedArrayOfSetup);
+
+function ArrayLike() {}
+ArrayLike.of = Array.of;
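+
+// Array.of constructs through its receiver, so the "transplanted" copy on
+// ArrayLike yields ArrayLike instances rather than plain arrays.
+// Illustrative check (not executed by the suite):
+//
+//   ArrayLike.of(1, 2, 3) instanceof ArrayLike;  // true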
+
+var arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10;
+var arg11, arg12, arg13, arg14, arg15, arg16, arg17, arg18, arg19, arg20;
+var result;
+
+function EmptyArrayOf() {
+ result = Array.of();
+}
+
+function BaselineArray() {
+ result = [arg1, arg2, arg3];
+}
+
+function SmallSmiArrayOf() {
+ result = Array.of(arg1, arg2, arg3);
+}
+
+function LargeSmiArrayOf() {
+ result = Array.of(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10,
+ arg11, arg12, arg13, arg14, arg15, arg16, arg17, arg18, arg19, arg20);
+}
+
+function SmallTransplantedArrayOf() {
+ result = ArrayLike.of(arg1, arg2, arg3);
+}
+
+function SmallDoubleArrayOf() {
+ result = Array.of(arg1, arg2, arg3);
+}
+
+function SmallStringArrayOf() {
+ result = Array.of(arg1, arg2, arg3);
+}
+
+function SmallMixedArrayOf() {
+ result = Array.of(arg1, arg2, arg3);
+}
+
+function EmptyArrayOfSetup() {
+}
+
+function BaselineArraySetup() {
+ arg1 = 1;
+ arg2 = 2;
+ arg3 = 3;
+}
+
+function SmallSmiArrayOfSetup() {
+ arg1 = 1;
+ arg2 = 2;
+ arg3 = 3;
+}
+
+function SmallTransplantedArrayOfSetup() {
+ arg1 = 1;
+ arg2 = 2;
+ arg3 = 3;
+}
+
+function SmallDoubleArrayOfSetup() {
+ arg1 = 1.5;
+ arg2 = 2.5;
+ arg3 = 3.5;
+}
+
+function SmallStringArrayOfSetup() {
+ arg1 = "cat";
+ arg2 = "dog";
+ arg3 = "giraffe";
+}
+
+function SmallMixedArrayOfSetup() {
+ arg1 = 1;
+ arg2 = 2.5;
+ arg3 = "giraffe";
+}
+
+function LargeSmiArrayOfSetup() {
+ arg1 = 1;
+ arg2 = 2;
+ arg3 = 3;
+ arg4 = 4;
+ arg5 = 5;
+ arg6 = 6;
+ arg7 = 7;
+ arg8 = 8;
+ arg9 = 9;
+ arg10 = 10;
+ arg11 = 11;
+ arg12 = 12;
+ arg13 = 13;
+ arg14 = 14;
+ arg15 = 15;
+ arg16 = 16;
+ arg17 = 17;
+ arg18 = 18;
+ arg19 = 19;
+ arg20 = 20;
+}
+
+})();
diff --git a/deps/v8/test/js-perf-test/Array/reduce-right.js b/deps/v8/test/js-perf-test/Array/reduce-right.js
index 29cb67cd1a..ed00f5ac27 100644
--- a/deps/v8/test/js-perf-test/Array/reduce-right.js
+++ b/deps/v8/test/js-perf-test/Array/reduce-right.js
@@ -1,38 +1,7 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
-function benchy(name, test, testSetup) {
- new BenchmarkSuite(name, [1000],
- [
- new Benchmark(name, false, false, 0, test, testSetup, ()=>{})
- ]);
-}
-
-benchy('DoubleReduceRight', DoubleReduceRight, DoubleReduceRightSetup);
-benchy('SmiReduceRight', SmiReduceRight, SmiReduceRightSetup);
-benchy('FastReduceRight', FastReduceRight, FastReduceRightSetup);
-benchy('OptFastReduceRight', OptFastReduceRight, FastReduceRightSetup);
-
-var array;
-// Initialize func variable to ensure the first test doesn't benefit from
-// global object property tracking.
-var func = 0;
-var this_arg;
-var result;
-var array_size = 100;
-
-// Although these functions have the same code, they are separated for
-// clean IC feedback.
-function DoubleReduceRight() {
- result = array.reduceRight(func, this_arg);
-}
-function SmiReduceRight() {
- result = array.reduceRight(func, this_arg);
-}
-function FastReduceRight() {
- result = array.reduceRight(func, this_arg);
-}
+(() => {
// Make sure we inline the callback, pick up all possible TurboFan
// optimizations.
@@ -50,20 +19,20 @@ function RunOptFastReduceRight(multiple) {
%NeverOptimizeFunction(OptFastReduceRight);
function OptFastReduceRight() { RunOptFastReduceRight(3); }
-function SmiReduceRightSetup() {
- array = new Array();
- for (var i = 0; i < array_size; i++) array[i] = i;
- func = (prev, value, index, object) => { return prev + 1; };
+function side_effect(a) { return a; }
+%NeverOptimizeFunction(side_effect);
+function OptUnreliableReduceRight() {
+ result = array.reduceRight(func, side_effect(array));
}
-function DoubleReduceRightSetup() {
- array = new Array();
- for (var i = 0; i < array_size; i++) array[i] = (i + 0.5);
- func = (prev, value, index, object) => { return prev + value; };
-}
+DefineHigherOrderTests([
+ // name, test function, setup function, user callback
+ "DoubleReduceRight", mc("reduceRight"), DoubleSetup, (p, v, i, o) => p + v,
+ "SmiReduceRight", mc("reduceRight"), SmiSetup, (p, v, i, a) => p + 1,
+ "FastReduceRight", mc("reduceRight"), FastSetup, (p, v, i, a) => p + v,
+ "OptFastReduceRight", OptFastReduceRight, FastSetup, undefined,
+ "OptUnreliableReduceRight", OptUnreliableReduceRight, FastSetup,
+ (p, v, i, a) => p + v
+]);
-function FastReduceRightSetup() {
- array = new Array();
- for (var i = 0; i < array_size; i++) array[i] = 'value ' + i;
- func = (prev, value, index, object) => { return prev + value; };
-}
+})();
diff --git a/deps/v8/test/js-perf-test/Array/reduce.js b/deps/v8/test/js-perf-test/Array/reduce.js
index 8790c0a861..02d689f7c4 100644
--- a/deps/v8/test/js-perf-test/Array/reduce.js
+++ b/deps/v8/test/js-perf-test/Array/reduce.js
@@ -1,38 +1,7 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
-function benchy(name, test, testSetup) {
- new BenchmarkSuite(name, [1000],
- [
- new Benchmark(name, false, false, 0, test, testSetup, ()=>{})
- ]);
-}
-
-benchy('DoubleReduce', DoubleReduce, DoubleReduceSetup);
-benchy('SmiReduce', SmiReduce, SmiReduceSetup);
-benchy('FastReduce', FastReduce, FastReduceSetup);
-benchy('OptFastReduce', OptFastReduce, FastReduceSetup);
-
-var array;
-// Initialize func variable to ensure the first test doesn't benefit from
-// global object property tracking.
-var func = 0;
-var this_arg;
-var result;
-var array_size = 100;
-
-// Although these functions have the same code, they are separated for
-// clean IC feedback.
-function DoubleReduce() {
- result = array.reduce(func, this_arg);
-}
-function SmiReduce() {
- result = array.reduce(func, this_arg);
-}
-function FastReduce() {
- result = array.reduce(func, this_arg);
-}
+(() => {
// Make sure we inline the callback, pick up all possible TurboFan
// optimizations.
@@ -50,20 +19,20 @@ function RunOptFastReduce(multiple) {
%NeverOptimizeFunction(OptFastReduce);
function OptFastReduce() { RunOptFastReduce(3); }
-function SmiReduceSetup() {
- array = new Array();
- for (var i = 0; i < array_size; i++) array[i] = i;
- func = (prev, value, index, object) => { return prev + 1; };
+function side_effect(a) { return a; }
+%NeverOptimizeFunction(side_effect);
+function OptUnreliableReduce() {
+ result = array.reduce(func, side_effect(array));
}
-function DoubleReduceSetup() {
- array = new Array();
- for (var i = 0; i < array_size; i++) array[i] = (i + 0.5);
- func = (prev, value, index, object) => { return prev + value; };
-}
+DefineHigherOrderTests([
+ // name, test function, setup function, user callback
+ "DoubleReduce", mc("reduce"), DoubleSetup, (p, v, i, o) => p + v,
+ "SmiReduce", mc("reduce"), SmiSetup, (p, v, i, a) => p + 1,
+ "FastReduce", mc("reduce"), FastSetup, (p, v, i, a) => p + v,
+ "OptFastReduce", OptFastReduce, FastSetup, undefined,
+ "OptUnreliableReduce", OptUnreliableReduce, FastSetup,
+ (p, v, i, a) => p + v
+]);
-function FastReduceSetup() {
- array = new Array();
- for (var i = 0; i < array_size; i++) array[i] = 'value ' + i;
- func = (prev, value, index, object) => { return prev + value; };
-}
+})();
diff --git a/deps/v8/test/js-perf-test/Array/run.js b/deps/v8/test/js-perf-test/Array/run.js
index baf2553eb3..15b4da94c2 100644
--- a/deps/v8/test/js-perf-test/Array/run.js
+++ b/deps/v8/test/js-perf-test/Array/run.js
@@ -2,16 +2,85 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
load('../base.js');
+let array;
+// Initialize func variable to ensure the first test doesn't benefit from
+// global object property tracking.
+let func = 0;
+let this_arg;
+let result;
+const array_size = 100;
+const max_index = array_size - 1;
+
+// mc stands for "Make Closure"; it's a handy function to get a fresh
+// closure unpolluted by IC feedback for a 2nd-order array builtin
+// test.
+function mc(name, generic = false) {
+ if (generic) {
+ return new Function(
+ `result = Array.prototype.${name}.call(array, func, this_arg);`);
+ }
+ return new Function(`result = array.${name}(func, this_arg);`);
+}
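+
+// Illustrative usage (hypothetical; mirrors the suites loaded below). Each
+// call compiles an independent closure with its own feedback vector:
+//
+//   const smiForEach = mc('forEach');  // fresh call site for one suite
+//   const strForEach = mc('forEach');  // independent of the one above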
+
+function benchy(name, test, testSetup) {
+ new BenchmarkSuite(name, [1000],
+ [
+ new Benchmark(name, false, false, 0, test, testSetup, ()=>{})
+ ]);
+}
+
+function SmiSetup() {
+ array = Array.from({ length: array_size }, (_, i) => i);
+}
+
+function DoubleSetup() {
+ array = Array.from({ length: array_size }, (_, i) => i + 0.5);
+}
+
+function FastSetup() {
+ array = Array.from({ length: array_size }, (_, i) => `value ${i}`);
+}
+
+function ObjectSetup() {
+ array = { length: array_size };
+ for (var i = 0; i < array_size; i++) {
+ array[i] = i;
+ }
+}
+
+function DefineHigherOrderTests(tests) {
+ let i = 0;
+ while (i < tests.length) {
+ const name = tests[i++];
+ const testFunc = tests[i++];
+ const setupFunc = tests[i++];
+ const callback = tests[i++];
+
+ let setupFuncWrapper = () => {
+ func = callback;
+ this_arg = undefined;
+ setupFunc();
+ };
+ benchy(name, testFunc, setupFuncWrapper);
+ }
+}
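+
+// The flat `tests` array is consumed four entries at a time:
+// [name, test function, setup function, user callback]. The wrapper
+// installs the callback as the global `func` before running setup, so
+// the measured functions only ever read globals.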
+
+// Higher-order Array builtins.
load('filter.js');
load('map.js');
load('every.js');
-load('join.js');
load('some.js');
+load('for-each.js');
load('reduce.js');
load('reduce-right.js');
+load('find.js');
+load('find-index.js');
+load('of.js');
+
+// Other Array builtins.
+load('join.js');
load('to-string.js');
var success = true;
diff --git a/deps/v8/test/js-perf-test/Array/some.js b/deps/v8/test/js-perf-test/Array/some.js
index 246ea95c13..ea820e9801 100644
--- a/deps/v8/test/js-perf-test/Array/some.js
+++ b/deps/v8/test/js-perf-test/Array/some.js
@@ -1,38 +1,7 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
-function benchy(name, test, testSetup) {
- new BenchmarkSuite(name, [1000],
- [
- new Benchmark(name, false, false, 0, test, testSetup, ()=>{})
- ]);
-}
-
-benchy('DoubleSome', DoubleSome, DoubleSomeSetup);
-benchy('SmiSome', SmiSome, SmiSomeSetup);
-benchy('FastSome', FastSome, FastSomeSetup);
-benchy('OptFastSome', OptFastSome, FastSomeSetup);
-
-var array;
-// Initialize func variable to ensure the first test doesn't benefit from
-// global object property tracking.
-var func = 0;
-var this_arg;
-var result;
-var array_size = 100;
-
-// Although these functions have the same code, they are separated for
-// clean IC feedback.
-function DoubleSome() {
- result = array.some(func, this_arg);
-}
-function SmiSome() {
- result = array.some(func, this_arg);
-}
-function FastSome() {
- result = array.some(func, this_arg);
-}
+(() => {
// Make sure we inline the callback, pick up all possible TurboFan
// optimizations.
@@ -50,20 +19,19 @@ function RunOptFastSome(multiple) {
%NeverOptimizeFunction(OptFastSome);
function OptFastSome() { RunOptFastSome(3); }
-function SmiSomeSetup() {
- array = new Array();
- for (var i = 0; i < array_size; i++) array[i] = i;
- func = (value, index, object) => { return value === 34343; };
+function side_effect(a) { return a; }
+%NeverOptimizeFunction(side_effect);
+function OptUnreliableSome() {
+ result = array.some(func, side_effect(array));
}
-function DoubleSomeSetup() {
- array = new Array();
- for (var i = 0; i < array_size; i++) array[i] = (i + 0.5);
- func = (value, index, object) => { return value < 0.0; };
-}
+DefineHigherOrderTests([
+ // name, test function, setup function, user callback
+ "DoubleSome", mc("some"), DoubleSetup, v => v < 0.0,
+ "SmiSome", mc("some"), SmiSetup, v => v === 34343,
+ "FastSome", mc("some"), FastSetup, v => v === 'hi',
+ "OptFastSome", OptFastSome, FastSetup, undefined,
+ "OptUnreliableSome", OptUnreliableSome, FastSetup, v => v === 'hi'
+]);
-function FastSomeSetup() {
- array = new Array();
- for (var i = 0; i < array_size; i++) array[i] = 'value ' + i;
- func = (value, index, object) => { return value === 'hi'; };
-}
+})();
diff --git a/deps/v8/test/js-perf-test/Array/to-string.js b/deps/v8/test/js-perf-test/Array/to-string.js
index c6a66d726b..1b6dd36eaa 100644
--- a/deps/v8/test/js-perf-test/Array/to-string.js
+++ b/deps/v8/test/js-perf-test/Array/to-string.js
@@ -1,52 +1,39 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+(() => {
-function benchy(name, test, testSetup) {
- new BenchmarkSuite(name, [1000],
- [
- new Benchmark(name, false, false, 0, test, testSetup, ()=>{})
- ]);
+function make_tostring() {
+ return new Function("result = array.toString();");
}
-benchy('SmiToString', SmiToString, SmiToStringSetup);
-benchy('StringToString', StringToString, StringToStringSetup);
-benchy('SparseSmiToString', SparseSmiToString, SparseSmiToStringSetup);
-benchy('SparseStringToString', SparseStringToString, SparseStringToStringSetup);
+benchy('SmiToString', make_tostring(), SmiToStringSetup);
+benchy('StringToString', make_tostring(), StringToStringSetup);
+benchy('SparseSmiToString', make_tostring(), SparseSmiToStringSetup);
+benchy('SparseStringToString', make_tostring(), SparseStringToStringSetup);
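+
+// As in join.js, a fresh function per suite keeps the toString IC
+// feedback separate across the four benchmarks above.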
var array;
var result;
var array_size = 1000;
-
-// Although these functions have the same code, they are separated for
-// clean IC feedback.
-function SmiToString() {
- result = array.toString();
-}
-function StringToString() {
- result = array.toString();
-}
-function SparseSmiToString() {
- result = array.toString();
-}
-function SparseStringToString() {
- result = array.toString();
-}
-
function SmiToStringSetup() {
array = new Array();
for (var i = 0; i < array_size; ++i) array[i] = i;
}
+
function StringToStringSetup() {
array = new Array();
for (var i = 0; i < array_size; ++i) array[i] = `Item no. ${i}`;
}
+
function SparseSmiToStringSetup() {
SmiToStringSetup();
array.length = array.length * 2;
}
+
function SparseStringToStringSetup() {
StringToStringSetup();
array.length = array.length * 2;
}
+
+})();
diff --git a/deps/v8/test/js-perf-test/JSTests.json b/deps/v8/test/js-perf-test/JSTests.json
index 6b0076f6fb..a1a19f5988 100644
--- a/deps/v8/test/js-perf-test/JSTests.json
+++ b/deps/v8/test/js-perf-test/JSTests.json
@@ -180,13 +180,17 @@
"name": "Strings",
"path": ["Strings"],
"main": "run.js",
- "resources": ["harmony-string.js"],
+ "resources": ["harmony-string.js", "string-indexof.js"],
"results_regexp": "^%s\\-Strings\\(Score\\): (.+)$",
"run_count": 1,
"timeout": 240,
"timeout_arm": 420,
"tests": [
- {"name": "StringFunctions"}
+ {"name": "StringFunctions"},
+ {"name": "StringIndexOfConstant"},
+ {"name": "StringIndexOfNonConstant"},
+ {"name": "StringCharCodeAtConstant"},
+ {"name": "StringCharCodeAtNonConstant"}
]
},
{
@@ -367,10 +371,22 @@
"test_flags": ["set-from-same-type"]
},
{
+ "name": "SliceNoSpecies",
+ "main": "run.js",
+ "resources": ["slice-nospecies.js"],
+ "test_flags": ["slice-nospecies"]
+ },
+ {
"name": "Sort",
"main": "run.js",
"resources": ["sort.js"],
"test_flags": ["sort"]
+ },
+ {
+ "name": "SubarrayNoSpecies",
+ "main": "run.js",
+ "resources": ["subarray-nospecies.js"],
+ "test_flags": ["subarray-nospecies"]
}
]
},
@@ -380,29 +396,40 @@
"main": "run.js",
"resources": [
"filter.js", "map.js", "every.js", "join.js", "some.js",
- "reduce.js", "reduce-right.js", "to-string.js"
+ "reduce.js", "reduce-right.js", "to-string.js", "find.js",
+ "find-index.js", "of.js"
],
"flags": [
"--allow-natives-syntax"
],
"results_regexp": "^%s\\-Array\\(Score\\): (.+)$",
"tests": [
+ {"name": "NaiveForEachReplacement"},
+ {"name": "DoubleForEach"},
+ {"name": "SmiForEach"},
+ {"name": "FastForEach"},
+ {"name": "GenericForEach"},
+ {"name": "OptFastForEach"},
+ {"name": "OptUnreliableForEach"},
{"name": "NaiveFilterReplacement"},
{"name": "DoubleFilter"},
{"name": "SmiFilter"},
{"name": "FastFilter"},
{"name": "GenericFilter"},
{"name": "OptFastFilter"},
+ {"name": "OptUnreliableFilter"},
{"name": "NaiveMapReplacement"},
{"name": "DoubleMap"},
{"name": "SmiMap"},
{"name": "FastMap"},
{"name": "GenericMap"},
{"name": "OptFastMap"},
+ {"name": "OptUnreliableMap"},
{"name": "DoubleEvery"},
{"name": "SmiEvery"},
{"name": "FastEvery"},
{"name": "OptFastEvery"},
+ {"name": "OptUnreliableEvery"},
{"name": "SmiJoin"},
{"name": "StringJoin"},
{"name": "SparseSmiJoin"},
@@ -411,18 +438,42 @@
{"name": "SmiSome"},
{"name": "FastSome"},
{"name": "OptFastSome"},
+ {"name": "OptUnreliableSome"},
{"name": "DoubleReduce"},
{"name": "SmiReduce"},
{"name": "FastReduce"},
{"name": "OptFastReduce"},
+ {"name": "OptUnreliableReduce"},
{"name": "DoubleReduceRight"},
{"name": "SmiReduceRight"},
{"name": "FastReduceRight"},
{"name": "OptFastReduceRight"},
+ {"name": "OptUnreliableReduceRight"},
{"name": "SmiToString"},
{"name": "StringToString"},
{"name": "SparseSmiToString"},
- {"name": "SparseStringToString"}
+ {"name": "SparseStringToString"},
+ {"name": "NaiveFindReplacement"},
+ {"name": "DoubleFind"},
+ {"name": "SmiFind"},
+ {"name": "FastFind"},
+ {"name": "GenericFind"},
+ {"name": "OptFastFind"},
+ {"name": "OptUnreliableFind"},
+ {"name": "NaiveFindIndexReplacement"},
+ {"name": "DoubleFindIndex"},
+ {"name": "SmiFindIndex"},
+ {"name": "FastFindIndex"},
+ {"name": "GenericFindIndex"},
+ {"name": "OptFastFindIndex"},
+ {"name": "OptUnreliableFindIndex"},
+ {"name": "EmptyArrayOf"},
+ {"name": "SmallSmiArrayOf"},
+ {"name": "LargeSmiArrayOf"},
+ {"name": "SmallTransplantedArrayOf"},
+ {"name": "SmallDoubleArrayOf"},
+ {"name": "SmallStringArrayOf"},
+ {"name": "SmallMixedArrayOf"}
]
},
{
@@ -514,6 +565,7 @@
"resources": [ "arithmetic.js" ],
"test_flags": [ "arithmetic" ],
"results_regexp": "^%s\\-BytecodeHandler\\(Score\\): (.+)$",
+ "timeout": 240,
"tests": [
{"name": "Smi-Add"},
{"name": "Number-Add"},
diff --git a/deps/v8/test/js-perf-test/Strings/run.js b/deps/v8/test/js-perf-test/Strings/run.js
index 79ca26e68a..66dd9b2188 100644
--- a/deps/v8/test/js-perf-test/Strings/run.js
+++ b/deps/v8/test/js-perf-test/Strings/run.js
@@ -5,6 +5,7 @@
load('../base.js');
load('harmony-string.js');
+load('string-indexof.js');
var success = true;
diff --git a/deps/v8/test/js-perf-test/Strings/string-indexof.js b/deps/v8/test/js-perf-test/Strings/string-indexof.js
new file mode 100644
index 0000000000..a2049e0fe6
--- /dev/null
+++ b/deps/v8/test/js-perf-test/Strings/string-indexof.js
@@ -0,0 +1,69 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+new BenchmarkSuite('StringIndexOfConstant', [5], [
+ new Benchmark('StringIndexOfConstant', true, false, 0,
+ StringIndexOfConstant),
+]);
+
+new BenchmarkSuite('StringIndexOfNonConstant', [5], [
+ new Benchmark('StringIndexOfNonConstant', true, false, 0,
+ StringIndexOfNonConstant),
+]);
+
+const subject = "aaaaaaaaaaaaaaaab";
+const searches = ['a', 'b', 'c'];
+
+function StringIndexOfConstant() {
+ var sum = 0;
+
+ for (var j = 0; j < searches.length; ++j) {
+ sum += subject.indexOf("" + searches[j]);
+ }
+
+ return sum;
+}
+
+function StringIndexOfNonConstant() {
+ var sum = 0;
+
+ for (var j = 0; j < searches.length; ++j) {
+ sum += subject.indexOf(searches[j]);
+ }
+
+ return sum;
+}
+
+new BenchmarkSuite('StringCharCodeAtConstant', [3], [
+ new Benchmark('StringCharCodeAtConstant', true, false, 0,
+ StringCharCodeAtConstant),
+]);
+
+new BenchmarkSuite('StringCharCodeAtNonConstant', [3], [
+ new Benchmark('StringCharCodeAtNonConstant', true, false, 0,
+ StringCharCodeAtNonConstant),
+]);
+
+const string = "qweruiplkjhgfdsazxccvbnm";
+const indices = [1, 13, 32, 100, "xx"];
+
+function StringCharCodeAtConstant() {
+ var sum = 0;
+
+ for (var j = 0; j < indices.length - 1; ++j) {
+ sum += string.charCodeAt(indices[j] | 0);
+ }
+
+ return sum;
+}
+
+function StringCharCodeAtNonConstant() {
+ var sum = 0;
+
+ for (var j = 0; j < indices.length - 1; ++j) {
+ sum += string.charCodeAt(indices[j]);
+ }
+
+ return sum;
+}
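+
+// Why `| 0` above: it guarantees a Smi index at the charCodeAt call site.
+// The trailing "xx" entry (always skipped via `length - 1`) keeps
+// `indices` in a generic elements kind, so the NonConstant variant loads
+// a tagged value instead. (Our reading of the setup; the benchmark itself
+// only sums the results.)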
diff --git a/deps/v8/test/js-perf-test/TypedArrays/slice-nospecies.js b/deps/v8/test/js-perf-test/TypedArrays/slice-nospecies.js
new file mode 100644
index 0000000000..4195d71a3d
--- /dev/null
+++ b/deps/v8/test/js-perf-test/TypedArrays/slice-nospecies.js
@@ -0,0 +1,34 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+new BenchmarkSuite('SliceNoSpecies', [1000], [
+ new Benchmark('SliceNoSpecies', false, false, 0,
+ slice, sliceSetup, sliceTearDown),
+]);
+
+var size = 1000;
+var initialFloat64Array = new Float64Array(size);
+for (var i = 0; i < size; ++i) {
+ initialFloat64Array[i] = Math.random();
+}
+var arr;
+var new_arr;
+
+function slice() {
+ new_arr = arr.slice(1, -1);
+}
+
+function sliceSetup() {
+ arr = new Float64Array(initialFloat64Array);
+}
+
+function sliceTearDown() {
+ for (var i = 1; i < size - 1; ++i) {
+ if (arr[i] != new_arr[i - 1]) {
+ throw new TypeError("Unexpected result!\n" + new_arr);
+ }
+ }
+ arr = void 0;
+ new_arr = void 0;
+}
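+
+// "NoSpecies" means Symbol.species is left untouched, so slice() can stay
+// on the fast path and allocate the result Float64Array directly. An
+// illustrative (hypothetical) subclass with a species override that would
+// take the slower, spec-driven path instead:
+//
+//   class MyF64 extends Float64Array {
+//     static get [Symbol.species]() { return Float64Array; }
+//   }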
diff --git a/deps/v8/test/js-perf-test/TypedArrays/subarray-nospecies.js b/deps/v8/test/js-perf-test/TypedArrays/subarray-nospecies.js
new file mode 100644
index 0000000000..732dbff1de
--- /dev/null
+++ b/deps/v8/test/js-perf-test/TypedArrays/subarray-nospecies.js
@@ -0,0 +1,34 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+new BenchmarkSuite('SubarrayNoSpecies', [1000], [
+ new Benchmark('SubarrayNoSpecies', false, false, 0,
+ subarray, subarraySetup, subarrayTearDown),
+]);
+
+var size = 1000;
+var initialFloat64Array = new Float64Array(size);
+for (var i = 0; i < size; ++i) {
+ initialFloat64Array[i] = Math.random();
+}
+var arr;
+var new_arr;
+
+function subarray() {
+ new_arr = arr.subarray(1, -1);
+}
+
+function subarraySetup() {
+ arr = new Float64Array(initialFloat64Array);
+}
+
+function subarrayTearDown() {
+ for (var i = 1; i < size - 1; ++i) {
+ if (arr[i] != new_arr[i - 1]) {
+ throw new TypeError("Unexpected result!\n" + new_arr);
+ }
+ }
+ arr = void 0;
+ new_arr = void 0;
+}
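+
+// See slice-nospecies.js: with Symbol.species untouched, subarray() can
+// create the new view on the fast path.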
diff --git a/deps/v8/test/message/fail/array-binding-pattern-await1.js b/deps/v8/test/message/fail/array-binding-pattern-await1.js
new file mode 100644
index 0000000000..09afccc09f
--- /dev/null
+++ b/deps/v8/test/message/fail/array-binding-pattern-await1.js
@@ -0,0 +1,9 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+async function f() {
+ let [await b] = [];
+ return b;
+}
+f();
diff --git a/deps/v8/test/message/fail/array-binding-pattern-await1.out b/deps/v8/test/message/fail/array-binding-pattern-await1.out
new file mode 100644
index 0000000000..9ebe2433ed
--- /dev/null
+++ b/deps/v8/test/message/fail/array-binding-pattern-await1.out
@@ -0,0 +1,4 @@
+*%(basename)s:6: SyntaxError: Invalid destructuring assignment target
+ let [await b] = [];
+ ^^^^^^^
+SyntaxError: Invalid destructuring assignment target
diff --git a/deps/v8/test/message/fail/class-field-static-constructor.js b/deps/v8/test/message/fail/class-field-static-constructor.js
index b64cf6254c..63ce1c04d7 100644
--- a/deps/v8/test/message/fail/class-field-static-constructor.js
+++ b/deps/v8/test/message/fail/class-field-static-constructor.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-public-fields
+// Flags: --harmony-public-fields --harmony-static-fields
class X {
static constructor = function() {};
diff --git a/deps/v8/test/message/fail/class-field-static-prototype.js b/deps/v8/test/message/fail/class-field-static-prototype.js
index da8120481a..656518879a 100644
--- a/deps/v8/test/message/fail/class-field-static-prototype.js
+++ b/deps/v8/test/message/fail/class-field-static-prototype.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-public-fields
+// Flags: --harmony-public-fields --harmony-static-fields
class X {
static prototype = function() {};
diff --git a/deps/v8/test/message/fail/modules-cycle1.out b/deps/v8/test/message/fail/modules-cycle1.out
index 3e6f31b1d4..ef6b57f074 100644
--- a/deps/v8/test/message/fail/modules-cycle1.out
+++ b/deps/v8/test/message/fail/modules-cycle1.out
@@ -1,5 +1,5 @@
-*%(basename)s:7: SyntaxError: Detected cycle while resolving name 'a'
+*%(basename)s:7: SyntaxError: Detected cycle while resolving name 'a' in 'modules-cycle1.js'
import {a} from "modules-cycle1.js";
^
-SyntaxError: Detected cycle while resolving name 'a'
+SyntaxError: Detected cycle while resolving name 'a' in 'modules-cycle1.js'
diff --git a/deps/v8/test/message/fail/modules-cycle2.out b/deps/v8/test/message/fail/modules-cycle2.out
index f3c19d20ed..c0d785fe6c 100644
--- a/deps/v8/test/message/fail/modules-cycle2.out
+++ b/deps/v8/test/message/fail/modules-cycle2.out
@@ -1,5 +1,5 @@
-*%(basename)s:7: SyntaxError: Detected cycle while resolving name 'a'
+*%(basename)s:7: SyntaxError: Detected cycle while resolving name 'a' in 'modules-skip-cycle2.js'
import {a} from "modules-skip-cycle2.js";
^
-SyntaxError: Detected cycle while resolving name 'a'
+SyntaxError: Detected cycle while resolving name 'a' in 'modules-skip-cycle2.js'
diff --git a/deps/v8/test/message/fail/modules-cycle3.out b/deps/v8/test/message/fail/modules-cycle3.out
index a5b10149f9..51bf9078cc 100644
--- a/deps/v8/test/message/fail/modules-cycle3.out
+++ b/deps/v8/test/message/fail/modules-cycle3.out
@@ -1,5 +1,5 @@
-*%(basename)s:7: SyntaxError: Detected cycle while resolving name 'a'
+*%(basename)s:7: SyntaxError: Detected cycle while resolving name 'a' in 'modules-skip-cycle3.js'
export {a as x} from "modules-skip-cycle3.js";
^^^^^^
-SyntaxError: Detected cycle while resolving name 'a'
+SyntaxError: Detected cycle while resolving name 'a' in 'modules-skip-cycle3.js'
diff --git a/deps/v8/test/message/fail/modules-cycle4.out b/deps/v8/test/message/fail/modules-cycle4.out
index 74789e0ec9..6e27ced3ad 100644
--- a/deps/v8/test/message/fail/modules-cycle4.out
+++ b/deps/v8/test/message/fail/modules-cycle4.out
@@ -1,5 +1,5 @@
-*modules-cycle3.js:7: SyntaxError: Detected cycle while resolving name 'a'
+*modules-cycle3.js:7: SyntaxError: Detected cycle while resolving name 'a' in 'modules-skip-cycle3.js'
export {a as x} from "modules-skip-cycle3.js";
^^^^^^
-SyntaxError: Detected cycle while resolving name 'a'
+SyntaxError: Detected cycle while resolving name 'a' in 'modules-skip-cycle3.js'
diff --git a/deps/v8/test/message/fail/modules-cycle5.out b/deps/v8/test/message/fail/modules-cycle5.out
index 8eb3e606af..c97cc8d0a4 100644
--- a/deps/v8/test/message/fail/modules-cycle5.out
+++ b/deps/v8/test/message/fail/modules-cycle5.out
@@ -1,5 +1,5 @@
-*%(basename)s:8: SyntaxError: Detected cycle while resolving name 'foo'
+*%(basename)s:8: SyntaxError: Detected cycle while resolving name 'foo' in 'modules-cycle5.js'
export {foo} from "modules-cycle5.js";
^^^
-SyntaxError: Detected cycle while resolving name 'foo'
+SyntaxError: Detected cycle while resolving name 'foo' in 'modules-cycle5.js'
diff --git a/deps/v8/test/message/fail/modules-cycle6.out b/deps/v8/test/message/fail/modules-cycle6.out
index d91e1abc14..ed0e409d5c 100644
--- a/deps/v8/test/message/fail/modules-cycle6.out
+++ b/deps/v8/test/message/fail/modules-cycle6.out
@@ -1,5 +1,5 @@
-*modules-skip-cycle6.js:5: SyntaxError: The requested module does not provide an export named 'foo'
+*modules-skip-cycle6.js:5: SyntaxError: The requested module 'modules-cycle6.js' does not provide an export named 'foo'
export {foo} from "modules-cycle6.js";
^^^
-SyntaxError: The requested module does not provide an export named 'foo'
+SyntaxError: The requested module 'modules-cycle6.js' does not provide an export named 'foo'
diff --git a/deps/v8/test/message/fail/modules-import1.out b/deps/v8/test/message/fail/modules-import1.out
index 6facd0fa7c..ce023698f8 100644
--- a/deps/v8/test/message/fail/modules-import1.out
+++ b/deps/v8/test/message/fail/modules-import1.out
@@ -1,5 +1,5 @@
-*%(basename)s:7: SyntaxError: The requested module does not provide an export named 'a'
+*%(basename)s:7: SyntaxError: The requested module 'modules-import1.js' does not provide an export named 'a'
import {a} from "modules-import1.js";
^
-SyntaxError: The requested module does not provide an export named 'a'
+SyntaxError: The requested module 'modules-import1.js' does not provide an export named 'a'
diff --git a/deps/v8/test/message/fail/modules-import2.out b/deps/v8/test/message/fail/modules-import2.out
index 317399a6db..485419721d 100644
--- a/deps/v8/test/message/fail/modules-import2.out
+++ b/deps/v8/test/message/fail/modules-import2.out
@@ -1,5 +1,5 @@
-*%(basename)s:7: SyntaxError: The requested module does not provide an export named 'a'
+*%(basename)s:7: SyntaxError: The requested module 'modules-import2.js' does not provide an export named 'a'
import {a as b} from "modules-import2.js";
^
-SyntaxError: The requested module does not provide an export named 'a'
+SyntaxError: The requested module 'modules-import2.js' does not provide an export named 'a'
diff --git a/deps/v8/test/message/fail/modules-import3.out b/deps/v8/test/message/fail/modules-import3.out
index 75abc74b50..fd6904d003 100644
--- a/deps/v8/test/message/fail/modules-import3.out
+++ b/deps/v8/test/message/fail/modules-import3.out
@@ -1,5 +1,5 @@
-*%(basename)s:7: SyntaxError: The requested module does not provide an export named 'default'
+*%(basename)s:7: SyntaxError: The requested module 'modules-import3.js' does not provide an export named 'default'
import foo from "modules-import3.js";
^^^
-SyntaxError: The requested module does not provide an export named 'default'
+SyntaxError: The requested module 'modules-import3.js' does not provide an export named 'default'
diff --git a/deps/v8/test/message/fail/modules-import4.out b/deps/v8/test/message/fail/modules-import4.out
index bd406e4021..8193d9e4c8 100644
--- a/deps/v8/test/message/fail/modules-import4.out
+++ b/deps/v8/test/message/fail/modules-import4.out
@@ -1,5 +1,5 @@
-*%(basename)s:8: SyntaxError: The requested module does not provide an export named 'c'
+*%(basename)s:8: SyntaxError: The requested module 'modules-import4.js' does not provide an export named 'c'
export {c as a} from "modules-import4.js";
^^^^^^
-SyntaxError: The requested module does not provide an export named 'c'
+SyntaxError: The requested module 'modules-import4.js' does not provide an export named 'c'
diff --git a/deps/v8/test/message/fail/modules-import5.out b/deps/v8/test/message/fail/modules-import5.out
index 8828774c92..3efb69258e 100644
--- a/deps/v8/test/message/fail/modules-import5.out
+++ b/deps/v8/test/message/fail/modules-import5.out
@@ -1,5 +1,5 @@
-*%(basename)s:8: SyntaxError: The requested module does not provide an export named 'c'
+*%(basename)s:8: SyntaxError: The requested module 'modules-import5.js' does not provide an export named 'c'
export {c as a} from "modules-import5.js";
^^^^^^
-SyntaxError: The requested module does not provide an export named 'c'
+SyntaxError: The requested module 'modules-import5.js' does not provide an export named 'c'
diff --git a/deps/v8/test/message/fail/modules-import6.out b/deps/v8/test/message/fail/modules-import6.out
index 9d7eeebe5d..43e81135c4 100644
--- a/deps/v8/test/message/fail/modules-import6.out
+++ b/deps/v8/test/message/fail/modules-import6.out
@@ -1,5 +1,5 @@
-*%(basename)s:9: SyntaxError: The requested module does not provide an export named 'c'
+*%(basename)s:9: SyntaxError: The requested module 'modules-import6.js' does not provide an export named 'c'
import {c} from "modules-import6.js";
^
-SyntaxError: The requested module does not provide an export named 'c'
+SyntaxError: The requested module 'modules-import6.js' does not provide an export named 'c'
diff --git a/deps/v8/test/message/fail/modules-star-conflict1.out b/deps/v8/test/message/fail/modules-star-conflict1.out
index 969a566edc..db8671165b 100644
--- a/deps/v8/test/message/fail/modules-star-conflict1.out
+++ b/deps/v8/test/message/fail/modules-star-conflict1.out
@@ -1,5 +1,5 @@
-*%(basename)s:7: SyntaxError: The requested module contains conflicting star exports for name 'a'
+*%(basename)s:7: SyntaxError: The requested module '../../mjsunit/modules-skip-7.js' contains conflicting star exports for name 'a'
import {a} from "../../mjsunit/modules-skip-7.js";
^
-SyntaxError: The requested module contains conflicting star exports for name 'a'
+SyntaxError: The requested module '../../mjsunit/modules-skip-7.js' contains conflicting star exports for name 'a'
diff --git a/deps/v8/test/message/fail/modules-star-conflict2.out b/deps/v8/test/message/fail/modules-star-conflict2.out
index 34827e0374..d6decf733d 100644
--- a/deps/v8/test/message/fail/modules-star-conflict2.out
+++ b/deps/v8/test/message/fail/modules-star-conflict2.out
@@ -1,5 +1,5 @@
-*%(basename)s:7: SyntaxError: The requested module contains conflicting star exports for name 'a'
+*%(basename)s:7: SyntaxError: The requested module '../../mjsunit/modules-skip-star-exports-conflict.js' contains conflicting star exports for name 'a'
export * from "../../mjsunit/modules-skip-star-exports-conflict.js";
^
-SyntaxError: The requested module contains conflicting star exports for name 'a'
+SyntaxError: The requested module '../../mjsunit/modules-skip-star-exports-conflict.js' contains conflicting star exports for name 'a'
diff --git a/deps/v8/test/message/fail/modules-star-default.out b/deps/v8/test/message/fail/modules-star-default.out
index a3cd5a6107..1524a7cba6 100644
--- a/deps/v8/test/message/fail/modules-star-default.out
+++ b/deps/v8/test/message/fail/modules-star-default.out
@@ -1,5 +1,5 @@
-*modules-import4.js:8: SyntaxError: The requested module does not provide an export named 'c'
+*modules-import4.js:8: SyntaxError: The requested module 'modules-import4.js' does not provide an export named 'c'
export {c as a} from "modules-import4.js";
^^^^^^
-SyntaxError: The requested module does not provide an export named 'c'
+SyntaxError: The requested module 'modules-import4.js' does not provide an export named 'c'
diff --git a/deps/v8/test/message/fail/object-binding-pattern-await1.js b/deps/v8/test/message/fail/object-binding-pattern-await1.js
new file mode 100644
index 0000000000..8ef7b25866
--- /dev/null
+++ b/deps/v8/test/message/fail/object-binding-pattern-await1.js
@@ -0,0 +1,9 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+async function f() {
+ let { a: await b } = { a: 1 };
+ return b;
+}
+f();
diff --git a/deps/v8/test/message/fail/object-binding-pattern-await1.out b/deps/v8/test/message/fail/object-binding-pattern-await1.out
new file mode 100644
index 0000000000..ee7a3eafb6
--- /dev/null
+++ b/deps/v8/test/message/fail/object-binding-pattern-await1.out
@@ -0,0 +1,4 @@
+*%(basename)s:6: SyntaxError: Invalid destructuring assignment target
+ let { a: await b } = { a: 1 };
+ ^^^^^^^
+SyntaxError: Invalid destructuring assignment target
diff --git a/deps/v8/test/message/message.status b/deps/v8/test/message/message.status
index 1c40a25186..6aba054251 100644
--- a/deps/v8/test/message/message.status
+++ b/deps/v8/test/message/message.status
@@ -37,9 +37,10 @@
'fail/modules-skip*': [SKIP],
}], # ALWAYS
-##############################################################################
-# BUG(v8:7138).
-['arch == arm and not simulator_run and variant == wasm_traps', {
- '*': [SKIP],
-}], # arch == arm and not simulator_run and variant == wasm_traps
+# Liftoff is currently only sufficiently implemented on x64 and ia32.
+# TODO(clemensh): Implement on all other platforms (crbug.com/v8/6600).
+['arch != x64 and arch != ia32', {
+ 'wasm-trace-memory-liftoff': [SKIP],
+}], # arch != x64 and arch != ia32
+
]
diff --git a/deps/v8/test/message/object-binding-pattern-await-computed-name.js b/deps/v8/test/message/object-binding-pattern-await-computed-name.js
new file mode 100644
index 0000000000..1b40d0d4a9
--- /dev/null
+++ b/deps/v8/test/message/object-binding-pattern-await-computed-name.js
@@ -0,0 +1,9 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+async function f() {
+ let { [await "a"]: a } = { a: 1 };
+ return a;
+}
+f();
diff --git a/deps/v8/tools/release/testdata/v8/base/trace_event/common/common b/deps/v8/test/message/object-binding-pattern-await-computed-name.out
index e69de29bb2..e69de29bb2 100644
--- a/deps/v8/tools/release/testdata/v8/base/trace_event/common/common
+++ b/deps/v8/test/message/object-binding-pattern-await-computed-name.out
diff --git a/deps/v8/test/message/testcfg.py b/deps/v8/test/message/testcfg.py
index 28a1e641f6..cd1495f390 100644
--- a/deps/v8/test/message/testcfg.py
+++ b/deps/v8/test/message/testcfg.py
@@ -25,24 +25,20 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import itertools
import os
import re
from testrunner.local import testsuite
from testrunner.local import utils
from testrunner.objects import testcase
+from testrunner.outproc import message
-FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")
INVALID_FLAGS = ["--enable-slow-asserts"]
MODULE_PATTERN = re.compile(r"^// MODULE$", flags=re.MULTILINE)
-class MessageTestSuite(testsuite.TestSuite):
- def __init__(self, name, root):
- super(MessageTestSuite, self).__init__(name, root)
-
+class TestSuite(testsuite.TestSuite):
def ListTests(self, context):
tests = []
for dirname, dirs, files in os.walk(self.root):
@@ -55,80 +51,64 @@ class MessageTestSuite(testsuite.TestSuite):
fullpath = os.path.join(dirname, filename)
relpath = fullpath[len(self.root) + 1 : -3]
testname = relpath.replace(os.path.sep, "/")
- test = testcase.TestCase(self, testname)
+ test = self._create_test(testname)
tests.append(test)
return tests
- def CreateVariantGenerator(self, variants):
- return super(MessageTestSuite, self).CreateVariantGenerator(
+ def _test_class(self):
+ return TestCase
+
+ def CreateLegacyVariantsGenerator(self, variants):
+ return super(TestSuite, self).CreateLegacyVariantsGenerator(
variants + ["preparser"])
- def GetParametersForTestCase(self, testcase, context):
- source = self.GetSourceForTest(testcase)
+ def create_variant_proc(self, variants):
+ return super(TestSuite, self).create_variant_proc(variants + ['preparser'])
+
+
+class TestCase(testcase.TestCase):
+ def __init__(self, *args, **kwargs):
+ super(TestCase, self).__init__(*args, **kwargs)
+
+ source = self.get_source()
+ self._source_files = self._parse_source_files(source)
+ self._source_flags = self._parse_source_flags(source)
+
+ def _parse_source_files(self, source):
files = []
if MODULE_PATTERN.search(source):
files.append("--module")
- files.append(os.path.join(self.root, testcase.path + ".js"))
- flags = testcase.flags + context.mode_flags
- flags_match = re.findall(FLAGS_PATTERN, source)
- for match in flags_match:
- flags += match.strip().split()
- flags = [x for x in flags if x not in INVALID_FLAGS]
- return files, flags, {}
-
- def GetSourceForTest(self, testcase):
- filename = os.path.join(self.root, testcase.path + self.suffix())
- with open(filename) as f:
- return f.read()
-
- def _IgnoreLine(self, string):
- """Ignore empty lines, valgrind output, Android output."""
- if not string: return True
- if not string.strip(): return True
- return (string.startswith("==") or string.startswith("**") or
- string.startswith("ANDROID"))
-
- def _GetExpectedFail(self, testcase):
- path = testcase.path
+ files.append(os.path.join(self.suite.root, self.path + ".js"))
+ return files
+
+ def _expected_fail(self):
+ path = self.path
while path:
- (head, tail) = os.path.split(path)
- if tail == "fail":
+ head, tail = os.path.split(path)
+ if tail == 'fail':
return True
path = head
return False
- def IsFailureOutput(self, testcase):
- output = testcase.output
- testpath = testcase.path
- expected_fail = self._GetExpectedFail(testcase)
- fail = testcase.output.exit_code != 0
- if expected_fail != fail:
- return True
- expected_path = os.path.join(self.root, testpath + ".out")
- expected_lines = []
- # Can't use utils.ReadLinesFrom() here because it strips whitespace.
- with open(expected_path) as f:
- for line in f:
- if line.startswith("#") or not line.strip(): continue
- expected_lines.append(line)
- raw_lines = output.stdout.splitlines()
- actual_lines = [ s for s in raw_lines if not self._IgnoreLine(s) ]
- env = { "basename": os.path.basename(testpath + ".js") }
- if len(expected_lines) != len(actual_lines):
- return True
- for (expected, actual) in itertools.izip_longest(
- expected_lines, actual_lines, fillvalue=''):
- pattern = re.escape(expected.rstrip() % env)
- pattern = pattern.replace("\\*", ".*")
- pattern = pattern.replace("\\{NUMBER\\}", "\d+(?:\.\d*)?")
- pattern = "^%s$" % pattern
- if not re.match(pattern, actual):
- return True
- return False
+ def _get_cmd_params(self, ctx):
+ params = super(TestCase, self)._get_cmd_params(ctx)
+ return [p for p in params if p not in INVALID_FLAGS]
+
+ def _get_files_params(self, ctx):
+ return self._source_files
+
+ def _get_source_flags(self):
+ return self._source_flags
+
+ def _get_source_path(self):
+ return os.path.join(self.suite.root, self.path + self._get_suffix())
- def StripOutputForTransmit(self, testcase):
- pass
+ @property
+ def output_proc(self):
+ return message.OutProc(self.expected_outcomes,
+ os.path.join(self.suite.root, self.path),
+ self._expected_fail())
def GetSuite(name, root):
- return MessageTestSuite(name, root)
+ return TestSuite(name, root)
diff --git a/deps/v8/test/message/wasm-trace-memory-interpreted.out b/deps/v8/test/message/wasm-trace-memory-interpreted.out
index 6854727885..7865195649 100644
--- a/deps/v8/test/message/wasm-trace-memory-interpreted.out
+++ b/deps/v8/test/message/wasm-trace-memory-interpreted.out
@@ -1,9 +1,9 @@
-I 0+0x3 read @00000004 i32:0 / 00000000
-I 1+0x3 read @00000001 i8:0 / 00
+I 0+0x3 load @00000004 i32:0 / 00000000
+I 1+0x3 load @00000001 i8:0 / 00
I 3+0x5 store @00000004 i32:305419896 / 12345678
-I 0+0x3 read @00000002 i32:1450704896 / 56780000
-I 1+0x3 read @00000006 i8:52 / 34
-I 2+0x3 read @00000002 f32:68169720922112.000000 / 56780000
+I 0+0x3 load @00000002 i32:1450704896 / 56780000
+I 1+0x3 load @00000006 i8:52 / 34
+I 2+0x3 load @00000002 f32:68169720922112.000000 / 56780000
I 4+0x5 store @00000004 i8:171 / ab
-I 0+0x3 read @00000002 i32:1454047232 / 56ab0000
-I 2+0x3 read @00000002 f32:94008244174848.000000 / 56ab0000
+I 0+0x3 load @00000002 i32:1454047232 / 56ab0000
+I 2+0x3 load @00000002 f32:94008244174848.000000 / 56ab0000
diff --git a/deps/v8/test/message/wasm-trace-memory-liftoff.js b/deps/v8/test/message/wasm-trace-memory-liftoff.js
new file mode 100644
index 0000000000..00b6421c1b
--- /dev/null
+++ b/deps/v8/test/message/wasm-trace-memory-liftoff.js
@@ -0,0 +1,7 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-stress-opt --expose-wasm --wasm-trace-memory --liftoff
+
+load("test/message/wasm-trace-memory.js");
diff --git a/deps/v8/test/message/wasm-trace-memory-liftoff.out b/deps/v8/test/message/wasm-trace-memory-liftoff.out
new file mode 100644
index 0000000000..26f22a5498
--- /dev/null
+++ b/deps/v8/test/message/wasm-trace-memory-liftoff.out
@@ -0,0 +1,9 @@
+L 0+0x3 load @00000004 i32:0 / 00000000
+L 1+0x3 load @00000001 i8:0 / 00
+L 3+0x5 store @00000004 i32:305419896 / 12345678
+L 0+0x3 load @00000002 i32:1450704896 / 56780000
+L 1+0x3 load @00000006 i8:52 / 34
+L 2+0x3 load @00000002 f32:68169720922112.000000 / 56780000
+L 4+0x5 store @00000004 i8:171 / ab
+L 0+0x3 load @00000002 i32:1454047232 / 56ab0000
+L 2+0x3 load @00000002 f32:94008244174848.000000 / 56ab0000
diff --git a/deps/v8/test/message/wasm-trace-memory.js b/deps/v8/test/message/wasm-trace-memory.js
index 6c33b900b1..1beb76a01b 100644
--- a/deps/v8/test/message/wasm-trace-memory.js
+++ b/deps/v8/test/message/wasm-trace-memory.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --no-stress-opt --expose-wasm --wasm-trace-memory
+// Flags: --no-stress-opt --expose-wasm --wasm-trace-memory --no-liftoff
load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
diff --git a/deps/v8/test/message/wasm-trace-memory.out b/deps/v8/test/message/wasm-trace-memory.out
index 12cbd180a0..37faa6a989 100644
--- a/deps/v8/test/message/wasm-trace-memory.out
+++ b/deps/v8/test/message/wasm-trace-memory.out
@@ -1,9 +1,9 @@
-C 0+0x3 read @00000004 i32:0 / 00000000
-C 1+0x3 read @00000001 i8:0 / 00
-C 3+0x5 store @00000004 i32:305419896 / 12345678
-C 0+0x3 read @00000002 i32:1450704896 / 56780000
-C 1+0x3 read @00000006 i8:52 / 34
-C 2+0x3 read @00000002 f32:68169720922112.000000 / 56780000
-C 4+0x5 store @00000004 i8:171 / ab
-C 0+0x3 read @00000002 i32:1454047232 / 56ab0000
-C 2+0x3 read @00000002 f32:94008244174848.000000 / 56ab0000
+T 0+0x3 load @00000004 i32:0 / 00000000
+T 1+0x3 load @00000001 i8:0 / 00
+T 3+0x5 store @00000004 i32:305419896 / 12345678
+T 0+0x3 load @00000002 i32:1450704896 / 56780000
+T 1+0x3 load @00000006 i8:52 / 34
+T 2+0x3 load @00000002 f32:68169720922112.000000 / 56780000
+T 4+0x5 store @00000004 i8:171 / ab
+T 0+0x3 load @00000002 i32:1454047232 / 56ab0000
+T 2+0x3 load @00000002 f32:94008244174848.000000 / 56ab0000
diff --git a/deps/v8/test/mjsunit/array-iteration.js b/deps/v8/test/mjsunit/array-iteration.js
index 9d03ed13ce..4de58208b4 100644
--- a/deps/v8/test/mjsunit/array-iteration.js
+++ b/deps/v8/test/mjsunit/array-iteration.js
@@ -73,6 +73,31 @@
assertEquals(3, count);
for (var i in a) assertEquals(2, a[i]);
+ // Skip over missing properties.
+ a = {
+ "0": 0,
+ "2": 2,
+ length: 3
+ };
+ var received = [];
+ assertArrayEquals([2],
+ Array.prototype.filter.call(a, function(n) {
+ received.push(n);
+ return n == 2;
+ }));
+ assertArrayEquals([0, 2], received);
+
+ // Modify array prototype
+ a = [0, , 2];
+ received = [];
+ assertArrayEquals([2],
+ Array.prototype.filter.call(a, function(n) {
+ a.__proto__ = null;
+ received.push(n);
+ return n == 2;
+ }));
+ assertArrayEquals([0, 2], received);
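+ // Together, the two blocks above check that filter performs a fresh
+ // HasProperty lookup on every iteration: nulling out the prototype
+ // mid-loop must not break hole-skipping, so only indices 0 and 2 are
+ // visited.
+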
+
// Create a new object in each function call when receiver is a
// primitive value. See ECMA-262, Annex C.
a = [];
@@ -131,6 +156,26 @@
a.forEach(function(n) { count++; });
assertEquals(1, count);
+ // Skip over missing properties.
+ a = {
+ "0": 0,
+ "2": 2,
+ length: 3
+ };
+ var received = [];
+ Array.prototype.forEach.call(a, function(n) { received.push(n); });
+ assertArrayEquals([0, 2], received);
+
+ // Modify array prototype
+ a = [0, , 2];
+ received = [];
+ Array.prototype.forEach.call(a, function(n) {
+ a.__proto__ = null;
+ received.push(n);
+ return n == 2;
+ });
+ assertArrayEquals([0, 2], received);
+
// Create a new object in each function call when receiver is a
// primitive value. See ECMA-262, Annex C.
a = [];
@@ -194,6 +239,31 @@
assertTrue(a.every(function(n) { count++; return n == 2; }));
assertEquals(2, count);
+ // Skip over missing properties.
+ a = {
+ "0": 2,
+ "2": 2,
+ length: 3
+ };
+ var received = [];
+ assertTrue(
+ Array.prototype.every.call(a, function(n) {
+ received.push(n);
+ return n == 2;
+ }));
+ assertArrayEquals([2, 2], received);
+
+ // Modify array prototype
+ a = [2, , 2];
+ received = [];
+ assertTrue(
+ Array.prototype.every.call(a, function(n) {
+ a.__proto__ = null;
+ received.push(n);
+ return n == 2;
+ }));
+ assertArrayEquals([2, 2], received);
+
// Create a new object in each function call when receiver is a
// primitive value. See ECMA-262, Annex C.
a = [];
@@ -252,6 +322,31 @@
a = a.map(function(n) { return 2*n; });
for (var i in a) assertEquals(4, a[i]);
+ // Skip over missing properties.
+ a = {
+ "0": 1,
+ "2": 2,
+ length: 3
+ };
+ var received = [];
+ assertArrayEquals([2, , 4],
+ Array.prototype.map.call(a, function(n) {
+ received.push(n);
+ return n * 2;
+ }));
+ assertArrayEquals([1, 2], received);
+
+ // Modify array prototype
+ a = [1, , 2];
+ received = [];
+ assertArrayEquals([2, , 4],
+ Array.prototype.map.call(a, function(n) {
+ a.__proto__ = null;
+ received.push(n);
+ return n * 2;
+ }));
+ assertArrayEquals([1, 2], received);
+
// Create a new object in each function call when receiver is a
// primitive value. See ECMA-262, Annex C.
a = [];
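
An aside on what these new cases pin down: per spec, filter, forEach, every and map skip missing indices outright (each index gets a HasProperty check before the callback runs), even when the callback tears the prototype off mid-iteration. A minimal standalone sketch of that behavior, plain JS runnable in any engine and not part of the patch:

const sparse = [0, , 2];                 // index 1 is a hole
const visited = [];
sparse.forEach(v => visited.push(v));
console.log(visited);                    // [0, 2]
console.log(sparse.filter(() => true));  // [0, 2] -- holes dropped
console.log(sparse.map(x => x * 2));     // [0, <1 empty item>, 4] -- hole preserved
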
diff --git a/deps/v8/test/mjsunit/array-reduce.js b/deps/v8/test/mjsunit/array-reduce.js
index 4a4494a72c..171a40f092 100644
--- a/deps/v8/test/mjsunit/array-reduce.js
+++ b/deps/v8/test/mjsunit/array-reduce.js
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --allow-natives-syntax
+
/**
* @fileoverview Test reduce and reduceRight
*/
@@ -557,3 +559,685 @@ assertEquals(undefined, arr.reduceRight(function(val) { return val }));
}, 'initial')
}, 'do not continue');
})();
+
+(function OptimizedReduce() {
+ let f = (a,current) => a + current;
+ let g = function(a) {
+ return a.reduce(f);
+ }
+ let a = [1,2,3,4,5,6,7,8,9,10];
+ g(a); g(a);
+ let total = g(a);
+ %OptimizeFunctionOnNextCall(g);
+ assertEquals(total, g(a));
+})();
+
+(function OptimizedReduceEmpty() {
+ let f = (a,current) => a + current;
+ let g = function(a) {
+ return a.reduce(f);
+ }
+ let a = [1,2,3,4,5,6,7,8,9,10];
+ g(a); g(a); g(a);
+ %OptimizeFunctionOnNextCall(g);
+ g(a);
+ assertThrows(() => g([]));
+})();
+
+(function OptimizedReduceLazyDeopt() {
+ let deopt = false;
+ let f = (a,current) => { if (deopt) %DeoptimizeNow(); return a + current; };
+ let g = function(a) {
+ return a.reduce(f);
+ }
+ let a = [1,2,3,4,5,6,7,8,9,10];
+ g(a); g(a);
+ let total = g(a);
+ %OptimizeFunctionOnNextCall(g);
+ g(a);
+ deopt = true;
+ assertEquals(total, g(a));
+})();
+
+(function OptimizedReduceLazyDeoptMiddleOfIteration() {
+ let deopt = false;
+ let f = (a,current) => {
+ if (current == 6 && deopt) %DeoptimizeNow();
+ return a + current;
+ };
+ let g = function(a) {
+ return a.reduce(f);
+ }
+ let a = [11,22,33,45,56,6,77,84,93,101];
+ g(a); g(a);
+ let total = g(a);
+ %OptimizeFunctionOnNextCall(g);
+ g(a);
+ deopt = true;
+ assertEquals(total, g(a));
+})();
+
+(function OptimizedReduceEagerDeoptMiddleOfIteration() {
+ let deopt = false;
+ let array = [11,22,33,45,56,6,77,84,93,101];
+ let f = (a,current) => {
+ if (current == 6 && deopt) {array[0] = 1.5; }
+ return a + current;
+ };
+ let g = function() {
+ return array.reduce(f);
+ }
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ deopt = true;
+ g();
+ deopt = false;
+ array = [11,22,33,45,56,6,77,84,93,101];
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ deopt = true;
+ assertEquals(total, g());
+})();
+
+(function ReduceCatch() {
+ let f = (a,current) => {
+ return a + current;
+ };
+ let g = function() {
+ try {
+ return array.reduce(f);
+ } catch (e) {
+ }
+ }
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ g();
+ assertEquals(total, g());
+})();
+
+(function ReduceThrow() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) throw "x";
+ return a + current;
+ };
+ let array = [1,2,3];
+ let g = function() {
+ try {
+ return array.reduce(f);
+ } catch (e) {
+ return null;
+ }
+ }
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ done = false;
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+})();
+
+(function ReduceThrow() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) throw "x";
+ return a + current;
+ };
+ %NeverOptimizeFunction(f);
+ let array = [1,2,3];
+ let g = function() {
+ try {
+ return array.reduce(f);
+ } catch (e) {
+ return null;
+ }
+ }
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ done = false;
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+})();
+
+(function ReduceFinally() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) throw "x";
+ return a + current;
+ };
+ let array = [1,2,3];
+ let g = function() {
+ try {
+ return array.reduce(f);
+ } catch (e) {
+ } finally {
+ if (done) return null;
+ }
+ }
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ done = false;
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+})();
+
+(function ReduceFinallyNoInline() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) throw "x";
+ return a + current;
+ };
+ %NeverOptimizeFunction(f);
+ let array = [1,2,3];
+ let g = function() {
+ try {
+ return array.reduce(f);
+ } catch (e) {
+ } finally {
+ if (done) return null;
+ }
+ }
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ done = false;
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+})();
+
+(function ReduceNonCallableOpt() {
+ let done = false;
+ let f = (a, current) => {
+ return a + current;
+ };
+ let array = [1,2,3];
+ let g = function() {
+ return array.reduce(f);
+ }
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g(); g();
+ assertEquals(6, g());
+ f = null;
+ assertThrows(() => g());
+})();
+
+(function ReduceCatchInlineDeopt() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) {
+ %DeoptimizeNow();
+ throw "x";
+ }
+ return a + current;
+ };
+ let array = [1,2,3];
+ let g = function() {
+ try {
+ return array.reduce(f);
+ } catch (e) {
+ if (done) return null;
+ }
+ }
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ done = false;
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+})();
+
+(function ReduceFinallyInlineDeopt() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) {
+ %DeoptimizeNow();
+ throw "x";
+ }
+ return a + current;
+ };
+ let array = [1,2,3];
+ let g = function() {
+ try {
+ return array.reduce(f);
+ } catch (e) {
+ } finally {
+ if (done) return null;
+ }
+ }
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ done = false;
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+})();
+
+(function OptimizedReduceRight() {
+ let count = 0;
+ let f = (a,current,i) => a + current * ++count;
+ let g = function(a) {
+ count = 0;
+ return a.reduceRight(f);
+ }
+ let a = [1,2,3,4,5,6,7,8,9,10];
+ g(a); g(a);
+ let total = g(a);
+ %OptimizeFunctionOnNextCall(g);
+ assertEquals(total, g(a));
+})();
+
+(function OptimizedReduceEmpty() {
+ let count = 0;
+ let f = (a,current,i) => a + current * ++count;
+ let g = function(a) {
+ count = 0;
+ return a.reduceRight(f);
+ }
+ let a = [1,2,3,4,5,6,7,8,9,10];
+ g(a); g(a); g(a);
+ %OptimizeFunctionOnNextCall(g);
+ g(a);
+ assertThrows(() => g([]));
+})();
+
+(function OptimizedReduceLazyDeopt() {
+ let deopt = false;
+ let f = (a,current) => { if (deopt) %DeoptimizeNow(); return a + current; };
+ let g = function(a) {
+ return a.reduceRight(f);
+ }
+ let a = [1,2,3,4,5,6,7,8,9,10];
+ g(a); g(a);
+ let total = g(a);
+ %OptimizeFunctionOnNextCall(g);
+ g(a);
+ deopt = true;
+ assertEquals(total, g(a));
+})();
+
+(function OptimizedReduceLazyDeoptMiddleOfIteration() {
+ let deopt = false;
+ let f = (a,current) => {
+ if (current == 6 && deopt) %DeoptimizeNow();
+ return a + current;
+ };
+ let g = function(a) {
+ return a.reduceRight(f);
+ }
+ let a = [11,22,33,45,56,6,77,84,93,101];
+ g(a); g(a);
+ let total = g(a);
+ %OptimizeFunctionOnNextCall(g);
+ g(a);
+ deopt = true;
+ assertEquals(total, g(a));
+})();
+
+(function OptimizedReduceEagerDeoptMiddleOfIteration() {
+ let deopt = false;
+ let array = [11,22,33,45,56,6,77,84,93,101];
+ let f = (a,current) => {
+ if (current == 6 && deopt) {array[9] = 1.5; }
+ return a + current;
+ };
+ let g = function() {
+ return array.reduceRight(f);
+ }
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ deopt = true;
+ g();
+ deopt = false;
+ array = [11,22,33,45,56,6,77,84,93,101];
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ deopt = true;
+ assertEquals(total, g());
+})();
+
+(function ReduceCatch() {
+ let f = (a,current) => {
+ return a + current;
+ };
+ let g = function() {
+ try {
+ return array.reduceRight(f);
+ } catch (e) {
+ }
+ }
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ g();
+ assertEquals(total, g());
+})();
+
+(function ReduceThrow() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) throw "x";
+ return a + current;
+ };
+ let array = [1,2,3];
+ let g = function() {
+ try {
+ return array.reduceRight(f);
+ } catch (e) {
+ return null;
+ }
+ }
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ done = false;
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+})();
+
+(function ReduceThrow() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) throw "x";
+ return a + current;
+ };
+ %NeverOptimizeFunction(f);
+ let array = [1,2,3];
+ let g = function() {
+ try {
+ return array.reduceRight(f);
+ } catch (e) {
+ return null;
+ }
+ }
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ done = false;
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+})();
+
+(function ReduceFinally() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) throw "x";
+ return a + current;
+ };
+ let array = [1,2,3];
+ let g = function() {
+ try {
+ return array.reduceRight(f);
+ } catch (e) {
+ } finally {
+ if (done) return null;
+ }
+ }
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ done = false;
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+})();
+
+(function ReduceFinallyNoInline() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) throw "x";
+ return a + current;
+ };
+ %NeverOptimizeFunction(f);
+ let array = [1,2,3];
+ let g = function() {
+ try {
+ return array.reduceRight(f);
+ } catch (e) {
+ } finally {
+ if (done) return null;
+ }
+ }
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ done = false;
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+})();
+
+(function ReduceNonCallableOpt() {
+ let done = false;
+ let f = (a, current) => {
+ return a + current;
+ };
+ let array = [1,2,3];
+ let g = function() {
+ return array.reduceRight(f);
+ }
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g(); g();
+ assertEquals(6, g());
+ f = null;
+ assertThrows(() => g());
+})();
+
+(function ReduceCatchInlineDeopt() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) {
+ %DeoptimizeNow();
+ throw "x";
+ }
+ return a + current;
+ };
+ let array = [1,2,3];
+ let g = function() {
+ try {
+ return array.reduceRight(f);
+ } catch (e) {
+ if (done) return null;
+ }
+ }
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ done = false;
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+})();
+
+(function ReduceFinallyInlineDeopt() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) {
+ %DeoptimizeNow();
+ throw "x";
+ }
+ return a + current;
+ };
+ let array = [1,2,3];
+ let g = function() {
+ try {
+ return array.reduceRight(f);
+ } catch (e) {
+ } finally {
+ if (done) return null;
+ }
+ }
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ done = false;
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+})();
+
+(function ReduceHoleyArrayWithDefaultAccumulator() {
+ var __v_12258 = new Array(10);
+ function __f_3253(a) {
+ let __f_3252 = function(accumulator, currentValue) {
+ return currentValue;
+ }
+ return a.reduce(__f_3252, 13);
+ }
+ assertEquals(13, __f_3253(__v_12258));
+ assertEquals(13, __f_3253(__v_12258));
+ assertEquals(13, __f_3253(__v_12258));
+ %OptimizeFunctionOnNextCall(__f_3253);
+ assertEquals(13, __f_3253(__v_12258));
+})();
+
+(function ReduceRightHoleyArrayWithDefaultAccumulator() {
+ var __v_12258 = new Array(10);
+ function __f_3253(a) {
+ let __f_3252 = function(accumulator, currentValue) {
+ return currentValue;
+ }
+ return a.reduceRight(__f_3252, 13);
+ }
+ assertEquals(13, __f_3253(__v_12258));
+ assertEquals(13, __f_3253(__v_12258));
+ assertEquals(13, __f_3253(__v_12258));
+ %OptimizeFunctionOnNextCall(__f_3253);
+ assertEquals(13, __f_3253(__v_12258));
+})();
+
+(function ReduceHoleyArrayOneElementWithDefaultAccumulator() {
+ var __v_12258 = new Array(10);
+ __v_12258[1] = 5;
+ function __f_3253(a) {
+ let __f_3252 = function(accumulator, currentValue) {
+ return currentValue + accumulator;
+ }
+ return a.reduce(__f_3252, 13);
+ }
+ assertEquals(18, __f_3253(__v_12258));
+ assertEquals(18, __f_3253(__v_12258));
+ assertEquals(18, __f_3253(__v_12258));
+ %OptimizeFunctionOnNextCall(__f_3253);
+ assertEquals(18, __f_3253(__v_12258));
+})();
+
+(function ReduceRightHoleyArrayOneElementWithDefaultAccumulator() {
+ var __v_12258 = new Array(10);
+ __v_12258[1] = 5;
+ function __f_3253(a) {
+ let __f_3252 = function(accumulator, currentValue) {
+ return currentValue + accumulator;
+ }
+ return a.reduceRight(__f_3252, 13);
+ }
+ assertEquals(18, __f_3253(__v_12258));
+ assertEquals(18, __f_3253(__v_12258));
+ assertEquals(18, __f_3253(__v_12258));
+ %OptimizeFunctionOnNextCall(__f_3253);
+ assertEquals(18, __f_3253(__v_12258));
+})();
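
The holey-array tests above lean on reduce and reduceRight skipping holes: on an array with no own elements the callback never runs, so an explicit initial accumulator comes back unchanged (and without one, a TypeError is thrown, as the empty-array cases check). A standalone sketch of that contract, not part of the patch:

const holey = new Array(10);             // ten holes, no own elements
const out = holey.reduce(() => { throw new Error("unreachable"); }, 13);
console.log(out);                        // 13 -- callback never invoked
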
diff --git a/deps/v8/test/mjsunit/code-coverage-block-noopt.js b/deps/v8/test/mjsunit/code-coverage-block-noopt.js
index 3eba9d3f57..ef68e0394d 100644
--- a/deps/v8/test/mjsunit/code-coverage-block-noopt.js
+++ b/deps/v8/test/mjsunit/code-coverage-block-noopt.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --no-always-opt --harmony-async-iteration
+// Flags: --allow-natives-syntax --no-always-opt
// Flags: --no-opt
// Files: test/mjsunit/code-coverage-utils.js
diff --git a/deps/v8/test/mjsunit/code-coverage-block-opt.js b/deps/v8/test/mjsunit/code-coverage-block-opt.js
index bc4a3f1010..e02775bd45 100644
--- a/deps/v8/test/mjsunit/code-coverage-block-opt.js
+++ b/deps/v8/test/mjsunit/code-coverage-block-opt.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --no-always-opt --harmony-async-iteration --opt
+// Flags: --allow-natives-syntax --no-always-opt --opt
// Files: test/mjsunit/code-coverage-utils.js
%DebugToggleBlockCoverage(true);
@@ -39,7 +39,7 @@ TestCoverage("Partial coverage collection",
}(); // 0400
`,
[{"start":52,"end":153,"count":0},
- {"start":127,"end":152,"count":1}]
+ {"start":121,"end":152,"count":1}]
);
%DebugToggleBlockCoverage(false);
diff --git a/deps/v8/test/mjsunit/code-coverage-block.js b/deps/v8/test/mjsunit/code-coverage-block.js
index 3355fd1259..b9d00bce6d 100644
--- a/deps/v8/test/mjsunit/code-coverage-block.js
+++ b/deps/v8/test/mjsunit/code-coverage-block.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --no-always-opt --harmony-async-iteration
+// Flags: --allow-natives-syntax --no-always-opt
// Files: test/mjsunit/code-coverage-utils.js
%DebugToggleBlockCoverage(true);
@@ -38,20 +38,23 @@ function f(x) { // 0050
} // 0550
f(42); // 0600
f(43); // 0650
-`,
-[{"start":0,"end":699,"count":1},
+if (true) { // 0700
+ const foo = 'bar'; // 0750
+} else { // 0800
+ const bar = 'foo'; // 0850
+} // 0900
+`,
+[{"start":0,"end":949,"count":1},
+ {"start":801,"end":901,"count":0},
{"start":0,"end":15,"count":11},
{"start":50,"end":551,"count":2},
{"start":115,"end":203,"count":1},
{"start":167,"end":171,"count":0},
- {"start":265,"end":273,"count":1},
- {"start":279,"end":287,"count":1},
- {"start":315,"end":319,"count":1},
- {"start":325,"end":329,"count":1},
+ {"start":265,"end":287,"count":1},
+ {"start":315,"end":329,"count":1},
{"start":363,"end":367,"count":0},
{"start":413,"end":417,"count":0},
- {"start":472,"end":476,"count":0}]
-
+ {"start":466,"end":476,"count":0}]
);
TestCoverage(
@@ -82,7 +85,7 @@ TestCoverage(
`,
[{"start":0,"end":249,"count":1},
{"start":1,"end":201,"count":1},
- {"start":124,"end":129,"count":0}]
+ {"start":118,"end":129,"count":0}]
);
TestCoverage(
@@ -109,7 +112,7 @@ function g() {} // 0000
{"start":330,"end":334,"count":0},
{"start":431,"end":503,"count":12},
{"start":470,"end":474,"count":4},
- {"start":480,"end":484,"count":8}]
+ {"start":474,"end":484,"count":8}]
);
TestCoverage(
diff --git a/deps/v8/test/mjsunit/compiler/array-multiple-receiver-maps.js b/deps/v8/test/mjsunit/compiler/array-multiple-receiver-maps.js
new file mode 100644
index 0000000000..2ef0cc3a01
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/array-multiple-receiver-maps.js
@@ -0,0 +1,122 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt --no-always-opt
+
+function runTest(f, message, mkICTraining, deoptArg) {
+ function test(f, message, ictraining, deoptArg) {
+ // Train the call IC on the maps.
+ let t = ictraining;
+
+ // We put the training data into local variables
+ // to ensure their maps are kepts alive. If the
+ // maps die, gc *may* deoptimize {f}, which makes
+ // the test flaky.
+ let t1 = t();
+ let t2 = t();
+ let t3 = t();
+
+ for (let a of t1) {
+ f(a.arr, () => a.el);
+ }
+ for (let a of t2) {
+ f(a.arr, () => a.el);
+ }
+ %OptimizeFunctionOnNextCall(f);
+ message += " trained with" + JSON.stringify(t());
+ if (deoptArg == undefined) {
+ // Make sure the optimized function can handle
+ // all trained maps without deopt.
+ for (let a of t3) {
+ f(a.arr, () => a.el);
+ message += " for args " + JSON.stringify(a);
+ assertOptimized(f, undefined, message + " should have been optimized");
+ }
+ } else {
+ // Trigger deopt, causing the no-speculation bit to be set.
+ let a1 = deoptArg;
+ let a2 = deoptArg;
+ message += " for args " + JSON.stringify(a1);
+ f(a1.arr, () => a1.el);
+ assertUnoptimized(f, undefined, message + " should have been unoptimized");
+ %OptimizeFunctionOnNextCall(f);
+ // The no-speculation bit should protect against further deopts.
+ f(a2.arr, () => a2.el);
+ assertOptimized(f, undefined, message + " should have been optimized");
+ }
+ }
+
+ // Get function as a string.
+ var testString = test.toString();
+ // Remove the function header..
+ testString = testString.replace(new RegExp("[^\n]*"), "let f = " + f.toString() + ";");
+ // ..and trailing '}'.
+ testString = testString.replace(new RegExp("[^\n]*$"), "");
+ // Substitute parameters.
+ testString = testString.replace(new RegExp("ictraining", 'g'), mkICTraining.toString());
+ testString = testString.replace(new RegExp("deoptArg", 'g'),
+ deoptArg ? JSON.stringify(deoptArg) : "undefined");
+
+ var modTest = new Function("message", testString);
+ //print(modTest);
+ modTest(message);
+}
+
+let checks = {
+ smiReceiver:
+ { mkTrainingArguments : () => [{arr:[1], el:3}],
+ deoptingArguments : [{arr:[0.1], el:1}, {arr:[{}], el:1}]
+ },
+ objectReceiver:
+ { mkTrainingArguments : () => [{arr:[{}], el:0.1}],
+ deoptingArguments : []
+ },
+ multipleSmiReceivers:
+ { mkTrainingArguments : () => { let b = [1]; b.x=3; return [{arr:[1], el:3}, {arr:b, el:3}] },
+ deoptingArguments : [{arr:[0.1], el:1}, {arr:[{}], el:1}]
+ },
+ multipleSmiReceiversPackedUnpacked:
+ { mkTrainingArguments : () => { let b = [1]; b[100] = 3; return [{arr:[1], el:3}, {arr:b, el:3}] },
+ deoptingArguments : [{arr:[0.1], el:1}, {arr:[{}], el:1}]
+ },
+ multipleDoubleReceivers:
+ { mkTrainingArguments : () => { let b = [0.1]; b.x=0.3; return [{arr:[0.1], el:0.3}, {arr:b, el:0.3}] },
+ deoptingArguments : [{arr:[{}], el:true}, {arr:[1], el:true}]
+ },
+ multipleDoubleReceiversPackedUnpacked:
+ { mkTrainingArguments : () => { let b = [0.1]; b[100] = 0.3; return [{arr:[0.1], el:0.3}, {arr:b, el:0.3}] },
+ deoptingArguments : [{arr:[{}], el:true}, {arr:[1], el:true}]
+ },
+ multipleMixedReceivers:
+ { mkTrainingArguments : () => { let b = [0.1]; b.x=0.3; return [{arr:[1], el:0.3}, {arr:[{}], el:true}, {arr:b, el:0.3}] },
+ deoptingArguments : []
+ },
+ multipleMixedReceiversPackedUnpacked:
+ { mkTrainingArguments : () => { let b = [0.1]; b[100] = 0.3; return [{arr:[1], el:0.3}, {arr:[{}], el:true}, {arr:b, el:0.3}] },
+ deoptingArguments : []
+ },
+};
+
+const functions = {
+ push_reliable: (a,g) => { let b = g(); return a.push(2, b); },
+ push_unreliable: (a,g) => { return a.push(2, g()); },
+ pop_reliable: (a,g) => { let b = g(); return a.pop(2, b); },
+ pop_unreliable: (a,g) => { return a.pop(2, g()); },
+ shift_reliable: (a,g) => { let b = g(); return a.shift(2, b); },
+ shift_unreliable: (a,g) => { return a.shift(2, g()); }
+}
+
+Object.keys(checks).forEach(
+ key => {
+ let check = checks[key];
+
+ for (fnc in functions) {
+ runTest(functions[fnc], "test-reliable-" + key, check.mkTrainingArguments);
+ // Test each deopting arg separately.
+ for (let deoptArg of check.deoptingArguments) {
+ runTest(functions[fnc], "testDeopt-reliable-" + key, check.mkTrainingArguments, deoptArg);
+ }
+ }
+ }
+);
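
The generated tests all follow the standard mjsunit train-then-optimize idiom; a minimal hand-written sketch of it (this assumes d8 with --allow-natives-syntax plus the mjsunit.js harness for assertOptimized):

function f(a) { return a.push(2); }
f([1]); f([1]);                  // warm up the call IC on packed-SMI arrays
%OptimizeFunctionOnNextCall(f);
f([1]);
assertOptimized(f);              // stays optimized for the trained map
f([0.5]);                        // an untrained receiver map can deopt f --
                                 // exactly what the deoptingArguments entries probe
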
diff --git a/deps/v8/test/mjsunit/compiler/deopt-array-builtins.js b/deps/v8/test/mjsunit/compiler/deopt-array-builtins.js
new file mode 100644
index 0000000000..b737b17ed0
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/deopt-array-builtins.js
@@ -0,0 +1,148 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+/* Test MapCheck behavior */
+
+(function testForEachMapCheck() {
+ function f(v,n,o) {
+ Object.freeze(o);
+ }
+ function g() {
+ [1,2,3].forEach(f);
+ }
+ g();
+ g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertOptimized(g);
+})();
+
+
+(function testFindMapCheck() {
+ function f(v,n,o) {
+ Object.freeze(o);
+ return false;
+ }
+ function g() {
+ [1,2,3].find(f);
+ }
+ g();
+ g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertOptimized(g);
+})();
+
+(function testMapMapCheck() {
+ function f(v,n,o) {
+ Object.freeze(o);
+ return false;
+ }
+ function g() {
+ [1,2,3].map(f);
+ }
+ g();
+ g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertOptimized(g);
+})();
+
+(function testFilterMapCheck() {
+ function f(v,n,o) {
+ Object.freeze(o);
+ return true;
+ }
+ function g() {
+ [1,2,3].filter(f);
+ }
+ g();
+ g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertOptimized(g);
+})();
+
+
+/* Test CheckBounds behavior */
+
+(function testForEachCheckBounds() {
+ function f(v,n,o) {
+ o.length=2;
+ }
+ function g() {
+ [1,2,3].forEach(f);
+ }
+ g();
+ g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertOptimized(g);
+})();
+
+
+(function testFindCheckBounds() {
+ function f(v,n,o) {
+ o.length=2;
+ return false;
+ }
+ function g() {
+ [1,2,3].find(f);
+ }
+ g();
+ g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertOptimized(g);
+})();
+
+(function testMapCheckBounds() {
+ function f(v,n,o) {
+ o.length=2;
+ return false;
+ }
+ function g() {
+ [1,2,3].map(f);
+ }
+ g();
+ g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertOptimized(g);
+})();
+
+(function testFilterCheckBounds() {
+ function f(v,n,o) {
+ o.length = 2;
+ return true;
+ }
+ function g() {
+ [1,2,3].filter(f);
+ }
+ g();
+ g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ g();
+ assertOptimized(g);
+})();
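
What the CheckBounds half of this file protects, in plain-JS terms: shrinking the array from inside the callback must not let the inlined builtin read past the new length. The unoptimized behavior being preserved, as a standalone sketch:

const seen = [];
[1, 2, 3].forEach((v, i, o) => { o.length = 2; seen.push(v); });
console.log(seen);  // [1, 2] -- index 2 was truncated away before being visited
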
diff --git a/deps/v8/test/mjsunit/compiler/deopt-array-push.js b/deps/v8/test/mjsunit/compiler/deopt-array-push.js
new file mode 100644
index 0000000000..e34d99a325
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/deopt-array-push.js
@@ -0,0 +1,97 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+(function test() {
+ function foo(a) { a.push(a.length = 2); }
+
+ foo([1]);
+ foo([1]);
+ %OptimizeFunctionOnNextCall(foo);
+ foo([1]);
+ %OptimizeFunctionOnNextCall(foo);
+ foo([1]);
+ assertOptimized(foo);
+})();
+
+(function testElementTypeCheckSmi() {
+ function foo(a) { a.push('a'); }
+
+ foo([1]);
+ foo([1]);
+ %OptimizeFunctionOnNextCall(foo);
+ foo([1]);
+ %OptimizeFunctionOnNextCall(foo);
+ foo([1]);
+ assertOptimized(foo);
+})();
+
+(function testElementTypeCheckDouble() {
+ function foo(a) { a.push('a'); }
+
+ foo([0.3413312]);
+ foo([0.3413312]);
+ %OptimizeFunctionOnNextCall(foo);
+ foo([0.3413312]);
+ %OptimizeFunctionOnNextCall(foo);
+ foo([0.3413312]);
+ assertOptimized(foo);
+})();
+(function test() {
+ function bar(a) { a.x = 2 };
+ %NeverOptimizeFunction(bar);
+ function foo(a) { a.push(bar(a)); }
+
+ foo(["1"]);
+ foo(["1"]);
+ %OptimizeFunctionOnNextCall(foo);
+ foo(["1"]);
+ %OptimizeFunctionOnNextCall(foo);
+ foo(["1"]);
+ assertOptimized(foo);
+})();
+
+(function test() {
+ function foo(a) { a.push(a.length = 2); }
+
+ foo([0.34234]);
+ foo([0.34234]);
+ %OptimizeFunctionOnNextCall(foo);
+ foo([0.34234]);
+ %OptimizeFunctionOnNextCall(foo);
+ foo([0.34234]);
+ assertOptimized(foo);
+})();
+
+(function test() {
+ const N = 128 * 1024;
+
+ function foo(a) { a.push(1); }
+
+ foo(new Array(N));
+ foo(new Array(N));
+ %OptimizeFunctionOnNextCall(foo);
+ foo(new Array(N));
+ %OptimizeFunctionOnNextCall(foo);
+ foo(new Array(N));
+ assertOptimized(foo);
+})();
+
+(function test() {
+ function mkArray() {
+ const N = 128 * 1024;
+ let a = [0.1];
+ a.length = N;
+ return a;
+ }
+ function foo(a) { a.push(0.23441233123); }
+ foo(mkArray());
+ foo(mkArray());
+ %OptimizeFunctionOnNextCall(foo);
+ foo(mkArray());
+ %OptimizeFunctionOnNextCall(foo);
+ foo(mkArray());
+ assertOptimized(foo);
+})();
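
The a.push(a.length = 2) cases are about evaluation order: the argument expression runs first and resizes the array, and push then appends at the new length. The plain-JS behavior the optimized code must preserve:

const a = [1];
a.push(a.length = 2);   // length becomes 2 ([1, hole]), then 2 lands at index 2
console.log(a.length);  // 3
console.log(a);         // [1, <1 empty item>, 2]
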
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-13.js b/deps/v8/test/mjsunit/compiler/escape-analysis-13.js
index fca4da618e..5f281aaaa4 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-13.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-13.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --turbo-escape --turbo-experimental
+// Flags: --allow-natives-syntax --turbo-escape
function f() {
var x = {};
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-15.js b/deps/v8/test/mjsunit/compiler/escape-analysis-15.js
index 4f9a40ad5c..1960d74892 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-15.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-15.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --turbo-escape --turbo-experimental --no-turbo-load-elimination
+// Flags: --allow-natives-syntax --turbo-escape --no-turbo-load-elimination
function f(i) {
var o1 = {a: 1, b: 2};
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-phi-type.js b/deps/v8/test/mjsunit/compiler/escape-analysis-phi-type.js
index 806b09b3de..9d033b9640 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-phi-type.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-phi-type.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --turbo-escape --turbo-experimental --no-turbo-loop-peeling
+// Flags: --allow-natives-syntax --turbo-escape --no-turbo-loop-peeling
function f(x) {
var o = {a : 0};
diff --git a/deps/v8/test/mjsunit/compiler/materialize-dictionary-properties.js b/deps/v8/test/mjsunit/compiler/materialize-dictionary-properties.js
new file mode 100644
index 0000000000..5838a83979
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/materialize-dictionary-properties.js
@@ -0,0 +1,18 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f() {
+ // Create a non-escaping object.
+ var o = Object.create(null);
+ %DeoptimizeNow();
+ // Keep it alive.
+ return o ? 1 : 0;
+}
+
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+assertEquals(1, f());
diff --git a/deps/v8/test/mjsunit/compiler/materialize-mutable-heap-number.js b/deps/v8/test/mjsunit/compiler/materialize-mutable-heap-number.js
new file mode 100644
index 0000000000..b6b99afcf4
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/materialize-mutable-heap-number.js
@@ -0,0 +1,22 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function C() {}
+%CompleteInobjectSlackTracking(new C());
+
+function f() {
+ // Create a non-escaping object.
+ var o = new C();
+ // Add an out-of-object double property.
+ o.x = 0.5;
+ %DeoptimizeNow();
+ return o.x + 0.25;
+}
+
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+assertEquals(0.75, f());
diff --git a/deps/v8/test/mjsunit/compiler/new-cons-string.js b/deps/v8/test/mjsunit/compiler/new-cons-string.js
new file mode 100644
index 0000000000..7f6da7262a
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/new-cons-string.js
@@ -0,0 +1,71 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --expose-externalize-string
+
+(function() {
+ function foo(s) {
+ return "abcdefghijklm" + s;
+ }
+
+ assertTrue(isOneByteString(foo("0")));
+ assertTrue(isOneByteString(foo("0")));
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(isOneByteString(foo("0")));
+})();
+
+(function() {
+ function foo(s) {
+ return s + "abcdefghijklm";
+ }
+
+ assertTrue(isOneByteString(foo("0")));
+ assertTrue(isOneByteString(foo("0")));
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(isOneByteString(foo("0")));
+})();
+
+(function() {
+ function foo(s) {
+ return "abcdefghijklm" + s;
+ }
+
+ assertFalse(isOneByteString(foo("\u1234")));
+ assertFalse(isOneByteString(foo("\u1234")));
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(isOneByteString(foo("\u1234")));
+})();
+
+(function() {
+ function foo(s) {
+ return s + "abcdefghijklm";
+ }
+
+ assertFalse(isOneByteString(foo("\u1234")));
+ assertFalse(isOneByteString(foo("\u1234")));
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(isOneByteString(foo("\u1234")));
+})();
+
+(function() {
+ function foo(s) {
+ return "abcdefghijkl\u1234" + s;
+ }
+
+ assertFalse(isOneByteString(foo("0")));
+ assertFalse(isOneByteString(foo("0")));
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(isOneByteString(foo("0")));
+})();
+
+(function() {
+ function foo(s) {
+ return s + "abcdefghijkl\u1234";
+ }
+
+ assertFalse(isOneByteString(foo("0")));
+ assertFalse(isOneByteString(foo("0")));
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(isOneByteString(foo("0")));
+})();
diff --git a/deps/v8/test/mjsunit/compiler/regress-786521.js b/deps/v8/test/mjsunit/compiler/regress-786521.js
new file mode 100644
index 0000000000..2b161270ed
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-786521.js
@@ -0,0 +1,23 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Provoke type None as the result of a SpeculativeNumberMultiply to
+// ensure that TurboFan can handle this.
+
+function inlined(b, x) {
+ if (b) {
+ x * 2 * 2
+ }
+}
+
+inlined(true, 1);
+inlined(true, 2);
+inlined(false, 1);
+
+function foo(b) { inlined(b, "") }
+foo(false); foo(false);
+%OptimizeFunctionOnNextCall(foo);
+foo(true);
diff --git a/deps/v8/test/mjsunit/compiler/regress-793863.js b/deps/v8/test/mjsunit/compiler/regress-793863.js
new file mode 100644
index 0000000000..883805dff6
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-793863.js
@@ -0,0 +1,12 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f(a) {
+ return arguments[0];
+}
+
+%OptimizeFunctionOnNextCall(f);
+assertEquals(undefined, f());
diff --git a/deps/v8/test/mjsunit/compiler/regress-796041.js b/deps/v8/test/mjsunit/compiler/regress-796041.js
new file mode 100644
index 0000000000..e2c2e11c0b
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-796041.js
@@ -0,0 +1,35 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+'use strict';
+
+function f(abort, n, a, b) {
+ if (abort) return;
+ var x = a ? true : "" + a;
+ if (!a) {
+ var dead = n + 1 + 1;
+ if(!b) {
+ x = dead;
+ }
+ if (x) {
+ x = false;
+ }
+ if (b) {
+ x = false;
+ }
+ }
+ return x + 1;
+}
+f(false, 5); f(false, 6); f(false, 7); f(false, 8);
+
+function g(abort, a, b) {
+ return f(abort, "abc", a, b);
+}
+
+g(true); g(true); g(true); g(true);
+
+%OptimizeFunctionOnNextCall(g);
+g(false);
diff --git a/deps/v8/test/mjsunit/compiler/regress-797596.js b/deps/v8/test/mjsunit/compiler/regress-797596.js
new file mode 100644
index 0000000000..4e3594bdb1
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-797596.js
@@ -0,0 +1,30 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-gc --allow-natives-syntax
+var notCallable;
+function inferReceiverMapsInDeadCode() {
+ var obj = { func() {} };
+ gc();
+ function wrappedCode() { try { code(); } catch (e) {} }
+ function code() {
+ obj.a;
+ try {
+ Object.defineProperty(obj, "func", { get() {} });
+ } catch (neverCaught) {}
+ for (var i = 0; i < 1; i++) {
+ try {
+ notCallable(arguments[i]);
+ } catch (alwaysCaught) {}
+ }
+ }
+ wrappedCode();
+ try {
+ %OptimizeFunctionOnNextCall(wrappedCode);
+ wrappedCode();
+ } catch (e) {}
+}
+inferReceiverMapsInDeadCode();
+inferReceiverMapsInDeadCode();
+inferReceiverMapsInDeadCode();
diff --git a/deps/v8/test/mjsunit/compiler/regress-801097.js b/deps/v8/test/mjsunit/compiler/regress-801097.js
new file mode 100644
index 0000000000..d488ce4deb
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-801097.js
@@ -0,0 +1,19 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function GetFunction() {
+ var source = "return ((dividend | 0) / ((";
+ for (var i = 0; i < 0x8000; i++) {
+ source += "a,"
+ }
+ source += "a) | 0)) | 0";
+ return Function("dividend", source);
+}
+
+var func = GetFunction();
+assertThrows("func();");
+%OptimizeFunctionOnNextCall(func);
+assertThrows("func()");
diff --git a/deps/v8/test/mjsunit/compiler/varargs.js b/deps/v8/test/mjsunit/compiler/varargs.js
new file mode 100644
index 0000000000..ae636dc0f7
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/varargs.js
@@ -0,0 +1,49 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+x = "a";
+
+function test_varargs(...args) {
+ var sum = this.x;
+ for (i in args) {
+ sum += "," + args[i];
+ }
+ return sum;
+}
+
+assertEquals("a", test_varargs());
+assertEquals("a,b", test_varargs("b"));
+assertEquals("a,b,c", test_varargs("b", "c"));
+assertEquals("a,b,c,d", test_varargs("b", "c", "d"));
+assertEquals("a,b,c,d,e", test_varargs("b", "c", "d", "e"));
+
+function forward_varargs(...args) {
+ return test_varargs(...args);
+}
+
+assertEquals("a", forward_varargs());
+assertEquals("a,b", forward_varargs("b"));
+assertEquals("a,b,c", forward_varargs("b", "c"));
+assertEquals("a,b,c,d", forward_varargs("b", "c", "d"));
+assertEquals("a,b,c,d,e", forward_varargs("b", "c", "d", "e"));
+
+function forward_varargs_one_arg(x, ...args) {
+ return test_varargs(x, ...args);
+}
+
+assertEquals("a,undefined", forward_varargs_one_arg());
+assertEquals("a,b", forward_varargs_one_arg("b"));
+assertEquals("a,b,c", forward_varargs_one_arg("b", "c"));
+assertEquals("a,b,c,d", forward_varargs_one_arg("b", "c", "d"));
+assertEquals("a,b,c,d,e", forward_varargs_one_arg("b", "c", "d", "e"));
+
+function forward_varargs_two_args(x, y, ...args) {
+ return test_varargs(x, y, ...args);
+}
+
+assertEquals("a,undefined,undefined", forward_varargs_two_args());
+assertEquals("a,b,undefined", forward_varargs_two_args("b"));
+assertEquals("a,b,c", forward_varargs_two_args("b", "c"));
+assertEquals("a,b,c,d", forward_varargs_two_args("b", "c", "d"));
+assertEquals("a,b,c,d,e", forward_varargs_two_args("b", "c", "d", "e"));
diff --git a/deps/v8/test/mjsunit/constant-folding-2.js b/deps/v8/test/mjsunit/constant-folding-2.js
index da9e5d5469..7586261c92 100644
--- a/deps/v8/test/mjsunit/constant-folding-2.js
+++ b/deps/v8/test/mjsunit/constant-folding-2.js
@@ -33,8 +33,6 @@ function test(f) {
f();
%OptimizeFunctionOnNextCall(f);
f();
- // Assert that there has been no deopt.
- assertOptimized(f);
}
test(function add() {
@@ -234,6 +232,7 @@ test(function stringCharCodeAt() {
assertEquals("NaN", String("abc".charCodeAt(4)));
assertEquals(98, "abc".charCodeAt(1.1));
assertEquals("NaN", String("abc".charCodeAt(4.1)));
+ assertEquals("NaN", String("abc".charCodeAt(1 + 4294967295)));
});
test(function stringCharAt() {
@@ -242,6 +241,7 @@ test(function stringCharAt() {
assertEquals("", "abc".charAt(4));
assertEquals("b", "abc".charAt(1.1));
assertEquals("", "abc".charAt(4.1));
+ assertEquals("", String("abc".charAt(1 + 4294967295)));
});
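
On the magic index: 1 + 4294967295 equals 2**32, which truncates to 0 in uint32 arithmetic, so the point is presumably to catch a constant folder that narrows the index to 32 bits and folds these calls as if the index were 0. A quick check of the spec-required answers:

console.log(1 + 4294967295 === 2 ** 32);  // true
console.log("abc".charCodeAt(2 ** 32));   // NaN -- index out of range
console.log("abc".charAt(2 ** 32));       // ""
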
diff --git a/deps/v8/test/mjsunit/d8/.gitignore b/deps/v8/test/mjsunit/d8/.gitignore
new file mode 100644
index 0000000000..4497115e4c
--- /dev/null
+++ b/deps/v8/test/mjsunit/d8/.gitignore
@@ -0,0 +1 @@
+v8_trace.json
diff --git a/deps/v8/test/mjsunit/d8-os.js b/deps/v8/test/mjsunit/d8/d8-os.js
index c2d8ec59bc..c2d8ec59bc 100644
--- a/deps/v8/test/mjsunit/d8-os.js
+++ b/deps/v8/test/mjsunit/d8/d8-os.js
diff --git a/deps/v8/test/mjsunit/d8-performance-now.js b/deps/v8/test/mjsunit/d8/d8-performance-now.js
index 3e5485e81d..3e5485e81d 100644
--- a/deps/v8/test/mjsunit/d8-performance-now.js
+++ b/deps/v8/test/mjsunit/d8/d8-performance-now.js
diff --git a/deps/v8/test/mjsunit/d8-worker-sharedarraybuffer.js b/deps/v8/test/mjsunit/d8/d8-worker-sharedarraybuffer.js
index 09586c3a11..09586c3a11 100644
--- a/deps/v8/test/mjsunit/d8-worker-sharedarraybuffer.js
+++ b/deps/v8/test/mjsunit/d8/d8-worker-sharedarraybuffer.js
diff --git a/deps/v8/test/mjsunit/d8-worker-spawn-worker.js b/deps/v8/test/mjsunit/d8/d8-worker-spawn-worker.js
index a114d8587e..a114d8587e 100644
--- a/deps/v8/test/mjsunit/d8-worker-spawn-worker.js
+++ b/deps/v8/test/mjsunit/d8/d8-worker-spawn-worker.js
diff --git a/deps/v8/test/mjsunit/d8-worker.js b/deps/v8/test/mjsunit/d8/d8-worker.js
index a73d7b1706..a73d7b1706 100644
--- a/deps/v8/test/mjsunit/d8-worker.js
+++ b/deps/v8/test/mjsunit/d8/d8-worker.js
diff --git a/deps/v8/test/mjsunit/d8/enable-tracing.js b/deps/v8/test/mjsunit/d8/enable-tracing.js
new file mode 100644
index 0000000000..5174b41155
--- /dev/null
+++ b/deps/v8/test/mjsunit/d8/enable-tracing.js
@@ -0,0 +1,8 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --enable-tracing --trace-path=test/mjsunit/d8/v8_trace.json
+
+// Just test that running d8 with --enable-tracing does not crash in a normal
+// execution without exceptions or calls to natives.
diff --git a/deps/v8/test/mjsunit/deserialize-reference.js b/deps/v8/test/mjsunit/deserialize-reference.js
index b032013159..ac4979bd26 100644
--- a/deps/v8/test/mjsunit/deserialize-reference.js
+++ b/deps/v8/test/mjsunit/deserialize-reference.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --cache=code --serialize-toplevel
+// Flags: --cache=code
var a = "123";
assertEquals(a, "123");
diff --git a/deps/v8/test/mjsunit/dictionary-prototypes.js b/deps/v8/test/mjsunit/dictionary-prototypes.js
new file mode 100644
index 0000000000..109f8d42a6
--- /dev/null
+++ b/deps/v8/test/mjsunit/dictionary-prototypes.js
@@ -0,0 +1,409 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function EnsureDictionaryMode(obj, properties=1500) {
+ for (let i = 0; i < properties; i++) {
+ obj["x" + i] = 0;
+ }
+ assertFalse(%HasFastProperties(obj));
+}
+
+function EnsureAlmostDictionaryMode(obj) {
+ for (let i = 0; i < 1020; i++) {
+ obj["x" + i] = 0;
+ }
+}
+
+function TestAddingPropertyToDictionaryPrototype() {
+ let foo_func_called = 0;
+ let bar_func_called = 0;
+
+ function Foo() {}
+ Foo.prototype.func = function() { ++foo_func_called; }
+
+ function Bar() {}
+ Bar.prototype = Object.create(Foo.prototype);
+ EnsureDictionaryMode(Bar.prototype);
+
+ let o = new Bar();
+
+ for (let i = 0; i < 11; ++i) {
+ // First, the property is looked up from Foo.
+ o.func();
+
+ // Add the property to Bar which is a dictionary-mode prototype between o
+ // and Foo. In the next iteration, it's looked up from Bar.
+ if (i == 9) {
+ // The UNINITIALIZED -> PREMONOMORPHIC transition of StoreIC should
+ // properly invalidate prototype chains.
+ Bar.prototype.func = function() { ++bar_func_called; }
+ }
+ }
+
+ assertEquals(10, foo_func_called);
+ assertEquals(1, bar_func_called);
+}
+
+TestAddingPropertyToDictionaryPrototype();
+
+// Same as TestAddingPropertyToDictionaryPrototype, but using o["foo"] access
+// instead of o.foo.
+function TestAddingPropertyToDictionaryPrototype2() {
+ let foo_func_called = 0;
+ let bar_func_called = 0;
+ let name = "func";
+
+ function Foo() {}
+ Foo.prototype[name] = function() { ++foo_func_called; }
+
+ function Bar() {}
+ Bar.prototype = Object.create(Foo.prototype);
+ EnsureDictionaryMode(Bar.prototype);
+
+ let o = new Bar();
+
+ for (let i = 0; i < 11; ++i) {
+ // First, the property is looked up from Foo.
+ o[name]();
+
+ // Add the property to Bar which is a dictionary-mode prototype between o
+ // and Foo. In the next iteration, it's looked up from Bar.
+ if (i == 9) {
+ // The UNINITIALIZED -> PREMONOMORPHIC transition of KeyedStoreIC should
+ // properly invalidate prototype chains.
+ Bar.prototype[name] = function() { ++bar_func_called; }
+ }
+ }
+
+ assertEquals(10, foo_func_called);
+ assertEquals(1, bar_func_called);
+}
+
+TestAddingPropertyToDictionaryPrototype2();
+
+function TestAddingPropertyToDictionaryPrototype_DefineProperty() {
+ let foo_func_called = 0;
+ let bar_func_called = 0;
+
+ function Foo() {}
+ Foo.prototype.func = function() { ++foo_func_called; }
+
+ function Bar() {}
+ Bar.prototype = Object.create(Foo.prototype);
+ EnsureDictionaryMode(Bar.prototype);
+
+ let o = new Bar();
+
+ for (let i = 0; i < 11; ++i) {
+ // First, the property is looked up from Foo.
+ o.func();
+
+ // Add the property to Bar which is a dictionary-mode prototype between o
+ // and Foo. In the next iteration, it's looked up from Bar.
+ if (i == 9) {
+ // The runtime should properly invalidate prototype chains.
+ Object.defineProperty(Bar.prototype, "func", {value: function() { ++bar_func_called; }});
+ }
+ }
+
+ assertEquals(10, foo_func_called);
+ assertEquals(1, bar_func_called);
+}
+
+TestAddingPropertyToDictionaryPrototype_DefineProperty();
+
+function TestAddingPropertyToDictionaryPrototype_DictionaryAddSlowPath() {
+ let foo_func_called = 0;
+ let bar_func_called = 0;
+
+ function Foo() {}
+ Foo.prototype.func = function() { ++foo_func_called; }
+
+ function Bar() {}
+ Bar.prototype = Object.create(Foo.prototype);
+ // The magic number ensures that the next addition to the dictionary will
+ // trigger the slow path.
+ EnsureDictionaryMode(Bar.prototype, 2731);
+
+ let o = new Bar();
+
+ for (let i = 0; i < 11; ++i) {
+ // First, the property is looked up from Foo.
+ o.func();
+
+ // Add the property to Bar which is a dictionary-mode prototype between o
+ // and Foo. In the next iteration, it's looked up from Bar.
+ if (i == 9) {
+ // -> slow path for dictionary add
+ Bar.prototype.func = function() { ++bar_func_called; }
+ }
+ }
+
+ assertEquals(10, foo_func_called);
+ assertEquals(1, bar_func_called);
+}
+
+TestAddingPropertyToDictionaryPrototype_DictionaryAddSlowPath();
+
+function TestAddingAccessorPropertyToDictionaryPrototype() {
+ let foo_func_called = 0;
+ let bar_func_called = 0;
+
+ function Foo() {}
+ Foo.prototype.func = function() { ++foo_func_called; }
+
+ function Bar() {}
+ Bar.prototype = Object.create(Foo.prototype);
+ EnsureDictionaryMode(Bar.prototype);
+
+ let o = new Bar();
+
+ for (let i = 0; i < 11; ++i) {
+ // First, the property is looked up from Foo.
+ o.func();
+
+ // Add the property to Bar which is a dictionary-mode prototype between o
+ // and Foo. In the next iteration, it's looked up from Bar.
+ if (i == 9) {
+ Object.defineProperty(Bar.prototype, "func",
+ {get: function() { return function() { ++bar_func_called; }}});
+ }
+ }
+
+ assertEquals(10, foo_func_called);
+ assertEquals(1, bar_func_called);
+}
+
+TestAddingAccessorPropertyToDictionaryPrototype();
+
+function TestRemovingPropertyFromDictionaryPrototype() {
+ let foo_func_called = 0;
+ let bar_func_called = 0;
+
+ function Foo() {}
+ Foo.prototype.func = function() { ++foo_func_called; }
+
+ function Bar() {}
+ Bar.prototype = Object.create(Foo.prototype);
+ EnsureDictionaryMode(Bar.prototype);
+ Bar.prototype.func = function() { ++bar_func_called; }
+
+ let o = new Bar();
+
+ for (let i = 0; i < 11; ++i) {
+ // First, the property is looked up from Bar.
+ o.func();
+
+ // Remove the property from Bar which is a dictionary-mode prototype between
+ // o and Foo. In the next iteration, it's looked up from Foo.
+ if (i == 9) {
+ delete Bar.prototype.func;
+ }
+ }
+
+ assertEquals(1, foo_func_called);
+ assertEquals(10, bar_func_called);
+}
+
+TestRemovingPropertyFromDictionaryPrototype();
+
+// Same as TestRemovingPropertyFromDictionaryPrototype, but using o["foo"] access
+// instead of o.foo.
+function TestRemovingPropertyFromDictionaryPrototype2() {
+ let foo_func_called = 0;
+ let bar_func_called = 0;
+ let name = "func";
+
+ function Foo() {}
+ Foo.prototype[name] = function() { ++foo_func_called; }
+
+ function Bar() {}
+ Bar.prototype = Object.create(Foo.prototype);
+ EnsureDictionaryMode(Bar.prototype);
+ Bar.prototype[name] = function() { ++bar_func_called; }
+
+ let o = new Bar();
+
+ for (let i = 0; i < 11; ++i) {
+ // First, the property is looked up from Bar.
+ o[name]();
+
+ // Remove the property from Bar which is a dictionary-mode prototype between
+ // o and Foo. In the next iteration, it's looked up from Foo.
+ if (i == 9) {
+ delete Bar.prototype[name];
+ }
+ }
+
+ assertEquals(1, foo_func_called);
+ assertEquals(10, bar_func_called);
+}
+
+TestRemovingPropertyFromDictionaryPrototype2();
+
+function TestAddingPropertyToDictionaryPrototype_Monomorphic() {
+ function DoMonomorphicStoreToPrototype(p, f, do_delete=true) {
+ p.func = f;
+ if (do_delete) {
+ delete p.func;
+ }
+ }
+
+ let foo_func_called = 0;
+ let bar_func_called = 0;
+
+ function Foo() {}
+ Foo.prototype.func = function() { ++foo_func_called; }
+
+ function Bar() {}
+ Bar.prototype = Object.create(Foo.prototype);
+ EnsureDictionaryMode(Bar.prototype);
+
+ function bar_func() {
+ ++bar_func_called;
+ }
+ DoMonomorphicStoreToPrototype(Bar.prototype, bar_func);
+ DoMonomorphicStoreToPrototype(Bar.prototype, bar_func);
+ DoMonomorphicStoreToPrototype(Bar.prototype, bar_func);
+
+ let o = new Bar();
+
+ for (let i = 0; i < 11; ++i) {
+ // First, the property is looked up from Foo.
+ o.func();
+
+ // Add the property to Bar which is a dictionary-mode prototype between o
+ // and Foo. In the next iteration, it's looked up from Bar.
+ if (i == 9) {
+ DoMonomorphicStoreToPrototype(Bar.prototype, bar_func, false);
+ }
+ }
+
+ assertEquals(10, foo_func_called);
+ assertEquals(1, bar_func_called);
+}
+
+TestAddingPropertyToDictionaryPrototype_Monomorphic();
+
+function TestAddingKeyedPropertyToDictionaryPrototype_Monomorphic() {
+ function DoMonomorphicKeyedStoreToPrototype(p, name, f, do_delete=true) {
+ p[name] = f;
+ if (do_delete) {
+ delete p[name];
+ }
+ }
+
+ let foo_func_called = 0;
+ let bar_func_called = 0;
+ let name = "func";
+
+ function Foo() {}
+ Foo.prototype[name] = function() { ++foo_func_called; }
+
+ function Bar() {}
+ Bar.prototype = Object.create(Foo.prototype);
+ EnsureDictionaryMode(Bar.prototype);
+
+ function bar_func() {
+ ++bar_func_called;
+ }
+ DoMonomorphicKeyedStoreToPrototype(Bar.prototype, name, bar_func);
+ DoMonomorphicKeyedStoreToPrototype(Bar.prototype, name, bar_func);
+ DoMonomorphicKeyedStoreToPrototype(Bar.prototype, name, bar_func);
+
+ let o = new Bar();
+
+ for (let i = 0; i < 11; ++i) {
+ // First, the property is looked up from Foo.
+ o.func();
+
+ // Add the property to Bar which is a dictionary-mode prototype between o
+ // and Foo. In the next iteration, it's looked up from Bar.
+ if (i == 9) {
+ DoMonomorphicKeyedStoreToPrototype(Bar.prototype, name, bar_func, false);
+ }
+ }
+
+ assertEquals(10, foo_func_called);
+ assertEquals(1, bar_func_called);
+}
+
+TestAddingKeyedPropertyToDictionaryPrototype_Monomorphic();
+
+// Like TestAddingPropertyToDictionaryPrototype, except that the prototype isn't
+// in dictionary mode yet and only switches to dictionary mode after the
+// interesting property is added.
+function TestAddingPropertyToAlmostDictionaryPrototype() {
+ let foo_func_called = 0;
+ let bar_func_called = 0;
+
+ function Foo() {}
+ Foo.prototype.func = function() { ++foo_func_called; }
+
+ function Bar() {}
+ Bar.prototype = Object.create(Foo.prototype);
+ EnsureAlmostDictionaryMode(Bar.prototype);
+
+ let o = new Bar();
+ for (let i = 0; i < 2; ++i) {
+ o.x0;
+ }
+ assertTrue(%HasFastProperties(Bar.prototype));
+
+ for (let i = 0; i < 11; ++i) {
+ // First, the property is looked up from Foo.
+ o.func();
+
+ // Add the property to Bar which will now turn permanently into dictionary
+ // mode. In the next iteration, it's looked up from Bar.
+ if (i == 9) {
+ Bar.prototype.func = function() { ++bar_func_called; }
+ assertFalse(%HasFastProperties(Bar.prototype));
+ }
+ }
+
+ assertEquals(10, foo_func_called);
+ assertEquals(1, bar_func_called);
+}
+
+TestAddingPropertyToAlmostDictionaryPrototype();
+
+function TestReconfiguringDataToAccessor() {
+ let setter_called = 0;
+
+ function Bar() {}
+ EnsureDictionaryMode(Bar.prototype);
+ let name = "prop";
+ Object.defineProperty(Bar.prototype, name,
+ {value: 1000, writable: true, configurable: true});
+
+ for (let i = 0; i < 11; ++i) {
+ let obj1 = new Bar();
+ if (i < 10) {
+ assertEquals(1000, obj1.prop);
+ } else {
+ assertEquals(3000, obj1.prop);
+ }
+
+ // Add the property into the object.
+ obj1.prop = 2000;
+ if (i < 10) {
+ assertEquals(2000, obj1.prop);
+ } else {
+ assertEquals(3000, obj1.prop);
+ }
+
+ // Make "prop" an accessor property in the prototype.
+ if (i == 9) {
+ Object.defineProperty(Bar.prototype, name,
+ {get: () => 3000,
+ set: function(val) { ++setter_called; }});
+ }
+ }
+ assertEquals(1, setter_called);
+}
+
+TestReconfiguringDataToAccessor();
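
All of the tests above check a single invariant from different IC and dictionary states: a property added to (or removed from) an intermediate dictionary-mode prototype must be honored on the very next lookup. The plain-JS contract, independent of optimization state:

function Foo() {}
Foo.prototype.func = () => "from Foo";
function Bar() {}
Bar.prototype = Object.create(Foo.prototype);
const o = new Bar();
console.log(o.func());                  // "from Foo"
Bar.prototype.func = () => "from Bar";  // shadow it on the middle prototype
console.log(o.func());                  // "from Bar"
delete Bar.prototype.func;
console.log(o.func());                  // "from Foo" again
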
diff --git a/deps/v8/test/mjsunit/es6/array-find.js b/deps/v8/test/mjsunit/es6/array-find.js
index 5f6ba4226b..9fed027c8f 100644
--- a/deps/v8/test/mjsunit/es6/array-find.js
+++ b/deps/v8/test/mjsunit/es6/array-find.js
@@ -234,6 +234,40 @@ assertEquals(22, a.find(function(val) { return 22 === val; }), undefined);
//
+// Test predicate is called for missing properties
+//
+(function() {
+ const obj = {
+ "0": 0,
+ "2": 2,
+ length: 3
+ };
+ const received = [];
+ const predicate = (v) => { received.push(v); return false; };
+ const found = Array.prototype.find.call(obj, predicate);
+ assertEquals(undefined, found);
+ assertArrayEquals([0, undefined, 2], received);
+})();
+
+
+//
+// Test predicate modifying array prototype
+//
+(function() {
+ const a = [0, , 2];
+ const received = [];
+ const predicate = (v) => {
+ a.__proto__ = null;
+ received.push(v);
+ return false;
+ };
+ const found = Array.prototype.find.call(a, predicate);
+ assertEquals(undefined, found);
+ assertArrayEquals([0, undefined, 2], received);
+})();
+
+
+//
// Test thisArg
//
(function() {
diff --git a/deps/v8/test/mjsunit/es6/array-findindex.js b/deps/v8/test/mjsunit/es6/array-findindex.js
index 716eb4e0db..d335c15108 100644
--- a/deps/v8/test/mjsunit/es6/array-findindex.js
+++ b/deps/v8/test/mjsunit/es6/array-findindex.js
@@ -234,6 +234,40 @@ assertEquals(3, a.findIndex(function(val) { return 24 === val; }));
//
+// Test predicate is called for missing properties
+//
+(function() {
+ const obj = {
+ "0": 0,
+ "2": 2,
+ length: 3
+ };
+ const received = [];
+ const predicate = (v) => { received.push(v); return false; };
+ const found = Array.prototype.findIndex.call(obj, predicate);
+ assertEquals(-1, found);
+ assertArrayEquals([0, undefined, 2], received);
+})();
+
+
+//
+// Test predicate modifying array prototype
+//
+(function() {
+ const a = [0, , 2];
+ const received = [];
+ const predicate = (v) => {
+ a.__proto__ = null;
+ received.push(v);
+ return false;
+ };
+ const found = Array.prototype.findIndex.call(a, predicate);
+ assertEquals(-1, found);
+ assertArrayEquals([0, undefined, 2], received);
+})();
+
+
+//
// Test thisArg
//
(function() {
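
Contrast with the filter/forEach/every/map tests earlier in the patch: find and findIndex do not skip holes; they visit every index up to length - 1 and hand the predicate undefined for the missing ones. A standalone sketch:

const seen = [];
Array.prototype.find.call({ 0: "a", 2: "c", length: 3 },
                          v => { seen.push(v); return false; });
console.log(seen);  // ["a", undefined, "c"]
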
diff --git a/deps/v8/test/mjsunit/es6/array-iterator-turbo.js b/deps/v8/test/mjsunit/es6/array-iterator-turbo.js
index 3a159b6337..489a53dbc7 100644
--- a/deps/v8/test/mjsunit/es6/array-iterator-turbo.js
+++ b/deps/v8/test/mjsunit/es6/array-iterator-turbo.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// Flags: --turbo-escape --allow-natives-syntax --no-always-opt
-// Flags: --opt --turbo-filter=*
+// Flags: --opt --turbo-filter=* --no-force-slow-path
"use strict";
diff --git a/deps/v8/test/mjsunit/es6/call-with-spread-modify-next.js b/deps/v8/test/mjsunit/es6/call-with-spread-modify-next.js
index d22a1eaec0..3cae94ff9d 100644
--- a/deps/v8/test/mjsunit/es6/call-with-spread-modify-next.js
+++ b/deps/v8/test/mjsunit/es6/call-with-spread-modify-next.js
@@ -37,6 +37,8 @@
var r2 = testMax(1, 2);
- assertEquals(3, called);
+ // .next() is only loaded once during the iteration prologue (see
+ // https://github.com/tc39/ecma262/pull/988/ and v8:6861)
+ assertEquals(1, called);
assertEquals(2, r2);
})();
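
The spec change behind this expectation (tc39/ecma262#988, also exercised by the iteration-semantics.js hunk below): GetIterator now reads the next method once, caches it in the iterator record, and every subsequent step reuses the cached function. A standalone sketch that counts property loads with a getter:

let loads = 0;
const iterable = {
  [Symbol.iterator]() {
    let i = 0;
    const iter = {};
    Object.defineProperty(iter, "next", {
      get() {
        loads++;
        return () => (i < 3 ? { value: i++, done: false } : { done: true });
      }
    });
    return iter;
  }
};
console.log([...iterable]);  // [0, 1, 2]
console.log(loads);          // 1 -- not one load per element
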
diff --git a/deps/v8/test/mjsunit/es6/computed-property-names-object-literals-methods.js b/deps/v8/test/mjsunit/es6/computed-property-names-object-literals-methods.js
index 36afbe2ced..24a357258a 100644
--- a/deps/v8/test/mjsunit/es6/computed-property-names-object-literals-methods.js
+++ b/deps/v8/test/mjsunit/es6/computed-property-names-object-literals-methods.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-async-iteration
-
function ID(x) {
return x;
}
diff --git a/deps/v8/test/mjsunit/es6/destructuring-assignment.js b/deps/v8/test/mjsunit/es6/destructuring-assignment.js
index 579c87718b..dee7a0b16d 100644
--- a/deps/v8/test/mjsunit/es6/destructuring-assignment.js
+++ b/deps/v8/test/mjsunit/es6/destructuring-assignment.js
@@ -513,25 +513,31 @@ assertEquals(oz, [1, 2, 3, 4, 5]);
}
function FakeNewTarget() {}
- assertEquals(undefined, ReturnNewTarget1());
- assertEquals(ReturnNewTarget1, new ReturnNewTarget1());
- assertEquals(FakeNewTarget,
- Reflect.construct(ReturnNewTarget1, [], FakeNewTarget));
-
- assertEquals(undefined, ReturnNewTarget2());
- assertEquals(ReturnNewTarget2, new ReturnNewTarget2());
- assertEquals(FakeNewTarget,
- Reflect.construct(ReturnNewTarget2, [], FakeNewTarget));
-
- assertEquals(undefined, ReturnNewTarget3());
- assertEquals(ReturnNewTarget3, new ReturnNewTarget3());
- assertEquals(FakeNewTarget,
- Reflect.construct(ReturnNewTarget3, [], FakeNewTarget));
-
- assertEquals(undefined, ReturnNewTarget4());
- assertEquals(ReturnNewTarget4, new ReturnNewTarget4());
- assertEquals(FakeNewTarget,
- Reflect.construct(ReturnNewTarget4, [], FakeNewTarget));
+
+ function construct() {
+ assertEquals(undefined, ReturnNewTarget1());
+ assertEquals(ReturnNewTarget1, new ReturnNewTarget1());
+ assertEquals(FakeNewTarget,
+ Reflect.construct(ReturnNewTarget1, [], FakeNewTarget));
+
+ assertEquals(undefined, ReturnNewTarget2());
+ assertEquals(ReturnNewTarget2, new ReturnNewTarget2());
+ assertEquals(FakeNewTarget,
+ Reflect.construct(ReturnNewTarget2, [], FakeNewTarget));
+
+ assertEquals(undefined, ReturnNewTarget3());
+ assertEquals(ReturnNewTarget3, new ReturnNewTarget3());
+ assertEquals(FakeNewTarget,
+ Reflect.construct(ReturnNewTarget3, [], FakeNewTarget));
+
+ assertEquals(undefined, ReturnNewTarget4());
+ assertEquals(ReturnNewTarget4, new ReturnNewTarget4());
+ assertEquals(FakeNewTarget,
+ Reflect.construct(ReturnNewTarget4, [], FakeNewTarget));
+ }
+ construct();
+ FakeNewTarget.prototype = 1;
+ construct();
})();
(function testSuperCall() {
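
For context on the re-run after `FakeNewTarget.prototype = 1`: Reflect.construct(target, args, newTarget) wires new.target to its third argument, and when newTarget.prototype is not an object, instances silently fall back to %Object.prototype% rather than throwing. A short sketch of both behaviors (illustrative, not part of the patch):

function Who() { return new.target; }
function Fake() {}
// new.target inside the constructor is the third argument:
console.log(Reflect.construct(Who, [], Fake) === Fake);  // true
// A non-object newTarget.prototype is ignored; instances fall back to
// %Object.prototype% instead of throwing:
Fake.prototype = 1;
const obj = Reflect.construct(function () {}, [], Fake);
console.log(Object.getPrototypeOf(obj) === Object.prototype);  // true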
diff --git a/deps/v8/test/mjsunit/es6/iteration-semantics.js b/deps/v8/test/mjsunit/es6/iteration-semantics.js
index 558fb837e7..40037be6f5 100644
--- a/deps/v8/test/mjsunit/es6/iteration-semantics.js
+++ b/deps/v8/test/mjsunit/es6/iteration-semantics.js
@@ -220,13 +220,11 @@ assertThrows('fold(sum, 0, unreachable({}))', TypeError);
assertThrows('fold(sum, 0, unreachable(false))', TypeError);
assertThrows('fold(sum, 0, unreachable(37))', TypeError);
-// "next" is looked up each time.
-assertThrows('fold(sum, 0, remove_next_after(integers_until(10), 5))',
- TypeError);
-// It is not called at any other time.
+// "next" is looked up only once during the iteration prologue (see
+// https://github.com/tc39/ecma262/pull/988)
+assertEquals(45, fold(sum, 0, remove_next_after(integers_until(10), 5)));
assertEquals(45,
fold(sum, 0, remove_next_after(integers_until(10), 10)));
-// It is not looked up too many times.
assertEquals(45,
fold(sum, 0, poison_next_after(integers_until(10), 10)));
diff --git a/deps/v8/test/mjsunit/es6/reflect-construct.js b/deps/v8/test/mjsunit/es6/reflect-construct.js
index 03e8397a9b..34b6f27373 100644
--- a/deps/v8/test/mjsunit/es6/reflect-construct.js
+++ b/deps/v8/test/mjsunit/es6/reflect-construct.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --allow-unsafe-function-constructor --harmony-async-iteration
+// Flags: --allow-unsafe-function-constructor
(function testReflectConstructArity() {
diff --git a/deps/v8/test/mjsunit/es6/spread-call.js b/deps/v8/test/mjsunit/es6/spread-call.js
index cdedd990c8..7403e0726e 100644
--- a/deps/v8/test/mjsunit/es6/spread-call.js
+++ b/deps/v8/test/mjsunit/es6/spread-call.js
@@ -376,6 +376,11 @@ testSpreadCallsStrict();
a[3] = 4;
var called = 0;
+  // The .next method is only accessed once, during the iteration prologue (see
+ // https://github.com/tc39/ecma262/pull/988)
+ let ArrayIteratorPrototype = Array.prototype[Symbol.iterator]().__proto__;
+ let ArrayIteratorPrototypeNextDescriptor =
+ Object.getOwnPropertyDescriptor(ArrayIteratorPrototype, 'next');
Object.defineProperty(Array.prototype, 2, {
get: function() {
var ai = a[Symbol.iterator]();
@@ -384,7 +389,8 @@ testSpreadCallsStrict();
get: function() {
called++;
return original_next;
- }
+ },
+ configurable: true
});
return 3;
},
@@ -392,8 +398,10 @@ testSpreadCallsStrict();
});
assertEquals(10, sum(...a));
- assertEquals(2, called);
+ assertEquals(0, called);
+ Object.defineProperty(ArrayIteratorPrototype, 'next',
+ ArrayIteratorPrototypeNextDescriptor);
Object.defineProperty(Array.prototype, 2, {});
})();
@@ -430,9 +438,9 @@ testSpreadCallsStrict();
countArgs(...a);
- // should be called 4 times; 3 for the values, 1 for the final
- // {value: undefined, done: true} pair
- assertEquals(4, called);
+  // The .next method is only accessed once, during the iteration prologue (see
+ // https://github.com/tc39/ecma262/pull/988)
+ assertEquals(1, called);
})();
(function testArrayIteratorPrototypeModified() {
diff --git a/deps/v8/test/mjsunit/es6/super-with-spread-modify-next.js b/deps/v8/test/mjsunit/es6/super-with-spread-modify-next.js
index 299917dbf1..cd7798b8d1 100644
--- a/deps/v8/test/mjsunit/es6/super-with-spread-modify-next.js
+++ b/deps/v8/test/mjsunit/es6/super-with-spread-modify-next.js
@@ -48,7 +48,9 @@
var r2 = testArgumentsPoint(1, 2);
- assertEquals(3, called);
+ // .next() is only loaded once during the iteration prologue (see
+ // https://github.com/tc39/ecma262/pull/988/ and v8:6861)
+ assertEquals(1, called);
assertInstanceof(r2, ArgumentsPoint);
assertInstanceof(r2, Point);
assertEquals(r2.x, 1);
diff --git a/deps/v8/test/mjsunit/es6/typedarray.js b/deps/v8/test/mjsunit/es6/typedarray.js
index 93d92097cd..02bd91c1e5 100644
--- a/deps/v8/test/mjsunit/es6/typedarray.js
+++ b/deps/v8/test/mjsunit/es6/typedarray.js
@@ -341,16 +341,30 @@ function TestTypedArray(constr, elementSize, typicalElement) {
// Modified %ArrayIteratorPrototype%.next() method is honoured (v8:5699)
const ArrayIteratorPrototype = Object.getPrototypeOf([][Symbol.iterator]());
+ const ArrayIteratorPrototypeNextDescriptor =
+ Object.getOwnPropertyDescriptor(ArrayIteratorPrototype, 'next');
const ArrayIteratorPrototypeNext = ArrayIteratorPrototype.next;
ArrayIteratorPrototype.next = function() {
return { done: true };
};
genArr = new constr([1, 2, 3]);
assertEquals(0, genArr.length);
+
ArrayIteratorPrototype.next = ArrayIteratorPrototypeNext;
- // Modified %ArrayIteratorPrototype%.next() during iteration is honoured as
- // well.
+  // Modified %ArrayIteratorPrototype%.next() is only loaded once, during the
+  // iteration prologue.
+ let nextMethod = ArrayIteratorPrototypeNext;
+ let getNextCount = 0;
+ Object.defineProperty(ArrayIteratorPrototype, 'next', {
+ get() {
+ getNextCount++;
+ return nextMethod;
+ },
+ set(v) { nextMethod = v; },
+ configurable: true
+ });
+
genArr = new constr(Object.defineProperty([1, , 3], 1, {
get() {
ArrayIteratorPrototype.next = function() {
@@ -359,9 +373,13 @@ function TestTypedArray(constr, elementSize, typicalElement) {
return 2;
}
}));
- assertEquals(2, genArr.length);
+ Object.defineProperty(ArrayIteratorPrototype, 'next',
+ ArrayIteratorPrototypeNextDescriptor);
+ assertEquals(1, getNextCount);
+ assertEquals(3, genArr.length);
assertEquals(1, genArr[0]);
assertEquals(2, genArr[1]);
+ assertEquals(3, genArr[2]);
ArrayIteratorPrototype.next = ArrayIteratorPrototypeNext;
}
diff --git a/deps/v8/test/mjsunit/es8/object-entries.js b/deps/v8/test/mjsunit/es8/object-entries.js
index c59d81c823..5c7e74e378 100644
--- a/deps/v8/test/mjsunit/es8/object-entries.js
+++ b/deps/v8/test/mjsunit/es8/object-entries.js
@@ -284,8 +284,8 @@ TestMutateDuringEnumeration();
HOLEY_DOUBLE_ELEMENTS: [ [, , NaN], [ ["2", NaN] ] ],
DICTIONARY_ELEMENTS: [ Object.defineProperties({ 10000: "world" }, {
- 100: { enumerable: true, value: "hello" },
- 99: { enumerable: false, value: "nope" }
+ 100: { enumerable: true, value: "hello", configurable: true},
+ 99: { enumerable: false, value: "nope", configurable: true}
}), [ ["100", "hello"], ["10000", "world" ] ] ],
FAST_SLOPPY_ARGUMENTS_ELEMENTS: [
fastSloppyArguments("a", "b", "c"),
@@ -298,17 +298,42 @@ TestMutateDuringEnumeration();
[ ["0", "s"], ["1", "t"], ["2", "r"]] ],
SLOW_STRING_WRAPPER_ELEMENTS: [
Object.defineProperties(new String("str"), {
- 10000: { enumerable: false, value: "X" },
- 9999: { enumerable: true, value: "Y" }
+ 10000: { enumerable: false, value: "X", configurable: true},
+ 9999: { enumerable: true, value: "Y", configurable: true}
}), [["0", "s"], ["1", "t"], ["2", "r"], ["9999", "Y"]] ],
};
for (let [kind, [object, expected]] of Object.entries(element_kinds)) {
let result1 = Object.entries(object);
+ %HeapObjectVerify(object);
+ %HeapObjectVerify(result1);
assertEquals(expected, result1, `fast Object.entries() with ${kind}`);
let proxy = new Proxy(object, {});
let result2 = Object.entries(proxy);
+ %HeapObjectVerify(result2);
assertEquals(result1, result2, `slow Object.entries() with ${kind}`);
}
+
+ function makeFastElements(array) {
+ // Remove all possible getters.
+ for (let k of Object.getOwnPropertyNames(this)) {
+ if (k == "length") continue;
+ delete this[k];
+ }
+ // Make the array large enough to trigger re-checking for compaction.
+ this[1000] = 1;
+ // Make the elements fast again.
+ Array.prototype.unshift.call(this, 1.1);
+ }
+
+ // Test that changing the elements kind is supported.
+ for (let [kind, [object, expected]] of Object.entries(element_kinds)) {
+ if (kind == "FAST_STRING_WRAPPER_ELEMENTS") break;
+ object.__defineGetter__(1, makeFastElements);
+ let result1 = Object.entries(object).toString();
+ %HeapObjectVerify(object);
+ %HeapObjectVerify(result1);
+ }
+
})();
diff --git a/deps/v8/test/mjsunit/es8/regress/regress-794744.js b/deps/v8/test/mjsunit/es8/regress/regress-794744.js
new file mode 100644
index 0000000000..a4dcb5d42a
--- /dev/null
+++ b/deps/v8/test/mjsunit/es8/regress/regress-794744.js
@@ -0,0 +1,8 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Object.getOwnPropertyDescriptors loads %FunctionPrototype%.caller, an
+// accessor property which inspects the current call stack. Verify that this
+// call-stack iteration doesn't crash when there are no JS frames on the stack.
+Promise.resolve(function () {}).then(Object.getOwnPropertyDescriptors);
diff --git a/deps/v8/test/mjsunit/global-prototypes.js b/deps/v8/test/mjsunit/global-prototypes.js
new file mode 100644
index 0000000000..98232c2814
--- /dev/null
+++ b/deps/v8/test/mjsunit/global-prototypes.js
@@ -0,0 +1,354 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+
+assertEquals(this.__proto__, Object.prototype);
+
+function TestAddingPropertyToGlobalPrototype() {
+ let foo_func_called = 0;
+ let bar_func_called = 0;
+
+ function Foo() {}
+ Foo.prototype.func = function() { ++foo_func_called; }
+
+ delete this.func;
+ this.__proto__ = Foo.prototype;
+
+ function Bar() {}
+ Bar.prototype = this;
+
+ let o = new Bar();
+
+ for (let i = 0; i < 11; ++i) {
+ // First, the property is looked up from Foo.
+ o.func();
+
+ // Add the property to Bar which is a Global-mode prototype between o
+ // and Foo. In the next iteration, it's looked up from Bar.
+ if (i == 9) {
+ Bar.prototype.func = function() { ++bar_func_called; }
+ }
+ }
+
+ assertEquals(10, foo_func_called);
+ assertEquals(1, bar_func_called);
+}
+
+TestAddingPropertyToGlobalPrototype();
+
+
+// Same as TestAddingPropertyToGlobalPrototype, but using o["foo"] access
+// instead of o.foo.
+function TestAddingPropertyToGlobalPrototype2() {
+ let foo_func_called = 0;
+ let bar_func_called = 0;
+ let name = "func";
+
+ function Foo() {}
+ Foo.prototype[name] = function() { ++foo_func_called; }
+
+ delete this[name];
+ this.__proto__ = Foo.prototype;
+
+ function Bar() {}
+ Bar.prototype = this;
+
+ let o = new Bar();
+
+ for (let i = 0; i < 11; ++i) {
+ // First, the property is looked up from Foo.
+ o[name]();
+
+ // Add the property to Bar which is a Global-mode prototype between o
+ // and Foo. In the next iteration, it's looked up from Bar.
+ if (i == 9) {
+ Bar.prototype[name] = function() { ++bar_func_called; }
+ }
+ }
+
+ assertEquals(10, foo_func_called);
+ assertEquals(1, bar_func_called);
+}
+
+TestAddingPropertyToGlobalPrototype2();
+
+
+function TestAddingPropertyToGlobalPrototype_DefineProperty() {
+ let foo_func_called = 0;
+ let bar_func_called = 0;
+
+ function Foo() {}
+ Foo.prototype.func = function() { ++foo_func_called; }
+
+ delete this.func;
+ this.__proto__ = Foo.prototype;
+
+ function Bar() {}
+ Bar.prototype = this;
+
+ let o = new Bar();
+
+ for (let i = 0; i < 11; ++i) {
+ // First, the property is looked up from Foo.
+ o.func();
+
+ // Add the property to Bar which is a Global-mode prototype between o
+ // and Foo. In the next iteration, it's looked up from Bar.
+ if (i == 9) {
+ Object.defineProperty(Bar.prototype, "func",
+ {
+ value: function() { ++bar_func_called; },
+ configurable:true
+ });
+ }
+ }
+
+ assertEquals(10, foo_func_called);
+ assertEquals(1, bar_func_called);
+}
+
+TestAddingPropertyToGlobalPrototype_DefineProperty();
+
+
+function TestAddingAccessorPropertyToGlobalPrototype() {
+ let foo_func_called = 0;
+ let bar_func_called = 0;
+
+ function Foo() {}
+ Foo.prototype.func = function() { ++foo_func_called; }
+
+ delete this.func;
+ this.__proto__ = Foo.prototype;
+
+ function Bar() {}
+ Bar.prototype = this;
+
+ let o = new Bar();
+
+ for (let i = 0; i < 11; ++i) {
+ // First, the property is looked up from Foo.
+ o.func();
+
+ // Add the property to Bar which is a Global-mode prototype between o
+ // and Foo. In the next iteration, it's looked up from Bar.
+ if (i == 9) {
+ Object.defineProperty(Bar.prototype, "func",
+ {
+ get: function() { return function() { ++bar_func_called; }},
+ configurable: true
+ });
+ }
+ }
+
+ assertEquals(10, foo_func_called);
+ assertEquals(1, bar_func_called);
+}
+
+TestAddingAccessorPropertyToGlobalPrototype();
+
+
+function TestRemovingPropertyFromGlobalPrototype() {
+ let foo_func_called = 0;
+ let bar_func_called = 0;
+
+ function Foo() {}
+ Foo.prototype.func = function() { ++foo_func_called; }
+
+ delete this.func;
+ this.__proto__ = Foo.prototype;
+
+ function Bar() {}
+ Bar.prototype = this;
+ Bar.prototype.func = function() { ++bar_func_called; }
+
+ let o = new Bar();
+
+ for (let i = 0; i < 11; ++i) {
+ // First, the property is looked up from Bar.
+ o.func();
+
+ // Remove the property from Bar which is a Global-mode prototype between
+ // o and Foo. In the next iteration, it's looked up from Foo.
+ if (i == 9) {
+ delete Bar.prototype.func;
+ }
+ }
+
+ assertEquals(1, foo_func_called);
+ assertEquals(10, bar_func_called);
+}
+
+TestRemovingPropertyFromGlobalPrototype();
+
+
+// Same as TestRemovingPropertyFromGlobalPrototype, but using o["foo"] access
+// instead of o.foo.
+function TestRemovingPropertyFromGlobalPrototype2() {
+ let foo_func_called = 0;
+ let bar_func_called = 0;
+ let name = "func";
+
+ function Foo() {}
+ Foo.prototype[name] = function() { ++foo_func_called; }
+
+ this.__proto__ = Foo.prototype;
+
+ function Bar() {}
+ Bar.prototype = this;
+ Bar.prototype[name] = function() { ++bar_func_called; }
+
+ let o = new Bar();
+
+ for (let i = 0; i < 11; ++i) {
+ // First, the property is looked up from Bar.
+ o[name]();
+
+ // Remove the property from Bar which is a Global-mode prototype between
+ // o and Foo. In the next iteration, it's looked up from Foo.
+ if (i == 9) {
+ delete Bar.prototype[name];
+ }
+ }
+
+ assertEquals(1, foo_func_called);
+ assertEquals(10, bar_func_called);
+}
+
+TestRemovingPropertyFromGlobalPrototype2();
+
+
+function TestAddingPropertyToGlobalPrototype_MonomorphicDot() {
+ function DoMonomorphicStoreToPrototypeDot(p, f, do_delete=true) {
+ p.func = f;
+ if (do_delete) {
+ delete p.func;
+ }
+ }
+ let foo_func_called = 0;
+ let bar_func_called = 0;
+
+ function Foo() {}
+ Foo.prototype.func = function() { ++foo_func_called; }
+
+ delete this.func;
+ this.__proto__ = Foo.prototype;
+
+ function Bar() {}
+ Bar.prototype = this;
+
+ function bar_func() {
+ ++bar_func_called;
+ }
+ DoMonomorphicStoreToPrototypeDot(Bar.prototype, bar_func);
+ DoMonomorphicStoreToPrototypeDot(Bar.prototype, bar_func);
+ DoMonomorphicStoreToPrototypeDot(Bar.prototype, bar_func);
+
+ let o = new Bar();
+
+ for (let i = 0; i < 11; ++i) {
+ // First, the property is looked up from Foo.
+ o.func();
+
+ // Add the property to Bar which is a Global-mode prototype between o
+ // and Foo. In the next iteration, it's looked up from Bar.
+ if (i == 9) {
+ DoMonomorphicStoreToPrototypeDot(Bar.prototype, bar_func, false);
+ }
+ }
+
+ assertEquals(10, foo_func_called);
+ assertEquals(1, bar_func_called);
+}
+
+TestAddingPropertyToGlobalPrototype_MonomorphicDot();
+
+
+function TestAddingPropertyToGlobalPrototype_MonomorphicBrackets() {
+ function DoMonomorphicStoreToPrototypeBrackets(p, name, f, do_delete=true) {
+ p[name] = f;
+ if (do_delete) {
+ delete p[name];
+ }
+ }
+ let foo_func_called = 0;
+ let bar_func_called = 0;
+ let name = "func";
+
+ function Foo() {}
+ Foo.prototype[name] = function() { ++foo_func_called; }
+
+ delete this[name];
+ this.__proto__ = Foo.prototype;
+
+ function Bar() {}
+ Bar.prototype = this;
+
+ function bar_func() {
+ ++bar_func_called;
+ }
+ DoMonomorphicStoreToPrototypeBrackets(Bar.prototype, name, bar_func);
+ DoMonomorphicStoreToPrototypeBrackets(Bar.prototype, name, bar_func);
+ DoMonomorphicStoreToPrototypeBrackets(Bar.prototype, name, bar_func);
+
+ let o = new Bar();
+
+ for (let i = 0; i < 11; ++i) {
+ // First, the property is looked up from Foo.
+ o.func();
+
+ // Add the property to Bar which is a Global-mode prototype between o
+ // and Foo. In the next iteration, it's looked up from Bar.
+ if (i == 9) {
+ DoMonomorphicStoreToPrototypeBrackets(Bar.prototype, name, bar_func, false);
+ }
+ }
+
+ assertEquals(10, foo_func_called);
+ assertEquals(1, bar_func_called);
+}
+
+TestAddingPropertyToGlobalPrototype_MonomorphicBrackets();
+
+
+function TestReconfiguringDataToAccessor() {
+ let setter_called = 0;
+ let name = "prop";
+
+ delete this[name];
+ this.__proto__ = Object.prototype;
+
+ function Bar() {}
+ Bar.prototype = this;
+
+ Object.defineProperty(Bar.prototype, name, {value: 1000, writable: true, configurable: true});
+
+ for (let i = 0; i < 11; ++i) {
+ let obj1 = new Bar();
+ if (i < 10) {
+ assertEquals(1000, obj1.prop);
+ } else {
+ assertEquals(3000, obj1.prop);
+ }
+
+ // Add the property into the object.
+ obj1.prop = 2000;
+ if (i < 10) {
+ assertEquals(2000, obj1.prop);
+ } else {
+ assertEquals(3000, obj1.prop);
+ }
+
+ // Make "prop" an accessor property in the prototype.
+ if (i == 9) {
+ Object.defineProperty(Bar.prototype, name,
+ {get: () => 3000,
+ set: function(val) { ++setter_called; }});
+ }
+ }
+ assertEquals(1, setter_called);
+}
+
+TestReconfiguringDataToAccessor();
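
All of these cases exercise the same observable rule: lookup of `func` walks o → Bar.prototype (the global object) → Foo.prototype, and adding or removing the property on the intermediate global-object prototype must immediately re-route lookups, even after inline caches have specialized on the old chain. Stripped of the IC machinery, the pure-JS rule is ordinary prototype shadowing:

const outerProto = { f: () => "outer" };
const middleProto = Object.create(outerProto);  // stands in for the global object
const o = Object.create(middleProto);
console.log(o.f());             // "outer", found on outerProto
middleProto.f = () => "inner";  // shadow on the intermediate prototype
console.log(o.f());             // "inner"
delete middleProto.f;
console.log(o.f());             // "outer" again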
diff --git a/deps/v8/test/mjsunit/harmony/async-for-of-non-iterable.js b/deps/v8/test/mjsunit/harmony/async-for-of-non-iterable.js
index c84c9c6884..3394ed394c 100644
--- a/deps/v8/test/mjsunit/harmony/async-for-of-non-iterable.js
+++ b/deps/v8/test/mjsunit/harmony/async-for-of-non-iterable.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-async-iteration
var done = false;
async function f() {
diff --git a/deps/v8/test/mjsunit/harmony/async-from-sync-iterator.js b/deps/v8/test/mjsunit/harmony/async-from-sync-iterator.js
index d965bd070c..a7b0d1bda4 100644
--- a/deps/v8/test/mjsunit/harmony/async-from-sync-iterator.js
+++ b/deps/v8/test/mjsunit/harmony/async-from-sync-iterator.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-async-iteration --allow-natives-syntax
+// Flags: --allow-natives-syntax
let testFailed = false;
let testFailure;
diff --git a/deps/v8/test/mjsunit/harmony/async-generators-basic.js b/deps/v8/test/mjsunit/harmony/async-generators-basic.js
index 29441b119b..d7af1836b8 100644
--- a/deps/v8/test/mjsunit/harmony/async-generators-basic.js
+++ b/deps/v8/test/mjsunit/harmony/async-generators-basic.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-async-iteration --allow-natives-syntax
+// Flags: --allow-natives-syntax
function assertThrowsAsync(run, errorType, message) {
var actual;
diff --git a/deps/v8/test/mjsunit/harmony/async-generators-resume-return.js b/deps/v8/test/mjsunit/harmony/async-generators-resume-return.js
index 7a7efe7801..715c81fc21 100644
--- a/deps/v8/test/mjsunit/harmony/async-generators-resume-return.js
+++ b/deps/v8/test/mjsunit/harmony/async-generators-resume-return.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-async-iteration --allow-natives-syntax
+// Flags: --allow-natives-syntax
// .return() from state suspendedStart with undefined
testAsync(test => {
diff --git a/deps/v8/test/mjsunit/harmony/async-generators-return.js b/deps/v8/test/mjsunit/harmony/async-generators-return.js
index b0c7febf8c..27cbd4373b 100644
--- a/deps/v8/test/mjsunit/harmony/async-generators-return.js
+++ b/deps/v8/test/mjsunit/harmony/async-generators-return.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-async-iteration --allow-natives-syntax
+// Flags: --allow-natives-syntax
testAsync(test => {
test.plan(2);
diff --git a/deps/v8/test/mjsunit/harmony/async-generators-yield.js b/deps/v8/test/mjsunit/harmony/async-generators-yield.js
index c999c7006f..feb6339af2 100644
--- a/deps/v8/test/mjsunit/harmony/async-generators-yield.js
+++ b/deps/v8/test/mjsunit/harmony/async-generators-yield.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-async-iteration --allow-natives-syntax
+// Flags: --allow-natives-syntax
// Yield a thenable which is never settled
testAsync(test => {
diff --git a/deps/v8/test/mjsunit/harmony/bigint/as-int-n.js b/deps/v8/test/mjsunit/harmony/bigint/as-int-n.js
index 08c94245fd..faa7dba866 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/as-int-n.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/as-int-n.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-bigint --noopt
+// Flags: --harmony-bigint
// BigInt.asIntN
{
@@ -145,6 +145,8 @@
}{
assertThrows(() => BigInt.asIntN(3, 12), TypeError);
assertEquals(-4n, BigInt.asIntN(3, "12"));
+ assertEquals(0x123456789abcdefn,
+ BigInt.asIntN(64, 0xabcdef0123456789abcdefn));
}
// BigInt.asUintN
@@ -244,10 +246,9 @@
assertEquals(9223372036854775808n - 42n, BigInt.asUintN(63, -42n));
assertEquals(18446744073709551616n - 42n, BigInt.asUintN(64, -42n));
assertEquals(36893488147419103232n - 42n, BigInt.asUintN(65, -42n));
-  // TODO(neis): Enable once we have exponentiation.
- // assertEquals(2n**127n - 42n, BigInt.asUintN(127, -42n));
- // assertEquals(2n**128n - 42n, BigInt.asUintN(128, -42n));
- // assertEquals(2n**129n - 42n, BigInt.asUintN(129, -42n));
+ assertEquals(2n**127n - 42n, BigInt.asUintN(127, -42n));
+ assertEquals(2n**128n - 42n, BigInt.asUintN(128, -42n));
+ assertEquals(2n**129n - 42n, BigInt.asUintN(129, -42n));
}{
assertEquals(0n, BigInt.asUintN(0, 4294967295n));
assertEquals(1n, BigInt.asUintN(1, 4294967295n));
@@ -274,10 +275,9 @@
BigInt.asUintN(64,-4294967295n));
assertEquals(36893488147419103232n - 4294967295n,
BigInt.asUintN(65, -4294967295n));
-  // TODO(neis): Enable once we have exponentiation.
- // assertEquals(2n**127n - 42n, BigInt.asUintN(127, -4294967295n));
- // assertEquals(2n**128n - 42n, BigInt.asUintN(128, -4294967295n));
- // assertEquals(2n**129n - 42n, BigInt.asUintN(129, -4294967295n));
+ assertEquals(2n**127n - 4294967295n, BigInt.asUintN(127, -4294967295n));
+ assertEquals(2n**128n - 4294967295n, BigInt.asUintN(128, -4294967295n));
+ assertEquals(2n**129n - 4294967295n, BigInt.asUintN(129, -4294967295n));
}{
assertEquals(42n, BigInt.asUintN(2**32, 42n));
assertEquals(4294967295n, BigInt.asUintN(2**32, 4294967295n));
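
The arithmetic behind these expectations: BigInt.asUintN(bits, x) reduces x modulo 2**bits into [0, 2**bits), and BigInt.asIntN applies the same reduction but reinterprets the upper half of that range as negative two's-complement values. A sketch of the reduction, assuming BigInt inputs (the real builtins also coerce their arguments, as the string case above shows):

// asUintN: mathematical mod into [0, 2^bits).
const asUintN = (bits, x) => {
  const mod = 1n << BigInt(bits);
  return ((x % mod) + mod) % mod;  // JS % keeps the dividend's sign
};
// asIntN: same reduction, then map values >= 2^(bits-1) to negatives.
const asIntN = (bits, x) => {
  const mod = 1n << BigInt(bits);
  const r = ((x % mod) + mod) % mod;
  return r * 2n >= mod ? r - mod : r;
};
console.log(asIntN(3, 12n));     // -4n, as asserted above
console.log(asUintN(64, -42n));  // 18446744073709551574n == 2n**64n - 42n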
diff --git a/deps/v8/test/mjsunit/harmony/bigint/basics.js b/deps/v8/test/mjsunit/harmony/bigint/basics.js
index 5ea89009a3..398d670ca8 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/basics.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/basics.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --harmony-bigint --no-opt
+// Flags: --allow-natives-syntax --harmony-bigint
'use strict'
@@ -105,14 +105,6 @@ const six = BigInt(6);
assertTrue(typeof 1n === "bigint");
assertFalse(typeof 1n === "BigInt");
assertFalse(typeof 1 === "bigint");
-}{
- // TODO(neis): Enable once --no-opt can be removed.
- //
- // function Typeof(x) { return typeof x }
- // assertEquals(Typeof(zero), "bigint");
- // assertEquals(Typeof(zero), "bigint");
- // %OptimizeFunctionOnNextCall(Typeof);
- // assertEquals(Typeof(zero), "bigint");
}
// ToString
diff --git a/deps/v8/test/mjsunit/harmony/bigint/comparisons.js b/deps/v8/test/mjsunit/harmony/bigint/comparisons.js
index 7be5eb7ee5..513ff37d00 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/comparisons.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/comparisons.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --harmony-bigint --no-opt
+// Flags: --allow-natives-syntax --harmony-bigint
'use strict'
diff --git a/deps/v8/test/mjsunit/harmony/bigint/dec.js b/deps/v8/test/mjsunit/harmony/bigint/dec.js
index cdf1d96d60..5e1f40b2dd 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/dec.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/dec.js
@@ -6,9 +6,6 @@
// Flags: --harmony-bigint
-// TODO(adamk/jkummerow/neis): Support BigInts in TF unary ops.
-// Flags: --noopt
-
var data = [{
a: "-609648ccf253976b12f6b6c8e20790c17ef6b89ea9f536267783607cf465b1ca",
r: "-609648ccf253976b12f6b6c8e20790c17ef6b89ea9f536267783607cf465b1cb"
diff --git a/deps/v8/test/mjsunit/harmony/bigint/exp.js b/deps/v8/test/mjsunit/harmony/bigint/exp.js
new file mode 100644
index 0000000000..5a4601134f
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/bigint/exp.js
@@ -0,0 +1,43 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-bigint
+
+assertEquals(1n, 0n ** 0n);
+assertEquals(0n, 0n ** 1n);
+assertEquals(0n, 0n ** 23n);
+
+assertEquals(1n, 1n ** 0n);
+assertEquals(1n, 1n ** 1n);
+assertEquals(1n, 99n ** 0n);
+
+assertEquals(2n, 2n ** 1n);
+assertEquals(4n, 2n ** 2n);
+assertEquals(8n, 2n ** 3n);
+assertEquals(16n, 2n ** 4n);
+assertEquals(151115727451828646838272n, 2n ** 77n);
+
+assertEquals(3n, 3n ** 1n);
+assertEquals(9n, 3n ** 2n);
+assertEquals(27n, 3n ** 3n);
+assertEquals(81n, 3n ** 4n);
+assertEquals(243n, 3n ** 5n);
+assertEquals(30903154382632612361920641803529n, 3n ** 66n);
+
+assertEquals(1n, (-2n) ** 0n);
+assertEquals(-2n, (-2n) ** 1n);
+assertEquals(4n, (-2n) ** 2n);
+assertEquals(-8n, (-2n) ** 3n);
+assertEquals(16n, (-2n) ** 4n);
+assertEquals(-32n, (-2n) ** 5n);
+
+assertEquals(1n, (-3n) ** 0n);
+assertEquals(-3n, (-3n) ** 1n);
+assertEquals(9n, (-3n) ** 2n);
+assertEquals(-27n, (-3n) ** 3n);
+assertEquals(81n, (-3n) ** 4n);
+assertEquals(-243n, (-3n) ** 5n);
+
+assertThrows(() => 3n ** -2n, RangeError); // Negative exponent.
+assertThrows(() => 2n ** (1024n ** 4n), RangeError); // Too big.
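
For reference, BigInt exponentiation like the cases above is conventionally implemented as binary (square-and-multiply) exponentiation, costing O(log e) multiplications; a plausible sketch (V8's actual implementation may differ):

// Square-and-multiply over BigInts.
function bigPow(base, exp) {
  if (exp < 0n) throw new RangeError("Exponent must be non-negative");
  let result = 1n;
  while (exp > 0n) {
    if (exp & 1n) result *= base;  // fold in the current bit of the exponent
    base *= base;                  // square for the next bit
    exp >>= 1n;
  }
  return result;
}
console.log(bigPow(2n, 77n) === 2n ** 77n);  // true
console.log(bigPow(-3n, 5n));                // -243n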
diff --git a/deps/v8/test/mjsunit/harmony/bigint/inc.js b/deps/v8/test/mjsunit/harmony/bigint/inc.js
index 2773ed9110..64865a2b32 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/inc.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/inc.js
@@ -6,9 +6,6 @@
// Flags: --harmony-bigint
-// TODO(adamk/jkummerow/neis): Support BigInts in TF unary ops.
-// Flags: --noopt
-
var data = [{
a: "-989c298c6fc3",
r: "-989c298c6fc2"
diff --git a/deps/v8/test/mjsunit/harmony/bigint/json.js b/deps/v8/test/mjsunit/harmony/bigint/json.js
index 10afdfce02..eb0eefc4bb 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/json.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/json.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --harmony-bigint --no-opt
+// Flags: --allow-natives-syntax --harmony-bigint
'use strict'
diff --git a/deps/v8/test/mjsunit/harmony/bigint/neg.js b/deps/v8/test/mjsunit/harmony/bigint/neg.js
index 75548f62c3..8cec9cc21b 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/neg.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/neg.js
@@ -6,9 +6,6 @@
// Flags: --harmony-bigint
-// TODO(adamk/jkummerow/neis): Support BigInts in TF unary ops.
-// Flags: --noopt
-
var data = [{
a: "58ad59aa3aa9d04d4c12493966e204ef0500d5f92ecb31",
r: "-58ad59aa3aa9d04d4c12493966e204ef0500d5f92ecb31"
diff --git a/deps/v8/test/mjsunit/harmony/bigint/not.js b/deps/v8/test/mjsunit/harmony/bigint/not.js
index fe23c8f965..7ceaa01e63 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/not.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/not.js
@@ -6,9 +6,6 @@
// Flags: --harmony-bigint
-// TODO(adamk/jkummerow/neis): Support BigInts in TF unary ops.
-// Flags: --noopt
-
var data = [{
a: "3d02c87edc77722299f6559ecca038911f864a4e78c20af80f4a6d9",
r: "-3d02c87edc77722299f6559ecca038911f864a4e78c20af80f4a6da"
diff --git a/deps/v8/test/mjsunit/harmony/bigint/regressions.js b/deps/v8/test/mjsunit/harmony/bigint/regressions.js
index 45c8816fe7..3057fe1230 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/regressions.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/regressions.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-bigint --noopt
+// Flags: --harmony-bigint
var a = 5n;
var b = a / -1n;
@@ -16,3 +16,5 @@ assertEquals(0n, 5n % 1n);
assertEquals(0n, -5n % 1n);
assertEquals(0n, 5n % -1n);
assertEquals(0n, -5n % -1n);
+
+assertTrue(0n === 0n);
diff --git a/deps/v8/test/mjsunit/harmony/bigint/tonumber.js b/deps/v8/test/mjsunit/harmony/bigint/tonumber.js
index 0061d91d67..d2802a79be 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/tonumber.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/tonumber.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-bigint --no-opt
+// Flags: --harmony-bigint
function Check(bigint, number_string) {
var number = Number(number_string);
diff --git a/deps/v8/test/mjsunit/harmony/bigint/too-big-literal.js b/deps/v8/test/mjsunit/harmony/bigint/too-big-literal.js
deleted file mode 100644
index 242700191a..0000000000
--- a/deps/v8/test/mjsunit/harmony/bigint/too-big-literal.js
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Flags: --harmony-bigint --no-opt
-
-const MAX_BIGINT_BITS = 1024 * 1024; // Matches BigInt::kMaxLengthBits
-const MAX_BIGINT_CHARS = MAX_BIGINT_BITS / 4;
-
-const TOO_MANY_ONES = Array(MAX_BIGINT_CHARS + 2).join("1") + "n";
-
-const tooBigHex = "0x" + TOO_MANY_ONES;
-
-assertThrows(tooBigHex, SyntaxError);
diff --git a/deps/v8/test/mjsunit/harmony/bigint/turbo.js b/deps/v8/test/mjsunit/harmony/bigint/turbo.js
new file mode 100644
index 0000000000..87130ea101
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/bigint/turbo.js
@@ -0,0 +1,193 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-bigint
+
+'use strict'
+
+
+function test(f, {input, check}) {
+ let result;
+ try {
+ result = { value: f(input), exception: false }
+ } catch(e) {
+ result = { value: e, exception: true }
+ }
+ check(result);
+}
+
+function Test(f, ...cases) {
+ for (let i = 0; i < cases.length; ++i) {
+ test(f, cases[i]);
+ %OptimizeFunctionOnNextCall(f);
+ for (let j = 0; j < cases.length; ++j) {
+ test(f, cases[j]);
+ }
+ %DeoptimizeFunction(f);
+ }
+}
+
+
+function V(input, expected_value) {
+ function check(result) {
+ assertFalse(result.exception, input);
+ assertEquals(expected_value, result.value);
+ }
+ return {input, check};
+}
+
+function E(input, expected_exception) {
+ function check(result) {
+ assertTrue(result.exception, input);
+ assertInstanceof(result.value, expected_exception);
+ }
+ return {input, check};
+}
+
+
+const six = {[Symbol.toPrimitive]() {return 6n}};
+
+
+////////////////////////////////////////////////////////////////////////////////
+// The first argument to {Test} is the function to test. The other arguments are
+// the test cases, basically pairs of input and expected output. {Test} runs the
+// function first unoptimized on one of the inputs, and then optimized on all
+// inputs.
+////////////////////////////////////////////////////////////////////////////////
+
+
+Test(x => Number(x),
+ V(1n, 1), V(1, 1), V("", 0), V(1.4, 1.4), V(null, 0), V(six, 6));
+
+Test(x => String(x),
+ V(1n, "1"), V(1, "1"), V(1.4, "1.4"), V(null, "null"), V(six, "6"));
+
+Test(x => BigInt(x),
+ V(true, 1n), V(false, 0n), V(42n, 42n), E(NaN, RangeError), V(six, 6n));
+
+Test(x => typeof x,
+ V(1n, "bigint"), V(1, "number"), V(six, "object"));
+Test(x => typeof x == "bigint",
+ V(1n, true), V(1, false), V(six, false));
+
+Test(x => !x,
+ V(0n, true), V(42n, false), V(0x10000000000000000n, false), V(1, false),
+ V(undefined, true), V(six, false));
+Test(x => !!x,
+ V(0n, false), V(42n, true), V(0x10000000000000000n, true), V(1, true),
+ V(undefined, false), V(six, true));
+
+Test(x => +x,
+ E(-3n, TypeError), V(-4, -4), V(1.4, 1.4), V(null, 0), V("5", 5),
+ E(six, TypeError));
+
+Test(x => -x,
+ V(-3n, 3n), V(-4, 4), V(1.4, -1.4), V(null, -0), V("5", -5), V(six, -6n));
+
+Test(x => ~x,
+ V(-3n, 2n), V(-4, 3), V(1.5, -2), V(null, -1), V("5", -6), V(six, -7n));
+
+Test(x => ++x,
+ V(-3n, -2n), V(-4, -3), V(1.5, 2.5), V(null, 1), V("5", 6), V(six, 7n));
+
+Test(x => --x,
+ V(-3n, -4n), V(-4, -5), V(1.5, 0.5), V(null, -1), V("5", 4), V(six, 5n));
+
+Test(x => x++,
+ V(-3n, -3n), V(-4, -4), V(1.5, 1.5), V(null, 0), V("5", 5), V(six, 6n));
+
+Test(x => x--,
+ V(-3n, -3n), V(-4, -4), V(1.5, 1.5), V(null, 0), V("5", 5), V(six, 6n));
+
+Test(x => x + 42,
+ E(1n, TypeError), V(2, 44), V(null, 42), V("a", "a42"), E(six, TypeError));
+Test(x => x + 42n,
+ V(1n, 43n), E(2, TypeError), E(null, TypeError), V("a", "a42"), V(six,48n));
+
+Test(x => x - 4,
+ E(1n, TypeError), V(3, -1), V(null, -4), V("a", NaN), E(six, TypeError));
+Test(x => x - 4n,
+ V(1n, -3n), E(3, TypeError), E(null, TypeError), E("a", TypeError),
+ V(six, 2n));
+
+Test(x => x * 42,
+ E(2n, TypeError), V(3, 126), V("a", NaN), V(null, 0), E(six, TypeError));
+Test(x => x * 42n,
+ V(2n, 84n), E(3, TypeError), E("a", TypeError), E(null, TypeError),
+ V(six, 252n));
+
+Test(x => x / 2,
+ E(2n, TypeError), V(6, 3), V("a", NaN), V(null, 0), E(six, TypeError));
+Test(x => x / 2n,
+ V(2n, 1n), E(6, TypeError), E("a", TypeError), E(null, TypeError),
+ V(six, 3n));
+
+Test(x => x % 2,
+ E(2n, TypeError), V(3, 1), V("a", NaN), V(null, 0), E(six, TypeError));
+Test(x => x % 2n,
+ V(2n, 0n), E(3, TypeError), E("a", TypeError), E(null, TypeError),
+ V(six, 0n));
+
+Test(x => x | 5,
+ E(2n, TypeError), V(3, 7), V("a", 5), V(null, 5), E(six, TypeError));
+Test(x => x | 5n,
+ V(2n, 7n), E(3, TypeError), E("a", TypeError), E(null, TypeError),
+ V(six, 7n));
+
+Test(x => x & 5,
+ E(2n, TypeError), V(3, 1), V("a", 0), V(null, 0), E(six, TypeError));
+Test(x => x & 5n,
+ V(2n, 0n), E(3, TypeError), E("a", TypeError), E(null, TypeError),
+ V(six, 4n));
+
+Test(x => x ^ 5,
+ E(2n, TypeError), V(3, 6), V("a", 5), V(null, 5), E(six, TypeError));
+Test(x => x ^ 5n,
+ V(2n, 7n), E(3, TypeError), E("a", TypeError), E(null, TypeError),
+ V(six, 3n));
+
+Test(x => x << 3,
+ E(2n, TypeError), V(3, 24), V("a", 0), V(null, 0), E(six, TypeError));
+Test(x => x << 3n,
+ V(2n, 16n), E(3, TypeError), E("a", TypeError), E(null, TypeError),
+ V(six, 48n));
+
+Test(x => x >> 1,
+ E(2n, TypeError), V(3, 1), V("a", 0), V(null, 0), E(six, TypeError));
+Test(x => x >> 1n,
+ V(2n, 1n), E(3, TypeError), E("a", TypeError), E(null, TypeError),
+ V(six, 3n));
+
+Test(x => x >>> 1,
+ E(2n, TypeError), V(3, 1), V("a", 0), V(null, 0), E(six, TypeError));
+Test(x => x >>> 1n,
+ E(2n, TypeError), E(3, TypeError), E("a", TypeError), E(null, TypeError),
+ E(six, TypeError));
+
+Test(x => x === 42,
+ V(1n, false), V(2, false), V(null, false), V("a", false), V(six, false));
+Test(x => x === 42,
+ V(42n, false), V(42, true), V(null, false), V("42", false), V(six, false));
+Test(x => x === 42n,
+ V(1n, false), V(2, false), V(null, false), V("a", false), V(six, false));
+Test(x => x === 42n,
+ V(42n, true), V(42, false), V(null, false), V("42", false), V(six, false));
+
+Test(x => x == 42,
+ V(1n, false), V(2, false), V(null, false), V("a", false), V(six, false));
+Test(x => x == 42,
+ V(42n, true), V(42, true), V(null, false), V("42", true), V(six, false));
+Test(x => x == 42n,
+ V(1n, false), V(2, false), V(null, false), V("a", false), V(six, false));
+Test(x => x == 42n,
+ V(42n, true), V(42, true), V(null, false), V("42", true), V(six, false));
+
+Test(x => x < 42,
+ V(1n, true), V(2, true), V(null, true), V("41", true), V(six, true));
+Test(x => x < 42,
+ V(42n, false), V(42, false), V(null, true), V("42", false), V(six, true));
+Test(x => x < 42n,
+ V(1n, true), V(2, true), V(null, true), V("41", true), V(six, true));
+Test(x => x < 42n,
+ V(42n, false), V(42, false), V(null, true), V("42", false), V(six, true));
diff --git a/deps/v8/test/mjsunit/harmony/for-await-of.js b/deps/v8/test/mjsunit/harmony/for-await-of.js
index efcfdab2ea..e23758a5e1 100644
--- a/deps/v8/test/mjsunit/harmony/for-await-of.js
+++ b/deps/v8/test/mjsunit/harmony/for-await-of.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-async-iteration --allow-natives-syntax
+// Flags: --allow-natives-syntax
let testFailed = false;
let testFailure;
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-15.js b/deps/v8/test/mjsunit/harmony/modules-import-15.js
index ac33cd50b2..32255ce980 100644
--- a/deps/v8/test/mjsunit/harmony/modules-import-15.js
+++ b/deps/v8/test/mjsunit/harmony/modules-import-15.js
@@ -29,7 +29,8 @@ async function test2() {
} catch(e) {
assertInstanceof(e, SyntaxError);
assertEquals(
- "The requested module does not provide an export named 'default'",
+ "The requested module 'modules-skip-empty.js' does not provide an " +
+ "export named 'default'",
e.message);
ran = true;
}
diff --git a/deps/v8/test/mjsunit/harmony/optional-catch-binding-breaks.js b/deps/v8/test/mjsunit/harmony/optional-catch-binding-breaks.js
new file mode 100644
index 0000000000..82be60cda1
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/optional-catch-binding-breaks.js
@@ -0,0 +1,65 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-optional-catch-binding
+
+let state = 'initial';
+x: try {
+ throw new Error('caught');
+ state = 'unreachable';
+} catch {
+ assertEquals(state, 'initial');
+ state = 'caught';
+ break x;
+ state = 'unreachable';
+}
+assertEquals(state, 'caught');
+
+
+state = 'initial';
+x: try {
+ throw new Error('caught');
+ state = 'unreachable';
+} catch {
+ assertEquals(state, 'initial');
+ state = 'caught';
+ break x;
+ state = 'unreachable';
+} finally {
+ assertEquals(state, 'caught');
+ state = 'finally';
+}
+assertEquals(state, 'finally');
+
+
+state = 'initial';
+x: {
+ y: try {
+ throw new Error('caught');
+ state = 'unreachable';
+ } catch {
+ assertEquals(state, 'initial');
+ state = 'caught';
+ break x;
+ state = 'unreachable';
+ } finally {
+ assertEquals(state, 'caught');
+ state = 'finally';
+ break y;
+ state = 'unreachable';
+ }
+ assertEquals(state, 'finally');
+ state = 'after block';
+}
+assertEquals(state, 'after block');
+
+
+do {
+ try {
+ throw new Error();
+ } catch {
+ break;
+ }
+ assertUnreachable();
+} while(false);
diff --git a/deps/v8/test/mjsunit/harmony/optional-catch-binding.js b/deps/v8/test/mjsunit/harmony/optional-catch-binding.js
new file mode 100644
index 0000000000..093288c4e6
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/optional-catch-binding.js
@@ -0,0 +1,39 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-optional-catch-binding
+
+let state = 'initial';
+try {
+ throw new Error('caught');
+ state = 'unreachable';
+} catch { // Note the lack of a binding
+ assertEquals(state, 'initial');
+ state = 'caught';
+}
+assertEquals(state, 'caught');
+
+
+let sigil1 = {};
+try {
+ throw sigil1;
+} catch (e) {
+ assertEquals(e, sigil1);
+}
+
+
+let sigil2 = {};
+let reached = false;
+try {
+ try {
+ throw sigil1;
+ } catch {
+ reached = true;
+ } finally {
+ throw sigil2;
+ }
+} catch (e) {
+ assertEquals(e, sigil2);
+}
+assertTrue(reached);
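
Beyond the control-flow corner cases above, the typical use of the optional catch binding is discarding a caught value that carries no useful information; a minimal example (assuming the same flag, or any engine shipping ES2019):

// Parse leniently: the caught value itself is irrelevant here.
function tryParseJSON(text) {
  try {
    return JSON.parse(text);
  } catch {  // no binding needed
    return null;
  }
}
console.log(tryParseJSON('{"a": 1}'));  // { a: 1 }
console.log(tryParseJSON('not json'));  // null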
diff --git a/deps/v8/test/mjsunit/harmony/promise-prototype-finally.js b/deps/v8/test/mjsunit/harmony/promise-prototype-finally.js
index 3668ab5538..4e91f2e6d1 100644
--- a/deps/v8/test/mjsunit/harmony/promise-prototype-finally.js
+++ b/deps/v8/test/mjsunit/harmony/promise-prototype-finally.js
@@ -605,3 +605,13 @@ testAsync(assert => {
.then(() => assert.equals(1, value));
}, "PromiseResolve-ordering");
+
+(function testIsObject() {
+ var called = false;
+ var p = new Proxy(Promise.resolve(), {});
+ var oldThen = Promise.prototype.then;
+ Promise.prototype.then = () => called = true;
+ Promise.prototype.finally.call(p);
+ assertTrue(called);
+ Promise.prototype.then = oldThen;
+})();
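
testIsObject pins down that Promise.prototype.finally only requires its receiver to be an object whose `then` is callable: everything it does is dispatched through that `then`. Roughly, ignoring species-constructor handling and the non-callable-onFinally path, its shape is:

// Simplified sketch of Promise.prototype.finally; not the spec-complete
// algorithm (species lookup and non-callable onFinally are omitted).
function promiseFinally(onFinally) {
  return this.then(
      value => Promise.resolve(onFinally()).then(() => value),
      reason => Promise.resolve(onFinally()).then(() => { throw reason; }));
}
// Installed as a method, this dispatches through the receiver's `then`,
// which is exactly what the Proxy-wrapped case in the test relies on.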
diff --git a/deps/v8/test/mjsunit/harmony/public-instance-class-fields.js b/deps/v8/test/mjsunit/harmony/public-instance-class-fields.js
index acf0f13a99..a82a0ac919 100644
--- a/deps/v8/test/mjsunit/harmony/public-instance-class-fields.js
+++ b/deps/v8/test/mjsunit/harmony/public-instance-class-fields.js
@@ -52,16 +52,8 @@
b = x;
c = 1;
hasOwnProperty() { return 1;}
- static [x] = 2;
- static b = 3;
- static d;
}
- assertEquals(2, C.a);
- assertEquals(3, C.b);
- assertEquals(undefined, C.d);
- assertEquals(undefined, C.c);
-
let c = new C;
assertEquals(undefined, c.a);
assertEquals('a', c.b);
@@ -270,7 +262,7 @@
let c = new C;
assertEquals(1, c.a);
assertEquals(undefined, c.b);
- assertEquals(undefined, c.c1);
+ assertEquals(undefined, c[c1]);
}
{
@@ -281,10 +273,10 @@
}
class C {
- [run(1)] = run(7);
- [run(2)] = run(8);
+ [run(1)] = run(6);
+ [run(2)] = run(7);
[run(3)]() { run(9);}
- static [run(4)] = run(6);
+ [run(4)] = run(8);
[run(5)]() { throw new Error('should not execute');};
}
@@ -303,10 +295,10 @@ function x() {
}
class C {
- [run(1)] = run(7);
- [run(2)] = run(8);
+ [run(1)] = run(6);
+ [run(2)] = run(7);
[run(3)]() { run(9);}
- static [run(4)] = run(6);
+ [run(4)] = run(8);
[run(5)]() { throw new Error('should not execute');};
}
@@ -315,7 +307,7 @@ function x() {
assertEquals([1, 2, 3, 4, 5, 6, 7, 8, 9], log);
}
}
-x();
+x()();
{
class C {}
@@ -637,20 +629,6 @@ x();
}
{
- function t() {
- return class {
- ['x'] = 1;
- static ['x'] = 2;
- }
- }
-
- let klass = t();
- let obj = new klass;
- assertEquals(1, obj.x);
- assertEquals(2, klass.x);
-}
-
-{
new class {
t = 1;
constructor(t = this.t) {
@@ -674,3 +652,47 @@ x();
}
}, ReferenceError);
}
+
+{
+ class X {
+ p = function() { return arguments[0]; }
+ }
+
+ let x = new X;
+ assertEquals(1, x.p(1));
+}
+
+{
+ class X {
+ t = () => {
+ function p() { return arguments[0]; };
+ return p;
+ }
+ }
+
+ let x = new X;
+ let p = x.t();
+ assertEquals(1, p(1));
+}
+
+{
+ class X {
+ t = () => {
+ function p() { return eval("arguments[0]"); };
+ return p;
+ }
+ }
+
+ let x = new X;
+ let p = x.t();
+ assertEquals(1, p(1));
+}
+
+{
+ class X {
+ p = eval("(function() { return arguments[0]; })(1)");
+ }
+
+ let x = new X;
+ assertEquals(1, x.p);
+}
diff --git a/deps/v8/test/mjsunit/harmony/public-static-class-fields.js b/deps/v8/test/mjsunit/harmony/public-static-class-fields.js
index 0477e3dca7..3de3e2d9d2 100644
--- a/deps/v8/test/mjsunit/harmony/public-static-class-fields.js
+++ b/deps/v8/test/mjsunit/harmony/public-static-class-fields.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-public-fields
+// Flags: --harmony-public-fields --harmony-static-fields
"use strict";
@@ -262,7 +262,7 @@
assertEquals(1, C.a);
assertEquals(undefined, C.b);
- assertEquals(undefined, C.c);
+ assertEquals(undefined, C[c]);
}
{
@@ -310,7 +310,51 @@ function x() {
assertEquals([1, 2, 3, 4, 5, 6, 7, 8, 9], log);
}
}
-x();
+x()();
+
+{
+ let log = [];
+ function run(i) {
+ log.push(i);
+ return i;
+ }
+
+ class C {
+ [run(1)] = run(7);
+ [run(2)] = run(8);
+ [run(3)]() { run(9);}
+ static [run(4)] = run(6);
+ [run(5)]() { throw new Error('should not execute');};
+ }
+
+ let c = new C;
+ c[3]();
+ assertEquals([1, 2, 3, 4, 5, 6, 7, 8, 9], log);
+}
+
+function y() {
+ // This tests lazy parsing.
+ return function() {
+ let log = [];
+ function run(i) {
+ log.push(i);
+ return i;
+ }
+
+ class C {
+ [run(1)] = run(7);
+ [run(2)] = run(8);
+ [run(3)]() { run(9);}
+ static [run(4)] = run(6);
+ [run(5)]() { throw new Error('should not execute');};
+ }
+
+ let c = new C;
+ c[3]();
+ assertEquals([1, 2, 3, 4, 5, 6, 7, 8, 9], log);
+ }
+}
+y()();
{
class C {}
@@ -333,3 +377,83 @@ x();
let obj = new klass;
assertEquals(2, klass.x);
}
+
+{
+ let x = 'a';
+ class C {
+ a;
+ b = x;
+ c = 1;
+ hasOwnProperty() { return 1;}
+ static [x] = 2;
+ static b = 3;
+ static d;
+ }
+
+ assertEquals(2, C.a);
+ assertEquals(3, C.b);
+ assertEquals(undefined, C.d);
+ assertEquals(undefined, C.c);
+
+ let c = new C;
+ assertEquals(undefined, c.a);
+ assertEquals('a', c.b);
+ assertEquals(1, c.c);
+ assertEquals(undefined, c.d);
+ assertEquals(1, c.hasOwnProperty());
+}
+
+{
+ function t() {
+ return class {
+ ['x'] = 1;
+ static ['x'] = 2;
+ }
+ }
+
+ let klass = t();
+ let obj = new klass;
+ assertEquals(1, obj.x);
+ assertEquals(2, klass.x);
+}
+
+
+{
+ class X {
+ static p = function() { return arguments[0]; }
+ }
+
+ assertEquals(1, X.p(1));
+}
+
+{
+ class X {
+ static t = () => {
+ function p() { return arguments[0]; };
+ return p;
+ }
+ }
+
+ let p = X.t();
+ assertEquals(1, p(1));
+}
+
+{
+ class X {
+ static t = () => {
+ function p() { return eval("arguments[0]"); };
+ return p;
+ }
+ }
+
+ let p = X.t();
+ assertEquals(1, p(1));
+}
+
+{
+ class X {
+ static p = eval("(function() { return arguments[0]; })(1)");
+ }
+
+ assertEquals(1, X.p);
+}
diff --git a/deps/v8/test/mjsunit/harmony/regexp-named-captures.js b/deps/v8/test/mjsunit/harmony/regexp-named-captures.js
index 1ad29f6b49..72041b99bf 100644
--- a/deps/v8/test/mjsunit/harmony/regexp-named-captures.js
+++ b/deps/v8/test/mjsunit/harmony/regexp-named-captures.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-regexp-named-captures
+// Flags: --harmony-regexp-named-captures --allow-natives-syntax
// Malformed named captures.
assertThrows("/(?<>a)/u", SyntaxError); // Empty name.
@@ -418,3 +418,124 @@ function toSlowMode(re) {
assertEquals("cd", "abcd".replace(re, "$<fth>"));
assertEquals("cd", "abcd".replace(re, "$<$1>"));
}
+
+// Tests for 'groups' semantics on the regexp result object.
+// https://crbug.com/v8/7192
+
+{
+ const re = /./;
+ const result = re.exec("a");
+ assertTrue(%SpeciesProtector());
+ assertEquals(result.__proto__, Array.prototype);
+ assertTrue(result.hasOwnProperty('groups'));
+ assertArrayEquals(["a"], result);
+ assertEquals(0, result.index);
+ assertEquals(undefined, result.groups);
+
+ Array.prototype.groups = { a: "b" };
+ assertTrue(%SpeciesProtector());
+ assertEquals("$<a>", "a".replace(re, "$<a>"));
+ Array.prototype.groups = undefined;
+}
+
+{
+ const re = toSlowMode(/./);
+ const result = re.exec("a");
+ assertTrue(%SpeciesProtector());
+ assertEquals(result.__proto__, Array.prototype);
+ assertTrue(result.hasOwnProperty('groups'));
+ assertArrayEquals(["a"], result);
+ assertEquals(0, result.index);
+ assertEquals(undefined, result.groups);
+
+ Array.prototype.groups = { a: "b" };
+ assertTrue(%SpeciesProtector());
+ assertEquals("$<a>", "a".replace(re, "$<a>"));
+ Array.prototype.groups = undefined;
+}
+
+{
+ const re = /(?<a>a).|(?<x>x)/;
+ const result = re.exec("ab");
+ assertTrue(%SpeciesProtector());
+ assertEquals(result.__proto__, Array.prototype);
+ assertTrue(result.hasOwnProperty('groups'));
+ assertArrayEquals(["ab", "a", undefined], result);
+ assertEquals(0, result.index);
+ assertEquals({a: "a", x: undefined}, result.groups);
+
+  // a is a matched named capture, x is an unmatched named capture, and z
+ // is not a named capture.
+ Array.prototype.groups = { a: "b", x: "y", z: "z" };
+ assertTrue(%SpeciesProtector());
+ assertEquals("a", "ab".replace(re, "$<a>"));
+ assertEquals("", "ab".replace(re, "$<x>"));
+ assertEquals("", "ab".replace(re, "$<z>"));
+ Array.prototype.groups = undefined;
+}
+
+{
+ const re = toSlowMode(/(?<a>a).|(?<x>x)/);
+ const result = re.exec("ab");
+ assertTrue(%SpeciesProtector());
+ assertEquals(result.__proto__, Array.prototype);
+ assertTrue(result.hasOwnProperty('groups'));
+ assertArrayEquals(["ab", "a", undefined], result);
+ assertEquals(0, result.index);
+ assertEquals({a: "a", x: undefined}, result.groups);
+
+  // a is a matched named capture, x is an unmatched named capture, and z
+ // is not a named capture.
+ Array.prototype.groups = { a: "b", x: "y", z: "z" };
+ assertTrue(%SpeciesProtector());
+ assertEquals("a", "ab".replace(re, "$<a>"));
+ assertEquals("", "ab".replace(re, "$<x>"));
+ assertEquals("", "ab".replace(re, "$<z>"));
+ Array.prototype.groups = undefined;
+}
+
+{
+ class FakeRegExp extends RegExp {
+ exec(subject) {
+ const fake_result = [ "ab", "a" ];
+ fake_result.index = 0;
+ // groups is not set, triggering prototype lookup.
+ return fake_result;
+ }
+ };
+
+ const re = new FakeRegExp();
+ const result = re.exec("ab");
+ assertTrue(%SpeciesProtector());
+ assertEquals(result.__proto__, Array.prototype);
+ assertFalse(result.hasOwnProperty('groups'));
+
+ Array.prototype.groups = { a: "b" };
+ Array.prototype.groups.__proto__.b = "c";
+ assertTrue(%SpeciesProtector());
+ assertEquals("b", "ab".replace(re, "$<a>"));
+ assertEquals("c", "ab".replace(re, "$<b>"));
+ Array.prototype.groups = undefined;
+}
+
+{
+ class FakeRegExp extends RegExp {
+ exec(subject) {
+ const fake_result = [ "ab", "a" ];
+ fake_result.index = 0;
+ fake_result.groups = { a: "b" };
+ fake_result.groups.__proto__.b = "c";
+ return fake_result;
+ }
+ };
+
+ const re = new FakeRegExp();
+ const result = re.exec("ab");
+ assertTrue(%SpeciesProtector());
+ assertEquals(result.__proto__, Array.prototype);
+ assertTrue(result.hasOwnProperty('groups'));
+ assertEquals({ a: "b" }, result.groups);
+
+ assertEquals("b", "ab".replace(re, "$<a>"));
+ assertEquals("c", "ab".replace(re, "$<b>"));
+}
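
The replacement path being exercised here resolves $<name> with an ordinary Get on the match result's `groups` object, which is why inherited and prototype-injected properties are visible in the cases above. For contrast, the basic behavior (run with --harmony-regexp-named-captures on this V8, or any later engine where it is on by default):

const re = /(?<first>\w+) (?<last>\w+)/;
console.log("Ada Lovelace".replace(re, "$<last>, $<first>"));  // "Lovelace, Ada"
// Unmatched named groups substitute as the empty string:
console.log("ab".replace(/(?<a>a)|(?<x>x)/, "[$<x>]"));  // "[]b"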
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-6322.js b/deps/v8/test/mjsunit/harmony/regress/regress-6322.js
index 9c312a35a5..927b56ea79 100644
--- a/deps/v8/test/mjsunit/harmony/regress/regress-6322.js
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-6322.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-async-iteration
-
// Crash with --verify-heap
(async function() { for await (let { a = class b { } } of [{}]) { } })();
(async function() { var a; for await ({ a = class b { } } of [{}]) { } })();
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-772649.js b/deps/v8/test/mjsunit/harmony/regress/regress-772649.js
index d080410226..2ff27670df 100644
--- a/deps/v8/test/mjsunit/harmony/regress/regress-772649.js
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-772649.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --harmony-async-iteration
+// Flags: --allow-natives-syntax
async function* gen([[notIterable]] = [null]) {}
assertThrows(() => gen(), TypeError);
diff --git a/deps/v8/test/mjsunit/harmony/sharedarraybuffer.js b/deps/v8/test/mjsunit/harmony/sharedarraybuffer.js
index 12e8c9508e..a79574d69f 100644
--- a/deps/v8/test/mjsunit/harmony/sharedarraybuffer.js
+++ b/deps/v8/test/mjsunit/harmony/sharedarraybuffer.js
@@ -89,9 +89,6 @@ function TestTypedArray(constr, elementSize, typicalElement) {
assertEquals("[object " + constr.name + "]",
Object.prototype.toString.call(a0));
- // TODO(binji): Should this return false here? It is a view, but it doesn't
- // view a SharedArrayBuffer...
- assertTrue(SharedArrayBuffer.isView(a0));
assertSame(elementSize, a0.BYTES_PER_ELEMENT);
assertSame(30, a0.length);
assertSame(30*elementSize, a0.byteLength);
diff --git a/deps/v8/test/mjsunit/harmony/symbol-async-iterator.js b/deps/v8/test/mjsunit/harmony/symbol-async-iterator.js
index 8a92add635..5b7e6b5f40 100644
--- a/deps/v8/test/mjsunit/harmony/symbol-async-iterator.js
+++ b/deps/v8/test/mjsunit/harmony/symbol-async-iterator.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-async-iteration
-
assertTrue(Symbol.hasOwnProperty('asyncIterator'));
assertEquals('symbol', typeof Symbol.asyncIterator);
assertInstanceof(Object(Symbol.asyncIterator), Symbol);
diff --git a/deps/v8/test/mjsunit/ic-lookup-on-receiver.js b/deps/v8/test/mjsunit/ic-lookup-on-receiver.js
new file mode 100644
index 0000000000..8be3779f05
--- /dev/null
+++ b/deps/v8/test/mjsunit/ic-lookup-on-receiver.js
@@ -0,0 +1,44 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function TestLookupOnReceiver() {
+ let log = [];
+
+ function f(o, v) {
+ o.x = v;
+ return o.x;
+ }
+
+ let p = {};
+ Object.defineProperty(
+ p, "x",
+ {
+ get: function() { return 153; },
+ set: function(v) { log.push("set"); },
+ configurable: true
+ });
+
+ let o = Object.create(p);
+  // Turn o into dictionary mode.
+ for (let i = 0; i < 2048; i++) {
+ o["p"+i] = 0;
+ }
+ assertFalse(%HasFastProperties(o));
+
+ for (let i = 0; i < 5; i++) {
+ log.push(f(o, i));
+ }
+
+ Object.defineProperty(o, "x", { value: 0, configurable: true, writable: true});
+
+ for (let i = 0; i < 5; i++) {
+ log.push(f(o, 42 + i));
+ }
+
+ assertEquals(log,
+ ["set", 153, "set", 153, "set", 153, "set", 153, "set", 153,
+ 42, 43, 44, 45, 46]);
+})();
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index aa59fb680a..d91ff6f015 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -31,6 +31,7 @@
# tested standalone.
'modules-skip*': [SKIP],
'harmony/modules-skip*': [SKIP],
+ 'regress/modules-skip*': [SKIP],
# All tests in the bug directory are expected to fail.
'bugs/*': [FAIL],
@@ -78,13 +79,16 @@
##############################################################################
# No need to waste time for this test.
- 'd8-performance-now': [PASS, NO_VARIANTS],
+ 'd8/d8-performance-now': [PASS, NO_VARIANTS],
'regress/regress-crbug-491062': [PASS, NO_VARIANTS],
# Issue 488: this test sometimes times out.
# TODO(arm): This seems to flush out a bug on arm with simulator.
'array-constructor': [PASS, SLOW, ['arch == arm and simulator == True', SKIP]],
+  # Very slow test.
+  'regress/regress-crbug-808192': [PASS, NO_VARIANTS, ['arch == arm or arch == arm64 or arch == android_arm or arch == android_arm64 or arch == mipsel or arch == mips64el or arch == mips64 or arch == mips', SKIP]],
+
# Very slow on ARM and MIPS, contains no architecture dependent code.
'unicode-case-overoptimization': [PASS, NO_VARIANTS, ['arch == arm or arch == arm64 or arch == android_arm or arch == android_arm64 or arch == mipsel or arch == mips64el or arch == mips64 or arch == mips', SKIP]],
'regress/regress-3976': [PASS, NO_VARIANTS, ['arch == arm or arch == arm64 or arch == android_arm or arch == android_arm64 or arch == mipsel or arch == mips64el or arch == mips64 or arch == mips', SKIP]],
@@ -109,11 +113,17 @@
# we cannot run several variants of d8-os simultaneously, since all of them
# get the same random seed and would generate the same directory name. Besides
# that, it doesn't make sense to run several variants of d8-os anyways.
- 'd8-os': [PASS, NO_VARIANTS, ['isolates or arch == android_arm or arch == android_arm64 or arch == android_ia32', SKIP]],
+ 'd8/d8-os': [PASS, NO_VARIANTS, ['isolates or arch == android_arm or arch == android_arm64 or arch == android_ia32', SKIP]],
'tools/tickprocessor': [PASS, NO_VARIANTS, ['arch == android_arm or arch == android_arm64 or arch == android_ia32', SKIP]],
'tools/dumpcpp': [PASS, NO_VARIANTS, ['arch == android_arm or arch == android_arm64 or arch == android_ia32', SKIP]],
##############################################################################
+ # This test generates a file in the test directory, so we cannot run several
+  # variants of the test simultaneously. Additionally, the test should not be
+ # affected by variants.
+ 'd8/enable-tracing': [PASS, NO_VARIANTS],
+
+ ##############################################################################
# Long running test that reproduces memory leak and should be run manually.
'regress/regress-2073': [SKIP],
@@ -135,9 +145,9 @@
'math-floor-of-div-nosudiv': [PASS, SLOW, ['arch not in [arm, arm64, android_arm, android_arm64]', SKIP]],
# Too slow for slow variants.
- 'asm/embenchen/*': [PASS, SLOW, FAST_VARIANTS],
- 'asm/poppler/*': [PASS, SLOW, FAST_VARIANTS],
- 'asm/sqlite3/*': [PASS, SLOW, FAST_VARIANTS],
+ 'asm/embenchen/*': [PASS, SLOW, NO_VARIANTS],
+ 'asm/poppler/*': [PASS, SLOW, NO_VARIANTS],
+ 'asm/sqlite3/*': [PASS, SLOW, NO_VARIANTS],
# Slow tests.
'copy-on-write-assert': [PASS, SLOW],
@@ -162,6 +172,7 @@
'regexp-modifiers-autogenerated-i18n': [PASS, ['no_i18n == True', FAIL]],
# desugaring regexp property class relies on ICU.
'harmony/regexp-property-*': [PASS, ['no_i18n == True', FAIL]],
+ 'regress/regress-793588': [PASS, ['no_i18n == True', FAIL]],
# noi18n build cannot parse characters in supplementary plane.
'harmony/regexp-named-captures': [PASS, ['no_i18n == True', FAIL]],
@@ -193,13 +204,19 @@
}], # novfp3 == True
##############################################################################
+# TODO(ahaas): Port multiple return values to ARM, MIPS, S390 and PPC
+['arch == arm or arch == arm64 or arch == mips or arch == mips64 or arch == mipsel or arch == mips64el or arch == s390 or arch == s390x or arch == ppc or arch == ppc64', {
+ 'wasm/multi-value': [SKIP],
+}],
+
+##############################################################################
['gc_stress == True', {
# Skip tests not suitable for GC stress.
'allocation-site-info': [SKIP],
'array-constructor-feedback': [SKIP],
'array-feedback': [SKIP],
'array-literal-feedback': [SKIP],
- 'd8-performance-now': [SKIP],
+ 'd8/d8-performance-now': [SKIP],
'elements-kind': [SKIP],
'elements-transition-hoisting': [SKIP],
'fast-prototype': [SKIP],
@@ -250,7 +267,7 @@
'regress/regress-inline-getter-near-stack-limit': [PASS, SLOW],
# BUG(v8:4779): Crashes flakily with stress mode on arm64.
- 'array-splice': [PASS, SLOW, ['arch == arm64', FAST_VARIANTS]],
+ 'array-splice': [PASS, SLOW, ['arch == arm64', NO_VARIANTS]],
# BUG(chromium:751825): Crashes flakily.
'wasm/js-api': [SKIP],
@@ -333,6 +350,9 @@
'unicode-test': [PASS, SLOW],
'wasm/atomics': [PASS, SLOW],
'whitespaces': [PASS, SLOW],
+
+ # BUG(v8:7247).
+ 'regress/regress-779407': [PASS, SLOW, NO_VARIANTS],
}], # 'arch == arm64'
['arch == arm64 and mode == debug and simulator_run', {
@@ -578,9 +598,6 @@
'math-floor-of-div-nosudiv': [PASS, ['mode == debug', SKIP]],
'unicodelctest': [PASS, ['mode == debug', SKIP]],
- # BUG(v8:4495).
- 'es6/collections': [PASS, ['arch == ia32', FAST_VARIANTS]],
-
# Setting the timezone and locale with environment variables unavailable
'icu-date-to-string': [SKIP],
'icu-date-lord-howe': [SKIP],
@@ -624,14 +641,19 @@
'regress/regress-336820': [SKIP],
'regress/regress-748069': [SKIP],
'regress/regress-778668': [SKIP],
+ 'ignition/regress-672027': [PASS, ['tsan', SKIP]],
}], # 'gc_fuzzer == True'
##############################################################################
['predictable == True', {
# Skip tests that are known to be non-deterministic.
- 'd8-worker-sharedarraybuffer': [SKIP],
- 'd8-os': [SKIP],
+ 'd8/d8-worker-sharedarraybuffer': [SKIP],
+ 'd8/d8-os': [SKIP],
+ 'harmony/futex': [SKIP],
+
+ # BUG(v8:7166).
+ 'd8/enable-tracing': [SKIP],
}], # 'predictable == True'
##############################################################################
@@ -735,16 +757,17 @@
}], # arch != x64 and arch != ia32
##############################################################################
-# BUG(v8:7138).
-['arch == arm and not simulator_run and variant == wasm_traps', {
- '*': [SKIP],
-}], # arch == arm and not simulator_run and variant == wasm_traps
-
-##############################################################################
['variant == liftoff', {
# In the liftoff variant, liftoff compilation happens even though the test
# does not explicitly enable it.
'wasm/default-liftoff-setting': [SKIP],
}], # variant == liftoff
+##############################################################################
+['variant == slow_path and gc_stress', {
+ # Slow tests.
+ 'regress/regress-crbug-493779': [SKIP],
+ 'string-replace-gc': [SKIP],
+}], # variant == slow_path and gc_stress
+
]
diff --git a/deps/v8/test/mjsunit/optimized-array-every.js b/deps/v8/test/mjsunit/optimized-array-every.js
new file mode 100644
index 0000000000..0cbab7df67
--- /dev/null
+++ b/deps/v8/test/mjsunit/optimized-array-every.js
@@ -0,0 +1,520 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turbo-inline-array-builtins --opt
+// Flags: --no-always-opt
+
+// Verify that early exit from every() works properly.
+(() => {
+ const a = [1, 2, 3, 4, 5];
+ let result = 0;
+ function earlyExit() {
+ return a.every(v => {
+ result += v;
+ return v < 2;
+ });
+ }
+ assertFalse(earlyExit());
+ earlyExit();
+ %OptimizeFunctionOnNextCall(earlyExit);
+ assertFalse(earlyExit());
+ assertEquals(9, result);
+})();
+
+// Soft-deopt plus early exit.
+(() => {
+ const a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+ let result = 0;
+ function softyPlusEarlyExit(deopt) {
+ return a.every(v => {
+ result += v;
+ if (v === 4 && deopt) {
+ a.abc = 25;
+ }
+ return v < 8;
+ });
+ }
+ assertFalse(softyPlusEarlyExit(false));
+ softyPlusEarlyExit(false);
+ %OptimizeFunctionOnNextCall(softyPlusEarlyExit);
+ assertFalse(softyPlusEarlyExit(true));
+ assertEquals(36*3, result);
+})();
+
+// Soft-deopt synced with early exit, which forces the lazy deoptimization
+// continuation handler to exit.
+(() => {
+ const a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+ let called_values = [];
+ function softyPlusEarlyExit(deopt) {
+ called_values = [];
+ return a.every(v => {
+ called_values.push(v);
+ if (v === 4 && deopt) {
+ a.abc = 25;
+ return false;
+ }
+ return v < 8;
+ });
+ }
+ assertFalse(softyPlusEarlyExit(false));
+ assertArrayEquals([1, 2, 3, 4, 5, 6, 7, 8], called_values);
+ softyPlusEarlyExit(false);
+ %OptimizeFunctionOnNextCall(softyPlusEarlyExit);
+ assertFalse(softyPlusEarlyExit(true));
+ assertArrayEquals([1, 2, 3, 4], called_values);
+})();
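+
+// Note on the assertion above: in the deopt run the callback both triggers a
+// deopt (by adding a property to a) and returns false at v === 4, so the
+// continuation that resumes after the callback must honor the early exit;
+// that is why called_values stops at [1, 2, 3, 4].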
+
+// Unknown field access leads to soft-deopt unrelated to every, should still
+// lead to correct result.
+(() => {
+ const a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25];
+ let result = 0;
+ function eagerDeoptInCalled(deopt) {
+ return a.every((v, i) => {
+ if (i === 13 && deopt) {
+ a.abc = 25;
+ }
+ result += v;
+ return true;
+ });
+ }
+ eagerDeoptInCalled();
+ eagerDeoptInCalled();
+ %OptimizeFunctionOnNextCall(eagerDeoptInCalled);
+ eagerDeoptInCalled();
+ assertTrue(eagerDeoptInCalled(true));
+ eagerDeoptInCalled();
+ assertEquals(1625, result);
+})();
+
+// Length change detected during loop, must cause properly handled eager deopt.
+(() => {
+ let called_values;
+ function eagerDeoptInCalled(deopt) {
+ const a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+ called_values = [];
+ return a.every((v,i) => {
+ called_values.push(v);
+ a.length = (i === 5 && deopt) ? 8 : 10;
+ return true;
+ });
+ }
+ assertTrue(eagerDeoptInCalled());
+ assertArrayEquals([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], called_values);
+ eagerDeoptInCalled();
+ %OptimizeFunctionOnNextCall(eagerDeoptInCalled);
+ assertTrue(eagerDeoptInCalled());
+ assertTrue(eagerDeoptInCalled(true));
+ assertArrayEquals([1, 2, 3, 4, 5, 6, 7, 8], called_values);
+ eagerDeoptInCalled();
+})();
+
+// Lazy deopt from a callback that changes the input array. Deopt in a callback
+// execution that returns true.
+(() => {
+ const a = [1, 2, 3, 4, 5];
+ function lazyChanger(deopt) {
+ return a.every((v, i) => {
+ if (i === 3 && deopt) {
+ a[3] = 100;
+ %DeoptimizeNow();
+ }
+ return true;
+ });
+ }
+ assertTrue(lazyChanger());
+ lazyChanger();
+ %OptimizeFunctionOnNextCall(lazyChanger);
+ assertTrue(lazyChanger(true));
+ assertTrue(lazyChanger());
+})();
+
+// Lazy deopt from a callback that always returns true, so every() runs to
+// completion. Verifies the lazy-after-callback continuation builtin.
+(() => {
+ const a = [1, 2, 3, 4, 5];
+ function lazyChanger(deopt) {
+ return a.every((v, i) => {
+ if (i === 3 && deopt) {
+ %DeoptimizeNow();
+ }
+ return true;
+ });
+ }
+ assertTrue(lazyChanger());
+ lazyChanger();
+ %OptimizeFunctionOnNextCall(lazyChanger);
+ assertTrue(lazyChanger(true));
+ assertTrue(lazyChanger());
+})();
+
+// Lazy deopt from a callback that changes the input array. Deopt in a callback
+// execution that returns true.
+(() => {
+ const a = [1, 2, 3, 4, 5];
+ function lazyChanger(deopt) {
+ return a.every((v, i) => {
+ if (i === 2 && deopt) {
+ a[3] = 100;
+ %DeoptimizeNow();
+ }
+ return true;
+ });
+ }
+ assertTrue(lazyChanger());
+ lazyChanger();
+ %OptimizeFunctionOnNextCall(lazyChanger);
+ assertTrue(lazyChanger(true));
+ assertTrue(lazyChanger());
+})();
+
+// Escape-analyzed array.
+(() => {
+ let result = 0;
+ function eagerDeoptInCalled(deopt) {
+ const a_noescape = [0, 1, 2, 3, 4, 5];
+ a_noescape.every((v, i) => {
+ result += v | 0;
+ if (i === 13 && deopt) {
+ a_noescape.length = 25;
+ }
+ return true;
+ });
+ }
+ eagerDeoptInCalled();
+ eagerDeoptInCalled();
+ %OptimizeFunctionOnNextCall(eagerDeoptInCalled);
+ eagerDeoptInCalled();
+ eagerDeoptInCalled(true);
+ eagerDeoptInCalled();
+ assertEquals(75, result);
+})();
+
+// Lazy deopt from runtime call from inlined callback function.
+(() => {
+ const a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25];
+ let result = 0;
+ function lazyDeopt(deopt) {
+ a.every((v, i) => {
+ result += i;
+ if (i === 13 && deopt) {
+ %DeoptimizeNow();
+ }
+ return true;
+ });
+ }
+ lazyDeopt();
+ lazyDeopt();
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ lazyDeopt();
+ lazyDeopt(true);
+ lazyDeopt();
+ assertEquals(1500, result);
+})();
+
+// Lazy deopt from runtime call from non-inline callback function.
+(() => {
+ const a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25];
+ let result = 0;
+ function lazyDeopt(deopt) {
+ function callback(v, i) {
+ result += i;
+ if (i === 13 && deopt) {
+ %DeoptimizeNow();
+ }
+ return true;
+ }
+ %NeverOptimizeFunction(callback);
+ a.every(callback);
+ }
+ lazyDeopt();
+ lazyDeopt();
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ lazyDeopt();
+ lazyDeopt(true);
+ lazyDeopt();
+ assertEquals(1500, result);
+})();
+
+// Call to a.every is done inside a try-catch block and the callback function
+// being called actually throws.
+(() => {
+ const a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25];
+ let caught = false;
+ function lazyDeopt(deopt) {
+ try {
+ a.every((v, i) => {
+ if (i === 1 && deopt) {
+ throw("a");
+ }
+ return true;
+ });
+ } catch (e) {
+ caught = true;
+ }
+ }
+ lazyDeopt();
+ lazyDeopt();
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ lazyDeopt();
+ assertDoesNotThrow(() => lazyDeopt(true));
+ assertTrue(caught);
+ lazyDeopt();
+})();
+
+// Call to a.every is done inside a try-catch block and the callback function
+// being called actually throws, but the callback is not inlined.
+(() => {
+ let a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+ let caught = false;
+ function lazyDeopt(deopt) {
+ function callback(v, i) {
+ if (i === 1 && deopt) {
+ throw("a");
+ }
+ return true;
+ }
+ %NeverOptimizeFunction(callback);
+ try {
+ a.every(callback);
+ } catch (e) {
+ caught = true;
+ }
+ }
+ lazyDeopt();
+ lazyDeopt();
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ lazyDeopt();
+ assertDoesNotThrow(() => lazyDeopt(true));
+ assertTrue(caught);
+ lazyDeopt();
+})();
+
+// Call to a.every is done inside a try-catch block and the callback function
+// being called throws into a deoptimized caller function.
+(function TestThrowIntoDeoptimizedOuter() {
+ const a = [1, 2, 3, 4];
+ function lazyDeopt(deopt) {
+ function callback(v, i) {
+ if (i === 1 && deopt) {
+ %DeoptimizeFunction(lazyDeopt);
+ throw "some exception";
+ }
+ return true;
+ }
+ %NeverOptimizeFunction(callback);
+ let result = 0;
+ try {
+ result = a.every(callback);
+ } catch (e) {
+ assertEquals("some exception", e);
+ result = "nope";
+ }
+ return result;
+ }
+ assertEquals(true, lazyDeopt(false));
+ assertEquals(true, lazyDeopt(false));
+ assertEquals("nope", lazyDeopt(true));
+ assertEquals("nope", lazyDeopt(true));
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ assertEquals(true, lazyDeopt(false));
+ assertEquals("nope", lazyDeopt(true));
+})();
+
+// An error generated inside the callback includes every in its
+// stack trace.
+(() => {
+ const re = /Array\.every/;
+ function lazyDeopt(deopt) {
+ const b = [1, 2, 3];
+ let result = 0;
+ b.every((v, i) => {
+ result += v;
+ if (i === 1) {
+ const e = new Error();
+ assertTrue(re.exec(e.stack) !== null);
+ }
+ return true;
+ });
+ }
+ lazyDeopt();
+ lazyDeopt();
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ lazyDeopt();
+})();
+
+// An error generated inside a non-inlined callback function also
+// includes every in its stack trace.
+(() => {
+ const re = /Array\.every/;
+ function lazyDeopt(deopt) {
+ const b = [1, 2, 3];
+ let did_assert_error = false;
+ let result = 0;
+ function callback(v, i) {
+ result += v;
+ if (i === 1) {
+ const e = new Error();
+ assertTrue(re.exec(e.stack) !== null);
+ did_assert_error = true;
+ }
+ return true;
+ }
+ %NeverOptimizeFunction(callback);
+ b.every(callback);
+ return did_assert_error;
+ }
+ lazyDeopt();
+ lazyDeopt();
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ assertTrue(lazyDeopt());
+})();
+
+// An error generated inside a recently deoptimized callback function
+// includes every in its stack trace.
+(() => {
+ const re = /Array\.every/;
+ function lazyDeopt(deopt) {
+ const b = [1, 2, 3];
+ let did_assert_error = false;
+ let result = 0;
+ b.every((v, i) => {
+ result += v;
+ if (i === 1) {
+ %DeoptimizeNow();
+ } else if (i === 2) {
+ const e = new Error();
+ assertTrue(re.exec(e.stack) !== null);
+ did_assert_error = true;
+ }
+ return true;
+ });
+ return did_assert_error;
+ }
+ lazyDeopt();
+ lazyDeopt();
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ assertTrue(lazyDeopt());
+})();
+
+// Verify that various exception edges are handled appropriately.
+// The thrown Error object should always indicate it was created from
+// an every call stack.
+(() => {
+ const re = /Array\.every/;
+ const a = [1, 2, 3];
+ let result = 0;
+ function lazyDeopt() {
+ a.every((v, i) => {
+ result += i;
+ if (i === 1) {
+ %DeoptimizeFunction(lazyDeopt);
+ throw new Error();
+ }
+ return true;
+ });
+ }
+ assertThrows(() => lazyDeopt());
+ assertThrows(() => lazyDeopt());
+ try {
+ lazyDeopt();
+ } catch (e) {
+ assertTrue(re.exec(e.stack) !== null);
+ }
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ try {
+ lazyDeopt();
+ } catch (e) {
+ assertTrue(re.exec(e.stack) !== null);
+ }
+})();
+
+// Verify holes are skipped.
+(() => {
+ const a = [1, 2, , 3, 4];
+ function withHoles() {
+ const callback_values = [];
+ a.every(v => {
+ callback_values.push(v);
+ return true;
+ });
+ return callback_values;
+ }
+ withHoles();
+ withHoles();
+ %OptimizeFunctionOnNextCall(withHoles);
+ assertArrayEquals([1, 2, 3, 4], withHoles());
+})();
+
+(() => {
+ const a = [1.5, 2.5, , 3.5, 4.5];
+ function withHoles() {
+ const callback_values = [];
+ a.every(v => {
+ callback_values.push(v);
+ return true;
+ });
+ return callback_values;
+ }
+ withHoles();
+ withHoles();
+ %OptimizeFunctionOnNextCall(withHoles);
+ assertArrayEquals([1.5, 2.5, 3.5, 4.5], withHoles());
+})();
+
+// Ensure that we handle side-effects between load and call.
+(() => {
+ function side_effect(a, b) { if (b) a.foo = 3; return a; }
+ %NeverOptimizeFunction(side_effect);
+
+ function unreliable(a, b) {
+ return a.every(x => true, side_effect(a, b));
+ }
+
+ let a = [1, 2, 3];
+ unreliable(a, false);
+ unreliable(a, false);
+ %OptimizeFunctionOnNextCall(unreliable);
+ unreliable(a, false);
+ // Now actually do change the map.
+ unreliable(a, true);
+})();
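+
+// Why this works: the thisArg expression side_effect(a, b) is evaluated after
+// a.every is loaded but before the builtin is called, so the map of a can
+// change in between, and the optimized code must not treat the map check done
+// at the load as still valid at the call. The evaluation order, as a sketch:
+//
+//   const fn = a.every;            // 1. load the method
+//   const arg = side_effect(a, b); // 2. may add a property, changing a's map
+//   fn.call(a, x => true, arg);    // 3. the call must respect the new map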
+
+// Handle the case where the callback is not callable.
+(() => {
+ const a = [1, 2, 3, 4, 5];
+ function notCallable() {
+ return a.every(undefined);
+ }
+
+ assertThrows(notCallable, TypeError);
+ try { notCallable(); } catch(e) { }
+ %OptimizeFunctionOnNextCall(notCallable);
+ assertThrows(notCallable, TypeError);
+})();
+
+// Messing with the Array species constructor causes deoptimization.
+(() => {
+ const a = [1, 2, 3];
+ let result = 0;
+ function prototypeChanged() {
+ a.every((v, i) => {
+ result += v;
+ return true;
+ });
+ }
+ prototypeChanged();
+ prototypeChanged();
+ %OptimizeFunctionOnNextCall(prototypeChanged);
+ prototypeChanged();
+ a.constructor = {};
+ prototypeChanged();
+ assertUnoptimized(prototypeChanged);
+ assertEquals(24, result);
+})();
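+
+// The trigger here is `a.constructor = {}` on the array instance; in V8 this
+// is expected to invalidate the species lookup-chain protector that the
+// inlined builtin depends on, forcing the deopt that assertUnoptimized
+// checks. The exact protector involved is an implementation detail and may
+// change between versions.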
diff --git a/deps/v8/test/mjsunit/optimized-array-find.js b/deps/v8/test/mjsunit/optimized-array-find.js
new file mode 100644
index 0000000000..abcd2cf704
--- /dev/null
+++ b/deps/v8/test/mjsunit/optimized-array-find.js
@@ -0,0 +1,460 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turbo-inline-array-builtins --opt
+// Flags: --no-always-opt
+
+// Unknown field access leads to soft-deopt unrelated to find, should still
+// lead to correct result.
+(() => {
+ const a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25];
+ let result = 0;
+ function eagerDeoptInCalled(deopt) {
+ return a.find((v, i) => {
+ if (i === 13 && deopt) {
+ a.abc = 25;
+ }
+ result += v;
+ return v === 20;
+ });
+ }
+ eagerDeoptInCalled();
+ eagerDeoptInCalled();
+ %OptimizeFunctionOnNextCall(eagerDeoptInCalled);
+ eagerDeoptInCalled();
+ assertEquals(20, eagerDeoptInCalled(true));
+ eagerDeoptInCalled();
+ assertEquals(1050, result);
+})();
+
+// Length change detected during loop, must cause properly handled eager deopt.
+(() => {
+ let called_values;
+ function eagerDeoptInCalled(deopt) {
+ const a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+ called_values = [];
+ return a.find((v,i) => {
+ called_values.push(v);
+ a.length = (i === 5 && deopt) ? 8 : 10;
+ return v === 9;
+ });
+ }
+ assertEquals(9, eagerDeoptInCalled());
+ assertArrayEquals([1, 2, 3, 4, 5, 6, 7, 8, 9], called_values);
+ eagerDeoptInCalled();
+ %OptimizeFunctionOnNextCall(eagerDeoptInCalled);
+ assertEquals(9, eagerDeoptInCalled());
+ assertEquals(undefined, eagerDeoptInCalled(true));
+ assertArrayEquals([1, 2, 3, 4, 5, 6, 7, 8, undefined, undefined],
+ called_values);
+ eagerDeoptInCalled();
+})();
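+
+// Note: per spec, find() caches the length once and then performs a plain
+// Get for every index without a HasProperty check, so after the callback
+// truncates the array to length 8 the remaining two reads yield undefined.
+// That is why called_values ends with two undefined entries here, while the
+// analogous every() test only ever observes [1..8].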
+
+// Lazy deopt from a callback that changes the input array. Deopt in a callback
+// execution that returns true.
+(() => {
+ const a = [1, 2, 3, 4, 5];
+ function lazyChanger(deopt) {
+ return a.find((v, i) => {
+ if (i === 3 && deopt) {
+ a[3] = 100;
+ %DeoptimizeNow();
+ }
+ return v > 3;
+ });
+ }
+ assertEquals(4, lazyChanger());
+ lazyChanger();
+ %OptimizeFunctionOnNextCall(lazyChanger);
+ assertEquals(4, lazyChanger(true));
+ assertEquals(100, lazyChanger());
+})();
+
+// Lazy deopt from a callback that will always return false and no element is
+// found. Verifies the lazy-after-callback continuation builtin.
+(() => {
+ const a = [1, 2, 3, 4, 5];
+ function lazyChanger(deopt) {
+ return a.find((v, i) => {
+ if (i === 3 && deopt) {
+ %DeoptimizeNow();
+ }
+ return false;
+ });
+ }
+ assertEquals(undefined, lazyChanger());
+ lazyChanger();
+ %OptimizeFunctionOnNextCall(lazyChanger);
+ assertEquals(undefined, lazyChanger(true));
+ assertEquals(undefined, lazyChanger());
+})();
+
+// Lazy deopt from a callback that changes the input array. Deopt in a callback
+// execution that returns false.
+(() => {
+ const a = [1, 2, 3, 4, 5];
+ function lazyChanger(deopt) {
+ return a.find((v, i) => {
+ if (i === 2 && deopt) {
+ a[3] = 100;
+ %DeoptimizeNow();
+ }
+ return v > 3;
+ });
+ }
+ assertEquals(4, lazyChanger());
+ lazyChanger();
+ %OptimizeFunctionOnNextCall(lazyChanger);
+ assertEquals(100, lazyChanger(true));
+ assertEquals(100, lazyChanger());
+})();
+
+// Escape-analyzed array.
+(() => {
+ let result = 0;
+ function eagerDeoptInCalled(deopt) {
+ const a_noescape = [0, 1, 2, 3, 4, 5];
+ a_noescape.find((v, i) => {
+ result += v | 0;
+ if (i === 13 && deopt) {
+ a_noescape.length = 25;
+ }
+ return false;
+ });
+ }
+ eagerDeoptInCalled();
+ eagerDeoptInCalled();
+ %OptimizeFunctionOnNextCall(eagerDeoptInCalled);
+ eagerDeoptInCalled();
+ eagerDeoptInCalled(true);
+ eagerDeoptInCalled();
+ assertEquals(75, result);
+})();
+
+// Lazy deopt from runtime call from inlined callback function.
+(() => {
+ const a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25];
+ let result = 0;
+ function lazyDeopt(deopt) {
+ a.find((v, i) => {
+ result += i;
+ if (i === 13 && deopt) {
+ %DeoptimizeNow();
+ }
+ return false;
+ });
+ }
+ lazyDeopt();
+ lazyDeopt();
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ lazyDeopt();
+ lazyDeopt(true);
+ lazyDeopt();
+ assertEquals(1500, result);
+})();
+
+// Lazy deopt from runtime call from non-inline callback function.
+(() => {
+ const a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25];
+ let result = 0;
+ function lazyDeopt(deopt) {
+ function callback(v, i) {
+ result += i;
+ if (i === 13 && deopt) {
+ %DeoptimizeNow();
+ }
+ return false;
+ }
+ %NeverOptimizeFunction(callback);
+ a.find(callback);
+ }
+ lazyDeopt();
+ lazyDeopt();
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ lazyDeopt();
+ lazyDeopt(true);
+ lazyDeopt();
+ assertEquals(1500, result);
+})();
+
+// Call to a.find is done inside a try-catch block and the callback function
+// being called actually throws.
+(() => {
+ const a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25];
+ let caught = false;
+ function lazyDeopt(deopt) {
+ try {
+ a.find((v, i) => {
+ if (i === 1 && deopt) {
+ throw("a");
+ }
+ return false;
+ });
+ } catch (e) {
+ caught = true;
+ }
+ }
+ lazyDeopt();
+ lazyDeopt();
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ lazyDeopt();
+ assertDoesNotThrow(() => lazyDeopt(true));
+ assertTrue(caught);
+ lazyDeopt();
+})();
+
+// Call to a.find is done inside a try-catch block and the callback function
+// being called actually throws, but the callback is not inlined.
+(() => {
+ let a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+ let caught = false;
+ function lazyDeopt(deopt) {
+ function callback(v, i) {
+ if (i === 1 && deopt) {
+ throw("a");
+ }
+ return false;
+ }
+ %NeverOptimizeFunction(callback);
+ try {
+ a.find(callback);
+ } catch (e) {
+ caught = true;
+ }
+ }
+ lazyDeopt();
+ lazyDeopt();
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ lazyDeopt();
+ assertDoesNotThrow(() => lazyDeopt(true));
+ assertTrue(caught);
+ lazyDeopt();
+})();
+
+// Call to a.find is done inside a try-catch block and the callback function
+// being called throws into a deoptimized caller function.
+(function TestThrowIntoDeoptimizedOuter() {
+ const a = [1, 2, 3, 4];
+ function lazyDeopt(deopt) {
+ function callback(v, i) {
+ if (i === 1 && deopt) {
+ %DeoptimizeFunction(lazyDeopt);
+ throw "some exception";
+ }
+ return v === 3;
+ }
+ %NeverOptimizeFunction(callback);
+ let result = 0;
+ try {
+ result = a.find(callback);
+ } catch (e) {
+ assertEquals("some exception", e);
+ result = "nope";
+ }
+ return result;
+ }
+ assertEquals(3, lazyDeopt(false));
+ assertEquals(3, lazyDeopt(false));
+ assertEquals("nope", lazyDeopt(true));
+ assertEquals("nope", lazyDeopt(true));
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ assertEquals(3, lazyDeopt(false));
+ assertEquals("nope", lazyDeopt(true));
+})();
+
+// An error generated inside the callback includes find in its
+// stack trace.
+(() => {
+ const re = /Array\.find/;
+ function lazyDeopt(deopt) {
+ const b = [1, 2, 3];
+ let result = 0;
+ b.find((v, i) => {
+ result += v;
+ if (i === 1) {
+ const e = new Error();
+ assertTrue(re.exec(e.stack) !== null);
+ }
+ return false;
+ });
+ }
+ lazyDeopt();
+ lazyDeopt();
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ lazyDeopt();
+})();
+
+// An error generated inside a non-inlined callback function also
+// includes find in its stack trace.
+(() => {
+ const re = /Array\.find/;
+ function lazyDeopt(deopt) {
+ const b = [1, 2, 3];
+ let did_assert_error = false;
+ let result = 0;
+ function callback(v, i) {
+ result += v;
+ if (i === 1) {
+ const e = new Error();
+ assertTrue(re.exec(e.stack) !== null);
+ did_assert_error = true;
+ }
+ return false;
+ }
+ %NeverOptimizeFunction(callback);
+ b.find(callback);
+ return did_assert_error;
+ }
+ lazyDeopt();
+ lazyDeopt();
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ assertTrue(lazyDeopt());
+})();
+
+// An error generated inside a recently deoptimized callback function
+// includes find in its stack trace.
+(() => {
+ const re = /Array\.find/;
+ function lazyDeopt(deopt) {
+ const b = [1, 2, 3];
+ let did_assert_error = false;
+ let result = 0;
+ b.find((v, i) => {
+ result += v;
+ if (i === 1) {
+ %DeoptimizeNow();
+ } else if (i === 2) {
+ const e = new Error();
+ assertTrue(re.exec(e.stack) !== null);
+ did_assert_error = true;
+ }
+ return false;
+ });
+ return did_assert_error;
+ }
+ lazyDeopt();
+ lazyDeopt();
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ assertTrue(lazyDeopt());
+})();
+
+// Verify that various exception edges are handled appropriately.
+// The thrown Error object should always indicate it was created from
+// a find call stack.
+(() => {
+ const re = /Array\.find/;
+ const a = [1, 2, 3];
+ let result = 0;
+ function lazyDeopt() {
+ a.find((v, i) => {
+ result += i;
+ if (i === 1) {
+ %DeoptimizeFunction(lazyDeopt);
+ throw new Error();
+ }
+ return false;
+ });
+ }
+ assertThrows(() => lazyDeopt());
+ assertThrows(() => lazyDeopt());
+ try {
+ lazyDeopt();
+ } catch (e) {
+ assertTrue(re.exec(e.stack) !== null);
+ }
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ try {
+ lazyDeopt();
+ } catch (e) {
+ assertTrue(re.exec(e.stack) !== null);
+ }
+})();
+
+// Messing with the Array species constructor causes deoptimization.
+(() => {
+ const a = [1, 2, 3];
+ let result = 0;
+ function prototypeChanged() {
+ a.find((v, i) => {
+ result += v;
+ return false;
+ });
+ }
+ prototypeChanged();
+ prototypeChanged();
+ %OptimizeFunctionOnNextCall(prototypeChanged);
+ prototypeChanged();
+ a.constructor = {};
+ prototypeChanged();
+ assertUnoptimized(prototypeChanged);
+ assertEquals(24, result);
+})();
+
+// Verify holes are replaced with undefined.
+(() => {
+ const a = [1, 2, , 3, 4];
+ function withHoles() {
+ const callback_values = [];
+ a.find(v => {
+ callback_values.push(v);
+ return false;
+ });
+ return callback_values;
+ }
+ withHoles();
+ withHoles();
+ %OptimizeFunctionOnNextCall(withHoles);
+ assertArrayEquals([1, 2, undefined, 3, 4], withHoles());
+})();
+
+(() => {
+ const a = [1.5, 2.5, , 3.5, 4.5];
+ function withHoles() {
+ const callback_values = [];
+ a.find(v => {
+ callback_values.push(v);
+ return false;
+ });
+ return callback_values;
+ }
+ withHoles();
+ withHoles();
+ %OptimizeFunctionOnNextCall(withHoles);
+ assertArrayEquals([1.5, 2.5, undefined, 3.5, 4.5], withHoles());
+})();
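+
+// The two tests above differ from their every/some counterparts on purpose:
+// find/findIndex visit every index and read holes as undefined, whereas
+// every/some/forEach skip holes via a HasProperty check. A quick sketch of
+// the observable difference (illustration only, not part of the test):
+//
+//   const holey = [1, , 3];
+//   let byFind = 0, byEvery = 0;
+//   holey.find(() => (byFind++, false));    // byFind === 3
+//   holey.every(() => (byEvery++, true));   // byEvery === 2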
+
+// Ensure that we handle side-effects between load and call.
+(() => {
+ function side_effect(a, b) { if (b) a.foo = 3; return a; }
+ %NeverOptimizeFunction(side_effect);
+
+ function unreliable(a, b) {
+ return a.find(x => false, side_effect(a, b));
+ }
+
+ let a = [1, 2, 3];
+ unreliable(a, false);
+ unreliable(a, false);
+ %OptimizeFunctionOnNextCall(unreliable);
+ unreliable(a, false);
+ // Now actually do change the map.
+ unreliable(a, true);
+})();
+
+// Handle the case where the callback is not callable.
+(() => {
+ const a = [1, 2, 3, 4, 5];
+ function notCallable() {
+ return a.find(undefined);
+ }
+
+ assertThrows(notCallable, TypeError);
+ try { notCallable(); } catch(e) { }
+ %OptimizeFunctionOnNextCall(notCallable);
+ assertThrows(notCallable, TypeError);
+})();
diff --git a/deps/v8/test/mjsunit/optimized-array-findindex.js b/deps/v8/test/mjsunit/optimized-array-findindex.js
new file mode 100644
index 0000000000..91f4a6cc60
--- /dev/null
+++ b/deps/v8/test/mjsunit/optimized-array-findindex.js
@@ -0,0 +1,460 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turbo-inline-array-builtins --opt
+// Flags: --no-always-opt
+
+// Unknown field access leads to soft-deopt unrelated to findIndex, should still
+// lead to correct result.
+(() => {
+ const a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25];
+ let result = 0;
+ function eagerDeoptInCalled(deopt) {
+ return a.findIndex((v, i) => {
+ if (i === 13 && deopt) {
+ a.abc = 25;
+ }
+ result += v;
+ return v === 20;
+ });
+ }
+ eagerDeoptInCalled();
+ eagerDeoptInCalled();
+ %OptimizeFunctionOnNextCall(eagerDeoptInCalled);
+ eagerDeoptInCalled();
+ assertEquals(19, eagerDeoptInCalled(true));
+ eagerDeoptInCalled();
+ assertEquals(1050, result);
+})();
+
+// Length change detected during loop, must cause properly handled eager deopt.
+(() => {
+ let called_values;
+ function eagerDeoptInCalled(deopt) {
+ const a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+ called_values = [];
+ return a.findIndex((v,i) => {
+ called_values.push(v);
+ a.length = (i === 5 && deopt) ? 8 : 10;
+ return v === 9;
+ });
+ }
+ assertEquals(8, eagerDeoptInCalled());
+ assertArrayEquals([1, 2, 3, 4, 5, 6, 7, 8, 9], called_values);
+ eagerDeoptInCalled();
+ %OptimizeFunctionOnNextCall(eagerDeoptInCalled);
+ assertEquals(8, eagerDeoptInCalled());
+ assertEquals(-1, eagerDeoptInCalled(true));
+ assertArrayEquals([1, 2, 3, 4, 5, 6, 7, 8, undefined, undefined],
+ called_values);
+ eagerDeoptInCalled();
+})();
+
+// Lazy deopt from a callback that changes the input array. Deopt in a callback
+// execution that returns true.
+(() => {
+ const a = [1, 2, 3, 4, 5];
+ function lazyChanger(deopt) {
+ return a.findIndex((v, i) => {
+ if (i === 3 && deopt) {
+ a[3] = 3;
+ %DeoptimizeNow();
+ }
+ return v > 3;
+ });
+ }
+ assertEquals(3, lazyChanger());
+ lazyChanger();
+ %OptimizeFunctionOnNextCall(lazyChanger);
+ assertEquals(3, lazyChanger(true));
+ assertEquals(4, lazyChanger());
+})();
+
+// Lazy deopt from a callback that will always return false and no element is
+// found. Verifies the lazy-after-callback continuation builtin.
+(() => {
+ const a = [1, 2, 3, 4, 5];
+ function lazyChanger(deopt) {
+ return a.findIndex((v, i) => {
+ if (i === 3 && deopt) {
+ %DeoptimizeNow();
+ }
+ return false;
+ });
+ }
+ assertEquals(-1, lazyChanger());
+ lazyChanger();
+ %OptimizeFunctionOnNextCall(lazyChanger);
+ assertEquals(-1, lazyChanger(true));
+ assertEquals(-1, lazyChanger());
+})();
+
+// Lazy deopt from a callback that changes the input array. Deopt in a callback
+// execution that returns false.
+(() => {
+ const a = [1, 2, 3, 4, 5];
+ function lazyChanger(deopt) {
+ return a.findIndex((v, i) => {
+ if (i === 2 && deopt) {
+ a[3] = 2;
+ %DeoptimizeNow();
+ }
+ return v > 3;
+ });
+ }
+ assertEquals(3, lazyChanger());
+ lazyChanger();
+ %OptimizeFunctionOnNextCall(lazyChanger);
+ assertEquals(4, lazyChanger(true));
+ assertEquals(4, lazyChanger());
+})();
+
+// Escape-analyzed array.
+(() => {
+ let result = 0;
+ function eagerDeoptInCalled(deopt) {
+ const a_noescape = [0, 1, 2, 3, 4, 5];
+ a_noescape.findIndex((v, i) => {
+ result += v | 0;
+ if (i === 13 && deopt) {
+ a_noescape.length = 25;
+ }
+ return false;
+ });
+ }
+ eagerDeoptInCalled();
+ eagerDeoptInCalled();
+ %OptimizeFunctionOnNextCall(eagerDeoptInCalled);
+ eagerDeoptInCalled();
+ eagerDeoptInCalled(true);
+ eagerDeoptInCalled();
+ assertEquals(75, result);
+})();
+
+// Lazy deopt from runtime call from inlined callback function.
+(() => {
+ const a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25];
+ let result = 0;
+ function lazyDeopt(deopt) {
+ a.findIndex((v, i) => {
+ result += i;
+ if (i === 13 && deopt) {
+ %DeoptimizeNow();
+ }
+ return false;
+ });
+ }
+ lazyDeopt();
+ lazyDeopt();
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ lazyDeopt();
+ lazyDeopt(true);
+ lazyDeopt();
+ assertEquals(1500, result);
+})();
+
+// Lazy deopt from runtime call from non-inline callback function.
+(() => {
+ const a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25];
+ let result = 0;
+ function lazyDeopt(deopt) {
+ function callback(v, i) {
+ result += i;
+ if (i === 13 && deopt) {
+ %DeoptimizeNow();
+ }
+ return false;
+ }
+ %NeverOptimizeFunction(callback);
+ a.findIndex(callback);
+ }
+ lazyDeopt();
+ lazyDeopt();
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ lazyDeopt();
+ lazyDeopt(true);
+ lazyDeopt();
+ assertEquals(1500, result);
+})();
+
+// Call to a.findIndex is done inside a try-catch block and the callback function
+// being called actually throws.
+(() => {
+ const a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25];
+ let caught = false;
+ function lazyDeopt(deopt) {
+ try {
+ a.findIndex((v, i) => {
+ if (i === 1 && deopt) {
+ throw("a");
+ }
+ return false;
+ });
+ } catch (e) {
+ caught = true;
+ }
+ }
+ lazyDeopt();
+ lazyDeopt();
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ lazyDeopt();
+ assertDoesNotThrow(() => lazyDeopt(true));
+ assertTrue(caught);
+ lazyDeopt();
+})();
+
+// Call to a.findIndex is done inside a try-catch block and the callback function
+// being called actually throws, but the callback is not inlined.
+(() => {
+ let a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+ let caught = false;
+ function lazyDeopt(deopt) {
+ function callback(v, i) {
+ if (i === 1 && deopt) {
+ throw("a");
+ }
+ return false;
+ }
+ %NeverOptimizeFunction(callback);
+ try {
+ a.findIndex(callback);
+ } catch (e) {
+ caught = true;
+ }
+ }
+ lazyDeopt();
+ lazyDeopt();
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ lazyDeopt();
+ assertDoesNotThrow(() => lazyDeopt(true));
+ assertTrue(caught);
+ lazyDeopt();
+})();
+
+// Call to a.findIndex is done inside a try-catch block and the callback function
+// being called throws into a deoptimized caller function.
+(function TestThrowIntoDeoptimizedOuter() {
+ const a = [1, 2, 3, 4];
+ function lazyDeopt(deopt) {
+ function callback(v, i) {
+ if (i === 1 && deopt) {
+ %DeoptimizeFunction(lazyDeopt);
+ throw "some exception";
+ }
+ return v === 3;
+ }
+ %NeverOptimizeFunction(callback);
+ let result = 0;
+ try {
+ result = a.findIndex(callback);
+ } catch (e) {
+ assertEquals("some exception", e);
+ result = "nope";
+ }
+ return result;
+ }
+ assertEquals(2, lazyDeopt(false));
+ assertEquals(2, lazyDeopt(false));
+ assertEquals("nope", lazyDeopt(true));
+ assertEquals("nope", lazyDeopt(true));
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ assertEquals(2, lazyDeopt(false));
+ assertEquals("nope", lazyDeopt(true));
+})();
+
+// An error generated inside the callback includes findIndex in its
+// stack trace.
+(() => {
+ const re = /Array\.findIndex/;
+ function lazyDeopt(deopt) {
+ const b = [1, 2, 3];
+ let result = 0;
+ b.findIndex((v, i) => {
+ result += v;
+ if (i === 1) {
+ const e = new Error();
+ assertTrue(re.exec(e.stack) !== null);
+ }
+ return false;
+ });
+ }
+ lazyDeopt();
+ lazyDeopt();
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ lazyDeopt();
+})();
+
+// An error generated inside a non-inlined callback function also
+// includes findIndex in its stack trace.
+(() => {
+ const re = /Array\.findIndex/;
+ function lazyDeopt(deopt) {
+ const b = [1, 2, 3];
+ let did_assert_error = false;
+ let result = 0;
+ function callback(v, i) {
+ result += v;
+ if (i === 1) {
+ const e = new Error();
+ assertTrue(re.exec(e.stack) !== null);
+ did_assert_error = true;
+ }
+ return false;
+ }
+ %NeverOptimizeFunction(callback);
+ b.findIndex(callback);
+ return did_assert_error;
+ }
+ lazyDeopt();
+ lazyDeopt();
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ assertTrue(lazyDeopt());
+})();
+
+// An error generated inside a recently deoptimized callback function
+// includes findIndex in its stack trace.
+(() => {
+ const re = /Array\.findIndex/;
+ function lazyDeopt(deopt) {
+ const b = [1, 2, 3];
+ let did_assert_error = false;
+ let result = 0;
+ b.findIndex((v, i) => {
+ result += v;
+ if (i === 1) {
+ %DeoptimizeNow();
+ } else if (i === 2) {
+ const e = new Error();
+ assertTrue(re.exec(e.stack) !== null);
+ did_assert_error = true;
+ }
+ return false;
+ });
+ return did_assert_error;
+ }
+ lazyDeopt();
+ lazyDeopt();
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ assertTrue(lazyDeopt());
+})();
+
+// Verify that various exception edges are handled appropriately.
+// The thrown Error object should always indicate it was created from
+// a findIndex call stack.
+(() => {
+ const re = /Array\.findIndex/;
+ const a = [1, 2, 3];
+ let result = 0;
+ function lazyDeopt() {
+ a.findIndex((v, i) => {
+ result += i;
+ if (i === 1) {
+ %DeoptimizeFunction(lazyDeopt);
+ throw new Error();
+ }
+ return false;
+ });
+ }
+ assertThrows(() => lazyDeopt());
+ assertThrows(() => lazyDeopt());
+ try {
+ lazyDeopt();
+ } catch (e) {
+ assertTrue(re.exec(e.stack) !== null);
+ }
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ try {
+ lazyDeopt();
+ } catch (e) {
+ assertTrue(re.exec(e.stack) !== null);
+ }
+})();
+
+// Messing with the Array species constructor causes deoptimization.
+(() => {
+ const a = [1, 2, 3];
+ let result = 0;
+ function prototypeChanged() {
+ a.findIndex((v, i) => {
+ result += v;
+ return false;
+ });
+ }
+ prototypeChanged();
+ prototypeChanged();
+ %OptimizeFunctionOnNextCall(prototypeChanged);
+ prototypeChanged();
+ a.constructor = {};
+ prototypeChanged();
+ assertUnoptimized(prototypeChanged);
+ assertEquals(24, result);
+})();
+
+// Verify holes are replaced with undefined.
+(() => {
+ const a = [1, 2, , 3, 4];
+ function withHoles() {
+ const callback_values = [];
+ a.findIndex(v => {
+ callback_values.push(v);
+ return false;
+ });
+ return callback_values;
+ }
+ withHoles();
+ withHoles();
+ %OptimizeFunctionOnNextCall(withHoles);
+ assertArrayEquals([1, 2, undefined, 3, 4], withHoles());
+})();
+
+(() => {
+ const a = [1.5, 2.5, , 3.5, 4.5];
+ function withHoles() {
+ const callback_values = [];
+ a.findIndex(v => {
+ callback_values.push(v);
+ return false;
+ });
+ return callback_values;
+ }
+ withHoles();
+ withHoles();
+ %OptimizeFunctionOnNextCall(withHoles);
+ assertArrayEquals([1.5, 2.5, undefined, 3.5, 4.5], withHoles());
+})();
+
+// Ensure that we handle side-effects between load and call.
+(() => {
+ function side_effect(a, b) { if (b) a.foo = 3; return a; }
+ %NeverOptimizeFunction(side_effect);
+
+ function unreliable(a, b) {
+ return a.findIndex(x => false, side_effect(a, b));
+ }
+
+ let a = [1, 2, 3];
+ unreliable(a, false);
+ unreliable(a, false);
+ %OptimizeFunctionOnNextCall(unreliable);
+ unreliable(a, false);
+ // Now actually do change the map.
+ unreliable(a, true);
+})();
+
+// Handle the case where the callback is not callable.
+(() => {
+ const a = [1, 2, 3, 4, 5];
+ function notCallable() {
+ return a.findIndex(undefined);
+ }
+
+ assertThrows(notCallable, TypeError);
+ try { notCallable(); } catch(e) { }
+ %OptimizeFunctionOnNextCall(notCallable);
+ assertThrows(notCallable, TypeError);
+})();
diff --git a/deps/v8/test/mjsunit/optimized-array-some.js b/deps/v8/test/mjsunit/optimized-array-some.js
new file mode 100644
index 0000000000..8d0114aa64
--- /dev/null
+++ b/deps/v8/test/mjsunit/optimized-array-some.js
@@ -0,0 +1,502 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turbo-inline-array-builtins --opt
+// Flags: --no-always-opt
+
+// Verify that early exit from some() works properly.
+(() => {
+ const a = [1, 2, 3, 4, 5];
+ let result = 0;
+ function earlyExit() {
+ return a.some(v => {
+ result += v;
+ return v > 2;
+ });
+ }
+ assertTrue(earlyExit());
+ earlyExit();
+ %OptimizeFunctionOnNextCall(earlyExit);
+ assertTrue(earlyExit());
+ assertEquals(18, result);
+})();
+
+// Soft-deopt plus early exit.
+(() => {
+ const a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+ let result = 0;
+ function softyPlusEarlyExit(deopt) {
+ return a.some(v => {
+ result += v;
+ if (v === 4 && deopt) {
+ a.abc = 25;
+ }
+ return v > 7;
+ });
+ }
+ assertTrue(softyPlusEarlyExit(false));
+ softyPlusEarlyExit(false);
+ %OptimizeFunctionOnNextCall(softyPlusEarlyExit);
+ assertTrue(softyPlusEarlyExit(true));
+ assertEquals(36*3, result);
+})();
+
+// Soft-deopt synced with early exit, which forces the lazy deoptimization
+// continuation handler to exit.
+(() => {
+ const a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+ let called_values = [];
+ function softyPlusEarlyExit(deopt) {
+ called_values = [];
+ return a.some(v => {
+ called_values.push(v);
+ if (v === 4 && deopt) {
+ a.abc = 25;
+ return true;
+ }
+ return v > 7;
+ });
+ }
+ assertTrue(softyPlusEarlyExit(false));
+ assertArrayEquals([1, 2, 3, 4, 5, 6, 7, 8], called_values);
+ softyPlusEarlyExit(false);
+ %OptimizeFunctionOnNextCall(softyPlusEarlyExit);
+ assertTrue(softyPlusEarlyExit(true));
+ assertArrayEquals([1, 2, 3, 4], called_values);
+})();
+
+// Unknown field access leads to soft-deopt unrelated to some, should still
+// lead to correct result.
+(() => {
+ const a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25];
+ let result = 0;
+ function eagerDeoptInCalled(deopt) {
+ return a.some((v, i) => {
+ if (i === 13 && deopt) {
+ a.abc = 25;
+ }
+ result += v;
+ return false;
+ });
+ }
+ eagerDeoptInCalled();
+ eagerDeoptInCalled();
+ %OptimizeFunctionOnNextCall(eagerDeoptInCalled);
+ eagerDeoptInCalled();
+ assertFalse(eagerDeoptInCalled(true));
+ eagerDeoptInCalled();
+ assertEquals(1625, result);
+})();
+
+// Length change detected during loop, must cause properly handled eager deopt.
+(() => {
+ let called_values;
+ function eagerDeoptInCalled(deopt) {
+ const a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+ called_values = [];
+ return a.some((v,i) => {
+ called_values.push(v);
+ a.length = (i === 5 && deopt) ? 8 : 10;
+ return false;
+ });
+ }
+ assertFalse(eagerDeoptInCalled());
+ assertArrayEquals([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], called_values);
+ eagerDeoptInCalled();
+ %OptimizeFunctionOnNextCall(eagerDeoptInCalled);
+ assertFalse(eagerDeoptInCalled());
+ assertFalse(eagerDeoptInCalled(true));
+ assertArrayEquals([1, 2, 3, 4, 5, 6, 7, 8], called_values);
+ eagerDeoptInCalled();
+})();
+
+// Lazy deopt from a callback that changes the input array. Deopt in a callback
+// execution that returns false.
+(() => {
+ const a = [1, 2, 3, 4, 5];
+ function lazyChanger(deopt) {
+ return a.some((v, i) => {
+ if (i === 3 && deopt) {
+ a[3] = 100;
+ %DeoptimizeNow();
+ }
+ return false;
+ });
+ }
+ assertFalse(lazyChanger());
+ lazyChanger();
+ %OptimizeFunctionOnNextCall(lazyChanger);
+ assertFalse(lazyChanger(true));
+ assertFalse(lazyChanger());
+})();
+
+// Lazy deopt from a callback that will always return false and no element is
+// found. Verifies the lazy-after-callback continuation builtin.
+(() => {
+ const a = [1, 2, 3, 4, 5];
+ function lazyChanger(deopt) {
+ return a.some((v, i) => {
+ if (i === 3 && deopt) {
+ %DeoptimizeNow();
+ }
+ return false;
+ });
+ }
+ assertFalse(lazyChanger());
+ lazyChanger();
+ %OptimizeFunctionOnNextCall(lazyChanger);
+ assertFalse(lazyChanger(true));
+ assertFalse(lazyChanger());
+})();
+
+// Lazy deopt from a callback that changes the input array. Deopt in a callback
+// execution that returns false.
+(() => {
+ const a = [1, 2, 3, 4, 5];
+ function lazyChanger(deopt) {
+    return a.some((v, i) => {
+ if (i === 2 && deopt) {
+ a[3] = 100;
+ %DeoptimizeNow();
+ }
+ return false;
+ });
+ }
+ assertFalse(lazyChanger());
+ lazyChanger();
+ %OptimizeFunctionOnNextCall(lazyChanger);
+ assertFalse(lazyChanger(true));
+ assertFalse(lazyChanger());
+})();
+
+// Escape-analyzed array.
+(() => {
+ let result = 0;
+ function eagerDeoptInCalled(deopt) {
+ const a_noescape = [0, 1, 2, 3, 4, 5];
+ a_noescape.some((v, i) => {
+ result += v | 0;
+ if (i === 13 && deopt) {
+ a_noescape.length = 25;
+ }
+ return false;
+ });
+ }
+ eagerDeoptInCalled();
+ eagerDeoptInCalled();
+ %OptimizeFunctionOnNextCall(eagerDeoptInCalled);
+ eagerDeoptInCalled();
+ eagerDeoptInCalled(true);
+ eagerDeoptInCalled();
+ assertEquals(75, result);
+})();
+
+// Lazy deopt from runtime call from inlined callback function.
+(() => {
+ const a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25];
+ let result = 0;
+ function lazyDeopt(deopt) {
+ a.some((v, i) => {
+ result += i;
+ if (i === 13 && deopt) {
+ %DeoptimizeNow();
+ }
+ return false;
+ });
+ }
+ lazyDeopt();
+ lazyDeopt();
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ lazyDeopt();
+ lazyDeopt(true);
+ lazyDeopt();
+ assertEquals(1500, result);
+})();
+
+// Lazy deopt from runtime call from non-inline callback function.
+(() => {
+ const a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25];
+ let result = 0;
+ function lazyDeopt(deopt) {
+ function callback(v, i) {
+ result += i;
+ if (i === 13 && deopt) {
+ %DeoptimizeNow();
+ }
+ return false;
+ }
+ %NeverOptimizeFunction(callback);
+ a.some(callback);
+ }
+ lazyDeopt();
+ lazyDeopt();
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ lazyDeopt();
+ lazyDeopt(true);
+ lazyDeopt();
+ assertEquals(1500, result);
+})();
+
+// Call to a.some is done inside a try-catch block and the callback function
+// being called actually throws.
+(() => {
+ const a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25];
+ let caught = false;
+ function lazyDeopt(deopt) {
+ try {
+ a.some((v, i) => {
+ if (i === 1 && deopt) {
+ throw("a");
+ }
+ return false;
+ });
+ } catch (e) {
+ caught = true;
+ }
+ }
+ lazyDeopt();
+ lazyDeopt();
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ lazyDeopt();
+ assertDoesNotThrow(() => lazyDeopt(true));
+ assertTrue(caught);
+ lazyDeopt();
+})();
+
+// Call to a.some is done inside a try-catch block and the callback function
+// being called actually throws, but the callback is not inlined.
+(() => {
+ let a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+ let caught = false;
+ function lazyDeopt(deopt) {
+ function callback(v, i) {
+ if (i === 1 && deopt) {
+ throw("a");
+ }
+ return false;
+ }
+ %NeverOptimizeFunction(callback);
+ try {
+ a.some(callback);
+ } catch (e) {
+ caught = true;
+ }
+ }
+ lazyDeopt();
+ lazyDeopt();
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ lazyDeopt();
+ assertDoesNotThrow(() => lazyDeopt(true));
+ assertTrue(caught);
+ lazyDeopt();
+})();
+
+// Call to a.some is done inside a try-catch block and the callback function
+// being called throws into a deoptimized caller function.
+(function TestThrowIntoDeoptimizedOuter() {
+ const a = [1, 2, 3, 4];
+ function lazyDeopt(deopt) {
+ function callback(v, i) {
+ if (i === 1 && deopt) {
+ %DeoptimizeFunction(lazyDeopt);
+ throw "some exception";
+ }
+ return false;
+ }
+ %NeverOptimizeFunction(callback);
+ let result = 0;
+ try {
+ result = a.some(callback);
+ } catch (e) {
+ assertEquals("some exception", e);
+ result = "nope";
+ }
+ return result;
+ }
+ assertEquals(false, lazyDeopt(false));
+ assertEquals(false, lazyDeopt(false));
+ assertEquals("nope", lazyDeopt(true));
+ assertEquals("nope", lazyDeopt(true));
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ assertEquals(false, lazyDeopt(false));
+ assertEquals("nope", lazyDeopt(true));
+})();
+
+// An error generated inside the callback includes some in its
+// stack trace.
+(() => {
+ const re = /Array\.some/;
+ function lazyDeopt(deopt) {
+ const b = [1, 2, 3];
+ let result = 0;
+ b.some((v, i) => {
+ result += v;
+ if (i === 1) {
+ const e = new Error();
+ assertTrue(re.exec(e.stack) !== null);
+ }
+ return false;
+ });
+ }
+ lazyDeopt();
+ lazyDeopt();
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ lazyDeopt();
+})();
+
+// An error generated inside a non-inlined callback function also
+// includes some in its stack trace.
+(() => {
+ const re = /Array\.some/;
+ function lazyDeopt(deopt) {
+ const b = [1, 2, 3];
+ let did_assert_error = false;
+ let result = 0;
+ function callback(v, i) {
+ result += v;
+ if (i === 1) {
+ const e = new Error();
+ assertTrue(re.exec(e.stack) !== null);
+ did_assert_error = true;
+ }
+ return false;
+ }
+ %NeverOptimizeFunction(callback);
+ b.some(callback);
+ return did_assert_error;
+ }
+ lazyDeopt();
+ lazyDeopt();
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ assertTrue(lazyDeopt());
+})();
+
+// An error generated inside a recently deoptimized callback function
+// includes some in its stack trace.
+(() => {
+ const re = /Array\.some/;
+ function lazyDeopt(deopt) {
+ const b = [1, 2, 3];
+ let did_assert_error = false;
+ let result = 0;
+ b.some((v, i) => {
+ result += v;
+ if (i === 1) {
+ %DeoptimizeNow();
+ } else if (i === 2) {
+ const e = new Error();
+ assertTrue(re.exec(e.stack) !== null);
+ did_assert_error = true;
+ }
+ return false;
+ });
+ return did_assert_error;
+ }
+ lazyDeopt();
+ lazyDeopt();
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ assertTrue(lazyDeopt());
+})();
+
+// Verify that various exception edges are handled appropriately.
+// The thrown Error object should always indicate it was created from
+// a some call stack.
+(() => {
+ const re = /Array\.some/;
+ const a = [1, 2, 3];
+ let result = 0;
+ function lazyDeopt() {
+ a.some((v, i) => {
+ result += i;
+ if (i === 1) {
+ %DeoptimizeFunction(lazyDeopt);
+ throw new Error();
+ }
+ return false;
+ });
+ }
+ assertThrows(() => lazyDeopt());
+ assertThrows(() => lazyDeopt());
+ try {
+ lazyDeopt();
+ } catch (e) {
+ assertTrue(re.exec(e.stack) !== null);
+ }
+ %OptimizeFunctionOnNextCall(lazyDeopt);
+ try {
+ lazyDeopt();
+ } catch (e) {
+ assertTrue(re.exec(e.stack) !== null);
+ }
+})();
+
+// Messing with the Array species constructor causes deoptimization.
+(() => {
+ const a = [1, 2, 3];
+ let result = 0;
+ function prototypeChanged() {
+ a.some((v, i) => {
+ result += v;
+ return false;
+ });
+ }
+ prototypeChanged();
+ prototypeChanged();
+ %OptimizeFunctionOnNextCall(prototypeChanged);
+ prototypeChanged();
+ a.constructor = {};
+ prototypeChanged();
+ assertUnoptimized(prototypeChanged);
+ assertEquals(24, result);
+})();
+
+// Verify holes are skipped.
+(() => {
+ const a = [1, 2, , 3, 4];
+ function withHoles() {
+ const callback_values = [];
+ a.some(v => {
+ callback_values.push(v);
+ return false;
+ });
+ return callback_values;
+ }
+ withHoles();
+ withHoles();
+ %OptimizeFunctionOnNextCall(withHoles);
+ assertArrayEquals([1, 2, 3, 4], withHoles());
+})();
+
+(() => {
+ const a = [1.5, 2.5, , 3.5, 4.5];
+ function withHoles() {
+ const callback_values = [];
+ a.some(v => {
+ callback_values.push(v);
+ return false;
+ });
+ return callback_values;
+ }
+ withHoles();
+ withHoles();
+ %OptimizeFunctionOnNextCall(withHoles);
+ assertArrayEquals([1.5, 2.5, 3.5, 4.5], withHoles());
+})();
+
+// Handle the case where the callback is not callable.
+(() => {
+ const a = [1, 2, 3, 4, 5];
+ function notCallable() {
+ return a.some(undefined);
+ }
+
+ assertThrows(notCallable, TypeError);
+ try { notCallable(); } catch(e) { }
+ %OptimizeFunctionOnNextCall(notCallable);
+ assertThrows(notCallable, TypeError);
+})();
diff --git a/deps/v8/test/mjsunit/optimized-filter.js b/deps/v8/test/mjsunit/optimized-filter.js
index b13edc3b36..3c7d827e0f 100644
--- a/deps/v8/test/mjsunit/optimized-filter.js
+++ b/deps/v8/test/mjsunit/optimized-filter.js
@@ -417,6 +417,59 @@
}
})();
+// Verify holes are skipped.
+(() => {
+ const a = [1, 2, , 3, 4];
+ let callback_values = [];
+ function withHoles() {
+ callback_values = [];
+ return a.filter(v => {
+ callback_values.push(v);
+ return true;
+ });
+ }
+ withHoles();
+ withHoles();
+ %OptimizeFunctionOnNextCall(withHoles);
+ assertArrayEquals([1, 2, 3, 4], withHoles());
+ assertArrayEquals([1, 2, 3, 4], callback_values);
+})();
+
+(() => {
+ const a = [1.5, 2.5, , 3.5, 4.5];
+ let callback_values = [];
+ function withHoles() {
+ callback_values = [];
+ return a.filter(v => {
+ callback_values.push(v);
+ return true;
+ });
+ }
+ withHoles();
+ withHoles();
+ %OptimizeFunctionOnNextCall(withHoles);
+ assertArrayEquals([1.5, 2.5, 3.5, 4.5], withHoles());
+ assertArrayEquals([1.5, 2.5, 3.5, 4.5], callback_values);
+})();
+
+// Ensure that we handle side-effects between load and call.
+(() => {
+ function side_effect(a, b) { if (b) a.foo = 3; return a; }
+ %NeverOptimizeFunction(side_effect);
+
+ function unreliable(a, b) {
+ return a.filter(x => x % 2 === 0, side_effect(a, b));
+ }
+
+ let a = [1, 2, 3];
+ unreliable(a, false);
+ unreliable(a, false);
+ %OptimizeFunctionOnNextCall(unreliable);
+ unreliable(a, false);
+ // Now actually do change the map.
+ unreliable(a, true);
+})();
+
// Messing with the Array species constructor causes deoptimization.
(function() {
var result = 0;
diff --git a/deps/v8/test/mjsunit/optimized-foreach.js b/deps/v8/test/mjsunit/optimized-foreach.js
index f3513f3838..1fe54b5e9f 100644
--- a/deps/v8/test/mjsunit/optimized-foreach.js
+++ b/deps/v8/test/mjsunit/optimized-foreach.js
@@ -343,3 +343,53 @@ var c = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25];
assertTrue(re.exec(e.stack) !== null);
}
})();
+
+// Verify holes are skipped.
+(() => {
+ const a = [1, 2, , 3, 4];
+ function withHoles() {
+ const callback_values = [];
+ a.forEach(v => {
+ callback_values.push(v);
+ });
+ return callback_values;
+ }
+ withHoles();
+ withHoles();
+ %OptimizeFunctionOnNextCall(withHoles);
+ assertArrayEquals([1, 2, 3, 4], withHoles());
+})();
+
+(() => {
+ const a = [1.5, 2.5, , 3.5, 4.5];
+ function withHoles() {
+ const callback_values = [];
+ a.forEach(v => {
+ callback_values.push(v);
+ });
+ return callback_values;
+ }
+ withHoles();
+ withHoles();
+ %OptimizeFunctionOnNextCall(withHoles);
+ assertArrayEquals([1.5, 2.5, 3.5, 4.5], withHoles());
+})();
+
+// Ensure that we handle side-effects between load and call.
+(() => {
+ function side_effect(a, b) { if (b) a.foo = 3; return a; }
+ %NeverOptimizeFunction(side_effect);
+
+ function unreliable(a, b) {
+ let sum = 0;
+ return a.forEach(x => sum += x, side_effect(a, b));
+ }
+
+ let a = [1, 2, 3];
+ unreliable(a, false);
+ unreliable(a, false);
+ %OptimizeFunctionOnNextCall(unreliable);
+ unreliable(a, false);
+ // Now actually do change the map.
+ unreliable(a, true);
+})();
diff --git a/deps/v8/test/mjsunit/optimized-map.js b/deps/v8/test/mjsunit/optimized-map.js
index d8613e0300..6a3df4d7d4 100644
--- a/deps/v8/test/mjsunit/optimized-map.js
+++ b/deps/v8/test/mjsunit/optimized-map.js
@@ -468,6 +468,59 @@ var c = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25];
assertEquals("hello1", string_results()[0]);
})();
+// Verify holes are not visited.
+(() => {
+ const a = [1, 2, , 3, 4];
+ let callback_values = [];
+ function withHoles() {
+ callback_values = [];
+ return a.map(v => {
+ callback_values.push(v);
+ return v;
+ });
+ }
+ withHoles();
+ withHoles();
+ %OptimizeFunctionOnNextCall(withHoles);
+ assertArrayEquals([1, 2, , 3, 4], withHoles());
+ assertArrayEquals([1, 2, 3, 4], callback_values);
+})();
+
+(() => {
+ const a = [1.5, 2.5, , 3.5, 4.5];
+ let callback_values = [];
+ function withHoles() {
+ callback_values = [];
+ return a.map(v => {
+ callback_values.push(v);
+ return v;
+ });
+ }
+ withHoles();
+ withHoles();
+ %OptimizeFunctionOnNextCall(withHoles);
+ assertArrayEquals([1.5, 2.5, , 3.5, 4.5], withHoles());
+ assertArrayEquals([1.5, 2.5, 3.5, 4.5], callback_values);
+})();
+
+// Ensure that we handle side-effects between load and call.
+(() => {
+ function side_effect(a, b) { if (b) a.foo = 3; return a; }
+ %NeverOptimizeFunction(side_effect);
+
+ function unreliable(a, b) {
+ return a.map(x => x * 2, side_effect(a, b));
+ }
+
+ let a = [1, 2, 3];
+ unreliable(a, false);
+ unreliable(a, false);
+ %OptimizeFunctionOnNextCall(unreliable);
+ unreliable(a, false);
+ // Now actually do change the map.
+ unreliable(a, true);
+})();
+
// Messing with the Array species constructor causes deoptimization.
(function() {
var result = 0;
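Note the asymmetry with filter and forEach: map skips holes when calling the callback but preserves them in its result, which is why the expected arrays here are written with an elision. For instance:

    const a = [1, 2, , 3, 4];
    const mapped = a.map(v => v);
    console.log(2 in a);         // false: hole in the input
    console.log(2 in mapped);    // false: map keeps the hole
    console.log(mapped.length);  // 5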
diff --git a/deps/v8/test/mjsunit/regress/modules-skip-regress-797581-1.js b/deps/v8/test/mjsunit/regress/modules-skip-regress-797581-1.js
new file mode 100644
index 0000000000..1aa55aa9fb
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/modules-skip-regress-797581-1.js
@@ -0,0 +1,5 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export default ()
diff --git a/deps/v8/test/mjsunit/regress/modules-skip-regress-797581-2.js b/deps/v8/test/mjsunit/regress/modules-skip-regress-797581-2.js
new file mode 100644
index 0000000000..855aa2e9d7
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/modules-skip-regress-797581-2.js
@@ -0,0 +1,5 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export default (...)
diff --git a/deps/v8/test/mjsunit/regress/modules-skip-regress-797581-3.js b/deps/v8/test/mjsunit/regress/modules-skip-regress-797581-3.js
new file mode 100644
index 0000000000..e6d043d2ce
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/modules-skip-regress-797581-3.js
@@ -0,0 +1,5 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export default (a, ...b)
diff --git a/deps/v8/test/mjsunit/regress/modules-skip-regress-797581-4.js b/deps/v8/test/mjsunit/regress/modules-skip-regress-797581-4.js
new file mode 100644
index 0000000000..fc7968d03d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/modules-skip-regress-797581-4.js
@@ -0,0 +1,5 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export default 1, 2;
diff --git a/deps/v8/test/mjsunit/regress/modules-skip-regress-797581-5.js b/deps/v8/test/mjsunit/regress/modules-skip-regress-797581-5.js
new file mode 100644
index 0000000000..10864c260f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/modules-skip-regress-797581-5.js
@@ -0,0 +1,6 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let x;
+export default x = 0;
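These five fixtures probe the export default grammar: the production accepts a single AssignmentExpression (or a function/class declaration), so a bare arrow-function head with no body and a comma expression are SyntaxErrors, while an assignment parses fine. Schematically, in a hypothetical module:

    export default (1, 2);       // ok: a parenthesized expression is one
                                 //     AssignmentExpression
    // export default 1, 2;      // SyntaxError: ',' after the expression
    // export default ();        // SyntaxError: arrow head without a body
    // export default (a, ...b)  // SyntaxError: likewise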
diff --git a/deps/v8/test/mjsunit/regress/regress-2646.js b/deps/v8/test/mjsunit/regress/regress-2646.js
index c51a28060c..ef72556e04 100644
--- a/deps/v8/test/mjsunit/regress/regress-2646.js
+++ b/deps/v8/test/mjsunit/regress/regress-2646.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --heap-stats
-
var expectedItemsCount = 10000,
itemSize = 5,
heap = new ArrayBuffer(expectedItemsCount * itemSize * 8),
diff --git a/deps/v8/test/mjsunit/regress/regress-370827.js b/deps/v8/test/mjsunit/regress/regress-370827.js
index 5536d5196b..e6d5185e70 100644
--- a/deps/v8/test/mjsunit/regress/regress-370827.js
+++ b/deps/v8/test/mjsunit/regress/regress-370827.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --expose-gc --heap-stats
+// Flags: --allow-natives-syntax --expose-gc
function g(dummy, x) {
var start = "";
diff --git a/deps/v8/test/mjsunit/regress/regress-599717.js b/deps/v8/test/mjsunit/regress/regress-599717.js
index 94a41ce4d3..51831860e9 100644
--- a/deps/v8/test/mjsunit/regress/regress-599717.js
+++ b/deps/v8/test/mjsunit/regress/regress-599717.js
@@ -15,7 +15,7 @@ function __f_61(stdlib, foreign, buffer) {
}
var ok = false;
try {
- var __v_12 = new ArrayBuffer(1 << 30);
+ var __v_12 = new ArrayBuffer(2147483648);
ok = true;
} catch (e) {
// Can happen on 32 bit systems.
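The literal swap is deliberate: JavaScript shift operands are truncated to 32-bit signed integers, so 1 << 30 is only 2**30 (1 GiB) and 1 << 31 would come out negative; spelling out 2147483648 requests the full 2 GiB buffer the test means to (possibly fail to) allocate. A quick check:

    console.log(1 << 30);   //  1073741824, i.e. 2**30
    console.log(1 << 31);   // -2147483648: shifts are 32-bit signed
    console.log(2 ** 31);   //  2147483648: the value now written out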
diff --git a/deps/v8/test/mjsunit/regress/regress-791334.js b/deps/v8/test/mjsunit/regress/regress-791334.js
new file mode 100644
index 0000000000..9f2748fdad
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-791334.js
@@ -0,0 +1,8 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// MODULE
+
+let foo = () => { return this };
+assertEquals(undefined, foo());
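The // MODULE marker makes the test runner pass --module (see the testcfg.py changes later in this patch), and top-level this in a module is undefined, whereas in a classic script it is the global object; the arrow function simply captures that binding. In other words:

    // classic script:  this === globalThis at the top level
    // module:          this === undefined at the top level
    let foo = () => this;   // arrows capture the enclosing this
    // so in a module, foo() returns undefined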
diff --git a/deps/v8/test/mjsunit/regress/regress-791958.js b/deps/v8/test/mjsunit/regress/regress-791958.js
new file mode 100644
index 0000000000..443ef6e359
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-791958.js
@@ -0,0 +1,15 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+obj = {m: print};
+function foo() {
+ for (var x = -536870912; x != -536870903; ++x) {
+ obj.m(-x >= 1000000 ? x % 1000000 : y);
+ }
+}
+foo();
+%OptimizeFunctionOnNextCall(foo);
+foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-793588.js b/deps/v8/test/mjsunit/regress/regress-793588.js
new file mode 100644
index 0000000000..6ad7a76e2a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-793588.js
@@ -0,0 +1,13 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-regexp-property
+
+assertNull(/a\P{Any}a/u.exec("a\u{d83d}a"));
+assertEquals(["a\u{d83d}a"], /a\p{Any}a/u.exec("a\u{d83d}a"));
+assertEquals(["a\u{d83d}a"], /(?:a\P{Any}a|a\p{Any}a)/u.exec("a\u{d83d}a"));
+assertNull(/a[\P{Any}]a/u.exec("a\u{d83d}a"));
+assertEquals(["a\u{d83d}a"], /a[^\P{Any}]a/u.exec("a\u{d83d}a"));
+assertEquals(["a\u{d83d}a"], /a[^\P{Any}x]a/u.exec("a\u{d83d}a"));
+assertNull(/a[^\P{Any}x]a/u.exec("axa"));
diff --git a/deps/v8/test/mjsunit/regress/regress-796427.js b/deps/v8/test/mjsunit/regress/regress-796427.js
new file mode 100644
index 0000000000..c09688d1ec
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-796427.js
@@ -0,0 +1,7 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --stack-size=150
+
+assertThrows(() => "" + { toString: Object.prototype.toLocaleString }, RangeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-797481.js b/deps/v8/test/mjsunit/regress/regress-797481.js
new file mode 100644
index 0000000000..7963dbd3b7
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-797481.js
@@ -0,0 +1,10 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --stack-size=100
+
+const a = /x/;
+
+a.exec = RegExp.prototype.test;
+assertThrows(() => RegExp.prototype.test.call(a), RangeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-797581.js b/deps/v8/test/mjsunit/regress/regress-797581.js
new file mode 100644
index 0000000000..17ac0ea50d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-797581.js
@@ -0,0 +1,29 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-dynamic-import
+
+function TryToLoadModule(filename, expect_error, token) {
+ let caught_error;
+
+ function SetError(e) {
+ caught_error = e;
+ }
+
+ import(filename).catch(SetError);
+ %RunMicrotasks();
+
+ if (expect_error) {
+ assertTrue(caught_error instanceof SyntaxError);
+ assertEquals("Unexpected token " + token, caught_error.message);
+ } else {
+ assertEquals(undefined, caught_error);
+ }
+}
+
+TryToLoadModule("modules-skip-regress-797581-1.js", true, ")");
+TryToLoadModule("modules-skip-regress-797581-2.js", true, ")");
+TryToLoadModule("modules-skip-regress-797581-3.js", true, "...");
+TryToLoadModule("modules-skip-regress-797581-4.js", true, ",");
+TryToLoadModule("modules-skip-regress-797581-5.js", false);
diff --git a/deps/v8/test/mjsunit/regress/regress-800538.js b/deps/v8/test/mjsunit/regress/regress-800538.js
new file mode 100644
index 0000000000..bc420d676c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-800538.js
@@ -0,0 +1,6 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+RegExp.prototype.__defineGetter__("global", () => true);
+assertEquals("/()/g", /()/.toString());
diff --git a/deps/v8/test/mjsunit/regress/regress-801171.js b/deps/v8/test/mjsunit/regress/regress-801171.js
new file mode 100644
index 0000000000..4bd85eeafc
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-801171.js
@@ -0,0 +1,20 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let called_custom_unicode_getter = false;
+const re = /./;
+
+function f() {
+ re.__defineGetter__("unicode", function() {
+ called_custom_unicode_getter = true;
+ });
+ return 2;
+}
+
+assertEquals(["","",], re[Symbol.split]("abc", { valueOf: f }));
+
+// The spec mandates retrieving the regexp instance's flags before
+// ToUint32(limit), i.e. the unicode getter must still be unmodified when
+// flags are retrieved.
+assertFalse(called_custom_unicode_getter);
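A standalone way to observe the same ordering, assuming only that the default RegExp.prototype.flags getter reads the unicode property (among others) and that the limit coercion is observable:

    const events = [];
    const re = /./;
    Object.defineProperty(re, 'unicode', {
      get() { events.push('unicode'); return false; },
      configurable: true,
    });
    const limit = { valueOf() { events.push('limit'); return 2; } };
    re[Symbol.split]('abc', limit);
    console.log(events);   // ['unicode', 'limit']: flags are read first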
diff --git a/deps/v8/test/mjsunit/regress/regress-801772.js b/deps/v8/test/mjsunit/regress/regress-801772.js
new file mode 100644
index 0000000000..06597e251a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-801772.js
@@ -0,0 +1,9 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function foo(f) { f(); }
+
+foo(function arguments() {
+ function skippable() { }
+});
diff --git a/deps/v8/test/mjsunit/regress/regress-802060.js b/deps/v8/test/mjsunit/regress/regress-802060.js
new file mode 100644
index 0000000000..e975615484
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-802060.js
@@ -0,0 +1,24 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function assertEquals(expected, found) {
+ found.length !== expected.length;
+}
+assertEquals([], [])
+assertEquals("a", "a");
+assertEquals([], []);
+function f() {
+ assertEquals(0, undefined);
+}
+try {
+ f();
+} catch (e) {
+}
+%OptimizeFunctionOnNextCall(f);
+try {
+ f();
+} catch (e) {
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-789764.js b/deps/v8/test/mjsunit/regress/regress-crbug-789764.js
new file mode 100644
index 0000000000..c377e644fc
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-789764.js
@@ -0,0 +1,15 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Original repro (used to crash):
+_v3 = ({ _v7 = (function outer() {
+ for ([...[]][function inner() {}] in []) {
+ }
+ })} = {}) => {
+};
+_v3();
+
+// Smaller repro (used to crash):
+a = (b = !function outer() { for (function inner() {}.foo in []) {} }) => {};
+a();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-791245-1.js b/deps/v8/test/mjsunit/regress/regress-crbug-791245-1.js
new file mode 100644
index 0000000000..0d51f8a4a0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-791245-1.js
@@ -0,0 +1,18 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+const s = new Map;
+
+function foo(s) {
+ const i = s[Symbol.iterator]();
+ i.next();
+ return i;
+}
+
+console.log(foo(s));
+console.log(foo(s));
+%OptimizeFunctionOnNextCall(foo);
+console.log(foo(s));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-791245-2.js b/deps/v8/test/mjsunit/regress/regress-crbug-791245-2.js
new file mode 100644
index 0000000000..6734ed2baa
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-791245-2.js
@@ -0,0 +1,18 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+const s = new Set;
+
+function foo(s) {
+ const i = s[Symbol.iterator]();
+ i.next();
+ return i;
+}
+
+console.log(foo(s));
+console.log(foo(s));
+%OptimizeFunctionOnNextCall(foo);
+console.log(foo(s));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-795922.js b/deps/v8/test/mjsunit/regress/regress-crbug-795922.js
new file mode 100644
index 0000000000..da2b36740e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-795922.js
@@ -0,0 +1,9 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows(
+ // Should throw a syntax error, but not crash.
+ "({ __proto__: null, __proto__: 1 })",
+ SyntaxError
+);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-798644.js b/deps/v8/test/mjsunit/regress/regress-crbug-798644.js
new file mode 100644
index 0000000000..c878a6fda8
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-798644.js
@@ -0,0 +1,21 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+let arr = [];
+// Make the array large enough to trigger re-checking for compaction.
+arr[1000] = 0x1234;
+
+arr.__defineGetter__(256, function () {
+ // Remove the getter so we can compact the array.
+ delete arr[256];
+ // Trigger compaction.
+ arr.unshift(1.1);
+});
+
+let results = Object.entries(arr);
+%HeapObjectVerify(results);
+%HeapObjectVerify(arr);
+let str = results.toString();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-800077.js b/deps/v8/test/mjsunit/regress/regress-crbug-800077.js
new file mode 100644
index 0000000000..13679073fe
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-800077.js
@@ -0,0 +1,6 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var sample = new Float64Array(1);
+Reflect.has(sample, undefined);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-800810.js b/deps/v8/test/mjsunit/regress/regress-crbug-800810.js
new file mode 100644
index 0000000000..22ac38833e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-800810.js
@@ -0,0 +1,13 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var array = [];
+Object.defineProperty(array , 506519, {});
+Object.defineProperty(array , 3, {
+ get: function () {
+ Object.defineProperty(array , undefined, {
+ })
+ }
+});
+array.includes(61301);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-807096.js b/deps/v8/test/mjsunit/regress/regress-crbug-807096.js
new file mode 100644
index 0000000000..845120db6a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-807096.js
@@ -0,0 +1,27 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --no-lazy
+
+// For regression testing, it's important that these functions are:
+// 1) toplevel
+// 2) arrow functions with single-expression bodies
+// 3) eagerly compiled
+
+let f = ({a = (({b = {a = c} = {
+ a: 0x1234
+}}) => 1)({})}, c) => 1;
+
+assertThrows(() => f({}), ReferenceError);
+
+let g = ({a = (async ({b = {a = c} = {
+ a: 0x1234
+}}) => 1)({})}, c) => a;
+
+testAsync(assert => {
+ assert.plan(1);
+ g({}).catch(e => {
+ assert.equals("ReferenceError", e.name);
+ });
+});
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-808192.js b/deps/v8/test/mjsunit/regress/regress-crbug-808192.js
new file mode 100644
index 0000000000..3336c0043e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-808192.js
@@ -0,0 +1,32 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// TODO(cbruni): enable always-opt once v8:7438 is fixed
+// Flags: --expose-gc --no-always-opt
+
+const f = eval(`(function f(i) {
+ if (i == 0) {
+ class Derived extends Object {
+ constructor() {
+ super();
+ ${"this.a=1;".repeat(0x3fffe-8)}
+ }
+ }
+ return Derived;
+ }
+
+ class DerivedN extends f(i-1) {
+ constructor() {
+ super();
+ ${"this.a=1;".repeat(0x40000-8)}
+ }
+ }
+
+ return DerivedN;
+})`);
+
+let a = new (f(0x7ff))();
+a.a = 1;
+gc();
+assertEquals(1, a.a);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-813427.js b/deps/v8/test/mjsunit/regress/regress-crbug-813427.js
new file mode 100644
index 0000000000..95fa015de2
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-813427.js
@@ -0,0 +1,49 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Create {count} property assignments.
+function createPropertiesAssignment(count) {
+ let result = "";
+ for (let i = 0; i < count; i++) {
+ result += "this.p"+i+" = undefined;";
+ }
+ return result;
+}
+
+function testSubclassProtoProperties(count) {
+ const MyClass = eval(`(class MyClass {
+ constructor() {
+ ${createPropertiesAssignment(count)}
+ }
+ });`);
+
+ class BaseClass {};
+ class SubClass extends BaseClass {
+ constructor() {
+ super()
+ }
+ };
+
+ const boundMyClass = MyClass.bind();
+ %HeapObjectVerify(boundMyClass);
+
+ SubClass.__proto__ = boundMyClass;
+ var instance = new SubClass();
+
+ %HeapObjectVerify(instance);
+ // Create some more instances to complete in-object slack tracking.
+ let results = [];
+ for (let i = 0; i < 4000; i++) {
+ results.push(new SubClass());
+ }
+ var instance = new SubClass();
+ %HeapObjectVerify(instance);
+}
+
+
+for (let count = 0; count < 10; count++) {
+ testSubclassProtoProperties(count);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-7245.js b/deps/v8/test/mjsunit/regress/regress-v8-7245.js
new file mode 100644
index 0000000000..c1a9df2bb3
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-7245.js
@@ -0,0 +1,6 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const { revoke } = Proxy.revocable({}, {});
+assertEquals("", revoke.name);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-791810.js b/deps/v8/test/mjsunit/regress/wasm/regress-791810.js
new file mode 100644
index 0000000000..cd6c4e2728
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-791810.js
@@ -0,0 +1,21 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addFunction('test', kSig_i_i)
+ .addBody([
+ kExprGetLocal, 0x00, // get_local 0
+ kExprBlock, kWasmStmt, // block
+ kExprBr, 0x00, // br depth=0
+ kExprEnd, // end
+ kExprBlock, kWasmStmt, // block
+ kExprBr, 0x00, // br depth=0
+ kExprEnd, // end
+ kExprBr, 0x00, // br depth=0
+ ])
+ .exportFunc();
+builder.instantiate();
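These wasm regression tests share one shape: assemble a module with WasmModuleBuilder, writing function bodies as raw bytecode arrays annotated opcode by opcode, and let instantiation itself be the test, since it must validate and compile without crashing the decoder or the compiler. A minimal sketch using the same helper files:

    load('test/mjsunit/wasm/wasm-constants.js');
    load('test/mjsunit/wasm/wasm-module-builder.js');

    const builder = new WasmModuleBuilder();
    builder.addFunction('id', kSig_i_i)    // (param i32) (result i32)
        .addBody([kExprGetLocal, 0])       // get_local 0
        .exportFunc();
    const instance = builder.instantiate();  // compiling is the real test
    assertEquals(7, instance.exports.id(7));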
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-793551.js b/deps/v8/test/mjsunit/regress/wasm/regress-793551.js
new file mode 100644
index 0000000000..8aa0241923
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-793551.js
@@ -0,0 +1,20 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addFunction('test', kSig_i_i)
+ .addBody([
+ // body:
+ kExprGetLocal, 0, // get_local 0
+ kExprGetLocal, 0, // get_local 0
+ kExprLoop, kWasmStmt, // loop
+ kExprBr, 0, // br depth=0
+ kExprEnd, // end
+ kExprUnreachable, // unreachable
+ ])
+ .exportFunc();
+builder.instantiate();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-797846.js b/deps/v8/test/mjsunit/regress/wasm/regress-797846.js
new file mode 100644
index 0000000000..6a4fd5c5f7
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-797846.js
@@ -0,0 +1,14 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+// We need a module with one valid function.
+const builder = new WasmModuleBuilder();
+builder.addFunction('test', kSig_v_v).addBody([]);
+
+const buffer = builder.toBuffer();
+assertPromiseResult(
+ WebAssembly.compile(buffer), _ => Realm.createAllowCrossRealmAccess());
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-800756.js b/deps/v8/test/mjsunit/regress/wasm/regress-800756.js
new file mode 100644
index 0000000000..2d29997cef
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-800756.js
@@ -0,0 +1,15 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addMemory(16, 32);
+builder.addFunction(undefined, kSig_i_iii).addBody([
+ kExprI32Const, 0, // i32.const 0
+ kExprI32LoadMem8S, 0, 0, // i32.load8_s offset=0 align=0
+ kExprI32Eqz, // i32.eqz
+]);
+builder.instantiate();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-801850.js b/deps/v8/test/mjsunit/regress/wasm/regress-801850.js
new file mode 100644
index 0000000000..ad6ff4c432
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-801850.js
@@ -0,0 +1,11 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+var builder = new WasmModuleBuilder();
+let module = new WebAssembly.Module(builder.toBuffer());
+var worker = new Worker('onmessage = function() {};');
+worker.postMessage(module)
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-802244.js b/deps/v8/test/mjsunit/regress/wasm/regress-802244.js
new file mode 100644
index 0000000000..0b8decb637
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-802244.js
@@ -0,0 +1,22 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addFunction(undefined, kSig_v_iii).addBody([
+ kExprI32Const, 0x41, // i32.const 0x41
+ kExprLoop, 0x7c, // loop f64
+ kExprGetLocal, 0x00, // get_local 0
+ kExprGetLocal, 0x01, // get_local 1
+ kExprBrIf, 0x01, // br_if depth=1
+ kExprGetLocal, 0x00, // get_local 0
+ kExprI32Rol, // i32.rol
+ kExprBrIf, 0x00, // br_if depth=0
+ kExprUnreachable, // unreachable
+ kExprEnd, // end
+ kExprUnreachable, // unreachable
+]);
+builder.instantiate();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-808980.js b/deps/v8/test/mjsunit/regress/wasm/regress-808980.js
new file mode 100644
index 0000000000..884572b895
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-808980.js
@@ -0,0 +1,28 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --throws
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+let kTableSize = 3;
+
+var builder = new WasmModuleBuilder();
+var sig_index1 = builder.addType(kSig_i_v);
+builder.addFunction('main', kSig_i_ii).addBody([
+ kExprGetLocal,
+ 0,
+ kExprCallIndirect,
+ sig_index1,
+ kTableZero
+]).exportAs('main');
+builder.setFunctionTableBounds(kTableSize, kTableSize);
+var m1_bytes = builder.toBuffer();
+var m1 = new WebAssembly.Module(m1_bytes);
+
+var serialized_m1 = %SerializeWasmModule(m1);
+var m1_clone = %DeserializeWasmModule(serialized_m1, m1_bytes);
+var i1 = new WebAssembly.Instance(m1_clone);
+
+i1.exports.main(123123);
diff --git a/deps/v8/test/mjsunit/serialize-after-execute.js b/deps/v8/test/mjsunit/serialize-after-execute.js
new file mode 100644
index 0000000000..a3e6bc82ae
--- /dev/null
+++ b/deps/v8/test/mjsunit/serialize-after-execute.js
@@ -0,0 +1,15 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --cache=after-execute
+
+function g() {
+ function h() {
+ function k() { return 0; };
+ return k;
+ }
+ return h();
+}
+
+g();
diff --git a/deps/v8/test/mjsunit/serialize-embedded-error.js b/deps/v8/test/mjsunit/serialize-embedded-error.js
index 473c931b30..320fe475b0 100644
--- a/deps/v8/test/mjsunit/serialize-embedded-error.js
+++ b/deps/v8/test/mjsunit/serialize-embedded-error.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// --serialize-toplevel --cache=code
+// --cache=code
var caught = false;
try {
diff --git a/deps/v8/test/mjsunit/serialize-ic.js b/deps/v8/test/mjsunit/serialize-ic.js
index 8e5cd2fd50..74821a9ec3 100644
--- a/deps/v8/test/mjsunit/serialize-ic.js
+++ b/deps/v8/test/mjsunit/serialize-ic.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --cache=code --serialize-toplevel
+// Flags: --cache=code
var foo = [];
foo[0] = "bar";
diff --git a/deps/v8/test/mjsunit/testcfg.py b/deps/v8/test/mjsunit/testcfg.py
index ff84bc3be5..bc9d69ff33 100644
--- a/deps/v8/test/mjsunit/testcfg.py
+++ b/deps/v8/test/mjsunit/testcfg.py
@@ -31,7 +31,6 @@ import re
from testrunner.local import testsuite
from testrunner.objects import testcase
-FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")
FILES_PATTERN = re.compile(r"//\s+Files:(.*)")
ENV_PATTERN = re.compile(r"//\s+Environment Variables:(.*)")
SELF_SCRIPT_PATTERN = re.compile(r"//\s+Env: TEST_FILE_NAME")
@@ -39,11 +38,7 @@ MODULE_PATTERN = re.compile(r"^// MODULE$", flags=re.MULTILINE)
NO_HARNESS_PATTERN = re.compile(r"^// NO HARNESS$", flags=re.MULTILINE)
-class MjsunitTestSuite(testsuite.TestSuite):
-
- def __init__(self, name, root):
- super(MjsunitTestSuite, self).__init__(name, root)
-
+class TestSuite(testsuite.TestSuite):
def ListTests(self, context):
tests = []
for dirname, dirs, files in os.walk(self.root, followlinks=True):
@@ -56,19 +51,19 @@ class MjsunitTestSuite(testsuite.TestSuite):
fullpath = os.path.join(dirname, filename)
relpath = fullpath[len(self.root) + 1 : -3]
testname = relpath.replace(os.path.sep, "/")
- test = testcase.TestCase(self, testname)
+ test = self._create_test(testname)
tests.append(test)
return tests
- def GetParametersForTestCase(self, testcase, context):
- source = self.GetSourceForTest(testcase)
+ def _test_class(self):
+ return TestCase
- flags = testcase.flags + context.mode_flags
- env = self._get_env(source)
- flags_match = re.findall(FLAGS_PATTERN, source)
- for match in flags_match:
- flags += match.strip().split()
+class TestCase(testcase.TestCase):
+ def __init__(self, *args, **kwargs):
+ super(TestCase, self).__init__(*args, **kwargs)
+
+ source = self.get_source()
files_list = [] # List of file names to append to command arguments.
files_match = FILES_PATTERN.search(source);
@@ -79,28 +74,32 @@ class MjsunitTestSuite(testsuite.TestSuite):
files_match = FILES_PATTERN.search(source, files_match.end())
else:
break
- files = [ os.path.normpath(os.path.join(self.root, '..', '..', f))
+ files = [ os.path.normpath(os.path.join(self.suite.root, '..', '..', f))
for f in files_list ]
- testfilename = os.path.join(self.root, testcase.path + self.suffix())
+ testfilename = os.path.join(self.suite.root,
+ self.path + self._get_suffix())
if SELF_SCRIPT_PATTERN.search(source):
files = (
["-e", "TEST_FILE_NAME=\"%s\"" % testfilename.replace("\\", "\\\\")] +
files)
- if not context.no_harness and not NO_HARNESS_PATTERN.search(source):
- files.append(os.path.join(self.root, "mjsunit.js"))
+ if NO_HARNESS_PATTERN.search(source):
+ mjsunit_files = []
+ else:
+ mjsunit_files = [os.path.join(self.suite.root, "mjsunit.js")]
+ files_suffix = []
if MODULE_PATTERN.search(source):
- files.append("--module")
- files.append(testfilename)
-
- all_files = list(files)
- if context.isolates:
- all_files += ["--isolate"] + files
+ files_suffix.append("--module")
+ files_suffix.append(testfilename)
- return all_files, flags, env
+ self._source_files = files
+ self._source_flags = self._parse_source_flags(source)
+ self._mjsunit_files = mjsunit_files
+ self._files_suffix = files_suffix
+ self._env = self._parse_source_env(source)
- def _get_env(self, source):
+ def _parse_source_env(self, source):
env_match = ENV_PATTERN.search(source)
env = {}
if env_match:
@@ -109,11 +108,25 @@ class MjsunitTestSuite(testsuite.TestSuite):
env[var] = value
return env
- def GetSourceForTest(self, testcase):
- filename = os.path.join(self.root, testcase.path + self.suffix())
- with open(filename) as f:
- return f.read()
+ def _get_source_flags(self):
+ return self._source_flags
+
+ def _get_files_params(self, ctx):
+ files = list(self._source_files)
+ if not ctx.no_harness:
+ files += self._mjsunit_files
+ files += self._files_suffix
+ if ctx.isolates:
+ files += ['--isolate'] + files
+
+ return files
+
+ def _get_cmd_env(self):
+ return self._env
+
+ def _get_source_path(self):
+ return os.path.join(self.suite.root, self.path + self._get_suffix())
def GetSuite(name, root):
- return MjsunitTestSuite(name, root)
+ return TestSuite(name, root)
diff --git a/deps/v8/test/mjsunit/wasm/errors.js b/deps/v8/test/mjsunit/wasm/errors.js
index 89066d671a..a90236459f 100644
--- a/deps/v8/test/mjsunit/wasm/errors.js
+++ b/deps/v8/test/mjsunit/wasm/errors.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-wasm
+// Flags: --expose-wasm --allow-natives-syntax
'use strict';
@@ -170,3 +170,19 @@ function assertConversionError(bytes, imports, msg) {
kExprI64Const, 0
]).exportFunc().end().toBuffer(), {}, "invalid type");
})();
+
+
+(function InternalDebugTrace() {
+ var builder = new WasmModuleBuilder();
+ var sig = builder.addType(kSig_i_dd);
+ builder.addImport("mod", "func", sig);
+ builder.addFunction("main", sig)
+ .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprCallFunction, 0])
+ .exportAs("main")
+ var main = builder.instantiate({
+ mod: {
+ func: ()=>{%DebugTrace();}
+ }
+ }).exports.main;
+ main();
+})();
diff --git a/deps/v8/test/mjsunit/wasm/grow-memory-detaching.js b/deps/v8/test/mjsunit/wasm/grow-memory-detaching.js
new file mode 100644
index 0000000000..da6516afd7
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/grow-memory-detaching.js
@@ -0,0 +1,65 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+let module = (() => {
+ let builder = new WasmModuleBuilder();
+ builder.addMemory(1, kV8MaxPages, false);
+ builder.addFunction("grow_memory", kSig_i_i)
+ .addBody([kExprGetLocal, 0, kExprGrowMemory, kMemoryZero])
+ .exportFunc();
+ builder.exportMemoryAs("memory");
+ return builder.toModule();
+})();
+
+(function TestDetachingViaAPI() {
+ print("TestDetachingViaAPI...");
+ let memory = new WebAssembly.Memory({initial: 1, maximum: 100});
+ let growMem = (pages) => memory.grow(pages);
+
+ let b1 = memory.buffer;
+ assertEquals(kPageSize, b1.byteLength);
+
+ growMem(0);
+ let b2 = memory.buffer;
+ assertFalse(b1 === b2);
+ assertEquals(0, b1.byteLength);
+ assertEquals(kPageSize, b2.byteLength);
+
+ growMem(1);
+ let b3 = memory.buffer;
+ assertFalse(b1 === b3);
+ assertFalse(b2 === b3);
+ assertEquals(0, b1.byteLength);
+ assertEquals(0, b2.byteLength);
+ assertEquals(2 * kPageSize, b3.byteLength);
+})();
+
+(function TestDetachingViaBytecode() {
+ print("TestDetachingViaBytecode...");
+ let instance = new WebAssembly.Instance(module);
+ let growMem = instance.exports.grow_memory;
+ let memory = instance.exports.memory;
+
+ let b1 = memory.buffer;
+ assertEquals(kPageSize, b1.byteLength);
+
+ growMem(0);
+ let b2 = memory.buffer;
+ assertFalse(b1 === b2);
+ assertEquals(0, b1.byteLength);
+ assertEquals(kPageSize, b2.byteLength);
+
+ growMem(1);
+ let b3 = memory.buffer;
+ assertFalse(b1 === b3);
+ assertFalse(b2 === b3);
+ assertEquals(0, b1.byteLength);
+ assertEquals(0, b2.byteLength);
+ assertEquals(2 * kPageSize, b3.byteLength);
+})();
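Both closures check the same contract from two directions: growing a wasm memory, even by zero pages, detaches the previously vended ArrayBuffer (its byteLength drops to 0), and memory.buffer then hands out a fresh one. For example:

    const mem = new WebAssembly.Memory({initial: 1, maximum: 2});
    const before = mem.buffer;
    mem.grow(0);                        // a zero-page grow still detaches
    console.log(before.byteLength);     // 0: the old buffer is detached
    console.log(mem.buffer !== before); // true: a fresh buffer is vended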
diff --git a/deps/v8/test/mjsunit/wasm/indirect-tables.js b/deps/v8/test/mjsunit/wasm/indirect-tables.js
index 4c6d9c9f3b..88d1bb719a 100644
--- a/deps/v8/test/mjsunit/wasm/indirect-tables.js
+++ b/deps/v8/test/mjsunit/wasm/indirect-tables.js
@@ -602,6 +602,47 @@ function js_div(a, b) { return (a / b) | 0; }
/signature mismatch/);
})();
+(function IndirectCallIntoOtherInstance() {
+ print("IndirectCallIntoOtherInstance...");
+ var mem_1 = new WebAssembly.Memory({initial: 1});
+ var mem_2 = new WebAssembly.Memory({initial: 1});
+ var view_1 = new Int32Array(mem_1.buffer);
+ var view_2 = new Int32Array(mem_2.buffer);
+ view_1[0] = 1;
+ view_2[0] = 1000;
+
+ let builder = new WasmModuleBuilder();
+ let sig = builder.addType(kSig_i_v);
+ builder.addFunction('main', kSig_i_i)
+ .addBody([kExprGetLocal, 0, kExprCallIndirect, sig, kTableZero])
+ .exportAs('main');
+ builder.addImportedMemory('', 'memory', 1);
+
+ builder.setFunctionTableBounds(1, 1);
+ builder.addExportOfKind('table', kExternalTable);
+
+ let module1 = new WebAssembly.Module(builder.toBuffer());
+ let instance1 = new WebAssembly.Instance(module1, {'':{memory:mem_1}});
+
+ builder = new WasmModuleBuilder();
+ builder.addFunction('main', kSig_i_v).addBody([kExprI32Const, 0, kExprI32LoadMem, 0, 0]);
+ builder.addImportedTable('', 'table');
+ builder.addFunctionTableInit(0, false, [0], true);
+ builder.addImportedMemory('', 'memory', 1);
+
+
+ let module2 = new WebAssembly.Module(builder.toBuffer());
+ let instance2 = new WebAssembly.Instance(module2, {
+ '': {
+ table: instance1.exports.table,
+ memory: mem_2
+ }
+ });
+
+ assertEquals(instance1.exports.main(0), 1000);
+})();
+
+
(function ImportedFreestandingTable() {
print("ImportedFreestandingTable...");
@@ -665,42 +706,40 @@ function js_div(a, b) { return (a / b) | 0; }
test(1, 3);
})();
-(function IndirectCallIntoOtherInstance() {
- print("IndirectCallIntoOtherInstance...");
- var mem_1 = new WebAssembly.Memory({initial: 1});
- var mem_2 = new WebAssembly.Memory({initial: 1});
- var view_1 = new Int32Array(mem_1.buffer);
- var view_2 = new Int32Array(mem_2.buffer);
- view_1[0] = 1;
- view_2[0] = 1000;
-
- let builder = new WasmModuleBuilder();
- let sig = builder.addType(kSig_i_v);
- builder.addFunction('main', kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprCallIndirect, sig, kTableZero])
- .exportAs('main');
- builder.addImportedMemory('', 'memory', 1);
- builder.setFunctionTableBounds(1, 1);
- builder.addExportOfKind('table', kExternalTable);
-
- let module1 = new WebAssembly.Module(builder.toBuffer());
- let instance1 = new WebAssembly.Instance(module1, {'':{memory:mem_1}});
-
- builder = new WasmModuleBuilder();
- builder.addFunction('main', kSig_i_v).addBody([kExprI32Const, 0, kExprI32LoadMem, 0, 0]);
- builder.addImportedTable('', 'table');
- builder.addFunctionTableInit(0, false, [0], true);
- builder.addImportedMemory('', 'memory', 1);
+// Remove this test when v8:7232 is addressed comprehensively.
+(function TablesAreImmutableInWasmCallstacks() {
+ print('TablesAreImmutableInWasmCallstacks...');
+ let table = new WebAssembly.Table({initial:2, element:'anyfunc'});
+ let builder = new WasmModuleBuilder();
+ builder.addImport('', 'mutator', kSig_v_v);
+ builder.addFunction('main', kSig_v_v)
+ .addBody([
+ kExprCallFunction, 0
+ ]).exportAs('main');
- let module2 = new WebAssembly.Module(builder.toBuffer());
- let instance2 = new WebAssembly.Instance(module2, {
+ let module = new WebAssembly.Module(builder.toBuffer());
+ let instance = new WebAssembly.Instance(module, {
'': {
- table: instance1.exports.table,
- memory: mem_2
+ 'mutator': () => {table.set(0, null);}
}
});
- assertEquals(instance1.exports.main(0), 1000);
+ table.set(0, instance.exports.main);
+
+ try {
+ instance.exports.main();
+ assertUnreached();
+ } catch (e) {
+ assertTrue(e instanceof RangeError);
+ }
+ try {
+ instance.exports.main();
+ assertUnreached();
+ } catch (e) {
+ assertTrue(e instanceof RangeError);
+ }
+ table.set(0, null);
+ assertEquals(null, table.get(0));
})();
diff --git a/deps/v8/test/mjsunit/wasm/lazy-compilation.js b/deps/v8/test/mjsunit/wasm/lazy-compilation.js
index 3d840398a8..fc41fbd622 100644
--- a/deps/v8/test/mjsunit/wasm/lazy-compilation.js
+++ b/deps/v8/test/mjsunit/wasm/lazy-compilation.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --wasm-lazy-compilation
+// Flags: --wasm-lazy-compilation --allow-natives-syntax
load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
@@ -46,6 +46,10 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
instance2.exports.call_store(3);
assertEquals(3, mem1[0]);
assertEquals(0, mem2[0]);
+ %FreezeWasmLazyCompilation(instance1);
+ %FreezeWasmLazyCompilation(instance2);
+ instance2.exports.call_store(7);
+ assertEquals(7, mem1[0]);
})();
(function exportImportedFunction() {
@@ -60,4 +64,37 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
const instance2 = builder2.instantiate({A: instance1.exports});
instance2.exports.foo();
+ %FreezeWasmLazyCompilation(instance1);
+ %FreezeWasmLazyCompilation(instance2);
+ instance2.exports.foo();
+})();
+
+(function exportImportedFunctionWithDifferentMemory() {
+ print(arguments.callee.name);
+ const builder1 = new WasmModuleBuilder();
+ builder1.addMemory(1, 1, true);
+ builder1.addFunction('store', kSig_v_i)
+ .addBody([
+        kExprI32Const, 0,        // i32.const 0
+ kExprGetLocal, 0, // get_local 0
+ kExprI32StoreMem, 0, 0, // i32.store offset=0 align=0
+ ])
+ .exportFunc();
+ const instance1 = builder1.instantiate();
+ const mem1 = new Int32Array(instance1.exports.memory.buffer);
+
+ const builder2 = new WasmModuleBuilder();
+ builder2.addMemory(1, 1, true);
+ const imp_idx = builder2.addImport('A', 'store', kSig_v_i);
+ builder2.addExport('exp_store', imp_idx);
+ const instance2 = builder2.instantiate({A: instance1.exports});
+ const mem2 = new Int32Array(instance2.exports.memory.buffer);
+
+ instance2.exports.exp_store(3);
+ assertEquals(3, mem1[0]);
+ assertEquals(0, mem2[0]);
+ %FreezeWasmLazyCompilation(instance1);
+ %FreezeWasmLazyCompilation(instance2);
+ instance2.exports.exp_store(7);
+ assertEquals(7, mem1[0]);
})();
diff --git a/deps/v8/test/mjsunit/wasm/many-parameters.js b/deps/v8/test/mjsunit/wasm/many-parameters.js
index 03d7e09ef3..a56619a6ad 100644
--- a/deps/v8/test/mjsunit/wasm/many-parameters.js
+++ b/deps/v8/test/mjsunit/wasm/many-parameters.js
@@ -12,10 +12,13 @@ let type_const = [wasmI32Const, wasmF32Const, wasmF64Const];
function f(values, shift, num_const_params, ...args) {
assertEquals(
values.length + num_const_params, args.length, 'number of arguments');
+ const expected = idx =>
+ idx < values.length ? values[(idx + shift) % values.length] : idx;
+ const msg = 'shifted by ' + shift + ': ' +
+ 'expected [' + args.map((_, i) => expected(i)).join(', ') + '], got [' +
+ args.join(', ') + ']';
args.forEach((arg_val, idx) => {
- const expected =
- idx < values.length ? values[(idx + shift) % values.length] : idx;
- assertEquals(expected, arg_val, 'arg #' + idx + ', shifted by ' + shift);
+ assertEquals(expected(idx), arg_val, 'arg #' + idx + ', ' + msg);
});
}
diff --git a/deps/v8/test/mjsunit/wasm/module-memory.js b/deps/v8/test/mjsunit/wasm/module-memory.js
index f5b5981436..e9d2bb954d 100644
--- a/deps/v8/test/mjsunit/wasm/module-memory.js
+++ b/deps/v8/test/mjsunit/wasm/module-memory.js
@@ -172,3 +172,26 @@ function testOOBThrows() {
}
testOOBThrows();
+
+function testAddressSpaceLimit() {
+ // 1TiB, see wasm-memory.h
+ const kMaxAddressSpace = 1 * 1024 * 1024 * 1024 * 1024;
+ const kAddressSpacePerMemory = 8 * 1024 * 1024 * 1024;
+
+ try {
+ let memories = [];
+ let address_space = 0;
+ while (address_space <= kMaxAddressSpace + 1) {
+ memories.push(new WebAssembly.Memory({initial: 1}));
+ address_space += kAddressSpacePerMemory;
+ }
+ } catch (e) {
+ assertTrue(e instanceof RangeError);
+ return;
+ }
+ failWithMessage("allocated too much memory");
+}
+
+if(%IsWasmTrapHandlerEnabled()) {
+ testAddressSpaceLimit();
+}
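The constants encode the guard-region scheme: with trap-handler bounds checks, each wasm memory reserves about 8 GiB of address space (reserved, not committed), and the sanctioned total is 1 TiB, so the loop should hit a RangeError after roughly 1024 / 8 = 128 allocations. As arithmetic:

    const kMaxAddressSpace = 1024 ** 4;          // 1 TiB
    const kPerMemory = 8 * 1024 ** 3;            // ~8 GiB reserved per memory
    console.log(kMaxAddressSpace / kPerMemory);  // 128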
diff --git a/deps/v8/test/mjsunit/wasm/shared-memory.js b/deps/v8/test/mjsunit/wasm/shared-memory.js
index fa51a8307f..bbe89a3fe5 100644
--- a/deps/v8/test/mjsunit/wasm/shared-memory.js
+++ b/deps/v8/test/mjsunit/wasm/shared-memory.js
@@ -7,20 +7,23 @@
load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
-function assertMemoryIsValid(memory) {
+function assertMemoryIsValid(memory, shared) {
assertSame(WebAssembly.Memory.prototype, memory.__proto__);
assertSame(WebAssembly.Memory, memory.constructor);
assertTrue(memory instanceof Object);
assertTrue(memory instanceof WebAssembly.Memory);
+ if (shared) {
+ assertTrue(memory.buffer instanceof SharedArrayBuffer);
+ // Assert that the buffer is frozen when memory is shared.
+ assertTrue(Object.isFrozen(memory.buffer));
+ }
}
(function TestConstructorWithShared() {
print("TestConstructorWithShared");
let memory = new WebAssembly.Memory({
initial: 0, maximum: 10, shared: true});
- assertMemoryIsValid(memory);
- // Assert that the buffer is frozen when memory is shared.
- assertTrue(Object.isFrozen(memory.buffer));
+ assertMemoryIsValid(memory, true);
})();
(function TestConstructorWithUndefinedShared() {
@@ -36,7 +39,7 @@ function assertMemoryIsValid(memory) {
// For numeric values, shared = true.
let memory = new WebAssembly.Memory({
initial: 0, maximum: 10, shared: 2098665});
- assertMemoryIsValid(memory);
+ assertMemoryIsValid(memory, true);
})();
(function TestConstructorWithEmptyStringShared() {
@@ -101,3 +104,29 @@ function assertMemoryIsValid(memory) {
assertThrows(() => new WebAssembly.Instance(module,
{m: {imported_mem: memory}}), WebAssembly.LinkError);
})();
+
+(function TestInstantiateWithSharedDefined() {
+ print("TestInstantiateWithSharedDefined");
+ let builder = new WasmModuleBuilder();
+ builder.addMemory(2, 10, true, "shared");
+ let module = new WebAssembly.Module(builder.toBuffer());
+ let instance = new WebAssembly.Instance(module);
+ assertMemoryIsValid(instance.exports.memory, true);
+})();
+
+(function TestAtomicOpWithSharedMemoryDefined() {
+ print("TestAtomicOpWithSharedMemoryDefined");
+ let builder = new WasmModuleBuilder();
+ builder.addMemory(2, 10, false, "shared");
+ builder.addFunction("main", kSig_i_ii)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kAtomicPrefix,
+ kExprI32AtomicAdd, 2, 0])
+ .exportFunc();
+ let module = new WebAssembly.Module(builder.toBuffer());
+ let instance = new WebAssembly.Instance(module);
+ assertEquals(0, instance.exports.main(0, 0x11111111));
+ assertEquals(0x11111111, instance.exports.main(0, 0x11111111));
+})();
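The two assertions lean on atomic add returning the value observed before the addition, so the first call yields 0 and the second yields the previously stored operand. The same semantics are visible from JS via Atomics:

    const ta = new Int32Array(new SharedArrayBuffer(4));
    console.log(Atomics.add(ta, 0, 0x11111111));  // 0: the old value
    console.log(Atomics.load(ta, 0));             // 286331153 (0x11111111)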
diff --git a/deps/v8/test/mjsunit/wasm/trap-location.js b/deps/v8/test/mjsunit/wasm/trap-location.js
index 0c646c92cd..c4a0f4d787 100644
--- a/deps/v8/test/mjsunit/wasm/trap-location.js
+++ b/deps/v8/test/mjsunit/wasm/trap-location.js
@@ -86,7 +86,7 @@ let buffer = builder.toBuffer();
// Test async compilation and instantiation.
assertPromiseResult(WebAssembly.instantiate(buffer), pair => {
- testTrapLocations(pair.instance, 6);
+ testTrapLocations(pair.instance, 5);
});
// Test sync compilation and instantiation.
diff --git a/deps/v8/test/mjsunit/wasm/wasm-module-builder.js b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
index d21067b36e..c00c2c8226 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
@@ -121,9 +121,25 @@ class WasmFunctionBuilder {
return this;
}
+ getNumLocals() {
+ let total_locals = 0;
+ for (let l of this.locals || []) {
+ for (let type of ["i32", "i64", "f32", "f64", "s128"]) {
+ total_locals += l[type + "_count"] || 0;
+ }
+ }
+ return total_locals;
+ }
+
addLocals(locals, names) {
- this.locals = locals;
- this.local_names = names;
+ const old_num_locals = this.getNumLocals();
+ if (!this.locals) this.locals = []
+ this.locals.push(locals);
+ if (names) {
+ if (!this.local_names) this.local_names = [];
+ const missing_names = old_num_locals - this.local_names.length;
+ this.local_names.push(...new Array(missing_names), ...names);
+ }
return this;
}
@@ -409,7 +425,6 @@ class WasmModuleBuilder {
}
section.emit_u32v(wasm.memory.min);
if (has_max) section.emit_u32v(wasm.memory.max);
- if (wasm.memory.shared) section.emit_u8(1);
});
}
@@ -538,9 +553,7 @@ class WasmModuleBuilder {
for (let func of wasm.functions) {
// Function body length will be patched later.
let local_decls = [];
- let l = func.locals;
- if (l !== undefined) {
- let local_decls_count = 0;
+ for (let l of func.locals || []) {
if (l.i32_count > 0) {
local_decls.push({count: l.i32_count, type: kWasmI32});
}
diff --git a/deps/v8/test/mkgrokdump/mkgrokdump.cc b/deps/v8/test/mkgrokdump/mkgrokdump.cc
index 264779601b..75dac7a4a4 100644
--- a/deps/v8/test/mkgrokdump/mkgrokdump.cc
+++ b/deps/v8/test/mkgrokdump/mkgrokdump.cc
@@ -75,7 +75,7 @@ static int DumpHeapConstants(const char* argv0) {
for (i::Object* o = it.Next(); o != NULL; o = it.Next()) {
i::Map* m = i::Map::cast(o);
const char* n = NULL;
- intptr_t p = reinterpret_cast<intptr_t>(m) & 0x7ffff;
+ intptr_t p = reinterpret_cast<intptr_t>(m) & 0x7FFFF;
int t = m->instance_type();
ROOT_LIST(ROOT_LIST_CASE)
STRUCT_LIST(STRUCT_LIST_CASE)
@@ -93,17 +93,18 @@ static int DumpHeapConstants(const char* argv0) {
n = #camel_name; \
i = i::Heap::k##camel_name##RootIndex; \
}
- i::OldSpaces spit(heap);
+ i::PagedSpaces spit(heap);
i::PrintF("KNOWN_OBJECTS = {\n");
for (i::PagedSpace* s = spit.next(); s != NULL; s = spit.next()) {
i::HeapObjectIterator it(s);
// Code objects are generally platform-dependent.
- if (s->identity() == i::CODE_SPACE) continue;
+ if (s->identity() == i::CODE_SPACE || s->identity() == i::MAP_SPACE)
+ continue;
const char* sname = AllocationSpaceName(s->identity());
for (i::Object* o = it.Next(); o != NULL; o = it.Next()) {
const char* n = NULL;
i::Heap::RootListIndex i = i::Heap::kStrongRootListLength;
- intptr_t p = reinterpret_cast<intptr_t>(o) & 0x7ffff;
+ intptr_t p = reinterpret_cast<intptr_t>(o) & 0x7FFFF;
ROOT_LIST(ROOT_LIST_CASE)
if (n == NULL) continue;
if (!i::Heap::RootIsImmortalImmovable(i)) continue;
diff --git a/deps/v8/test/mkgrokdump/testcfg.py b/deps/v8/test/mkgrokdump/testcfg.py
index 3dcf80a6a1..de8e71f7ea 100644
--- a/deps/v8/test/mkgrokdump/testcfg.py
+++ b/deps/v8/test/mkgrokdump/testcfg.py
@@ -3,48 +3,46 @@
# found in the LICENSE file.
import os
-import difflib
from testrunner.local import testsuite
from testrunner.objects import testcase
+from testrunner.outproc import mkgrokdump
-class MkGrokdump(testsuite.TestSuite):
- SHELL = 'mkgrokdump'
+SHELL = 'mkgrokdump'
- def __init__(self, name, root):
- super(MkGrokdump, self).__init__(name, root)
+class TestSuite(testsuite.TestSuite):
+ def __init__(self, *args, **kwargs):
+ super(TestSuite, self).__init__(*args, **kwargs)
+
+ v8_path = os.path.dirname(os.path.dirname(os.path.abspath(self.root)))
+ self.expected_path = os.path.join(v8_path, 'tools', 'v8heapconst.py')
def ListTests(self, context):
- test = testcase.TestCase(self, self.SHELL)
+ test = self._create_test(SHELL)
return [test]
- def GetShellForTestCase(self, testcase):
- return self.SHELL
+ def _test_class(self):
+ return TestCase
- def GetParametersForTestCase(self, testcase, context):
- return [], [], {}
- def IsFailureOutput(self, testcase):
- output = testcase.output
- v8_path = os.path.dirname(os.path.dirname(os.path.abspath(self.root)))
- expected_path = os.path.join(v8_path, "tools", "v8heapconst.py")
- with open(expected_path) as f:
- expected = f.read()
- expected_lines = expected.splitlines()
- actual_lines = output.stdout.splitlines()
- diff = difflib.unified_diff(expected_lines, actual_lines, lineterm="",
- fromfile="expected_path")
- diffstring = '\n'.join(diff)
- if diffstring is not "":
- if "generated from a non-shipping build" in output.stdout:
- return False
- if not "generated from a shipping build" in output.stdout:
- output.stdout = "Unexpected output:\n\n" + output.stdout
- return True
- output.stdout = diffstring
- return True
- return False
+class TestCase(testcase.TestCase):
+ def _get_variant_flags(self):
+ return []
+
+ def _get_statusfile_flags(self):
+ return []
+
+ def _get_mode_flags(self, ctx):
+ return []
+
+ def get_shell(self):
+ return SHELL
+
+ @property
+ def output_proc(self):
+ return mkgrokdump.OutProc(self.expected_outcomes, self.suite.expected_path)
+
def GetSuite(name, root):
- return MkGrokdump(name, root)
+ return TestSuite(name, root)
diff --git a/deps/v8/test/mozilla/mozilla.status b/deps/v8/test/mozilla/mozilla.status
index d64b74f354..40334cdcd8 100644
--- a/deps/v8/test/mozilla/mozilla.status
+++ b/deps/v8/test/mozilla/mozilla.status
@@ -154,7 +154,7 @@
# Compiles a long chain of && or || operations, can time out under slower
# variants.
- 'js1_5/Expressions/regress-394673': [PASS, FAST_VARIANTS],
+ 'js1_5/Expressions/regress-394673': [PASS, NO_VARIANTS],
# This takes a long time to run (~100 seconds). It should only be run
# by the really patient.
@@ -879,10 +879,10 @@
['no_i18n == True and mode == debug', {
# Tests too slow for no18n debug.
- 'ecma_3/Statements/regress-302439': [PASS, FAST_VARIANTS],
+ 'ecma_3/Statements/regress-302439': [PASS, NO_VARIANTS],
'js1_5/Regress/regress-98901': [SKIP],
- 'ecma_3/RegExp/perlstress-001': [PASS, FAST_VARIANTS],
- 'js1_5/extensions/regress-311161': [FAIL_OK, FAST_VARIANTS],
+ 'ecma_3/RegExp/perlstress-001': [PASS, NO_VARIANTS],
+ 'js1_5/extensions/regress-311161': [FAIL_OK, NO_VARIANTS],
}], # 'no_i18n == True and mode == debug'
diff --git a/deps/v8/test/mozilla/testcfg.py b/deps/v8/test/mozilla/testcfg.py
index 46623d0848..8e69b7d9b7 100644
--- a/deps/v8/test/mozilla/testcfg.py
+++ b/deps/v8/test/mozilla/testcfg.py
@@ -30,6 +30,7 @@ import os
from testrunner.local import testsuite
from testrunner.objects import testcase
+from testrunner.outproc import mozilla
EXCLUDED = ["CVS", ".svn"]
@@ -54,10 +55,9 @@ TEST_DIRS = """
""".split()
-class MozillaTestSuite(testsuite.TestSuite):
-
+class TestSuite(testsuite.TestSuite):
def __init__(self, name, root):
- super(MozillaTestSuite, self).__init__(name, root)
+ super(TestSuite, self).__init__(name, root)
self.testroot = os.path.join(root, "data")
def ListTests(self, context):
@@ -77,37 +77,46 @@ class MozillaTestSuite(testsuite.TestSuite):
fullpath = os.path.join(dirname, filename)
relpath = fullpath[len(self.testroot) + 1 : -3]
testname = relpath.replace(os.path.sep, "/")
- case = testcase.TestCase(self, testname)
+ case = self._create_test(testname)
tests.append(case)
return tests
- def GetParametersForTestCase(self, testcase, context):
- files = [os.path.join(self.root, "mozilla-shell-emulation.js")]
- testfilename = testcase.path + ".js"
+ def _test_class(self):
+ return TestCase
+
+
+class TestCase(testcase.TestCase):
+ def _get_files_params(self, ctx):
+ files = [os.path.join(self.suite.root, "mozilla-shell-emulation.js")]
+ testfilename = self.path + ".js"
testfilepath = testfilename.split("/")
for i in xrange(len(testfilepath)):
- script = os.path.join(self.testroot,
+ script = os.path.join(self.suite.testroot,
reduce(os.path.join, testfilepath[:i], ""),
"shell.js")
if os.path.exists(script):
files.append(script)
- files.append(os.path.join(self.testroot, testfilename))
- flags = testcase.flags + context.mode_flags + ["--expose-gc"]
- return files, flags, {}
- def GetSourceForTest(self, testcase):
- filename = os.path.join(self.testroot, testcase.path + ".js")
- with open(filename) as f:
- return f.read()
+ files.append(os.path.join(self.suite.testroot, testfilename))
+ return files
+
+ def _get_suite_flags(self, ctx):
+ return ['--expose-gc']
+
+ def _get_source_path(self):
+ return os.path.join(self.suite.testroot, self.path + self._get_suffix())
- def IsNegativeTest(self, testcase):
- return testcase.path.endswith("-n")
+ @property
+ def output_proc(self):
+ if not self.expected_outcomes:
+ if self.path.endswith('-n'):
+ return mozilla.MOZILLA_PASS_NEGATIVE
+ return mozilla.MOZILLA_PASS_DEFAULT
+ if self.path.endswith('-n'):
+ return mozilla.NegOutProc(self.expected_outcomes)
+ return mozilla.OutProc(self.expected_outcomes)
- def IsFailureOutput(self, testcase):
- if testcase.output.exit_code != 0:
- return True
- return "FAILED!" in testcase.output.stdout
def GetSuite(name, root):
- return MozillaTestSuite(name, root)
+ return TestSuite(name, root)
diff --git a/deps/v8/test/optimize_for_size.gyp b/deps/v8/test/optimize_for_size.gyp
index 047e3d8acb..8728479c23 100644
--- a/deps/v8/test/optimize_for_size.gyp
+++ b/deps/v8/test/optimize_for_size.gyp
@@ -11,6 +11,7 @@
'type': 'none',
'dependencies': [
'cctest/cctest.gyp:cctest_run',
+ 'debugger/debugger.gyp:debugger_run',
'intl/intl.gyp:intl_run',
'mjsunit/mjsunit.gyp:mjsunit_run',
'webkit/webkit.gyp:webkit_run',
diff --git a/deps/v8/test/perf.isolate b/deps/v8/test/perf.isolate
index 5eec44a3ee..132bcc54f0 100644
--- a/deps/v8/test/perf.isolate
+++ b/deps/v8/test/perf.isolate
@@ -10,7 +10,7 @@
'../tools/run_perf.py',
# TODO(machenbach): These files are referenced by the perf runner.
# They should be transformed into a proper python module.
- '../tools/testrunner/local/commands.py',
+ '../tools/testrunner/local/command.py',
'../tools/testrunner/local/utils.py',
'../tools/testrunner/objects/output.py',
# This is often used to trigger performance bots. We include it in the
diff --git a/deps/v8/test/preparser/testcfg.py b/deps/v8/test/preparser/testcfg.py
index f90d34f4ac..f6814e756f 100644
--- a/deps/v8/test/preparser/testcfg.py
+++ b/deps/v8/test/preparser/testcfg.py
@@ -32,20 +32,22 @@ from testrunner.local import testsuite
from testrunner.objects import testcase
-class PreparserTestSuite(testsuite.TestSuite):
- def __init__(self, name, root):
- super(PreparserTestSuite, self).__init__(name, root)
+class VariantsGenerator(testsuite.VariantsGenerator):
+ def _get_variants(self, test):
+ return self._standard_variant
+
+class TestSuite(testsuite.TestSuite):
def _ParsePythonTestTemplates(self, result, filename):
pathname = os.path.join(self.root, filename + ".pyt")
- def Test(name, source, expectation, extra_flags=[]):
+ def Test(name, source, expectation):
source = source.replace("\n", " ")
- testname = os.path.join(filename, name)
- flags = ["-e", source]
+ path = os.path.join(filename, name)
if expectation:
- flags += ["--throws"]
- flags += extra_flags
- test = testcase.TestCase(self, testname, flags=flags)
+ template_flags = ["--throws"]
+ else:
+ template_flags = []
+ test = self._create_test(path, source, template_flags)
result.append(test)
def Template(name, source):
def MkTest(replacement, expectation):
@@ -68,16 +70,48 @@ class PreparserTestSuite(testsuite.TestSuite):
self._ParsePythonTestTemplates(result, f)
return result
- def GetParametersForTestCase(self, testcase, context):
- return [], testcase.flags, {}
+ def _create_test(self, path, source, template_flags):
+ return super(TestSuite, self)._create_test(
+ path, source=source, template_flags=template_flags)
+
+ def _test_class(self):
+ return TestCase
+
+ def _LegacyVariantsGeneratorFactory(self):
+ return testsuite.StandardLegacyVariantsGenerator
+
+ def _variants_gen_class(self):
+ return VariantsGenerator
+
+
+class TestCase(testcase.TestCase):
+ def __init__(self, suite, path, name, source, template_flags):
+ super(TestCase, self).__init__(suite, path, name)
+
+ self._source = source
+ self._template_flags = template_flags
+
+ def _get_cmd_params(self, ctx):
+ return (
+ self._get_files_params(ctx) +
+ self._get_extra_flags(ctx) +
+ ['-e', self._source] +
+ self._template_flags +
+ self._get_variant_flags() +
+ self._get_statusfile_flags() +
+ self._get_mode_flags(ctx) +
+ self._get_source_flags()
+ )
+
+ def _get_mode_flags(self, ctx):
+ return []
- def GetSourceForTest(self, testcase):
- assert testcase.flags[0] == "-e"
- return testcase.flags[1]
+ def is_source_available(self):
+ return True
- def _VariantGeneratorFactory(self):
- return testsuite.StandardVariantGenerator
+ def get_source(self):
+ return self._source
def GetSuite(name, root):
- return PreparserTestSuite(name, root)
+ return TestSuite(name, root)
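Preparser tests are generated from .pyt templates rather than read from
disk, so the new TestCase carries its source inline and hands it to d8 via
-e. As a hypothetical illustration (the test name, source, and extra flag
are invented), for a template test declared as
Test('strict-octal', '"use strict"; 012;', True) the parameters assembled
by _get_cmd_params boil down to roughly:

params = (
    []                               # _get_files_params: no file on disk
    + ['--some-extra-flag']          # _get_extra_flags (placeholder)
    + ['-e', '"use strict"; 012;']   # the inline source
    + ['--throws']                   # template_flags: expectation was true
    # ...variant, status-file and source flags follow; mode flags are
    # suppressed by the _get_mode_flags override above.
)

This is also why is_source_available() and get_source() are overridden: the
runner cannot re-read the source from a file.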
diff --git a/deps/v8/test/promises-aplus/README b/deps/v8/test/promises-aplus/README
deleted file mode 100644
index de15da362c..0000000000
--- a/deps/v8/test/promises-aplus/README
+++ /dev/null
@@ -1,29 +0,0 @@
-This directory contains code for running the Promise/A+ Compliance Test Suite[1].
-You can download it from [1], or by specifying --download to
-tools/run-tests.py.
-The Promise/A+ Compliance Test Suite requires a Node environment and some
-libraries. To run it in the d8 shell, we provide emulation functions in the
-lib/ directory.
-
- - lib/adapter.js
- - An adapter for harmony Promise used in Promise/A+ tests.
- - lib/assert.js
-  - Emulates the assert module in Node.
- - lib/global.js
- - Provides global functions and variables.
- - lib/mocha.js
- - Emulates Mocha[2] test framework.
- - lib/require.js
-  - Emulates the require function in Node.
- - lib/run-tests.js
-  - Runs all describe() tests.
-
-The emulation is not complete. Upgrading the Promise/A+ tests will require
-changes to the lib/ scripts.
-
-Sinon.JS[3], required by Promise/A+ tests, is also downloaded by run-tests.py.
-
-[1]: https://github.com/promises-aplus/promises-tests
-[2]: http://visionmedia.github.io/mocha/
-[3]: http://sinonjs.org/
-
diff --git a/deps/v8/test/promises-aplus/lib/adapter.js b/deps/v8/test/promises-aplus/lib/adapter.js
deleted file mode 100644
index d99913a138..0000000000
--- a/deps/v8/test/promises-aplus/lib/adapter.js
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-var global = this.global || {};
-
-global.adapter = {
- resolved: function(value) { return Promise.resolve(value); },
- rejected: function(reason) { return Promise.reject(reason); },
- deferred: function() {
- var resolve, reject;
- var promise = new Promise(function(res, rej) {
- resolve = res;
- reject = rej;
- });
- return {promise: promise, resolve: resolve, reject: reject};
- }
-};
diff --git a/deps/v8/test/promises-aplus/lib/assert.js b/deps/v8/test/promises-aplus/lib/assert.js
deleted file mode 100644
index 0138f36041..0000000000
--- a/deps/v8/test/promises-aplus/lib/assert.js
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Mimics assert module in node.
-
-function compose(message1, message2) {
- return message2 ? message1 + ': ' + message2 : message1
-}
-
-function fail(actual, expected, message, operator) {
- var e = Error(compose('FAIL', message) +
- ': (' + actual + ' ' + operator + ' ' + expected + ') should hold');
- fails.push(e);
- throw e;
-}
-
-function ok(value, message) {
- if (!value) {
-    throw Error(compose('FAIL', message) + ': value = ' + value);
- }
-}
-
-function equal(actual, expected, message) {
- if (!(expected == actual)) {
- fail(actual, expected, message, '==');
- }
-}
-
-function notEqual(actual, expected, message) {
- if (!(expected != actual)) {
- fail(actual, expected, message, '!=');
- }
-}
-
-function strictEqual(actual, expected, message) {
- if (!(expected === actual)) {
- fail(actual, expected, message, '===');
- }
-}
-
-function notStrictEqual(actual, expected, message) {
- if (!(expected !== actual)) {
- fail(actual, expected, message, '!==');
- }
-}
-
-function assert(value, message) {
- return ok(value, message);
-}
-
-function notImplemented() {
- throw Error('FAIL: This assertion function is not yet implemented.');
-}
-
-function clear() {
- this.fails = [];
-}
-
-assert.fail = fail;
-assert.ok = ok;
-assert.equal = equal;
-assert.notEqual = notEqual;
-assert.deepEqual = notImplemented;
-assert.notDeepEqual = notImplemented;
-assert.strictEqual = strictEqual;
-assert.notStrictEqual = notStrictEqual;
-assert.throws = notImplemented;
-assert.doesNotThrow = notImplemented;
-assert.ifError = notImplemented;
-
-assert.clear = clear;
-
-exports = assert;
diff --git a/deps/v8/test/promises-aplus/lib/global.js b/deps/v8/test/promises-aplus/lib/global.js
deleted file mode 100644
index ece338ed3e..0000000000
--- a/deps/v8/test/promises-aplus/lib/global.js
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-var global = this.global || {};
-var setTimeout;
-var clearTimeout;
-
-(function() {
-var timers = {};
-var currentId = 0;
-
-setInterval = function(fn, delay) {
- var i = 0;
- var id = currentId++;
- function loop() {
- if (!timers[id]) {
- return;
- }
- if (i++ >= delay) {
- fn();
- }
- %EnqueueMicrotask(loop);
- }
- %EnqueueMicrotask(loop);
- timers[id] = true;
- return id;
-}
-
-clearTimeout = function(id) {
- delete timers[id];
-}
-
-clearInterval = clearTimeout;
-
-setTimeout = function(fn, delay) {
- var id = setInterval(function() {
- fn();
- clearInterval(id);
- }, delay);
- return id;
-}
-
-}());
diff --git a/deps/v8/test/promises-aplus/lib/mocha.js b/deps/v8/test/promises-aplus/lib/mocha.js
deleted file mode 100644
index 0a172b9d2f..0000000000
--- a/deps/v8/test/promises-aplus/lib/mocha.js
+++ /dev/null
@@ -1,255 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This file emulates the Mocha test framework used in the promises-aplus tests.
-
-var describe;
-var it;
-var specify;
-var before;
-var after;
-var beforeEach;
-var afterEach;
-var RunAllTests;
-
-var assert = require('assert');
-
-(function() {
-var TIMEOUT = 1000;
-
-var context = {
- beingDescribed: undefined,
- currentSuiteIndex: 0,
- suites: []
-};
-
-function Run() {
- function current() {
- while (context.currentSuiteIndex < context.suites.length &&
- context.suites[context.currentSuiteIndex].hasRun) {
- ++context.currentSuiteIndex;
- }
- if (context.suites.length == context.currentSuiteIndex) {
- return undefined;
- }
- return context.suites[context.currentSuiteIndex];
- }
- var suite = current();
- if (!suite) {
- // done
- print('All tests have run.');
- return;
- }
- suite.Run();
-}
-
-RunAllTests = function() {
- context.currentSuiteIndex = 0;
- var numRegularTestCases = 0;
- for (var i = 0; i < context.suites.length; ++i) {
- numRegularTestCases += context.suites[i].numRegularTestCases();
- }
- print(context.suites.length + ' suites and ' + numRegularTestCases +
- ' test cases are found');
- Run();
-};
-
-function TestCase(name, before, fn, after, isRegular) {
- this.name = name;
- this.before = before;
- this.fn = fn;
- this.after = after;
- this.isRegular = isRegular;
- this.hasDone = false;
-}
-
-TestCase.prototype.RunFunction = function(suite, fn, postAction) {
- if (!fn) {
- postAction();
- return;
- }
- try {
- if (fn.length === 0) {
- // synchronous
- fn();
- postAction();
- } else {
- // asynchronous
- fn(postAction);
- }
- } catch (e) {
- suite.ReportError(this, e);
- }
-}
-
-TestCase.prototype.MarkAsDone = function() {
- this.hasDone = true;
- clearTimeout(this.timer);
-}
-
-TestCase.prototype.Run = function(suite, postAction) {
- print('Running ' + suite.description + '#' + this.name + ' ...');
- assert.clear();
-
- this.timer = setTimeout(function() {
- suite.ReportError(this, Error('timeout'));
- }.bind(this), TIMEOUT);
-
- this.RunFunction(suite, this.before, function(e) {
- if (this.hasDone) {
- return;
- }
- if (e instanceof Error) {
- return suite.ReportError(this, e);
- }
- if (assert.fails.length > 0) {
- return suite.ReportError(this, assert.fails[0]);
- }
- this.RunFunction(suite, this.fn, function(e) {
- if (this.hasDone) {
- return;
- }
- if (e instanceof Error) {
- return suite.ReportError(this, e);
- }
- if (assert.fails.length > 0) {
- return suite.ReportError(this, assert.fails[0]);
- }
- this.RunFunction(suite, this.after, function(e) {
- if (this.hasDone) {
- return;
- }
- if (e instanceof Error) {
- return suite.ReportError(this, e);
- }
- if (assert.fails.length > 0) {
- return suite.ReportError(this, assert.fails[0]);
- }
- this.MarkAsDone();
- if (this.isRegular) {
- print('PASS: ' + suite.description + '#' + this.name);
- }
- %EnqueueMicrotask(postAction);
- }.bind(this));
- }.bind(this));
- }.bind(this));
-};
-
-function TestSuite(described) {
- this.description = described.description;
- this.cases = [];
- this.currentIndex = 0;
- this.hasRun = false;
-
- if (described.before) {
- this.cases.push(new TestCase(this.description + ' :before', undefined,
- described.before, undefined, false));
- }
- for (var i = 0; i < described.cases.length; ++i) {
- this.cases.push(new TestCase(described.cases[i].description,
- described.beforeEach,
- described.cases[i].fn,
- described.afterEach,
- true));
- }
- if (described.after) {
- this.cases.push(new TestCase(this.description + ' :after',
- undefined, described.after, undefined, false));
- }
-}
-
-TestSuite.prototype.Run = function() {
- this.hasRun = this.currentIndex === this.cases.length;
- if (this.hasRun) {
- %EnqueueMicrotask(Run);
- return;
- }
-
- // TestCase.prototype.Run cannot throw an exception.
- this.cases[this.currentIndex].Run(this, function() {
- ++this.currentIndex;
- %EnqueueMicrotask(Run);
- }.bind(this));
-};
-
-TestSuite.prototype.numRegularTestCases = function() {
- var n = 0;
- for (var i = 0; i < this.cases.length; ++i) {
- if (this.cases[i].isRegular) {
- ++n;
- }
- }
- return n;
-}
-
-TestSuite.prototype.ReportError = function(testCase, e) {
- if (testCase.hasDone) {
- return;
- }
- testCase.MarkAsDone();
- this.hasRun = this.currentIndex === this.cases.length;
- print('FAIL: ' + this.description + '#' + testCase.name + ': ' +
- e.name + ' (' + e.message + ')');
- ++this.currentIndex;
- %EnqueueMicrotask(Run);
-};
-
-describe = function(description, fn) {
- var parent = context.beingDescribed;
- var incomplete = {
- cases: [],
- description: parent ? parent.description + ' ' + description : description,
- parent: parent,
- };
- context.beingDescribed = incomplete;
- fn();
- context.beingDescribed = parent;
-
- context.suites.push(new TestSuite(incomplete));
-}
-
-specify = it = function(description, fn) {
- context.beingDescribed.cases.push({description: description, fn: fn});
-}
-
-before = function(fn) {
- context.beingDescribed.before = fn;
-}
-
-after = function(fn) {
- context.beingDescribed.after = fn;
-}
-
-beforeEach = function(fn) {
- context.beingDescribed.beforeEach = fn;
-}
-
-afterEach = function(fn) {
- context.beingDescribed.afterEach = fn;
-}
-
-}());
diff --git a/deps/v8/test/promises-aplus/lib/require.js b/deps/v8/test/promises-aplus/lib/require.js
deleted file mode 100644
index b987a1a862..0000000000
--- a/deps/v8/test/promises-aplus/lib/require.js
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-var global = this.global || {};
-
-// Emulates 'require' function in Node.
-// This is not a generic function: it only works for known modules.
-var require = function(name) {
- var exports = {};
- var path;
- var base = 'test/promises-aplus/'
- if (name.search('./helpers/') === 0) {
- path = base + 'promises-tests/lib/tests/' + name + '.js';
- } else if (name === 'assert') {
- path = base + 'lib/assert.js';
- } else if (name === 'sinon') {
- path = base + 'sinon/sinon.js';
- } else {
- throw Error('We cannot load the library: ' + name);
- }
- eval('(function() { ' + read(path) + '}())');
- if (name === 'sinon') {
- return this.sinon;
- }
- return exports;
-};
diff --git a/deps/v8/test/promises-aplus/lib/run-tests.js b/deps/v8/test/promises-aplus/lib/run-tests.js
deleted file mode 100644
index 6a0a96c639..0000000000
--- a/deps/v8/test/promises-aplus/lib/run-tests.js
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Defined in lib/mocha.js
-RunAllTests();
diff --git a/deps/v8/test/promises-aplus/promises-aplus.status b/deps/v8/test/promises-aplus/promises-aplus.status
deleted file mode 100644
index 5da9efae90..0000000000
--- a/deps/v8/test/promises-aplus/promises-aplus.status
+++ /dev/null
@@ -1,31 +0,0 @@
-# Copyright 2014 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-[
-[ALWAYS, {
-}], # ALWAYS
-]
diff --git a/deps/v8/test/promises-aplus/testcfg.py b/deps/v8/test/promises-aplus/testcfg.py
deleted file mode 100644
index 4db598a78a..0000000000
--- a/deps/v8/test/promises-aplus/testcfg.py
+++ /dev/null
@@ -1,99 +0,0 @@
-# Copyright 2014 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import hashlib
-import os
-import shutil
-import sys
-import tarfile
-
-from testrunner.local import testsuite
-from testrunner.local import utils
-from testrunner.objects import testcase
-
-
-"""
-Requirements for using this test suite:
-Download http://sinonjs.org/releases/sinon-1.7.3.js into
-test/promises-aplus/sinon.
-Download https://github.com/promises-aplus/promises-tests/tree/2.0.3 into
-test/promises-aplus/promises-tests.
-"""
-
-TEST_NAME = 'promises-tests'
-
-
-class PromiseAplusTestSuite(testsuite.TestSuite):
-
- def __init__(self, name, root):
- self.root = root
- self.test_files_root = os.path.join(self.root, TEST_NAME, 'lib', 'tests')
- self.name = name
- self.helper_files_pre = [
- os.path.join(root, 'lib', name) for name in
- ['global.js', 'require.js', 'mocha.js', 'adapter.js']
- ]
- self.helper_files_post = [
- os.path.join(root, 'lib', name) for name in
- ['run-tests.js']
- ]
-
- def CommonTestName(self, testcase):
- return testcase.path.split(os.path.sep)[-1]
-
- def ListTests(self, context):
- return [testcase.TestCase(self, fname[:-len('.js')]) for fname in
- os.listdir(os.path.join(self.root, TEST_NAME, 'lib', 'tests'))
- if fname.endswith('.js')]
-
- def GetParametersForTestCase(self, testcase, context):
- files = (
- self.helper_files_pre +
- [os.path.join(self.test_files_root, testcase.path + '.js')] +
- self.helper_files_post
- )
- flags = testcase.flags + context.mode_flags + ['--allow-natives-syntax']
- return files, flags, {}
-
- def GetSourceForTest(self, testcase):
- filename = os.path.join(self.root, TEST_NAME,
- 'lib', 'tests', testcase.path + '.js')
- with open(filename) as f:
- return f.read()
-
- def IsNegativeTest(self, testcase):
- return '@negative' in self.GetSourceForTest(testcase)
-
- def IsFailureOutput(self, testcase):
- if testcase.output.exit_code != 0:
- return True
- return not 'All tests have run.' in testcase.output.stdout or \
- 'FAIL:' in testcase.output.stdout
-
-def GetSuite(name, root):
- return PromiseAplusTestSuite(name, root)
diff --git a/deps/v8/test/test262/local-tests/test/language/expressions/class/fields-inner-arrow-err-contains-arguments.js b/deps/v8/test/test262/local-tests/test/language/expressions/class/fields-inner-arrow-err-contains-arguments.js
new file mode 100644
index 0000000000..a260f59de7
--- /dev/null
+++ b/deps/v8/test/test262/local-tests/test/language/expressions/class/fields-inner-arrow-err-contains-arguments.js
@@ -0,0 +1,26 @@
+/*---
+description: Syntax error if `arguments` used in class field (arrow function expression)
+esid: sec-class-definitions-static-semantics-early-errors
+features: [class, class-fields-public, arrow-function]
+flags: [generated]
+negative:
+ phase: early
+ type: SyntaxError
+info: |
+ Static Semantics: Early Errors
+ FieldDefinition:
+ PropertyNameInitializeropt
+ - It is a Syntax Error if ContainsArguments of Initializer is true.
+ Static Semantics: ContainsArguments
+ IdentifierReference : Identifier
+ 1. If the StringValue of Identifier is "arguments", return true.
+ ...
+ For all other grammatical productions, recurse on all nonterminals. If any piece returns true, then return true. Otherwise return false.
+---*/
+throw "Test262: This statement should not be evaluated.";
+var C = class {
+ x = () => {
+ var t = () => { arguments; };
+ t();
+ }
+}
diff --git a/deps/v8/test/test262/local-tests/test/language/expressions/class/fields-inner-arrow-eval-err-contains-arguments.js b/deps/v8/test/test262/local-tests/test/language/expressions/class/fields-inner-arrow-eval-err-contains-arguments.js
new file mode 100644
index 0000000000..20e2bac839
--- /dev/null
+++ b/deps/v8/test/test262/local-tests/test/language/expressions/class/fields-inner-arrow-eval-err-contains-arguments.js
@@ -0,0 +1,33 @@
+/*---
+description: error if `arguments` in StatementList of eval (direct eval)
+esid: sec-performeval-rules-in-initializer
+features: [class, class-fields-public, arrow-function]
+flags: [generated]
+info: |
+ Static Semantics: Early Errors
+
+ FieldDefinition:
+ PropertyNameInitializeropt
+
+ - It is a Syntax Error if ContainsArguments of Initializer is true.
+
+ Static Semantics: ContainsArguments
+ IdentifierReference : Identifier
+
+ 1. If the StringValue of Identifier is "arguments", return true.
+ ...
+ For all other grammatical productions, recurse on all nonterminals. If any piece returns true, then return true. Otherwise return false.
+
+---*/
+
+var C = class {
+ x = () => {
+ var t = () => { eval("arguments"); };
+ t();
+ }
+}
+
+assert.throws(SyntaxError, function() {
+ var c = new C();
+ c.x();
+});
diff --git a/deps/v8/test/test262/local-tests/test/language/expressions/class/fields-inner-eval-arrow-err-contains-arguments.js b/deps/v8/test/test262/local-tests/test/language/expressions/class/fields-inner-eval-arrow-err-contains-arguments.js
new file mode 100644
index 0000000000..28ae79ed94
--- /dev/null
+++ b/deps/v8/test/test262/local-tests/test/language/expressions/class/fields-inner-eval-arrow-err-contains-arguments.js
@@ -0,0 +1,30 @@
+/*---
+description: error if `arguments` in StatementList of eval (direct eval)
+esid: sec-performeval-rules-in-initializer
+features: [class, class-fields-public, arrow-function]
+flags: [generated]
+info: |
+ Static Semantics: Early Errors
+
+ FieldDefinition:
+ PropertyNameInitializeropt
+
+ - It is a Syntax Error if ContainsArguments of Initializer is true.
+
+ Static Semantics: ContainsArguments
+ IdentifierReference : Identifier
+
+ 1. If the StringValue of Identifier is "arguments", return true.
+ ...
+ For all other grammatical productions, recurse on all nonterminals. If any piece returns true, then return true. Otherwise return false.
+
+---*/
+
+var C = class {
+ x = eval("() => arguments");
+}
+
+assert.throws(SyntaxError, function() {
+ var c = new C();
+ c.x();
+});
diff --git a/deps/v8/test/test262/local-tests/test/language/statements/class/fields-inner-arrow-err-contains-arguments.js b/deps/v8/test/test262/local-tests/test/language/statements/class/fields-inner-arrow-err-contains-arguments.js
new file mode 100644
index 0000000000..2ce086c612
--- /dev/null
+++ b/deps/v8/test/test262/local-tests/test/language/statements/class/fields-inner-arrow-err-contains-arguments.js
@@ -0,0 +1,26 @@
+/*---
+description: Syntax error if `arguments` used in class field (arrow function expression)
+esid: sec-class-definitions-static-semantics-early-errors
+features: [class, class-fields-public, arrow-function]
+flags: [generated]
+negative:
+ phase: early
+ type: SyntaxError
+info: |
+ Static Semantics: Early Errors
+ FieldDefinition:
+ PropertyNameInitializeropt
+ - It is a Syntax Error if ContainsArguments of Initializer is true.
+ Static Semantics: ContainsArguments
+ IdentifierReference : Identifier
+ 1. If the StringValue of Identifier is "arguments", return true.
+ ...
+ For all other grammatical productions, recurse on all nonterminals. If any piece returns true, then return true. Otherwise return false.
+---*/
+throw "Test262: This statement should not be evaluated.";
+class C {
+ x = () => {
+ var t = () => { arguments; };
+ t();
+ }
+}
diff --git a/deps/v8/test/test262/local-tests/test/language/statements/class/fields-inner-arrow-eval-err-contains-arguments.js b/deps/v8/test/test262/local-tests/test/language/statements/class/fields-inner-arrow-eval-err-contains-arguments.js
new file mode 100644
index 0000000000..9b29503256
--- /dev/null
+++ b/deps/v8/test/test262/local-tests/test/language/statements/class/fields-inner-arrow-eval-err-contains-arguments.js
@@ -0,0 +1,34 @@
+/*---
+description: error if `arguments` in StatementList of eval (direct eval)
+esid: sec-performeval-rules-in-initializer
+features: [class, class-fields-public, arrow-function]
+flags: [generated]
+info: |
+ Static Semantics: Early Errors
+
+ FieldDefinition:
+ PropertyNameInitializeropt
+
+ - It is a Syntax Error if ContainsArguments of Initializer is true.
+
+ Static Semantics: ContainsArguments
+ IdentifierReference : Identifier
+
+ 1. If the StringValue of Identifier is "arguments", return true.
+ ...
+ For all other grammatical productions, recurse on all nonterminals. If any piece returns true, then return true. Otherwise return false.
+
+---*/
+
+
+class C {
+ x = () => {
+ var t = () => { eval("arguments"); };
+ t();
+ }
+}
+
+assert.throws(SyntaxError, function() {
+ var c = new C();
+ c.x();
+});
diff --git a/deps/v8/test/test262/local-tests/test/language/statements/class/fields-inner-eval-arrow-err-contains-arguments.js b/deps/v8/test/test262/local-tests/test/language/statements/class/fields-inner-eval-arrow-err-contains-arguments.js
new file mode 100644
index 0000000000..f2680e4b67
--- /dev/null
+++ b/deps/v8/test/test262/local-tests/test/language/statements/class/fields-inner-eval-arrow-err-contains-arguments.js
@@ -0,0 +1,30 @@
+/*---
+description: error if `arguments` in StatementList of eval (direct eval)
+esid: sec-performeval-rules-in-initializer
+features: [class, class-fields-public, arrow-function]
+flags: [generated]
+info: |
+ Static Semantics: Early Errors
+
+ FieldDefinition:
+ PropertyNameInitializeropt
+
+ - It is a Syntax Error if ContainsArguments of Initializer is true.
+
+ Static Semantics: ContainsArguments
+ IdentifierReference : Identifier
+
+ 1. If the StringValue of Identifier is "arguments", return true.
+ ...
+ For all other grammatical productions, recurse on all nonterminals. If any piece returns true, then return true. Otherwise return false.
+
+---*/
+
+class C {
+ x = eval("() => arguments");
+}
+
+assert.throws(SyntaxError, function() {
+ var c = new C();
+ c.x();
+});
diff --git a/deps/v8/test/test262/test262.status b/deps/v8/test/test262/test262.status
index 51ab51447a..bd4b667a09 100644
--- a/deps/v8/test/test262/test262.status
+++ b/deps/v8/test/test262/test262.status
@@ -95,6 +95,8 @@
'built-ins/TypedArrays/internals/HasProperty/detached-buffer-realm': [FAIL],
'built-ins/TypedArrays/internals/Set/detached-buffer': [FAIL],
'built-ins/TypedArrays/internals/Set/detached-buffer-realm': [FAIL],
+ 'built-ins/TypedArrays/internals/Set/tonumber-value-detached-buffer': [FAIL],
+ 'built-ins/TypedArrays/internals/DefineOwnProperty/tonumber-value-detached-buffer': [FAIL],
# Some TypedArray methods throw due to the same bug, from Get
'built-ins/TypedArray/prototype/every/callbackfn-detachbuffer': [FAIL],
'built-ins/TypedArray/prototype/filter/callbackfn-detachbuffer': [FAIL],
@@ -222,9 +224,6 @@
'language/global-code/script-decl-func-err-non-configurable': [FAIL],
'language/global-code/script-decl-var-collision': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=4124
- 'built-ins/Simd/*': [SKIP],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=4958
'built-ins/Function/prototype/toString/*': ['--harmony-function-tostring'],
@@ -424,15 +423,33 @@
'built-ins/Proxy/ownKeys/return-duplicate-symbol-entries-throws': [FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=6861
- 'language/statements/for-of/iterator-next-reference': [FAIL],
- 'language/expressions/async-generator/named-yield-star-async-next': [FAIL],
- 'language/expressions/async-generator/yield-star-async-next': [FAIL],
- 'language/expressions/class/async-gen-method-yield-star-async-next': [FAIL],
- 'language/expressions/class/async-gen-method-static-yield-star-async-next': [FAIL],
- 'language/expressions/object/method-definition/async-gen-yield-star-async-next': [FAIL],
- 'language/statements/async-generator/yield-star-async-next': [FAIL],
- 'language/statements/class/async-gen-method-yield-star-async-next': [FAIL],
- 'language/statements/class/async-gen-method-static-yield-star-async-next': [FAIL],
+ 'language/expressions/object/method-definition/async-gen-yield-star-sync-next': [FAIL],
+ 'language/expressions/class/async-gen-method-static-yield-star-sync-next': [FAIL],
+ 'language/expressions/async-generator/yield-star-sync-next': [FAIL],
+ 'language/statements/class/async-gen-method-static-yield-star-sync-next': [FAIL],
+ 'language/expressions/async-generator/named-yield-star-sync-next': [FAIL],
+ 'language/expressions/class/async-gen-method-yield-star-sync-next': [FAIL],
+ 'language/statements/class/async-gen-method-yield-star-sync-next': [FAIL],
+ 'language/statements/async-generator/yield-star-sync-next': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=6791
+ 'built-ins/BigInt/prototype/Symbol.toStringTag': [SKIP],
+ 'built-ins/DataView/prototype/getBigInt64/*': [SKIP],
+ 'built-ins/DataView/prototype/getBigUint64/*': [SKIP],
+ 'built-ins/DataView/prototype/setBigInt64/*': [SKIP],
+ 'built-ins/DataView/prototype/setBigUint64/*': [SKIP],
+ 'built-ins/TypedArrays/BigInt64Array/*': [SKIP],
+ 'built-ins/TypedArrays/BigUint64Array/*': [SKIP],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=7184
+ 'annexB/language/expressions/yield/star-iterable-return-emulates-undefined-throws-when-called': [FAIL],
+ 'annexB/language/statements/for-await-of/iterator-close-return-emulates-undefined-throws-when-called': [FAIL],
+ 'annexB/language/statements/for-of/iterator-close-return-emulates-undefined-throws-when-called': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=7186
+ 'language/statements/class/fields-indirect-eval-err-contains-arguments': [FAIL],
+ 'language/expressions/class/fields-indirect-eval-err-contains-arguments': [FAIL],
+
######################## NEEDS INVESTIGATION ###########################
@@ -446,7 +463,6 @@
'intl402/DateTimeFormat/prototype/resolvedOptions/hourCycle': [FAIL],
'intl402/DateTimeFormat/12.1.2': [PASS, FAIL],
'intl402/DateTimeFormat/12.2.3_b': [FAIL],
- 'intl402/Intl/getCanonicalLocales/success_cases': [FAIL],
'intl402/Number/prototype/toLocaleString/13.2.1_5': [PASS, FAIL],
'intl402/NumberFormat/11.1.1_20_c': [FAIL],
'intl402/NumberFormat/11.1.2': [PASS, FAIL],
@@ -514,6 +530,11 @@
'harness/detachArrayBuffer': [SKIP],
'harness/detachArrayBuffer-host-detachArrayBuffer': [SKIP],
+ # https://github.com/tc39/test262/pull/1371
+ 'language/expressions/bitwise-and/bigint-non-primitive': ['--harmony-bigint'],
+ 'language/expressions/bitwise-or/bigint-non-primitive': ['--harmony-bigint'],
+ 'language/expressions/bitwise-xor/bigint-non-primitive': ['--harmony-bigint'],
+
############################ SKIPPED TESTS #############################
# These tests take a looong time to run.
diff --git a/deps/v8/test/test262/testcfg.py b/deps/v8/test/test262/testcfg.py
index 358d0db459..2f23fb67fb 100644
--- a/deps/v8/test/test262/testcfg.py
+++ b/deps/v8/test/test262/testcfg.py
@@ -33,21 +33,25 @@ import re
import sys
import tarfile
-
from testrunner.local import statusfile
from testrunner.local import testsuite
from testrunner.local import utils
from testrunner.objects import testcase
+from testrunner.outproc import base as outproc
+from testrunner.outproc import test262
+
# TODO(littledan): move the flag mapping into the status file
FEATURE_FLAGS = {
- 'async-iteration': '--harmony-async-iteration',
+ 'BigInt': '--harmony-bigint',
'regexp-named-groups': '--harmony-regexp-named-captures',
'regexp-unicode-property-escapes': '--harmony-regexp-property',
'Promise.prototype.finally': '--harmony-promise-finally',
+ 'class-fields-public': '--harmony-class-fields',
+ 'optional-catch-binding': '--harmony-optional-catch-binding',
}
-SKIPPED_FEATURES = set(['BigInt', 'class-fields', 'optional-catch-binding'])
+SKIPPED_FEATURES = set(['class-fields-private'])
DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
ARCHIVE = DATA + ".tar"
@@ -71,64 +75,88 @@ ALL_VARIANT_FLAGS_STRICT = dict(
for v, flag_sets in testsuite.ALL_VARIANT_FLAGS.iteritems()
)
-FAST_VARIANT_FLAGS_STRICT = dict(
- (v, [flags + ["--use-strict"] for flags in flag_sets])
- for v, flag_sets in testsuite.FAST_VARIANT_FLAGS.iteritems()
-)
-
ALL_VARIANT_FLAGS_BOTH = dict(
(v, [flags for flags in testsuite.ALL_VARIANT_FLAGS[v] +
ALL_VARIANT_FLAGS_STRICT[v]])
for v in testsuite.ALL_VARIANT_FLAGS
)
-FAST_VARIANT_FLAGS_BOTH = dict(
- (v, [flags for flags in testsuite.FAST_VARIANT_FLAGS[v] +
- FAST_VARIANT_FLAGS_STRICT[v]])
- for v in testsuite.FAST_VARIANT_FLAGS
-)
-
ALL_VARIANTS = {
'nostrict': testsuite.ALL_VARIANT_FLAGS,
'strict': ALL_VARIANT_FLAGS_STRICT,
'both': ALL_VARIANT_FLAGS_BOTH,
}
-FAST_VARIANTS = {
- 'nostrict': testsuite.FAST_VARIANT_FLAGS,
- 'strict': FAST_VARIANT_FLAGS_STRICT,
- 'both': FAST_VARIANT_FLAGS_BOTH,
-}
-
-class Test262VariantGenerator(testsuite.VariantGenerator):
- def GetFlagSets(self, testcase, variant):
- outcomes = testcase.suite.GetStatusFileOutcomes(testcase)
- if outcomes and statusfile.OnlyFastVariants(outcomes):
- variant_flags = FAST_VARIANTS
- else:
- variant_flags = ALL_VARIANTS
-
- test_record = self.suite.GetTestRecord(testcase)
+class LegacyVariantsGenerator(testsuite.LegacyVariantsGenerator):
+ def GetFlagSets(self, test, variant):
+ test_record = test.test_record
if "noStrict" in test_record:
- return variant_flags["nostrict"][variant]
+ return ALL_VARIANTS["nostrict"][variant]
if "onlyStrict" in test_record:
- return variant_flags["strict"][variant]
- return variant_flags["both"][variant]
-
-
-class Test262TestSuite(testsuite.TestSuite):
+ return ALL_VARIANTS["strict"][variant]
+ return ALL_VARIANTS["both"][variant]
+
+
+class VariantsGenerator(testsuite.VariantsGenerator):
+ def gen(self, test):
+ flags_set = self._get_flags_set(test)
+ test_record = test.test_record
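+    # noStrict tests run sloppy only and onlyStrict tests run strict only;
+    # everything else is yielded twice, once plain and once --use-strict.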
+ for n, variant in enumerate(self._get_variants(test)):
+ flags = flags_set[variant][0]
+ if 'noStrict' in test_record:
+ yield (variant, flags, str(n))
+ elif 'onlyStrict' in test_record:
+ yield (variant, flags + ['--use-strict'], 'strict-%d' % n)
+ else:
+ yield (variant, flags, str(n))
+ yield (variant, flags + ['--use-strict'], 'strict-%d' % n)
+
+
+class TestSuite(testsuite.TestSuite):
# Match the (...) in '/path/to/v8/test/test262/subdir/test/(...).js'
# In practice, subdir is data or local-tests
def __init__(self, name, root):
- super(Test262TestSuite, self).__init__(name, root)
+ super(TestSuite, self).__init__(name, root)
self.testroot = os.path.join(self.root, *TEST_262_SUITE_PATH)
self.harnesspath = os.path.join(self.root, *TEST_262_HARNESS_PATH)
self.harness = [os.path.join(self.harnesspath, f)
for f in TEST_262_HARNESS_FILES]
self.harness += [os.path.join(self.root, "harness-adapt.js")]
self.localtestroot = os.path.join(self.root, *TEST_262_LOCAL_TESTS_PATH)
- self.ParseTestRecord = None
+
+ self._extract_sources()
+ self.parse_test_record = self._load_parse_test_record()
+
+ def _extract_sources(self):
+ # The archive is created only on swarming. Local checkouts have the
+ # data folder.
+ if (os.path.exists(ARCHIVE) and
+ # Check for a JS file from the archive if we need to unpack. Some other
+ # files from the archive unfortunately exist due to a bug in the
+ # isolate_processor.
+ # TODO(machenbach): Migrate this to GN to avoid using the faulty
+ # isolate_processor: http://crbug.com/669910
+ not os.path.exists(os.path.join(DATA, 'test', 'harness', 'error.js'))):
+ print "Extracting archive..."
+ tar = tarfile.open(ARCHIVE)
+ tar.extractall(path=os.path.dirname(ARCHIVE))
+ tar.close()
+
+ def _load_parse_test_record(self):
+ root = os.path.join(self.root, *TEST_262_TOOLS_PATH)
+ f = None
+ try:
+ (f, pathname, description) = imp.find_module("parseTestRecord", [root])
+ module = imp.load_module("parseTestRecord", f, pathname, description)
+ return module.parseTestRecord
+ except:
+ print ('Cannot load parseTestRecord; '
+ 'you may need to gclient sync for test262')
+ raise
+ finally:
+ if f:
+ f.close()
def ListTests(self, context):
testnames = set()
@@ -148,122 +176,79 @@ class Test262TestSuite(testsuite.TestSuite):
fullpath = os.path.join(dirname, filename)
relpath = re.match(TEST_262_RELPATH_REGEXP, fullpath).group(1)
testnames.add(relpath.replace(os.path.sep, "/"))
- cases = [testcase.TestCase(self, testname) for testname in testnames]
+ cases = map(self._create_test, testnames)
return [case for case in cases if len(
SKIPPED_FEATURES.intersection(
- self.GetTestRecord(case).get("features", []))) == 0]
-
- def GetParametersForTestCase(self, testcase, context):
- files = (
- list(self.harness) +
- ([os.path.join(self.root, "harness-agent.js")]
- if testcase.path.startswith('built-ins/Atomics') else []) +
- self.GetIncludesForTest(testcase) +
- (["--module"] if "module" in self.GetTestRecord(testcase) else []) +
- [self.GetPathForTest(testcase)]
+ case.test_record.get("features", []))) == 0]
+
+ def _test_class(self):
+ return TestCase
+
+ def _LegacyVariantsGeneratorFactory(self):
+ return LegacyVariantsGenerator
+
+ def _variants_gen_class(self):
+ return VariantsGenerator
+
+
+class TestCase(testcase.TestCase):
+ def __init__(self, *args, **kwargs):
+ super(TestCase, self).__init__(*args, **kwargs)
+
+ source = self.get_source()
+ self.test_record = self.suite.parse_test_record(source, self.path)
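+    # Negative tests name the expected error type in their front matter;
+    # output_proc below matches it against the exception d8 reports.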
+ self._expected_exception = (
+ self.test_record
+ .get('negative', {})
+ .get('type', None)
)
- flags = (
- testcase.flags + context.mode_flags +
- (["--throws"] if "negative" in self.GetTestRecord(testcase)
- else []) +
+
+ def _get_files_params(self, ctx):
+ return (
+ list(self.suite.harness) +
+ ([os.path.join(self.suite.root, "harness-agent.js")]
+ if self.path.startswith('built-ins/Atomics') else []) +
+ self._get_includes() +
+ (["--module"] if "module" in self.test_record else []) +
+ [self._get_source_path()]
+ )
+
+ def _get_suite_flags(self, ctx):
+ return (
+ (["--throws"] if "negative" in self.test_record else []) +
(["--allow-natives-syntax"]
- if "detachArrayBuffer.js" in
- self.GetTestRecord(testcase).get("includes", [])
+ if "detachArrayBuffer.js" in self.test_record.get("includes", [])
else []) +
- ([flag for (feature, flag) in FEATURE_FLAGS.items()
- if feature in self.GetTestRecord(testcase).get("features", [])])
+ [flag for (feature, flag) in FEATURE_FLAGS.items()
+ if feature in self.test_record.get("features", [])]
)
- return files, flags, {}
-
- def _VariantGeneratorFactory(self):
- return Test262VariantGenerator
-
- def LoadParseTestRecord(self):
- if not self.ParseTestRecord:
- root = os.path.join(self.root, *TEST_262_TOOLS_PATH)
- f = None
- try:
- (f, pathname, description) = imp.find_module("parseTestRecord", [root])
- module = imp.load_module("parseTestRecord", f, pathname, description)
- self.ParseTestRecord = module.parseTestRecord
- except:
- raise ImportError("Cannot load parseTestRecord; you may need to "
- "gclient sync for test262")
- finally:
- if f:
- f.close()
- return self.ParseTestRecord
-
- def GetTestRecord(self, testcase):
- if not hasattr(testcase, "test_record"):
- ParseTestRecord = self.LoadParseTestRecord()
- testcase.test_record = ParseTestRecord(self.GetSourceForTest(testcase),
- testcase.path)
- return testcase.test_record
-
- def BasePath(self, filename):
- return self.root if filename in TEST_262_NATIVE_FILES else self.harnesspath
-
- def GetIncludesForTest(self, testcase):
- test_record = self.GetTestRecord(testcase)
- return [os.path.join(self.BasePath(filename), filename)
- for filename in test_record.get("includes", [])]
-
- def GetPathForTest(self, testcase):
- filename = os.path.join(self.localtestroot, testcase.path + ".js")
- if not os.path.exists(filename):
- filename = os.path.join(self.testroot, testcase.path + ".js")
- return filename
-
- def GetSourceForTest(self, testcase):
- with open(self.GetPathForTest(testcase)) as f:
- return f.read()
-
- def _ParseException(self, str, testcase):
- # somefile:somelinenumber: someerror[: sometext]
- # somefile might include an optional drive letter on windows e.g. "e:".
- match = re.search(
- '^(?:\w:)?[^:]*:[0-9]+: ([^: ]+?)($|: )', str, re.MULTILINE)
- if match:
- return match.group(1).strip()
+
+ def _get_includes(self):
+ return [os.path.join(self._base_path(filename), filename)
+ for filename in self.test_record.get("includes", [])]
+
+ def _base_path(self, filename):
+ if filename in TEST_262_NATIVE_FILES:
+ return self.suite.root
else:
- print "Error parsing exception for %s" % testcase.GetLabel()
- return None
-
- def IsFailureOutput(self, testcase):
- output = testcase.output
- test_record = self.GetTestRecord(testcase)
- if output.exit_code != 0:
- return True
- if ("negative" in test_record and
- "type" in test_record["negative"] and
- self._ParseException(output.stdout, testcase) !=
- test_record["negative"]["type"]):
- return True
- return "FAILED!" in output.stdout
-
- def GetExpectedOutcomes(self, testcase):
- outcomes = self.GetStatusFileOutcomes(testcase)
- if (statusfile.FAIL_SLOPPY in outcomes and
- '--use-strict' not in testcase.flags):
- return [statusfile.FAIL]
- return super(Test262TestSuite, self).GetExpectedOutcomes(testcase)
-
- def PrepareSources(self):
- # The archive is created only on swarming. Local checkouts have the
- # data folder.
- if (os.path.exists(ARCHIVE) and
- # Check for a JS file from the archive if we need to unpack. Some other
- # files from the archive unfortunately exist due to a bug in the
- # isolate_processor.
- # TODO(machenbach): Migrate this to GN to avoid using the faulty
- # isolate_processor: http://crbug.com/669910
- not os.path.exists(os.path.join(DATA, 'test', 'harness', 'error.js'))):
- print "Extracting archive..."
- tar = tarfile.open(ARCHIVE)
- tar.extractall(path=os.path.dirname(ARCHIVE))
- tar.close()
+ return self.suite.harnesspath
+
+ def _get_source_path(self):
+ filename = self.path + self._get_suffix()
+ path = os.path.join(self.suite.localtestroot, filename)
+ if os.path.exists(path):
+ return path
+ return os.path.join(self.suite.testroot, filename)
+
+ @property
+ def output_proc(self):
+ if self._expected_exception is not None:
+ return test262.ExceptionOutProc(self.expected_outcomes,
+ self._expected_exception)
+ if self.expected_outcomes == outproc.OUTCOMES_PASS:
+ return test262.PASS_NO_EXCEPTION
+ return test262.NoExceptionOutProc(self.expected_outcomes)
def GetSuite(name, root):
- return Test262TestSuite(name, root)
+ return TestSuite(name, root)
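The feature-to-flag resolution above is data-driven. A standalone sketch of
what _get_suite_flags contributes (the dictionary entries are copied from
this diff, the test record is invented):

FEATURE_FLAGS = {
  'BigInt': '--harmony-bigint',
  'regexp-named-groups': '--harmony-regexp-named-captures',
  'class-fields-public': '--harmony-class-fields',
}

def feature_flags(test_record):
  # One d8 flag per known feature named in the test's front matter;
  # unmapped features contribute nothing here (SKIPPED_FEATURES is
  # filtered out earlier, in ListTests).
  return [flag for feature, flag in FEATURE_FLAGS.items()
          if feature in test_record.get('features', [])]

print(feature_flags({'features': ['BigInt', 'arrow-function']}))
# -> ['--harmony-bigint']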
diff --git a/deps/v8/test/unittests/BUILD.gn b/deps/v8/test/unittests/BUILD.gn
index d6d0a1067f..7f70a5c959 100644
--- a/deps/v8/test/unittests/BUILD.gn
+++ b/deps/v8/test/unittests/BUILD.gn
@@ -35,6 +35,7 @@ v8_source_set("unittests_sources") {
"../../test/common/wasm/wasm-macro-gen.h",
"../../testing/gmock-support.h",
"../../testing/gtest-support.h",
+ "allocation-unittest.cc",
"api/access-check-unittest.cc",
"api/exception-unittest.cc",
"api/interceptor-unittest.cc",
@@ -121,6 +122,7 @@ v8_source_set("unittests_sources") {
"compiler/schedule-unittest.cc",
"compiler/scheduler-rpo-unittest.cc",
"compiler/scheduler-unittest.cc",
+ "compiler/simplified-lowering-unittest.cc",
"compiler/simplified-operator-reducer-unittest.cc",
"compiler/simplified-operator-unittest.cc",
"compiler/state-values-utils-unittest.cc",
@@ -186,7 +188,7 @@ v8_source_set("unittests_sources") {
"wasm/module-decoder-unittest.cc",
"wasm/streaming-decoder-unittest.cc",
"wasm/trap-handler-unittest.cc",
- "wasm/wasm-heap-unittest.cc",
+ "wasm/wasm-code-manager-unittest.cc",
"wasm/wasm-macro-gen-unittest.cc",
"wasm/wasm-module-builder-unittest.cc",
"wasm/wasm-opcodes-unittest.cc",
diff --git a/deps/v8/test/unittests/allocation-unittest.cc b/deps/v8/test/unittests/allocation-unittest.cc
new file mode 100644
index 0000000000..42904da149
--- /dev/null
+++ b/deps/v8/test/unittests/allocation-unittest.cc
@@ -0,0 +1,164 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/allocation.h"
+
+#if V8_OS_POSIX
+#include <setjmp.h>
+#include <signal.h>
+#include <unistd.h> // NOLINT
+#endif // V8_OS_POSIX
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+
+// TODO(eholk): Add a windows version of permissions tests.
+#if V8_OS_POSIX
+namespace {
+
+// These tests make sure the routines to allocate memory do so with the correct
+// permissions.
+//
+// Unfortunately, there is no API to find the protection of a memory address,
+// so instead we test permissions by installing a signal handler, probing a
+// memory location and recovering from the fault.
+//
+// We don't test the execution permission because to do so we'd have to
+// dynamically generate code and test if we can execute it.
+
+class MemoryAllocationPermissionsTest : public ::testing::Test {
+ static void SignalHandler(int signal, siginfo_t* info, void*) {
+ siglongjmp(continuation_, 1);
+ }
+ struct sigaction old_action_;
+// On Mac, sometimes we get SIGBUS instead of SIGSEGV.
+#if V8_OS_MACOSX
+ struct sigaction old_bus_action_;
+#endif
+
+ protected:
+ virtual void SetUp() {
+ struct sigaction action;
+ action.sa_sigaction = SignalHandler;
+ sigemptyset(&action.sa_mask);
+ action.sa_flags = SA_SIGINFO;
+ sigaction(SIGSEGV, &action, &old_action_);
+#if V8_OS_MACOSX
+ sigaction(SIGBUS, &action, &old_bus_action_);
+#endif
+ }
+
+ virtual void TearDown() {
+ // Be a good citizen and restore the old signal handler.
+ sigaction(SIGSEGV, &old_action_, nullptr);
+#if V8_OS_MACOSX
+ sigaction(SIGBUS, &old_bus_action_, nullptr);
+#endif
+ }
+
+ public:
+ static sigjmp_buf continuation_;
+
+ enum class MemoryAction { kRead, kWrite };
+
+ void ProbeMemory(volatile int* buffer, MemoryAction action,
+ bool should_succeed) {
+ const int save_sigs = 1;
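+    // sigsetjmp returns 0 when called directly. If the probe below faults,
+    // SignalHandler siglongjmps back here with a non-zero value and we fall
+    // through to the should_succeed check at the end.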
+ if (!sigsetjmp(continuation_, save_sigs)) {
+ switch (action) {
+ case MemoryAction::kRead: {
+ // static_cast to remove the reference and force a memory read.
+ USE(static_cast<int>(*buffer));
+ break;
+ }
+ case MemoryAction::kWrite: {
+ *buffer = 0;
+ break;
+ }
+ }
+ if (should_succeed) {
+ SUCCEED();
+ } else {
+ FAIL();
+ }
+ return;
+ }
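+ // The probe faulted and the signal handler jumped back here.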
+ if (should_succeed) {
+ FAIL();
+ } else {
+ SUCCEED();
+ }
+ }
+
+ void TestPermissions(PageAllocator::Permission permission, bool can_read,
+ bool can_write) {
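+ // Allocate one page with the requested permission, probe it for read and
+ // write access, and release it again.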
+ const size_t page_size = AllocatePageSize();
+ int* buffer = static_cast<int*>(
+ AllocatePages(nullptr, page_size, page_size, permission));
+ ProbeMemory(buffer, MemoryAction::kRead, can_read);
+ ProbeMemory(buffer, MemoryAction::kWrite, can_write);
+ CHECK(FreePages(buffer, page_size));
+ }
+};
+
+sigjmp_buf MemoryAllocationPermissionsTest::continuation_;
+
+} // namespace
+
+TEST_F(MemoryAllocationPermissionsTest, DoTest) {
+ TestPermissions(PageAllocator::Permission::kNoAccess, false, false);
+ TestPermissions(PageAllocator::Permission::kReadWrite, true, true);
+ TestPermissions(PageAllocator::Permission::kReadWriteExecute, true, true);
+}
+#endif // V8_OS_POSIX
+
+// Basic tests of allocation.
+
+class AllocationTest : public ::testing::Test {};
+
+TEST(AllocationTest, AllocateAndFree) {
+ size_t page_size = v8::internal::AllocatePageSize();
+ CHECK_NE(0, page_size);
+
+ // A large allocation, aligned at native allocation granularity.
+ const size_t kAllocationSize = 1 * v8::internal::MB;
+ void* mem_addr = v8::internal::AllocatePages(
+ v8::internal::GetRandomMmapAddr(), kAllocationSize, page_size,
+ PageAllocator::Permission::kReadWrite);
+ CHECK_NOT_NULL(mem_addr);
+ CHECK(v8::internal::FreePages(mem_addr, kAllocationSize));
+
+ // A large allocation, aligned significantly beyond native granularity.
+ const size_t kBigAlignment = 64 * v8::internal::MB;
+ void* aligned_mem_addr = v8::internal::AllocatePages(
+ v8::internal::GetRandomMmapAddr(), kAllocationSize, kBigAlignment,
+ PageAllocator::Permission::kReadWrite);
+ CHECK_NOT_NULL(aligned_mem_addr);
+ CHECK_EQ(aligned_mem_addr, AlignedAddress(aligned_mem_addr, kBigAlignment));
+ CHECK(v8::internal::FreePages(aligned_mem_addr, kAllocationSize));
+}
+
+TEST(AllocationTest, ReserveMemory) {
+ size_t page_size = v8::internal::AllocatePageSize();
+ CHECK_NE(0, page_size);
+ const size_t kAllocationSize = 1 * v8::internal::MB;
+ void* mem_addr = v8::internal::AllocatePages(
+ v8::internal::GetRandomMmapAddr(), kAllocationSize, page_size,
+ PageAllocator::Permission::kReadWrite);
+ CHECK_NOT_NULL(mem_addr);
+ size_t commit_size = v8::internal::CommitPageSize();
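+ // Make the first commit-sized page writable before touching it.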
+ CHECK(v8::internal::SetPermissions(mem_addr, commit_size,
+ PageAllocator::Permission::kReadWrite));
+ // Check whether we can write to memory.
+ int* addr = static_cast<int*>(mem_addr);
+ addr[v8::internal::KB - 1] = 2;
+ CHECK(v8::internal::SetPermissions(mem_addr, commit_size,
+ PageAllocator::Permission::kNoAccess));
+ CHECK(v8::internal::FreePages(mem_addr, kAllocationSize));
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/api/access-check-unittest.cc b/deps/v8/test/unittests/api/access-check-unittest.cc
index 05913de62a..8bfb507a7c 100644
--- a/deps/v8/test/unittests/api/access-check-unittest.cc
+++ b/deps/v8/test/unittests/api/access-check-unittest.cc
@@ -37,15 +37,10 @@ TEST_F(AccessCheckTest, GetOwnPropertyDescriptor) {
global_template->SetAccessCheckCallback(AccessCheck);
Local<FunctionTemplate> getter_template = FunctionTemplate::New(
- isolate(), [](const FunctionCallbackInfo<Value>& info) {
- FAIL() << "This should never be called.";
- info.GetReturnValue().Set(42);
- });
+ isolate(), [](const FunctionCallbackInfo<Value>& info) { FAIL(); });
getter_template->SetAcceptAnyReceiver(false);
Local<FunctionTemplate> setter_template = FunctionTemplate::New(
- isolate(), [](const FunctionCallbackInfo<v8::Value>& info) {
- FAIL() << "This should never be called.";
- });
+ isolate(), [](const FunctionCallbackInfo<v8::Value>& info) { FAIL(); });
setter_template->SetAcceptAnyReceiver(false);
global_template->SetAccessorProperty(
String::NewFromUtf8(isolate(), "property", NewStringType::kNormal)
diff --git a/deps/v8/test/unittests/asmjs/asm-scanner-unittest.cc b/deps/v8/test/unittests/asmjs/asm-scanner-unittest.cc
index ebfcc665f8..fe061f8e2a 100644
--- a/deps/v8/test/unittests/asmjs/asm-scanner-unittest.cc
+++ b/deps/v8/test/unittests/asmjs/asm-scanner-unittest.cc
@@ -190,7 +190,7 @@ TEST_F(AsmJsScannerTest, LocalScope) {
}
TEST_F(AsmJsScannerTest, Numbers) {
- SetupScanner("1 1.2 0x1f 1.e3");
+ SetupScanner("1 1.2 0x1F 1.e3");
CHECK(scanner->IsUnsigned());
CHECK_EQ(1, scanner->AsUnsigned());
@@ -212,10 +212,10 @@ TEST_F(AsmJsScannerTest, Numbers) {
}
TEST_F(AsmJsScannerTest, UnsignedNumbers) {
- SetupScanner("0x7fffffff 0x80000000 0xffffffff 0x100000000");
+ SetupScanner("0x7FFFFFFF 0x80000000 0xFFFFFFFF 0x100000000");
CHECK(scanner->IsUnsigned());
- CHECK_EQ(0x7fffffff, scanner->AsUnsigned());
+ CHECK_EQ(0x7FFFFFFF, scanner->AsUnsigned());
scanner->Next();
CHECK(scanner->IsUnsigned());
@@ -223,7 +223,7 @@ TEST_F(AsmJsScannerTest, UnsignedNumbers) {
scanner->Next();
CHECK(scanner->IsUnsigned());
- CHECK_EQ(0xffffffff, scanner->AsUnsigned());
+ CHECK_EQ(0xFFFFFFFF, scanner->AsUnsigned());
scanner->Next();
// Numeric "unsigned" literals with a payload of more than 32-bit are rejected
diff --git a/deps/v8/test/unittests/base/bits-unittest.cc b/deps/v8/test/unittests/base/bits-unittest.cc
index 485dddf529..6f787eb727 100644
--- a/deps/v8/test/unittests/base/bits-unittest.cc
+++ b/deps/v8/test/unittests/base/bits-unittest.cc
@@ -22,18 +22,18 @@ TEST(Bits, CountPopulation16) {
EXPECT_EQ(0u, CountPopulation(uint16_t{0}));
EXPECT_EQ(1u, CountPopulation(uint16_t{1}));
EXPECT_EQ(4u, CountPopulation(uint16_t{0x1111}));
- EXPECT_EQ(8u, CountPopulation(uint16_t{0xf0f0}));
- EXPECT_EQ(12u, CountPopulation(uint16_t{0xf0ff}));
- EXPECT_EQ(16u, CountPopulation(uint16_t{0xffff}));
+ EXPECT_EQ(8u, CountPopulation(uint16_t{0xF0F0}));
+ EXPECT_EQ(12u, CountPopulation(uint16_t{0xF0FF}));
+ EXPECT_EQ(16u, CountPopulation(uint16_t{0xFFFF}));
}
TEST(Bits, CountPopulation32) {
EXPECT_EQ(0u, CountPopulation(uint32_t{0}));
EXPECT_EQ(1u, CountPopulation(uint32_t{1}));
EXPECT_EQ(8u, CountPopulation(uint32_t{0x11111111}));
- EXPECT_EQ(16u, CountPopulation(uint32_t{0xf0f0f0f0}));
- EXPECT_EQ(24u, CountPopulation(uint32_t{0xfff0f0ff}));
- EXPECT_EQ(32u, CountPopulation(uint32_t{0xffffffff}));
+ EXPECT_EQ(16u, CountPopulation(uint32_t{0xF0F0F0F0}));
+ EXPECT_EQ(24u, CountPopulation(uint32_t{0xFFF0F0FF}));
+ EXPECT_EQ(32u, CountPopulation(uint32_t{0xFFFFFFFF}));
}
TEST(Bits, CountPopulation64) {
@@ -41,13 +41,13 @@ TEST(Bits, CountPopulation64) {
EXPECT_EQ(1u, CountPopulation(uint64_t{1}));
EXPECT_EQ(2u, CountPopulation(uint64_t{0x8000000000000001}));
EXPECT_EQ(8u, CountPopulation(uint64_t{0x11111111}));
- EXPECT_EQ(16u, CountPopulation(uint64_t{0xf0f0f0f0}));
- EXPECT_EQ(24u, CountPopulation(uint64_t{0xfff0f0ff}));
- EXPECT_EQ(32u, CountPopulation(uint64_t{0xffffffff}));
+ EXPECT_EQ(16u, CountPopulation(uint64_t{0xF0F0F0F0}));
+ EXPECT_EQ(24u, CountPopulation(uint64_t{0xFFF0F0FF}));
+ EXPECT_EQ(32u, CountPopulation(uint64_t{0xFFFFFFFF}));
EXPECT_EQ(16u, CountPopulation(uint64_t{0x1111111111111111}));
- EXPECT_EQ(32u, CountPopulation(uint64_t{0xf0f0f0f0f0f0f0f0}));
- EXPECT_EQ(48u, CountPopulation(uint64_t{0xfff0f0fffff0f0ff}));
- EXPECT_EQ(64u, CountPopulation(uint64_t{0xffffffffffffffff}));
+ EXPECT_EQ(32u, CountPopulation(uint64_t{0xF0F0F0F0F0F0F0F0}));
+ EXPECT_EQ(48u, CountPopulation(uint64_t{0xFFF0F0FFFFF0F0FF}));
+ EXPECT_EQ(64u, CountPopulation(uint64_t{0xFFFFFFFFFFFFFFFF}));
}
TEST(Bits, CountLeadingZeros16) {
@@ -57,7 +57,7 @@ TEST(Bits, CountLeadingZeros16) {
EXPECT_EQ(15u - shift,
CountLeadingZeros(static_cast<uint16_t>(1 << shift)));
}
- EXPECT_EQ(4u, CountLeadingZeros(uint16_t{0x0f0f}));
+ EXPECT_EQ(4u, CountLeadingZeros(uint16_t{0x0F0F}));
}
TEST(Bits, CountLeadingZeros32) {
@@ -66,7 +66,7 @@ TEST(Bits, CountLeadingZeros32) {
TRACED_FORRANGE(uint32_t, shift, 0, 31) {
EXPECT_EQ(31u - shift, CountLeadingZeros(uint32_t{1} << shift));
}
- EXPECT_EQ(4u, CountLeadingZeros(uint32_t{0x0f0f0f0f}));
+ EXPECT_EQ(4u, CountLeadingZeros(uint32_t{0x0F0F0F0F}));
}
TEST(Bits, CountLeadingZeros64) {
@@ -75,8 +75,8 @@ TEST(Bits, CountLeadingZeros64) {
TRACED_FORRANGE(uint32_t, shift, 0, 63) {
EXPECT_EQ(63u - shift, CountLeadingZeros(uint64_t{1} << shift));
}
- EXPECT_EQ(36u, CountLeadingZeros(uint64_t{0x0f0f0f0f}));
- EXPECT_EQ(4u, CountLeadingZeros(uint64_t{0x0f0f0f0f00000000}));
+ EXPECT_EQ(36u, CountLeadingZeros(uint64_t{0x0F0F0F0F}));
+ EXPECT_EQ(4u, CountLeadingZeros(uint64_t{0x0F0F0F0F00000000}));
}
TEST(Bits, CountTrailingZeros16) {
@@ -85,7 +85,7 @@ TEST(Bits, CountTrailingZeros16) {
TRACED_FORRANGE(uint16_t, shift, 0, 15) {
EXPECT_EQ(shift, CountTrailingZeros(static_cast<uint16_t>(1 << shift)));
}
- EXPECT_EQ(4u, CountTrailingZeros(uint16_t{0xf0f0u}));
+ EXPECT_EQ(4u, CountTrailingZeros(uint16_t{0xF0F0u}));
}
TEST(Bits, CountTrailingZerosu32) {
@@ -94,7 +94,7 @@ TEST(Bits, CountTrailingZerosu32) {
TRACED_FORRANGE(uint32_t, shift, 0, 31) {
EXPECT_EQ(shift, CountTrailingZeros(uint32_t{1} << shift));
}
- EXPECT_EQ(4u, CountTrailingZeros(uint32_t{0xf0f0f0f0u}));
+ EXPECT_EQ(4u, CountTrailingZeros(uint32_t{0xF0F0F0F0u}));
}
TEST(Bits, CountTrailingZerosi32) {
@@ -102,7 +102,7 @@ TEST(Bits, CountTrailingZerosi32) {
TRACED_FORRANGE(uint32_t, shift, 0, 31) {
EXPECT_EQ(shift, CountTrailingZeros(int32_t{1} << shift));
}
- EXPECT_EQ(4u, CountTrailingZeros(int32_t{0x70f0f0f0u}));
+ EXPECT_EQ(4u, CountTrailingZeros(int32_t{0x70F0F0F0u}));
EXPECT_EQ(2u, CountTrailingZeros(int32_t{-4}));
EXPECT_EQ(0u, CountTrailingZeros(int32_t{-1}));
}
@@ -113,8 +113,8 @@ TEST(Bits, CountTrailingZeros64) {
TRACED_FORRANGE(uint32_t, shift, 0, 63) {
EXPECT_EQ(shift, CountTrailingZeros(uint64_t{1} << shift));
}
- EXPECT_EQ(4u, CountTrailingZeros(uint64_t{0xf0f0f0f0}));
- EXPECT_EQ(36u, CountTrailingZeros(uint64_t{0xf0f0f0f000000000}));
+ EXPECT_EQ(4u, CountTrailingZeros(uint64_t{0xF0F0F0F0}));
+ EXPECT_EQ(36u, CountTrailingZeros(uint64_t{0xF0F0F0F000000000}));
}
@@ -128,21 +128,21 @@ TEST(Bits, IsPowerOfTwo32) {
TRACED_FORRANGE(uint32_t, shift, 2, 31) {
EXPECT_FALSE(IsPowerOfTwo((1U << shift) - 1U));
}
- EXPECT_FALSE(IsPowerOfTwo(0xffffffff));
+ EXPECT_FALSE(IsPowerOfTwo(0xFFFFFFFF));
}
TEST(Bits, IsPowerOfTwo64) {
- EXPECT_FALSE(IsPowerOfTwo(V8_UINT64_C(0)));
+ EXPECT_FALSE(IsPowerOfTwo(uint64_t{0}));
TRACED_FORRANGE(uint32_t, shift, 0, 63) {
- EXPECT_TRUE(IsPowerOfTwo(V8_UINT64_C(1) << shift));
- EXPECT_FALSE(IsPowerOfTwo((V8_UINT64_C(1) << shift) + 5U));
- EXPECT_FALSE(IsPowerOfTwo(~(V8_UINT64_C(1) << shift)));
+ EXPECT_TRUE(IsPowerOfTwo(uint64_t{1} << shift));
+ EXPECT_FALSE(IsPowerOfTwo((uint64_t{1} << shift) + 5U));
+ EXPECT_FALSE(IsPowerOfTwo(~(uint64_t{1} << shift)));
}
TRACED_FORRANGE(uint32_t, shift, 2, 63) {
- EXPECT_FALSE(IsPowerOfTwo((V8_UINT64_C(1) << shift) - 1U));
+ EXPECT_FALSE(IsPowerOfTwo((uint64_t{1} << shift) - 1U));
}
- EXPECT_FALSE(IsPowerOfTwo(V8_UINT64_C(0xffffffffffffffff)));
+ EXPECT_FALSE(IsPowerOfTwo(uint64_t{0xFFFFFFFFFFFFFFFF}));
}
@@ -153,7 +153,7 @@ TEST(Bits, RoundUpToPowerOfTwo32) {
EXPECT_EQ(1u, RoundUpToPowerOfTwo32(0));
EXPECT_EQ(1u, RoundUpToPowerOfTwo32(1));
EXPECT_EQ(4u, RoundUpToPowerOfTwo32(3));
- EXPECT_EQ(0x80000000u, RoundUpToPowerOfTwo32(0x7fffffffu));
+ EXPECT_EQ(0x80000000u, RoundUpToPowerOfTwo32(0x7FFFFFFFu));
}
@@ -206,7 +206,7 @@ TEST(Bits, RotateRight64) {
}
EXPECT_EQ(1u, RotateRight64(1, 0));
EXPECT_EQ(1u, RotateRight64(2, 1));
- EXPECT_EQ(V8_UINT64_C(0x8000000000000000), RotateRight64(1, 1));
+ EXPECT_EQ(uint64_t{0x8000000000000000}, RotateRight64(1, 1));
}
diff --git a/deps/v8/test/unittests/base/logging-unittest.cc b/deps/v8/test/unittests/base/logging-unittest.cc
index fd334ec49d..a0686a2f64 100644
--- a/deps/v8/test/unittests/base/logging-unittest.cc
+++ b/deps/v8/test/unittests/base/logging-unittest.cc
@@ -32,30 +32,34 @@ namespace {
} // namespace
TEST(LoggingTest, CheckEQImpl) {
- CHECK_SUCCEED(EQ, 0.0, 0.0)
- CHECK_SUCCEED(EQ, 0.0, -0.0)
- CHECK_SUCCEED(EQ, -0.0, 0.0)
- CHECK_SUCCEED(EQ, -0.0, -0.0)
+ CHECK_SUCCEED(EQ, 0.0, 0.0);
+ CHECK_SUCCEED(EQ, 0.0, -0.0);
+ CHECK_SUCCEED(EQ, -0.0, 0.0);
+ CHECK_SUCCEED(EQ, -0.0, -0.0);
}
TEST(LoggingTest, CompareSignedMismatch) {
- CHECK_SUCCEED(EQ, static_cast<int32_t>(14), static_cast<uint32_t>(14))
- CHECK_FAIL(EQ, static_cast<int32_t>(14), static_cast<uint32_t>(15))
- CHECK_FAIL(EQ, static_cast<int32_t>(-1), static_cast<uint32_t>(-1))
- CHECK_SUCCEED(LT, static_cast<int32_t>(-1), static_cast<uint32_t>(0))
- CHECK_SUCCEED(LT, static_cast<int32_t>(-1), static_cast<uint32_t>(-1))
- CHECK_SUCCEED(LE, static_cast<int32_t>(-1), static_cast<uint32_t>(0))
- CHECK_SUCCEED(LE, static_cast<int32_t>(55), static_cast<uint32_t>(55))
- CHECK_SUCCEED(LT, static_cast<int32_t>(55), static_cast<uint32_t>(0x7fffff00))
- CHECK_SUCCEED(LE, static_cast<int32_t>(55), static_cast<uint32_t>(0x7fffff00))
- CHECK_SUCCEED(GE, static_cast<uint32_t>(0x7fffff00), static_cast<int32_t>(55))
- CHECK_SUCCEED(GT, static_cast<uint32_t>(0x7fffff00), static_cast<int32_t>(55))
- CHECK_SUCCEED(GT, static_cast<uint32_t>(-1), static_cast<int32_t>(-1))
- CHECK_SUCCEED(GE, static_cast<uint32_t>(0), static_cast<int32_t>(-1))
- CHECK_SUCCEED(LT, static_cast<int8_t>(-1), static_cast<uint32_t>(0))
- CHECK_SUCCEED(GT, static_cast<uint64_t>(0x7f01010101010101), 0)
- CHECK_SUCCEED(LE, static_cast<int64_t>(0xff01010101010101),
- static_cast<uint8_t>(13))
+ CHECK_SUCCEED(EQ, static_cast<int32_t>(14), static_cast<uint32_t>(14));
+ CHECK_FAIL(EQ, static_cast<int32_t>(14), static_cast<uint32_t>(15));
+ CHECK_FAIL(EQ, static_cast<int32_t>(-1), static_cast<uint32_t>(-1));
+ CHECK_SUCCEED(LT, static_cast<int32_t>(-1), static_cast<uint32_t>(0));
+ CHECK_SUCCEED(LT, static_cast<int32_t>(-1), static_cast<uint32_t>(-1));
+ CHECK_SUCCEED(LE, static_cast<int32_t>(-1), static_cast<uint32_t>(0));
+ CHECK_SUCCEED(LE, static_cast<int32_t>(55), static_cast<uint32_t>(55));
+ CHECK_SUCCEED(LT, static_cast<int32_t>(55),
+ static_cast<uint32_t>(0x7FFFFF00));
+ CHECK_SUCCEED(LE, static_cast<int32_t>(55),
+ static_cast<uint32_t>(0x7FFFFF00));
+ CHECK_SUCCEED(GE, static_cast<uint32_t>(0x7FFFFF00),
+ static_cast<int32_t>(55));
+ CHECK_SUCCEED(GT, static_cast<uint32_t>(0x7FFFFF00),
+ static_cast<int32_t>(55));
+ CHECK_SUCCEED(GT, static_cast<uint32_t>(-1), static_cast<int32_t>(-1));
+ CHECK_SUCCEED(GE, static_cast<uint32_t>(0), static_cast<int32_t>(-1));
+ CHECK_SUCCEED(LT, static_cast<int8_t>(-1), static_cast<uint32_t>(0));
+ CHECK_SUCCEED(GT, static_cast<uint64_t>(0x7F01010101010101), 0);
+ CHECK_SUCCEED(LE, static_cast<int64_t>(0xFF01010101010101),
+ static_cast<uint8_t>(13));
}
TEST(LoggingTest, CompareAgainstStaticConstPointer) {
diff --git a/deps/v8/test/unittests/base/ostreams-unittest.cc b/deps/v8/test/unittests/base/ostreams-unittest.cc
index 31744cdbda..1444eb7a5c 100644
--- a/deps/v8/test/unittests/base/ostreams-unittest.cc
+++ b/deps/v8/test/unittests/base/ostreams-unittest.cc
@@ -47,7 +47,7 @@ TEST(Ostream, AsHexBytes) {
testAsHexBytes("23 01", AsHexBytes(0x123, 1));
testAsHexBytes("23 01", AsHexBytes(0x123, 2));
testAsHexBytes("23 01 00", AsHexBytes(0x123, 3));
- testAsHexBytes("ff ff ff ff", AsHexBytes(0xffffffff));
+ testAsHexBytes("ff ff ff ff", AsHexBytes(0xFFFFFFFF));
testAsHexBytes("00 00 00 00", AsHexBytes(0, 4));
testAsHexBytes("56 34 12", AsHexBytes(0x123456));
@@ -58,7 +58,7 @@ TEST(Ostream, AsHexBytes) {
testAsHexBytes("01 23", AsHexBytes(0x123, 1, AsHexBytes::kBigEndian));
testAsHexBytes("01 23", AsHexBytes(0x123, 2, AsHexBytes::kBigEndian));
testAsHexBytes("00 01 23", AsHexBytes(0x123, 3, AsHexBytes::kBigEndian));
- testAsHexBytes("ff ff ff ff", AsHexBytes(0xffffffff, AsHexBytes::kBigEndian));
+ testAsHexBytes("ff ff ff ff", AsHexBytes(0xFFFFFFFF, AsHexBytes::kBigEndian));
testAsHexBytes("00 00 00 00", AsHexBytes(0, 4, AsHexBytes::kBigEndian));
testAsHexBytes("12 34 56", AsHexBytes(0x123456, 1, AsHexBytes::kBigEndian));
}
diff --git a/deps/v8/test/unittests/base/platform/platform-unittest.cc b/deps/v8/test/unittests/base/platform/platform-unittest.cc
index cb07ad1ca4..f9fc26a2df 100644
--- a/deps/v8/test/unittests/base/platform/platform-unittest.cc
+++ b/deps/v8/test/unittests/base/platform/platform-unittest.cc
@@ -4,23 +4,8 @@
#include "src/base/platform/platform.h"
-#if V8_OS_POSIX
-#include <setjmp.h>
-#include <signal.h>
-#include <unistd.h> // NOLINT
-#endif
-
-#if V8_OS_WIN
-#include "src/base/win32-headers.h"
-#endif
#include "testing/gtest/include/gtest/gtest.h"
-#if V8_OS_ANDROID
-#define DISABLE_ON_ANDROID(Name) DISABLED_##Name
-#else
-#define DISABLE_ON_ANDROID(Name) Name
-#endif
-
namespace v8 {
namespace base {
@@ -98,106 +83,5 @@ TEST_F(ThreadLocalStorageTest, DoTest) {
Join();
}
-#if V8_OS_POSIX
-// TODO(eholk): Add a windows version of these tests
-
-namespace {
-
-// These tests make sure the routines to allocate memory do so with the correct
-// permissions.
-//
-// Unfortunately, there is no API to find the protection of a memory address,
-// so instead we test permissions by installing a signal handler, probing a
-// memory location and recovering from the fault.
-//
-// We don't test the execution permission because to do so we'd have to
-// dynamically generate code and test if we can execute it.
-
-class MemoryAllocationPermissionsTest : public ::testing::Test {
- static void SignalHandler(int signal, siginfo_t* info, void*) {
- siglongjmp(continuation_, 1);
- }
- struct sigaction old_action_;
-// On Mac, sometimes we get SIGBUS instead of SIGSEGV.
-#if V8_OS_MACOSX
- struct sigaction old_bus_action_;
-#endif
-
- protected:
- virtual void SetUp() {
- struct sigaction action;
- action.sa_sigaction = SignalHandler;
- sigemptyset(&action.sa_mask);
- action.sa_flags = SA_SIGINFO;
- sigaction(SIGSEGV, &action, &old_action_);
-#if V8_OS_MACOSX
- sigaction(SIGBUS, &action, &old_bus_action_);
-#endif
- }
-
- virtual void TearDown() {
- // be a good citizen and restore the old signal handler.
- sigaction(SIGSEGV, &old_action_, nullptr);
-#if V8_OS_MACOSX
- sigaction(SIGBUS, &old_bus_action_, nullptr);
-#endif
- }
-
- public:
- static sigjmp_buf continuation_;
-
- enum class MemoryAction { kRead, kWrite };
-
- void ProbeMemory(volatile int* buffer, MemoryAction action,
- bool should_succeed) {
- const int save_sigs = 1;
- if (!sigsetjmp(continuation_, save_sigs)) {
- switch (action) {
- case MemoryAction::kRead: {
- // static_cast to remove the reference and force a memory read.
- USE(static_cast<int>(*buffer));
- break;
- }
- case MemoryAction::kWrite: {
- *buffer = 0;
- break;
- }
- }
- if (should_succeed) {
- SUCCEED();
- } else {
- FAIL();
- }
- return;
- }
- if (should_succeed) {
- FAIL();
- } else {
- SUCCEED();
- }
- }
-
- void TestPermissions(OS::MemoryPermission permission, bool can_read,
- bool can_write) {
- const size_t page_size = OS::AllocatePageSize();
- int* buffer = static_cast<int*>(
- OS::Allocate(nullptr, page_size, page_size, permission));
- ProbeMemory(buffer, MemoryAction::kRead, can_read);
- ProbeMemory(buffer, MemoryAction::kWrite, can_write);
- CHECK(OS::Free(buffer, page_size));
- }
-};
-
-sigjmp_buf MemoryAllocationPermissionsTest::continuation_;
-
-TEST_F(MemoryAllocationPermissionsTest, DoTest) {
- TestPermissions(OS::MemoryPermission::kNoAccess, false, false);
- TestPermissions(OS::MemoryPermission::kReadWrite, true, true);
- TestPermissions(OS::MemoryPermission::kReadWriteExecute, true, true);
-}
-
-} // namespace
-#endif // V8_OS_POSIX
-
} // namespace base
} // namespace v8
diff --git a/deps/v8/test/unittests/bigint-unittest.cc b/deps/v8/test/unittests/bigint-unittest.cc
index 252cad6a8f..6e1018c581 100644
--- a/deps/v8/test/unittests/bigint-unittest.cc
+++ b/deps/v8/test/unittests/bigint-unittest.cc
@@ -97,7 +97,7 @@ TEST_F(BigIntWithIsolate, CompareToDouble) {
big = NewFromInt(isolate(), 0xF00D00);
Compare(big, 15731968.125, ComparisonResult::kLessThan);
Compare(big, 15731967.875, ComparisonResult::kGreaterThan);
- big = BigIntLiteral(isolate(), "0x123456789ab").ToHandleChecked();
+ big = BigIntLiteral(isolate(), "0x123456789AB").ToHandleChecked();
Compare(big, 1250999896491.125, ComparisonResult::kLessThan);
// Equality!
diff --git a/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc b/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
index 186a4397ed..ab1c57d9c5 100644
--- a/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
+++ b/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
@@ -2302,7 +2302,7 @@ TEST_F(InstructionSelectorTest, Int32AddWithWord32And) {
MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
- Node* const r = m.Int32Add(m.Word32And(p0, m.Int32Constant(0xff)), p1);
+ Node* const r = m.Int32Add(m.Word32And(p0, m.Int32Constant(0xFF)), p1);
m.Return(r);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2319,7 +2319,7 @@ TEST_F(InstructionSelectorTest, Int32AddWithWord32And) {
MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
- Node* const r = m.Int32Add(p1, m.Word32And(p0, m.Int32Constant(0xff)));
+ Node* const r = m.Int32Add(p1, m.Word32And(p0, m.Int32Constant(0xFF)));
m.Return(r);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2336,7 +2336,7 @@ TEST_F(InstructionSelectorTest, Int32AddWithWord32And) {
MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
- Node* const r = m.Int32Add(m.Word32And(p0, m.Int32Constant(0xffff)), p1);
+ Node* const r = m.Int32Add(m.Word32And(p0, m.Int32Constant(0xFFFF)), p1);
m.Return(r);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2353,7 +2353,7 @@ TEST_F(InstructionSelectorTest, Int32AddWithWord32And) {
MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
- Node* const r = m.Int32Add(p1, m.Word32And(p0, m.Int32Constant(0xffff)));
+ Node* const r = m.Int32Add(p1, m.Word32And(p0, m.Int32Constant(0xFFFF)));
m.Return(r);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2808,7 +2808,7 @@ TEST_F(InstructionSelectorTest, Word32AndWithUbfxImmediateForARMv7) {
if (width == 16) continue; // Uxth.
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32And(m.Parameter(0),
- m.Int32Constant(0xffffffffu >> (32 - width))));
+ m.Int32Constant(0xFFFFFFFFu >> (32 - width))));
Stream s = m.Build(ARMv7);
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArmUbfx, s[0]->arch_opcode());
@@ -2819,7 +2819,7 @@ TEST_F(InstructionSelectorTest, Word32AndWithUbfxImmediateForARMv7) {
TRACED_FORRANGE(int32_t, width, 9, 23) {
if (width == 16) continue; // Uxth.
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
- m.Return(m.Word32And(m.Int32Constant(0xffffffffu >> (32 - width)),
+ m.Return(m.Word32And(m.Int32Constant(0xFFFFFFFFu >> (32 - width)),
m.Parameter(0)));
Stream s = m.Build(ARMv7);
ASSERT_EQ(1U, s.size());
@@ -2837,7 +2837,7 @@ TEST_F(InstructionSelectorTest, Word32AndWithBfcImmediateForARMv7) {
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32And(
m.Parameter(0),
- m.Int32Constant(~((0xffffffffu >> (32 - width)) << lsb))));
+ m.Int32Constant(~((0xFFFFFFFFu >> (32 - width)) << lsb))));
Stream s = m.Build(ARMv7);
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArmBfc, s[0]->arch_opcode());
@@ -2853,7 +2853,7 @@ TEST_F(InstructionSelectorTest, Word32AndWithBfcImmediateForARMv7) {
TRACED_FORRANGE(int32_t, width, 9, (24 - lsb) - 1) {
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(
- m.Word32And(m.Int32Constant(~((0xffffffffu >> (32 - width)) << lsb)),
+ m.Word32And(m.Int32Constant(~((0xFFFFFFFFu >> (32 - width)) << lsb)),
m.Parameter(0)));
Stream s = m.Build(ARMv7);
ASSERT_EQ(1U, s.size());
@@ -2868,12 +2868,11 @@ TEST_F(InstructionSelectorTest, Word32AndWithBfcImmediateForARMv7) {
}
}
-
-TEST_F(InstructionSelectorTest, Word32AndWith0xffff) {
+TEST_F(InstructionSelectorTest, Word32AndWith0xFFFF) {
{
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
- Node* const r = m.Word32And(p0, m.Int32Constant(0xffff));
+ Node* const r = m.Word32And(p0, m.Int32Constant(0xFFFF));
m.Return(r);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2887,7 +2886,7 @@ TEST_F(InstructionSelectorTest, Word32AndWith0xffff) {
{
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
- Node* const r = m.Word32And(m.Int32Constant(0xffff), p0);
+ Node* const r = m.Word32And(m.Int32Constant(0xFFFF), p0);
m.Return(r);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2941,7 +2940,7 @@ TEST_F(InstructionSelectorTest, Word32ShrWithWord32AndWithImmediateForARMv7) {
uint32_t max = 1 << lsb;
if (max > static_cast<uint32_t>(kMaxInt)) max -= 1;
uint32_t jnk = rng()->NextInt(max);
- uint32_t msk = ((0xffffffffu >> (32 - width)) << lsb) | jnk;
+ uint32_t msk = ((0xFFFFFFFFu >> (32 - width)) << lsb) | jnk;
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Shr(m.Word32And(m.Parameter(0), m.Int32Constant(msk)),
m.Int32Constant(lsb)));
@@ -2958,7 +2957,7 @@ TEST_F(InstructionSelectorTest, Word32ShrWithWord32AndWithImmediateForARMv7) {
uint32_t max = 1 << lsb;
if (max > static_cast<uint32_t>(kMaxInt)) max -= 1;
uint32_t jnk = rng()->NextInt(max);
- uint32_t msk = ((0xffffffffu >> (32 - width)) << lsb) | jnk;
+ uint32_t msk = ((0xFFFFFFFFu >> (32 - width)) << lsb) | jnk;
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Shr(m.Word32And(m.Int32Constant(msk), m.Parameter(0)),
m.Int32Constant(lsb)));
@@ -3098,7 +3097,7 @@ TEST_F(InstructionSelectorTest, Word32AndWithWord32ShrWithImmediateForARMv7) {
continue; // Uxtb/h ror.
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32And(m.Word32Shr(m.Parameter(0), m.Int32Constant(lsb)),
- m.Int32Constant(0xffffffffu >> (32 - width))));
+ m.Int32Constant(0xFFFFFFFFu >> (32 - width))));
Stream s = m.Build(ARMv7);
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArmUbfx, s[0]->arch_opcode());
@@ -3113,7 +3112,7 @@ TEST_F(InstructionSelectorTest, Word32AndWithWord32ShrWithImmediateForARMv7) {
((lsb == 8) || (lsb == 16) || (lsb == 24)))
continue; // Uxtb/h ror.
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
- m.Return(m.Word32And(m.Int32Constant(0xffffffffu >> (32 - width)),
+ m.Return(m.Word32And(m.Int32Constant(0xFFFFFFFFu >> (32 - width)),
m.Word32Shr(m.Parameter(0), m.Int32Constant(lsb))));
Stream s = m.Build(ARMv7);
ASSERT_EQ(1U, s.size());
@@ -3125,13 +3124,12 @@ TEST_F(InstructionSelectorTest, Word32AndWithWord32ShrWithImmediateForARMv7) {
}
}
-
-TEST_F(InstructionSelectorTest, Word32AndWithWord32ShrAnd0xff) {
+TEST_F(InstructionSelectorTest, Word32AndWithWord32ShrAnd0xFF) {
TRACED_FORRANGE(int32_t, shr, 1, 3) {
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const r = m.Word32And(m.Word32Shr(p0, m.Int32Constant(shr * 8)),
- m.Int32Constant(0xff));
+ m.Int32Constant(0xFF));
m.Return(r);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -3142,7 +3140,7 @@ TEST_F(InstructionSelectorTest, Word32AndWithWord32ShrAnd0xff) {
TRACED_FORRANGE(int32_t, shr, 1, 3) {
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
- Node* const r = m.Word32And(m.Int32Constant(0xff),
+ Node* const r = m.Word32And(m.Int32Constant(0xFF),
m.Word32Shr(p0, m.Int32Constant(shr * 8)));
m.Return(r);
Stream s = m.Build();
@@ -3153,13 +3151,12 @@ TEST_F(InstructionSelectorTest, Word32AndWithWord32ShrAnd0xff) {
}
}
-
-TEST_F(InstructionSelectorTest, Word32AndWithWord32ShrAnd0xffff) {
+TEST_F(InstructionSelectorTest, Word32AndWithWord32ShrAnd0xFFFF) {
TRACED_FORRANGE(int32_t, shr, 1, 2) {
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const r = m.Word32And(m.Word32Shr(p0, m.Int32Constant(shr * 8)),
- m.Int32Constant(0xffff));
+ m.Int32Constant(0xFFFF));
m.Return(r);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -3170,7 +3167,7 @@ TEST_F(InstructionSelectorTest, Word32AndWithWord32ShrAnd0xffff) {
TRACED_FORRANGE(int32_t, shr, 1, 2) {
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
- Node* const r = m.Word32And(m.Int32Constant(0xffff),
+ Node* const r = m.Word32And(m.Int32Constant(0xFFFF),
m.Word32Shr(p0, m.Int32Constant(shr * 8)));
m.Return(r);
Stream s = m.Build();
diff --git a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
index 92cdb4962c..013d96f26d 100644
--- a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
@@ -80,49 +80,47 @@ const MachInst2 kLogicalInstructions[] = {
// sized block. The block is then duplicated across the word. Below is a random
// subset of the 32-bit immediates.
const uint32_t kLogical32Immediates[] = {
- 0x00000002, 0x00000003, 0x00000070, 0x00000080, 0x00000100, 0x000001c0,
- 0x00000300, 0x000007e0, 0x00003ffc, 0x00007fc0, 0x0003c000, 0x0003f000,
- 0x0003ffc0, 0x0003fff8, 0x0007ff00, 0x0007ffe0, 0x000e0000, 0x001e0000,
- 0x001ffffc, 0x003f0000, 0x003f8000, 0x00780000, 0x007fc000, 0x00ff0000,
- 0x01800000, 0x01800180, 0x01f801f8, 0x03fe0000, 0x03ffffc0, 0x03fffffc,
- 0x06000000, 0x07fc0000, 0x07ffc000, 0x07ffffc0, 0x07ffffe0, 0x0ffe0ffe,
- 0x0ffff800, 0x0ffffff0, 0x0fffffff, 0x18001800, 0x1f001f00, 0x1f801f80,
- 0x30303030, 0x3ff03ff0, 0x3ff83ff8, 0x3fff0000, 0x3fff8000, 0x3fffffc0,
- 0x70007000, 0x7f7f7f7f, 0x7fc00000, 0x7fffffc0, 0x8000001f, 0x800001ff,
- 0x81818181, 0x9fff9fff, 0xc00007ff, 0xc0ffffff, 0xdddddddd, 0xe00001ff,
- 0xe00003ff, 0xe007ffff, 0xefffefff, 0xf000003f, 0xf001f001, 0xf3fff3ff,
- 0xf800001f, 0xf80fffff, 0xf87ff87f, 0xfbfbfbfb, 0xfc00001f, 0xfc0000ff,
- 0xfc0001ff, 0xfc03fc03, 0xfe0001ff, 0xff000001, 0xff03ff03, 0xff800000,
- 0xff800fff, 0xff801fff, 0xff87ffff, 0xffc0003f, 0xffc007ff, 0xffcfffcf,
- 0xffe00003, 0xffe1ffff, 0xfff0001f, 0xfff07fff, 0xfff80007, 0xfff87fff,
- 0xfffc00ff, 0xfffe07ff, 0xffff00ff, 0xffffc001, 0xfffff007, 0xfffff3ff,
- 0xfffff807, 0xfffff9ff, 0xfffffc0f, 0xfffffeff};
-
+ 0x00000002, 0x00000003, 0x00000070, 0x00000080, 0x00000100, 0x000001C0,
+ 0x00000300, 0x000007E0, 0x00003FFC, 0x00007FC0, 0x0003C000, 0x0003F000,
+ 0x0003FFC0, 0x0003FFF8, 0x0007FF00, 0x0007FFE0, 0x000E0000, 0x001E0000,
+ 0x001FFFFC, 0x003F0000, 0x003F8000, 0x00780000, 0x007FC000, 0x00FF0000,
+ 0x01800000, 0x01800180, 0x01F801F8, 0x03FE0000, 0x03FFFFC0, 0x03FFFFFC,
+ 0x06000000, 0x07FC0000, 0x07FFC000, 0x07FFFFC0, 0x07FFFFE0, 0x0FFE0FFE,
+ 0x0FFFF800, 0x0FFFFFF0, 0x0FFFFFFF, 0x18001800, 0x1F001F00, 0x1F801F80,
+ 0x30303030, 0x3FF03FF0, 0x3FF83FF8, 0x3FFF0000, 0x3FFF8000, 0x3FFFFFC0,
+ 0x70007000, 0x7F7F7F7F, 0x7FC00000, 0x7FFFFFC0, 0x8000001F, 0x800001FF,
+ 0x81818181, 0x9FFF9FFF, 0xC00007FF, 0xC0FFFFFF, 0xDDDDDDDD, 0xE00001FF,
+ 0xE00003FF, 0xE007FFFF, 0xEFFFEFFF, 0xF000003F, 0xF001F001, 0xF3FFF3FF,
+ 0xF800001F, 0xF80FFFFF, 0xF87FF87F, 0xFBFBFBFB, 0xFC00001F, 0xFC0000FF,
+ 0xFC0001FF, 0xFC03FC03, 0xFE0001FF, 0xFF000001, 0xFF03FF03, 0xFF800000,
+ 0xFF800FFF, 0xFF801FFF, 0xFF87FFFF, 0xFFC0003F, 0xFFC007FF, 0xFFCFFFCF,
+ 0xFFE00003, 0xFFE1FFFF, 0xFFF0001F, 0xFFF07FFF, 0xFFF80007, 0xFFF87FFF,
+ 0xFFFC00FF, 0xFFFE07FF, 0xFFFF00FF, 0xFFFFC001, 0xFFFFF007, 0xFFFFF3FF,
+ 0xFFFFF807, 0xFFFFF9FF, 0xFFFFFC0F, 0xFFFFFEFF};
// Random subset of 64-bit logical immediates.
const uint64_t kLogical64Immediates[] = {
0x0000000000000001, 0x0000000000000002, 0x0000000000000003,
0x0000000000000070, 0x0000000000000080, 0x0000000000000100,
- 0x00000000000001c0, 0x0000000000000300, 0x0000000000000600,
- 0x00000000000007e0, 0x0000000000003ffc, 0x0000000000007fc0,
- 0x0000000600000000, 0x0000003ffffffffc, 0x000000f000000000,
- 0x000001f800000000, 0x0003fc0000000000, 0x0003fc000003fc00,
- 0x0003ffffffc00000, 0x0003ffffffffffc0, 0x0006000000060000,
- 0x003ffffffffc0000, 0x0180018001800180, 0x01f801f801f801f8,
+ 0x00000000000001C0, 0x0000000000000300, 0x0000000000000600,
+ 0x00000000000007E0, 0x0000000000003FFC, 0x0000000000007FC0,
+ 0x0000000600000000, 0x0000003FFFFFFFFC, 0x000000F000000000,
+ 0x000001F800000000, 0x0003FC0000000000, 0x0003FC000003FC00,
+ 0x0003FFFFFFC00000, 0x0003FFFFFFFFFFC0, 0x0006000000060000,
+ 0x003FFFFFFFFC0000, 0x0180018001800180, 0x01F801F801F801F8,
0x0600000000000000, 0x1000000010000000, 0x1000100010001000,
- 0x1010101010101010, 0x1111111111111111, 0x1f001f001f001f00,
- 0x1f1f1f1f1f1f1f1f, 0x1ffffffffffffffe, 0x3ffc3ffc3ffc3ffc,
- 0x5555555555555555, 0x7f7f7f7f7f7f7f7f, 0x8000000000000000,
- 0x8000001f8000001f, 0x8181818181818181, 0x9999999999999999,
- 0x9fff9fff9fff9fff, 0xaaaaaaaaaaaaaaaa, 0xdddddddddddddddd,
- 0xe0000000000001ff, 0xf800000000000000, 0xf8000000000001ff,
- 0xf807f807f807f807, 0xfefefefefefefefe, 0xfffefffefffefffe,
- 0xfffff807fffff807, 0xfffff9fffffff9ff, 0xfffffc0ffffffc0f,
- 0xfffffc0fffffffff, 0xfffffefffffffeff, 0xfffffeffffffffff,
- 0xffffff8000000000, 0xfffffffefffffffe, 0xffffffffefffffff,
- 0xfffffffff9ffffff, 0xffffffffff800000, 0xffffffffffffc0ff,
- 0xfffffffffffffffe};
-
+ 0x1010101010101010, 0x1111111111111111, 0x1F001F001F001F00,
+ 0x1F1F1F1F1F1F1F1F, 0x1FFFFFFFFFFFFFFE, 0x3FFC3FFC3FFC3FFC,
+ 0x5555555555555555, 0x7F7F7F7F7F7F7F7F, 0x8000000000000000,
+ 0x8000001F8000001F, 0x8181818181818181, 0x9999999999999999,
+ 0x9FFF9FFF9FFF9FFF, 0xAAAAAAAAAAAAAAAA, 0xDDDDDDDDDDDDDDDD,
+ 0xE0000000000001FF, 0xF800000000000000, 0xF8000000000001FF,
+ 0xF807F807F807F807, 0xFEFEFEFEFEFEFEFE, 0xFFFEFFFEFFFEFFFE,
+ 0xFFFFF807FFFFF807, 0xFFFFF9FFFFFFF9FF, 0xFFFFFC0FFFFFFC0F,
+ 0xFFFFFC0FFFFFFFFF, 0xFFFFFEFFFFFFFEFF, 0xFFFFFEFFFFFFFFFF,
+ 0xFFFFFF8000000000, 0xFFFFFFFEFFFFFFFE, 0xFFFFFFFFEFFFFFFF,
+ 0xFFFFFFFFF9FFFFFF, 0xFFFFFFFFFF800000, 0xFFFFFFFFFFFFC0FF,
+ 0xFFFFFFFFFFFFFFFE};
// ARM64 arithmetic instructions.
struct AddSub {
@@ -595,7 +593,7 @@ TEST_P(InstructionSelectorAddSubTest, UnsignedExtendByte) {
const MachineType type = dpi.mi.machine_type;
StreamBuilder m(this, type, type, type);
m.Return((m.*dpi.mi.constructor)(
- m.Parameter(0), m.Word32And(m.Parameter(1), m.Int32Constant(0xff))));
+ m.Parameter(0), m.Word32And(m.Parameter(1), m.Int32Constant(0xFF))));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(dpi.mi.arch_opcode, s[0]->arch_opcode());
@@ -610,7 +608,7 @@ TEST_P(InstructionSelectorAddSubTest, UnsignedExtendHalfword) {
const MachineType type = dpi.mi.machine_type;
StreamBuilder m(this, type, type, type);
m.Return((m.*dpi.mi.constructor)(
- m.Parameter(0), m.Word32And(m.Parameter(1), m.Int32Constant(0xffff))));
+ m.Parameter(0), m.Word32And(m.Parameter(1), m.Int32Constant(0xFFFF))));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(dpi.mi.arch_opcode, s[0]->arch_opcode());
@@ -746,7 +744,7 @@ TEST_F(InstructionSelectorTest, SubZeroOnLeftWithShift) {
EXPECT_TRUE(s[0]->InputAt(0)->IsImmediate());
EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(0)));
EXPECT_EQ(shift.mode, s[0]->addressing_mode());
- EXPECT_EQ(0x3f & imm, 0x3f & s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(0x3F & imm, 0x3F & s.ToInt32(s[0]->InputAt(2)));
EXPECT_EQ(1U, s[0]->OutputCount());
}
}
@@ -772,7 +770,7 @@ TEST_F(InstructionSelectorTest, SubZeroOnLeftWithShift) {
EXPECT_TRUE(s[0]->InputAt(0)->IsImmediate());
EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(0)));
EXPECT_EQ(shift.mode, s[0]->addressing_mode());
- EXPECT_EQ(0x3f & imm, 0x3f & s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(0x3F & imm, 0x3F & s.ToInt32(s[0]->InputAt(2)));
EXPECT_EQ(1U, s[0]->OutputCount());
}
}
@@ -836,7 +834,7 @@ TEST_F(InstructionSelectorTest, AddShiftByImmediateOnLeft) {
EXPECT_EQ(kArm64Add32, s[0]->arch_opcode());
EXPECT_EQ(shift.mode, s[0]->addressing_mode());
EXPECT_EQ(3U, s[0]->InputCount());
- EXPECT_EQ(0x3f & imm, 0x3f & s.ToInt64(s[0]->InputAt(2)));
+ EXPECT_EQ(0x3F & imm, 0x3F & s.ToInt64(s[0]->InputAt(2)));
EXPECT_EQ(1U, s[0]->OutputCount());
}
}
@@ -860,7 +858,7 @@ TEST_F(InstructionSelectorTest, AddShiftByImmediateOnLeft) {
EXPECT_EQ(kArm64Add, s[0]->arch_opcode());
EXPECT_EQ(shift.mode, s[0]->addressing_mode());
EXPECT_EQ(3U, s[0]->InputCount());
- EXPECT_EQ(0x3f & imm, 0x3f & s.ToInt64(s[0]->InputAt(2)));
+ EXPECT_EQ(0x3F & imm, 0x3F & s.ToInt64(s[0]->InputAt(2)));
EXPECT_EQ(1U, s[0]->OutputCount());
}
}
@@ -871,7 +869,7 @@ TEST_F(InstructionSelectorTest, AddUnsignedExtendByteOnLeft) {
{
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
MachineType::Int32());
- m.Return(m.Int32Add(m.Word32And(m.Parameter(0), m.Int32Constant(0xff)),
+ m.Return(m.Int32Add(m.Word32And(m.Parameter(0), m.Int32Constant(0xFF)),
m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -883,7 +881,7 @@ TEST_F(InstructionSelectorTest, AddUnsignedExtendByteOnLeft) {
{
StreamBuilder m(this, MachineType::Int64(), MachineType::Int32(),
MachineType::Int64());
- m.Return(m.Int64Add(m.Word32And(m.Parameter(0), m.Int32Constant(0xff)),
+ m.Return(m.Int64Add(m.Word32And(m.Parameter(0), m.Int32Constant(0xFF)),
m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -899,7 +897,7 @@ TEST_F(InstructionSelectorTest, AddUnsignedExtendHalfwordOnLeft) {
{
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
MachineType::Int32());
- m.Return(m.Int32Add(m.Word32And(m.Parameter(0), m.Int32Constant(0xffff)),
+ m.Return(m.Int32Add(m.Word32And(m.Parameter(0), m.Int32Constant(0xFFFF)),
m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -911,7 +909,7 @@ TEST_F(InstructionSelectorTest, AddUnsignedExtendHalfwordOnLeft) {
{
StreamBuilder m(this, MachineType::Int64(), MachineType::Int32(),
MachineType::Int64());
- m.Return(m.Int64Add(m.Word32And(m.Parameter(0), m.Int32Constant(0xffff)),
+ m.Return(m.Int64Add(m.Word32And(m.Parameter(0), m.Int32Constant(0xFFFF)),
m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1167,87 +1165,126 @@ TEST_F(InstructionSelectorTest, AddBranchWithImmediateOnLeft) {
}
}
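+// Pairs a builder for a Word32And-based branch condition with the flags
+// condition the instruction selector is expected to emit for it.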
+struct TestAndBranch {
+ MachInst<std::function<Node*(InstructionSelectorTest::StreamBuilder&, Node*,
+ uint32_t mask)>>
+ mi;
+ FlagsCondition cond;
+};
-TEST_F(InstructionSelectorTest, Word32AndBranchWithOneBitMaskOnRight) {
- TRACED_FORRANGE(int, bit, 0, 31) {
- uint32_t mask = 1 << bit;
- StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
- RawMachineLabel a, b;
- m.Branch(m.Word32And(m.Parameter(0), m.Int32Constant(mask)), &a, &b);
- m.Bind(&a);
- m.Return(m.Int32Constant(1));
- m.Bind(&b);
- m.Return(m.Int32Constant(0));
- Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArm64TestAndBranch32, s[0]->arch_opcode());
- EXPECT_EQ(kNotEqual, s[0]->flags_condition());
- EXPECT_EQ(4U, s[0]->InputCount());
- EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
- EXPECT_EQ(bit, s.ToInt32(s[0]->InputAt(1)));
- }
-
- TRACED_FORRANGE(int, bit, 0, 31) {
- uint32_t mask = 1 << bit;
- StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
- RawMachineLabel a, b;
- m.Branch(
- m.Word32BinaryNot(m.Word32And(m.Parameter(0), m.Int32Constant(mask))),
- &a, &b);
- m.Bind(&a);
- m.Return(m.Int32Constant(1));
- m.Bind(&b);
- m.Return(m.Int32Constant(0));
- Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArm64TestAndBranch32, s[0]->arch_opcode());
- EXPECT_EQ(kEqual, s[0]->flags_condition());
- EXPECT_EQ(4U, s[0]->InputCount());
- EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
- EXPECT_EQ(bit, s.ToInt32(s[0]->InputAt(1)));
- }
+std::ostream& operator<<(std::ostream& os, const TestAndBranch& tb) {
+ return os << tb.mi;
}
-TEST_F(InstructionSelectorTest, Word32AndBranchWithOneBitMaskOnLeft) {
- TRACED_FORRANGE(int, bit, 0, 31) {
- uint32_t mask = 1 << bit;
- StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
- RawMachineLabel a, b;
- m.Branch(m.Word32And(m.Int32Constant(mask), m.Parameter(0)), &a, &b);
- m.Bind(&a);
- m.Return(m.Int32Constant(1));
- m.Bind(&b);
- m.Return(m.Int32Constant(0));
- Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArm64TestAndBranch32, s[0]->arch_opcode());
- EXPECT_EQ(kNotEqual, s[0]->flags_condition());
- EXPECT_EQ(4U, s[0]->InputCount());
- EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
- EXPECT_EQ(bit, s.ToInt32(s[0]->InputAt(1)));
- }
-
+const TestAndBranch kTestAndBranchMatchers32[] = {
+ // Branch on the result of Word32And directly.
+ {{[](InstructionSelectorTest::StreamBuilder& m, Node* x, uint32_t mask)
+ -> Node* { return m.Word32And(x, m.Int32Constant(mask)); },
+ "if (x and mask)", kArm64TestAndBranch32, MachineType::Int32()},
+ kNotEqual},
+ {{[](InstructionSelectorTest::StreamBuilder& m, Node* x,
+ uint32_t mask) -> Node* {
+ return m.Word32BinaryNot(m.Word32And(x, m.Int32Constant(mask)));
+ },
+ "if not (x and mask)", kArm64TestAndBranch32, MachineType::Int32()},
+ kEqual},
+ {{[](InstructionSelectorTest::StreamBuilder& m, Node* x, uint32_t mask)
+ -> Node* { return m.Word32And(m.Int32Constant(mask), x); },
+ "if (mask and x)", kArm64TestAndBranch32, MachineType::Int32()},
+ kNotEqual},
+ {{[](InstructionSelectorTest::StreamBuilder& m, Node* x,
+ uint32_t mask) -> Node* {
+ return m.Word32BinaryNot(m.Word32And(m.Int32Constant(mask), x));
+ },
+ "if not (mask and x)", kArm64TestAndBranch32, MachineType::Int32()},
+ kEqual},
+ // Branch on the result of '(x and mask) == mask'. This tests that a bit is
+ // set rather than cleared, which is why the conditions are inverted.
+ {{[](InstructionSelectorTest::StreamBuilder& m, Node* x,
+ uint32_t mask) -> Node* {
+ return m.Word32Equal(m.Word32And(x, m.Int32Constant(mask)),
+ m.Int32Constant(mask));
+ },
+ "if ((x and mask) == mask)", kArm64TestAndBranch32, MachineType::Int32()},
+ kNotEqual},
+ {{[](InstructionSelectorTest::StreamBuilder& m, Node* x,
+ uint32_t mask) -> Node* {
+ return m.Word32BinaryNot(m.Word32Equal(
+ m.Word32And(x, m.Int32Constant(mask)), m.Int32Constant(mask)));
+ },
+ "if ((x and mask) != mask)", kArm64TestAndBranch32, MachineType::Int32()},
+ kEqual},
+ {{[](InstructionSelectorTest::StreamBuilder& m, Node* x,
+ uint32_t mask) -> Node* {
+ return m.Word32Equal(m.Int32Constant(mask),
+ m.Word32And(x, m.Int32Constant(mask)));
+ },
+ "if (mask == (x and mask))", kArm64TestAndBranch32, MachineType::Int32()},
+ kNotEqual},
+ {{[](InstructionSelectorTest::StreamBuilder& m, Node* x,
+ uint32_t mask) -> Node* {
+ return m.Word32BinaryNot(m.Word32Equal(
+ m.Int32Constant(mask), m.Word32And(x, m.Int32Constant(mask))));
+ },
+ "if (mask != (x and mask))", kArm64TestAndBranch32, MachineType::Int32()},
+ kEqual},
+ // Same as above but swap 'mask' and 'x'.
+ {{[](InstructionSelectorTest::StreamBuilder& m, Node* x,
+ uint32_t mask) -> Node* {
+ return m.Word32Equal(m.Word32And(m.Int32Constant(mask), x),
+ m.Int32Constant(mask));
+ },
+ "if ((mask and x) == mask)", kArm64TestAndBranch32, MachineType::Int32()},
+ kNotEqual},
+ {{[](InstructionSelectorTest::StreamBuilder& m, Node* x,
+ uint32_t mask) -> Node* {
+ return m.Word32BinaryNot(m.Word32Equal(
+ m.Word32And(m.Int32Constant(mask), x), m.Int32Constant(mask)));
+ },
+ "if ((mask and x) != mask)", kArm64TestAndBranch32, MachineType::Int32()},
+ kEqual},
+ {{[](InstructionSelectorTest::StreamBuilder& m, Node* x,
+ uint32_t mask) -> Node* {
+ return m.Word32Equal(m.Int32Constant(mask),
+ m.Word32And(m.Int32Constant(mask), x));
+ },
+ "if (mask == (mask and x))", kArm64TestAndBranch32, MachineType::Int32()},
+ kNotEqual},
+ {{[](InstructionSelectorTest::StreamBuilder& m, Node* x,
+ uint32_t mask) -> Node* {
+ return m.Word32BinaryNot(m.Word32Equal(
+ m.Int32Constant(mask), m.Word32And(m.Int32Constant(mask), x)));
+ },
+ "if (mask != (mask and x))", kArm64TestAndBranch32, MachineType::Int32()},
+ kEqual}};
+
+typedef InstructionSelectorTestWithParam<TestAndBranch>
+ InstructionSelectorTestAndBranchTest;
+
+TEST_P(InstructionSelectorTestAndBranchTest, TestAndBranch32) {
+ const TestAndBranch inst = GetParam();
TRACED_FORRANGE(int, bit, 0, 31) {
uint32_t mask = 1 << bit;
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
RawMachineLabel a, b;
- m.Branch(
- m.Word32BinaryNot(m.Word32And(m.Int32Constant(mask), m.Parameter(0))),
- &a, &b);
+ m.Branch(inst.mi.constructor(m, m.Parameter(0), mask), &a, &b);
m.Bind(&a);
m.Return(m.Int32Constant(1));
m.Bind(&b);
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArm64TestAndBranch32, s[0]->arch_opcode());
- EXPECT_EQ(kEqual, s[0]->flags_condition());
+ EXPECT_EQ(inst.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(inst.cond, s[0]->flags_condition());
EXPECT_EQ(4U, s[0]->InputCount());
EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
EXPECT_EQ(bit, s.ToInt32(s[0]->InputAt(1)));
}
}
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorTestAndBranchTest,
+ ::testing::ValuesIn(kTestAndBranchMatchers32));
TEST_F(InstructionSelectorTest, Word64AndBranchWithOneBitMaskOnRight) {
TRACED_FORRANGE(int, bit, 0, 63) {
@@ -1335,7 +1372,7 @@ TEST_F(InstructionSelectorTest, Word32EqualZeroAndBranchWithOneBitMask) {
TEST_F(InstructionSelectorTest, Word64EqualZeroAndBranchWithOneBitMask) {
TRACED_FORRANGE(int, bit, 0, 63) {
- uint64_t mask = V8_UINT64_C(1) << bit;
+ uint64_t mask = uint64_t{1} << bit;
StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
RawMachineLabel a, b;
m.Branch(m.Word64Equal(m.Word64And(m.Int64Constant(mask), m.Parameter(0)),
@@ -1355,7 +1392,7 @@ TEST_F(InstructionSelectorTest, Word64EqualZeroAndBranchWithOneBitMask) {
}
TRACED_FORRANGE(int, bit, 0, 63) {
- uint64_t mask = V8_UINT64_C(1) << bit;
+ uint64_t mask = uint64_t{1} << bit;
StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
RawMachineLabel a, b;
m.Branch(
@@ -2925,7 +2962,7 @@ TEST_F(InstructionSelectorTest, Word32EqualWithWord32Shift) {
ASSERT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
- EXPECT_EQ(0x3f & imm, 0x3f & s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(0x3F & imm, 0x3F & s.ToInt32(s[0]->InputAt(2)));
ASSERT_EQ(1U, s[0]->OutputCount());
}
TRACED_FORRANGE(int32_t, imm, -32, 63) {
@@ -2942,7 +2979,7 @@ TEST_F(InstructionSelectorTest, Word32EqualWithWord32Shift) {
ASSERT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
- EXPECT_EQ(0x3f & imm, 0x3f & s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(0x3F & imm, 0x3F & s.ToInt32(s[0]->InputAt(2)));
ASSERT_EQ(1U, s[0]->OutputCount());
}
}
@@ -2955,7 +2992,7 @@ TEST_F(InstructionSelectorTest, Word32EqualWithUnsignedExtendByte) {
MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
- Node* r = m.Word32And(p1, m.Int32Constant(0xff));
+ Node* r = m.Word32And(p1, m.Int32Constant(0xFF));
m.Return(m.Word32Equal(p0, r));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2971,7 +3008,7 @@ TEST_F(InstructionSelectorTest, Word32EqualWithUnsignedExtendByte) {
MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
- Node* r = m.Word32And(p1, m.Int32Constant(0xff));
+ Node* r = m.Word32And(p1, m.Int32Constant(0xFF));
m.Return(m.Word32Equal(r, p0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2991,7 +3028,7 @@ TEST_F(InstructionSelectorTest, Word32EqualWithUnsignedExtendHalfword) {
MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
- Node* r = m.Word32And(p1, m.Int32Constant(0xffff));
+ Node* r = m.Word32And(p1, m.Int32Constant(0xFFFF));
m.Return(m.Word32Equal(p0, r));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -3007,7 +3044,7 @@ TEST_F(InstructionSelectorTest, Word32EqualWithUnsignedExtendHalfword) {
MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
- Node* r = m.Word32And(p1, m.Int32Constant(0xffff));
+ Node* r = m.Word32And(p1, m.Int32Constant(0xFFFF));
m.Return(m.Word32Equal(r, p0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -3204,7 +3241,7 @@ TEST_F(InstructionSelectorTest, Word32CompareNegateWithWord32Shift) {
EXPECT_EQ(kArm64Cmn32, s[0]->arch_opcode());
EXPECT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(shift.mode, s[0]->addressing_mode());
- EXPECT_EQ(0x3f & imm, 0x3f & s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(0x3F & imm, 0x3F & s.ToInt32(s[0]->InputAt(2)));
EXPECT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(kFlags_set, s[0]->flags_mode());
EXPECT_EQ(cmp.cond, s[0]->flags_condition());
@@ -3312,7 +3349,7 @@ TEST_F(InstructionSelectorTest, CmpShiftByImmediateOnLeft) {
EXPECT_EQ(kArm64Cmp32, s[0]->arch_opcode());
EXPECT_EQ(shift.mode, s[0]->addressing_mode());
EXPECT_EQ(3U, s[0]->InputCount());
- EXPECT_EQ(0x3f & imm, 0x3f & s.ToInt64(s[0]->InputAt(2)));
+ EXPECT_EQ(0x3F & imm, 0x3F & s.ToInt64(s[0]->InputAt(2)));
EXPECT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(kFlags_set, s[0]->flags_mode());
EXPECT_EQ(cmp.commuted_cond, s[0]->flags_condition());
@@ -3346,7 +3383,7 @@ TEST_F(InstructionSelectorTest, CmnShiftByImmediateOnLeft) {
EXPECT_EQ(kArm64Cmn32, s[0]->arch_opcode());
EXPECT_EQ(shift.mode, s[0]->addressing_mode());
EXPECT_EQ(3U, s[0]->InputCount());
- EXPECT_EQ(0x3f & imm, 0x3f & s.ToInt64(s[0]->InputAt(2)));
+ EXPECT_EQ(0x3F & imm, 0x3F & s.ToInt64(s[0]->InputAt(2)));
EXPECT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(kFlags_set, s[0]->flags_mode());
EXPECT_EQ(cmp.cond, s[0]->flags_condition());
@@ -3816,11 +3853,11 @@ TEST_F(InstructionSelectorTest, Word32ShrWithWord32AndWithImmediate) {
// The available shift operand range is `0 <= imm < 32`, but we also test
// that immediates outside this range are handled properly (modulo-32).
TRACED_FORRANGE(int32_t, shift, -32, 63) {
- int32_t lsb = shift & 0x1f;
+ int32_t lsb = shift & 0x1F;
TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
uint32_t jnk = rng()->NextInt();
jnk = (lsb > 0) ? (jnk >> (32 - lsb)) : 0;
- uint32_t msk = ((0xffffffffu >> (32 - width)) << lsb) | jnk;
+ uint32_t msk = ((0xFFFFFFFFu >> (32 - width)) << lsb) | jnk;
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Shr(m.Word32And(m.Parameter(0), m.Int32Constant(msk)),
m.Int32Constant(shift)));
@@ -3833,11 +3870,11 @@ TEST_F(InstructionSelectorTest, Word32ShrWithWord32AndWithImmediate) {
}
}
TRACED_FORRANGE(int32_t, shift, -32, 63) {
- int32_t lsb = shift & 0x1f;
+ int32_t lsb = shift & 0x1F;
TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
uint32_t jnk = rng()->NextInt();
jnk = (lsb > 0) ? (jnk >> (32 - lsb)) : 0;
- uint32_t msk = ((0xffffffffu >> (32 - width)) << lsb) | jnk;
+ uint32_t msk = ((0xFFFFFFFFu >> (32 - width)) << lsb) | jnk;
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Shr(m.Word32And(m.Int32Constant(msk), m.Parameter(0)),
m.Int32Constant(shift)));
@@ -3856,12 +3893,12 @@ TEST_F(InstructionSelectorTest, Word64ShrWithWord64AndWithImmediate) {
// The available shift operand range is `0 <= imm < 64`, but we also test
// that immediates outside this range are handled properly (modulo-64).
TRACED_FORRANGE(int32_t, shift, -64, 127) {
- int32_t lsb = shift & 0x3f;
+ int32_t lsb = shift & 0x3F;
TRACED_FORRANGE(int32_t, width, 1, 64 - lsb) {
uint64_t jnk = rng()->NextInt64();
jnk = (lsb > 0) ? (jnk >> (64 - lsb)) : 0;
uint64_t msk =
- ((V8_UINT64_C(0xffffffffffffffff) >> (64 - width)) << lsb) | jnk;
+ ((uint64_t{0xFFFFFFFFFFFFFFFF} >> (64 - width)) << lsb) | jnk;
StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
m.Return(m.Word64Shr(m.Word64And(m.Parameter(0), m.Int64Constant(msk)),
m.Int64Constant(shift)));
@@ -3874,12 +3911,12 @@ TEST_F(InstructionSelectorTest, Word64ShrWithWord64AndWithImmediate) {
}
}
TRACED_FORRANGE(int32_t, shift, -64, 127) {
- int32_t lsb = shift & 0x3f;
+ int32_t lsb = shift & 0x3F;
TRACED_FORRANGE(int32_t, width, 1, 64 - lsb) {
uint64_t jnk = rng()->NextInt64();
jnk = (lsb > 0) ? (jnk >> (64 - lsb)) : 0;
uint64_t msk =
- ((V8_UINT64_C(0xffffffffffffffff) >> (64 - width)) << lsb) | jnk;
+ ((uint64_t{0xFFFFFFFFFFFFFFFF} >> (64 - width)) << lsb) | jnk;
StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
m.Return(m.Word64Shr(m.Word64And(m.Int64Constant(msk), m.Parameter(0)),
m.Int64Constant(shift)));
@@ -3898,7 +3935,7 @@ TEST_F(InstructionSelectorTest, Word32AndWithImmediateWithWord32Shr) {
// The available shift operand range is `0 <= imm < 32`, but we also test
// that immediates outside this range are handled properly (modulo-32).
TRACED_FORRANGE(int32_t, shift, -32, 63) {
- int32_t lsb = shift & 0x1f;
+ int32_t lsb = shift & 0x1F;
TRACED_FORRANGE(int32_t, width, 1, 31) {
uint32_t msk = (1 << width) - 1;
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
@@ -3914,7 +3951,7 @@ TEST_F(InstructionSelectorTest, Word32AndWithImmediateWithWord32Shr) {
}
}
TRACED_FORRANGE(int32_t, shift, -32, 63) {
- int32_t lsb = shift & 0x1f;
+ int32_t lsb = shift & 0x1F;
TRACED_FORRANGE(int32_t, width, 1, 31) {
uint32_t msk = (1 << width) - 1;
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
@@ -3937,9 +3974,9 @@ TEST_F(InstructionSelectorTest, Word64AndWithImmediateWithWord64Shr) {
// The available shift operand range is `0 <= imm < 64`, but we also test
// that immediates outside this range are handled properly (modulo-64).
TRACED_FORRANGE(int64_t, shift, -64, 127) {
- int64_t lsb = shift & 0x3f;
+ int64_t lsb = shift & 0x3F;
TRACED_FORRANGE(int64_t, width, 1, 63) {
- uint64_t msk = (V8_UINT64_C(1) << width) - 1;
+ uint64_t msk = (uint64_t{1} << width) - 1;
StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
m.Return(m.Word64And(m.Word64Shr(m.Parameter(0), m.Int64Constant(shift)),
m.Int64Constant(msk)));
@@ -3953,9 +3990,9 @@ TEST_F(InstructionSelectorTest, Word64AndWithImmediateWithWord64Shr) {
}
}
TRACED_FORRANGE(int64_t, shift, -64, 127) {
- int64_t lsb = shift & 0x3f;
+ int64_t lsb = shift & 0x3F;
TRACED_FORRANGE(int64_t, width, 1, 63) {
- uint64_t msk = (V8_UINT64_C(1) << width) - 1;
+ uint64_t msk = (uint64_t{1} << width) - 1;
StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
m.Return(
m.Word64And(m.Int64Constant(msk),
@@ -4013,7 +4050,7 @@ TEST_F(InstructionSelectorTest, Int32MulHighWithSar) {
EXPECT_EQ(kArm64Asr, s[1]->arch_opcode());
ASSERT_EQ(2U, s[1]->InputCount());
EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[1]->InputAt(0)));
- EXPECT_EQ((shift & 0x1f) + 32, s.ToInt64(s[1]->InputAt(1)));
+ EXPECT_EQ((shift & 0x1F) + 32, s.ToInt64(s[1]->InputAt(1)));
ASSERT_EQ(1U, s[1]->OutputCount());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[1]->Output()));
}
@@ -4072,7 +4109,7 @@ TEST_F(InstructionSelectorTest, Uint32MulHighWithShr) {
EXPECT_EQ(kArm64Lsr, s[1]->arch_opcode());
ASSERT_EQ(2U, s[1]->InputCount());
EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[1]->InputAt(0)));
- EXPECT_EQ((shift & 0x1f) + 32, s.ToInt64(s[1]->InputAt(1)));
+ EXPECT_EQ((shift & 0x1F) + 32, s.ToInt64(s[1]->InputAt(1)));
ASSERT_EQ(1U, s[1]->OutputCount());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[1]->Output()));
}
diff --git a/deps/v8/test/unittests/compiler/graph-unittest.cc b/deps/v8/test/unittests/compiler/graph-unittest.cc
index 55931d51fb..a39f56e6bb 100644
--- a/deps/v8/test/unittests/compiler/graph-unittest.cc
+++ b/deps/v8/test/unittests/compiler/graph-unittest.cc
@@ -17,7 +17,8 @@ GraphTest::GraphTest(int num_parameters)
: TestWithNativeContext(),
TestWithIsolateAndZone(),
common_(zone()),
- graph_(zone()) {
+ graph_(zone()),
+ source_positions_(&graph_) {
graph()->SetStart(graph()->NewNode(common()->Start(num_parameters)));
graph()->SetEnd(graph()->NewNode(common()->End(1), graph()->start()));
}
diff --git a/deps/v8/test/unittests/compiler/graph-unittest.h b/deps/v8/test/unittests/compiler/graph-unittest.h
index 8701f1ff6d..9d4a15ad27 100644
--- a/deps/v8/test/unittests/compiler/graph-unittest.h
+++ b/deps/v8/test/unittests/compiler/graph-unittest.h
@@ -6,6 +6,7 @@
#define V8_UNITTESTS_COMPILER_GRAPH_UNITTEST_H_
#include "src/compiler/common-operator.h"
+#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/graph.h"
#include "src/compiler/typer.h"
#include "test/unittests/test-utils.h"
@@ -58,10 +59,12 @@ class GraphTest : public virtual TestWithNativeContext,
CommonOperatorBuilder* common() { return &common_; }
Graph* graph() { return &graph_; }
+ SourcePositionTable* source_positions() { return &source_positions_; }
private:
CommonOperatorBuilder common_;
Graph graph_;
+ SourcePositionTable source_positions_;
};
diff --git a/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc b/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
index b9f5fc4b9f..f2767a0bb8 100644
--- a/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
+++ b/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
@@ -15,7 +15,7 @@ namespace {
// Immediates (random subset).
const int32_t kImmediates[] = {kMinInt, -42, -1, 0, 1, 2,
3, 4, 5, 6, 7, 8,
- 16, 42, 0xff, 0xffff, 0x0f0f0f0f, kMaxInt};
+ 16, 42, 0xFF, 0xFFFF, 0x0F0F0F0F, kMaxInt};
} // namespace
@@ -864,6 +864,15 @@ TEST_F(InstructionSelectorTest, Word32Clz) {
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
+TEST_F(InstructionSelectorTest, SpeculationFence) {
+ StreamBuilder m(this, MachineType::Int32());
+ m.SpeculationFence();
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLFence, s[0]->arch_opcode());
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
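
For reference, the `kLFence` opcode selected above corresponds to the x86 `lfence` instruction, which serializes instruction dispatch and serves as a speculation barrier against Spectre-style attacks. A minimal standalone illustration of the pattern such a fence enables (plain C++ with SSE2 intrinsics; illustrative only, not V8 code):

#include <emmintrin.h>  // _mm_lfence (SSE2)

int LoadAfterBoundsCheck(const int* table, int index, int size) {
  if (index >= size) return 0;
  // Keep the CPU from speculatively issuing the load below with an
  // out-of-bounds index before the branch above resolves.
  _mm_lfence();
  return table[index];
}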
diff --git a/deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc b/deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc
index d629639c49..17d1572e98 100644
--- a/deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc
+++ b/deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc
@@ -432,8 +432,7 @@ InstructionOperand InstructionSequenceTest::ConvertInputOp(TestOperand op) {
default:
break;
}
- CHECK(false);
- return InstructionOperand();
+ UNREACHABLE();
}
@@ -468,8 +467,7 @@ InstructionOperand InstructionSequenceTest::ConvertOutputOp(VReg vreg,
default:
break;
}
- CHECK(false);
- return InstructionOperand();
+ UNREACHABLE();
}
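
Replacing `CHECK(false); return InstructionOperand();` with `UNREACHABLE()` works because the latter expands to a [[noreturn]] fatal call, so the compiler no longer requires a dummy return value on the unreachable path. Rough shape of such a macro (illustrative; not V8's exact definition):

[[noreturn]] void FatalUnreachable(const char* file, int line);
#define UNREACHABLE() FatalUnreachable(__FILE__, __LINE__)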
diff --git a/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc b/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
index a6bd1f2dad..ddb8408e5f 100644
--- a/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
@@ -34,8 +34,8 @@ class Int64LoweringTest : public GraphTest {
: GraphTest(),
machine_(zone(), MachineRepresentation::kWord32,
MachineOperatorBuilder::Flag::kAllOptionalOps) {
- value_[0] = 0x1234567890abcdef;
- value_[1] = 0x1edcba098765432f;
+ value_[0] = 0x1234567890ABCDEF;
+ value_[1] = 0x1EDCBA098765432F;
value_[2] = 0x1133557799886644;
}
@@ -85,7 +85,7 @@ class Int64LoweringTest : public GraphTest {
int64_t value(int i) { return value_[i]; }
int32_t low_word_value(int i) {
- return static_cast<int32_t>(value_[i] & 0xffffffff);
+ return static_cast<int32_t>(value_[i] & 0xFFFFFFFF);
}
int32_t high_word_value(int i) {
@@ -760,7 +760,7 @@ TEST_F(Int64LoweringTest, I64Ror) {
IsMerge(IsIfTrue(branch_lt32_matcher), IsIfFalse(branch_lt32_matcher)));
Matcher<Node*> shift_matcher =
- IsWord32And(IsParameter(0), IsInt32Constant(0x1f));
+ IsWord32And(IsParameter(0), IsInt32Constant(0x1F));
Matcher<Node*> bit_mask_matcher = IsWord32Shl(
IsWord32Sar(IsInt32Constant(std::numeric_limits<int32_t>::min()),
diff --git a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
index 485efd6288..b07dbfd0dc 100644
--- a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
@@ -382,8 +382,7 @@ TEST_F(JSTypedLoweringTest, JSLoadNamedStringLength) {
Reduce(graph()->NewNode(javascript()->LoadNamed(name, feedback), receiver,
context, EmptyFrameState(), effect, control));
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsLoadField(AccessBuilder::ForStringLength(),
- receiver, effect, control));
+ EXPECT_THAT(r.replacement(), IsStringLength(receiver));
}
diff --git a/deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc b/deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc
index 741021a446..17dc998f6d 100644
--- a/deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc
+++ b/deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc
@@ -157,7 +157,9 @@ TEST_F(LinkageTailCall, MoreRegisterAndStackParametersCallee) {
Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
EXPECT_TRUE(desc1->CanTailCall(node));
int stack_param_delta = desc2->GetStackParameterDelta(desc1);
- EXPECT_EQ(1, stack_param_delta);
+ // We might need to add one slot of padding to the callee arguments.
+ int expected = kPadArguments ? 2 : 1;
+ EXPECT_EQ(expected, stack_param_delta);
}
@@ -178,7 +180,9 @@ TEST_F(LinkageTailCall, MoreRegisterAndStackParametersCaller) {
Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
EXPECT_TRUE(desc1->CanTailCall(node));
int stack_param_delta = desc2->GetStackParameterDelta(desc1);
- EXPECT_EQ(-1, stack_param_delta);
+ // We might need to drop one slot of padding from the caller's arguments.
+ int expected = kPadArguments ? -2 : -1;
+ EXPECT_EQ(expected, stack_param_delta);
}
@@ -313,7 +317,9 @@ TEST_F(LinkageTailCall, MatchingStackParametersExtraCallerRegistersAndStack) {
Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
EXPECT_TRUE(desc1->CanTailCall(node));
int stack_param_delta = desc2->GetStackParameterDelta(desc1);
- EXPECT_EQ(-1, stack_param_delta);
+ // We might need to add one slot of padding to the callee arguments.
+ int expected = kPadArguments ? 0 : -1;
+ EXPECT_EQ(expected, stack_param_delta);
}
@@ -341,7 +347,9 @@ TEST_F(LinkageTailCall, MatchingStackParametersExtraCalleeRegistersAndStack) {
Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
EXPECT_TRUE(desc1->CanTailCall(node));
int stack_param_delta = desc2->GetStackParameterDelta(desc1);
- EXPECT_EQ(1, stack_param_delta);
+ // We might need to drop one slot of padding from the caller's arguments.
+ int expected = kPadArguments ? 0 : 1;
+ EXPECT_EQ(expected, stack_param_delta);
}
} // namespace compiler
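
The kPadArguments expectations above are consistent with a scheme in which each stack argument area is padded to an even number of slots before the delta is computed. A sketch of that model (an assumption inferred from the four test cases, not the actual GetStackParameterDelta implementation):

// Hypothetical model: pad each argument area to an even slot count.
int PaddedSlots(int slots) {
  return kPadArguments ? (slots + 1) & ~1 : slots;
}
int StackParameterDelta(int callee_slots, int caller_slots) {
  return PaddedSlots(callee_slots) - PaddedSlots(caller_slots);
}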
diff --git a/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc b/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc
index 969f253ae1..ef129fa7f3 100644
--- a/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc
+++ b/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc
@@ -74,14 +74,14 @@ class LoopPeelingTest : public GraphTest {
PeeledIteration* PeelOne() {
LoopTree* loop_tree = GetLoopTree();
LoopTree::Loop* loop = loop_tree->outer_loops()[0];
- EXPECT_TRUE(LoopPeeler::CanPeel(loop_tree, loop));
- return Peel(loop_tree, loop);
+ LoopPeeler peeler(graph(), common(), loop_tree, zone(), source_positions());
+ EXPECT_TRUE(peeler.CanPeel(loop));
+ return Peel(peeler, loop);
}
- PeeledIteration* Peel(LoopTree* loop_tree, LoopTree::Loop* loop) {
- EXPECT_TRUE(LoopPeeler::CanPeel(loop_tree, loop));
- PeeledIteration* peeled =
- LoopPeeler::Peel(graph(), common(), loop_tree, loop, zone());
+ PeeledIteration* Peel(LoopPeeler peeler, LoopTree::Loop* loop) {
+ EXPECT_TRUE(peeler.CanPeel(loop));
+ PeeledIteration* peeled = peeler.Peel(loop);
if (FLAG_trace_turbo_graph) {
OFStream os(stdout);
os << AsRPO(*graph());
@@ -250,7 +250,8 @@ TEST_F(LoopPeelingTest, SimpleNestedLoopWithCounter_peel_inner) {
EXPECT_NE(nullptr, loop);
EXPECT_EQ(1u, loop->depth());
- PeeledIteration* peeled = Peel(loop_tree, loop);
+ LoopPeeler peeler(graph(), common(), loop_tree, zone(), source_positions());
+ PeeledIteration* peeled = Peel(peeler, loop);
ExpectNotPeeled(outer.loop, peeled);
ExpectNotPeeled(outer.branch, peeled);
@@ -289,7 +290,8 @@ TEST_F(LoopPeelingTest, SimpleInnerCounter_peel_inner) {
EXPECT_NE(nullptr, loop);
EXPECT_EQ(1u, loop->depth());
- PeeledIteration* peeled = Peel(loop_tree, loop);
+ LoopPeeler peeler(graph(), common(), loop_tree, zone(), source_positions());
+ PeeledIteration* peeled = Peel(peeler, loop);
ExpectNotPeeled(outer.loop, peeled);
ExpectNotPeeled(outer.branch, peeled);
@@ -517,7 +519,8 @@ TEST_F(LoopPeelingTest, SimpleLoopWithUnmarkedExit) {
{
LoopTree* loop_tree = GetLoopTree();
LoopTree::Loop* loop = loop_tree->outer_loops()[0];
- EXPECT_FALSE(LoopPeeler::CanPeel(loop_tree, loop));
+ LoopPeeler peeler(graph(), common(), loop_tree, zone(), source_positions());
+ EXPECT_FALSE(peeler.CanPeel(loop));
}
}
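
These call sites track LoopPeeler's change from static helpers to a regular object that bundles the graph, operator builder, loop tree, zone, and source positions once. The before/after shape, taken from the hunks above:

// Before: stateless static helpers.
//   LoopPeeler::CanPeel(loop_tree, loop);
//   LoopPeeler::Peel(graph, common, loop_tree, loop, zone);

// After: construct once, then query and peel.
LoopPeeler peeler(graph(), common(), loop_tree, zone(), source_positions());
if (peeler.CanPeel(loop)) {
  PeeledIteration* peeled = peeler.Peel(loop);
}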
diff --git a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
index 1a4476f488..4448452327 100644
--- a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
@@ -173,70 +173,116 @@ const int32_t kInt32Values[] = {
1954730266, 2008792749, 2045320228,
std::numeric_limits<int32_t>::max()};
-
-const int64_t kInt64Values[] = {
- std::numeric_limits<int64_t>::min(), V8_INT64_C(-8974392461363618006),
- V8_INT64_C(-8874367046689588135), V8_INT64_C(-8269197512118230839),
- V8_INT64_C(-8146091527100606733), V8_INT64_C(-7550917981466150848),
- V8_INT64_C(-7216590251577894337), V8_INT64_C(-6464086891160048440),
- V8_INT64_C(-6365616494908257190), V8_INT64_C(-6305630541365849726),
- V8_INT64_C(-5982222642272245453), V8_INT64_C(-5510103099058504169),
- V8_INT64_C(-5496838675802432701), V8_INT64_C(-4047626578868642657),
- V8_INT64_C(-4033755046900164544), V8_INT64_C(-3554299241457877041),
- V8_INT64_C(-2482258764588614470), V8_INT64_C(-1688515425526875335),
- V8_INT64_C(-924784137176548532), V8_INT64_C(-725316567157391307),
- V8_INT64_C(-439022654781092241), V8_INT64_C(-105545757668917080),
- V8_INT64_C(-2088319373), V8_INT64_C(-2073699916),
- V8_INT64_C(-1844949911), V8_INT64_C(-1831090548),
- V8_INT64_C(-1756711933), V8_INT64_C(-1559409497),
- V8_INT64_C(-1281179700), V8_INT64_C(-1211513985),
- V8_INT64_C(-1182371520), V8_INT64_C(-785934753),
- V8_INT64_C(-767480697), V8_INT64_C(-705745662),
- V8_INT64_C(-514362436), V8_INT64_C(-459916580),
- V8_INT64_C(-312328082), V8_INT64_C(-302949707),
- V8_INT64_C(-285499304), V8_INT64_C(-125701262),
- V8_INT64_C(-95139843), V8_INT64_C(-32768),
- V8_INT64_C(-27542), V8_INT64_C(-23600),
- V8_INT64_C(-18582), V8_INT64_C(-17770),
- V8_INT64_C(-9086), V8_INT64_C(-9010),
- V8_INT64_C(-8244), V8_INT64_C(-2890),
- V8_INT64_C(-103), V8_INT64_C(-34),
- V8_INT64_C(-27), V8_INT64_C(-25),
- V8_INT64_C(-9), V8_INT64_C(-7),
- V8_INT64_C(0), V8_INT64_C(2),
- V8_INT64_C(38), V8_INT64_C(58),
- V8_INT64_C(65), V8_INT64_C(93),
- V8_INT64_C(111), V8_INT64_C(1003),
- V8_INT64_C(1267), V8_INT64_C(12797),
- V8_INT64_C(23122), V8_INT64_C(28200),
- V8_INT64_C(30888), V8_INT64_C(42648848),
- V8_INT64_C(116836693), V8_INT64_C(263003643),
- V8_INT64_C(571039860), V8_INT64_C(1079398689),
- V8_INT64_C(1145196402), V8_INT64_C(1184846321),
- V8_INT64_C(1758281648), V8_INT64_C(1859991374),
- V8_INT64_C(1960251588), V8_INT64_C(2042443199),
- V8_INT64_C(296220586027987448), V8_INT64_C(1015494173071134726),
- V8_INT64_C(1151237951914455318), V8_INT64_C(1331941174616854174),
- V8_INT64_C(2022020418667972654), V8_INT64_C(2450251424374977035),
- V8_INT64_C(3668393562685561486), V8_INT64_C(4858229301215502171),
- V8_INT64_C(4919426235170669383), V8_INT64_C(5034286595330341762),
- V8_INT64_C(5055797915536941182), V8_INT64_C(6072389716149252074),
- V8_INT64_C(6185309910199801210), V8_INT64_C(6297328311011094138),
- V8_INT64_C(6932372858072165827), V8_INT64_C(8483640924987737210),
- V8_INT64_C(8663764179455849203), V8_INT64_C(8877197042645298254),
- V8_INT64_C(8901543506779157333), std::numeric_limits<int64_t>::max()};
-
+const int64_t kInt64Values[] = {std::numeric_limits<int64_t>::min(),
+ int64_t{-8974392461363618006},
+ int64_t{-8874367046689588135},
+ int64_t{-8269197512118230839},
+ int64_t{-8146091527100606733},
+ int64_t{-7550917981466150848},
+ int64_t{-7216590251577894337},
+ int64_t{-6464086891160048440},
+ int64_t{-6365616494908257190},
+ int64_t{-6305630541365849726},
+ int64_t{-5982222642272245453},
+ int64_t{-5510103099058504169},
+ int64_t{-5496838675802432701},
+ int64_t{-4047626578868642657},
+ int64_t{-4033755046900164544},
+ int64_t{-3554299241457877041},
+ int64_t{-2482258764588614470},
+ int64_t{-1688515425526875335},
+ int64_t{-924784137176548532},
+ int64_t{-725316567157391307},
+ int64_t{-439022654781092241},
+ int64_t{-105545757668917080},
+ int64_t{-2088319373},
+ int64_t{-2073699916},
+ int64_t{-1844949911},
+ int64_t{-1831090548},
+ int64_t{-1756711933},
+ int64_t{-1559409497},
+ int64_t{-1281179700},
+ int64_t{-1211513985},
+ int64_t{-1182371520},
+ int64_t{-785934753},
+ int64_t{-767480697},
+ int64_t{-705745662},
+ int64_t{-514362436},
+ int64_t{-459916580},
+ int64_t{-312328082},
+ int64_t{-302949707},
+ int64_t{-285499304},
+ int64_t{-125701262},
+ int64_t{-95139843},
+ int64_t{-32768},
+ int64_t{-27542},
+ int64_t{-23600},
+ int64_t{-18582},
+ int64_t{-17770},
+ int64_t{-9086},
+ int64_t{-9010},
+ int64_t{-8244},
+ int64_t{-2890},
+ int64_t{-103},
+ int64_t{-34},
+ int64_t{-27},
+ int64_t{-25},
+ int64_t{-9},
+ int64_t{-7},
+ int64_t{0},
+ int64_t{2},
+ int64_t{38},
+ int64_t{58},
+ int64_t{65},
+ int64_t{93},
+ int64_t{111},
+ int64_t{1003},
+ int64_t{1267},
+ int64_t{12797},
+ int64_t{23122},
+ int64_t{28200},
+ int64_t{30888},
+ int64_t{42648848},
+ int64_t{116836693},
+ int64_t{263003643},
+ int64_t{571039860},
+ int64_t{1079398689},
+ int64_t{1145196402},
+ int64_t{1184846321},
+ int64_t{1758281648},
+ int64_t{1859991374},
+ int64_t{1960251588},
+ int64_t{2042443199},
+ int64_t{296220586027987448},
+ int64_t{1015494173071134726},
+ int64_t{1151237951914455318},
+ int64_t{1331941174616854174},
+ int64_t{2022020418667972654},
+ int64_t{2450251424374977035},
+ int64_t{3668393562685561486},
+ int64_t{4858229301215502171},
+ int64_t{4919426235170669383},
+ int64_t{5034286595330341762},
+ int64_t{5055797915536941182},
+ int64_t{6072389716149252074},
+ int64_t{6185309910199801210},
+ int64_t{6297328311011094138},
+ int64_t{6932372858072165827},
+ int64_t{8483640924987737210},
+ int64_t{8663764179455849203},
+ int64_t{8877197042645298254},
+ int64_t{8901543506779157333},
+ std::numeric_limits<int64_t>::max()};
const uint32_t kUint32Values[] = {
- 0x00000000, 0x00000001, 0xffffffff, 0x1b09788b, 0x04c5fce8, 0xcc0de5bf,
- 0x273a798e, 0x187937a3, 0xece3af83, 0x5495a16b, 0x0b668ecc, 0x11223344,
- 0x0000009e, 0x00000043, 0x0000af73, 0x0000116b, 0x00658ecc, 0x002b3b4c,
- 0x88776655, 0x70000000, 0x07200000, 0x7fffffff, 0x56123761, 0x7fffff00,
- 0x761c4761, 0x80000000, 0x88888888, 0xa0000000, 0xdddddddd, 0xe0000000,
- 0xeeeeeeee, 0xfffffffd, 0xf0000000, 0x007fffff, 0x003fffff, 0x001fffff,
- 0x000fffff, 0x0007ffff, 0x0003ffff, 0x0001ffff, 0x0000ffff, 0x00007fff,
- 0x00003fff, 0x00001fff, 0x00000fff, 0x000007ff, 0x000003ff, 0x000001ff};
-
+ 0x00000000, 0x00000001, 0xFFFFFFFF, 0x1B09788B, 0x04C5FCE8, 0xCC0DE5BF,
+ 0x273A798E, 0x187937A3, 0xECE3AF83, 0x5495A16B, 0x0B668ECC, 0x11223344,
+ 0x0000009E, 0x00000043, 0x0000AF73, 0x0000116B, 0x00658ECC, 0x002B3B4C,
+ 0x88776655, 0x70000000, 0x07200000, 0x7FFFFFFF, 0x56123761, 0x7FFFFF00,
+ 0x761C4761, 0x80000000, 0x88888888, 0xA0000000, 0xDDDDDDDD, 0xE0000000,
+ 0xEEEEEEEE, 0xFFFFFFFD, 0xF0000000, 0x007FFFFF, 0x003FFFFF, 0x001FFFFF,
+ 0x000FFFFF, 0x0007FFFF, 0x0003FFFF, 0x0001FFFF, 0x0000FFFF, 0x00007FFF,
+ 0x00003FFF, 0x00001FFF, 0x00000FFF, 0x000007FF, 0x000003FF, 0x000001FF};
struct ComparisonBinaryOperator {
const Operator* (MachineOperatorBuilder::*constructor)();
@@ -1653,7 +1699,7 @@ TEST_F(MachineOperatorReducerTest, Float64DivWithMinusOne) {
TEST_F(MachineOperatorReducerTest, Float64DivWithPowerOfTwo) {
Node* const p0 = Parameter(0);
- TRACED_FORRANGE(uint64_t, exponent, 1, 0x7fe) {
+ TRACED_FORRANGE(uint64_t, exponent, 1, 0x7FE) {
Double divisor = Double(exponent << Double::kPhysicalSignificandSize);
if (divisor.value() == 1.0) continue; // Skip x / 1.0 => x.
Reduction r = Reduce(graph()->NewNode(machine()->Float64Div(), p0,
@@ -1946,7 +1992,7 @@ TEST_F(MachineOperatorReducerTest, Float64InsertLowWord32WithConstant) {
EXPECT_THAT(
r.replacement(),
IsFloat64Constant(BitEq(bit_cast<double>(
- (bit_cast<uint64_t>(x) & V8_UINT64_C(0xFFFFFFFF00000000)) | y))));
+ (bit_cast<uint64_t>(x) & uint64_t{0xFFFFFFFF00000000}) | y))));
}
}
}
@@ -1965,7 +2011,7 @@ TEST_F(MachineOperatorReducerTest, Float64InsertHighWord32WithConstant) {
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
IsFloat64Constant(BitEq(bit_cast<double>(
- (bit_cast<uint64_t>(x) & V8_UINT64_C(0xFFFFFFFF)) |
+ (bit_cast<uint64_t>(x) & uint64_t{0xFFFFFFFF}) |
(static_cast<uint64_t>(y) << 32)))));
}
}
@@ -2110,7 +2156,7 @@ TEST_F(MachineOperatorReducerTest, StoreRepWord8WithWord32And) {
Node* const node =
graph()->NewNode(machine()->Store(rep), base, index,
graph()->NewNode(machine()->Word32And(), value,
- Uint32Constant(x | 0xffu)),
+ Uint32Constant(x | 0xFFu)),
effect, control);
Reduction r = Reduce(node);
@@ -2157,7 +2203,7 @@ TEST_F(MachineOperatorReducerTest, StoreRepWord16WithWord32And) {
Node* const node =
graph()->NewNode(machine()->Store(rep), base, index,
graph()->NewNode(machine()->Word32And(), value,
- Uint32Constant(x | 0xffffu)),
+ Uint32Constant(x | 0xFFFFu)),
effect, control);
Reduction r = Reduce(node);
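
The `V8_UINT64_C(x)` → `uint64_t{x}` rewrite in this file swaps a macro for plain brace initialization, which guarantees a 64-bit operand and rejects narrowing at compile time. A short standalone illustration:

#include <cstdint>

uint64_t ok = uint64_t{1} << 40;  // 64-bit shift, well defined
// uint64_t bad = 1 << 40;        // undefined: shifts a 32-bit int
// uint64_t neg = uint64_t{-1};   // ill-formed: braces reject narrowing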
diff --git a/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc b/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
index 2f1e7e8be2..34faec9690 100644
--- a/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
+++ b/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
@@ -356,11 +356,11 @@ TEST_F(InstructionSelectorTest, Word32ShrWithWord32AndWithImmediate) {
// The available shift operand range is `0 <= imm < 32`, but we also test
// that immediates outside this range are handled properly (modulo-32).
TRACED_FORRANGE(int32_t, shift, -32, 63) {
- int32_t lsb = shift & 0x1f;
+ int32_t lsb = shift & 0x1F;
TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
uint32_t jnk = rng()->NextInt();
jnk = (lsb > 0) ? (jnk >> (32 - lsb)) : 0;
- uint32_t msk = ((0xffffffffu >> (32 - width)) << lsb) | jnk;
+ uint32_t msk = ((0xFFFFFFFFu >> (32 - width)) << lsb) | jnk;
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Shr(m.Word32And(m.Parameter(0), m.Int32Constant(msk)),
m.Int32Constant(shift)));
@@ -373,11 +373,11 @@ TEST_F(InstructionSelectorTest, Word32ShrWithWord32AndWithImmediate) {
}
}
TRACED_FORRANGE(int32_t, shift, -32, 63) {
- int32_t lsb = shift & 0x1f;
+ int32_t lsb = shift & 0x1F;
TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
uint32_t jnk = rng()->NextInt();
jnk = (lsb > 0) ? (jnk >> (32 - lsb)) : 0;
- uint32_t msk = ((0xffffffffu >> (32 - width)) << lsb) | jnk;
+ uint32_t msk = ((0xFFFFFFFFu >> (32 - width)) << lsb) | jnk;
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Shr(m.Word32And(m.Int32Constant(msk), m.Parameter(0)),
m.Int32Constant(shift)));
@@ -517,7 +517,7 @@ TEST_F(InstructionSelectorTest, Word32AndWithImmediateWithWord32Shr) {
// The available shift operand range is `0 <= imm < 32`, but we also test
// that immediates outside this range are handled properly (modulo-32).
TRACED_FORRANGE(int32_t, shift, -32, 63) {
- int32_t lsb = shift & 0x1f;
+ int32_t lsb = shift & 0x1F;
TRACED_FORRANGE(int32_t, width, 1, 31) {
uint32_t msk = (1 << width) - 1;
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
@@ -533,7 +533,7 @@ TEST_F(InstructionSelectorTest, Word32AndWithImmediateWithWord32Shr) {
}
}
TRACED_FORRANGE(int32_t, shift, -32, 63) {
- int32_t lsb = shift & 0x1f;
+ int32_t lsb = shift & 0x1F;
TRACED_FORRANGE(int32_t, width, 1, 31) {
uint32_t msk = (1 << width) - 1;
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
diff --git a/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc b/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
index 74af374379..c090e29321 100644
--- a/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
@@ -400,11 +400,11 @@ TEST_F(InstructionSelectorTest, Word32ShrWithWord32AndWithImmediate) {
// The available shift operand range is `0 <= imm < 32`, but we also test
// that immediates outside this range are handled properly (modulo-32).
TRACED_FORRANGE(int32_t, shift, -32, 63) {
- int32_t lsb = shift & 0x1f;
+ int32_t lsb = shift & 0x1F;
TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
uint32_t jnk = rng()->NextInt();
jnk = (lsb > 0) ? (jnk >> (32 - lsb)) : 0;
- uint32_t msk = ((0xffffffffu >> (32 - width)) << lsb) | jnk;
+ uint32_t msk = ((0xFFFFFFFFu >> (32 - width)) << lsb) | jnk;
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Shr(m.Word32And(m.Parameter(0), m.Int32Constant(msk)),
m.Int32Constant(shift)));
@@ -417,11 +417,11 @@ TEST_F(InstructionSelectorTest, Word32ShrWithWord32AndWithImmediate) {
}
}
TRACED_FORRANGE(int32_t, shift, -32, 63) {
- int32_t lsb = shift & 0x1f;
+ int32_t lsb = shift & 0x1F;
TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
uint32_t jnk = rng()->NextInt();
jnk = (lsb > 0) ? (jnk >> (32 - lsb)) : 0;
- uint32_t msk = ((0xffffffffu >> (32 - width)) << lsb) | jnk;
+ uint32_t msk = ((0xFFFFFFFFu >> (32 - width)) << lsb) | jnk;
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Shr(m.Word32And(m.Int32Constant(msk), m.Parameter(0)),
m.Int32Constant(shift)));
@@ -440,12 +440,12 @@ TEST_F(InstructionSelectorTest, Word64ShrWithWord64AndWithImmediate) {
// The available shift operand range is `0 <= imm < 64`, but we also test
// that immediates outside this range are handled properly (modulo-64).
TRACED_FORRANGE(int32_t, shift, -64, 127) {
- int32_t lsb = shift & 0x3f;
+ int32_t lsb = shift & 0x3F;
TRACED_FORRANGE(int32_t, width, 1, 64 - lsb) {
uint64_t jnk = rng()->NextInt64();
jnk = (lsb > 0) ? (jnk >> (64 - lsb)) : 0;
uint64_t msk =
- ((V8_UINT64_C(0xffffffffffffffff) >> (64 - width)) << lsb) | jnk;
+ ((uint64_t{0xFFFFFFFFFFFFFFFF} >> (64 - width)) << lsb) | jnk;
StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
m.Return(m.Word64Shr(m.Word64And(m.Parameter(0), m.Int64Constant(msk)),
m.Int64Constant(shift)));
@@ -458,12 +458,12 @@ TEST_F(InstructionSelectorTest, Word64ShrWithWord64AndWithImmediate) {
}
}
TRACED_FORRANGE(int32_t, shift, -64, 127) {
- int32_t lsb = shift & 0x3f;
+ int32_t lsb = shift & 0x3F;
TRACED_FORRANGE(int32_t, width, 1, 64 - lsb) {
uint64_t jnk = rng()->NextInt64();
jnk = (lsb > 0) ? (jnk >> (64 - lsb)) : 0;
uint64_t msk =
- ((V8_UINT64_C(0xffffffffffffffff) >> (64 - width)) << lsb) | jnk;
+ ((uint64_t{0xFFFFFFFFFFFFFFFF} >> (64 - width)) << lsb) | jnk;
StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
m.Return(m.Word64Shr(m.Word64And(m.Int64Constant(msk), m.Parameter(0)),
m.Int64Constant(shift)));
@@ -649,7 +649,7 @@ TEST_F(InstructionSelectorTest, Word32AndWithImmediateWithWord32Shr) {
// The available shift operand range is `0 <= imm < 32`, but we also test
// that immediates outside this range are handled properly (modulo-32).
TRACED_FORRANGE(int32_t, shift, -32, 63) {
- int32_t lsb = shift & 0x1f;
+ int32_t lsb = shift & 0x1F;
TRACED_FORRANGE(int32_t, width, 1, 31) {
uint32_t msk = (1 << width) - 1;
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
@@ -665,7 +665,7 @@ TEST_F(InstructionSelectorTest, Word32AndWithImmediateWithWord32Shr) {
}
}
TRACED_FORRANGE(int32_t, shift, -32, 63) {
- int32_t lsb = shift & 0x1f;
+ int32_t lsb = shift & 0x1F;
TRACED_FORRANGE(int32_t, width, 1, 31) {
uint32_t msk = (1 << width) - 1;
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
@@ -688,9 +688,9 @@ TEST_F(InstructionSelectorTest, Word64AndWithImmediateWithWord64Shr) {
// The available shift operand range is `0 <= imm < 64`, but we also test
// that immediates outside this range are handled properly (modulo-64).
TRACED_FORRANGE(int64_t, shift, -64, 127) {
- int64_t lsb = shift & 0x3f;
+ int64_t lsb = shift & 0x3F;
TRACED_FORRANGE(int64_t, width, 1, 63) {
- uint64_t msk = (V8_UINT64_C(1) << width) - 1;
+ uint64_t msk = (uint64_t{1} << width) - 1;
StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
m.Return(m.Word64And(m.Word64Shr(m.Parameter(0), m.Int64Constant(shift)),
m.Int64Constant(msk)));
@@ -704,9 +704,9 @@ TEST_F(InstructionSelectorTest, Word64AndWithImmediateWithWord64Shr) {
}
}
TRACED_FORRANGE(int64_t, shift, -64, 127) {
- int64_t lsb = shift & 0x3f;
+ int64_t lsb = shift & 0x3F;
TRACED_FORRANGE(int64_t, width, 1, 63) {
- uint64_t msk = (V8_UINT64_C(1) << width) - 1;
+ uint64_t msk = (uint64_t{1} << width) - 1;
StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
m.Return(
m.Word64And(m.Int64Constant(msk),
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.cc b/deps/v8/test/unittests/compiler/node-test-utils.cc
index 52fd02b0a6..8e7084d1b1 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.cc
+++ b/deps/v8/test/unittests/compiler/node-test-utils.cc
@@ -2182,6 +2182,7 @@ IS_UNOP_MATCHER(ObjectIsReceiver)
IS_UNOP_MATCHER(ObjectIsSmi)
IS_UNOP_MATCHER(ObjectIsUndetectable)
IS_UNOP_MATCHER(StringFromCharCode)
+IS_UNOP_MATCHER(StringLength)
IS_UNOP_MATCHER(Word32Clz)
IS_UNOP_MATCHER(Word32Ctz)
IS_UNOP_MATCHER(Word32Popcnt)
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.h b/deps/v8/test/unittests/compiler/node-test-utils.h
index 81e471f30f..3ce6aba0f3 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.h
+++ b/deps/v8/test/unittests/compiler/node-test-utils.h
@@ -272,6 +272,7 @@ Matcher<Node*> IsNumberTan(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsNumberTanh(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsNumberTrunc(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsStringFromCharCode(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsStringLength(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsAllocate(const Matcher<Node*>& size_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
diff --git a/deps/v8/test/unittests/compiler/regalloc/move-optimizer-unittest.cc b/deps/v8/test/unittests/compiler/regalloc/move-optimizer-unittest.cc
index 0ad114241f..8b3e93fdce 100644
--- a/deps/v8/test/unittests/compiler/regalloc/move-optimizer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/regalloc/move-optimizer-unittest.cc
@@ -94,8 +94,7 @@ class MoveOptimizerTest : public InstructionSequenceTest {
default:
break;
}
- CHECK(false);
- return InstructionOperand();
+ UNREACHABLE();
}
};
diff --git a/deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc b/deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc
new file mode 100644
index 0000000000..5f5afe54ef
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc
@@ -0,0 +1,89 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/simplified-lowering.h"
+
+#include "src/compiler/compiler-source-position-table.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/simplified-operator.h"
+
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class SimplifiedLoweringTest : public GraphTest {
+ public:
+ explicit SimplifiedLoweringTest(int num_parameters = 1)
+ : GraphTest(num_parameters),
+ num_parameters_(num_parameters),
+ machine_(zone()),
+ javascript_(zone()),
+ simplified_(zone()),
+ jsgraph_(isolate(), graph(), common(), &javascript_, &simplified_,
+ &machine_) {}
+ ~SimplifiedLoweringTest() override {}
+
+ void LowerGraph(Node* node) {
+ // Make sure we always start with an empty graph.
+ graph()->SetStart(graph()->NewNode(common()->Start(num_parameters())));
+ graph()->SetEnd(graph()->NewNode(common()->End(1), graph()->start()));
+
+ // Return {node} directly, so that we can match it with
+ // "IsReturn(expected)".
+ Node* zero = graph()->NewNode(common()->NumberConstant(0));
+ Node* ret = graph()->NewNode(common()->Return(), zero, node,
+ graph()->start(), graph()->start());
+ NodeProperties::MergeControlToEnd(graph(), common(), ret);
+
+ {
+      // Simplified lowering needs to run without the typer decorator, so
+      // make sure the Typer object is not live at the same time.
+ Typer typer(isolate(), Typer::kNoFlags, graph());
+ typer.Run();
+ }
+
+ SimplifiedLowering lowering(jsgraph(), zone(), source_positions());
+ lowering.LowerAllNodes();
+ }
+
+ int num_parameters() const { return num_parameters_; }
+ JSGraph* jsgraph() { return &jsgraph_; }
+
+ private:
+ const int num_parameters_;
+ MachineOperatorBuilder machine_;
+ JSOperatorBuilder javascript_;
+ SimplifiedOperatorBuilder simplified_;
+ JSGraph jsgraph_;
+};
+
+const int kSmiValues[] = {Smi::kMinValue,
+ Smi::kMinValue + 1,
+ Smi::kMinValue + 2,
+ 3,
+ 2,
+ 1,
+ 0,
+ -1,
+ -2,
+ -3,
+ Smi::kMaxValue - 2,
+ Smi::kMaxValue - 1,
+ Smi::kMaxValue};
+
+TEST_F(SimplifiedLoweringTest, SmiConstantToIntPtrConstant) {
+ TRACED_FOREACH(int, x, kSmiValues) {
+ LowerGraph(jsgraph()->Constant(x));
+ intptr_t smi = bit_cast<intptr_t>(Smi::FromInt(x));
+ EXPECT_THAT(graph()->end()->InputAt(1),
+ IsReturn(IsIntPtrConstant(smi), start(), start()));
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
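
The expectation `bit_cast<intptr_t>(Smi::FromInt(x))` matches V8's usual 64-bit smi layout, where the 32-bit payload occupies the upper word and the low tag bit is 0 (32-bit builds use `value << 1` instead). An illustrative tagging helper under that assumption:

#include <cstdint>

// Hypothetical helper mirroring the 64-bit smi encoding assumed above.
intptr_t TagSmi64(int32_t value) {
  return static_cast<intptr_t>(
      static_cast<uintptr_t>(static_cast<uint32_t>(value)) << 32);
}
// TagSmi64(3) == 0x0000000300000000.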
diff --git a/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
index 2e67c0d4df..5dd7d84fbd 100644
--- a/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
@@ -92,11 +92,10 @@ const int32_t kInt32Values[] = {
1062628108, 1087581664, 1488498068, 1534668023, 1661587028, 1696896187,
1866841746, 2032089723, 2147483647};
-
const double kNaNs[] = {-std::numeric_limits<double>::quiet_NaN(),
std::numeric_limits<double>::quiet_NaN(),
- bit_cast<double>(V8_UINT64_C(0x7FFFFFFFFFFFFFFF)),
- bit_cast<double>(V8_UINT64_C(0xFFFFFFFFFFFFFFFF))};
+ bit_cast<double>(uint64_t{0x7FFFFFFFFFFFFFFF}),
+ bit_cast<double>(uint64_t{0xFFFFFFFFFFFFFFFF})};
const CheckForMinusZeroMode kCheckForMinusZeroModes[] = {
CheckForMinusZeroMode::kDontCheckForMinusZero,
@@ -357,10 +356,10 @@ TEST_F(SimplifiedOperatorReducerTest, CheckedFloat64ToInt32WithConstant) {
Node* effect = graph()->start();
Node* control = graph()->start();
TRACED_FOREACH(int32_t, n, kInt32Values) {
- Reduction r = Reduce(
- graph()->NewNode(simplified()->CheckedFloat64ToInt32(
- CheckForMinusZeroMode::kDontCheckForMinusZero),
- Float64Constant(n), effect, control));
+ Reduction r = Reduce(graph()->NewNode(
+ simplified()->CheckedFloat64ToInt32(
+ CheckForMinusZeroMode::kDontCheckForMinusZero, VectorSlotPair()),
+ Float64Constant(n), effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsInt32Constant(n));
}
@@ -416,8 +415,8 @@ TEST_F(SimplifiedOperatorReducerTest, CheckSmiWithChangeInt31ToTaggedSigned) {
Node* control = graph()->start();
Node* value =
graph()->NewNode(simplified()->ChangeInt31ToTaggedSigned(), param0);
- Reduction reduction = Reduce(
- graph()->NewNode(simplified()->CheckSmi(), value, effect, control));
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->CheckSmi(VectorSlotPair()), value, effect, control));
ASSERT_TRUE(reduction.Changed());
EXPECT_EQ(value, reduction.replacement());
}
@@ -426,8 +425,8 @@ TEST_F(SimplifiedOperatorReducerTest, CheckSmiWithNumberConstant) {
Node* effect = graph()->start();
Node* control = graph()->start();
Node* value = NumberConstant(1.0);
- Reduction reduction = Reduce(
- graph()->NewNode(simplified()->CheckSmi(), value, effect, control));
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->CheckSmi(VectorSlotPair()), value, effect, control));
ASSERT_TRUE(reduction.Changed());
EXPECT_EQ(value, reduction.replacement());
}
@@ -436,10 +435,10 @@ TEST_F(SimplifiedOperatorReducerTest, CheckSmiWithCheckSmi) {
Node* param0 = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
- Node* value = effect =
- graph()->NewNode(simplified()->CheckSmi(), param0, effect, control);
- Reduction reduction = Reduce(
- graph()->NewNode(simplified()->CheckSmi(), value, effect, control));
+ Node* value = effect = graph()->NewNode(
+ simplified()->CheckSmi(VectorSlotPair()), param0, effect, control);
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->CheckSmi(VectorSlotPair()), value, effect, control));
ASSERT_TRUE(reduction.Changed());
EXPECT_EQ(value, reduction.replacement());
}
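
These hunks track the addition of a feedback-slot parameter to the checked operators: `CheckSmi` and `CheckedFloat64ToInt32` now take a `VectorSlotPair`, and a default-constructed `VectorSlotPair()` presumably denotes the no-feedback case. The new call shape, drawn from the test:

Node* check = graph()->NewNode(
    simplified()->CheckSmi(VectorSlotPair()), value, effect, control);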
diff --git a/deps/v8/test/unittests/compiler/state-values-utils-unittest.cc b/deps/v8/test/unittests/compiler/state-values-utils-unittest.cc
index 388dd56247..d53e7d9462 100644
--- a/deps/v8/test/unittests/compiler/state-values-utils-unittest.cc
+++ b/deps/v8/test/unittests/compiler/state-values-utils-unittest.cc
@@ -45,10 +45,12 @@ TEST_F(StateValuesIteratorTest, SimpleIteration) {
TEST_F(StateValuesIteratorTest, EmptyIteration) {
NodeVector inputs(zone());
Node* state_values = StateValuesFromVector(&inputs);
+ bool empty = true;
for (auto node : StateValuesAccess(state_values)) {
USE(node);
- FAIL();
+ empty = false;
}
+ EXPECT_TRUE(empty);
}
diff --git a/deps/v8/test/unittests/compiler/typer-unittest.cc b/deps/v8/test/unittests/compiler/typer-unittest.cc
index 40f3efd2cf..44464b9476 100644
--- a/deps/v8/test/unittests/compiler/typer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/typer-unittest.cc
@@ -297,8 +297,8 @@ class TyperTest : public TypedGraphTest {
namespace {
-int32_t shift_left(int32_t x, int32_t y) { return x << (y & 0x1f); }
-int32_t shift_right(int32_t x, int32_t y) { return x >> (y & 0x1f); }
+int32_t shift_left(int32_t x, int32_t y) { return x << (y & 0x1F); }
+int32_t shift_right(int32_t x, int32_t y) { return x >> (y & 0x1F); }
int32_t bit_or(int32_t x, int32_t y) { return x | y; }
int32_t bit_and(int32_t x, int32_t y) { return x & y; }
int32_t bit_xor(int32_t x, int32_t y) { return x ^ y; }
diff --git a/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc b/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
index d1497392f9..031217b6b8 100644
--- a/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
@@ -1519,12 +1519,11 @@ TEST_F(InstructionSelectorTest, Word64ShlWithChangeUint32ToUint64) {
}
}
-
-TEST_F(InstructionSelectorTest, Word32AndWith0xff) {
+TEST_F(InstructionSelectorTest, Word32AndWith0xFF) {
{
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
- Node* const n = m.Word32And(p0, m.Int32Constant(0xff));
+ Node* const n = m.Word32And(p0, m.Int32Constant(0xFF));
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1537,7 +1536,7 @@ TEST_F(InstructionSelectorTest, Word32AndWith0xff) {
{
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
- Node* const n = m.Word32And(m.Int32Constant(0xff), p0);
+ Node* const n = m.Word32And(m.Int32Constant(0xFF), p0);
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1549,12 +1548,11 @@ TEST_F(InstructionSelectorTest, Word32AndWith0xff) {
}
}
-
-TEST_F(InstructionSelectorTest, Word32AndWith0xffff) {
+TEST_F(InstructionSelectorTest, Word32AndWith0xFFFF) {
{
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
- Node* const n = m.Word32And(p0, m.Int32Constant(0xffff));
+ Node* const n = m.Word32And(p0, m.Int32Constant(0xFFFF));
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1567,7 +1565,7 @@ TEST_F(InstructionSelectorTest, Word32AndWith0xffff) {
{
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
- Node* const n = m.Word32And(m.Int32Constant(0xffff), p0);
+ Node* const n = m.Word32And(m.Int32Constant(0xFFFF), p0);
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1643,6 +1641,15 @@ TEST_F(InstructionSelectorTest, LoadAndWord64ShiftRight32) {
}
}
+TEST_F(InstructionSelectorTest, SpeculationFence) {
+ StreamBuilder m(this, MachineType::Int32());
+ m.SpeculationFence();
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLFence, s[0]->arch_opcode());
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/counters-unittest.cc b/deps/v8/test/unittests/counters-unittest.cc
index d32d01060e..887ba54e01 100644
--- a/deps/v8/test/unittests/counters-unittest.cc
+++ b/deps/v8/test/unittests/counters-unittest.cc
@@ -4,6 +4,7 @@
#include <vector>
+#include "src/base/atomic-utils.h"
#include "src/base/platform/time.h"
#include "src/counters-inl.h"
#include "src/counters.h"
@@ -55,8 +56,9 @@ static base::TimeTicks RuntimeCallStatsTestNow() {
class RuntimeCallStatsTest : public TestWithNativeContext {
public:
RuntimeCallStatsTest() {
- FLAG_runtime_stats =
- v8::tracing::TracingCategoryObserver::ENABLED_BY_NATIVE;
+ base::AsAtomic32::Relaxed_Store(
+ &FLAG_runtime_stats,
+ v8::tracing::TracingCategoryObserver::ENABLED_BY_NATIVE);
// We need to set {time_} to a non-zero value since it would otherwise
// cause runtime call timers to think they are uninitialized.
Sleep(1);
@@ -67,7 +69,7 @@ class RuntimeCallStatsTest : public TestWithNativeContext {
// Disable RuntimeCallStats before tearing down the isolate to prevent
    // printing the stats table. Comment out the following line for debugging
    // purposes.
- FLAG_runtime_stats = 0;
+ base::AsAtomic32::Relaxed_Store(&FLAG_runtime_stats, 0);
}
static void SetUpTestCase() {
@@ -89,22 +91,24 @@ class RuntimeCallStatsTest : public TestWithNativeContext {
// Print current RuntimeCallStats table. For debugging purposes.
void PrintStats() { stats()->Print(); }
- RuntimeCallStats::CounterId counter_id() {
- return &RuntimeCallStats::TestCounter1;
+ RuntimeCallCounterId counter_id() {
+ return RuntimeCallCounterId::kTestCounter1;
}
- RuntimeCallStats::CounterId counter_id2() {
- return &RuntimeCallStats::TestCounter2;
+ RuntimeCallCounterId counter_id2() {
+ return RuntimeCallCounterId::kTestCounter2;
}
- RuntimeCallStats::CounterId counter_id3() {
- return &RuntimeCallStats::TestCounter3;
+ RuntimeCallCounterId counter_id3() {
+ return RuntimeCallCounterId::kTestCounter3;
}
- RuntimeCallCounter* js_counter() { return &stats()->JS_Execution; }
- RuntimeCallCounter* counter() { return &(stats()->*counter_id()); }
- RuntimeCallCounter* counter2() { return &(stats()->*counter_id2()); }
- RuntimeCallCounter* counter3() { return &(stats()->*counter_id3()); }
+ RuntimeCallCounter* js_counter() {
+ return stats()->GetCounter(RuntimeCallCounterId::kJS_Execution);
+ }
+ RuntimeCallCounter* counter() { return stats()->GetCounter(counter_id()); }
+ RuntimeCallCounter* counter2() { return stats()->GetCounter(counter_id2()); }
+ RuntimeCallCounter* counter3() { return stats()->GetCounter(counter_id3()); }
void Sleep(int64_t microseconds) {
base::TimeDelta delta = base::TimeDelta::FromMicroseconds(microseconds);
@@ -300,7 +304,7 @@ TEST_F(RuntimeCallStatsTest, RuntimeCallTimer) {
RuntimeCallTimer timer;
Sleep(50);
- RuntimeCallStats::Enter(stats(), &timer, counter_id());
+ stats()->Enter(&timer, counter_id());
EXPECT_EQ(counter(), timer.counter());
EXPECT_EQ(nullptr, timer.parent());
EXPECT_TRUE(timer.IsStarted());
@@ -308,7 +312,7 @@ TEST_F(RuntimeCallStatsTest, RuntimeCallTimer) {
Sleep(100);
- RuntimeCallStats::Leave(stats(), &timer);
+ stats()->Leave(&timer);
Sleep(50);
EXPECT_FALSE(timer.IsStarted());
EXPECT_EQ(1, counter()->count());
@@ -319,7 +323,7 @@ TEST_F(RuntimeCallStatsTest, RuntimeCallTimerSubTimer) {
RuntimeCallTimer timer;
RuntimeCallTimer timer2;
- RuntimeCallStats::Enter(stats(), &timer, counter_id());
+ stats()->Enter(&timer, counter_id());
EXPECT_TRUE(timer.IsStarted());
EXPECT_FALSE(timer2.IsStarted());
EXPECT_EQ(counter(), timer.counter());
@@ -328,7 +332,7 @@ TEST_F(RuntimeCallStatsTest, RuntimeCallTimerSubTimer) {
Sleep(50);
- RuntimeCallStats::Enter(stats(), &timer2, counter_id2());
+ stats()->Enter(&timer2, counter_id2());
// timer 1 is paused, while timer 2 is active.
EXPECT_TRUE(timer2.IsStarted());
EXPECT_EQ(counter(), timer.counter());
@@ -338,7 +342,7 @@ TEST_F(RuntimeCallStatsTest, RuntimeCallTimerSubTimer) {
EXPECT_EQ(&timer2, stats()->current_timer());
Sleep(100);
- RuntimeCallStats::Leave(stats(), &timer2);
+ stats()->Leave(&timer2);
// The subtimer subtracts its time from the parent timer.
EXPECT_TRUE(timer.IsStarted());
@@ -351,7 +355,7 @@ TEST_F(RuntimeCallStatsTest, RuntimeCallTimerSubTimer) {
Sleep(100);
- RuntimeCallStats::Leave(stats(), &timer);
+ stats()->Leave(&timer);
EXPECT_FALSE(timer.IsStarted());
EXPECT_EQ(1, counter()->count());
EXPECT_EQ(1, counter2()->count());
@@ -364,13 +368,13 @@ TEST_F(RuntimeCallStatsTest, RuntimeCallTimerRecursive) {
RuntimeCallTimer timer;
RuntimeCallTimer timer2;
- RuntimeCallStats::Enter(stats(), &timer, counter_id());
+ stats()->Enter(&timer, counter_id());
EXPECT_EQ(counter(), timer.counter());
EXPECT_EQ(nullptr, timer.parent());
EXPECT_TRUE(timer.IsStarted());
EXPECT_EQ(&timer, stats()->current_timer());
- RuntimeCallStats::Enter(stats(), &timer2, counter_id());
+ stats()->Enter(&timer2, counter_id());
EXPECT_EQ(counter(), timer2.counter());
EXPECT_EQ(nullptr, timer.parent());
EXPECT_EQ(&timer, timer2.parent());
@@ -379,7 +383,7 @@ TEST_F(RuntimeCallStatsTest, RuntimeCallTimerRecursive) {
Sleep(50);
- RuntimeCallStats::Leave(stats(), &timer2);
+ stats()->Leave(&timer2);
EXPECT_EQ(nullptr, timer.parent());
EXPECT_FALSE(timer2.IsStarted());
EXPECT_TRUE(timer.IsStarted());
@@ -388,7 +392,7 @@ TEST_F(RuntimeCallStatsTest, RuntimeCallTimerRecursive) {
Sleep(100);
- RuntimeCallStats::Leave(stats(), &timer);
+ stats()->Leave(&timer);
EXPECT_FALSE(timer.IsStarted());
EXPECT_EQ(2, counter()->count());
EXPECT_EQ(150, counter()->time().InMicroseconds());
@@ -439,7 +443,8 @@ TEST_F(RuntimeCallStatsTest, RenameTimer) {
RuntimeCallTimerScope scope(stats(), counter_id());
Sleep(100);
}
- CHANGE_CURRENT_RUNTIME_COUNTER(stats(), TestCounter2);
+ CHANGE_CURRENT_RUNTIME_COUNTER(stats(),
+ RuntimeCallCounterId::kTestCounter2);
EXPECT_EQ(1, counter()->count());
EXPECT_EQ(0, counter2()->count());
EXPECT_EQ(100, counter()->time().InMicroseconds());
@@ -558,7 +563,8 @@ TEST_F(RuntimeCallStatsTest, NestedScopes) {
}
TEST_F(RuntimeCallStatsTest, BasicJavaScript) {
- RuntimeCallCounter* counter = &stats()->JS_Execution;
+ RuntimeCallCounter* counter =
+ stats()->GetCounter(RuntimeCallCounterId::kJS_Execution);
EXPECT_EQ(0, counter->count());
EXPECT_EQ(0, counter->time().InMicroseconds());
@@ -579,8 +585,10 @@ TEST_F(RuntimeCallStatsTest, BasicJavaScript) {
}
TEST_F(RuntimeCallStatsTest, FunctionLengthGetter) {
- RuntimeCallCounter* getter_counter = &stats()->FunctionLengthGetter;
- RuntimeCallCounter* js_counter = &stats()->JS_Execution;
+ RuntimeCallCounter* getter_counter =
+ stats()->GetCounter(RuntimeCallCounterId::kFunctionLengthGetter);
+ RuntimeCallCounter* js_counter =
+ stats()->GetCounter(RuntimeCallCounterId::kJS_Execution);
EXPECT_EQ(0, getter_counter->count());
EXPECT_EQ(0, js_counter->count());
EXPECT_EQ(0, getter_counter->time().InMicroseconds());
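
The counters tests follow RuntimeCallStats' move from pointer-to-member counter ids (`&RuntimeCallStats::TestCounter1`, dereferenced via `stats()->*counter_id()`) to a plain `RuntimeCallCounterId` enum resolved through `GetCounter()`, with `Enter`/`Leave` becoming instance methods. The new usage, assembled from the hunks above:

RuntimeCallTimer timer;
stats()->Enter(&timer, RuntimeCallCounterId::kTestCounter1);
Sleep(100);
stats()->Leave(&timer);
RuntimeCallCounter* counter =
    stats()->GetCounter(RuntimeCallCounterId::kTestCounter1);
EXPECT_EQ(1, counter->count());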
diff --git a/deps/v8/test/unittests/eh-frame-iterator-unittest.cc b/deps/v8/test/unittests/eh-frame-iterator-unittest.cc
index b228cc9caf..fff38209c5 100644
--- a/deps/v8/test/unittests/eh-frame-iterator-unittest.cc
+++ b/deps/v8/test/unittests/eh-frame-iterator-unittest.cc
@@ -20,40 +20,40 @@ class EhFrameIteratorTest : public testing::Test {};
TEST_F(EhFrameIteratorTest, Values) {
// Assuming little endian.
- static const byte kEncoded[] = {0xde, 0xc0, 0xad, 0xde, 0xef, 0xbe, 0xff};
+ static const byte kEncoded[] = {0xDE, 0xC0, 0xAD, 0xDE, 0xEF, 0xBE, 0xFF};
EhFrameIterator iterator(&kEncoded[0], &kEncoded[0] + sizeof(kEncoded));
- EXPECT_EQ(0xdeadc0de, iterator.GetNextUInt32());
- EXPECT_EQ(0xbeef, iterator.GetNextUInt16());
- EXPECT_EQ(0xff, iterator.GetNextByte());
+ EXPECT_EQ(0xDEADC0DE, iterator.GetNextUInt32());
+ EXPECT_EQ(0xBEEF, iterator.GetNextUInt16());
+ EXPECT_EQ(0xFF, iterator.GetNextByte());
EXPECT_TRUE(iterator.Done());
}
TEST_F(EhFrameIteratorTest, Skip) {
- static const byte kEncoded[] = {0xde, 0xad, 0xc0, 0xde};
+ static const byte kEncoded[] = {0xDE, 0xAD, 0xC0, 0xDE};
EhFrameIterator iterator(&kEncoded[0], &kEncoded[0] + sizeof(kEncoded));
iterator.Skip(2);
EXPECT_EQ(2, iterator.GetCurrentOffset());
- EXPECT_EQ(0xc0, iterator.GetNextByte());
+ EXPECT_EQ(0xC0, iterator.GetNextByte());
iterator.Skip(1);
EXPECT_TRUE(iterator.Done());
}
TEST_F(EhFrameIteratorTest, ULEB128Decoding) {
- static const byte kEncoded[] = {0xe5, 0x8e, 0x26};
+ static const byte kEncoded[] = {0xE5, 0x8E, 0x26};
EhFrameIterator iterator(&kEncoded[0], &kEncoded[0] + sizeof(kEncoded));
EXPECT_EQ(624485u, iterator.GetNextULeb128());
EXPECT_TRUE(iterator.Done());
}
TEST_F(EhFrameIteratorTest, SLEB128DecodingPositive) {
- static const byte kEncoded[] = {0xe5, 0x8e, 0x26};
+ static const byte kEncoded[] = {0xE5, 0x8E, 0x26};
EhFrameIterator iterator(&kEncoded[0], &kEncoded[0] + sizeof(kEncoded));
EXPECT_EQ(624485, iterator.GetNextSLeb128());
EXPECT_TRUE(iterator.Done());
}
TEST_F(EhFrameIteratorTest, SLEB128DecodingNegative) {
- static const byte kEncoded[] = {0x9b, 0xf1, 0x59};
+ static const byte kEncoded[] = {0x9B, 0xF1, 0x59};
EhFrameIterator iterator(&kEncoded[0], &kEncoded[0] + sizeof(kEncoded));
EXPECT_EQ(-624485, iterator.GetNextSLeb128());
EXPECT_TRUE(iterator.Done());
diff --git a/deps/v8/test/unittests/eh-frame-writer-unittest.cc b/deps/v8/test/unittests/eh-frame-writer-unittest.cc
index 0213835e9f..0846fda2f4 100644
--- a/deps/v8/test/unittests/eh-frame-writer-unittest.cc
+++ b/deps/v8/test/unittests/eh-frame-writer-unittest.cc
@@ -47,7 +47,7 @@ TEST_F(EhFrameWriterTest, Alignment) {
}
TEST_F(EhFrameWriterTest, FDEHeader) {
- static const int kProcedureSize = 0x5678abcd;
+ static const int kProcedureSize = 0x5678ABCD;
EhFrameWriter writer(zone());
writer.Initialize();
@@ -76,7 +76,7 @@ TEST_F(EhFrameWriterTest, FDEHeader) {
}
TEST_F(EhFrameWriterTest, SetOffset) {
- static const uint32_t kOffset = 0x0badc0de;
+ static const uint32_t kOffset = 0x0BADC0DE;
EhFrameWriter writer(zone());
writer.Initialize();
@@ -132,7 +132,7 @@ TEST_F(EhFrameWriterTest, SetRegister) {
TEST_F(EhFrameWriterTest, SetRegisterAndOffset) {
Register test_register = Register::from_code(kTestRegisterCode);
- static const uint32_t kOffset = 0x0badc0de;
+ static const uint32_t kOffset = 0x0BADC0DE;
EhFrameWriter writer(zone());
writer.Initialize();
@@ -199,7 +199,7 @@ TEST_F(EhFrameWriterTest, PcOffsetEncoding8bit) {
TEST_F(EhFrameWriterTest, PcOffsetEncoding8bitDelta) {
static const int kFirstOffset = 0x10;
static const int kSecondOffset = 0x70;
- static const int kThirdOffset = 0xb5;
+ static const int kThirdOffset = 0xB5;
EhFrameWriter writer(zone());
writer.Initialize();
diff --git a/deps/v8/test/unittests/heap/gc-tracer-unittest.cc b/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
index e7702fda75..e4e9260881 100644
--- a/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
+++ b/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
@@ -5,6 +5,7 @@
#include <cmath>
#include <limits>
+#include "src/base/platform/platform.h"
#include "src/globals.h"
#include "src/heap/gc-tracer.h"
#include "src/isolate.h"
@@ -294,5 +295,125 @@ TEST_F(GCTracerTest, IncrementalMarkingSpeed) {
tracer->IncrementalMarkingSpeedInBytesPerMillisecond()));
}
+TEST_F(GCTracerTest, BackgroundScavengerScope) {
+ GCTracer* tracer = i_isolate()->heap()->tracer();
+ tracer->ResetForTesting();
+ tracer->Start(SCAVENGER, GarbageCollectionReason::kTesting,
+ "collector unittest");
+ tracer->AddBackgroundScopeSample(
+ GCTracer::BackgroundScope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL, 10,
+ nullptr);
+ tracer->AddBackgroundScopeSample(
+ GCTracer::BackgroundScope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL, 1,
+ nullptr);
+ tracer->Stop(SCAVENGER);
+ EXPECT_DOUBLE_EQ(
+ 11, tracer->current_
+ .scopes[GCTracer::Scope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL]);
+}
+
+TEST_F(GCTracerTest, BackgroundMinorMCScope) {
+ GCTracer* tracer = i_isolate()->heap()->tracer();
+ tracer->ResetForTesting();
+ tracer->Start(MINOR_MARK_COMPACTOR, GarbageCollectionReason::kTesting,
+ "collector unittest");
+ tracer->AddBackgroundScopeSample(
+ GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_MARKING, 10, nullptr);
+ tracer->AddBackgroundScopeSample(
+ GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_MARKING, 1, nullptr);
+ tracer->AddBackgroundScopeSample(
+ GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_EVACUATE_COPY, 20,
+ nullptr);
+ tracer->AddBackgroundScopeSample(
+ GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_EVACUATE_COPY, 2, nullptr);
+ tracer->AddBackgroundScopeSample(
+ GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS,
+ 30, nullptr);
+ tracer->AddBackgroundScopeSample(
+ GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS,
+ 3, nullptr);
+ tracer->Stop(MINOR_MARK_COMPACTOR);
+ EXPECT_DOUBLE_EQ(
+ 11,
+ tracer->current_.scopes[GCTracer::Scope::MINOR_MC_BACKGROUND_MARKING]);
+ EXPECT_DOUBLE_EQ(
+ 22, tracer->current_
+ .scopes[GCTracer::Scope::MINOR_MC_BACKGROUND_EVACUATE_COPY]);
+ EXPECT_DOUBLE_EQ(
+ 33, tracer->current_.scopes
+ [GCTracer::Scope::MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS]);
+}
+
+TEST_F(GCTracerTest, BackgroundMajorMCScope) {
+ GCTracer* tracer = i_isolate()->heap()->tracer();
+ tracer->ResetForTesting();
+ tracer->AddBackgroundScopeSample(
+ GCTracer::BackgroundScope::MC_BACKGROUND_MARKING, 100, nullptr);
+ tracer->AddBackgroundScopeSample(
+ GCTracer::BackgroundScope::MC_BACKGROUND_SWEEPING, 200, nullptr);
+ tracer->AddBackgroundScopeSample(
+ GCTracer::BackgroundScope::MC_BACKGROUND_MARKING, 10, nullptr);
+ // Scavenger should not affect the major mark-compact scopes.
+ tracer->Start(SCAVENGER, GarbageCollectionReason::kTesting,
+ "collector unittest");
+ tracer->Stop(SCAVENGER);
+ tracer->AddBackgroundScopeSample(
+ GCTracer::BackgroundScope::MC_BACKGROUND_SWEEPING, 20, nullptr);
+ tracer->AddBackgroundScopeSample(
+ GCTracer::BackgroundScope::MC_BACKGROUND_MARKING, 1, nullptr);
+ tracer->AddBackgroundScopeSample(
+ GCTracer::BackgroundScope::MC_BACKGROUND_SWEEPING, 2, nullptr);
+ tracer->Start(MARK_COMPACTOR, GarbageCollectionReason::kTesting,
+ "collector unittest");
+ tracer->AddBackgroundScopeSample(
+ GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_COPY, 30, nullptr);
+ tracer->AddBackgroundScopeSample(
+ GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_COPY, 3, nullptr);
+ tracer->AddBackgroundScopeSample(
+ GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS, 40,
+ nullptr);
+ tracer->AddBackgroundScopeSample(
+ GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS, 4,
+ nullptr);
+ tracer->Stop(MARK_COMPACTOR);
+ EXPECT_DOUBLE_EQ(
+ 111, tracer->current_.scopes[GCTracer::Scope::MC_BACKGROUND_MARKING]);
+ EXPECT_DOUBLE_EQ(
+ 222, tracer->current_.scopes[GCTracer::Scope::MC_BACKGROUND_SWEEPING]);
+ EXPECT_DOUBLE_EQ(
+ 33,
+ tracer->current_.scopes[GCTracer::Scope::MC_BACKGROUND_EVACUATE_COPY]);
+ EXPECT_DOUBLE_EQ(
+ 44, tracer->current_
+ .scopes[GCTracer::Scope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS]);
+}
+
+class ThreadWithBackgroundScope final : public base::Thread {
+ public:
+ explicit ThreadWithBackgroundScope(GCTracer* tracer)
+ : Thread(Options("ThreadWithBackgroundScope")), tracer_(tracer) {}
+ void Run() override {
+ GCTracer::BackgroundScope scope(
+ tracer_, GCTracer::BackgroundScope::MC_BACKGROUND_MARKING);
+ }
+
+ private:
+ GCTracer* tracer_;
+};
+
+TEST_F(GCTracerTest, MultithreadedBackgroundScope) {
+ GCTracer* tracer = i_isolate()->heap()->tracer();
+ ThreadWithBackgroundScope thread1(tracer);
+ ThreadWithBackgroundScope thread2(tracer);
+ tracer->ResetForTesting();
+ thread1.Start();
+ thread2.Start();
+ tracer->FetchBackgroundMarkCompactCounters();
+ thread1.Join();
+ thread2.Join();
+ tracer->FetchBackgroundMarkCompactCounters();
+ EXPECT_LE(0, tracer->current_.scopes[GCTracer::Scope::MC_BACKGROUND_MARKING]);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/heap/heap-unittest.cc b/deps/v8/test/unittests/heap/heap-unittest.cc
index 3b7b610c8c..c63aa2b724 100644
--- a/deps/v8/test/unittests/heap/heap-unittest.cc
+++ b/deps/v8/test/unittests/heap/heap-unittest.cc
@@ -99,7 +99,7 @@ TEST_F(HeapTest, ASLR) {
}
if (hints.size() == 1) {
EXPECT_TRUE((*hints.begin()) == nullptr);
- EXPECT_TRUE(base::OS::GetRandomMmapAddr() == nullptr);
+ EXPECT_TRUE(i::GetRandomMmapAddr() == nullptr);
} else {
    // It is unlikely that 1000 random samples will collide to fewer than 500
    // distinct values.
diff --git a/deps/v8/test/unittests/heap/marking-unittest.cc b/deps/v8/test/unittests/heap/marking-unittest.cc
index 9dd432c175..0553dc0ea5 100644
--- a/deps/v8/test/unittests/heap/marking-unittest.cc
+++ b/deps/v8/test/unittests/heap/marking-unittest.cc
@@ -63,7 +63,7 @@ TEST(Marking, SetAndClearRange) {
calloc(Bitmap::kSize / kPointerSize, kPointerSize));
for (int i = 0; i < 3; i++) {
bitmap->SetRange(i, Bitmap::kBitsPerCell + i);
- CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[0], 0xffffffffu << i);
+ CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[0], 0xFFFFFFFFu << i);
CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[1], (1u << i) - 1);
bitmap->ClearRange(i, Bitmap::kBitsPerCell + i);
CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[0], 0x0u);
@@ -77,9 +77,9 @@ TEST(Marking, ClearMultipleRanges) {
calloc(Bitmap::kSize / kPointerSize, kPointerSize));
CHECK(bitmap->AllBitsClearInRange(0, Bitmap::kBitsPerCell * 3));
bitmap->SetRange(0, Bitmap::kBitsPerCell * 3);
- CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[0], 0xffffffffu);
- CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[1], 0xffffffffu);
- CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[2], 0xffffffffu);
+ CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[0], 0xFFFFFFFFu);
+ CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[1], 0xFFFFFFFFu);
+ CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[2], 0xFFFFFFFFu);
CHECK(bitmap->AllBitsSetInRange(0, Bitmap::kBitsPerCell * 3));
bitmap->ClearRange(Bitmap::kBitsPerCell / 2, Bitmap::kBitsPerCell);
bitmap->ClearRange(Bitmap::kBitsPerCell,
@@ -87,17 +87,17 @@ TEST(Marking, ClearMultipleRanges) {
bitmap->ClearRange(Bitmap::kBitsPerCell * 2 + 8,
Bitmap::kBitsPerCell * 2 + 16);
bitmap->ClearRange(Bitmap::kBitsPerCell * 2 + 24, Bitmap::kBitsPerCell * 3);
- CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[0], 0xffffu);
+ CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[0], 0xFFFFu);
CHECK(bitmap->AllBitsSetInRange(0, Bitmap::kBitsPerCell / 2));
CHECK(bitmap->AllBitsClearInRange(Bitmap::kBitsPerCell / 2,
Bitmap::kBitsPerCell));
- CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[1], 0xffff0000u);
+ CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[1], 0xFFFF0000u);
CHECK(
bitmap->AllBitsSetInRange(Bitmap::kBitsPerCell + Bitmap::kBitsPerCell / 2,
2 * Bitmap::kBitsPerCell));
CHECK(bitmap->AllBitsClearInRange(
Bitmap::kBitsPerCell, Bitmap::kBitsPerCell + Bitmap::kBitsPerCell / 2));
- CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[2], 0xff00ffu);
+ CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[2], 0xFF00FFu);
CHECK(bitmap->AllBitsSetInRange(2 * Bitmap::kBitsPerCell,
2 * Bitmap::kBitsPerCell + 8));
CHECK(bitmap->AllBitsClearInRange(2 * Bitmap::kBitsPerCell + 24,
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
index bbc9e565c9..26fcd1937b 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
@@ -13,6 +13,7 @@
#include "src/interpreter/bytecode-label.h"
#include "src/interpreter/bytecode-register-allocator.h"
#include "src/objects-inl.h"
+#include "test/unittests/interpreter/bytecode-utils.h"
#include "test/unittests/test-utils.h"
namespace v8 {
@@ -41,8 +42,11 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
Register reg(0);
Register other(reg.index() + 1);
Register wide(128);
- RegisterList reg_list(0, 10);
- RegisterList empty, single(0, 1), pair(0, 2), triple(0, 3);
+ RegisterList empty;
+ RegisterList single = BytecodeUtils::NewRegisterList(0, 1);
+ RegisterList pair = BytecodeUtils::NewRegisterList(0, 2);
+ RegisterList triple = BytecodeUtils::NewRegisterList(0, 3);
+ RegisterList reg_list = BytecodeUtils::NewRegisterList(0, 10);
// Emit argument creation operations.
builder.CreateArguments(CreateArgumentsType::kMappedArguments)
@@ -89,8 +93,6 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
feedback_spec.AddLoadGlobalICSlot(INSIDE_TYPEOF);
FeedbackSlot sloppy_store_global_slot =
feedback_spec.AddStoreGlobalICSlot(LanguageMode::kSloppy);
- FeedbackSlot strict_store_global_slot =
- feedback_spec.AddStoreGlobalICSlot(LanguageMode::kStrict);
FeedbackSlot load_slot = feedback_spec.AddLoadICSlot();
FeedbackSlot keyed_load_slot = feedback_spec.AddKeyedLoadICSlot();
FeedbackSlot sloppy_store_slot =
@@ -109,10 +111,7 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.LoadGlobal(name, load_global_slot.ToInt(), TypeofMode::NOT_INSIDE_TYPEOF)
.LoadGlobal(name, load_global_typeof_slot.ToInt(),
TypeofMode::INSIDE_TYPEOF)
- .StoreGlobal(name, sloppy_store_global_slot.ToInt(),
- LanguageMode::kSloppy)
- .StoreGlobal(name, strict_store_global_slot.ToInt(),
- LanguageMode::kStrict);
+ .StoreGlobal(name, sloppy_store_global_slot.ToInt());
// Emit context operations.
builder.PushContext(reg)
@@ -387,7 +386,7 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
// Emit generator operations.
builder.SuspendGenerator(reg, reg_list, 0)
.RestoreGeneratorState(reg)
- .RestoreGeneratorRegisters(reg, reg_list);
+ .ResumeGenerator(reg, reg, reg_list);
// Intrinsics handled by the interpreter.
builder.CallRuntime(Runtime::kInlineIsArray, reg_list);
@@ -398,7 +397,7 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
// Emit abort bytecode.
{
BytecodeLabel after;
- builder.Abort(kGenerator).Bind(&after);
+ builder.Abort(AbortReason::kOperandIsASmi).Bind(&after);
}
// Insert dummy ops to force longer jumps.
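The behavioral edits in this test track interpreter API changes made elsewhere in the commit: StoreGlobal loses its LanguageMode argument (the strict-store feedback slot and its coverage are dropped, presumably because the mode is now carried by the feedback slot itself), RestoreGeneratorRegisters becomes ResumeGenerator, and Abort now takes an AbortReason enumerator. A minimal sketch of the updated builder calls, reusing the register names from the test above; the argument roles for ResumeGenerator are not spelled out in this diff:

// Sketch only, not part of the patch.
builder.StoreGlobal(name, sloppy_store_global_slot.ToInt())
    .SuspendGenerator(reg, reg_list, 0)
    .RestoreGeneratorState(reg)
    .ResumeGenerator(reg, reg, reg_list)
    .Abort(AbortReason::kOperandIsASmi);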
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
index ee5d8803f7..f7c89e2869 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
@@ -7,6 +7,7 @@
#include "src/interpreter/bytecode-array-builder.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/objects-inl.h"
+#include "test/unittests/interpreter/bytecode-utils.h"
#include "test/unittests/test-utils.h"
namespace v8 {
@@ -33,8 +34,8 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
Smi* smi_1 = Smi::FromInt(-65536);
Register reg_0(0);
Register reg_1(1);
- RegisterList pair(0, 2);
- RegisterList triple(0, 3);
+ RegisterList pair = BytecodeUtils::NewRegisterList(0, 2);
+ RegisterList triple = BytecodeUtils::NewRegisterList(0, 3);
Register param = Register::FromParameterIndex(2, builder.parameter_count());
const AstRawString* name = ast_factory.GetOneByteString("abc");
uint32_t name_index = 2;
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc
index 12cd55c2a9..8d2cd4c501 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc
@@ -7,6 +7,7 @@
#include "src/interpreter/bytecode-array-builder.h"
#include "src/interpreter/bytecode-array-random-iterator.h"
#include "src/objects-inl.h"
+#include "test/unittests/interpreter/bytecode-utils.h"
#include "test/unittests/test-utils.h"
namespace v8 {
@@ -33,8 +34,8 @@ TEST_F(BytecodeArrayRandomIteratorTest, InvalidBeforeStart) {
Smi* smi_1 = Smi::FromInt(-65536);
Register reg_0(0);
Register reg_1(1);
- RegisterList pair(0, 2);
- RegisterList triple(0, 3);
+ RegisterList pair = BytecodeUtils::NewRegisterList(0, 2);
+ RegisterList triple = BytecodeUtils::NewRegisterList(0, 3);
Register param = Register::FromParameterIndex(2, builder.parameter_count());
const AstRawString* name = ast_factory.GetOneByteString("abc");
uint32_t feedback_slot = feedback_spec.AddLoadICSlot().ToInt();
@@ -87,8 +88,8 @@ TEST_F(BytecodeArrayRandomIteratorTest, InvalidAfterEnd) {
Smi* smi_1 = Smi::FromInt(-65536);
Register reg_0(0);
Register reg_1(1);
- RegisterList pair(0, 2);
- RegisterList triple(0, 3);
+ RegisterList pair = BytecodeUtils::NewRegisterList(0, 2);
+ RegisterList triple = BytecodeUtils::NewRegisterList(0, 3);
Register param = Register::FromParameterIndex(2, builder.parameter_count());
const AstRawString* name = ast_factory.GetOneByteString("abc");
uint32_t feedback_slot = feedback_spec.AddLoadICSlot().ToInt();
@@ -141,8 +142,8 @@ TEST_F(BytecodeArrayRandomIteratorTest, AccessesFirst) {
Smi* smi_1 = Smi::FromInt(-65536);
Register reg_0(0);
Register reg_1(1);
- RegisterList pair(0, 2);
- RegisterList triple(0, 3);
+ RegisterList pair = BytecodeUtils::NewRegisterList(0, 2);
+ RegisterList triple = BytecodeUtils::NewRegisterList(0, 3);
Register param = Register::FromParameterIndex(2, builder.parameter_count());
const AstRawString* name = ast_factory.GetOneByteString("abc");
uint32_t feedback_slot = feedback_spec.AddLoadICSlot().ToInt();
@@ -199,8 +200,8 @@ TEST_F(BytecodeArrayRandomIteratorTest, AccessesLast) {
Smi* smi_1 = Smi::FromInt(-65536);
Register reg_0(0);
Register reg_1(1);
- RegisterList pair(0, 2);
- RegisterList triple(0, 3);
+ RegisterList pair = BytecodeUtils::NewRegisterList(0, 2);
+ RegisterList triple = BytecodeUtils::NewRegisterList(0, 3);
Register param = Register::FromParameterIndex(2, builder.parameter_count());
const AstRawString* name = ast_factory.GetOneByteString("abc");
uint32_t feedback_slot = feedback_spec.AddLoadICSlot().ToInt();
@@ -258,8 +259,8 @@ TEST_F(BytecodeArrayRandomIteratorTest, RandomAccessValid) {
Smi* smi_1 = Smi::FromInt(-65536);
Register reg_0(0);
Register reg_1(1);
- RegisterList pair(0, 2);
- RegisterList triple(0, 3);
+ RegisterList pair = BytecodeUtils::NewRegisterList(0, 2);
+ RegisterList triple = BytecodeUtils::NewRegisterList(0, 3);
Register param = Register::FromParameterIndex(2, builder.parameter_count());
const AstRawString* name = ast_factory.GetOneByteString("abc");
uint32_t name_index = 2;
@@ -443,8 +444,8 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
Smi* smi_1 = Smi::FromInt(-65536);
Register reg_0(0);
Register reg_1(1);
- RegisterList pair(0, 2);
- RegisterList triple(0, 3);
+ RegisterList pair = BytecodeUtils::NewRegisterList(0, 2);
+ RegisterList triple = BytecodeUtils::NewRegisterList(0, 3);
Register param = Register::FromParameterIndex(2, builder.parameter_count());
const AstRawString* name = ast_factory.GetOneByteString("abc");
uint32_t name_index = 2;
@@ -722,8 +723,8 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
Smi* smi_1 = Smi::FromInt(-65536);
Register reg_0(0);
Register reg_1(1);
- RegisterList pair(0, 2);
- RegisterList triple(0, 3);
+ RegisterList pair = BytecodeUtils::NewRegisterList(0, 2);
+ RegisterList triple = BytecodeUtils::NewRegisterList(0, 3);
Register param = Register::FromParameterIndex(2, builder.parameter_count());
const AstRawString* name = ast_factory.GetOneByteString("abc");
uint32_t name_index = 2;
diff --git a/deps/v8/test/unittests/interpreter/bytecode-node-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-node-unittest.cc
index af793ebcfe..8b8cae50ea 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-node-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-node-unittest.cc
@@ -52,7 +52,7 @@ TEST_F(BytecodeNodeTest, Constructor4) {
}
TEST_F(BytecodeNodeTest, Constructor5) {
- uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
+ uint32_t operands[] = {0x71, 0xA5, 0x5A, 0xFC};
BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
operands[3]);
CHECK_EQ(node.operand_count(), 4);
@@ -65,7 +65,7 @@ TEST_F(BytecodeNodeTest, Constructor5) {
}
TEST_F(BytecodeNodeTest, Equality) {
- uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
+ uint32_t operands[] = {0x71, 0xA5, 0x5A, 0xFC};
BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
operands[3]);
CHECK_EQ(node, node);
@@ -75,7 +75,7 @@ TEST_F(BytecodeNodeTest, Equality) {
}
TEST_F(BytecodeNodeTest, EqualityWithSourceInfo) {
- uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
+ uint32_t operands[] = {0x71, 0xA5, 0x5A, 0xFC};
BytecodeSourceInfo first_source_info(3, true);
BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
operands[3], first_source_info);
@@ -87,7 +87,7 @@ TEST_F(BytecodeNodeTest, EqualityWithSourceInfo) {
}
TEST_F(BytecodeNodeTest, NoEqualityWithDifferentSourceInfo) {
- uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
+ uint32_t operands[] = {0x71, 0xA5, 0x5A, 0xFC};
BytecodeSourceInfo source_info(77, true);
BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
operands[3], source_info);
diff --git a/deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc
index 8c7b363ebf..9e3ceb140f 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc
@@ -6,6 +6,7 @@
#include "src/interpreter/bytecode-label.h"
#include "src/interpreter/bytecode-register-optimizer.h"
+#include "test/unittests/interpreter/bytecode-utils.h"
#include "test/unittests/test-utils.h"
namespace v8 {
@@ -169,8 +170,8 @@ TEST_F(BytecodeRegisterOptimizerTest, SingleTemporaryNotMaterializedForInput) {
CHECK_EQ(write_count(), 0u);
Register reg = optimizer()->GetInputRegister(temp0);
- RegisterList reg_list =
- optimizer()->GetInputRegisterList(RegisterList(temp0.index(), 1));
+ RegisterList reg_list = optimizer()->GetInputRegisterList(
+ BytecodeUtils::NewRegisterList(temp0.index(), 1));
CHECK_EQ(write_count(), 0u);
CHECK_EQ(parameter.index(), reg.index());
CHECK_EQ(parameter.index(), reg_list.first_register().index());
@@ -189,8 +190,8 @@ TEST_F(BytecodeRegisterOptimizerTest, RangeOfTemporariesMaterializedForInput) {
optimizer()
->PrepareForBytecode<Bytecode::kCallJSRuntime, AccumulatorUse::kWrite>();
- RegisterList reg_list =
- optimizer()->GetInputRegisterList(RegisterList(temp0.index(), 2));
+ RegisterList reg_list = optimizer()->GetInputRegisterList(
+ BytecodeUtils::NewRegisterList(temp0.index(), 2));
CHECK_EQ(temp0.index(), reg_list.first_register().index());
CHECK_EQ(2, reg_list.register_count());
CHECK_EQ(write_count(), 2u);
diff --git a/deps/v8/test/unittests/interpreter/bytecode-utils.h b/deps/v8/test/unittests/interpreter/bytecode-utils.h
index 9a2cee3014..401884559e 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-utils.h
+++ b/deps/v8/test/unittests/interpreter/bytecode-utils.h
@@ -6,6 +6,11 @@
#define V8_UNITTESTS_INTERPRETER_BYTECODE_UTILS_H_
#include "src/frames.h"
+#include "src/interpreter/bytecode-register.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
#if V8_TARGET_LITTLE_ENDIAN
@@ -33,4 +38,19 @@
#define R16(i) U16(REG_OPERAND(i))
#define R32(i) U32(REG_OPERAND(i))
+class BytecodeUtils {
+ public:
+ // Expose raw RegisterList construction to tests.
+ static RegisterList NewRegisterList(int first_reg_index, int register_count) {
+ return RegisterList(first_reg_index, register_count);
+ }
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(BytecodeUtils);
+};
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
#endif // V8_UNITTESTS_INTERPRETER_BYTECODE_UTILS_H_
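The new wrapper exists because raw RegisterList(first, count) construction is evidently no longer open to arbitrary callers; every test above now routes through BytecodeUtils::NewRegisterList. A plausible production-side shape, assumed here since the matching src/interpreter/bytecode-register.h change is not quoted in this section:

// Assumed declaration, illustrative only.
class RegisterList {
 public:
  RegisterList() : first_reg_index_(0), register_count_(0) {}

 private:
  // Restricted: tests reach this through the BytecodeUtils friend.
  RegisterList(int first_reg_index, int register_count);
  friend class BytecodeUtils;
  int first_reg_index_;
  int register_count_;
};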
diff --git a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
index 88acf680f5..03d9397c7f 100644
--- a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
@@ -417,7 +417,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, StoreRegister) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerTestState state(this, bytecode);
InterpreterAssemblerForTest m(&state, bytecode);
- Node* store_value = m.Int32Constant(0xdeadbeef);
+ Node* store_value = m.Int32Constant(0xDEADBEEF);
Node* reg_index_node = m.Parameter(0);
Node* store_reg_node = m.StoreRegister(store_value, reg_index_node);
EXPECT_THAT(store_reg_node,
@@ -471,7 +471,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadObjectField) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerTestState state(this, bytecode);
InterpreterAssemblerForTest m(&state, bytecode);
- Node* object = m.IntPtrConstant(0xdeadbeef);
+ Node* object = m.IntPtrConstant(0xDEADBEEF);
int offset = 16;
Node* load_field = m.LoadObjectField(object, offset);
EXPECT_THAT(load_field,
diff --git a/deps/v8/test/unittests/object-unittest.cc b/deps/v8/test/unittests/object-unittest.cc
index 47772a0f20..4cb113a644 100644
--- a/deps/v8/test/unittests/object-unittest.cc
+++ b/deps/v8/test/unittests/object-unittest.cc
@@ -61,7 +61,7 @@ TEST(Object, InstanceTypeListOrder) {
current_type = InstanceType::type; \
current = static_cast<int>(current_type); \
if (current > static_cast<int>(LAST_NAME_TYPE)) { \
- EXPECT_EQ(last + 1, current); \
+ EXPECT_LE(last + 1, current); \
} \
EXPECT_LT(last, current) << " INSTANCE_TYPE_LIST is not ordered: " \
<< "last = " << static_cast<InstanceType>(last) \
@@ -73,7 +73,7 @@ TEST(Object, InstanceTypeListOrder) {
}
TEST(Object, StructListOrder) {
- int current = static_cast<int>(InstanceType::ACCESSOR_INFO_TYPE);
+ int current = static_cast<int>(InstanceType::ACCESS_CHECK_INFO_TYPE);
int last = current - 1;
ASSERT_LT(0, last);
InstanceType current_type = static_cast<InstanceType>(current);
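Two relaxations in this file: the instance-type walk still requires strictly increasing values but, past LAST_NAME_TYPE, now tolerates gaps in the numbering (EXPECT_LE where EXPECT_EQ demanded consecutive values), and the struct-list walk starts at ACCESS_CHECK_INFO_TYPE, implying the first struct entry changed in this V8 roll. The loosened invariant, restated in isolation:

// Illustrative restatement; type_values is a hypothetical container of
// the enum values in list order.
int last = -1;
for (int current : type_values) {
  CHECK_LT(last, current);      // ordering is still enforced
  CHECK_LE(last + 1, current);  // consecutive numbering no longer is
  last = current;
}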
diff --git a/deps/v8/test/unittests/parser/ast-value-unittest.cc b/deps/v8/test/unittests/parser/ast-value-unittest.cc
index 2b7a227e0f..72e35a43a0 100644
--- a/deps/v8/test/unittests/parser/ast-value-unittest.cc
+++ b/deps/v8/test/unittests/parser/ast-value-unittest.cc
@@ -41,10 +41,10 @@ TEST_F(AstValueTest, BigIntToBooleanIsTrue) {
EXPECT_TRUE(NewBigInt("3")->ToBooleanIsTrue());
EXPECT_TRUE(NewBigInt("0b1")->ToBooleanIsTrue());
EXPECT_TRUE(NewBigInt("0o6")->ToBooleanIsTrue());
- EXPECT_TRUE(NewBigInt("0xa")->ToBooleanIsTrue());
+ EXPECT_TRUE(NewBigInt("0xA")->ToBooleanIsTrue());
EXPECT_TRUE(NewBigInt("0b0000001")->ToBooleanIsTrue());
EXPECT_TRUE(NewBigInt("0o00005000")->ToBooleanIsTrue());
- EXPECT_TRUE(NewBigInt("0x0000d00c0")->ToBooleanIsTrue());
+ EXPECT_TRUE(NewBigInt("0x0000D00C0")->ToBooleanIsTrue());
}
} // namespace internal
diff --git a/deps/v8/test/unittests/testcfg.py b/deps/v8/test/unittests/testcfg.py
new file mode 100644
index 0000000000..9b18743566
--- /dev/null
+++ b/deps/v8/test/unittests/testcfg.py
@@ -0,0 +1,70 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+
+from testrunner.local import command
+from testrunner.local import utils
+from testrunner.local import testsuite
+from testrunner.objects import testcase
+
+
+class TestSuite(testsuite.TestSuite):
+ def ListTests(self, context):
+ shell = os.path.abspath(os.path.join(context.shell_dir, self.name))
+ if utils.IsWindows():
+ shell += ".exe"
+
+ output = None
+ for i in xrange(3): # Try 3 times in case of errors.
+ cmd = command.Command(
+ cmd_prefix=context.command_prefix,
+ shell=shell,
+ args=['--gtest_list_tests'] + context.extra_flags)
+ output = cmd.execute()
+ if output.exit_code == 0:
+ break
+ print "Test executable failed to list the tests (try %d).\n\nCmd:" % i
+ print cmd
+ print "\nStdout:"
+ print output.stdout
+ print "\nStderr:"
+ print output.stderr
+ print "\nExit code: %d" % output.exit_code
+ else:
+ raise Exception("Test executable failed to list the tests.")
+
+ tests = []
+ test_case = ''
+ for line in output.stdout.splitlines():
+ test_desc = line.strip().split()[0]
+ if test_desc.endswith('.'):
+ test_case = test_desc
+ elif test_case and test_desc:
+ test_path = test_case + test_desc
+ tests.append(self._create_test(test_path))
+ tests.sort(key=lambda t: t.path)
+ return tests
+
+ def _test_class(self):
+ return TestCase
+
+ def _LegacyVariantsGeneratorFactory(self):
+ return testsuite.StandardLegacyVariantsGenerator
+
+
+class TestCase(testcase.TestCase):
+ def _get_suite_flags(self, ctx):
+ return (
+ ["--gtest_filter=" + self.path] +
+ ["--gtest_random_seed=%s" % ctx.random_seed] +
+ ["--gtest_print_time=0"]
+ )
+
+ def get_shell(self):
+ return self.suite.name
+
+
+def GetSuite(name, root):
+ return TestSuite(name, root)
diff --git a/deps/v8/test/unittests/unicode-unittest.cc b/deps/v8/test/unittests/unicode-unittest.cc
index c4df42c1c6..e5ccaca7b1 100644
--- a/deps/v8/test/unittests/unicode-unittest.cc
+++ b/deps/v8/test/unittests/unicode-unittest.cc
@@ -37,13 +37,15 @@ void DecodeNormally(const std::vector<byte>& bytes,
void DecodeIncrementally(const std::vector<byte>& bytes,
std::vector<unibrow::uchar>* output) {
unibrow::Utf8::Utf8IncrementalBuffer buffer = 0;
- for (auto b : bytes) {
- unibrow::uchar result = unibrow::Utf8::ValueOfIncremental(b, &buffer);
+ unibrow::Utf8::State state = unibrow::Utf8::State::kAccept;
+ for (size_t i = 0; i < bytes.size();) {
+ unibrow::uchar result =
+ unibrow::Utf8::ValueOfIncremental(bytes[i], &i, &state, &buffer);
if (result != unibrow::Utf8::kIncomplete) {
output->push_back(result);
}
}
- unibrow::uchar result = unibrow::Utf8::ValueOfIncrementalFinish(&buffer);
+ unibrow::uchar result = unibrow::Utf8::ValueOfIncrementalFinish(&state);
if (result != unibrow::Utf8::kBufferEmpty) {
output->push_back(result);
}
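The rewritten loop threads an explicit decoder State alongside the buffer and passes the byte index by pointer, and ValueOfIncrementalFinish now consults the state instead of the buffer. A plausible contract, assumed from the call shape rather than from the implementation (which is not quoted in this section):

// Presumed contract of the new API, sketch only:
// ValueOfIncremental(byte, &i, &state, &buffer)
//   on progress: advances *i and returns kIncomplete or a code point;
//   on a malformed prefix: may leave *i in place after emitting U+FFFD,
//   so the offending byte is re-examined as a potential lead byte. This
//   would explain why the loop no longer increments i itself.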
@@ -72,344 +74,344 @@ TEST(UnicodeTest, IncrementalUTF8DecodingVsNonIncrementalUtf8Decoding) {
TestCase data[] = {
// Correct UTF-8 text.
- {{0xce, 0xba, 0xe1, 0xbd, 0xb9, 0xcf, 0x83, 0xce, 0xbc, 0xce, 0xb5},
- {0x3ba, 0x1f79, 0x3c3, 0x3bc, 0x3b5}},
+ {{0xCE, 0xBA, 0xE1, 0xBD, 0xB9, 0xCF, 0x83, 0xCE, 0xBC, 0xCE, 0xB5},
+ {0x3BA, 0x1F79, 0x3C3, 0x3BC, 0x3B5}},
// First possible sequence of a certain length:
// 1 byte
{{0x00}, {0x0}},
// 2 bytes
- {{0xc2, 0x80}, {0x80}},
+ {{0xC2, 0x80}, {0x80}},
// 3 bytes
- {{0xe0, 0xa0, 0x80}, {0x800}},
+ {{0xE0, 0xA0, 0x80}, {0x800}},
// 4 bytes
- {{0xf0, 0x90, 0x80, 0x80}, {0x10000}},
+ {{0xF0, 0x90, 0x80, 0x80}, {0x10000}},
// 5 bytes (not supported)
- {{0xf8, 0x88, 0x80, 0x80, 0x80},
- {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ {{0xF8, 0x88, 0x80, 0x80, 0x80},
+ {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD}},
// 6 bytes (not supported)
- {{0xfc, 0x84, 0x80, 0x80, 0x80, 0x80},
- {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ {{0xFC, 0x84, 0x80, 0x80, 0x80, 0x80},
+ {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD}},
// Last possible sequence of certain length:
// 1 byte
- {{0x7f}, {0x7f}},
+ {{0x7F}, {0x7F}},
// 2 bytes
- {{0xdf, 0xbf}, {0x7ff}},
+ {{0xDF, 0xBF}, {0x7FF}},
// 3 bytes
- {{0xef, 0xbf, 0xbf}, {0xffff}},
+ {{0xEF, 0xBF, 0xBF}, {0xFFFF}},
// 4 bytes (this sequence is not a valid code point)
- {{0xf7, 0xbf, 0xbf, 0xbf}, {0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ {{0xF7, 0xBF, 0xBF, 0xBF}, {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD}},
// 5 bytes (not supported)
- {{0xfb, 0xbf, 0xbf, 0xbf, 0xbf},
- {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ {{0xFB, 0xBF, 0xBF, 0xBF, 0xBF},
+ {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD}},
// 6 bytes (not supported)
- {{0xfd, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf},
- {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ {{0xFD, 0xBF, 0xBF, 0xBF, 0xBF, 0xBF},
+ {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD}},
// Other boundary conditions:
- {{0xed, 0x9f, 0xbf}, {0xd7ff}},
- {{0xee, 0x80, 0x80}, {0xe000}},
+ {{0xED, 0x9F, 0xBF}, {0xD7FF}},
+ {{0xEE, 0x80, 0x80}, {0xE000}},
// U+fffd (invalid code point)
- {{0xef, 0xbf, 0xbd}, {0xfffd}},
+ {{0xEF, 0xBF, 0xBD}, {0xFFFD}},
// U+10ffff (last valid code point)
- {{0xf4, 0x8f, 0xbf, 0xbf}, {0x10ffff}},
+ {{0xF4, 0x8F, 0xBF, 0xBF}, {0x10FFFF}},
// First invalid (too large) code point
- {{0xf4, 0x90, 0x80, 0x80}, {0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ {{0xF4, 0x90, 0x80, 0x80}, {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD}},
// Malformed sequences:
// Unexpected continuation bytes:
// First continuation byte
- {{0x80}, {0xfffd}},
+ {{0x80}, {0xFFFD}},
// Last continuation byte
- {{0xbf}, {0xfffd}},
+ {{0xBF}, {0xFFFD}},
// 2 continuation bytes
- {{0x80, 0xbf}, {0xfffd, 0xfffd}},
+ {{0x80, 0xBF}, {0xFFFD, 0xFFFD}},
// 3 continuation bytes
- {{0x80, 0xbf, 0x80}, {0xfffd, 0xfffd, 0xfffd}},
+ {{0x80, 0xBF, 0x80}, {0xFFFD, 0xFFFD, 0xFFFD}},
// 4 continuation bytes
- {{0x80, 0xbf, 0x80, 0xbf}, {0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ {{0x80, 0xBF, 0x80, 0xBF}, {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD}},
// 5 continuation bytes
- {{0x80, 0xbf, 0x80, 0xbf, 0x80},
- {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ {{0x80, 0xBF, 0x80, 0xBF, 0x80},
+ {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD}},
// 6 continuation bytes
- {{0x80, 0xbf, 0x80, 0xbf, 0x80, 0xbf},
- {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ {{0x80, 0xBF, 0x80, 0xBF, 0x80, 0xBF},
+ {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD}},
// 7 continuation bytes
- {{0x80, 0xbf, 0x80, 0xbf, 0x80, 0xbf, 0xbf},
- {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ {{0x80, 0xBF, 0x80, 0xBF, 0x80, 0xBF, 0xBF},
+ {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD}},
// Sequence of all 64 possible continuation bytes
- {{0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a,
- 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95,
- 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0,
- 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab,
- 0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6,
- 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf},
- {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd,
- 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd,
- 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd,
- 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd,
- 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd,
- 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd,
- 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd,
- 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ {{0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A,
+ 0x8B, 0x8C, 0x8D, 0x8E, 0x8F, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95,
+ 0x96, 0x97, 0x98, 0x99, 0x9A, 0x9B, 0x9C, 0x9D, 0x9E, 0x9F, 0xA0,
+ 0xA1, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xAB,
+ 0xAC, 0xAD, 0xAE, 0xAF, 0xB0, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6,
+ 0xB7, 0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD, 0xBE, 0xBF},
+ {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD,
+ 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD,
+ 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD,
+ 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD,
+ 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD,
+ 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD,
+ 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD,
+ 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD}},
// Using each possible continuation byte in a two-byte sequence:
- {{0xd0, 0x80, 0xd0, 0x81, 0xd0, 0x82, 0xd0, 0x83, 0xd0, 0x84, 0xd0, 0x85,
- 0xd0, 0x86, 0xd0, 0x87, 0xd0, 0x88, 0xd0, 0x89, 0xd0, 0x8a, 0xd0, 0x8b,
- 0xd0, 0x8c, 0xd0, 0x8d, 0xd0, 0x8e, 0xd0, 0x8f, 0xd0, 0x90, 0xd0, 0x91,
- 0xd0, 0x92, 0xd0, 0x93, 0xd0, 0x94, 0xd0, 0x95, 0xd0, 0x96, 0xd0, 0x97,
- 0xd0, 0x98, 0xd0, 0x99, 0xd0, 0x9a, 0xd0, 0x9b, 0xd0, 0x9c, 0xd0, 0x9d,
- 0xd0, 0x9e, 0xd0, 0x9f, 0xd0, 0xa0, 0xd0, 0xa1, 0xd0, 0xa2, 0xd0, 0xa3,
- 0xd0, 0xa4, 0xd0, 0xa5, 0xd0, 0xa6, 0xd0, 0xa7, 0xd0, 0xa8, 0xd0, 0xa9,
- 0xd0, 0xaa, 0xd0, 0xab, 0xd0, 0xac, 0xd0, 0xad, 0xd0, 0xae, 0xd0, 0xaf,
- 0xd0, 0xb0, 0xd0, 0xb1, 0xd0, 0xb2, 0xd0, 0xb3, 0xd0, 0xb4, 0xd0, 0xb5,
- 0xd0, 0xb6, 0xd0, 0xb7, 0xd0, 0xb8, 0xd0, 0xb9, 0xd0, 0xba, 0xd0, 0xbb,
- 0xd0, 0xbc, 0xd0, 0xbd, 0xd0, 0xbe, 0xd0, 0xbf},
+ {{0xD0, 0x80, 0xD0, 0x81, 0xD0, 0x82, 0xD0, 0x83, 0xD0, 0x84, 0xD0, 0x85,
+ 0xD0, 0x86, 0xD0, 0x87, 0xD0, 0x88, 0xD0, 0x89, 0xD0, 0x8A, 0xD0, 0x8B,
+ 0xD0, 0x8C, 0xD0, 0x8D, 0xD0, 0x8E, 0xD0, 0x8F, 0xD0, 0x90, 0xD0, 0x91,
+ 0xD0, 0x92, 0xD0, 0x93, 0xD0, 0x94, 0xD0, 0x95, 0xD0, 0x96, 0xD0, 0x97,
+ 0xD0, 0x98, 0xD0, 0x99, 0xD0, 0x9A, 0xD0, 0x9B, 0xD0, 0x9C, 0xD0, 0x9D,
+ 0xD0, 0x9E, 0xD0, 0x9F, 0xD0, 0xA0, 0xD0, 0xA1, 0xD0, 0xA2, 0xD0, 0xA3,
+ 0xD0, 0xA4, 0xD0, 0xA5, 0xD0, 0xA6, 0xD0, 0xA7, 0xD0, 0xA8, 0xD0, 0xA9,
+ 0xD0, 0xAA, 0xD0, 0xAB, 0xD0, 0xAC, 0xD0, 0xAD, 0xD0, 0xAE, 0xD0, 0xAF,
+ 0xD0, 0xB0, 0xD0, 0xB1, 0xD0, 0xB2, 0xD0, 0xB3, 0xD0, 0xB4, 0xD0, 0xB5,
+ 0xD0, 0xB6, 0xD0, 0xB7, 0xD0, 0xB8, 0xD0, 0xB9, 0xD0, 0xBA, 0xD0, 0xBB,
+ 0xD0, 0xBC, 0xD0, 0xBD, 0xD0, 0xBE, 0xD0, 0xBF},
{0x400, 0x401, 0x402, 0x403, 0x404, 0x405, 0x406, 0x407, 0x408, 0x409,
- 0x40a, 0x40b, 0x40c, 0x40d, 0x40e, 0x40f, 0x410, 0x411, 0x412, 0x413,
- 0x414, 0x415, 0x416, 0x417, 0x418, 0x419, 0x41a, 0x41b, 0x41c, 0x41d,
- 0x41e, 0x41f, 0x420, 0x421, 0x422, 0x423, 0x424, 0x425, 0x426, 0x427,
- 0x428, 0x429, 0x42a, 0x42b, 0x42c, 0x42d, 0x42e, 0x42f, 0x430, 0x431,
- 0x432, 0x433, 0x434, 0x435, 0x436, 0x437, 0x438, 0x439, 0x43a, 0x43b,
- 0x43c, 0x43d, 0x43e, 0x43f}},
+ 0x40A, 0x40B, 0x40C, 0x40D, 0x40E, 0x40F, 0x410, 0x411, 0x412, 0x413,
+ 0x414, 0x415, 0x416, 0x417, 0x418, 0x419, 0x41A, 0x41B, 0x41C, 0x41D,
+ 0x41E, 0x41F, 0x420, 0x421, 0x422, 0x423, 0x424, 0x425, 0x426, 0x427,
+ 0x428, 0x429, 0x42A, 0x42B, 0x42C, 0x42D, 0x42E, 0x42F, 0x430, 0x431,
+ 0x432, 0x433, 0x434, 0x435, 0x436, 0x437, 0x438, 0x439, 0x43A, 0x43B,
+ 0x43C, 0x43D, 0x43E, 0x43F}},
// Lonely first bytes:
// All 32 first bytes of 2-byte sequences, each followed by a space
// (generates 32 invalid char + space sequences):
- {{0xc0, 0x20, 0xc1, 0x20, 0xc2, 0x20, 0xc3, 0x20, 0xc4, 0x20, 0xc5,
- 0x20, 0xc6, 0x20, 0xc7, 0x20, 0xc8, 0x20, 0xc9, 0x20, 0xca, 0x20,
- 0xcb, 0x20, 0xcc, 0x20, 0xcd, 0x20, 0xce, 0x20, 0xcf, 0x20, 0xd0,
- 0x20, 0xd1, 0x20, 0xd2, 0x20, 0xd3, 0x20, 0xd4, 0x20, 0xd5, 0x20,
- 0xd6, 0x20, 0xd7, 0x20, 0xd8, 0x20, 0xd9, 0x20, 0xda, 0x20, 0xdb,
- 0x20, 0xdc, 0x20, 0xdd, 0x20, 0xde, 0x20, 0xdf, 0x20},
- {0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20,
- 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20,
- 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20,
- 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20,
- 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20,
- 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20,
- 0xfffd, 0x20, 0xfffd, 0x20}},
+ {{0xC0, 0x20, 0xC1, 0x20, 0xC2, 0x20, 0xC3, 0x20, 0xC4, 0x20, 0xC5,
+ 0x20, 0xC6, 0x20, 0xC7, 0x20, 0xC8, 0x20, 0xC9, 0x20, 0xCA, 0x20,
+ 0xCB, 0x20, 0xCC, 0x20, 0xCD, 0x20, 0xCE, 0x20, 0xCF, 0x20, 0xD0,
+ 0x20, 0xD1, 0x20, 0xD2, 0x20, 0xD3, 0x20, 0xD4, 0x20, 0xD5, 0x20,
+ 0xD6, 0x20, 0xD7, 0x20, 0xD8, 0x20, 0xD9, 0x20, 0xDA, 0x20, 0xDB,
+ 0x20, 0xDC, 0x20, 0xDD, 0x20, 0xDE, 0x20, 0xDF, 0x20},
+ {0xFFFD, 0x20, 0xFFFD, 0x20, 0xFFFD, 0x20, 0xFFFD, 0x20, 0xFFFD, 0x20,
+ 0xFFFD, 0x20, 0xFFFD, 0x20, 0xFFFD, 0x20, 0xFFFD, 0x20, 0xFFFD, 0x20,
+ 0xFFFD, 0x20, 0xFFFD, 0x20, 0xFFFD, 0x20, 0xFFFD, 0x20, 0xFFFD, 0x20,
+ 0xFFFD, 0x20, 0xFFFD, 0x20, 0xFFFD, 0x20, 0xFFFD, 0x20, 0xFFFD, 0x20,
+ 0xFFFD, 0x20, 0xFFFD, 0x20, 0xFFFD, 0x20, 0xFFFD, 0x20, 0xFFFD, 0x20,
+ 0xFFFD, 0x20, 0xFFFD, 0x20, 0xFFFD, 0x20, 0xFFFD, 0x20, 0xFFFD, 0x20,
+ 0xFFFD, 0x20, 0xFFFD, 0x20}},
// All 16 first bytes of 3-byte sequences, each followed by a space
// (generates 16 invalid char + space sequences):
- {{0xe0, 0x20, 0xe1, 0x20, 0xe2, 0x20, 0xe3, 0x20, 0xe4, 0x20, 0xe5,
- 0x20, 0xe6, 0x20, 0xe7, 0x20, 0xe8, 0x20, 0xe9, 0x20, 0xea, 0x20,
- 0xeb, 0x20, 0xec, 0x20, 0xed, 0x20, 0xee, 0x20, 0xef, 0x20},
- {0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20,
- 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20,
- 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20,
- 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20}},
+ {{0xE0, 0x20, 0xE1, 0x20, 0xE2, 0x20, 0xE3, 0x20, 0xE4, 0x20, 0xE5,
+ 0x20, 0xE6, 0x20, 0xE7, 0x20, 0xE8, 0x20, 0xE9, 0x20, 0xEA, 0x20,
+ 0xEB, 0x20, 0xEC, 0x20, 0xED, 0x20, 0xEE, 0x20, 0xEF, 0x20},
+ {0xFFFD, 0x20, 0xFFFD, 0x20, 0xFFFD, 0x20, 0xFFFD, 0x20,
+ 0xFFFD, 0x20, 0xFFFD, 0x20, 0xFFFD, 0x20, 0xFFFD, 0x20,
+ 0xFFFD, 0x20, 0xFFFD, 0x20, 0xFFFD, 0x20, 0xFFFD, 0x20,
+ 0xFFFD, 0x20, 0xFFFD, 0x20, 0xFFFD, 0x20, 0xFFFD, 0x20}},
// All 8 first bytes of 4-byte sequences, each followed by a space
// (generates 8 invalid char + space sequences):
- {{0xf0, 0x20, 0xf1, 0x20, 0xf2, 0x20, 0xf3, 0x20, 0xf4, 0x20, 0xf5, 0x20,
- 0xf6, 0x20, 0xf7, 0x20},
- {0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20,
- 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20}},
+ {{0xF0, 0x20, 0xF1, 0x20, 0xF2, 0x20, 0xF3, 0x20, 0xF4, 0x20, 0xF5, 0x20,
+ 0xF6, 0x20, 0xF7, 0x20},
+ {0xFFFD, 0x20, 0xFFFD, 0x20, 0xFFFD, 0x20, 0xFFFD, 0x20, 0xFFFD, 0x20,
+ 0xFFFD, 0x20, 0xFFFD, 0x20, 0xFFFD, 0x20}},
// All 4 first bytes of 5-byte sequences (not supported), each followed by
// a space (generates 4 invalid char + space sequences):
- {{0xf8, 0x20, 0xf9, 0x20, 0xfa, 0x20, 0xfb, 0x20},
- {0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20}},
+ {{0xF8, 0x20, 0xF9, 0x20, 0xFA, 0x20, 0xFB, 0x20},
+ {0xFFFD, 0x20, 0xFFFD, 0x20, 0xFFFD, 0x20, 0xFFFD, 0x20}},
// All 2 first bytes of 6-byte sequences (not supported), each followed by
// a space (generates 2 invalid char + space sequences):
- {{0xfc, 0x20, 0xfd, 0x20}, {0xfffd, 0x20, 0xfffd, 0x20}},
+ {{0xFC, 0x20, 0xFD, 0x20}, {0xFFFD, 0x20, 0xFFFD, 0x20}},
// Sequences with last continuation byte missing. Normally the whole
// incomplete sequence generates a single invalid character (exceptions
// explained below).
// 2-byte sequences with last byte missing
- {{0xc0}, {0xfffd}},
- {{0xdf}, {0xfffd}},
+ {{0xC0}, {0xFFFD}},
+ {{0xDF}, {0xFFFD}},
// 3-byte sequences with last byte missing.
- {{0xe8, 0x80}, {0xfffd}},
- {{0xe0, 0xbf}, {0xfffd}},
- {{0xef, 0xbf}, {0xfffd}},
+ {{0xE8, 0x80}, {0xFFFD}},
+ {{0xE0, 0xBF}, {0xFFFD}},
+ {{0xEF, 0xBF}, {0xFFFD}},
// Start of an overlong sequence. The first "maximal subpart" is the first
// byte; it creates an invalid character. Each following byte generates an
// invalid character too.
- {{0xe0, 0x80}, {0xfffd, 0xfffd}},
+ {{0xE0, 0x80}, {0xFFFD, 0xFFFD}},
// 4-byte sequences with last byte missing
- {{0xf1, 0x80, 0x80}, {0xfffd}},
- {{0xf4, 0x8f, 0xbf}, {0xfffd}},
+ {{0xF1, 0x80, 0x80}, {0xFFFD}},
+ {{0xF4, 0x8F, 0xBF}, {0xFFFD}},
// Start of an overlong sequence. The first "maximal subpart" is the first
// byte; it creates an invalid character. Each following byte generates an
// invalid character too.
- {{0xf0, 0x80, 0x80}, {0xfffd, 0xfffd, 0xfffd}},
+ {{0xF0, 0x80, 0x80}, {0xFFFD, 0xFFFD, 0xFFFD}},
// 5-byte sequences (not supported) with last byte missing
- {{0xf8, 0x80, 0x80, 0x80}, {0xfffd, 0xfffd, 0xfffd, 0xfffd}},
- {{0xfb, 0xbf, 0xbf, 0xbf}, {0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ {{0xF8, 0x80, 0x80, 0x80}, {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD}},
+ {{0xFB, 0xBF, 0xBF, 0xBF}, {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD}},
// 6-byte sequences (not supported) with last byte missing
- {{0xfc, 0x80, 0x80, 0x80, 0x80},
- {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
- {{0xfd, 0xbf, 0xbf, 0xbf, 0xbf},
- {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ {{0xFC, 0x80, 0x80, 0x80, 0x80},
+ {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD}},
+ {{0xFD, 0xBF, 0xBF, 0xBF, 0xBF},
+ {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD}},
// Concatenation of incomplete sequences: above incomplete sequences
// concatenated.
- {{0xc0, 0xdf, 0xe8, 0x80, 0xe0, 0xbf, 0xef, 0xbf, 0xe0, 0x80,
- 0xf1, 0x80, 0x80, 0xf4, 0x8f, 0xbf, 0xf0, 0x80, 0x80, 0xf8,
- 0x80, 0x80, 0x80, 0xfb, 0xbf, 0xbf, 0xbf, 0xfc, 0x80, 0x80,
- 0x80, 0x80, 0xfd, 0xbf, 0xbf, 0xbf, 0xbf},
- {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd,
- 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd,
- 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd,
- 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ {{0xC0, 0xDF, 0xE8, 0x80, 0xE0, 0xBF, 0xEF, 0xBF, 0xE0, 0x80,
+ 0xF1, 0x80, 0x80, 0xF4, 0x8F, 0xBF, 0xF0, 0x80, 0x80, 0xF8,
+ 0x80, 0x80, 0x80, 0xFB, 0xBF, 0xBF, 0xBF, 0xFC, 0x80, 0x80,
+ 0x80, 0x80, 0xFD, 0xBF, 0xBF, 0xBF, 0xBF},
+ {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD,
+ 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD,
+ 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD,
+ 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD}},
// Incomplete sequence tests repeated with a space after the incomplete
// sequence.
// 2-byte sequences with last byte missing
- {{0xc0, 0x20}, {0xfffd, 0x20}},
- {{0xdf, 0x20}, {0xfffd, 0x20}},
+ {{0xC0, 0x20}, {0xFFFD, 0x20}},
+ {{0xDF, 0x20}, {0xFFFD, 0x20}},
// 3-byte sequences with last byte missing
- {{0xe8, 0x80, 0x20}, {0xfffd, 0x20}},
- {{0xe0, 0xbf, 0x20}, {0xfffd, 0x20}},
- {{0xef, 0xbf, 0x20}, {0xfffd, 0x20}},
+ {{0xE8, 0x80, 0x20}, {0xFFFD, 0x20}},
+ {{0xE0, 0xBF, 0x20}, {0xFFFD, 0x20}},
+ {{0xEF, 0xBF, 0x20}, {0xFFFD, 0x20}},
// Start of overlong 3-byte sequence with last byte missing
- {{0xe0, 0x80, 0x20}, {0xfffd, 0xfffd, 0x20}},
+ {{0xE0, 0x80, 0x20}, {0xFFFD, 0xFFFD, 0x20}},
// 4-byte sequences with last byte missing
- {{0xf1, 0x80, 0x80, 0x20}, {0xfffd, 0x20}},
- {{0xf4, 0x8f, 0xbf, 0x20}, {0xfffd, 0x20}},
+ {{0xF1, 0x80, 0x80, 0x20}, {0xFFFD, 0x20}},
+ {{0xF4, 0x8F, 0xBF, 0x20}, {0xFFFD, 0x20}},
// Start of overlong 4-byte sequence with last byte missing
- {{0xf0, 0x80, 0x80, 0x20}, {0xfffd, 0xfffd, 0xfffd, 0x20}},
+ {{0xF0, 0x80, 0x80, 0x20}, {0xFFFD, 0xFFFD, 0xFFFD, 0x20}},
// 5-byte sequences (not supported) with last byte missing
- {{0xf8, 0x80, 0x80, 0x80, 0x20}, {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0x20}},
- {{0xfb, 0xbf, 0xbf, 0xbf, 0x20}, {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0x20}},
+ {{0xF8, 0x80, 0x80, 0x80, 0x20}, {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0x20}},
+ {{0xFB, 0xBF, 0xBF, 0xBF, 0x20}, {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0x20}},
// 6-byte sequences (not supported) with last byte missing
- {{0xfc, 0x80, 0x80, 0x80, 0x80, 0x20},
- {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0x20}},
- {{0xfd, 0xbf, 0xbf, 0xbf, 0xbf, 0x20},
- {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0x20}},
+ {{0xFC, 0x80, 0x80, 0x80, 0x80, 0x20},
+ {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0x20}},
+ {{0xFD, 0xBF, 0xBF, 0xBF, 0xBF, 0x20},
+ {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0x20}},
// Impossible bytes
- {{0xfe}, {0xfffd}},
- {{0xff}, {0xfffd}},
- {{0xfe, 0xfe, 0xff, 0xff}, {0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ {{0xFE}, {0xFFFD}},
+ {{0xFF}, {0xFFFD}},
+ {{0xFE, 0xFE, 0xFF, 0xFF}, {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD}},
// Lead-byte-like bytes which aren't valid lead bytes.
- {{0xc0}, {0xfffd}},
- {{0xc0, 0xaa}, {0xfffd, 0xfffd}},
- {{0xc1}, {0xfffd}},
- {{0xc1, 0xaa}, {0xfffd, 0xfffd}},
- {{0xf5}, {0xfffd}},
- {{0xf5, 0xaa, 0xaa, 0xaa}, {0xfffd, 0xfffd, 0xfffd, 0xfffd}},
- {{0xf6}, {0xfffd}},
- {{0xf6, 0xaa, 0xaa, 0xaa}, {0xfffd, 0xfffd, 0xfffd, 0xfffd}},
- {{0xf7}, {0xfffd}},
- {{0xf7, 0xaa, 0xaa, 0xaa}, {0xfffd, 0xfffd, 0xfffd, 0xfffd}},
- {{0xf8}, {0xfffd}},
- {{0xf8, 0xaa, 0xaa, 0xaa}, {0xfffd, 0xfffd, 0xfffd, 0xfffd}},
- {{0xf9}, {0xfffd}},
- {{0xf9, 0xaa, 0xaa, 0xaa}, {0xfffd, 0xfffd, 0xfffd, 0xfffd}},
- {{0xfa}, {0xfffd}},
- {{0xfa, 0xaa, 0xaa, 0xaa}, {0xfffd, 0xfffd, 0xfffd, 0xfffd}},
- {{0xfb}, {0xfffd}},
- {{0xfb, 0xaa, 0xaa, 0xaa}, {0xfffd, 0xfffd, 0xfffd, 0xfffd}},
- {{0xfc}, {0xfffd}},
- {{0xfc, 0xaa, 0xaa, 0xaa}, {0xfffd, 0xfffd, 0xfffd, 0xfffd}},
- {{0xfd}, {0xfffd}},
- {{0xfd, 0xaa, 0xaa, 0xaa}, {0xfffd, 0xfffd, 0xfffd, 0xfffd}},
- {{0xfe}, {0xfffd}},
- {{0xfe, 0xaa, 0xaa, 0xaa}, {0xfffd, 0xfffd, 0xfffd, 0xfffd}},
- {{0xff}, {0xfffd}},
- {{0xff, 0xaa, 0xaa, 0xaa}, {0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ {{0xC0}, {0xFFFD}},
+ {{0xC0, 0xAA}, {0xFFFD, 0xFFFD}},
+ {{0xC1}, {0xFFFD}},
+ {{0xC1, 0xAA}, {0xFFFD, 0xFFFD}},
+ {{0xF5}, {0xFFFD}},
+ {{0xF5, 0xAA, 0xAA, 0xAA}, {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD}},
+ {{0xF6}, {0xFFFD}},
+ {{0xF6, 0xAA, 0xAA, 0xAA}, {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD}},
+ {{0xF7}, {0xFFFD}},
+ {{0xF7, 0xAA, 0xAA, 0xAA}, {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD}},
+ {{0xF8}, {0xFFFD}},
+ {{0xF8, 0xAA, 0xAA, 0xAA}, {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD}},
+ {{0xF9}, {0xFFFD}},
+ {{0xF9, 0xAA, 0xAA, 0xAA}, {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD}},
+ {{0xFA}, {0xFFFD}},
+ {{0xFA, 0xAA, 0xAA, 0xAA}, {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD}},
+ {{0xFB}, {0xFFFD}},
+ {{0xFB, 0xAA, 0xAA, 0xAA}, {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD}},
+ {{0xFC}, {0xFFFD}},
+ {{0xFC, 0xAA, 0xAA, 0xAA}, {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD}},
+ {{0xFD}, {0xFFFD}},
+ {{0xFD, 0xAA, 0xAA, 0xAA}, {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD}},
+ {{0xFE}, {0xFFFD}},
+ {{0xFE, 0xAA, 0xAA, 0xAA}, {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD}},
+ {{0xFF}, {0xFFFD}},
+ {{0xFF, 0xAA, 0xAA, 0xAA}, {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD}},
// Overlong sequences:
// Overlong encodings for "/"
- {{0xc0, 0xaf}, {0xfffd, 0xfffd}},
- {{0xe0, 0x80, 0xaf}, {0xfffd, 0xfffd, 0xfffd}},
- {{0xf0, 0x80, 0x80, 0xaf}, {0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ {{0xC0, 0xAF}, {0xFFFD, 0xFFFD}},
+ {{0xE0, 0x80, 0xAF}, {0xFFFD, 0xFFFD, 0xFFFD}},
+ {{0xF0, 0x80, 0x80, 0xAF}, {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD}},
// 5-byte sequence (not supported anyway)
- {{0xf8, 0x80, 0x80, 0x80, 0xaf},
- {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ {{0xF8, 0x80, 0x80, 0x80, 0xAF},
+ {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD}},
// 6-byte sequence (not supported anyway)
- {{0xfc, 0x80, 0x80, 0x80, 0x80, 0xaf},
- {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ {{0xFC, 0x80, 0x80, 0x80, 0x80, 0xAF},
+ {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD}},
// Maximum overlong sequences
- {{0xc1, 0xbf}, {0xfffd, 0xfffd}},
- {{0xe0, 0x9f, 0xbf}, {0xfffd, 0xfffd, 0xfffd}},
- {{0xf0, 0x8f, 0xbf, 0xbf}, {0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ {{0xC1, 0xBF}, {0xFFFD, 0xFFFD}},
+ {{0xE0, 0x9F, 0xBF}, {0xFFFD, 0xFFFD, 0xFFFD}},
+ {{0xF0, 0x8F, 0xBF, 0xBF}, {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD}},
// 5-byte sequence (not supported anyway)
- {{0xf8, 0x87, 0xbf, 0xbf, 0xbf},
- {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ {{0xF8, 0x87, 0xBF, 0xBF, 0xBF},
+ {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD}},
// 6-byte sequence (not supported anyway)
- {{0xfc, 0x83, 0xbf, 0xbf, 0xbf, 0xbf},
- {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ {{0xFC, 0x83, 0xBF, 0xBF, 0xBF, 0xBF},
+ {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD}},
// Overlong encodings for 0
- {{0xc0, 0x80}, {0xfffd, 0xfffd}},
- {{0xe0, 0x80, 0x80}, {0xfffd, 0xfffd, 0xfffd}},
- {{0xf0, 0x80, 0x80, 0x80}, {0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ {{0xC0, 0x80}, {0xFFFD, 0xFFFD}},
+ {{0xE0, 0x80, 0x80}, {0xFFFD, 0xFFFD, 0xFFFD}},
+ {{0xF0, 0x80, 0x80, 0x80}, {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD}},
// 5-byte sequence (not supported anyway)
- {{0xf8, 0x80, 0x80, 0x80, 0x80},
- {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ {{0xF8, 0x80, 0x80, 0x80, 0x80},
+ {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD}},
// 6-byte sequence (not supported anyway)
- {{0xfc, 0x80, 0x80, 0x80, 0x80, 0x80},
- {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ {{0xFC, 0x80, 0x80, 0x80, 0x80, 0x80},
+ {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD}},
// Illegal code positions:
// Single UTF-16 surrogates
- {{0xed, 0xa0, 0x80}, {0xfffd, 0xfffd, 0xfffd}},
- {{0xed, 0xa0, 0x80}, {0xfffd, 0xfffd, 0xfffd}},
- {{0xed, 0xad, 0xbf}, {0xfffd, 0xfffd, 0xfffd}},
- {{0xed, 0xae, 0x80}, {0xfffd, 0xfffd, 0xfffd}},
- {{0xed, 0xaf, 0xbf}, {0xfffd, 0xfffd, 0xfffd}},
- {{0xed, 0xb0, 0x80}, {0xfffd, 0xfffd, 0xfffd}},
- {{0xed, 0xbe, 0x80}, {0xfffd, 0xfffd, 0xfffd}},
- {{0xed, 0xbf, 0xbf}, {0xfffd, 0xfffd, 0xfffd}},
+ {{0xED, 0xA0, 0x80}, {0xFFFD, 0xFFFD, 0xFFFD}},
+ {{0xED, 0xA0, 0x80}, {0xFFFD, 0xFFFD, 0xFFFD}},
+ {{0xED, 0xAD, 0xBF}, {0xFFFD, 0xFFFD, 0xFFFD}},
+ {{0xED, 0xAE, 0x80}, {0xFFFD, 0xFFFD, 0xFFFD}},
+ {{0xED, 0xAF, 0xBF}, {0xFFFD, 0xFFFD, 0xFFFD}},
+ {{0xED, 0xB0, 0x80}, {0xFFFD, 0xFFFD, 0xFFFD}},
+ {{0xED, 0xBE, 0x80}, {0xFFFD, 0xFFFD, 0xFFFD}},
+ {{0xED, 0xBF, 0xBF}, {0xFFFD, 0xFFFD, 0xFFFD}},
// Paired surrogates
- {{0xed, 0xa0, 0x80, 0xed, 0xb0, 0x80},
- {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
- {{0xed, 0xa0, 0x80, 0xed, 0xbf, 0xbf},
- {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
- {{0xed, 0xad, 0xbf, 0xed, 0xb0, 0x80},
- {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
- {{0xed, 0xad, 0xbf, 0xed, 0xbf, 0xbf},
- {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
- {{0xed, 0xae, 0x80, 0xed, 0xb0, 0x80},
- {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
- {{0xed, 0xae, 0x80, 0xed, 0xbf, 0xbf},
- {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
- {{0xed, 0xaf, 0xbf, 0xed, 0xb0, 0x80},
- {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
- {{0xed, 0xaf, 0xbf, 0xed, 0xbf, 0xbf},
- {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ {{0xED, 0xA0, 0x80, 0xED, 0xB0, 0x80},
+ {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD}},
+ {{0xED, 0xA0, 0x80, 0xED, 0xBF, 0xBF},
+ {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD}},
+ {{0xED, 0xAD, 0xBF, 0xED, 0xB0, 0x80},
+ {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD}},
+ {{0xED, 0xAD, 0xBF, 0xED, 0xBF, 0xBF},
+ {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD}},
+ {{0xED, 0xAE, 0x80, 0xED, 0xB0, 0x80},
+ {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD}},
+ {{0xED, 0xAE, 0x80, 0xED, 0xBF, 0xBF},
+ {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD}},
+ {{0xED, 0xAF, 0xBF, 0xED, 0xB0, 0x80},
+ {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD}},
+ {{0xED, 0xAF, 0xBF, 0xED, 0xBF, 0xBF},
+ {0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD}},
// Surrogates with the last byte missing.
- {{0xed, 0xa0}, {0xfffd, 0xfffd}},
- {{0xed, 0xa0}, {0xfffd, 0xfffd}},
- {{0xed, 0xad}, {0xfffd, 0xfffd}},
- {{0xed, 0xae}, {0xfffd, 0xfffd}},
- {{0xed, 0xaf}, {0xfffd, 0xfffd}},
- {{0xed, 0xb0}, {0xfffd, 0xfffd}},
- {{0xed, 0xbe}, {0xfffd, 0xfffd}},
- {{0xed, 0xbf}, {0xfffd, 0xfffd}},
+ {{0xED, 0xA0}, {0xFFFD, 0xFFFD}},
+ {{0xED, 0xA0}, {0xFFFD, 0xFFFD}},
+ {{0xED, 0xAD}, {0xFFFD, 0xFFFD}},
+ {{0xED, 0xAE}, {0xFFFD, 0xFFFD}},
+ {{0xED, 0xAF}, {0xFFFD, 0xFFFD}},
+ {{0xED, 0xB0}, {0xFFFD, 0xFFFD}},
+ {{0xED, 0xBE}, {0xFFFD, 0xFFFD}},
+ {{0xED, 0xBF}, {0xFFFD, 0xFFFD}},
// Other non-characters
- {{0xef, 0xbf, 0xbe}, {0xfffe}},
- {{0xef, 0xbf, 0xbf}, {0xffff}},
- {{0xef, 0xb7, 0x90, 0xef, 0xb7, 0x91, 0xef, 0xb7, 0x92, 0xef, 0xb7, 0x93,
- 0xef, 0xb7, 0x94, 0xef, 0xb7, 0x95, 0xef, 0xb7, 0x96, 0xef, 0xb7, 0x97,
- 0xef, 0xb7, 0x98, 0xef, 0xb7, 0x99, 0xef, 0xb7, 0x9a, 0xef, 0xb7, 0x9b,
- 0xef, 0xb7, 0x9c, 0xef, 0xb7, 0x9d, 0xef, 0xb7, 0x9e, 0xef, 0xb7, 0x9f,
- 0xef, 0xb7, 0xa0, 0xef, 0xb7, 0xa1, 0xef, 0xb7, 0xa2, 0xef, 0xb7, 0xa3,
- 0xef, 0xb7, 0xa4, 0xef, 0xb7, 0xa5, 0xef, 0xb7, 0xa6, 0xef, 0xb7, 0xa7,
- 0xef, 0xb7, 0xa8, 0xef, 0xb7, 0xa9, 0xef, 0xb7, 0xaa, 0xef, 0xb7, 0xab,
- 0xef, 0xb7, 0xac, 0xef, 0xb7, 0xad, 0xef, 0xb7, 0xae, 0xef, 0xb7, 0xaf},
- {0xfdd0, 0xfdd1, 0xfdd2, 0xfdd3, 0xfdd4, 0xfdd5, 0xfdd6, 0xfdd7,
- 0xfdd8, 0xfdd9, 0xfdda, 0xfddb, 0xfddc, 0xfddd, 0xfdde, 0xfddf,
- 0xfde0, 0xfde1, 0xfde2, 0xfde3, 0xfde4, 0xfde5, 0xfde6, 0xfde7,
- 0xfde8, 0xfde9, 0xfdea, 0xfdeb, 0xfdec, 0xfded, 0xfdee, 0xfdef}},
- {{0xf0, 0x9f, 0xbf, 0xbe, 0xf0, 0x9f, 0xbf, 0xbf, 0xf0, 0xaf, 0xbf,
- 0xbe, 0xf0, 0xaf, 0xbf, 0xbf, 0xf0, 0xbf, 0xbf, 0xbe, 0xf0, 0xbf,
- 0xbf, 0xbf, 0xf1, 0x8f, 0xbf, 0xbe, 0xf1, 0x8f, 0xbf, 0xbf, 0xf1,
- 0x9f, 0xbf, 0xbe, 0xf1, 0x9f, 0xbf, 0xbf, 0xf1, 0xaf, 0xbf, 0xbe,
- 0xf1, 0xaf, 0xbf, 0xbf, 0xf1, 0xbf, 0xbf, 0xbe, 0xf1, 0xbf, 0xbf,
- 0xbf, 0xf2, 0x8f, 0xbf, 0xbe, 0xf2, 0x8f, 0xbf, 0xbf},
- {0x1fffe, 0x1ffff, 0x2fffe, 0x2ffff, 0x3fffe, 0x3ffff, 0x4fffe, 0x4ffff,
- 0x5fffe, 0x5ffff, 0x6fffe, 0x6ffff, 0x7fffe, 0x7ffff, 0x8fffe,
- 0x8ffff}},
+ {{0xEF, 0xBF, 0xBE}, {0xFFFE}},
+ {{0xEF, 0xBF, 0xBF}, {0xFFFF}},
+ {{0xEF, 0xB7, 0x90, 0xEF, 0xB7, 0x91, 0xEF, 0xB7, 0x92, 0xEF, 0xB7, 0x93,
+ 0xEF, 0xB7, 0x94, 0xEF, 0xB7, 0x95, 0xEF, 0xB7, 0x96, 0xEF, 0xB7, 0x97,
+ 0xEF, 0xB7, 0x98, 0xEF, 0xB7, 0x99, 0xEF, 0xB7, 0x9A, 0xEF, 0xB7, 0x9B,
+ 0xEF, 0xB7, 0x9C, 0xEF, 0xB7, 0x9D, 0xEF, 0xB7, 0x9E, 0xEF, 0xB7, 0x9F,
+ 0xEF, 0xB7, 0xA0, 0xEF, 0xB7, 0xA1, 0xEF, 0xB7, 0xA2, 0xEF, 0xB7, 0xA3,
+ 0xEF, 0xB7, 0xA4, 0xEF, 0xB7, 0xA5, 0xEF, 0xB7, 0xA6, 0xEF, 0xB7, 0xA7,
+ 0xEF, 0xB7, 0xA8, 0xEF, 0xB7, 0xA9, 0xEF, 0xB7, 0xAA, 0xEF, 0xB7, 0xAB,
+ 0xEF, 0xB7, 0xAC, 0xEF, 0xB7, 0xAD, 0xEF, 0xB7, 0xAE, 0xEF, 0xB7, 0xAF},
+ {0xFDD0, 0xFDD1, 0xFDD2, 0xFDD3, 0xFDD4, 0xFDD5, 0xFDD6, 0xFDD7,
+ 0xFDD8, 0xFDD9, 0xFDDA, 0xFDDB, 0xFDDC, 0xFDDD, 0xFDDE, 0xFDDF,
+ 0xFDE0, 0xFDE1, 0xFDE2, 0xFDE3, 0xFDE4, 0xFDE5, 0xFDE6, 0xFDE7,
+ 0xFDE8, 0xFDE9, 0xFDEA, 0xFDEB, 0xFDEC, 0xFDED, 0xFDEE, 0xFDEF}},
+ {{0xF0, 0x9F, 0xBF, 0xBE, 0xF0, 0x9F, 0xBF, 0xBF, 0xF0, 0xAF, 0xBF,
+ 0xBE, 0xF0, 0xAF, 0xBF, 0xBF, 0xF0, 0xBF, 0xBF, 0xBE, 0xF0, 0xBF,
+ 0xBF, 0xBF, 0xF1, 0x8F, 0xBF, 0xBE, 0xF1, 0x8F, 0xBF, 0xBF, 0xF1,
+ 0x9F, 0xBF, 0xBE, 0xF1, 0x9F, 0xBF, 0xBF, 0xF1, 0xAF, 0xBF, 0xBE,
+ 0xF1, 0xAF, 0xBF, 0xBF, 0xF1, 0xBF, 0xBF, 0xBE, 0xF1, 0xBF, 0xBF,
+ 0xBF, 0xF2, 0x8F, 0xBF, 0xBE, 0xF2, 0x8F, 0xBF, 0xBF},
+ {0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE, 0x3FFFF, 0x4FFFE, 0x4FFFF,
+ 0x5FFFE, 0x5FFFF, 0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE,
+ 0x8FFFF}},
};
for (auto test : data) {
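The expected outputs follow the one-replacement-per-maximal-subpart policy that the comments in the table reference. Worked through for one case from the table, the overlong encoding of "/":

// Input: 0xE0 0x80 0xAF
//   0xE0  opens a 3-byte sequence, but its second byte must lie in
//         0xA0..0xBF; 0x80 is out of range, so the maximal subpart is
//         {0xE0} alone          -> U+FFFD
//   0x80  stray continuation    -> U+FFFD
//   0xAF  stray continuation    -> U+FFFD
// giving the expected {0xFFFD, 0xFFFD, 0xFFFD}.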
diff --git a/deps/v8/test/unittests/unittests.gyp b/deps/v8/test/unittests/unittests.gyp
index 575f550871..50e820e5f1 100644
--- a/deps/v8/test/unittests/unittests.gyp
+++ b/deps/v8/test/unittests/unittests.gyp
@@ -8,6 +8,7 @@
'variables': {
'v8_code': 1,
'unittests_sources': [ ### gcmole(all) ###
+ 'allocation-unittest.cc',
'api/access-check-unittest.cc',
'api/exception-unittest.cc',
'api/interceptor-unittest.cc',
@@ -90,6 +91,7 @@
'compiler/schedule-unittest.cc',
'compiler/scheduler-unittest.cc',
'compiler/scheduler-rpo-unittest.cc',
+ 'compiler/simplified-lowering-unittest.cc',
'compiler/simplified-operator-reducer-unittest.cc',
'compiler/simplified-operator-unittest.cc',
'compiler/state-values-utils-unittest.cc',
@@ -158,7 +160,7 @@
'wasm/control-transfer-unittest.cc',
'wasm/decoder-unittest.cc',
'wasm/function-body-decoder-unittest.cc',
- 'wasm/wasm-heap-unittest.cc',
+ 'wasm/wasm-code-manager-unittest.cc',
'wasm/leb-helper-unittest.cc',
'wasm/loop-assignment-analysis-unittest.cc',
'wasm/module-decoder-unittest.cc',
diff --git a/deps/v8/test/unittests/unittests.isolate b/deps/v8/test/unittests/unittests.isolate
index ae503bf9bf..ec3bae42d3 100644
--- a/deps/v8/test/unittests/unittests.isolate
+++ b/deps/v8/test/unittests/unittests.isolate
@@ -5,6 +5,7 @@
'variables': {
'files': [
'<(PRODUCT_DIR)/unittests<(EXECUTABLE_SUFFIX)',
+ './testcfg.py',
'./unittests.status',
],
},
@@ -12,4 +13,4 @@
'../../src/base.isolate',
'../../tools/testrunner/testrunner.isolate',
],
-}
\ No newline at end of file
+}
diff --git a/deps/v8/test/unittests/value-serializer-unittest.cc b/deps/v8/test/unittests/value-serializer-unittest.cc
index b3e656e917..c143b58090 100644
--- a/deps/v8/test/unittests/value-serializer-unittest.cc
+++ b/deps/v8/test/unittests/value-serializer-unittest.cc
@@ -289,11 +289,11 @@ class ValueSerializerTest : public TestWithIsolate {
TEST_F(ValueSerializerTest, DecodeInvalid) {
// Version tag but no content.
- InvalidDecodeTest({0xff});
+ InvalidDecodeTest({0xFF});
// Version too large.
- InvalidDecodeTest({0xff, 0x7f, 0x5f});
+ InvalidDecodeTest({0xFF, 0x7F, 0x5F});
// Nonsense tag.
- InvalidDecodeTest({0xff, 0x09, 0xdd});
+ InvalidDecodeTest({0xFF, 0x09, 0xDD});
}
TEST_F(ValueSerializerTest, RoundTripOddball) {
@@ -309,27 +309,27 @@ TEST_F(ValueSerializerTest, RoundTripOddball) {
TEST_F(ValueSerializerTest, DecodeOddball) {
// What this code is expected to generate.
- DecodeTest({0xff, 0x09, 0x5f},
+ DecodeTest({0xFF, 0x09, 0x5F},
[](Local<Value> value) { EXPECT_TRUE(value->IsUndefined()); });
- DecodeTest({0xff, 0x09, 0x54},
+ DecodeTest({0xFF, 0x09, 0x54},
[](Local<Value> value) { EXPECT_TRUE(value->IsTrue()); });
- DecodeTest({0xff, 0x09, 0x46},
+ DecodeTest({0xFF, 0x09, 0x46},
[](Local<Value> value) { EXPECT_TRUE(value->IsFalse()); });
- DecodeTest({0xff, 0x09, 0x30},
+ DecodeTest({0xFF, 0x09, 0x30},
[](Local<Value> value) { EXPECT_TRUE(value->IsNull()); });
// What v9 of the Blink code generates.
- DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x5f, 0x00},
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x5F, 0x00},
[](Local<Value> value) { EXPECT_TRUE(value->IsUndefined()); });
- DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x54, 0x00},
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x54, 0x00},
[](Local<Value> value) { EXPECT_TRUE(value->IsTrue()); });
- DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x46, 0x00},
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x46, 0x00},
[](Local<Value> value) { EXPECT_TRUE(value->IsFalse()); });
- DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x30, 0x00},
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x30, 0x00},
[](Local<Value> value) { EXPECT_TRUE(value->IsNull()); });
// v0 (with no explicit version).
- DecodeTest({0x5f, 0x00},
+ DecodeTest({0x5F, 0x00},
[](Local<Value> value) { EXPECT_TRUE(value->IsUndefined()); });
DecodeTest({0x54, 0x00},
[](Local<Value> value) { EXPECT_TRUE(value->IsTrue()); });
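A decoding key helps when reading these byte strings. Inferred from the test cases themselves rather than quoted from a specification: each stream opens with 0xFF and a varint format version, and most tags are ASCII mnemonics.

// 0xFF      version header         0x3F '?'  verification tag + count
//                                            (seen in the Blink v9 streams)
// 0x5F '_'  undefined              0x54 'T'  true
// 0x46 'F'  false                  0x30 '0'  null
// 0x49 'I'  zigzag int32           0x55 'U'  varint uint32
// 0x4E 'N'  IEEE-754 double        0x53 'S'  UTF-8 string
// 0x22 '"'  one-byte string        0x63 'c'  two-byte string
// 0x6F 'o'  begin object           0x7B '{'  end object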
@@ -376,44 +376,40 @@ TEST_F(ValueSerializerTest, RoundTripNumber) {
TEST_F(ValueSerializerTest, DecodeNumber) {
// 42 zig-zag encoded (signed)
- DecodeTest({0xff, 0x09, 0x49, 0x54},
- [](Local<Value> value) {
- ASSERT_TRUE(value->IsInt32());
- EXPECT_EQ(42, Int32::Cast(*value)->Value());
- });
+ DecodeTest({0xFF, 0x09, 0x49, 0x54}, [](Local<Value> value) {
+ ASSERT_TRUE(value->IsInt32());
+ EXPECT_EQ(42, Int32::Cast(*value)->Value());
+ });
// 42 varint encoded (unsigned)
- DecodeTest({0xff, 0x09, 0x55, 0x2a},
- [](Local<Value> value) {
- ASSERT_TRUE(value->IsInt32());
- EXPECT_EQ(42, Int32::Cast(*value)->Value());
- });
+ DecodeTest({0xFF, 0x09, 0x55, 0x2A}, [](Local<Value> value) {
+ ASSERT_TRUE(value->IsInt32());
+ EXPECT_EQ(42, Int32::Cast(*value)->Value());
+ });
// 160 zig-zag encoded (signed)
- DecodeTest({0xff, 0x09, 0x49, 0xc0, 0x02},
- [](Local<Value> value) {
- ASSERT_TRUE(value->IsInt32());
- ASSERT_EQ(160, Int32::Cast(*value)->Value());
- });
+ DecodeTest({0xFF, 0x09, 0x49, 0xC0, 0x02}, [](Local<Value> value) {
+ ASSERT_TRUE(value->IsInt32());
+ ASSERT_EQ(160, Int32::Cast(*value)->Value());
+ });
// 160 varint encoded (unsigned)
- DecodeTest({0xff, 0x09, 0x55, 0xa0, 0x01},
- [](Local<Value> value) {
- ASSERT_TRUE(value->IsInt32());
- ASSERT_EQ(160, Int32::Cast(*value)->Value());
- });
+ DecodeTest({0xFF, 0x09, 0x55, 0xA0, 0x01}, [](Local<Value> value) {
+ ASSERT_TRUE(value->IsInt32());
+ ASSERT_EQ(160, Int32::Cast(*value)->Value());
+ });
#if defined(V8_TARGET_LITTLE_ENDIAN)
// IEEE 754 doubles, little-endian byte order
- DecodeTest({0xff, 0x09, 0x4e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd0, 0xbf},
+ DecodeTest({0xFF, 0x09, 0x4E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD0, 0xBF},
[](Local<Value> value) {
ASSERT_TRUE(value->IsNumber());
EXPECT_EQ(-0.25, Number::Cast(*value)->Value());
});
// quiet NaN
- DecodeTest({0xff, 0x09, 0x4e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x7f},
+ DecodeTest({0xFF, 0x09, 0x4E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF8, 0x7F},
[](Local<Value> value) {
ASSERT_TRUE(value->IsNumber());
EXPECT_TRUE(std::isnan(Number::Cast(*value)->Value()));
});
// signaling NaN
- DecodeTest({0xff, 0x09, 0x4e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf4, 0x7f},
+ DecodeTest({0xFF, 0x09, 0x4E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF4, 0x7F},
[](Local<Value> value) {
ASSERT_TRUE(value->IsNumber());
EXPECT_TRUE(std::isnan(Number::Cast(*value)->Value()));
@@ -458,24 +454,23 @@ TEST_F(ValueSerializerTest, RoundTripString) {
TEST_F(ValueSerializerTest, DecodeString) {
// Decoding the strings above from UTF-8.
- DecodeTest({0xff, 0x09, 0x53, 0x00},
- [](Local<Value> value) {
- ASSERT_TRUE(value->IsString());
- EXPECT_EQ(0, String::Cast(*value)->Length());
- });
- DecodeTest({0xff, 0x09, 0x53, 0x05, 'H', 'e', 'l', 'l', 'o'},
+ DecodeTest({0xFF, 0x09, 0x53, 0x00}, [](Local<Value> value) {
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(0, String::Cast(*value)->Length());
+ });
+ DecodeTest({0xFF, 0x09, 0x53, 0x05, 'H', 'e', 'l', 'l', 'o'},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsString());
EXPECT_EQ(5, String::Cast(*value)->Length());
EXPECT_EQ(kHelloString, Utf8Value(value));
});
- DecodeTest({0xff, 0x09, 0x53, 0x07, 'Q', 'u', 0xc3, 0xa9, 'b', 'e', 'c'},
+ DecodeTest({0xFF, 0x09, 0x53, 0x07, 'Q', 'u', 0xC3, 0xA9, 'b', 'e', 'c'},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsString());
EXPECT_EQ(6, String::Cast(*value)->Length());
EXPECT_EQ(kQuebecString, Utf8Value(value));
});
- DecodeTest({0xff, 0x09, 0x53, 0x04, 0xf0, 0x9f, 0x91, 0x8a},
+ DecodeTest({0xFF, 0x09, 0x53, 0x04, 0xF0, 0x9F, 0x91, 0x8A},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsString());
EXPECT_EQ(2, String::Cast(*value)->Length());
@@ -483,17 +478,17 @@ TEST_F(ValueSerializerTest, DecodeString) {
});
// And from Latin-1 (for the ones that fit).
- DecodeTest({0xff, 0x0a, 0x22, 0x00}, [](Local<Value> value) {
+ DecodeTest({0xFF, 0x0A, 0x22, 0x00}, [](Local<Value> value) {
ASSERT_TRUE(value->IsString());
EXPECT_EQ(0, String::Cast(*value)->Length());
});
- DecodeTest({0xff, 0x0a, 0x22, 0x05, 'H', 'e', 'l', 'l', 'o'},
+ DecodeTest({0xFF, 0x0A, 0x22, 0x05, 'H', 'e', 'l', 'l', 'o'},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsString());
EXPECT_EQ(5, String::Cast(*value)->Length());
EXPECT_EQ(kHelloString, Utf8Value(value));
});
- DecodeTest({0xff, 0x0a, 0x22, 0x06, 'Q', 'u', 0xe9, 'b', 'e', 'c'},
+ DecodeTest({0xFF, 0x0A, 0x22, 0x06, 'Q', 'u', 0xE9, 'b', 'e', 'c'},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsString());
EXPECT_EQ(6, String::Cast(*value)->Length());
@@ -502,26 +497,25 @@ TEST_F(ValueSerializerTest, DecodeString) {
// And from two-byte strings (endianness dependent).
#if defined(V8_TARGET_LITTLE_ENDIAN)
- DecodeTest({0xff, 0x09, 0x63, 0x00},
- [](Local<Value> value) {
- ASSERT_TRUE(value->IsString());
- EXPECT_EQ(0, String::Cast(*value)->Length());
- });
- DecodeTest({0xff, 0x09, 0x63, 0x0a, 'H', '\0', 'e', '\0', 'l', '\0', 'l',
+ DecodeTest({0xFF, 0x09, 0x63, 0x00}, [](Local<Value> value) {
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(0, String::Cast(*value)->Length());
+ });
+ DecodeTest({0xFF, 0x09, 0x63, 0x0A, 'H', '\0', 'e', '\0', 'l', '\0', 'l',
'\0', 'o', '\0'},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsString());
EXPECT_EQ(5, String::Cast(*value)->Length());
EXPECT_EQ(kHelloString, Utf8Value(value));
});
- DecodeTest({0xff, 0x09, 0x63, 0x0c, 'Q', '\0', 'u', '\0', 0xe9, '\0', 'b',
+ DecodeTest({0xFF, 0x09, 0x63, 0x0C, 'Q', '\0', 'u', '\0', 0xE9, '\0', 'b',
'\0', 'e', '\0', 'c', '\0'},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsString());
EXPECT_EQ(6, String::Cast(*value)->Length());
EXPECT_EQ(kQuebecString, Utf8Value(value));
});
- DecodeTest({0xff, 0x09, 0x63, 0x04, 0x3d, 0xd8, 0x4a, 0xdc},
+ DecodeTest({0xFF, 0x09, 0x63, 0x04, 0x3D, 0xD8, 0x4A, 0xDC},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsString());
EXPECT_EQ(2, String::Cast(*value)->Length());
@@ -533,14 +527,14 @@ TEST_F(ValueSerializerTest, DecodeString) {
TEST_F(ValueSerializerTest, DecodeInvalidString) {
// UTF-8 string with too few bytes available.
- InvalidDecodeTest({0xff, 0x09, 0x53, 0x10, 'v', '8'});
+ InvalidDecodeTest({0xFF, 0x09, 0x53, 0x10, 'v', '8'});
// One-byte string with too few bytes available.
- InvalidDecodeTest({0xff, 0x0a, 0x22, 0x10, 'v', '8'});
+ InvalidDecodeTest({0xFF, 0x0A, 0x22, 0x10, 'v', '8'});
#if defined(V8_TARGET_LITTLE_ENDIAN)
// Two-byte string with too few bytes available.
- InvalidDecodeTest({0xff, 0x09, 0x63, 0x10, 'v', '\0', '8', '\0'});
+ InvalidDecodeTest({0xFF, 0x09, 0x63, 0x10, 'v', '\0', '8', '\0'});
// Two-byte string with an odd byte length.
- InvalidDecodeTest({0xff, 0x09, 0x63, 0x03, 'v', '\0', '8'});
+ InvalidDecodeTest({0xFF, 0x09, 0x63, 0x03, 'v', '\0', '8'});
#endif
// TODO(jbroman): The same for big-endian systems.
}
@@ -565,9 +559,9 @@ TEST_F(ValueSerializerTest, EncodeTwoByteStringUsesPadding) {
// what that value may be.
const uint8_t expected_prefix[] = {0x00, 0x63, 0x94, 0x03};
ASSERT_GT(data.size(), sizeof(expected_prefix) + 2);
- EXPECT_EQ(0xff, data[0]);
+ EXPECT_EQ(0xFF, data[0]);
EXPECT_GE(data[1], 0x09);
- EXPECT_LE(data[1], 0x7f);
+ EXPECT_LE(data[1], 0x7F);
EXPECT_TRUE(std::equal(std::begin(expected_prefix),
std::end(expected_prefix), data.begin() + 2));
});
@@ -630,7 +624,7 @@ TEST_F(ValueSerializerTest, RoundTripDictionaryObject) {
TEST_F(ValueSerializerTest, DecodeDictionaryObject) {
// Empty object.
- DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x6f, 0x7b, 0x00, 0x00},
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x7B, 0x00, 0x00},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsObject());
EXPECT_TRUE(EvaluateScriptForResultBool(
@@ -640,8 +634,8 @@ TEST_F(ValueSerializerTest, DecodeDictionaryObject) {
});
// String key.
DecodeTest(
- {0xff, 0x09, 0x3f, 0x00, 0x6f, 0x3f, 0x01, 0x53, 0x01, 0x61, 0x3f, 0x01,
- 0x49, 0x54, 0x7b, 0x01},
+ {0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53, 0x01, 0x61, 0x3F, 0x01,
+ 0x49, 0x54, 0x7B, 0x01},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsObject());
EXPECT_TRUE(EvaluateScriptForResultBool("result.hasOwnProperty('a')"));
@@ -651,8 +645,8 @@ TEST_F(ValueSerializerTest, DecodeDictionaryObject) {
});
// Integer key (treated as a string, but may be encoded differently).
DecodeTest(
- {0xff, 0x09, 0x3f, 0x00, 0x6f, 0x3f, 0x01, 0x49, 0x54, 0x3f, 0x01, 0x53,
- 0x01, 0x61, 0x7b, 0x01},
+ {0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x49, 0x54, 0x3F, 0x01, 0x53,
+ 0x01, 0x61, 0x7B, 0x01},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsObject());
EXPECT_TRUE(EvaluateScriptForResultBool("result.hasOwnProperty('42')"));
@@ -662,20 +656,20 @@ TEST_F(ValueSerializerTest, DecodeDictionaryObject) {
});
// Key order must be preserved.
DecodeTest(
- {0xff, 0x09, 0x3f, 0x00, 0x6f, 0x3f, 0x01, 0x53, 0x01, 0x78, 0x3f, 0x01,
- 0x49, 0x02, 0x3f, 0x01, 0x53, 0x01, 0x79, 0x3f, 0x01, 0x49, 0x04, 0x3f,
- 0x01, 0x53, 0x01, 0x61, 0x3f, 0x01, 0x49, 0x06, 0x7b, 0x03},
+ {0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53, 0x01, 0x78, 0x3F, 0x01,
+ 0x49, 0x02, 0x3F, 0x01, 0x53, 0x01, 0x79, 0x3F, 0x01, 0x49, 0x04, 0x3F,
+ 0x01, 0x53, 0x01, 0x61, 0x3F, 0x01, 0x49, 0x06, 0x7B, 0x03},
[this](Local<Value> value) {
EXPECT_TRUE(EvaluateScriptForResultBool(
"Object.getOwnPropertyNames(result).toString() === 'x,y,a'"));
});
// A harder case of enumeration order.
DecodeTest(
- {0xff, 0x09, 0x3f, 0x00, 0x6f, 0x3f, 0x01, 0x49, 0x02, 0x3f, 0x01,
- 0x49, 0x00, 0x3f, 0x01, 0x55, 0xfe, 0xff, 0xff, 0xff, 0x0f, 0x3f,
- 0x01, 0x49, 0x06, 0x3f, 0x01, 0x53, 0x01, 0x61, 0x3f, 0x01, 0x49,
- 0x04, 0x3f, 0x01, 0x53, 0x0a, 0x34, 0x32, 0x39, 0x34, 0x39, 0x36,
- 0x37, 0x32, 0x39, 0x35, 0x3f, 0x01, 0x49, 0x02, 0x7b, 0x04},
+ {0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x49, 0x02, 0x3F, 0x01,
+ 0x49, 0x00, 0x3F, 0x01, 0x55, 0xFE, 0xFF, 0xFF, 0xFF, 0x0F, 0x3F,
+ 0x01, 0x49, 0x06, 0x3F, 0x01, 0x53, 0x01, 0x61, 0x3F, 0x01, 0x49,
+ 0x04, 0x3F, 0x01, 0x53, 0x0A, 0x34, 0x32, 0x39, 0x34, 0x39, 0x36,
+ 0x37, 0x32, 0x39, 0x35, 0x3F, 0x01, 0x49, 0x02, 0x7B, 0x04},
[this](Local<Value> value) {
EXPECT_TRUE(EvaluateScriptForResultBool(
"Object.getOwnPropertyNames(result).toString() === "
@@ -689,8 +683,8 @@ TEST_F(ValueSerializerTest, DecodeDictionaryObject) {
// before its properties are deserialized, so that references to it can be
// resolved.
DecodeTest(
- {0xff, 0x09, 0x3f, 0x00, 0x6f, 0x3f, 0x01, 0x53, 0x04, 0x73,
- 0x65, 0x6c, 0x66, 0x3f, 0x01, 0x5e, 0x00, 0x7b, 0x01, 0x00},
+ {0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53, 0x04, 0x73,
+ 0x65, 0x6C, 0x66, 0x3F, 0x01, 0x5E, 0x00, 0x7B, 0x01, 0x00},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsObject());
EXPECT_TRUE(EvaluateScriptForResultBool("result === result.self"));
@@ -702,7 +696,7 @@ TEST_F(ValueSerializerTest, InvalidDecodeObjectWithInvalidKeyType) {
// object keys. The serializer would have obtained them from the own property
// keys list, which should only contain names and indices.
InvalidDecodeTest(
- {0xff, 0x09, 0x6f, 0x61, 0x00, 0x40, 0x00, 0x00, 0x7b, 0x01});
+ {0xFF, 0x09, 0x6F, 0x61, 0x00, 0x40, 0x00, 0x00, 0x7B, 0x01});
}
TEST_F(ValueSerializerTest, RoundTripOnlyOwnEnumerableStringKeys) {
@@ -820,8 +814,7 @@ TEST_F(ValueSerializerTest, RoundTripDictionaryObjectForTransitions) {
TEST_F(ValueSerializerTest, DecodeDictionaryObjectVersion0) {
// Empty object.
DecodeTestForVersion0(
- {0x7b, 0x00},
- [this](Local<Value> value) {
+ {0x7B, 0x00}, [this](Local<Value> value) {
ASSERT_TRUE(value->IsObject());
EXPECT_TRUE(EvaluateScriptForResultBool(
"Object.getPrototypeOf(result) === Object.prototype"));
@@ -830,7 +823,7 @@ TEST_F(ValueSerializerTest, DecodeDictionaryObjectVersion0) {
});
// String key.
DecodeTestForVersion0(
- {0x53, 0x01, 0x61, 0x49, 0x54, 0x7b, 0x01, 0x00},
+ {0x53, 0x01, 0x61, 0x49, 0x54, 0x7B, 0x01, 0x00},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsObject());
EXPECT_TRUE(EvaluateScriptForResultBool(
@@ -842,7 +835,7 @@ TEST_F(ValueSerializerTest, DecodeDictionaryObjectVersion0) {
});
// Integer key (treated as a string, but may be encoded differently).
DecodeTestForVersion0(
- {0x49, 0x54, 0x53, 0x01, 0x61, 0x7b, 0x01, 0x00},
+ {0x49, 0x54, 0x53, 0x01, 0x61, 0x7B, 0x01, 0x00},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsObject());
EXPECT_TRUE(EvaluateScriptForResultBool("result.hasOwnProperty('42')"));
@@ -853,14 +846,14 @@ TEST_F(ValueSerializerTest, DecodeDictionaryObjectVersion0) {
// Key order must be preserved.
DecodeTestForVersion0(
{0x53, 0x01, 0x78, 0x49, 0x02, 0x53, 0x01, 0x79, 0x49, 0x04, 0x53, 0x01,
- 0x61, 0x49, 0x06, 0x7b, 0x03, 0x00},
+ 0x61, 0x49, 0x06, 0x7B, 0x03, 0x00},
[this](Local<Value> value) {
EXPECT_TRUE(EvaluateScriptForResultBool(
"Object.getOwnPropertyNames(result).toString() === 'x,y,a'"));
});
// A property and an element.
DecodeTestForVersion0(
- {0x49, 0x54, 0x53, 0x01, 0x61, 0x53, 0x01, 0x61, 0x49, 0x54, 0x7b, 0x02},
+ {0x49, 0x54, 0x53, 0x01, 0x61, 0x53, 0x01, 0x61, 0x49, 0x54, 0x7B, 0x02},
[this](Local<Value> value) {
EXPECT_TRUE(EvaluateScriptForResultBool(
"Object.getOwnPropertyNames(result).toString() === '42,a'"));
@@ -954,9 +947,9 @@ TEST_F(ValueSerializerTest, RoundTripArray) {
TEST_F(ValueSerializerTest, DecodeArray) {
// A simple array of integers.
- DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x41, 0x05, 0x3f, 0x01, 0x49, 0x02,
- 0x3f, 0x01, 0x49, 0x04, 0x3f, 0x01, 0x49, 0x06, 0x3f, 0x01,
- 0x49, 0x08, 0x3f, 0x01, 0x49, 0x0a, 0x24, 0x00, 0x05, 0x00},
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x41, 0x05, 0x3F, 0x01, 0x49, 0x02,
+ 0x3F, 0x01, 0x49, 0x04, 0x3F, 0x01, 0x49, 0x06, 0x3F, 0x01,
+ 0x49, 0x08, 0x3F, 0x01, 0x49, 0x0A, 0x24, 0x00, 0x05, 0x00},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsArray());
EXPECT_EQ(5u, Array::Cast(*value)->Length());
@@ -966,8 +959,8 @@ TEST_F(ValueSerializerTest, DecodeArray) {
"result.toString() === '1,2,3,4,5'"));
});
// A long (sparse) array.
- DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x61, 0xe8, 0x07, 0x3f, 0x01, 0x49,
- 0xe8, 0x07, 0x3f, 0x01, 0x49, 0x54, 0x40, 0x01, 0xe8, 0x07},
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x61, 0xE8, 0x07, 0x3F, 0x01, 0x49,
+ 0xE8, 0x07, 0x3F, 0x01, 0x49, 0x54, 0x40, 0x01, 0xE8, 0x07},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsArray());
EXPECT_EQ(1000u, Array::Cast(*value)->Length());
@@ -975,8 +968,8 @@ TEST_F(ValueSerializerTest, DecodeArray) {
});
// Duplicate reference.
DecodeTest(
- {0xff, 0x09, 0x3f, 0x00, 0x41, 0x02, 0x3f, 0x01, 0x6f, 0x7b, 0x00, 0x3f,
- 0x02, 0x5e, 0x01, 0x24, 0x00, 0x02},
+ {0xFF, 0x09, 0x3F, 0x00, 0x41, 0x02, 0x3F, 0x01, 0x6F, 0x7B, 0x00, 0x3F,
+ 0x02, 0x5E, 0x01, 0x24, 0x00, 0x02},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsArray());
ASSERT_EQ(2u, Array::Cast(*value)->Length());
@@ -984,9 +977,9 @@ TEST_F(ValueSerializerTest, DecodeArray) {
});
// Duplicate reference in a sparse array.
DecodeTest(
- {0xff, 0x09, 0x3f, 0x00, 0x61, 0xe8, 0x07, 0x3f, 0x01, 0x49,
- 0x02, 0x3f, 0x01, 0x6f, 0x7b, 0x00, 0x3f, 0x02, 0x49, 0xe8,
- 0x07, 0x3f, 0x02, 0x5e, 0x01, 0x40, 0x02, 0xe8, 0x07, 0x00},
+ {0xFF, 0x09, 0x3F, 0x00, 0x61, 0xE8, 0x07, 0x3F, 0x01, 0x49,
+ 0x02, 0x3F, 0x01, 0x6F, 0x7B, 0x00, 0x3F, 0x02, 0x49, 0xE8,
+ 0x07, 0x3F, 0x02, 0x5E, 0x01, 0x40, 0x02, 0xE8, 0x07, 0x00},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsArray());
ASSERT_EQ(1000u, Array::Cast(*value)->Length());
@@ -995,7 +988,7 @@ TEST_F(ValueSerializerTest, DecodeArray) {
EXPECT_TRUE(EvaluateScriptForResultBool("result[1] === result[500]"));
});
// Self reference.
- DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x41, 0x01, 0x3f, 0x01, 0x5e, 0x00, 0x24,
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x41, 0x01, 0x3F, 0x01, 0x5E, 0x00, 0x24,
0x00, 0x01, 0x00},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsArray());
@@ -1004,8 +997,8 @@ TEST_F(ValueSerializerTest, DecodeArray) {
});
// Self reference in a sparse array.
DecodeTest(
- {0xff, 0x09, 0x3f, 0x00, 0x61, 0xe8, 0x07, 0x3f, 0x01, 0x49,
- 0x8e, 0x08, 0x3f, 0x01, 0x5e, 0x00, 0x40, 0x01, 0xe8, 0x07},
+ {0xFF, 0x09, 0x3F, 0x00, 0x61, 0xE8, 0x07, 0x3F, 0x01, 0x49,
+ 0x8E, 0x08, 0x3F, 0x01, 0x5E, 0x00, 0x40, 0x01, 0xE8, 0x07},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsArray());
ASSERT_EQ(1000u, Array::Cast(*value)->Length());
@@ -1013,8 +1006,8 @@ TEST_F(ValueSerializerTest, DecodeArray) {
});
// Array with additional properties.
DecodeTest(
- {0xff, 0x09, 0x3f, 0x00, 0x41, 0x02, 0x3f, 0x01, 0x49, 0x02, 0x3f,
- 0x01, 0x49, 0x04, 0x3f, 0x01, 0x53, 0x03, 0x66, 0x6f, 0x6f, 0x3f,
+ {0xFF, 0x09, 0x3F, 0x00, 0x41, 0x02, 0x3F, 0x01, 0x49, 0x02, 0x3F,
+ 0x01, 0x49, 0x04, 0x3F, 0x01, 0x53, 0x03, 0x66, 0x6F, 0x6F, 0x3F,
0x01, 0x53, 0x03, 0x62, 0x61, 0x72, 0x24, 0x01, 0x02, 0x00},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsArray());
@@ -1023,9 +1016,9 @@ TEST_F(ValueSerializerTest, DecodeArray) {
EXPECT_TRUE(EvaluateScriptForResultBool("result.foo === 'bar'"));
});
// Sparse array with additional properties.
- DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x61, 0xe8, 0x07, 0x3f, 0x01,
- 0x53, 0x03, 0x66, 0x6f, 0x6f, 0x3f, 0x01, 0x53, 0x03,
- 0x62, 0x61, 0x72, 0x40, 0x01, 0xe8, 0x07, 0x00},
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x61, 0xE8, 0x07, 0x3F, 0x01,
+ 0x53, 0x03, 0x66, 0x6F, 0x6F, 0x3F, 0x01, 0x53, 0x03,
+ 0x62, 0x61, 0x72, 0x40, 0x01, 0xE8, 0x07, 0x00},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsArray());
ASSERT_EQ(1000u, Array::Cast(*value)->Length());
@@ -1037,7 +1030,7 @@ TEST_F(ValueSerializerTest, DecodeArray) {
// Note that since the previous output from Chrome fails this test, an
// encoding using the sparse format was constructed instead.
DecodeTest(
- {0xff, 0x09, 0x61, 0x02, 0x49, 0x02, 0x5f, 0x40, 0x01, 0x02},
+ {0xFF, 0x09, 0x61, 0x02, 0x49, 0x02, 0x5F, 0x40, 0x01, 0x02},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsArray());
ASSERT_EQ(2u, Array::Cast(*value)->Length());
@@ -1053,9 +1046,9 @@ TEST_F(ValueSerializerTest, DecodeArray) {
TEST_F(ValueSerializerTest, DecodeInvalidOverLargeArray) {
// So large it couldn't exist in the V8 heap, and its size couldn't fit in a
// SMI on 32-bit systems (2^30).
- InvalidDecodeTest({0xff, 0x09, 0x41, 0x80, 0x80, 0x80, 0x80, 0x04});
+ InvalidDecodeTest({0xFF, 0x09, 0x41, 0x80, 0x80, 0x80, 0x80, 0x04});
// Not so large, but there isn't enough data left in the buffer.
- InvalidDecodeTest({0xff, 0x09, 0x41, 0x01});
+ InvalidDecodeTest({0xFF, 0x09, 0x41, 0x01});
}
TEST_F(ValueSerializerTest, RoundTripArrayWithNonEnumerableElement) {
@@ -1245,7 +1238,7 @@ TEST_F(ValueSerializerTest, DecodeSparseArrayVersion0) {
DecodeTestForVersion0(
{0x55, 0x00, 0x53, 0x01, 'a', 0x55, 0x02, 0x55, 0x05, 0x53,
0x03, 'f', 'o', 'o', 0x53, 0x03, 'b', 'a', 'r', 0x53,
- 0x03, 'b', 'a', 'z', 0x49, 0x0b, 0x40, 0x04, 0x03, 0x00},
+ 0x03, 'b', 'a', 'z', 0x49, 0x0B, 0x40, 0x04, 0x03, 0x00},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsArray());
EXPECT_EQ(3u, Array::Cast(*value)->Length());
@@ -1282,17 +1275,17 @@ TEST_F(ValueSerializerTest, RoundTripDenseArrayContainingUndefined) {
TEST_F(ValueSerializerTest, DecodeDenseArrayContainingUndefined) {
// In previous versions, "undefined" in a dense array signified absence of the
// element (for compatibility). In new versions, it has a separate encoding.
- DecodeTest({0xff, 0x09, 0x41, 0x01, 0x5f, 0x24, 0x00, 0x01},
+ DecodeTest({0xFF, 0x09, 0x41, 0x01, 0x5F, 0x24, 0x00, 0x01},
[this](Local<Value> value) {
EXPECT_TRUE(EvaluateScriptForResultBool("!(0 in result)"));
});
DecodeTest(
- {0xff, 0x0b, 0x41, 0x01, 0x5f, 0x24, 0x00, 0x01},
+ {0xFF, 0x0B, 0x41, 0x01, 0x5F, 0x24, 0x00, 0x01},
[this](Local<Value> value) {
EXPECT_TRUE(EvaluateScriptForResultBool("0 in result"));
EXPECT_TRUE(EvaluateScriptForResultBool("result[0] === undefined"));
});
- DecodeTest({0xff, 0x0b, 0x41, 0x01, 0x2d, 0x24, 0x00, 0x01},
+ DecodeTest({0xFF, 0x0B, 0x41, 0x01, 0x2D, 0x24, 0x00, 0x01},
[this](Local<Value> value) {
EXPECT_TRUE(EvaluateScriptForResultBool("!(0 in result)"));
});
@@ -1324,29 +1317,29 @@ TEST_F(ValueSerializerTest, RoundTripDate) {
TEST_F(ValueSerializerTest, DecodeDate) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
- DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x44, 0x00, 0x00, 0x00, 0x00, 0x80, 0x84,
- 0x2e, 0x41, 0x00},
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x44, 0x00, 0x00, 0x00, 0x00, 0x80, 0x84,
+ 0x2E, 0x41, 0x00},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsDate());
EXPECT_EQ(1e6, Date::Cast(*value)->ValueOf());
EXPECT_TRUE(EvaluateScriptForResultBool(
"Object.getPrototypeOf(result) === Date.prototype"));
});
- DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x44, 0x00, 0x00, 0x20, 0x45, 0x27, 0x89,
- 0x87, 0xc2, 0x00},
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x44, 0x00, 0x00, 0x20, 0x45, 0x27, 0x89,
+ 0x87, 0xC2, 0x00},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsDate());
EXPECT_TRUE(EvaluateScriptForResultBool(
"result.toISOString() === '1867-07-01T00:00:00.000Z'"));
});
- DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0xf8, 0x7f, 0x00},
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xF8, 0x7F, 0x00},
[](Local<Value> value) {
ASSERT_TRUE(value->IsDate());
EXPECT_TRUE(std::isnan(Date::Cast(*value)->ValueOf()));
});
#else
- DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x44, 0x41, 0x2e, 0x84, 0x80, 0x00, 0x00,
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x44, 0x41, 0x2E, 0x84, 0x80, 0x00, 0x00,
0x00, 0x00, 0x00},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsDate());
@@ -1354,14 +1347,14 @@ TEST_F(ValueSerializerTest, DecodeDate) {
EXPECT_TRUE(EvaluateScriptForResultBool(
"Object.getPrototypeOf(result) === Date.prototype"));
});
- DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x44, 0xc2, 0x87, 0x89, 0x27, 0x45, 0x20,
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x44, 0xC2, 0x87, 0x89, 0x27, 0x45, 0x20,
0x00, 0x00, 0x00},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsDate());
EXPECT_TRUE(EvaluateScriptForResultBool(
"result.toISOString() === '1867-07-01T00:00:00.000Z'"));
});
- DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x44, 0x7f, 0xf8, 0x00, 0x00, 0x00, 0x00,
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x44, 0x7F, 0xF8, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00},
[](Local<Value> value) {
ASSERT_TRUE(value->IsDate());
@@ -1369,9 +1362,9 @@ TEST_F(ValueSerializerTest, DecodeDate) {
});
#endif
DecodeTest(
- {0xff, 0x09, 0x3f, 0x00, 0x6f, 0x3f, 0x01, 0x53, 0x01, 0x61, 0x3f,
- 0x01, 0x44, 0x00, 0x20, 0x39, 0x50, 0x37, 0x6a, 0x75, 0x42, 0x3f,
- 0x02, 0x53, 0x01, 0x62, 0x3f, 0x02, 0x5e, 0x01, 0x7b, 0x02},
+ {0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53, 0x01, 0x61, 0x3F,
+ 0x01, 0x44, 0x00, 0x20, 0x39, 0x50, 0x37, 0x6A, 0x75, 0x42, 0x3F,
+ 0x02, 0x53, 0x01, 0x62, 0x3F, 0x02, 0x5E, 0x01, 0x7B, 0x02},
[this](Local<Value> value) {
EXPECT_TRUE(EvaluateScriptForResultBool("result.a instanceof Date"));
EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
@@ -1440,37 +1433,35 @@ TEST_F(ValueSerializerTest, RejectsOtherValueObjects) {
TEST_F(ValueSerializerTest, DecodeValueObjects) {
DecodeTest(
- {0xff, 0x09, 0x3f, 0x00, 0x79, 0x00},
- [this](Local<Value> value) {
+ {0xFF, 0x09, 0x3F, 0x00, 0x79, 0x00}, [this](Local<Value> value) {
EXPECT_TRUE(EvaluateScriptForResultBool(
"Object.getPrototypeOf(result) === Boolean.prototype"));
EXPECT_TRUE(EvaluateScriptForResultBool("result.valueOf() === true"));
});
DecodeTest(
- {0xff, 0x09, 0x3f, 0x00, 0x78, 0x00},
- [this](Local<Value> value) {
+ {0xFF, 0x09, 0x3F, 0x00, 0x78, 0x00}, [this](Local<Value> value) {
EXPECT_TRUE(EvaluateScriptForResultBool(
"Object.getPrototypeOf(result) === Boolean.prototype"));
EXPECT_TRUE(EvaluateScriptForResultBool("result.valueOf() === false"));
});
DecodeTest(
- {0xff, 0x09, 0x3f, 0x00, 0x6f, 0x3f, 0x01, 0x53, 0x01, 0x61, 0x3f, 0x01,
- 0x79, 0x3f, 0x02, 0x53, 0x01, 0x62, 0x3f, 0x02, 0x5e, 0x01, 0x7b, 0x02},
+ {0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53, 0x01, 0x61, 0x3F, 0x01,
+ 0x79, 0x3F, 0x02, 0x53, 0x01, 0x62, 0x3F, 0x02, 0x5E, 0x01, 0x7B, 0x02},
[this](Local<Value> value) {
EXPECT_TRUE(EvaluateScriptForResultBool("result.a instanceof Boolean"));
EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
});
#if defined(V8_TARGET_LITTLE_ENDIAN)
DecodeTest(
- {0xff, 0x09, 0x3f, 0x00, 0x6e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45,
- 0xc0, 0x00},
+ {0xFF, 0x09, 0x3F, 0x00, 0x6E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45,
+ 0xC0, 0x00},
[this](Local<Value> value) {
EXPECT_TRUE(EvaluateScriptForResultBool(
"Object.getPrototypeOf(result) === Number.prototype"));
EXPECT_TRUE(EvaluateScriptForResultBool("result.valueOf() === -42"));
});
- DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x6e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0xf8, 0x7f, 0x00},
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x6E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xF8, 0x7F, 0x00},
[this](Local<Value> value) {
EXPECT_TRUE(EvaluateScriptForResultBool(
"Object.getPrototypeOf(result) === Number.prototype"));
@@ -1479,14 +1470,14 @@ TEST_F(ValueSerializerTest, DecodeValueObjects) {
});
#else
DecodeTest(
- {0xff, 0x09, 0x3f, 0x00, 0x6e, 0xc0, 0x45, 0x00, 0x00, 0x00, 0x00, 0x00,
+ {0xFF, 0x09, 0x3F, 0x00, 0x6E, 0xC0, 0x45, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00},
[this](Local<Value> value) {
EXPECT_TRUE(EvaluateScriptForResultBool(
"Object.getPrototypeOf(result) === Number.prototype"));
EXPECT_TRUE(EvaluateScriptForResultBool("result.valueOf() === -42"));
});
- DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x6e, 0x7f, 0xf8, 0x00, 0x00, 0x00, 0x00,
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x6E, 0x7F, 0xF8, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00},
[this](Local<Value> value) {
EXPECT_TRUE(EvaluateScriptForResultBool(
@@ -1496,14 +1487,14 @@ TEST_F(ValueSerializerTest, DecodeValueObjects) {
});
#endif
DecodeTest(
- {0xff, 0x09, 0x3f, 0x00, 0x6f, 0x3f, 0x01, 0x53, 0x01, 0x61, 0x3f,
- 0x01, 0x6e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x40, 0x3f,
- 0x02, 0x53, 0x01, 0x62, 0x3f, 0x02, 0x5e, 0x01, 0x7b, 0x02},
+ {0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53, 0x01, 0x61, 0x3F,
+ 0x01, 0x6E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x40, 0x3F,
+ 0x02, 0x53, 0x01, 0x62, 0x3F, 0x02, 0x5E, 0x01, 0x7B, 0x02},
[this](Local<Value> value) {
EXPECT_TRUE(EvaluateScriptForResultBool("result.a instanceof Number"));
EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
});
- DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x73, 0x07, 0x51, 0x75, 0xc3, 0xa9, 0x62,
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x73, 0x07, 0x51, 0x75, 0xC3, 0xA9, 0x62,
0x65, 0x63, 0x00},
[this](Local<Value> value) {
EXPECT_TRUE(EvaluateScriptForResultBool(
@@ -1512,7 +1503,7 @@ TEST_F(ValueSerializerTest, DecodeValueObjects) {
"result.valueOf() === 'Qu\\xe9bec'"));
EXPECT_TRUE(EvaluateScriptForResultBool("result.length === 6"));
});
- DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x73, 0x04, 0xf0, 0x9f, 0x91, 0x8a},
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x73, 0x04, 0xF0, 0x9F, 0x91, 0x8A},
[this](Local<Value> value) {
EXPECT_TRUE(EvaluateScriptForResultBool(
"Object.getPrototypeOf(result) === String.prototype"));
@@ -1521,16 +1512,16 @@ TEST_F(ValueSerializerTest, DecodeValueObjects) {
EXPECT_TRUE(EvaluateScriptForResultBool("result.length === 2"));
});
DecodeTest(
- {0xff, 0x09, 0x3f, 0x00, 0x6f, 0x3f, 0x01, 0x53, 0x01,
- 0x61, 0x3f, 0x01, 0x73, 0x00, 0x3f, 0x02, 0x53, 0x01,
- 0x62, 0x3f, 0x02, 0x5e, 0x01, 0x7b, 0x02, 0x00},
+ {0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53, 0x01,
+ 0x61, 0x3F, 0x01, 0x73, 0x00, 0x3F, 0x02, 0x53, 0x01,
+ 0x62, 0x3F, 0x02, 0x5E, 0x01, 0x7B, 0x02, 0x00},
[this](Local<Value> value) {
EXPECT_TRUE(EvaluateScriptForResultBool("result.a instanceof String"));
EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
});
// String object containing a Latin-1 string.
- DecodeTest({0xff, 0x0c, 0x73, 0x22, 0x06, 'Q', 'u', 0xe9, 'b', 'e', 'c'},
+ DecodeTest({0xFF, 0x0C, 0x73, 0x22, 0x06, 'Q', 'u', 0xE9, 'b', 'e', 'c'},
[this](Local<Value> value) {
EXPECT_TRUE(EvaluateScriptForResultBool(
"Object.getPrototypeOf(result) === String.prototype"));
@@ -1567,7 +1558,7 @@ TEST_F(ValueSerializerTest, RoundTripRegExp) {
}
TEST_F(ValueSerializerTest, DecodeRegExp) {
- DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x52, 0x03, 0x66, 0x6f, 0x6f, 0x01},
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x52, 0x03, 0x66, 0x6F, 0x6F, 0x01},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsRegExp());
EXPECT_TRUE(EvaluateScriptForResultBool(
@@ -1575,7 +1566,7 @@ TEST_F(ValueSerializerTest, DecodeRegExp) {
EXPECT_TRUE(EvaluateScriptForResultBool(
"result.toString() === '/foo/g'"));
});
- DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x52, 0x07, 0x51, 0x75, 0xc3, 0xa9, 0x62,
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x52, 0x07, 0x51, 0x75, 0xC3, 0xA9, 0x62,
0x65, 0x63, 0x02},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsRegExp());
@@ -1583,16 +1574,16 @@ TEST_F(ValueSerializerTest, DecodeRegExp) {
"result.toString() === '/Qu\\xe9bec/i'"));
});
DecodeTest(
- {0xff, 0x09, 0x3f, 0x00, 0x52, 0x04, 0xf0, 0x9f, 0x91, 0x8a, 0x11, 0x00},
+ {0xFF, 0x09, 0x3F, 0x00, 0x52, 0x04, 0xF0, 0x9F, 0x91, 0x8A, 0x11, 0x00},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsRegExp());
EXPECT_TRUE(EvaluateScriptForResultBool(
"result.toString() === '/\\ud83d\\udc4a/gu'"));
});
DecodeTest(
- {0xff, 0x09, 0x3f, 0x00, 0x6f, 0x3f, 0x01, 0x53, 0x01, 0x61,
- 0x3f, 0x01, 0x52, 0x03, 0x66, 0x6f, 0x6f, 0x03, 0x3f, 0x02,
- 0x53, 0x01, 0x62, 0x3f, 0x02, 0x5e, 0x01, 0x7b, 0x02, 0x00},
+ {0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53, 0x01, 0x61,
+ 0x3F, 0x01, 0x52, 0x03, 0x66, 0x6F, 0x6F, 0x03, 0x3F, 0x02,
+ 0x53, 0x01, 0x62, 0x3F, 0x02, 0x5E, 0x01, 0x7B, 0x02, 0x00},
[this](Local<Value> value) {
EXPECT_TRUE(EvaluateScriptForResultBool("result.a instanceof RegExp"));
EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
@@ -1600,7 +1591,7 @@ TEST_F(ValueSerializerTest, DecodeRegExp) {
// RegExp containing a Latin-1 string.
DecodeTest(
- {0xff, 0x0c, 0x52, 0x22, 0x06, 'Q', 'u', 0xe9, 'b', 'e', 'c', 0x02},
+ {0xFF, 0x0C, 0x52, 0x22, 0x06, 'Q', 'u', 0xE9, 'b', 'e', 'c', 0x02},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsRegExp());
EXPECT_TRUE(EvaluateScriptForResultBool(
@@ -1610,7 +1601,7 @@ TEST_F(ValueSerializerTest, DecodeRegExp) {
// Tests that invalid flags are not accepted by the deserializer.
TEST_F(ValueSerializerTest, DecodeRegExpDotAll) {
- DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x52, 0x03, 0x66, 0x6f, 0x6f, 0x1f},
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x52, 0x03, 0x66, 0x6F, 0x6F, 0x1F},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsRegExp());
EXPECT_TRUE(EvaluateScriptForResultBool(
@@ -1618,7 +1609,7 @@ TEST_F(ValueSerializerTest, DecodeRegExpDotAll) {
EXPECT_TRUE(EvaluateScriptForResultBool(
"result.toString() === '/foo/gimuy'"));
});
- DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x52, 0x03, 0x66, 0x6f, 0x6f, 0x3f},
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x52, 0x03, 0x66, 0x6F, 0x6F, 0x3F},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsRegExp());
EXPECT_TRUE(EvaluateScriptForResultBool(
@@ -1627,7 +1618,7 @@ TEST_F(ValueSerializerTest, DecodeRegExpDotAll) {
"result.toString() === '/foo/gimsuy'"));
});
InvalidDecodeTest(
- {0xff, 0x09, 0x3f, 0x00, 0x52, 0x03, 0x66, 0x6f, 0x6f, 0x7f});
+ {0xFF, 0x09, 0x3F, 0x00, 0x52, 0x03, 0x66, 0x6F, 0x6F, 0x7F});
}
TEST_F(ValueSerializerTest, RoundTripMap) {
@@ -1663,8 +1654,8 @@ TEST_F(ValueSerializerTest, RoundTripMap) {
TEST_F(ValueSerializerTest, DecodeMap) {
DecodeTest(
- {0xff, 0x09, 0x3f, 0x00, 0x3b, 0x3f, 0x01, 0x49, 0x54, 0x3f, 0x01, 0x53,
- 0x03, 0x66, 0x6f, 0x6f, 0x3a, 0x02},
+ {0xFF, 0x09, 0x3F, 0x00, 0x3B, 0x3F, 0x01, 0x49, 0x54, 0x3F, 0x01, 0x53,
+ 0x03, 0x66, 0x6F, 0x6F, 0x3A, 0x02},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsMap());
EXPECT_TRUE(EvaluateScriptForResultBool(
@@ -1672,8 +1663,8 @@ TEST_F(ValueSerializerTest, DecodeMap) {
EXPECT_TRUE(EvaluateScriptForResultBool("result.size === 1"));
EXPECT_TRUE(EvaluateScriptForResultBool("result.get(42) === 'foo'"));
});
- DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x3b, 0x3f, 0x01, 0x5e, 0x00, 0x3f, 0x01,
- 0x5e, 0x00, 0x3a, 0x02, 0x00},
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3B, 0x3F, 0x01, 0x5E, 0x00, 0x3F, 0x01,
+ 0x5E, 0x00, 0x3A, 0x02, 0x00},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsMap());
EXPECT_TRUE(EvaluateScriptForResultBool("result.size === 1"));
@@ -1681,10 +1672,10 @@ TEST_F(ValueSerializerTest, DecodeMap) {
"result.get(result) === result"));
});
// Iteration order must be preserved.
- DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x3b, 0x3f, 0x01, 0x49, 0x02, 0x3f,
- 0x01, 0x49, 0x00, 0x3f, 0x01, 0x53, 0x01, 0x61, 0x3f, 0x01,
- 0x49, 0x00, 0x3f, 0x01, 0x49, 0x06, 0x3f, 0x01, 0x49, 0x00,
- 0x3f, 0x01, 0x49, 0x04, 0x3f, 0x01, 0x49, 0x00, 0x3a, 0x08},
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3B, 0x3F, 0x01, 0x49, 0x02, 0x3F,
+ 0x01, 0x49, 0x00, 0x3F, 0x01, 0x53, 0x01, 0x61, 0x3F, 0x01,
+ 0x49, 0x00, 0x3F, 0x01, 0x49, 0x06, 0x3F, 0x01, 0x49, 0x00,
+ 0x3F, 0x01, 0x49, 0x04, 0x3F, 0x01, 0x49, 0x00, 0x3A, 0x08},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsMap());
EXPECT_TRUE(EvaluateScriptForResultBool(
@@ -1763,8 +1754,8 @@ TEST_F(ValueSerializerTest, RoundTripSet) {
}
TEST_F(ValueSerializerTest, DecodeSet) {
- DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x27, 0x3f, 0x01, 0x49, 0x54, 0x3f, 0x01,
- 0x53, 0x03, 0x66, 0x6f, 0x6f, 0x2c, 0x02},
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x27, 0x3F, 0x01, 0x49, 0x54, 0x3F, 0x01,
+ 0x53, 0x03, 0x66, 0x6F, 0x6F, 0x2C, 0x02},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsSet());
EXPECT_TRUE(EvaluateScriptForResultBool(
@@ -1774,7 +1765,7 @@ TEST_F(ValueSerializerTest, DecodeSet) {
EXPECT_TRUE(EvaluateScriptForResultBool("result.has('foo')"));
});
DecodeTest(
- {0xff, 0x09, 0x3f, 0x00, 0x27, 0x3f, 0x01, 0x5e, 0x00, 0x2c, 0x01, 0x00},
+ {0xFF, 0x09, 0x3F, 0x00, 0x27, 0x3F, 0x01, 0x5E, 0x00, 0x2C, 0x01, 0x00},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsSet());
EXPECT_TRUE(EvaluateScriptForResultBool("result.size === 1"));
@@ -1782,8 +1773,8 @@ TEST_F(ValueSerializerTest, DecodeSet) {
});
// Iteration order must be preserved.
DecodeTest(
- {0xff, 0x09, 0x3f, 0x00, 0x27, 0x3f, 0x01, 0x49, 0x02, 0x3f, 0x01, 0x53,
- 0x01, 0x61, 0x3f, 0x01, 0x49, 0x06, 0x3f, 0x01, 0x49, 0x04, 0x2c, 0x04},
+ {0xFF, 0x09, 0x3F, 0x00, 0x27, 0x3F, 0x01, 0x49, 0x02, 0x3F, 0x01, 0x53,
+ 0x01, 0x61, 0x3F, 0x01, 0x49, 0x06, 0x3F, 0x01, 0x49, 0x04, 0x2C, 0x04},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsSet());
EXPECT_TRUE(EvaluateScriptForResultBool(
@@ -1849,14 +1840,14 @@ TEST_F(ValueSerializerTest, RoundTripArrayBuffer) {
}
TEST_F(ValueSerializerTest, DecodeArrayBuffer) {
- DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x42, 0x00},
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x42, 0x00},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsArrayBuffer());
EXPECT_EQ(0u, ArrayBuffer::Cast(*value)->ByteLength());
EXPECT_TRUE(EvaluateScriptForResultBool(
"Object.getPrototypeOf(result) === ArrayBuffer.prototype"));
});
- DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x42, 0x03, 0x00, 0x80, 0xff, 0x00},
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x42, 0x03, 0x00, 0x80, 0xFF, 0x00},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsArrayBuffer());
EXPECT_EQ(3u, ArrayBuffer::Cast(*value)->ByteLength());
@@ -1864,9 +1855,9 @@ TEST_F(ValueSerializerTest, DecodeArrayBuffer) {
"new Uint8Array(result).toString() === '0,128,255'"));
});
DecodeTest(
- {0xff, 0x09, 0x3f, 0x00, 0x6f, 0x3f, 0x01, 0x53, 0x01,
- 0x61, 0x3f, 0x01, 0x42, 0x00, 0x3f, 0x02, 0x53, 0x01,
- 0x62, 0x3f, 0x02, 0x5e, 0x01, 0x7b, 0x02, 0x00},
+ {0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53, 0x01,
+ 0x61, 0x3F, 0x01, 0x42, 0x00, 0x3F, 0x02, 0x53, 0x01,
+ 0x62, 0x3F, 0x02, 0x5E, 0x01, 0x7B, 0x02, 0x00},
[this](Local<Value> value) {
EXPECT_TRUE(
EvaluateScriptForResultBool("result.a instanceof ArrayBuffer"));
@@ -1875,7 +1866,7 @@ TEST_F(ValueSerializerTest, DecodeArrayBuffer) {
}
TEST_F(ValueSerializerTest, DecodeInvalidArrayBuffer) {
- InvalidDecodeTest({0xff, 0x09, 0x42, 0xff, 0xff, 0x00});
+ InvalidDecodeTest({0xFF, 0x09, 0x42, 0xFF, 0xFF, 0x00});
}
// An array buffer allocator that never has available memory.
@@ -1904,8 +1895,8 @@ TEST_F(ValueSerializerTest, DecodeArrayBufferOOM) {
Context::Scope context_scope(context);
TryCatch try_catch(isolate);
- const std::vector<uint8_t> data = {0xff, 0x09, 0x3f, 0x00, 0x42,
- 0x03, 0x00, 0x80, 0xff, 0x00};
+ const std::vector<uint8_t> data = {0xFF, 0x09, 0x3F, 0x00, 0x42,
+ 0x03, 0x00, 0x80, 0xFF, 0x00};
ValueDeserializer deserializer(isolate, &data[0],
static_cast<int>(data.size()), nullptr);
deserializer.SetSupportsLegacyWireFormat(true);
@@ -1931,7 +1922,7 @@ class ValueSerializerTestWithArrayBufferTransfer : public ValueSerializerTest {
{
Context::Scope scope(deserialization_context());
output_buffer_ = ArrayBuffer::New(isolate(), kTestByteLength);
- const uint8_t data[kTestByteLength] = {0x00, 0x01, 0x80, 0xff};
+ const uint8_t data[kTestByteLength] = {0x00, 0x01, 0x80, 0xFF};
memcpy(output_buffer_->GetContents().Data(), data, kTestByteLength);
}
}
@@ -2044,7 +2035,7 @@ TEST_F(ValueSerializerTest, RoundTripTypedArray) {
TEST_F(ValueSerializerTest, DecodeTypedArray) {
// Check that the right type comes out the other side for every kind of typed
// array.
- DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x3f, 0x00, 0x42, 0x02, 0x00, 0x00, 0x56,
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x02, 0x00, 0x00, 0x56,
0x42, 0x00, 0x02},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsUint8Array());
@@ -2053,7 +2044,7 @@ TEST_F(ValueSerializerTest, DecodeTypedArray) {
EXPECT_TRUE(EvaluateScriptForResultBool(
"Object.getPrototypeOf(result) === Uint8Array.prototype"));
});
- DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x3f, 0x00, 0x42, 0x02, 0x00, 0x00, 0x56,
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x02, 0x00, 0x00, 0x56,
0x62, 0x00, 0x02},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsInt8Array());
@@ -2063,7 +2054,7 @@ TEST_F(ValueSerializerTest, DecodeTypedArray) {
"Object.getPrototypeOf(result) === Int8Array.prototype"));
});
#if defined(V8_TARGET_LITTLE_ENDIAN)
- DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x3f, 0x00, 0x42, 0x04, 0x00, 0x00, 0x00,
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x04, 0x00, 0x00, 0x00,
0x00, 0x56, 0x57, 0x00, 0x04},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsUint16Array());
@@ -2072,7 +2063,7 @@ TEST_F(ValueSerializerTest, DecodeTypedArray) {
EXPECT_TRUE(EvaluateScriptForResultBool(
"Object.getPrototypeOf(result) === Uint16Array.prototype"));
});
- DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x3f, 0x00, 0x42, 0x04, 0x00, 0x00, 0x00,
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x04, 0x00, 0x00, 0x00,
0x00, 0x56, 0x77, 0x00, 0x04},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsInt16Array());
@@ -2081,7 +2072,7 @@ TEST_F(ValueSerializerTest, DecodeTypedArray) {
EXPECT_TRUE(EvaluateScriptForResultBool(
"Object.getPrototypeOf(result) === Int16Array.prototype"));
});
- DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x3f, 0x00, 0x42, 0x08, 0x00, 0x00,
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x08, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x56, 0x44, 0x00, 0x08},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsUint32Array());
@@ -2090,7 +2081,7 @@ TEST_F(ValueSerializerTest, DecodeTypedArray) {
EXPECT_TRUE(EvaluateScriptForResultBool(
"Object.getPrototypeOf(result) === Uint32Array.prototype"));
});
- DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x3f, 0x00, 0x42, 0x08, 0x00, 0x00,
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x08, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x56, 0x64, 0x00, 0x08},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsInt32Array());
@@ -2099,7 +2090,7 @@ TEST_F(ValueSerializerTest, DecodeTypedArray) {
EXPECT_TRUE(EvaluateScriptForResultBool(
"Object.getPrototypeOf(result) === Int32Array.prototype"));
});
- DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x3f, 0x00, 0x42, 0x08, 0x00, 0x00,
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x08, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x56, 0x66, 0x00, 0x08},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsFloat32Array());
@@ -2108,7 +2099,7 @@ TEST_F(ValueSerializerTest, DecodeTypedArray) {
EXPECT_TRUE(EvaluateScriptForResultBool(
"Object.getPrototypeOf(result) === Float32Array.prototype"));
});
- DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x3f, 0x00, 0x42, 0x10, 0x00, 0x00,
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x10, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x56, 0x46, 0x00, 0x10},
[this](Local<Value> value) {
@@ -2121,22 +2112,22 @@ TEST_F(ValueSerializerTest, DecodeTypedArray) {
#endif // V8_TARGET_LITTLE_ENDIAN
// Check that values of various kinds are suitably preserved.
- DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x3f, 0x00, 0x42, 0x03, 0x01, 0x80, 0xff,
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x03, 0x01, 0x80, 0xFF,
0x56, 0x42, 0x00, 0x03, 0x00},
[this](Local<Value> value) {
EXPECT_TRUE(EvaluateScriptForResultBool(
"result.toString() === '1,128,255'"));
});
#if defined(V8_TARGET_LITTLE_ENDIAN)
- DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x3f, 0x00, 0x42, 0x06, 0x00, 0x00, 0x00,
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x06, 0x00, 0x00, 0x00,
0x01, 0x00, 0x80, 0x56, 0x77, 0x00, 0x06},
[this](Local<Value> value) {
EXPECT_TRUE(EvaluateScriptForResultBool(
"result.toString() === '0,256,-32768'"));
});
- DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x3f, 0x00, 0x42, 0x10, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0xbf, 0x00, 0x00, 0xc0, 0x7f,
- 0x00, 0x00, 0x80, 0x7f, 0x56, 0x66, 0x00, 0x10},
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x10, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xBF, 0x00, 0x00, 0xC0, 0x7F,
+ 0x00, 0x00, 0x80, 0x7F, 0x56, 0x66, 0x00, 0x10},
[this](Local<Value> value) {
EXPECT_TRUE(EvaluateScriptForResultBool(
"result.toString() === '0,-0.5,NaN,Infinity'"));
@@ -2146,14 +2137,14 @@ TEST_F(ValueSerializerTest, DecodeTypedArray) {
// Array buffer views sharing a buffer should do so on the other side.
// Similarly, multiple references to the same typed array should be resolved.
DecodeTest(
- {0xff, 0x09, 0x3f, 0x00, 0x6f, 0x3f, 0x01, 0x53, 0x02, 0x75, 0x38, 0x3f,
- 0x01, 0x3f, 0x01, 0x42, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ {0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53, 0x02, 0x75, 0x38, 0x3F,
+ 0x01, 0x3F, 0x01, 0x42, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x56, 0x42, 0x00, 0x20, 0x3f, 0x03, 0x53, 0x04, 0x75, 0x38, 0x5f,
- 0x32, 0x3f, 0x03, 0x5e, 0x02, 0x3f, 0x03, 0x53, 0x03, 0x66, 0x33, 0x32,
- 0x3f, 0x03, 0x3f, 0x03, 0x5e, 0x01, 0x56, 0x66, 0x04, 0x14, 0x3f, 0x04,
- 0x53, 0x01, 0x62, 0x3f, 0x04, 0x5e, 0x01, 0x7b, 0x04, 0x00},
+ 0x00, 0x56, 0x42, 0x00, 0x20, 0x3F, 0x03, 0x53, 0x04, 0x75, 0x38, 0x5F,
+ 0x32, 0x3F, 0x03, 0x5E, 0x02, 0x3F, 0x03, 0x53, 0x03, 0x66, 0x33, 0x32,
+ 0x3F, 0x03, 0x3F, 0x03, 0x5E, 0x01, 0x56, 0x66, 0x04, 0x14, 0x3F, 0x04,
+ 0x53, 0x01, 0x62, 0x3F, 0x04, 0x5E, 0x01, 0x7B, 0x04, 0x00},
[this](Local<Value> value) {
EXPECT_TRUE(
EvaluateScriptForResultBool("result.u8 instanceof Uint8Array"));
@@ -2170,19 +2161,19 @@ TEST_F(ValueSerializerTest, DecodeTypedArray) {
TEST_F(ValueSerializerTest, DecodeInvalidTypedArray) {
// Byte offset out of range.
InvalidDecodeTest(
- {0xff, 0x09, 0x42, 0x02, 0x00, 0x00, 0x56, 0x42, 0x03, 0x01});
+ {0xFF, 0x09, 0x42, 0x02, 0x00, 0x00, 0x56, 0x42, 0x03, 0x01});
// Byte offset in range, offset + length out of range.
InvalidDecodeTest(
- {0xff, 0x09, 0x42, 0x02, 0x00, 0x00, 0x56, 0x42, 0x01, 0x03});
+ {0xFF, 0x09, 0x42, 0x02, 0x00, 0x00, 0x56, 0x42, 0x01, 0x03});
// Byte offset not divisible by element size.
InvalidDecodeTest(
- {0xff, 0x09, 0x42, 0x04, 0x00, 0x00, 0x00, 0x00, 0x56, 0x77, 0x01, 0x02});
+ {0xFF, 0x09, 0x42, 0x04, 0x00, 0x00, 0x00, 0x00, 0x56, 0x77, 0x01, 0x02});
// Byte length not divisible by element size.
InvalidDecodeTest(
- {0xff, 0x09, 0x42, 0x04, 0x00, 0x00, 0x00, 0x00, 0x56, 0x77, 0x02, 0x01});
- // Invalid view type (0xff).
+ {0xFF, 0x09, 0x42, 0x04, 0x00, 0x00, 0x00, 0x00, 0x56, 0x77, 0x02, 0x01});
+ // Invalid view type (0xFF).
InvalidDecodeTest(
- {0xff, 0x09, 0x42, 0x02, 0x00, 0x00, 0x56, 0xff, 0x01, 0x01});
+ {0xFF, 0x09, 0x42, 0x02, 0x00, 0x00, 0x56, 0xFF, 0x01, 0x01});
}
TEST_F(ValueSerializerTest, RoundTripDataView) {
@@ -2198,8 +2189,8 @@ TEST_F(ValueSerializerTest, RoundTripDataView) {
}
TEST_F(ValueSerializerTest, DecodeDataView) {
- DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x3f, 0x00, 0x42, 0x04, 0x00, 0x00, 0x00,
- 0x00, 0x56, 0x3f, 0x01, 0x02},
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x04, 0x00, 0x00, 0x00,
+ 0x00, 0x56, 0x3F, 0x01, 0x02},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsDataView());
EXPECT_EQ(1u, DataView::Cast(*value)->ByteOffset());
@@ -2213,10 +2204,10 @@ TEST_F(ValueSerializerTest, DecodeDataView) {
TEST_F(ValueSerializerTest, DecodeInvalidDataView) {
// Byte offset out of range.
InvalidDecodeTest(
- {0xff, 0x09, 0x42, 0x02, 0x00, 0x00, 0x56, 0x3f, 0x03, 0x01});
+ {0xFF, 0x09, 0x42, 0x02, 0x00, 0x00, 0x56, 0x3F, 0x03, 0x01});
// Byte offset in range, offset + length out of range.
InvalidDecodeTest(
- {0xff, 0x09, 0x42, 0x02, 0x00, 0x00, 0x56, 0x3f, 0x01, 0x03});
+ {0xFF, 0x09, 0x42, 0x02, 0x00, 0x00, 0x56, 0x3F, 0x01, 0x03});
}
class ValueSerializerTestWithSharedArrayBufferTransfer
@@ -2303,7 +2294,7 @@ bool ValueSerializerTestWithSharedArrayBufferTransfer::flag_was_enabled_ =
TEST_F(ValueSerializerTestWithSharedArrayBufferTransfer,
RoundTripSharedArrayBufferTransfer) {
- InitializeData({0x00, 0x01, 0x80, 0xff});
+ InitializeData({0x00, 0x01, 0x80, 0xFF});
EXPECT_CALL(serializer_delegate_,
GetSharedArrayBufferId(isolate(), input_buffer()))
@@ -2345,7 +2336,7 @@ TEST_F(ValueSerializerTestWithSharedArrayBufferTransfer,
bool flag_was_enabled = i::FLAG_experimental_wasm_threads;
i::FLAG_experimental_wasm_threads = true;
- std::vector<uint8_t> data = {0x00, 0x01, 0x80, 0xff};
+ std::vector<uint8_t> data = {0x00, 0x01, 0x80, 0xFF};
data.resize(65536);
InitializeData(data);
@@ -2628,7 +2619,7 @@ TEST_F(ValueSerializerTestWithHostObject, DecodeSimpleHostObject) {
return NewHostObject(deserialization_context(), 0, nullptr);
}));
DecodeTest(
- {0xff, 0x0d, 0x5c, kExampleHostObjectTag}, [this](Local<Value> value) {
+ {0xFF, 0x0D, 0x5C, kExampleHostObjectTag}, [this](Local<Value> value) {
EXPECT_TRUE(EvaluateScriptForResultBool(
"Object.getPrototypeOf(result) === ExampleHostObject.prototype"));
});
@@ -2963,67 +2954,67 @@ TEST_F(ValueSerializerTestWithWasm, ComplexObjectWithManyInline) {
// As produced around Chrome 56.
const unsigned char kSerializedIncrementerWasm[] = {
- 0xff, 0x09, 0x3f, 0x00, 0x57, 0x79, 0x2d, 0x00, 0x61, 0x73, 0x6d, 0x0d,
- 0x00, 0x00, 0x00, 0x01, 0x06, 0x01, 0x60, 0x01, 0x7f, 0x01, 0x7f, 0x03,
- 0x02, 0x01, 0x00, 0x07, 0x0d, 0x01, 0x09, 0x69, 0x6e, 0x63, 0x72, 0x65,
- 0x6d, 0x65, 0x6e, 0x74, 0x00, 0x00, 0x0a, 0x08, 0x01, 0x06, 0x00, 0x20,
- 0x00, 0x41, 0x01, 0x6a, 0xf8, 0x04, 0xa1, 0x06, 0xde, 0xc0, 0xc6, 0x44,
- 0x3c, 0x29, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x02, 0x00, 0x00, 0x81, 0x4e,
- 0xce, 0x7c, 0x05, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x30, 0x02,
- 0x00, 0x00, 0xb0, 0x25, 0x30, 0xe3, 0xf2, 0xdb, 0x2e, 0x48, 0x00, 0x00,
- 0x00, 0x80, 0xe8, 0x00, 0x00, 0x80, 0xe0, 0x01, 0x00, 0x80, 0x00, 0x00,
+ 0xFF, 0x09, 0x3F, 0x00, 0x57, 0x79, 0x2D, 0x00, 0x61, 0x73, 0x6D, 0x0D,
+ 0x00, 0x00, 0x00, 0x01, 0x06, 0x01, 0x60, 0x01, 0x7F, 0x01, 0x7F, 0x03,
+ 0x02, 0x01, 0x00, 0x07, 0x0D, 0x01, 0x09, 0x69, 0x6E, 0x63, 0x72, 0x65,
+ 0x6D, 0x65, 0x6E, 0x74, 0x00, 0x00, 0x0A, 0x08, 0x01, 0x06, 0x00, 0x20,
+ 0x00, 0x41, 0x01, 0x6A, 0xF8, 0x04, 0xA1, 0x06, 0xDE, 0xC0, 0xC6, 0x44,
+ 0x3C, 0x29, 0x00, 0x00, 0x00, 0x00, 0x1F, 0x02, 0x00, 0x00, 0x81, 0x4E,
+ 0xCE, 0x7C, 0x05, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x30, 0x02,
+ 0x00, 0x00, 0xB0, 0x25, 0x30, 0xE3, 0xF2, 0xDB, 0x2E, 0x48, 0x00, 0x00,
+ 0x00, 0x80, 0xE8, 0x00, 0x00, 0x80, 0xE0, 0x01, 0x00, 0x80, 0x00, 0x00,
0x00, 0x80, 0x00, 0x00, 0x00, 0x80, 0x07, 0x08, 0x00, 0x00, 0x09, 0x04,
- 0x10, 0x02, 0x00, 0x00, 0x00, 0x00, 0x01, 0x3c, 0x8c, 0xc0, 0x00, 0x00,
- 0x00, 0x00, 0x0d, 0x00, 0x00, 0x00, 0x01, 0x10, 0x8c, 0xc0, 0x00, 0x00,
- 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x70, 0x94, 0x01, 0x0c, 0x8b,
- 0xc1, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x25, 0xdc, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x9e, 0x01, 0x10, 0x8c, 0xc0, 0x00, 0x00,
- 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x84, 0xc0, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x05, 0x7d, 0x01, 0x1a, 0xe1, 0x02, 0x00, 0x00,
+ 0x10, 0x02, 0x00, 0x00, 0x00, 0x00, 0x01, 0x3C, 0x8C, 0xC0, 0x00, 0x00,
+ 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x01, 0x10, 0x8C, 0xC0, 0x00, 0x00,
+ 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x70, 0x94, 0x01, 0x0C, 0x8B,
+ 0xC1, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x25, 0xDC, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x9E, 0x01, 0x10, 0x8C, 0xC0, 0x00, 0x00,
+ 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x84, 0xC0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x05, 0x7D, 0x01, 0x1A, 0xE1, 0x02, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x23, 0x88, 0x42, 0x32, 0x03,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x00, 0x00, 0x00, 0x04, 0x00,
- 0x00, 0x02, 0xa1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff,
- 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x49, 0x3b, 0xa5, 0x60, 0x0c, 0x00,
- 0x00, 0x0f, 0x86, 0x04, 0x00, 0x00, 0x00, 0x83, 0xc0, 0x01, 0xc3, 0x55,
- 0x48, 0x89, 0xe5, 0x49, 0xba, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00,
- 0x00, 0x41, 0x52, 0x48, 0x83, 0xec, 0x08, 0x48, 0x89, 0x45, 0xf0, 0x48,
- 0xbb, 0xb0, 0x67, 0xc6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x33, 0xc0, 0x48,
- 0xbe, 0xe1, 0x57, 0x81, 0x85, 0xf6, 0x14, 0x00, 0x00, 0xe8, 0xfc, 0x3c,
- 0xea, 0xff, 0x48, 0x8b, 0x45, 0xf0, 0x48, 0x8b, 0xe5, 0x5d, 0xeb, 0xbf,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x00, 0x00, 0x04, 0x00,
+ 0x00, 0x02, 0xA1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x49, 0x3B, 0xA5, 0x60, 0x0C, 0x00,
+ 0x00, 0x0F, 0x86, 0x04, 0x00, 0x00, 0x00, 0x83, 0xC0, 0x01, 0xC3, 0x55,
+ 0x48, 0x89, 0xE5, 0x49, 0xBA, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00,
+ 0x00, 0x41, 0x52, 0x48, 0x83, 0xEC, 0x08, 0x48, 0x89, 0x45, 0xF0, 0x48,
+ 0xBB, 0xB0, 0x67, 0xC6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x33, 0xC0, 0x48,
+ 0xBE, 0xE1, 0x57, 0x81, 0x85, 0xF6, 0x14, 0x00, 0x00, 0xE8, 0xFC, 0x3C,
+ 0xEA, 0xFF, 0x48, 0x8B, 0x45, 0xF0, 0x48, 0x8B, 0xE5, 0x5D, 0xEB, 0xBF,
0x66, 0x90, 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x44, 0x00,
- 0x00, 0x00, 0xff, 0xff, 0xff, 0x0f, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x0f, 0x20, 0x84, 0x0f, 0x7d, 0x01, 0x0d, 0x00, 0x0f, 0x04,
- 0x6d, 0x08, 0x0f, 0xf0, 0x02, 0x80, 0x94, 0x01, 0x0c, 0x8b, 0xc1, 0x00,
- 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0xed, 0xa9, 0x2d, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x9e, 0xe0, 0x38, 0x1a, 0x61, 0x03, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x0F, 0x20, 0x84, 0x0F, 0x7D, 0x01, 0x0D, 0x00, 0x0F, 0x04,
+ 0x6D, 0x08, 0x0F, 0xF0, 0x02, 0x80, 0x94, 0x01, 0x0C, 0x8B, 0xC1, 0x00,
+ 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0xED, 0xA9, 0x2D, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x9E, 0xE0, 0x38, 0x1A, 0x61, 0x03, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x23, 0x88, 0x42, 0x32, 0x03, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9a, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x4e, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00,
- 0x02, 0xf9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff,
- 0xff, 0x00, 0x00, 0x00, 0x00, 0x55, 0x48, 0x89, 0xe5, 0x56, 0x57, 0x48,
- 0x8b, 0x45, 0x10, 0xe8, 0x11, 0xed, 0xed, 0xff, 0xa8, 0x01, 0x0f, 0x85,
- 0x2d, 0x00, 0x00, 0x00, 0x48, 0xc1, 0xe8, 0x20, 0xc5, 0xf9, 0x57, 0xc0,
- 0xc5, 0xfb, 0x2a, 0xc0, 0xc4, 0xe1, 0xfb, 0x2c, 0xc0, 0x48, 0x83, 0xf8,
- 0x01, 0x0f, 0x80, 0x34, 0x00, 0x00, 0x00, 0x8b, 0xc0, 0xe8, 0x27, 0xfe,
- 0xff, 0xff, 0x48, 0xc1, 0xe0, 0x20, 0x48, 0x8b, 0xe5, 0x5d, 0xc2, 0x10,
- 0x00, 0x49, 0x39, 0x45, 0xa0, 0x0f, 0x84, 0x07, 0x00, 0x00, 0x00, 0xc5,
- 0xfb, 0x10, 0x40, 0x07, 0xeb, 0xce, 0x49, 0xba, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0xf8, 0x7f, 0xc4, 0xc1, 0xf9, 0x6e, 0xc2, 0xeb, 0xbd, 0x48,
- 0x83, 0xec, 0x08, 0xc5, 0xfb, 0x11, 0x04, 0x24, 0xe8, 0xcc, 0xfe, 0xff,
- 0xff, 0x48, 0x83, 0xc4, 0x08, 0xeb, 0xb8, 0x66, 0x90, 0x02, 0x00, 0x00,
- 0x00, 0x03, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff,
- 0x0f, 0x39, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0x0f, 0xff, 0xff, 0x00,
- 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x20, 0x84,
- 0x0f, 0xcc, 0x6e, 0x7d, 0x01, 0x72, 0x98, 0x00, 0x0f, 0xdc, 0x6d, 0x0c,
- 0x0f, 0xb0, 0x84, 0x0d, 0x04, 0x84, 0xe3, 0xc0, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x84, 0xe0, 0x84, 0x84, 0x18, 0x2f, 0x2f, 0x2f,
- 0x2f, 0x2f};
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9A, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x4E, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00,
+ 0x02, 0xF9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0x00, 0x00, 0x00, 0x00, 0x55, 0x48, 0x89, 0xE5, 0x56, 0x57, 0x48,
+ 0x8B, 0x45, 0x10, 0xE8, 0x11, 0xED, 0xED, 0xFF, 0xA8, 0x01, 0x0F, 0x85,
+ 0x2D, 0x00, 0x00, 0x00, 0x48, 0xC1, 0xE8, 0x20, 0xC5, 0xF9, 0x57, 0xC0,
+ 0xC5, 0xFB, 0x2A, 0xC0, 0xC4, 0xE1, 0xFB, 0x2C, 0xC0, 0x48, 0x83, 0xF8,
+ 0x01, 0x0F, 0x80, 0x34, 0x00, 0x00, 0x00, 0x8B, 0xC0, 0xE8, 0x27, 0xFE,
+ 0xFF, 0xFF, 0x48, 0xC1, 0xE0, 0x20, 0x48, 0x8B, 0xE5, 0x5D, 0xC2, 0x10,
+ 0x00, 0x49, 0x39, 0x45, 0xA0, 0x0F, 0x84, 0x07, 0x00, 0x00, 0x00, 0xC5,
+ 0xFB, 0x10, 0x40, 0x07, 0xEB, 0xCE, 0x49, 0xBA, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xF8, 0x7F, 0xC4, 0xC1, 0xF9, 0x6E, 0xC2, 0xEB, 0xBD, 0x48,
+ 0x83, 0xEC, 0x08, 0xC5, 0xFB, 0x11, 0x04, 0x24, 0xE8, 0xCC, 0xFE, 0xFF,
+ 0xFF, 0x48, 0x83, 0xC4, 0x08, 0xEB, 0xB8, 0x66, 0x90, 0x02, 0x00, 0x00,
+ 0x00, 0x03, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF,
+ 0x0F, 0x39, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0x00,
+ 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0x20, 0x84,
+ 0x0F, 0xCC, 0x6E, 0x7D, 0x01, 0x72, 0x98, 0x00, 0x0F, 0xDC, 0x6D, 0x0C,
+ 0x0F, 0xB0, 0x84, 0x0D, 0x04, 0x84, 0xE3, 0xC0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x84, 0xE0, 0x84, 0x84, 0x18, 0x2F, 0x2F, 0x2F,
+ 0x2F, 0x2F};
TEST_F(ValueSerializerTestWithWasm, DecodeWasmModule) {
- if (true) return; // TODO(mtrofin): fix this test
+ if ((true)) return; // TODO(mtrofin): fix this test
std::vector<uint8_t> raw(
kSerializedIncrementerWasm,
kSerializedIncrementerWasm + sizeof(kSerializedIncrementerWasm));
@@ -3037,14 +3028,14 @@ TEST_F(ValueSerializerTestWithWasm, DecodeWasmModule) {
// As above, but with empty compiled data. Should work due to fallback to wire
// data.
const unsigned char kSerializedIncrementerWasmWithInvalidCompiledData[] = {
- 0xff, 0x09, 0x3f, 0x00, 0x57, 0x79, 0x2d, 0x00, 0x61, 0x73, 0x6d,
- 0x0d, 0x00, 0x00, 0x00, 0x01, 0x06, 0x01, 0x60, 0x01, 0x7f, 0x01,
- 0x7f, 0x03, 0x02, 0x01, 0x00, 0x07, 0x0d, 0x01, 0x09, 0x69, 0x6e,
- 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x00, 0x00, 0x0a, 0x08,
- 0x01, 0x06, 0x00, 0x20, 0x00, 0x41, 0x01, 0x6a, 0x00};
+ 0xFF, 0x09, 0x3F, 0x00, 0x57, 0x79, 0x2D, 0x00, 0x61, 0x73, 0x6D,
+ 0x0D, 0x00, 0x00, 0x00, 0x01, 0x06, 0x01, 0x60, 0x01, 0x7F, 0x01,
+ 0x7F, 0x03, 0x02, 0x01, 0x00, 0x07, 0x0D, 0x01, 0x09, 0x69, 0x6E,
+ 0x63, 0x72, 0x65, 0x6D, 0x65, 0x6E, 0x74, 0x00, 0x00, 0x0A, 0x08,
+ 0x01, 0x06, 0x00, 0x20, 0x00, 0x41, 0x01, 0x6A, 0x00};
TEST_F(ValueSerializerTestWithWasm, DecodeWasmModuleWithInvalidCompiledData) {
- if (true) return; // TODO(titzer): regenerate this test
+ if ((true)) return; // TODO(titzer): regenerate this test
std::vector<uint8_t> raw(
kSerializedIncrementerWasmWithInvalidCompiledData,
kSerializedIncrementerWasmWithInvalidCompiledData +
@@ -3058,7 +3049,7 @@ TEST_F(ValueSerializerTestWithWasm, DecodeWasmModuleWithInvalidCompiledData) {
// As above, but also with empty wire data. Should fail.
const unsigned char kSerializedIncrementerWasmInvalid[] = {
- 0xff, 0x09, 0x3f, 0x00, 0x57, 0x79, 0x00, 0x00};
+ 0xFF, 0x09, 0x3F, 0x00, 0x57, 0x79, 0x00, 0x00};
TEST_F(ValueSerializerTestWithWasm,
DecodeWasmModuleWithInvalidCompiledAndWireData) {
@@ -3069,8 +3060,8 @@ TEST_F(ValueSerializerTestWithWasm,
}
TEST_F(ValueSerializerTestWithWasm, DecodeWasmModuleWithInvalidDataLength) {
- InvalidDecodeTest({0xff, 0x09, 0x3f, 0x00, 0x57, 0x79, 0x7f, 0x00});
- InvalidDecodeTest({0xff, 0x09, 0x3f, 0x00, 0x57, 0x79, 0x00, 0x7f});
+ InvalidDecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x57, 0x79, 0x7F, 0x00});
+ InvalidDecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x57, 0x79, 0x00, 0x7F});
}
} // namespace
diff --git a/deps/v8/test/unittests/wasm/decoder-unittest.cc b/deps/v8/test/unittests/wasm/decoder-unittest.cc
index 24606a43fd..627a9da3ee 100644
--- a/deps/v8/test/unittests/wasm/decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/decoder-unittest.cc
@@ -185,7 +185,7 @@ TEST_F(DecoderTest, ReadU32v_FiveByte) {
const uint32_t max = 0xFFFFFFFFu;
for (uint32_t i = 1; i < 32; i++) {
- uint32_t val = 0x983489aau << i;
+ uint32_t val = 0x983489AAu << i;
CHECK_UINT32V_INLINE(val, 5, U32V_5(val), 0);
}
@@ -442,7 +442,7 @@ TEST_F(DecoderTest, ReadU32v_extra_bits) {
TEST_F(DecoderTest, ReadI32v_extra_bits_negative) {
// OK for negative signed values to have extra ones.
unsigned length = 0;
- byte data[] = {0xff, 0xff, 0xff, 0xff, 0x7f};
+ byte data[] = {0xFF, 0xFF, 0xFF, 0xFF, 0x7F};
decoder.Reset(data, data + sizeof(data));
decoder.read_i32v<Decoder::kValidate>(decoder.start(), &length);
EXPECT_EQ(5u, length);
@@ -463,11 +463,11 @@ TEST_F(DecoderTest, ReadU32v_Bits) {
// A more exhaustive test.
const int kMaxSize = 5;
const uint32_t kVals[] = {
- 0xaabbccdd, 0x11223344, 0x33445566, 0xffeeddcc, 0xF0F0F0F0, 0x0F0F0F0F,
- 0xEEEEEEEE, 0xAAAAAAAA, 0x12345678, 0x9abcdef0, 0x80309488, 0x729ed997,
- 0xc4a0cf81, 0x16c6eb85, 0x4206db8e, 0xf3b089d5, 0xaa2e223e, 0xf99e29c8,
- 0x4a4357d8, 0x1890b1c1, 0x8d80a085, 0xacb6ae4c, 0x1b827e10, 0xeb5c7bd9,
- 0xbb1bc146, 0xdf57a33l};
+ 0xAABBCCDD, 0x11223344, 0x33445566, 0xFFEEDDCC, 0xF0F0F0F0, 0x0F0F0F0F,
+ 0xEEEEEEEE, 0xAAAAAAAA, 0x12345678, 0x9ABCDEF0, 0x80309488, 0x729ED997,
+ 0xC4A0CF81, 0x16C6EB85, 0x4206DB8E, 0xF3B089D5, 0xAA2E223E, 0xF99E29C8,
+ 0x4A4357D8, 0x1890B1C1, 0x8D80A085, 0xACB6AE4C, 0x1B827E10, 0xEB5C7BD9,
+ 0xBB1BC146, 0xDF57A33l};
byte data[kMaxSize];
// foreach value in above array
@@ -560,11 +560,11 @@ TEST_F(DecoderTest, ReadU64v_PowerOf2) {
TEST_F(DecoderTest, ReadU64v_Bits) {
const int kMaxSize = 10;
const uint64_t kVals[] = {
- 0xaabbccdd11223344ull, 0x33445566ffeeddccull, 0xF0F0F0F0F0F0F0F0ull,
+ 0xAABBCCDD11223344ull, 0x33445566FFEEDDCCull, 0xF0F0F0F0F0F0F0F0ull,
0x0F0F0F0F0F0F0F0Full, 0xEEEEEEEEEEEEEEEEull, 0xAAAAAAAAAAAAAAAAull,
- 0x123456789abcdef0ull, 0x80309488729ed997ull, 0xc4a0cf8116c6eb85ull,
- 0x4206db8ef3b089d5ull, 0xaa2e223ef99e29c8ull, 0x4a4357d81890b1c1ull,
- 0x8d80a085acb6ae4cull, 0x1b827e10eb5c7bd9ull, 0xbb1bc146df57a338ull};
+ 0x123456789ABCDEF0ull, 0x80309488729ED997ull, 0xC4A0CF8116C6EB85ull,
+ 0x4206DB8EF3B089D5ull, 0xAA2E223EF99E29C8ull, 0x4A4357D81890B1C1ull,
+ 0x8D80A085ACB6AE4Cull, 0x1B827E10EB5C7BD9ull, 0xBB1BC146DF57A338ull};
byte data[kMaxSize];
// foreach value in above array
@@ -603,11 +603,11 @@ TEST_F(DecoderTest, ReadI64v_Bits) {
const int kMaxSize = 10;
// Exhaustive signedness test.
const uint64_t kVals[] = {
- 0xaabbccdd11223344ull, 0x33445566ffeeddccull, 0xF0F0F0F0F0F0F0F0ull,
+ 0xAABBCCDD11223344ull, 0x33445566FFEEDDCCull, 0xF0F0F0F0F0F0F0F0ull,
0x0F0F0F0F0F0F0F0Full, 0xEEEEEEEEEEEEEEEEull, 0xAAAAAAAAAAAAAAAAull,
- 0x123456789abcdef0ull, 0x80309488729ed997ull, 0xc4a0cf8116c6eb85ull,
- 0x4206db8ef3b089d5ull, 0xaa2e223ef99e29c8ull, 0x4a4357d81890b1c1ull,
- 0x8d80a085acb6ae4cull, 0x1b827e10eb5c7bd9ull, 0xbb1bc146df57a338ull};
+ 0x123456789ABCDEF0ull, 0x80309488729ED997ull, 0xC4A0CF8116C6EB85ull,
+ 0x4206DB8EF3B089D5ull, 0xAA2E223EF99E29C8ull, 0x4A4357D81890B1C1ull,
+ 0x8D80A085ACB6AE4Cull, 0x1B827E10EB5C7BD9ull, 0xBB1BC146DF57A338ull};
byte data[kMaxSize];
// foreach value in above array
@@ -656,7 +656,7 @@ TEST_F(DecoderTest, ReadU64v_extra_bits) {
TEST_F(DecoderTest, ReadI64v_extra_bits_negative) {
// OK for negative signed values to have extra ones.
unsigned length = 0;
- byte data[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f};
+ byte data[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F};
decoder.Reset(data, data + sizeof(data));
decoder.read_i64v<Decoder::kValidate>(decoder.start(), &length);
EXPECT_EQ(10u, length);
diff --git a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
index d02dca36be..5cc4bf8196 100644
--- a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
@@ -52,36 +52,40 @@ static const WasmOpcode kInt32BinopOpcodes[] = {
#define WASM_BRV_IF_ZERO(depth, val) \
val, WASM_ZERO, kExprBrIf, static_cast<byte>(depth)
-#define EXPECT_VERIFIES_C(sig, x) Verify(true, sigs.sig(), x, x + arraysize(x))
+#define EXPECT_VERIFIES_C(sig, x) \
+ Verify(true, sigs.sig(), x, x + arraysize(x), kAppendEnd)
-#define EXPECT_FAILURE_C(sig, x) Verify(false, sigs.sig(), x, x + arraysize(x))
+#define EXPECT_FAILURE_C(sig, x) \
+ Verify(false, sigs.sig(), x, x + arraysize(x), kAppendEnd)
-#define EXPECT_VERIFIES_SC(sig, x) Verify(true, sig, x, x + arraysize(x))
+#define EXPECT_VERIFIES_SC(sig, x) \
+ Verify(true, sig, x, x + arraysize(x), kAppendEnd)
-#define EXPECT_FAILURE_SC(sig, x) Verify(false, sig, x, x + arraysize(x))
+#define EXPECT_FAILURE_SC(sig, x) \
+ Verify(false, sig, x, x + arraysize(x), kAppendEnd)
-#define EXPECT_VERIFIES_S(env, ...) \
- do { \
- static byte code[] = {__VA_ARGS__}; \
- Verify(true, env, code, code + arraysize(code)); \
+#define EXPECT_VERIFIES_S(env, ...) \
+ do { \
+ static byte code[] = {__VA_ARGS__}; \
+ Verify(true, env, code, code + arraysize(code), kAppendEnd); \
} while (false)
-#define EXPECT_FAILURE_S(env, ...) \
- do { \
- static byte code[] = {__VA_ARGS__}; \
- Verify(false, env, code, code + arraysize(code)); \
+#define EXPECT_FAILURE_S(env, ...) \
+ do { \
+ static byte code[] = {__VA_ARGS__}; \
+ Verify(false, env, code, code + arraysize(code), kAppendEnd); \
} while (false)
-#define EXPECT_VERIFIES(sig, ...) \
- do { \
- static const byte code[] = {__VA_ARGS__}; \
- Verify(true, sigs.sig(), code, code + sizeof(code)); \
+#define EXPECT_VERIFIES(sig, ...) \
+ do { \
+ static const byte code[] = {__VA_ARGS__}; \
+ Verify(true, sigs.sig(), code, code + sizeof(code), kAppendEnd); \
} while (false)
-#define EXPECT_FAILURE(sig, ...) \
- do { \
- static const byte code[] = {__VA_ARGS__}; \
- Verify(false, sigs.sig(), code, code + sizeof(code)); \
+#define EXPECT_FAILURE(sig, ...) \
+ do { \
+ static const byte code[] = {__VA_ARGS__}; \
+ Verify(false, sigs.sig(), code, code + sizeof(code), kAppendEnd); \
} while (false)
class FunctionBodyDecoderTest : public TestWithZone {
@@ -98,18 +102,24 @@ class FunctionBodyDecoderTest : public TestWithZone {
local_decls.AddLocals(count, type);
}
- void PrepareBytecode(const byte** startp, const byte** endp) {
+ enum AppendEnd : bool { kAppendEnd, kOmitEnd };
+
+ void PrepareBytecode(const byte** startp, const byte** endp,
+ AppendEnd append_end) {
const byte* start = *startp;
const byte* end = *endp;
size_t locals_size = local_decls.Size();
- size_t total_size = end - start + locals_size + 1;
+ size_t total_size = end - start + locals_size;
+ if (append_end == kAppendEnd) ++total_size;
byte* buffer = static_cast<byte*>(zone()->New(total_size));
// Prepend the local decls to the code.
local_decls.Emit(buffer);
// Emit the code.
memcpy(buffer + locals_size, start, end - start);
- // Append an extra end opcode.
- buffer[total_size - 1] = kExprEnd;
+ if (append_end == kAppendEnd) {
+ // Append an extra end opcode.
+ buffer[total_size - 1] = kExprEnd;
+ }
*startp = buffer;
*endp = buffer + total_size;
@@ -118,8 +128,8 @@ class FunctionBodyDecoderTest : public TestWithZone {
// Prepends local variable declarations and renders nice error messages for
// verification failures.
void Verify(bool expected_success, FunctionSig* sig, const byte* start,
- const byte* end) {
- PrepareBytecode(&start, &end);
+ const byte* end, AppendEnd append_end) {
+ PrepareBytecode(&start, &end, append_end);
// Verify the code.
DecodeResult result =
@@ -253,8 +263,8 @@ TEST_F(FunctionBodyDecoderTest, Int32Const1) {
TEST_F(FunctionBodyDecoderTest, EmptyFunction) {
byte code[] = {0};
- Verify(true, sigs.v_v(), code, code);
- Verify(false, sigs.i_i(), code, code);
+ Verify(true, sigs.v_v(), code, code, kAppendEnd);
+ Verify(false, sigs.i_i(), code, code, kAppendEnd);
}
TEST_F(FunctionBodyDecoderTest, IncompleteIf1) {
@@ -307,10 +317,12 @@ TEST_F(FunctionBodyDecoderTest, Float64Const) {
}
TEST_F(FunctionBodyDecoderTest, Int32Const_off_end) {
- byte code[] = {kExprI32Const, 0xaa, 0xbb, 0xcc, 0x44};
+ byte code[] = {kExprI32Const, 0xAA, 0xBB, 0xCC, 0x44};
for (int size = 1; size <= 4; size++) {
- Verify(false, sigs.i_i(), code, code + size);
+ Verify(false, sigs.i_i(), code, code + size, kAppendEnd);
+ // Should also fail without the trailing 'end' opcode.
+ Verify(false, sigs.i_i(), code, code + size, kOmitEnd);
}
}
@@ -496,7 +508,7 @@ TEST_F(FunctionBodyDecoderTest, BlockN) {
buffer[0] = kExprBlock;
buffer[1] = kLocalVoid;
buffer[i + 2] = kExprEnd;
- Verify(true, sigs.v_i(), buffer, buffer + i + 3);
+ Verify(true, sigs.v_i(), buffer, buffer + i + 3, kAppendEnd);
}
}
@@ -643,7 +655,8 @@ TEST_F(FunctionBodyDecoderTest, BlockN_off_end) {
byte code[] = {WASM_BLOCK(kExprNop, kExprNop, kExprNop, kExprNop)};
EXPECT_VERIFIES_C(v_v, code);
for (size_t i = 1; i < arraysize(code); i++) {
- Verify(false, sigs.v_v(), code, code + i);
+ Verify(false, sigs.v_v(), code, code + i, kAppendEnd);
+ Verify(false, sigs.v_v(), code, code + i, kOmitEnd);
}
}
@@ -973,7 +986,8 @@ TEST_F(FunctionBodyDecoderTest, If_off_end) {
static const byte kCode[] = {
WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0), WASM_GET_LOCAL(0))};
for (size_t len = 3; len < arraysize(kCode); len++) {
- Verify(false, sigs.i_i(), kCode, kCode + len);
+ Verify(false, sigs.i_i(), kCode, kCode + len, kAppendEnd);
+ Verify(false, sigs.i_i(), kCode, kCode + len, kOmitEnd);
}
}
@@ -1566,6 +1580,40 @@ TEST_F(FunctionBodyDecoderTest, IndirectCallsWithoutTableCrash) {
WASM_I32V_2(72)));
}
+TEST_F(FunctionBodyDecoderTest, IncompleteIndirectCall) {
+ FunctionSig* sig = sigs.i_i();
+ TestModuleBuilder builder;
+ builder.InitializeFunctionTable();
+ module = builder.module();
+
+ static byte code[] = {kExprCallIndirect};
+ Verify(false, sig, code, code + arraysize(code), kOmitEnd);
+}
+
+TEST_F(FunctionBodyDecoderTest, IncompleteStore) {
+ FunctionSig* sig = sigs.i_i();
+ TestModuleBuilder builder;
+ builder.InitializeMemory();
+ builder.InitializeFunctionTable();
+ module = builder.module();
+
+ static byte code[] = {kExprI32StoreMem};
+ Verify(false, sig, code, code + arraysize(code), kOmitEnd);
+}
+
+TEST_F(FunctionBodyDecoderTest, IncompleteS8x16Shuffle) {
+ EXPERIMENTAL_FLAG_SCOPE(simd);
+ FunctionSig* sig = sigs.i_i();
+ TestModuleBuilder builder;
+ builder.InitializeMemory();
+ builder.InitializeFunctionTable();
+ module = builder.module();
+
+ static byte code[] = {kSimdPrefix,
+ static_cast<byte>(kExprS8x16Shuffle & 0xff)};
+ Verify(false, sig, code, code + arraysize(code), kOmitEnd);
+}
+
TEST_F(FunctionBodyDecoderTest, SimpleImportCalls) {
FunctionSig* sig = sigs.i_i();
TestModuleBuilder builder;
@@ -2139,7 +2187,8 @@ TEST_F(FunctionBodyDecoderTest, BrTable2b) {
TEST_F(FunctionBodyDecoderTest, BrTable_off_end) {
static byte code[] = {B1(WASM_BR_TABLE(WASM_GET_LOCAL(0), 0, BR_TARGET(0)))};
for (size_t len = 1; len < sizeof(code); len++) {
- Verify(false, sigs.i_i(), code, code + len);
+ Verify(false, sigs.i_i(), code, code + len, kAppendEnd);
+ Verify(false, sigs.i_i(), code, code + len, kOmitEnd);
}
}
@@ -2616,7 +2665,7 @@ TEST_F(FunctionBodyDecoderTest, Regression709741) {
byte code[] = {WASM_NOP};
const byte* start = code;
const byte* end = code + sizeof(code);
- PrepareBytecode(&start, &end);
+ PrepareBytecode(&start, &end, kAppendEnd);
for (const byte* i = start; i < end; i++) {
DecodeResult result =
@@ -2919,16 +2968,16 @@ TEST_F(WasmOpcodeLengthTest, SimpleExpressions) {
TEST_F(WasmOpcodeLengthTest, SimdExpressions) {
#define TEST_SIMD(name, opcode, sig) \
- EXPECT_LENGTH_N(2, kSimdPrefix, static_cast<byte>(kExpr##name & 0xff));
+ EXPECT_LENGTH_N(2, kSimdPrefix, static_cast<byte>(kExpr##name & 0xFF));
FOREACH_SIMD_0_OPERAND_OPCODE(TEST_SIMD)
#undef TEST_SIMD
#define TEST_SIMD(name, opcode, sig) \
- EXPECT_LENGTH_N(3, kSimdPrefix, static_cast<byte>(kExpr##name & 0xff));
+ EXPECT_LENGTH_N(3, kSimdPrefix, static_cast<byte>(kExpr##name & 0xFF));
FOREACH_SIMD_1_OPERAND_OPCODE(TEST_SIMD)
#undef TEST_SIMD
- EXPECT_LENGTH_N(18, kSimdPrefix, static_cast<byte>(kExprS8x16Shuffle & 0xff));
+ EXPECT_LENGTH_N(18, kSimdPrefix, static_cast<byte>(kExprS8x16Shuffle & 0xFF));
// test for bad simd opcode
- EXPECT_LENGTH_N(2, kSimdPrefix, 0xff);
+ EXPECT_LENGTH_N(2, kSimdPrefix, 0xFF);
}
#undef EXPECT_LENGTH
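Note: the substantive change in function-body-decoder-unittest.cc is the new `AppendEnd` parameter threaded through `Verify()` and `PrepareBytecode()`, so off-end tests can assert failure both with and without the implicit trailing `end` opcode, and a synthesized terminator can no longer mask a truncated body. A minimal sketch of the pattern (illustrative only, not the V8 helper; `kExprEnd = 0x0B` is the standard wasm opcode):

```cpp
#include <cstdint>
#include <vector>

using byte = std::uint8_t;
constexpr byte kExprEnd = 0x0B;  // the wasm 'end' opcode

// Two-state enum: each call site states whether the implicit function-end
// opcode should be appended, instead of it happening unconditionally.
enum AppendEnd : bool { kAppendEnd = true, kOmitEnd = false };

std::vector<byte> PrepareBody(const byte* start, const byte* end,
                              AppendEnd append_end) {
  std::vector<byte> buffer(start, end);        // copy the raw body bytes
  if (append_end == kAppendEnd) buffer.push_back(kExprEnd);
  return buffer;
}
```

With `kOmitEnd` available, tests like `Int32Const_off_end` can check that a truncated immediate is rejected even when no terminator is appended after the cut.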
diff --git a/deps/v8/test/unittests/wasm/leb-helper-unittest.cc b/deps/v8/test/unittests/wasm/leb-helper-unittest.cc
index ec9fd3efb3..704703a3ea 100644
--- a/deps/v8/test/unittests/wasm/leb-helper-unittest.cc
+++ b/deps/v8/test/unittests/wasm/leb-helper-unittest.cc
@@ -119,7 +119,7 @@ TEST_F(LEBHelperTest, WriteAndDecode_u32v) {
CheckEncodeDecode_u32v(87348723);
CheckEncodeDecode_u32v(77777);
- for (uint32_t val = 0x3a; val != 0; val = val << 1) {
+ for (uint32_t val = 0x3A; val != 0; val = val << 1) {
CheckEncodeDecode_u32v(val);
}
}
@@ -141,7 +141,7 @@ TEST_F(LEBHelperTest, WriteAndDecode_i32v) {
CheckEncodeDecode_i32v(-87328723);
CheckEncodeDecode_i32v(-77377);
- for (uint32_t val = 0x3a; val != 0; val = val << 1) {
+ for (uint32_t val = 0x3A; val != 0; val = val << 1) {
CheckEncodeDecode_i32v(bit_cast<int32_t>(val));
}
@@ -159,7 +159,7 @@ TEST_F(LEBHelperTest, WriteAndDecode_u64v) {
CheckEncodeDecode_u64v(87348723);
CheckEncodeDecode_u64v(77777);
- for (uint64_t val = 0x3a; val != 0; val = val << 1) {
+ for (uint64_t val = 0x3A; val != 0; val = val << 1) {
CheckEncodeDecode_u64v(val);
}
}
@@ -180,7 +180,7 @@ TEST_F(LEBHelperTest, WriteAndDecode_i64v) {
CheckEncodeDecode_i64v(-87648723);
CheckEncodeDecode_i64v(-77377);
- for (uint64_t val = 0x3a; val != 0; val = val << 1) {
+ for (uint64_t val = 0x3A; val != 0; val = val << 1) {
CheckEncodeDecode_i64v(bit_cast<int64_t>(val));
}
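Note: these round-trip tests exercise LEB128, the variable-length integer encoding wasm uses throughout: seven payload bits per byte, least-significant group first, high bit set on every byte except the last. A sketch of the unsigned encoder under those rules (illustrative; `EncodeU32v` is not the LEBHelper API):

```cpp
#include <cstdint>
#include <vector>

// Unsigned LEB128: emit seven bits at a time, low group first; the high bit
// of a byte means "more bytes follow".
std::vector<std::uint8_t> EncodeU32v(std::uint32_t val) {
  std::vector<std::uint8_t> out;
  do {
    std::uint8_t b = val & 0x7F;
    val >>= 7;
    if (val != 0) b |= 0x80;  // continuation bit
    out.push_back(b);
  } while (val != 0);
  return out;
}
// Example: 0x3A fits in one byte; 0x3A << 3 == 0x1D0 encodes as {0xD0, 0x03}.
```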
diff --git a/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc b/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc
index d089d94ca2..20f3d2bf3b 100644
--- a/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc
+++ b/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc
@@ -176,7 +176,7 @@ TEST_F(WasmLoopAssignmentAnalyzerTest, Loop2) {
}
TEST_F(WasmLoopAssignmentAnalyzerTest, Malformed) {
- byte code[] = {kExprLoop, kLocalVoid, kExprF32Neg, kExprBrTable, 0x0e, 'h',
+ byte code[] = {kExprLoop, kLocalVoid, kExprF32Neg, kExprBrTable, 0x0E, 'h',
'e', 'l', 'l', 'o', ',', ' ',
'w', 'o', 'r', 'l', 'd', '!'};
BitVector* assigned = Analyze(code, code + arraysize(code));
@@ -185,8 +185,8 @@ TEST_F(WasmLoopAssignmentAnalyzerTest, Malformed) {
TEST_F(WasmLoopAssignmentAnalyzerTest, regress_642867) {
static const byte code[] = {
- WASM_LOOP(WASM_ZERO, kExprSetLocal, 0xfa, 0xff, 0xff, 0xff,
- 0x0f)}; // local index LEB128 0xfffffffa
+ WASM_LOOP(WASM_ZERO, kExprSetLocal, 0xFA, 0xFF, 0xFF, 0xFF,
+ 0x0F)}; // local index LEB128 0xFFFFFFFA
// Just make sure that the analysis does not crash.
Analyze(code, code + arraysize(code));
}
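Note: as a sanity check on the rewritten comment, the five bytes `FA FF FF FF 0F` do decode to `0xFFFFFFFA`, a deliberately out-of-range local index. A constexpr sketch (assumed decoder shape, not V8's `Decoder`):

```cpp
#include <cstdint>

// Assumed decoder shape: unsigned LEB128, low groups first, 0x80 is the
// continuation bit.
constexpr std::uint32_t DecodeU32v(const std::uint8_t* p) {
  std::uint32_t result = 0;
  int shift = 0;
  while (true) {
    std::uint8_t b = *p++;
    result |= static_cast<std::uint32_t>(b & 0x7F) << shift;
    if ((b & 0x80) == 0) return result;
    shift += 7;
  }
}

constexpr std::uint8_t kLocalIndex[] = {0xFA, 0xFF, 0xFF, 0xFF, 0x0F};
static_assert(DecodeU32v(kLocalIndex) == 0xFFFFFFFA,
              "the comment's value checks out");
```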
diff --git a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
index ae98bd9a70..a472623096 100644
--- a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
@@ -290,7 +290,7 @@ TEST_F(WasmModuleVerifyTest, ExportMutableGlobal) {
static void AppendUint32v(std::vector<byte>& buffer, uint32_t val) {
while (true) {
uint32_t next = val >> 7;
- uint32_t out = val & 0x7f;
+ uint32_t out = val & 0x7F;
if (next) {
buffer.push_back(static_cast<byte>(0x80 | out));
val = next;
@@ -553,7 +553,7 @@ TEST_F(WasmModuleVerifyTest, DataSegmentWithImmutableGlobal) {
1,
kLocalI32, // local type
0, // immutable
- WASM_INIT_EXPR_I32V_3(0x9bbaa), // init
+ WASM_INIT_EXPR_I32V_3(0x9BBAA), // init
SECTION(Data, 9),
ENTRY_COUNT(1),
LINEAR_MEMORY_INDEX_0,
@@ -577,7 +577,7 @@ TEST_F(WasmModuleVerifyTest, OneDataSegment) {
SECTION(Data, 11),
ENTRY_COUNT(1),
LINEAR_MEMORY_INDEX_0,
- WASM_INIT_EXPR_I32V_3(0x9bbaa), // dest addr
+ WASM_INIT_EXPR_I32V_3(0x9BBAA), // dest addr
U32V_1(3), // source size
'a',
'b',
@@ -595,7 +595,7 @@ TEST_F(WasmModuleVerifyTest, OneDataSegment) {
const WasmDataSegment* segment = &result.val->data_segments.back();
EXPECT_EQ(WasmInitExpr::kI32Const, segment->dest_addr.kind);
- EXPECT_EQ(0x9bbaa, segment->dest_addr.val.i32_const);
+ EXPECT_EQ(0x9BBAA, segment->dest_addr.val.i32_const);
EXPECT_EQ(kDataSegmentSourceOffset, segment->source.offset());
EXPECT_EQ(3u, segment->source.length());
}
@@ -616,14 +616,14 @@ TEST_F(WasmModuleVerifyTest, TwoDataSegments) {
SECTION(Data, 29),
ENTRY_COUNT(2), // segment count
LINEAR_MEMORY_INDEX_0,
- WASM_INIT_EXPR_I32V_3(0x7ffee), // #0: dest addr
+ WASM_INIT_EXPR_I32V_3(0x7FFEE), // #0: dest addr
U32V_1(4), // source size
1,
2,
3,
4, // data bytes
LINEAR_MEMORY_INDEX_0,
- WASM_INIT_EXPR_I32V_3(0x6ddcc), // #1: dest addr
+ WASM_INIT_EXPR_I32V_3(0x6DDCC), // #1: dest addr
U32V_1(10), // source size
1,
2,
@@ -648,12 +648,12 @@ TEST_F(WasmModuleVerifyTest, TwoDataSegments) {
const WasmDataSegment* s1 = &result.val->data_segments[1];
EXPECT_EQ(WasmInitExpr::kI32Const, s0->dest_addr.kind);
- EXPECT_EQ(0x7ffee, s0->dest_addr.val.i32_const);
+ EXPECT_EQ(0x7FFEE, s0->dest_addr.val.i32_const);
EXPECT_EQ(kDataSegment0SourceOffset, s0->source.offset());
EXPECT_EQ(4u, s0->source.length());
EXPECT_EQ(WasmInitExpr::kI32Const, s1->dest_addr.kind);
- EXPECT_EQ(0x6ddcc, s1->dest_addr.val.i32_const);
+ EXPECT_EQ(0x6DDCC, s1->dest_addr.val.i32_const);
EXPECT_EQ(kDataSegment1SourceOffset, s1->source.offset());
EXPECT_EQ(10u, s1->source.length());
}
@@ -666,7 +666,7 @@ TEST_F(WasmModuleVerifyTest, DataWithoutMemory) {
SECTION(Data, 11),
ENTRY_COUNT(1),
LINEAR_MEMORY_INDEX_0,
- WASM_INIT_EXPR_I32V_3(0x9bbaa), // dest addr
+ WASM_INIT_EXPR_I32V_3(0x9BBAA), // dest addr
U32V_1(3), // source size
'a',
'b',
@@ -718,7 +718,7 @@ TEST_F(WasmModuleVerifyTest, DataSegmentEndOverflow) {
ENTRY_COUNT(1), // one entry
LINEAR_MEMORY_INDEX_0, // mem index
WASM_INIT_EXPR_I32V_1(0), // offset
- U32V_5(0xffffffff) // size
+ U32V_5(0xFFFFFFFF) // size
};
EXPECT_FAILURE(data);
@@ -731,7 +731,7 @@ TEST_F(WasmModuleVerifyTest, OneIndirectFunction) {
// funcs ---------------------------------------------------------------
ONE_EMPTY_FUNCTION,
// table declaration ---------------------------------------------------
- SECTION(Table, 4), ENTRY_COUNT(1), kWasmAnyFunctionTypeForm, 0, 1};
+ SECTION(Table, 4), ENTRY_COUNT(1), kWasmAnyFunctionTypeCode, 0, 1};
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
@@ -746,7 +746,7 @@ TEST_F(WasmModuleVerifyTest, OneIndirectFunction) {
TEST_F(WasmModuleVerifyTest, ElementSectionWithInternalTable) {
static const byte data[] = {
// table ---------------------------------------------------------------
- SECTION(Table, 4), ENTRY_COUNT(1), kWasmAnyFunctionTypeForm, 0, 1,
+ SECTION(Table, 4), ENTRY_COUNT(1), kWasmAnyFunctionTypeCode, 0, 1,
// elements ------------------------------------------------------------
SECTION(Element, 1),
0 // entry count
@@ -764,7 +764,7 @@ TEST_F(WasmModuleVerifyTest, ElementSectionWithImportedTable) {
NAME_LENGTH(1), // --
't', // table name
kExternalTable, // import kind
- kWasmAnyFunctionTypeForm, // elem_type
+ kWasmAnyFunctionTypeCode, // elem_type
0, // no maximum field
1, // initial size
// elements ------------------------------------------------------------
@@ -797,13 +797,13 @@ TEST_F(WasmModuleVerifyTest, Regression_735887) {
// funcs ---------------------------------------------------------------
ONE_EMPTY_FUNCTION,
// table declaration ---------------------------------------------------
- SECTION(Table, 4), ENTRY_COUNT(1), kWasmAnyFunctionTypeForm, 0, 1,
+ SECTION(Table, 4), ENTRY_COUNT(1), kWasmAnyFunctionTypeCode, 0, 1,
// elements ------------------------------------------------------------
SECTION(Element, 7),
1, // entry count
TABLE_INDEX(0), WASM_INIT_EXPR_I32V_1(0),
1, // elements count
- 0x9a // invalid I32V as function index
+ 0x9A // invalid I32V as function index
};
EXPECT_FAILURE(data);
@@ -816,7 +816,7 @@ TEST_F(WasmModuleVerifyTest, OneIndirectFunction_one_entry) {
// funcs ---------------------------------------------------------------
ONE_EMPTY_FUNCTION,
// table declaration ---------------------------------------------------
- SECTION(Table, 4), ENTRY_COUNT(1), kWasmAnyFunctionTypeForm, 0, 1,
+ SECTION(Table, 4), ENTRY_COUNT(1), kWasmAnyFunctionTypeCode, 0, 1,
// elements ------------------------------------------------------------
SECTION(Element, 7),
1, // entry count
@@ -844,7 +844,7 @@ TEST_F(WasmModuleVerifyTest, MultipleIndirectFunctions) {
// funcs ------------------------------------------------------
FOUR_EMPTY_FUNCTIONS,
// table declaration -------------------------------------------
- SECTION(Table, 4), ENTRY_COUNT(1), kWasmAnyFunctionTypeForm, 0, 8,
+ SECTION(Table, 4), ENTRY_COUNT(1), kWasmAnyFunctionTypeCode, 0, 8,
// table elements ----------------------------------------------
SECTION(Element, 14),
1, // entry count
@@ -974,7 +974,7 @@ TEST_F(WasmSignatureDecodeTest, Ok_i_tt) {
}
TEST_F(WasmSignatureDecodeTest, TooManyParams) {
- static const byte data[] = {kWasmFunctionTypeForm,
+ static const byte data[] = {kWasmFunctionTypeCode,
WASM_I32V_3(kV8MaxWasmFunctionParams + 1),
kLocalI32, 0};
FunctionSig* sig =
@@ -988,7 +988,7 @@ TEST_F(WasmSignatureDecodeTest, TooManyReturns) {
const int max_return_count = static_cast<int>(
FLAG_experimental_wasm_mv ? kV8MaxWasmFunctionMultiReturns
: kV8MaxWasmFunctionReturns);
- byte data[] = {kWasmFunctionTypeForm, 0, WASM_I32V_3(max_return_count + 1),
+ byte data[] = {kWasmFunctionTypeCode, 0, WASM_I32V_3(max_return_count + 1),
kLocalI32};
FunctionSig* sig =
DecodeWasmSignatureForTesting(zone(), data, data + sizeof(data));
@@ -1108,11 +1108,11 @@ TEST_F(WasmModuleVerifyTest, OnlyUnknownSectionEmpty) {
TEST_F(WasmModuleVerifyTest, OnlyUnknownSectionNonEmpty) {
const byte data[] = {
UNKNOWN_SECTION(5),
- 0xff,
- 0xff,
- 0xff,
- 0xff,
- 0xff, // section data
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF, // section data
};
EXPECT_VERIFIES(data);
}
@@ -1131,7 +1131,7 @@ TEST_F(WasmModuleVerifyTest, SignatureFollowedByUnknownSection) {
// signatures
SIGNATURES_SECTION_VOID_VOID,
// -----------------------------------------------------------
- UNKNOWN_SECTION(5), 0xff, 0xff, 0xff, 0xff, 0xff,
+ UNKNOWN_SECTION(5), 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
};
EXPECT_VERIFIES(data);
}
@@ -1497,7 +1497,7 @@ TEST_F(WasmModuleVerifyTest, Regression_738097) {
FUNCTION_SIGNATURES_SECTION(1, 0), // --
SECTION(Code, 1 + 5 + 1), // --
1, // --
- U32V_5(0xffffffff), // function size,
+ U32V_5(0xFFFFFFFF), // function size,
0 // No real body
};
EXPECT_FAILURE(data);
@@ -1620,16 +1620,16 @@ TEST_F(WasmModuleVerifyTest, Names_two_empty) {
TEST_F(WasmModuleVerifyTest, Regression684855) {
static const byte data[] = {
SECTION_NAMES(12),
- 0xfb, // functions count
+ 0xFB, // functions count
0x27, // |
0x00, // function name length
- 0xff, // local names count
- 0xff, // |
- 0xff, // |
- 0xff, // |
- 0xff, // |
- 0xff, // error: "varint too large"
- 0xff, // |
+ 0xFF, // local names count
+ 0xFF, // |
+ 0xFF, // |
+ 0xFF, // |
+ 0xFF, // |
+ 0xFF, // error: "varint too large"
+ 0xFF, // |
0x00, // --
0x00 // --
};
@@ -1703,7 +1703,7 @@ TEST_F(WasmModuleVerifyTest, Multiple_Named_Sections) {
}
TEST_F(WasmModuleVerifyTest, Section_Name_No_UTF8) {
- static const byte data[] = {SECTION(Unknown, 4), 1, 0xff, 17, 18};
+ static const byte data[] = {SECTION(Unknown, 4), 1, 0xFF, 17, 18};
EXPECT_FAILURE(data);
}
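Note: for orientation, the data segments these tests assemble by hand follow the shape (entry count, memory index, init expression, source size, payload bytes). A hand-checked sketch of the `OneDataSegment` section body, with the 3-byte LEB128 for `0x9BBAA` worked out explicitly (`kExprI32Const`/`kExprEnd` are the standard wasm opcode values):

```cpp
#include <cstdint>

using byte = std::uint8_t;
constexpr byte kExprI32Const = 0x41;  // standard wasm opcodes
constexpr byte kExprEnd = 0x0B;

const byte kDataSectionBody[] = {
    1,                 // entry count
    0,                 // linear memory index 0
    kExprI32Const,     // init expr: i32.const ...
    0xAA, 0xF7, 0x26,  // ... 0x9BBAA as a 3-byte LEB128
    kExprEnd,          // ... end of init expr
    3,                 // source size
    'a', 'b', 'c',     // payload
};
```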
diff --git a/deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc b/deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc
index 41211ac960..8655651332 100644
--- a/deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc
@@ -140,20 +140,20 @@ TEST_F(WasmStreamingDecoderTest, IncompleteModuleHeader) {
TEST_F(WasmStreamingDecoderTest, MagicAndVersion) {
const uint8_t data[] = {U32_LE(kWasmMagic), U32_LE(kWasmVersion)};
- ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)), 0, 0);
+ ExpectVerifies(ArrayVector(data), 0, 0);
}
TEST_F(WasmStreamingDecoderTest, BadMagic) {
for (uint32_t x = 1; x; x <<= 1) {
const uint8_t data[] = {U32_LE(kWasmMagic ^ x), U32_LE(kWasmVersion)};
- ExpectFailure(Vector<const uint8_t>(data, arraysize(data)));
+ ExpectFailure(ArrayVector(data));
}
}
TEST_F(WasmStreamingDecoderTest, BadVersion) {
for (uint32_t x = 1; x; x <<= 1) {
const uint8_t data[] = {U32_LE(kWasmMagic), U32_LE(kWasmVersion ^ x)};
- ExpectFailure(Vector<const uint8_t>(data, arraysize(data)));
+ ExpectFailure(ArrayVector(data));
}
}
@@ -170,7 +170,7 @@ TEST_F(WasmStreamingDecoderTest, OneSection) {
0x0, // 5
0x0 // 6
};
- ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)), 1, 0);
+ ExpectVerifies(ArrayVector(data), 1, 0);
}
TEST_F(WasmStreamingDecoderTest, OneSection_b) {
@@ -187,7 +187,7 @@ TEST_F(WasmStreamingDecoderTest, OneSection_b) {
0x0, // 5
0x0 // 6
};
- ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)), 1, 0);
+ ExpectVerifies(ArrayVector(data), 1, 0);
}
TEST_F(WasmStreamingDecoderTest, OneShortSection) {
@@ -201,7 +201,7 @@ TEST_F(WasmStreamingDecoderTest, OneShortSection) {
0x0, // Payload
0x0 // 2
};
- ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)), 1, 0);
+ ExpectVerifies(ArrayVector(data), 1, 0);
}
TEST_F(WasmStreamingDecoderTest, OneShortSection_b) {
@@ -215,7 +215,7 @@ TEST_F(WasmStreamingDecoderTest, OneShortSection_b) {
0x0, // Payload
0x0 // 2
};
- ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)), 1, 0);
+ ExpectVerifies(ArrayVector(data), 1, 0);
}
TEST_F(WasmStreamingDecoderTest, OneEmptySection) {
@@ -225,7 +225,7 @@ TEST_F(WasmStreamingDecoderTest, OneEmptySection) {
0x1, // Section ID
0x0 // Section Length
};
- ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)), 1, 0);
+ ExpectVerifies(ArrayVector(data), 1, 0);
}
TEST_F(WasmStreamingDecoderTest, OneSectionNotEnoughPayload1) {
@@ -240,7 +240,7 @@ TEST_F(WasmStreamingDecoderTest, OneSectionNotEnoughPayload1) {
0x0, // 4
0x0 // 5
};
- ExpectFailure(Vector<const uint8_t>(data, arraysize(data)));
+ ExpectFailure(ArrayVector(data));
}
TEST_F(WasmStreamingDecoderTest, OneSectionNotEnoughPayload2) {
@@ -251,7 +251,7 @@ TEST_F(WasmStreamingDecoderTest, OneSectionNotEnoughPayload2) {
0x6, // Section Length
0x0 // Payload
};
- ExpectFailure(Vector<const uint8_t>(data, arraysize(data)));
+ ExpectFailure(ArrayVector(data));
}
TEST_F(WasmStreamingDecoderTest, OneSectionInvalidLength) {
@@ -265,7 +265,7 @@ TEST_F(WasmStreamingDecoderTest, OneSectionInvalidLength) {
0x80, // --
0x80, // --
};
- ExpectFailure(Vector<const uint8_t>(data, arraysize(data)));
+ ExpectFailure(ArrayVector(data));
}
TEST_F(WasmStreamingDecoderTest, TwoLongSections) {
@@ -290,7 +290,7 @@ TEST_F(WasmStreamingDecoderTest, TwoLongSections) {
0x0, // 6
0x0 // 7
};
- ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)), 2, 0);
+ ExpectVerifies(ArrayVector(data), 2, 0);
}
TEST_F(WasmStreamingDecoderTest, TwoShortSections) {
@@ -305,7 +305,7 @@ TEST_F(WasmStreamingDecoderTest, TwoShortSections) {
0x0, // Payload
0x0, // 2
};
- ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)), 2, 0);
+ ExpectVerifies(ArrayVector(data), 2, 0);
}
TEST_F(WasmStreamingDecoderTest, TwoSectionsShortLong) {
@@ -325,7 +325,7 @@ TEST_F(WasmStreamingDecoderTest, TwoSectionsShortLong) {
0x0, // 6
0x0 // 7
};
- ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)), 2, 0);
+ ExpectVerifies(ArrayVector(data), 2, 0);
}
TEST_F(WasmStreamingDecoderTest, TwoEmptySections) {
@@ -337,7 +337,7 @@ TEST_F(WasmStreamingDecoderTest, TwoEmptySections) {
0x2, // Section ID
0x0 // Section Length
};
- ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)), 2, 0);
+ ExpectVerifies(ArrayVector(data), 2, 0);
}
TEST_F(WasmStreamingDecoderTest, OneFunction) {
@@ -355,7 +355,7 @@ TEST_F(WasmStreamingDecoderTest, OneFunction) {
0x0, // 5
0x0, // 6
};
- ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)), 0, 1);
+ ExpectVerifies(ArrayVector(data), 0, 1);
}
TEST_F(WasmStreamingDecoderTest, OneShortFunction) {
@@ -368,7 +368,7 @@ TEST_F(WasmStreamingDecoderTest, OneShortFunction) {
0x1, // Function Length
0x0, // Function
};
- ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)), 0, 1);
+ ExpectVerifies(ArrayVector(data), 0, 1);
}
TEST_F(WasmStreamingDecoderTest, EmptyFunction) {
@@ -380,7 +380,7 @@ TEST_F(WasmStreamingDecoderTest, EmptyFunction) {
0x1, // Number of Functions
0x0, // Function Length
};
- ExpectFailure(Vector<const uint8_t>(data, arraysize(data)));
+ ExpectFailure(ArrayVector(data));
}
TEST_F(WasmStreamingDecoderTest, TwoFunctions) {
@@ -406,7 +406,7 @@ TEST_F(WasmStreamingDecoderTest, TwoFunctions) {
0x0, // 6
0x0, // 7
};
- ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)), 0, 2);
+ ExpectVerifies(ArrayVector(data), 0, 2);
}
TEST_F(WasmStreamingDecoderTest, TwoFunctions_b) {
@@ -414,7 +414,7 @@ TEST_F(WasmStreamingDecoderTest, TwoFunctions_b) {
U32_LE(kWasmMagic), // --
U32_LE(kWasmVersion), // --
kCodeSectionCode, // Section ID
- 0xb, // Section Length
+ 0xB, // Section Length
0x2, // Number of Functions
0x1, // Function Length
0x0, // Function
@@ -427,7 +427,7 @@ TEST_F(WasmStreamingDecoderTest, TwoFunctions_b) {
0x0, // 6
0x0, // 7
};
- ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)), 0, 2);
+ ExpectVerifies(ArrayVector(data), 0, 2);
}
TEST_F(WasmStreamingDecoderTest, CodeSectionLengthZero) {
@@ -437,7 +437,7 @@ TEST_F(WasmStreamingDecoderTest, CodeSectionLengthZero) {
kCodeSectionCode, // Section ID
0x0, // Section Length
};
- ExpectFailure(Vector<const uint8_t>(data, arraysize(data)));
+ ExpectFailure(ArrayVector(data));
}
TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooHigh) {
@@ -445,7 +445,7 @@ TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooHigh) {
U32_LE(kWasmMagic), // --
U32_LE(kWasmVersion), // --
kCodeSectionCode, // Section ID
- 0xd, // Section Length
+ 0xD, // Section Length
0x2, // Number of Functions
0x7, // Function Length
0x0, // Function
@@ -458,7 +458,7 @@ TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooHigh) {
0x1, // Function Length
0x0, // Function
};
- ExpectFailure(Vector<const uint8_t>(data, arraysize(data)));
+ ExpectFailure(ArrayVector(data));
}
TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooHighZeroFunctions) {
@@ -466,7 +466,7 @@ TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooHighZeroFunctions) {
U32_LE(kWasmMagic), // --
U32_LE(kWasmVersion), // --
kCodeSectionCode, // Section ID
- 0xd, // Section Length
+ 0xD, // Section Length
0x0, // Number of Functions
};
ExpectFailure(ArrayVector(data));
@@ -490,7 +490,7 @@ TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooLow) {
0x1, // Function Length
0x0, // Function
};
- ExpectFailure(Vector<const uint8_t>(data, arraysize(data)));
+ ExpectFailure(ArrayVector(data));
}
TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooLowEndsInNumFunctions) {
@@ -513,7 +513,7 @@ TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooLowEndsInNumFunctions) {
0x1, // Function Length
0x0, // Function
};
- ExpectFailure(Vector<const uint8_t>(data, arraysize(data)));
+ ExpectFailure(ArrayVector(data));
}
TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooLowEndsInFunctionLength) {
@@ -538,7 +538,7 @@ TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooLowEndsInFunctionLength) {
0x1, // Function Length
0x0, // Function
};
- ExpectFailure(Vector<const uint8_t>(data, arraysize(data)));
+ ExpectFailure(ArrayVector(data));
}
TEST_F(WasmStreamingDecoderTest, NumberOfFunctionsTooHigh) {
@@ -546,7 +546,7 @@ TEST_F(WasmStreamingDecoderTest, NumberOfFunctionsTooHigh) {
U32_LE(kWasmMagic), // --
U32_LE(kWasmVersion), // --
kCodeSectionCode, // Section ID
- 0xb, // Section Length
+ 0xB, // Section Length
0x4, // Number of Functions
0x7, // Function Length
0x0, // Function
@@ -559,7 +559,7 @@ TEST_F(WasmStreamingDecoderTest, NumberOfFunctionsTooHigh) {
0x1, // Function Length
0x0, // Function
};
- ExpectFailure(Vector<const uint8_t>(data, arraysize(data)));
+ ExpectFailure(ArrayVector(data));
}
TEST_F(WasmStreamingDecoderTest, NumberOfFunctionsTooLow) {
@@ -567,7 +567,7 @@ TEST_F(WasmStreamingDecoderTest, NumberOfFunctionsTooLow) {
U32_LE(kWasmMagic), // --
U32_LE(kWasmVersion), // --
kCodeSectionCode, // Section ID
- 0xe, // Section Length
+ 0xE, // Section Length
0x2, // Number of Functions
0x1, // Function Length
0x0, // Function
@@ -583,7 +583,7 @@ TEST_F(WasmStreamingDecoderTest, NumberOfFunctionsTooLow) {
0x0, // 6
0x0, // 7
};
- ExpectFailure(Vector<const uint8_t>(data, arraysize(data)));
+ ExpectFailure(ArrayVector(data));
}
TEST_F(WasmStreamingDecoderTest, TwoCodeSections) {
@@ -601,7 +601,7 @@ TEST_F(WasmStreamingDecoderTest, TwoCodeSections) {
0x1, // Function Length
0x0, // Function
};
- ExpectFailure(Vector<const uint8_t>(data, arraysize(data)));
+ ExpectFailure(ArrayVector(data));
}
TEST_F(WasmStreamingDecoderTest, UnknownSection) {
@@ -619,7 +619,7 @@ TEST_F(WasmStreamingDecoderTest, UnknownSection) {
0x1, // Name
0x0, // Content
};
- ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)), 1, 1);
+ ExpectVerifies(ArrayVector(data), 1, 1);
}
TEST_F(WasmStreamingDecoderTest, UnknownSectionSandwich) {
@@ -642,7 +642,7 @@ TEST_F(WasmStreamingDecoderTest, UnknownSectionSandwich) {
0x1, // Function Length
0x0, // Function
};
- ExpectFailure(Vector<const uint8_t>(data, arraysize(data)));
+ ExpectFailure(ArrayVector(data));
}
} // namespace wasm
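Note: the mechanical change in this file replaces `Vector<const uint8_t>(data, arraysize(data))` with `ArrayVector(data)`, which deduces the length from the array type and so cannot go stale. A self-contained sketch of the idea (`Vector` here is a minimal stand-in, not V8's class):

```cpp
#include <cstddef>
#include <cstdint>

// Minimal stand-in for the real Vector, just enough for the sketch.
template <typename T>
struct Vector {
  Vector(T* start, std::size_t length) : start_(start), length_(length) {}
  T* start_;
  std::size_t length_;
};

// Deduces N from the array reference, so the length can never drift from
// the array it describes.
template <typename T, std::size_t N>
Vector<T> ArrayVector(T (&arr)[N]) {
  return Vector<T>(arr, N);
}

int main() {
  const std::uint8_t data[] = {0x00, 0x61, 0x73, 0x6D};  // "\0asm" magic
  Vector<const std::uint8_t> v = ArrayVector(data);      // length deduced as 4
  return v.length_ == 4 ? 0 : 1;
}
```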
diff --git a/deps/v8/test/unittests/wasm/wasm-heap-unittest.cc b/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc
index d0c9284f93..1b6af25a4a 100644
--- a/deps/v8/test/unittests/wasm/wasm-heap-unittest.cc
+++ b/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc
@@ -5,7 +5,7 @@
#include "test/unittests/test-utils.h"
#include "testing/gmock/include/gmock/gmock.h"
-#include "src/wasm/wasm-heap.h"
+#include "src/wasm/wasm-code-manager.h"
namespace v8 {
namespace internal {
@@ -190,7 +190,7 @@ class WasmCodeManagerTest : public TestWithIsolate {
return native_module->AddCode(desc, 0, index, 0, {}, false);
}
- size_t page() const { return base::OS::AllocatePageSize(); }
+ size_t page() const { return AllocatePageSize(); }
v8::Isolate* v8_isolate() const {
return reinterpret_cast<v8::Isolate*>(isolate());
}
diff --git a/deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc b/deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc
index 45a4c5a59b..d1087c02a6 100644
--- a/deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc
+++ b/deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc
@@ -37,7 +37,7 @@ TEST_F(WasmMacroGenTest, Constants) {
EXPECT_SIZE(4, WASM_I64V_3(10000));
EXPECT_SIZE(5, WASM_I64V_4(-9828934));
EXPECT_SIZE(6, WASM_I64V_5(-1119828934));
- EXPECT_SIZE(10, WASM_I64V_9(0x123456789abcdef0ULL));
+ EXPECT_SIZE(10, WASM_I64V_9(0x123456789ABCDEF0ULL));
EXPECT_SIZE(5, WASM_F32(1.0f));
EXPECT_SIZE(5, WASM_F32(10000.0f));
diff --git a/deps/v8/test/wasm-spec-tests/testcfg.py b/deps/v8/test/wasm-spec-tests/testcfg.py
index d1eae764e3..7f99ed4711 100644
--- a/deps/v8/test/wasm-spec-tests/testcfg.py
+++ b/deps/v8/test/wasm-spec-tests/testcfg.py
@@ -7,10 +7,7 @@ import os
from testrunner.local import testsuite
from testrunner.objects import testcase
-class WasmSpecTestsTestSuite(testsuite.TestSuite):
- def __init__(self, name, root):
- super(WasmSpecTestsTestSuite, self).__init__(name, root)
-
+class TestSuite(testsuite.TestSuite):
def ListTests(self, context):
tests = []
for dirname, dirs, files in os.walk(self.root):
@@ -21,15 +18,18 @@ class WasmSpecTestsTestSuite(testsuite.TestSuite):
fullpath = os.path.join(dirname, filename)
relpath = fullpath[len(self.root) + 1 : -3]
testname = relpath.replace(os.path.sep, "/")
- test = testcase.TestCase(self, testname)
+ test = self._create_test(testname)
tests.append(test)
return tests
- def GetParametersForTestCase(self, testcase, context):
- flags = testcase.flags + context.mode_flags
- files = [os.path.join(self.root, testcase.path + self.suffix())]
- return files, flags, {}
+ def _test_class(self):
+ return TestCase
+
+
+class TestCase(testcase.TestCase):
+ def _get_files_params(self, ctx):
+ return [os.path.join(self.suite.root, self.path + self._get_suffix())]
def GetSuite(name, root):
- return WasmSpecTestsTestSuite(name, root)
+ return TestSuite(name, root)
diff --git a/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1 b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
index a0f2e20e1f..6fc33c12e2 100644
--- a/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
+++ b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
@@ -1 +1 @@
-4ca2075a2ceb1c7b6e4d7b2d26d23fdb9998fd56
\ No newline at end of file
+310ca173c041a53775a713ac948c3627ae357f8d
\ No newline at end of file
diff --git a/deps/v8/test/webkit/date-constructor-expected.txt b/deps/v8/test/webkit/date-constructor-expected.txt
index 366a39de8d..3ad2cb0f79 100644
--- a/deps/v8/test/webkit/date-constructor-expected.txt
+++ b/deps/v8/test/webkit/date-constructor-expected.txt
@@ -27,37 +27,36 @@ On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE
PASS isNaN(new Date("")) is true
-PASS new Date(1111).getTime() is 1111
-PASS new Date(object).getTime() is 1111
-PASS new Date(new Date(1111)).getTime() is 1111
-PASS new Date(new Date(1111).toString()).getTime() is 1000
-PASS new Date(1111, 1).getTime() - timeZoneOffset is -27104803200000
-PASS new Date(1111, 1, 1).getTime() - timeZoneOffset is -27104803200000
-PASS new Date(1111, 1, 1, 1).getTime() - timeZoneOffset is -27104799600000
-PASS new Date(1111, 1, 1, 1, 1).getTime() - timeZoneOffset is -27104799540000
-PASS new Date(1111, 1, 1, 1, 1, 1).getTime() - timeZoneOffset is -27104799539000
-PASS new Date(1111, 1, 1, 1, 1, 1, 1).getTime() - timeZoneOffset is -27104799538999
-PASS new Date(1111, 1, 1, 1, 1, 1, 1, 1).getTime() - timeZoneOffset is -27104799538999
-PASS new Date(1111, 1, 1, 1, 1, 1, 1, 1, 1).getTime() - timeZoneOffset is -27104799538999
-PASS new Date(1111, 1, 1, 1, 1, 1, 1, 1, 1).getTime() - timeZoneOffset is -27104799538999
-PASS new Date(new Date(1111, 1)).getTime() - timeZoneOffset is -27104803200000
-PASS new Date(new Date(1111, 1, 1)).getTime() - timeZoneOffset is -27104803200000
-PASS new Date(new Date(1111, 1, 1, 1)).getTime() - timeZoneOffset is -27104799600000
-PASS new Date(new Date(1111, 1, 1, 1, 1, 1)).getTime() - timeZoneOffset is -27104799539000
-PASS new Date(new Date(1111, 1, 1, 1, 1, 1, 1)).getTime() - timeZoneOffset is -27104799538999
-PASS new Date(new Date(1111, 1, 1, 1, 1, 1, 1, 1)).getTime() - timeZoneOffset is -27104799538999
-PASS new Date(new Date(1111, 1, 1, 1, 1, 1, 1, 1, 1)).getTime() - timeZoneOffset is -27104799538999
+PASS new Date(1995).getTime() is 1995
+PASS new Date(object).getTime() is 1995
+PASS new Date(new Date(1995)).getTime() is 1995
+PASS new Date(new Date(1995).toString()).getTime() is 1000
+PASS new Date(1995, 1).getTime() - timeZoneOffset is 791596800000
+PASS new Date(1995, 1, 1).getTime() - timeZoneOffset is 791596800000
+PASS new Date(1995, 1, 1, 1).getTime() - timeZoneOffset is 791600400000
+PASS new Date(1995, 1, 1, 1, 1).getTime() - timeZoneOffset is 791600460000
+PASS new Date(1995, 1, 1, 1, 1, 1).getTime() - timeZoneOffset is 791600461000
+PASS new Date(1995, 1, 1, 1, 1, 1, 1).getTime() - timeZoneOffset is 791600461001
+PASS new Date(1995, 1, 1, 1, 1, 1, 1, 1).getTime() - timeZoneOffset is 791600461001
+PASS new Date(1995, 1, 1, 1, 1, 1, 1, 1, 1).getTime() - timeZoneOffset is 791600461001
+PASS new Date(new Date(1995, 1)).getTime() - timeZoneOffset is 791596800000
+PASS new Date(new Date(1995, 1, 1)).getTime() - timeZoneOffset is 791596800000
+PASS new Date(new Date(1995, 1, 1, 1)).getTime() - timeZoneOffset is 791600400000
+PASS new Date(new Date(1995, 1, 1, 1, 1)).getTime() - timeZoneOffset is 791600460000
+PASS new Date(new Date(1995, 1, 1, 1, 1, 1)).getTime() - timeZoneOffset is 791600461000
+PASS new Date(new Date(1995, 1, 1, 1, 1, 1, 1)).getTime() - timeZoneOffset is 791600461001
+PASS new Date(new Date(1995, 1, 1, 1, 1, 1, 1, 1)).getTime() - timeZoneOffset is 791600461001
+PASS new Date(new Date(1995, 1, 1, 1, 1, 1, 1, 1, 1)).getTime() - timeZoneOffset is 791600461001
PASS Number(new Date(new Date(Infinity, 1, 1, 1, 1, 1, 1, 1, 1)).getTime() - timeZoneOffset) is Number.NaN
-PASS Number(new Date(new Date(1, Infinity, 1, 1, 1, 1, 1, 1, 1)).getTime() - timeZoneOffset) is Number.NaN
-PASS Number(new Date(new Date(1, 1, Infinity, 1, 1, 1, 1, 1, 1)).getTime() - timeZoneOffset) is Number.NaN
-PASS Number(new Date(new Date(1, 1, 1, Infinity, 1, 1, 1, 1, 1)).getTime() - timeZoneOffset) is Number.NaN
-PASS Number(new Date(new Date(1, 1, 1, 1, Infinity, 1, 1, 1, 1)).getTime() - timeZoneOffset) is Number.NaN
-PASS Number(new Date(new Date(1, 1, 1, 1, 1, Infinity, 1, 1, 1)).getTime() - timeZoneOffset) is Number.NaN
-PASS Number(new Date(new Date(1, 1, 1, 1, 1, 1, Infinity, 1, 1)).getTime() - timeZoneOffset) is Number.NaN
-PASS Number(new Date(new Date(1, 1, 1, 1, 1, 1, 1, 1, Infinity)).getTime() - timeZoneOffset) is -2174770738999
+PASS Number(new Date(new Date(95, Infinity, 1, 1, 1, 1, 1, 1, 1)).getTime() - timeZoneOffset) is Number.NaN
+PASS Number(new Date(new Date(95, 1, Infinity, 1, 1, 1, 1, 1, 1)).getTime() - timeZoneOffset) is Number.NaN
+PASS Number(new Date(new Date(95, 1, 1, Infinity, 1, 1, 1, 1, 1)).getTime() - timeZoneOffset) is Number.NaN
+PASS Number(new Date(new Date(95, 1, 1, 1, Infinity, 1, 1, 1, 1)).getTime() - timeZoneOffset) is Number.NaN
+PASS Number(new Date(new Date(95, 1, 1, 1, 1, Infinity, 1, 1, 1)).getTime() - timeZoneOffset) is Number.NaN
+PASS Number(new Date(new Date(95, 1, 1, 1, 1, 1, Infinity, 1, 1)).getTime() - timeZoneOffset) is Number.NaN
+PASS Number(new Date(new Date(95, 1, 1, 1, 1, 1, 1, 1, Infinity)).getTime() - timeZoneOffset) is 791600461001
PASS testStr is "1234567"
PASS testStr is "1234567"
PASS successfullyParsed is true
TEST COMPLETE
-
diff --git a/deps/v8/test/webkit/date-constructor.js b/deps/v8/test/webkit/date-constructor.js
index e96d970193..ff37d0b9d1 100644
--- a/deps/v8/test/webkit/date-constructor.js
+++ b/deps/v8/test/webkit/date-constructor.js
@@ -28,44 +28,44 @@ description(
);
var object = new Object;
-object.valueOf = function() { return 1111; }
+object.valueOf = function() { return 1995; }
object.toString = function() { return "2222"; }
shouldBe('isNaN(new Date(""))', 'true');
-var timeZoneOffset = Date.parse("Dec 25 1995") - Date.parse("Dec 25 1995 GMT");
+var timeZoneOffset = Date.parse("Feb 1 1995") - Date.parse("Feb 1 1995 GMT");
-shouldBe('new Date(1111).getTime()', '1111');
-shouldBe('new Date(object).getTime()', '1111');
-shouldBe('new Date(new Date(1111)).getTime()', '1111');
-shouldBe('new Date(new Date(1111).toString()).getTime()', '1000');
+shouldBe('new Date(1995).getTime()', '1995');
+shouldBe('new Date(object).getTime()', '1995');
+shouldBe('new Date(new Date(1995)).getTime()', '1995');
+shouldBe('new Date(new Date(1995).toString()).getTime()', '1000');
-shouldBe('new Date(1111, 1).getTime() - timeZoneOffset', '-27104803200000');
-shouldBe('new Date(1111, 1, 1).getTime() - timeZoneOffset', '-27104803200000');
-shouldBe('new Date(1111, 1, 1, 1).getTime() - timeZoneOffset', '-27104799600000');
-shouldBe('new Date(1111, 1, 1, 1, 1).getTime() - timeZoneOffset', '-27104799540000');
-shouldBe('new Date(1111, 1, 1, 1, 1, 1).getTime() - timeZoneOffset', '-27104799539000');
-shouldBe('new Date(1111, 1, 1, 1, 1, 1, 1).getTime() - timeZoneOffset', '-27104799538999');
-shouldBe('new Date(1111, 1, 1, 1, 1, 1, 1, 1).getTime() - timeZoneOffset', '-27104799538999');
-shouldBe('new Date(1111, 1, 1, 1, 1, 1, 1, 1, 1).getTime() - timeZoneOffset', '-27104799538999');
-shouldBe('new Date(1111, 1, 1, 1, 1, 1, 1, 1, 1).getTime() - timeZoneOffset', '-27104799538999');
+shouldBe('new Date(1995, 1).getTime() - timeZoneOffset', '791596800000');
+shouldBe('new Date(1995, 1, 1).getTime() - timeZoneOffset', '791596800000');
+shouldBe('new Date(1995, 1, 1, 1).getTime() - timeZoneOffset', '791600400000');
+shouldBe('new Date(1995, 1, 1, 1, 1).getTime() - timeZoneOffset', '791600460000');
+shouldBe('new Date(1995, 1, 1, 1, 1, 1).getTime() - timeZoneOffset', '791600461000');
+shouldBe('new Date(1995, 1, 1, 1, 1, 1, 1).getTime() - timeZoneOffset', '791600461001');
+shouldBe('new Date(1995, 1, 1, 1, 1, 1, 1, 1).getTime() - timeZoneOffset', '791600461001');
+shouldBe('new Date(1995, 1, 1, 1, 1, 1, 1, 1, 1).getTime() - timeZoneOffset', '791600461001');
-shouldBe('new Date(new Date(1111, 1)).getTime() - timeZoneOffset', '-27104803200000');
-shouldBe('new Date(new Date(1111, 1, 1)).getTime() - timeZoneOffset', '-27104803200000');
-shouldBe('new Date(new Date(1111, 1, 1, 1)).getTime() - timeZoneOffset', '-27104799600000');
-shouldBe('new Date(new Date(1111, 1, 1, 1, 1, 1)).getTime() - timeZoneOffset', '-27104799539000');
-shouldBe('new Date(new Date(1111, 1, 1, 1, 1, 1, 1)).getTime() - timeZoneOffset', '-27104799538999');
-shouldBe('new Date(new Date(1111, 1, 1, 1, 1, 1, 1, 1)).getTime() - timeZoneOffset', '-27104799538999');
-shouldBe('new Date(new Date(1111, 1, 1, 1, 1, 1, 1, 1, 1)).getTime() - timeZoneOffset', '-27104799538999');
+shouldBe('new Date(new Date(1995, 1)).getTime() - timeZoneOffset', '791596800000');
+shouldBe('new Date(new Date(1995, 1, 1)).getTime() - timeZoneOffset', '791596800000');
+shouldBe('new Date(new Date(1995, 1, 1, 1)).getTime() - timeZoneOffset', '791600400000');
+shouldBe('new Date(new Date(1995, 1, 1, 1, 1)).getTime() - timeZoneOffset', '791600460000');
+shouldBe('new Date(new Date(1995, 1, 1, 1, 1, 1)).getTime() - timeZoneOffset', '791600461000');
+shouldBe('new Date(new Date(1995, 1, 1, 1, 1, 1, 1)).getTime() - timeZoneOffset', '791600461001');
+shouldBe('new Date(new Date(1995, 1, 1, 1, 1, 1, 1, 1)).getTime() - timeZoneOffset', '791600461001');
+shouldBe('new Date(new Date(1995, 1, 1, 1, 1, 1, 1, 1, 1)).getTime() - timeZoneOffset', '791600461001');
shouldBe("Number(new Date(new Date(Infinity, 1, 1, 1, 1, 1, 1, 1, 1)).getTime() - timeZoneOffset)", 'Number.NaN');
-shouldBe("Number(new Date(new Date(1, Infinity, 1, 1, 1, 1, 1, 1, 1)).getTime() - timeZoneOffset)", 'Number.NaN');
-shouldBe("Number(new Date(new Date(1, 1, Infinity, 1, 1, 1, 1, 1, 1)).getTime() - timeZoneOffset)", 'Number.NaN');
-shouldBe("Number(new Date(new Date(1, 1, 1, Infinity, 1, 1, 1, 1, 1)).getTime() - timeZoneOffset)", 'Number.NaN');
-shouldBe("Number(new Date(new Date(1, 1, 1, 1, Infinity, 1, 1, 1, 1)).getTime() - timeZoneOffset)", 'Number.NaN');
-shouldBe("Number(new Date(new Date(1, 1, 1, 1, 1, Infinity, 1, 1, 1)).getTime() - timeZoneOffset)", 'Number.NaN');
-shouldBe("Number(new Date(new Date(1, 1, 1, 1, 1, 1, Infinity, 1, 1)).getTime() - timeZoneOffset)", 'Number.NaN');
-shouldBe("Number(new Date(new Date(1, 1, 1, 1, 1, 1, 1, 1, Infinity)).getTime() - timeZoneOffset)", '-2174770738999');
+shouldBe("Number(new Date(new Date(95, Infinity, 1, 1, 1, 1, 1, 1, 1)).getTime() - timeZoneOffset)", 'Number.NaN');
+shouldBe("Number(new Date(new Date(95, 1, Infinity, 1, 1, 1, 1, 1, 1)).getTime() - timeZoneOffset)", 'Number.NaN');
+shouldBe("Number(new Date(new Date(95, 1, 1, Infinity, 1, 1, 1, 1, 1)).getTime() - timeZoneOffset)", 'Number.NaN');
+shouldBe("Number(new Date(new Date(95, 1, 1, 1, Infinity, 1, 1, 1, 1)).getTime() - timeZoneOffset)", 'Number.NaN');
+shouldBe("Number(new Date(new Date(95, 1, 1, 1, 1, Infinity, 1, 1, 1)).getTime() - timeZoneOffset)", 'Number.NaN');
+shouldBe("Number(new Date(new Date(95, 1, 1, 1, 1, 1, Infinity, 1, 1)).getTime() - timeZoneOffset)", 'Number.NaN');
+shouldBe("Number(new Date(new Date(95, 1, 1, 1, 1, 1, 1, 1, Infinity)).getTime() - timeZoneOffset)", '791600461001');
// In Firefox, the results of the following tests are timezone-dependent, which likely implies that the implementation is not quite correct.
// Our results are even worse, though, as the dates are clipped: (new Date(1111, 1201).getTime()) == (new Date(1111, 601).getTime())
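Note: the new expectations follow directly from epoch arithmetic: Feb 1 1995 00:00 GMT is 9162 days after Jan 1 1970 (25 years containing six leap days, plus the 31 days of January 1995), and the longer constructor calls add one hour, minute, second, and millisecond on top. A compile-time check of the figures used above:

```cpp
#include <cstdint>

constexpr std::int64_t kMsPerDay = 86400000;
// 1970..1994 inclusive: 25 * 365 days + 6 leap days (72, 76, 80, 84, 88, 92),
// then the 31 days of January 1995.
constexpr std::int64_t kDaysToFeb1995 = 25 * 365 + 6 + 31;  // 9162
constexpr std::int64_t kFeb1995 = kDaysToFeb1995 * kMsPerDay;

static_assert(kFeb1995 == 791596800000, "Date(1995, 1) at GMT");
static_assert(kFeb1995 + 3600000 + 60000 + 1000 + 1 == 791600461001,
              "plus 1h 1min 1s 1ms, the fully-specified expectation");
```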
diff --git a/deps/v8/test/webkit/testcfg.py b/deps/v8/test/webkit/testcfg.py
index 855a1327ba..bd0149ffbc 100644
--- a/deps/v8/test/webkit/testcfg.py
+++ b/deps/v8/test/webkit/testcfg.py
@@ -25,24 +25,19 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import itertools
import os
import re
from testrunner.local import testsuite
from testrunner.objects import testcase
+from testrunner.outproc import webkit
-FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")
FILES_PATTERN = re.compile(r"//\s+Files:(.*)")
SELF_SCRIPT_PATTERN = re.compile(r"//\s+Env: TEST_FILE_NAME")
# TODO (machenbach): Share commonalities with mjstest.
-class WebkitTestSuite(testsuite.TestSuite):
-
- def __init__(self, name, root):
- super(WebkitTestSuite, self).__init__(name, root)
-
+class TestSuite(testsuite.TestSuite):
def ListTests(self, context):
tests = []
for dirname, dirs, files in os.walk(self.root):
@@ -58,17 +53,23 @@ class WebkitTestSuite(testsuite.TestSuite):
fullpath = os.path.join(dirname, filename)
relpath = fullpath[len(self.root) + 1 : -3]
testname = relpath.replace(os.path.sep, "/")
- test = testcase.TestCase(self, testname)
+ test = self._create_test(testname)
tests.append(test)
return tests
- def GetParametersForTestCase(self, testcase, context):
- source = self.GetSourceForTest(testcase)
- flags = testcase.flags + context.mode_flags
- flags_match = re.findall(FLAGS_PATTERN, source)
- for match in flags_match:
- flags += match.strip().split()
+ def _test_class(self):
+ return TestCase
+
+
+class TestCase(testcase.TestCase):
+ def __init__(self, *args, **kwargs):
+ super(TestCase, self).__init__(*args, **kwargs)
+
+ source = self.get_source()
+ self._source_files = self._parse_source_files(source)
+ self._source_flags = self._parse_source_flags(source)
+ def _parse_source_files(self, source):
files_list = [] # List of file names to append to command arguments.
files_match = FILES_PATTERN.search(source);
# Accept several lines of 'Files:'.
@@ -78,82 +79,35 @@ class WebkitTestSuite(testsuite.TestSuite):
files_match = FILES_PATTERN.search(source, files_match.end())
else:
break
- files = [ os.path.normpath(os.path.join(self.root, '..', '..', f))
+ files = [ os.path.normpath(os.path.join(self.suite.root, '..', '..', f))
for f in files_list ]
- testfilename = os.path.join(self.root, testcase.path + self.suffix())
+ testfilename = os.path.join(self.suite.root, self.path + self._get_suffix())
if SELF_SCRIPT_PATTERN.search(source):
env = ["-e", "TEST_FILE_NAME=\"%s\"" % testfilename.replace("\\", "\\\\")]
files = env + files
- files.append(os.path.join(self.root, "resources/standalone-pre.js"))
+ files.append(os.path.join(self.suite.root, "resources/standalone-pre.js"))
files.append(testfilename)
- files.append(os.path.join(self.root, "resources/standalone-post.js"))
-
- all_files = list(files)
- if context.isolates:
- all_files += ["--isolate"] + files
-
- return all_files, flags, {}
-
- def GetSourceForTest(self, testcase):
- filename = os.path.join(self.root, testcase.path + self.suffix())
- with open(filename) as f:
- return f.read()
-
- # TODO(machenbach): Share with test/message/testcfg.py
- def _IgnoreLine(self, string):
- """Ignore empty lines, valgrind output, Android output and trace
- incremental marking output."""
- if not string: return True
- return (string.startswith("==") or string.startswith("**") or
- string.startswith("ANDROID") or "[IncrementalMarking]" in string or
- # FIXME(machenbach): The test driver shouldn't try to use slow
- # asserts if they weren't compiled. This fails in optdebug=2.
- string == "Warning: unknown flag --enable-slow-asserts." or
- string == "Try --help for options")
-
- def IsFailureOutput(self, testcase):
- if super(WebkitTestSuite, self).IsFailureOutput(testcase):
- return True
- file_name = os.path.join(self.root, testcase.path) + "-expected.txt"
- with file(file_name, "r") as expected:
- expected_lines = expected.readlines()
-
- def ExpIterator():
- for line in expected_lines:
- if line.startswith("#") or not line.strip(): continue
- yield line.strip()
-
- def ActIterator(lines):
- for line in lines:
- if self._IgnoreLine(line.strip()): continue
- yield line.strip()
-
- def ActBlockIterator():
- """Iterates over blocks of actual output lines."""
- lines = testcase.output.stdout.splitlines()
- start_index = 0
- found_eqeq = False
- for index, line in enumerate(lines):
- # If a stress test separator is found:
- if line.startswith("=="):
- # Iterate over all lines before a separator except the first.
- if not found_eqeq:
- found_eqeq = True
- else:
- yield ActIterator(lines[start_index:index])
- # The next block of output lines starts after the separator.
- start_index = index + 1
- # Iterate over complete output if no separator was found.
- if not found_eqeq:
- yield ActIterator(lines)
-
- for act_iterator in ActBlockIterator():
- for (expected, actual) in itertools.izip_longest(
- ExpIterator(), act_iterator, fillvalue=''):
- if expected != actual:
- return True
- return False
+ files.append(os.path.join(self.suite.root, "resources/standalone-post.js"))
+ return files
+
+ def _get_files_params(self, ctx):
+ files = self._source_files
+ if ctx.isolates:
+ files = files + ['--isolate'] + files
+ return files
+
+ def _get_source_flags(self):
+ return self._source_flags
+
+ def _get_source_path(self):
+ return os.path.join(self.suite.root, self.path + self._get_suffix())
+
+ @property
+ def output_proc(self):
+ return webkit.OutProc(
+ self.expected_outcomes,
+ os.path.join(self.suite.root, self.path) + '-expected.txt')
def GetSuite(name, root):
- return WebkitTestSuite(name, root)
+ return TestSuite(name, root)
diff --git a/deps/v8/third_party/binutils/Linux_ia32/binutils.tar.bz2.sha1 b/deps/v8/third_party/binutils/Linux_ia32/binutils.tar.bz2.sha1
index 9d046d1f66..93942d8908 100644
--- a/deps/v8/third_party/binutils/Linux_ia32/binutils.tar.bz2.sha1
+++ b/deps/v8/third_party/binutils/Linux_ia32/binutils.tar.bz2.sha1
@@ -1 +1 @@
-24f937cfdad77bdcd6ad8cacc542d806f3eb4b0f
+81fd042fef3e2ff2e807a8c1fb4ea621b665d6b3
\ No newline at end of file
diff --git a/deps/v8/third_party/binutils/Linux_x64/binutils.tar.bz2.sha1 b/deps/v8/third_party/binutils/Linux_x64/binutils.tar.bz2.sha1
index 09c5366c5c..6bc9f8c8c1 100644
--- a/deps/v8/third_party/binutils/Linux_x64/binutils.tar.bz2.sha1
+++ b/deps/v8/third_party/binutils/Linux_x64/binutils.tar.bz2.sha1
@@ -1 +1 @@
-d9064388bed0e7225b1366d80b59289b1509d7c2
+dbe488f8a5c2e11573a38e8b01e8c96bebed3365
\ No newline at end of file
diff --git a/deps/v8/third_party/inspector_protocol/CodeGenerator.py b/deps/v8/third_party/inspector_protocol/CodeGenerator.py
index 6be153d7e6..e630b02985 100644
--- a/deps/v8/third_party/inspector_protocol/CodeGenerator.py
+++ b/deps/v8/third_party/inspector_protocol/CodeGenerator.py
@@ -325,6 +325,7 @@ class Protocol(object):
self.patch_full_qualified_refs()
self.create_notification_types()
self.create_type_definitions()
+ self.generate_used_types()
def read_protocol_file(self, file_name):
@@ -362,6 +363,56 @@ class Protocol(object):
patch_full_qualified_refs_in_domain(domain, domain["domain"])
+ def all_references(self, json):
+ refs = set()
+ if isinstance(json, list):
+ for item in json:
+ refs |= self.all_references(item)
+ if not isinstance(json, dict):
+ return refs
+ for key in json:
+ if key != "$ref":
+ refs |= self.all_references(json[key])
+ else:
+ refs.add(json["$ref"])
+ return refs
+
+ def generate_used_types(self):
+ all_refs = set()
+ for domain in self.json_api["domains"]:
+ domain_name = domain["domain"]
+ if "commands" in domain:
+ for command in domain["commands"]:
+ if self.generate_command(domain_name, command["name"]):
+ all_refs |= self.all_references(command)
+ if "events" in domain:
+ for event in domain["events"]:
+ if self.generate_event(domain_name, event["name"]):
+ all_refs |= self.all_references(event)
+ all_refs.add(domain_name + "." + to_title_case(event["name"]) + "Notification")
+
+ dependencies = self.generate_type_dependencies()
+ queue = set(all_refs)
+ while len(queue):
+ ref = queue.pop()
+ if ref in dependencies:
+ queue |= dependencies[ref] - all_refs
+ all_refs |= dependencies[ref]
+ self.used_types = all_refs
+
+
+ def generate_type_dependencies(self):
+ dependencies = dict()
+ domains_with_types = (x for x in self.json_api["domains"] if "types" in x)
+ for domain in domains_with_types:
+ domain_name = domain["domain"]
+ for type in domain["types"]:
+ related_types = self.all_references(type)
+ if len(related_types):
+ dependencies[domain_name + "." + type["id"]] = related_types
+ return dependencies
+
+
def create_notification_types(self):
for domain in self.json_api["domains"]:
if "events" in domain:
@@ -444,9 +495,7 @@ class Protocol(object):
def generate_type(self, domain, typename):
- if not self.config.protocol.options:
- return domain in self.generate_domains
- return self.check_options(self.config.protocol.options, domain, typename, "include_types", "exclude_types", True)
+ return domain + "." + typename in self.used_types
def is_async_command(self, domain, command):
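Note: `generate_used_types()` is a plain worklist closure: seed the set with every type referenced from a generated command or event, then repeatedly fold in the dependencies of anything already in the set. The same algorithm sketched in C++ for illustration (the generator itself stays in Python):

```cpp
#include <map>
#include <set>
#include <string>

// Transitive closure over a type-dependency map, mirroring the worklist in
// generate_used_types() above.
std::set<std::string> UsedTypes(
    std::set<std::string> used,  // seeded with directly referenced types
    const std::map<std::string, std::set<std::string>>& deps) {
  std::set<std::string> queue = used;
  while (!queue.empty()) {
    std::string ref = *queue.begin();
    queue.erase(queue.begin());
    auto it = deps.find(ref);
    if (it == deps.end()) continue;
    for (const std::string& dep : it->second) {
      if (used.insert(dep).second) queue.insert(dep);  // newly discovered
    }
  }
  return used;
}
```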
diff --git a/deps/v8/third_party/inspector_protocol/ConvertProtocolToJSON.py b/deps/v8/third_party/inspector_protocol/ConvertProtocolToJSON.py
new file mode 100644
index 0000000000..56fc09d78c
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/ConvertProtocolToJSON.py
@@ -0,0 +1,183 @@
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import collections
+import json
+import os.path
+import re
+import sys
+
+file_name = None
+description = ''
+
+primitiveTypes = ['integer', 'number', 'boolean', 'string', 'object', 'any', 'array']
+
+
+def assignType(item, type, isArray=False):
+ if isArray:
+ item['type'] = 'array'
+ item['items'] = collections.OrderedDict()
+ assignType(item['items'], type)
+ return
+
+ if type == 'enum':
+ type = 'string'
+ if type in primitiveTypes:
+ item['type'] = type
+ else:
+ item['$ref'] = type
+
+
+def createItem(d, experimental, deprecated, name=None):
+ result = collections.OrderedDict(d)
+ if name:
+ result['name'] = name
+ global description
+ if description:
+ result['description'] = description.strip()
+ if experimental:
+ result['experimental'] = True
+ if deprecated:
+ result['deprecated'] = True
+ return result
+
+
+def parse(data):
+ protocol = collections.OrderedDict()
+ protocol['version'] = collections.OrderedDict()
+ protocol['domains'] = []
+ domain = None
+ item = None
+ subitems = None
+ nukeDescription = False
+ global description
+ lines = data.split('\n')
+ for i in range(0, len(lines)):
+ if nukeDescription:
+ description = ''
+ nukeDescription = False
+ line = lines[i]
+ trimLine = line.strip()
+
+ if trimLine.startswith('#'):
+ if len(description):
+ description += '\n'
+ description += trimLine[2:]
+ continue
+ else:
+ nukeDescription = True
+
+ if len(trimLine) == 0:
+ continue
+
+ match = re.compile('^(experimental )?(deprecated )?domain (.*)').match(line)
+ if match:
+ domain = createItem({'domain' : match.group(3)}, match.group(1), match.group(2))
+ protocol['domains'].append(domain)
+ continue
+
+ match = re.compile('^ depends on ([^\s]+)').match(line)
+ if match:
+ if 'dependencies' not in domain:
+ domain['dependencies'] = []
+ domain['dependencies'].append(match.group(1))
+ continue
+
+ match = re.compile('^ (experimental )?(deprecated )?type (.*) extends (array of )?([^\s]+)').match(line)
+ if match:
+ if 'types' not in domain:
+ domain['types'] = []
+ item = createItem({'id': match.group(3)}, match.group(1), match.group(2))
+ assignType(item, match.group(5), match.group(4))
+ domain['types'].append(item)
+ continue
+
+ match = re.compile('^ (experimental )?(deprecated )?(command|event) (.*)').match(line)
+ if match:
+ list = []
+ if match.group(3) == 'command':
+ if 'commands' in domain:
+ list = domain['commands']
+ else:
+ list = domain['commands'] = []
+ else:
+ if 'events' in domain:
+ list = domain['events']
+ else:
+ list = domain['events'] = []
+
+ item = createItem({}, match.group(1), match.group(2), match.group(4))
+ list.append(item)
+ continue
+
+ match = re.compile('^ (experimental )?(deprecated )?(optional )?(array of )?([^\s]+) ([^\s]+)').match(line)
+ if match:
+ param = createItem({}, match.group(1), match.group(2), match.group(6))
+ if match.group(3):
+ param['optional'] = True
+ assignType(param, match.group(5), match.group(4))
+ if match.group(5) == 'enum':
+ enumliterals = param['enum'] = []
+ subitems.append(param)
+ continue
+
+ match = re.compile('^ (parameters|returns|properties)').match(line)
+ if match:
+ subitems = item[match.group(1)] = []
+ continue
+
+ match = re.compile('^ enum').match(line)
+ if match:
+ enumliterals = item['enum'] = []
+ continue
+
+ match = re.compile('^version').match(line)
+ if match:
+ continue
+
+ match = re.compile('^ major (\d+)').match(line)
+ if match:
+ protocol['version']['major'] = match.group(1)
+ continue
+
+ match = re.compile('^ minor (\d+)').match(line)
+ if match:
+ protocol['version']['minor'] = match.group(1)
+ continue
+
+ match = re.compile('^ redirect ([^\s]+)').match(line)
+ if match:
+ item['redirect'] = match.group(1)
+ continue
+
+ match = re.compile('^ ( )?[^\s]+$').match(line)
+ if match:
+ # enum literal
+ enumliterals.append(trimLine)
+ continue
+
+ print 'Error in %s:%s, illegal token: \t%s' % (file_name, i, line)
+ sys.exit(1)
+ return protocol
+
+def main(argv):
+ if len(argv) < 2:
+ sys.stderr.write("Usage: %s <protocol.pdl> <protocol.json>\n" % sys.argv[0])
+ return 1
+ global file_name
+ file_name = os.path.normpath(argv[0])
+ input_file = open(file_name, "r")
+ pdl_string = input_file.read()
+ protocol = parse(pdl_string)
+ output_file = open(argv[0].replace('.pdl', '.json'), 'wb')
+ json.dump(protocol, output_file, indent=4, separators=(',', ': '))
+ output_file.close()
+
+ output_file = open(os.path.normpath(argv[1]), 'wb')
+ json.dump(protocol, output_file, indent=4, separators=(',', ': '))
+ output_file.close()
+
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv[1:]))
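Note: the new converter reads the indentation-based PDL dialect and emits the JSON that CodeGenerator.py already consumes. A hypothetical fragment, to show the mapping (both sides are invented for illustration, traced through the regexes in parse() above):

```
experimental domain Hypothetical
  type Thing extends object
    properties
      string name
      optional integer count
```

which parse() turns into, roughly:

```
{"version": {}, "domains": [{"domain": "Hypothetical", "experimental": true,
  "types": [{"id": "Thing", "type": "object", "properties": [
    {"name": "name", "type": "string"},
    {"name": "count", "optional": true, "type": "integer"}]}]}]}
```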
diff --git a/deps/v8/third_party/inspector_protocol/README.v8 b/deps/v8/third_party/inspector_protocol/README.v8
index fdb24d41d4..8a82f2a9c9 100644
--- a/deps/v8/third_party/inspector_protocol/README.v8
+++ b/deps/v8/third_party/inspector_protocol/README.v8
@@ -2,7 +2,7 @@ Name: inspector protocol
Short Name: inspector_protocol
URL: https://chromium.googlesource.com/deps/inspector_protocol/
Version: 0
-Revision: 65caa48c1d301e35f60b94ae770b0c68c34960d4
+Revision: 752d4abd13119010cf30e454e8ef9b5fb7ef43a3
License: BSD
License File: LICENSE
Security Critical: no
diff --git a/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_cpp.template b/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_cpp.template
index 287f306420..cecef743bf 100644
--- a/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_cpp.template
+++ b/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_cpp.template
@@ -118,23 +118,6 @@ void DispatcherBase::markFallThrough(int callbackId)
m_lastCallbackFallThrough = true;
}
-// static
-bool DispatcherBase::getCommandName(const String& message, String* result)
-{
- std::unique_ptr<protocol::Value> value = StringUtil::parseJSON(message);
- if (!value)
- return false;
-
- protocol::DictionaryValue* object = DictionaryValue::cast(value.get());
- if (!object)
- return false;
-
- if (!object->getString("method", result))
- return false;
-
- return true;
-}
-
void DispatcherBase::sendResponse(int callId, const DispatchResponse& response, std::unique_ptr<protocol::DictionaryValue> result)
{
if (!m_frontendChannel)
@@ -272,7 +255,7 @@ DispatchResponse::Status UberDispatcher::dispatch(std::unique_ptr<Value> parsedM
if (outCallId)
*outCallId = callId;
if (!success) {
- reportProtocolErrorTo(m_frontendChannel, DispatchResponse::kInvalidRequest, "Message must have integer 'id' porperty");
+ reportProtocolErrorTo(m_frontendChannel, DispatchResponse::kInvalidRequest, "Message must have integer 'id' property");
return DispatchResponse::kError;
}
@@ -282,7 +265,7 @@ DispatchResponse::Status UberDispatcher::dispatch(std::unique_ptr<Value> parsedM
if (outMethod)
*outMethod = method;
if (!success) {
- reportProtocolErrorTo(m_frontendChannel, callId, DispatchResponse::kInvalidRequest, "Message must have string 'method' porperty", nullptr);
+ reportProtocolErrorTo(m_frontendChannel, callId, DispatchResponse::kInvalidRequest, "Message must have string 'method' property", nullptr);
return DispatchResponse::kError;
}
@@ -308,6 +291,29 @@ DispatchResponse::Status UberDispatcher::dispatch(std::unique_ptr<Value> parsedM
return it->second->dispatch(callId, method, std::move(messageObject));
}
+bool UberDispatcher::getCommandName(const String& message, String* method, std::unique_ptr<protocol::DictionaryValue>* parsedMessage)
+{
+ std::unique_ptr<protocol::Value> value = StringUtil::parseJSON(message);
+ if (!value) {
+ reportProtocolErrorTo(m_frontendChannel, DispatchResponse::kParseError, "Message must be a valid JSON");
+ return false;
+ }
+
+ protocol::DictionaryValue* object = DictionaryValue::cast(value.get());
+ if (!object) {
+ reportProtocolErrorTo(m_frontendChannel, DispatchResponse::kInvalidRequest, "Message must be an object");
+ return false;
+ }
+
+ if (!object->getString("method", method)) {
+ reportProtocolErrorTo(m_frontendChannel, DispatchResponse::kInvalidRequest, "Message must have string 'method' property");
+ return false;
+ }
+
+ parsedMessage->reset(DictionaryValue::cast(value.release()));
+ return true;
+}
+
UberDispatcher::~UberDispatcher() = default;
// static
diff --git a/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_h.template b/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_h.template
index 5404281dc6..d70a4afe71 100644
--- a/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_h.template
+++ b/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_h.template
@@ -85,8 +85,6 @@ public:
explicit DispatcherBase(FrontendChannel*);
virtual ~DispatcherBase();
- static bool getCommandName(const String& message, String* result);
-
virtual DispatchResponse::Status dispatch(int callId, const String& method, std::unique_ptr<protocol::DictionaryValue> messageObject) = 0;
void sendResponse(int callId, const DispatchResponse&, std::unique_ptr<protocol::DictionaryValue> result);
@@ -118,6 +116,7 @@ public:
FrontendChannel* channel() { return m_frontendChannel; }
bool fallThroughForNotFound() { return m_fallThroughForNotFound; }
void setFallThroughForNotFound(bool);
+ bool getCommandName(const String& message, String* method, std::unique_ptr<protocol::DictionaryValue>* parsedMessage);
virtual ~UberDispatcher();
private:
diff --git a/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_h.template b/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_h.template
index 11d529bce9..744d496026 100644
--- a/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_h.template
+++ b/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_h.template
@@ -32,14 +32,11 @@ namespace {{domain.domain}} {
{% if not protocol.generate_type(domain.domain, type.id) %}{% continue %}{% endif %}
{% if type.type == "object" %}
{% if "properties" in type %}
-// {{type.description}}
class {{type.id}};
{% else %}
-// {{type.description}}
using {{type.id}} = Object;
{% endif %}
{% elif type.type != "array" %}
-// {{type.description}}
using {{type.id}} = {{protocol.resolve_type(type).type}};
{% endif %}
{% endfor %}
@@ -74,7 +71,6 @@ namespace {{param.name | to_title_case}}Enum {
{% if not protocol.generate_type(domain.domain, type.id) %}{% continue %}{% endif %}
{% if not (type.type == "object") or not ("properties" in type) %}{% continue %}{% endif %}
-// {{type.description}}
class {{config.protocol.export_macro}} {{type.id}} : public Serializable{% if protocol.is_exported(domain.domain, type.id) %}, public API::{{type.id}}{% endif %}{
PROTOCOL_DISALLOW_COPY({{type.id}});
public:
diff --git a/deps/v8/tools/BUILD.gn b/deps/v8/tools/BUILD.gn
index a15058a186..1c0864d0d8 100644
--- a/deps/v8/tools/BUILD.gn
+++ b/deps/v8/tools/BUILD.gn
@@ -50,6 +50,7 @@ v8_isolate_run("run-gcmole") {
isolate = "gcmole/run-gcmole.isolate"
}
+# TODO(machenbach): Add tests as dependencies.
v8_isolate_run("run-num-fuzzer") {
deps = [
"..:d8_run",
diff --git a/deps/v8/tools/callstats.html b/deps/v8/tools/callstats.html
index a75edf8968..2618b50b71 100644
--- a/deps/v8/tools/callstats.html
+++ b/deps/v8/tools/callstats.html
@@ -1727,6 +1727,8 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
Group.add('callback', new Group('Blink C++', /.*Callback.*/, "#109618"));
Group.add('api', new Group('API', /.*API.*/, "#990099"));
Group.add('gc-custom', new Group('GC-Custom', /GC_Custom_.*/, "#0099C6"));
+ Group.add('gc-background',
+ new Group('GC-Background', /.*GC.*BACKGROUND.*/, "#00597c"));
Group.add('gc', new Group('GC', /GC_.*|AllocateInTargetSpace/, "#00799c"));
Group.add('javascript', new Group('JavaScript', /JS_Execution/, "#DD4477"));
Group.add('runtime', new Group('V8 C++', /.*/, "#88BB00"));
diff --git a/deps/v8/tools/callstats.py b/deps/v8/tools/callstats.py
index 1b123cb936..5215d6319f 100755
--- a/deps/v8/tools/callstats.py
+++ b/deps/v8/tools/callstats.py
@@ -356,6 +356,7 @@ def read_stats(path, domain, args):
('Group-Callback', re.compile(".*Callback.*")),
('Group-API', re.compile(".*API.*")),
('Group-GC-Custom', re.compile("GC_Custom_.*")),
+ ('Group-GC-Background', re.compile(".*GC.*BACKGROUND.*")),
('Group-GC', re.compile("GC_.*|AllocateInTargetSpace")),
('Group-JavaScript', re.compile("JS_Execution")),
('Group-Runtime', re.compile(".*"))]
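
The new `Group-GC-Background` bucket is listed ahead of the general `Group-GC` bucket, so background GC entries are claimed before the broader pattern can match them. A minimal sketch of that ordering effect, assuming first-match-wins classification (the `classify` helper is illustrative, not part of the patch):

    import re

    # Ordered as in the patch: the background pattern must precede the
    # general GC pattern, or it would never match anything.
    GROUPS = [
        ('Group-GC-Background', re.compile(".*GC.*BACKGROUND.*")),
        ('Group-GC', re.compile("GC_.*|AllocateInTargetSpace")),
        ('Group-Runtime', re.compile(".*")),  # catch-all
    ]

    def classify(name):
        for label, exp in GROUPS:
            if exp.match(name):
                return label

    print(classify('GC_BACKGROUND_SCAVENGER'))  # Group-GC-Background
    print(classify('GC_MC_MARK'))               # Group-GC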
diff --git a/deps/v8/tools/foozzie/testdata/failure_output.txt b/deps/v8/tools/foozzie/testdata/failure_output.txt
index 654a84fb98..85b1d7ab77 100644
--- a/deps/v8/tools/foozzie/testdata/failure_output.txt
+++ b/deps/v8/tools/foozzie/testdata/failure_output.txt
@@ -11,7 +11,7 @@
# Flags of x64,ignition:
--abort_on_stack_or_string_length_overflow --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --random-seed 12345 --turbo-filter=~ --noopt --suppress-asm-messages
# Flags of x64,ignition_turbo:
---abort_on_stack_or_string_length_overflow --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --random-seed 12345 --suppress-asm-messages
+--abort_on_stack_or_string_length_overflow --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --random-seed 12345 --suppress-asm-messages --stress-scavenge=100
#
# Difference:
- unknown
diff --git a/deps/v8/tools/foozzie/v8_foozzie.py b/deps/v8/tools/foozzie/v8_foozzie.py
index 2b61410ce4..9bb3512bcf 100755
--- a/deps/v8/tools/foozzie/v8_foozzie.py
+++ b/deps/v8/tools/foozzie/v8_foozzie.py
@@ -12,6 +12,7 @@ import hashlib
import itertools
import json
import os
+import random
import re
import sys
import traceback
@@ -55,8 +56,35 @@ CONFIGS = dict(
'--no-lazy-inner-functions',
'--suppress-asm-messages',
],
+ slow_path=[
+ '--force-slow-path',
+ '--suppress-asm-messages',
+ ],
+ slow_path_opt=[
+ '--always-opt',
+ '--force-slow-path',
+ '--suppress-asm-messages',
+ ],
+ trusted=[
+ '--no-untrusted-code-mitigations',
+ '--suppress-asm-messages',
+ ],
+ trusted_opt=[
+ '--always-opt',
+ '--no-untrusted-code-mitigations',
+ '--suppress-asm-messages',
+ ],
)
+# Additional flag experiments. List of tuples like
+# (<likelihood to use flags in [0,1)>, <flag>).
+ADDITIONAL_FLAGS = [
+ (0.1, '--stress-marking=100'),
+ (0.1, '--stress-scavenge=100'),
+ (0.1, '--stress-compaction-random'),
+ (0.1, '--random-gc-interval=2000'),
+]
+
# Timeout in seconds for one d8 run.
TIMEOUT = 3
@@ -229,6 +257,7 @@ def fail_bailout(output, ignore_by_output_fun):
def main():
options = parse_args()
+ rng = random.Random(options.random_seed)
# Suppressions are architecture and configuration specific.
suppress = v8_suppressions.get_suppression(
@@ -249,6 +278,11 @@ def main():
first_config_flags = common_flags + CONFIGS[options.first_config]
second_config_flags = common_flags + CONFIGS[options.second_config]
+ # Add additional flags to second config based on experiment percentages.
+ for p, flag in ADDITIONAL_FLAGS:
+ if rng.random() < p:
+ second_config_flags.append(flag)
+
def run_d8(d8, config_flags):
preamble = PREAMBLE[:]
if options.first_arch != options.second_arch:
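
Because the experiment flags are drawn from an RNG seeded with `options.random_seed`, a given seed always selects the same extra flags, keeping fuzzer runs reproducible. A small standalone sketch of that selection (the `pick_flags` helper is hypothetical):

    import random

    ADDITIONAL_FLAGS = [
        (0.1, '--stress-marking=100'),
        (0.1, '--stress-scavenge=100'),
    ]

    def pick_flags(seed):
        rng = random.Random(seed)
        # Each flag is added independently with probability p.
        return [flag for p, flag in ADDITIONAL_FLAGS if rng.random() < p]

    # Same seed, same selection.
    assert pick_flags(12345) == pick_flags(12345)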
diff --git a/deps/v8/tools/foozzie/v8_suppressions.py b/deps/v8/tools/foozzie/v8_suppressions.py
index 42fdc7a241..87b1972e94 100644
--- a/deps/v8/tools/foozzie/v8_suppressions.py
+++ b/deps/v8/tools/foozzie/v8_suppressions.py
@@ -46,11 +46,6 @@ IGNORE_SOURCES = {
'/v8/test/mjsunit/regress/regress-2989.js',
],
- 'crbug.com/718739': [
- '/v8/test/mjsunit/regress/regress-105.js',
- '/v8/test/mjsunit/regress/regress-crbug-599714.js',
- ],
-
'crbug.com/688159': [
'/v8/test/mjsunit/es7/exponentiation-operator.js',
],
@@ -70,17 +65,25 @@ IGNORE_SOURCES = {
],
}
-# Ignore by test case pattern. Map from bug->regexp.
+# Ignore by test case pattern. Map from config->bug->regexp. Config '' is used
+# to match all configurations. Otherwise use either a compiler configuration,
+# e.g. ignition or validate_asm, or an architecture, e.g. x64 or ia32.
+# Bug is preferred to be a crbug.com/XYZ, but can be any short distinguishable
+# label.
# Regular expressions are assumed to be compiled. We use regexp.search.
IGNORE_TEST_CASES = {
- 'crbug.com/718739': re.compile(r'\.caller'),
+ 'slow_path': {
+ 'crbug.com/800651':
+ re.compile(r'async', re.S),
+ },
+ 'slow_path_opt': {
+ 'crbug.com/800651':
+ re.compile(r'async', re.S),
+ },
}
-# Ignore by output pattern. Map from config->bug->regexp. Config '' is used
-# to match all configurations. Otherwise use either a compiler configuration,
-# e.g. fullcode or validate_asm or an architecture, e.g. x64 or ia32 or a
-# comma-separated combination, e.g. x64,fullcode, for more specific
-# suppressions.
+# Ignore by output pattern. Map from config->bug->regexp. See IGNORE_TEST_CASES
+# on how to specify config keys.
# Bug is preferred to be a crbug.com/XYZ, but can be any short distinguishable
# label.
# Regular expressions are assumed to be compiled. We use regexp.search.
@@ -250,16 +253,16 @@ class Suppression(object):
return None
def ignore_by_metadata(self, metadata):
- return False
+ return None
def ignore_by_content(self, testcase):
- return False
+ return None
def ignore_by_output1(self, output):
- return False
+ return None
def ignore_by_output2(self, output):
- return False
+ return None
class V8Suppression(Suppression):
@@ -282,23 +285,25 @@ class V8Suppression(Suppression):
# Strip off test case preamble.
try:
lines = testcase.splitlines()
- lines = lines[lines.index('print("js-mutation: start generated test case");'):]
+ lines = lines[lines.index(
+ 'print("js-mutation: start generated test case");'):]
content = '\n'.join(lines)
except ValueError:
# Search the whole test case if preamble can't be found. E.g. older
# already minimized test cases might have dropped the delimiter line.
content = testcase
- for bug, exp in IGNORE_TEST_CASES.iteritems():
- if exp.search(content):
- return bug
- return False
+ for key in ['', self.arch1, self.arch2, self.config1, self.config2]:
+ for bug, exp in IGNORE_TEST_CASES.get(key, {}).iteritems():
+ if exp.search(content):
+ return bug
+ return None
def ignore_by_metadata(self, metadata):
for bug, sources in IGNORE_SOURCES.iteritems():
for source in sources:
if source in metadata['sources']:
return bug
- return False
+ return None
def ignore_by_output1(self, output):
return self.ignore_by_output(output, self.arch1, self.config1)
@@ -312,16 +317,8 @@ class V8Suppression(Suppression):
if exp.search(output):
return bug
return None
- bug = check(IGNORE_OUTPUT.get('', {}))
- if bug:
- return bug
- bug = check(IGNORE_OUTPUT.get(arch, {}))
- if bug:
- return bug
- bug = check(IGNORE_OUTPUT.get(config, {}))
- if bug:
- return bug
- bug = check(IGNORE_OUTPUT.get('%s,%s' % (arch, config), {}))
- if bug:
- return bug
+ for key in ['', arch, config]:
+ bug = check(IGNORE_OUTPUT.get(key, {}))
+ if bug:
+ return bug
return None
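
Test-case and output suppressions now share one lookup scheme: try the catch-all key '', then the architecture and configuration keys, and return the first matching bug label, else None. A condensed Python 3 sketch of that lookup (the `find_bug` helper and sample data are illustrative):

    import re

    IGNORE = {
        'slow_path': {'crbug.com/800651': re.compile(r'async', re.S)},
    }

    def find_bug(content, arch, config):
        for key in ['', arch, config]:
            for bug, exp in IGNORE.get(key, {}).items():
                if exp.search(content):
                    return bug
        return None

    print(find_bug('async function f() {}', 'x64', 'slow_path'))  # crbug.com/800651
    print(find_bug('var x = 1;', 'x64', 'slow_path'))             # None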
diff --git a/deps/v8/tools/gdbinit b/deps/v8/tools/gdbinit
index 03ecfdda30..fa9f434fb3 100644
--- a/deps/v8/tools/gdbinit
+++ b/deps/v8/tools/gdbinit
@@ -13,7 +13,7 @@ end
# Print v8::Local handle value.
define jlh
-call _v8_internal_Print_Object(*(v8::internal::Object**)(*$arg0))
+call _v8_internal_Print_Object(*((v8::internal::Object**)($arg0).val_))
end
document jlh
Print content of a v8::Local handle
diff --git a/deps/v8/tools/gen-postmortem-metadata.py b/deps/v8/tools/gen-postmortem-metadata.py
index a618d74ed3..043ecc306d 100644
--- a/deps/v8/tools/gen-postmortem-metadata.py
+++ b/deps/v8/tools/gen-postmortem-metadata.py
@@ -164,8 +164,8 @@ consts_misc = [
'value': 'Map::ElementsKindBits::kMask' },
{ 'name': 'bit_field2_elements_kind_shift',
'value': 'Map::ElementsKindBits::kShift' },
- { 'name': 'bit_field3_dictionary_map_shift',
- 'value': 'Map::DictionaryMap::kShift' },
+ { 'name': 'bit_field3_is_dictionary_map_shift',
+ 'value': 'Map::IsDictionaryMapBit::kShift' },
{ 'name': 'bit_field3_number_of_own_descriptors_mask',
'value': 'Map::NumberOfOwnDescriptorsBits::kMask' },
{ 'name': 'bit_field3_number_of_own_descriptors_shift',
diff --git a/deps/v8/tools/heap-stats/README.md b/deps/v8/tools/heap-stats/README.md
new file mode 100644
index 0000000000..70083fe257
--- /dev/null
+++ b/deps/v8/tools/heap-stats/README.md
@@ -0,0 +1,15 @@
+# Heap Stats
+
+Heap stats is an HTML-based tool for visualizing V8-internal object statistics.
+For example, the tool can be used to visualize how much heap memory is used for
+maintaining internal state versus how much is actually allocated by the user.
+
+The tool consumes log files produced by d8 (or Chromium) with
+`--trace-gc-object-stats`, or a trace captured using Chrome's tracing
+infrastructure. Note that Chrome trace files need to be unpacked before they
+can be used.
+
+Hosting requires a web server, e.g.:
+
+ cd tools/heap-stats
+ python -m SimpleHTTPServer 8000
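
A suitable log might be produced along these lines (the d8 path and script name are illustrative; only the `--trace-gc-object-stats` flag comes from this README):

    out/x64.release/d8 --trace-gc-object-stats test.js > v8.log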
diff --git a/deps/v8/tools/heap-stats/categories.js b/deps/v8/tools/heap-stats/categories.js
new file mode 100644
index 0000000000..0a836d5f6c
--- /dev/null
+++ b/deps/v8/tools/heap-stats/categories.js
@@ -0,0 +1,167 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Categories for instance types.
+const CATEGORIES = new Map([
+ [
+ 'user', new Set([
+ '*FIXED_ARRAY_CONTEXT_SUB_TYPE',
+ '*FIXED_ARRAY_COPY_ON_WRITE_SUB_TYPE',
+ '*FIXED_ARRAY_DICTIONARY_PROPERTIES_SUB_TYPE',
+ '*FIXED_ARRAY_JS_COLLECTION_SUB_TYPE',
+ '*FIXED_ARRAY_JS_WEAK_COLLECTION_SUB_TYPE',
+ '*FIXED_ARRAY_PACKED_ELEMENTS_SUB_TYPE',
+ 'CONS_ONE_BYTE_STRING_TYPE',
+ 'CONS_STRING_TYPE',
+ 'DESCRIPTOR_ARRAY_TYPE',
+ 'EXTERNAL_INTERNALIZED_STRING_TYPE',
+ 'EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE',
+ 'EXTERNAL_ONE_BYTE_STRING_TYPE',
+ 'EXTERNAL_STRING_TYPE',
+ 'EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE',
+ 'FIXED_DOUBLE_ARRAY_TYPE',
+ 'FIXED_FLOAT32_ARRAY_TYPE',
+ 'FIXED_FLOAT64_ARRAY_TYPE',
+ 'FIXED_INT16_ARRAY_TYPE',
+ 'FIXED_INT32_ARRAY_TYPE',
+ 'FIXED_INT8_ARRAY_TYPE',
+ 'FIXED_UINT16_ARRAY_TYPE',
+ 'FIXED_UINT32_ARRAY_TYPE',
+ 'FIXED_UINT8_ARRAY_TYPE',
+ 'FIXED_UINT8_CLAMPED_ARRAY_TYPE',
+ 'HEAP_NUMBER_TYPE',
+ 'INTERNALIZED_STRING_TYPE',
+ 'JS_ARGUMENTS_TYPE',
+ 'JS_ARRAY_BUFFER_TYPE',
+ 'JS_ARRAY_TYPE',
+ 'JS_BOUND_FUNCTION_TYPE',
+ 'JS_DATE_TYPE',
+ 'JS_ERROR_TYPE',
+ 'JS_FAST_ARRAY_KEY_ITERATOR_TYPE',
+ 'JS_FAST_ARRAY_VALUE_ITERATOR_TYPE',
+ 'JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE',
+ 'JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE',
+ 'JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE',
+ 'JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE',
+ 'JS_FUNCTION_TYPE',
+ 'JS_GENERATOR_OBJECT_TYPE',
+ 'JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE',
+ 'JS_GLOBAL_OBJECT_TYPE',
+ 'JS_GLOBAL_PROXY_TYPE',
+ 'JS_MAP_KEY_VALUE_ITERATOR_TYPE',
+ 'JS_MAP_TYPE',
+ 'JS_MESSAGE_OBJECT_TYPE',
+ 'JS_OBJECT_TYPE',
+ 'JS_PROMISE_TYPE',
+ 'JS_REGEXP_TYPE',
+ 'JS_SET_TYPE',
+ 'JS_STRING_ITERATOR_TYPE',
+ 'JS_TYPED_ARRAY_TYPE',
+ 'JS_VALUE_TYPE',
+ 'JS_WEAK_MAP_TYPE',
+ 'MUTABLE_HEAP_NUMBER_TYPE',
+ 'ONE_BYTE_INTERNALIZED_STRING_TYPE',
+ 'ONE_BYTE_STRING_TYPE',
+ 'PROPERTY_ARRAY_TYPE',
+ 'SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE',
+ 'SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE',
+ 'SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE',
+ 'SHORT_EXTERNAL_STRING_TYPE',
+ 'SLICED_ONE_BYTE_STRING_TYPE',
+ 'SLICED_STRING_TYPE',
+ 'STRING_TYPE',
+ 'SYMBOL_TYPE',
+ 'THIN_ONE_BYTE_STRING_TYPE',
+ 'THIN_STRING_TYPE',
+ ])
+ ],
+ [
+ 'system', new Set([
+ 'ACCESS_CHECK_INFO_TYPE',
+ 'ACCESSOR_INFO_TYPE',
+ 'ACCESSOR_PAIR_TYPE',
+ 'ALLOCATION_MEMENTO_TYPE',
+ 'ALLOCATION_SITE_TYPE',
+ 'BOILERPLATE_ELEMENTS_TYPE',
+ 'BOILERPLATE_NAME_DICTIONARY_TYPE',
+ 'BOILERPLATE_PROPERTY_ARRAY_TYPE',
+ 'BYTE_ARRAY_TYPE',
+ 'CELL_TYPE',
+ 'CONTEXT_EXTENSION_TYPE',
+ '*FIXED_ARRAY_DEPENDENT_CODE_SUB_TYPE',
+ '*FIXED_ARRAY_ENUM_CACHE_SUB_TYPE',
+ '*FIXED_ARRAY_ENUM_INDICES_CACHE_SUB_TYPE',
+ '*FIXED_ARRAY_FAST_TEMPLATE_INSTANTIATIONS_CACHE_SUB_TYPE',
+ '*FIXED_ARRAY_NUMBER_STRING_CACHE_SUB_TYPE',
+ '*FIXED_ARRAY_PROTOTYPE_USERS_SUB_TYPE',
+ '*FIXED_ARRAY_REGEXP_MULTIPLE_CACHE_SUB_TYPE',
+ '*FIXED_ARRAY_RETAINED_MAPS_SUB_TYPE',
+ '*FIXED_ARRAY_SCOPE_INFO_SUB_TYPE',
+ '*FIXED_ARRAY_SCRIPT_LIST_SUB_TYPE',
+ '*FIXED_ARRAY_SINGLE_CHARACTER_STRING_CACHE_SUB_TYPE',
+ '*FIXED_ARRAY_STRING_SPLIT_CACHE_SUB_TYPE',
+ '*FIXED_ARRAY_TEMPLATE_INFO_SUB_TYPE',
+ '*FIXED_ARRAY_WEAK_NEW_SPACE_OBJECT_TO_CODE_SUB_TYPE',
+ 'FOREIGN_TYPE',
+ 'FUNCTION_TEMPLATE_INFO_TYPE',
+ 'INTERCEPTOR_INFO_TYPE',
+ 'JS_API_OBJECT_TYPE',
+ 'JS_ARRAY_BOILERPLATE_TYPE',
+ 'JS_OBJECT_BOILERPLATE_TYPE',
+ 'JS_SPECIAL_API_OBJECT_TYPE',
+ 'MAP_TYPE',
+ 'OBJECT_TEMPLATE_INFO_TYPE',
+ 'ODDBALL_TYPE',
+ 'PROMISE_REACTION_JOB_INFO_TYPE',
+ 'PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE',
+ 'PROPERTY_CELL_TYPE',
+ 'PROTOTYPE_INFO_TYPE',
+ 'STACK_FRAME_INFO_TYPE',
+ 'TRANSITION_ARRAY_TYPE',
+ 'WEAK_CELL_TYPE',
+ ])
+ ],
+ [
+ 'code', new Set([
+ '*CODE_BUILTIN',
+ '*CODE_BYTECODE_HANDLER',
+ '*CODE_OPTIMIZED_FUNCTION',
+ '*CODE_REGEXP',
+ '*CODE_STUB',
+ '*FIXED_ARRAY_BYTECODE_ARRAY_CONSTANT_POOL_SUB_TYPE',
+ '*FIXED_ARRAY_BYTECODE_ARRAY_HANDLER_TABLE_SUB_TYPE',
+ '*FIXED_ARRAY_CODE_STUBS_TABLE_SUB_TYPE',
+ '*FIXED_ARRAY_COMPILATION_CACHE_TABLE_SUB_TYPE',
+ '*FIXED_ARRAY_DEOPTIMIZATION_DATA_SUB_TYPE',
+ '*FIXED_ARRAY_EMBEDDED_OBJECT_SUB_TYPE',
+ '*FIXED_ARRAY_HANDLER_TABLE_SUB_TYPE',
+ '*FIXED_ARRAY_NOSCRIPT_SHARED_FUNCTION_INFOS_SUB_TYPE',
+ '*FIXED_ARRAY_OPTIMIZED_CODE_LITERALS_SUB_TYPE',
+ '*FIXED_ARRAY_SHARED_FUNCTION_INFOS_SUB_TYPE',
+ 'BYTECODE_ARRAY_TYPE',
+ 'CODE_DATA_CONTAINER_TYPE',
+ 'FEEDBACK_VECTOR_TYPE',
+ 'LOAD_HANDLER_TYPE',
+ 'SCRIPT_TYPE',
+ 'SHARED_FUNCTION_INFO_TYPE',
+ 'STORE_HANDLER_TYPE',
+ ])
+ ],
+ ['unclassified', new Set()],
+]);
+
+// Maps category to description text that is shown in html.
+const CATEGORY_NAMES = new Map([
+ ['user', 'JS'],
+ ['system', 'Metadata'],
+ ['code', 'Code'],
+ ['unclassified', 'Unclassified'],
+]);
+
+// Instance types that are constructed from their sub types and
+// should thus be hidden.
+const IGNORED_INSTANCE_TYPES = new Set([
+ 'FIXED_ARRAY_TYPE',
+ 'CODE_TYPE',
+]);
diff --git a/deps/v8/tools/heap-stats/details-selection.html b/deps/v8/tools/heap-stats/details-selection.html
new file mode 100644
index 0000000000..d60aef9669
--- /dev/null
+++ b/deps/v8/tools/heap-stats/details-selection.html
@@ -0,0 +1,72 @@
+<!-- Copyright 2018 the V8 project authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file. -->
+<template id="details-selection-template">
+<style>
+.box {
+ border-left: dashed 1px #666666;
+ border-right: dashed 1px #666666;
+ border-bottom: dashed 1px #666666;
+ padding: 10px;
+ overflow: hidden;
+}
+
+.box:nth-of-type(1) {
+ border-top: dashed 1px #666666;
+ border-radius: 5px 5px 0px 0px;
+}
+
+.box:last-of-type {
+ border-radius: 0px 0px 5px 5px;
+}
+
+span {
+ display: block;
+ padding: 5px;
+ font-weight: bold;
+}
+
+.boxDiv {
+ padding: 3px;
+ float: left;
+}
+
+.boxDiv > label {
+ font-size: xx-small;
+}
+
+#categories {
+ margin-top: 10px;
+}
+</style>
+<h2>Data selection</h2>
+<ul>
+ <li>
+ <label for="isolate-select">
+ Isolate
+ </label>
+ <select id="isolate-select">
+ <option>No data</option>
+ </select>
+ </li>
+ <li>
+ <label for="dataset-select">
+ Data set
+ </label>
+ <select id="dataset-select">
+ <option>No data</option>
+ </select>
+ </li>
+ <li>
+ <input type="checkbox" id="merge-categories" checked=checked />
+ <label for="merge-categories">
+ Merge categories
+ </label>
+ </li>
+</ul>
+
+
+<div id="categories"></div>
+</template>
+<script type="text/javascript" src="categories.js"></script>
+<script type="text/javascript" src="details-selection.js"></script> \ No newline at end of file
diff --git a/deps/v8/tools/heap-stats/details-selection.js b/deps/v8/tools/heap-stats/details-selection.js
new file mode 100644
index 0000000000..43c000d3f4
--- /dev/null
+++ b/deps/v8/tools/heap-stats/details-selection.js
@@ -0,0 +1,211 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+const details_selection_template =
+ document.currentScript.ownerDocument.querySelector(
+ '#details-selection-template');
+
+class DetailsSelection extends HTMLElement {
+ constructor() {
+ super();
+ const shadowRoot = this.attachShadow({mode: 'open'});
+ shadowRoot.appendChild(details_selection_template.content.cloneNode(true));
+ this.isolateSelect.addEventListener(
+ 'change', e => this.handleIsolateChange(e));
+ this.datasetSelect.addEventListener(
+ 'change', e => this.notifySelectionChanged(e));
+ this.$('#merge-categories')
+ .addEventListener('change', e => this.notifySelectionChanged(e));
+ }
+
+ connectedCallback() {
+ for (let category of CATEGORIES.keys()) {
+ this.$('#categories').appendChild(this.buildCategory(category));
+ }
+ }
+
+ set data(value) {
+ this._data = value;
+ this.dataChanged();
+ }
+
+ get data() {
+ return this._data;
+ }
+
+ buildCategory(name) {
+ const div = document.createElement('div');
+ div.id = name;
+ div.classList.add('box');
+ const span = document.createElement('span');
+ div.appendChild(span);
+ span.innerHTML = CATEGORY_NAMES.get(name) + ' ';
+ const all_button = document.createElement('button');
+ span.appendChild(all_button);
+ all_button.innerHTML = 'All';
+ all_button.addEventListener('click', e => this.selectCategory(name));
+ const none_button = document.createElement('button');
+ span.appendChild(none_button);
+ none_button.innerHTML = 'None';
+ none_button.addEventListener('click', e => this.unselectCategory(name));
+ const innerDiv = document.createElement('div');
+ div.appendChild(innerDiv);
+ innerDiv.id = name + 'Content';
+ return div;
+ }
+
+ $(id) {
+ return this.shadowRoot.querySelector(id);
+ }
+
+ get datasetSelect() {
+ return this.$('#dataset-select');
+ }
+
+ get isolateSelect() {
+ return this.$('#isolate-select');
+ }
+
+ dataChanged() {
+ this.clearUI();
+ this.populateSelect('#isolate-select', Object.keys(this.data));
+ this.handleIsolateChange();
+ }
+
+ clearUI() {
+ this.selection = {categories: {}};
+ removeAllChildren(this.isolateSelect);
+ removeAllChildren(this.datasetSelect);
+ this.clearCategories();
+ }
+
+ handleIsolateChange(e) {
+ this.selection.isolate = this.isolateSelect.value;
+ if (this.selection.isolate.length === 0) {
+ this.selection.isolate = null;
+ return;
+ }
+
+ this.populateSelect(
+ '#dataset-select', this.data[this.selection.isolate].data_sets, 'live');
+ this.populateCategories();
+ this.notifySelectionChanged();
+ }
+
+ notifySelectionChanged(e) {
+ if (!this.selection.isolate) return;
+
+ this.selection.categories = {};
+ for (let category of CATEGORIES.keys()) {
+ const selected = this.selectedInCategory(category);
+ if (selected.length > 0) this.selection.categories[category] = selected;
+ }
+ this.selection.category_names = CATEGORY_NAMES;
+ this.selection.data_set = this.datasetSelect.value;
+ this.selection.merge_categories = this.$('#merge-categories').checked;
+ this.dispatchEvent(new CustomEvent(
+ 'change', {bubbles: true, composed: true, detail: this.selection}));
+ }
+
+ selectedInCategory(category) {
+ const selected = this.shadowRoot.querySelectorAll(
+ 'input[name=' + category + 'Checkbox]:checked');
+ var tmp = [];
+ for (var val of selected.values()) tmp.push(val.value);
+ return tmp;
+ }
+
+ categoryForType(instance_type) {
+ for (let [key, value] of CATEGORIES.entries()) {
+ if (value.has(instance_type)) return key;
+ }
+ return 'unclassified';
+ }
+
+ createOption(text) {
+ const option = document.createElement('option');
+ option.value = text;
+ option.text = text;
+ return option;
+ }
+
+ populateSelect(id, iterable, autoselect = null) {
+ for (let option_value of iterable) {
+ const option = this.createOption(option_value);
+ if (autoselect === option_value) {
+ option.selected = 'selected';
+ }
+ this.$(id).appendChild(option);
+ }
+ }
+
+ clearCategories() {
+ for (const category of CATEGORIES.keys()) {
+ let f = this.$('#' + category + 'Content');
+ while (f.firstChild) {
+ f.removeChild(f.firstChild);
+ }
+ }
+ }
+
+ populateCategories() {
+ this.clearCategories();
+ const categories = {};
+ for (let cat of CATEGORIES.keys()) {
+ categories[cat] = [];
+ }
+
+ for (let instance_type of this.data[this.selection.isolate]
+ .non_empty_instance_types) {
+ if (IGNORED_INSTANCE_TYPES.has(instance_type)) continue;
+ const category = this.categoryForType(instance_type);
+ categories[category].push(instance_type);
+ }
+ for (let category of Object.keys(categories)) {
+ categories[category].sort();
+ for (let instance_type of categories[category]) {
+ this.$('#' + category + 'Content')
+ .appendChild(this.createCheckBox(instance_type, category));
+ }
+ }
+ }
+
+ unselectCategory(category) {
+ for (let checkbox of this.shadowRoot.querySelectorAll(
+ 'input[name=' + category + 'Checkbox]')) {
+ checkbox.checked = false;
+ }
+ this.notifySelectionChanged();
+ }
+
+ selectCategory(category) {
+ for (let checkbox of this.shadowRoot.querySelectorAll(
+ 'input[name=' + category + 'Checkbox]')) {
+ checkbox.checked = true;
+ }
+ this.notifySelectionChanged();
+ }
+
+ createCheckBox(instance_type, category) {
+ const div = document.createElement('div');
+ div.classList.add('boxDiv');
+ const input = document.createElement('input');
+ div.appendChild(input);
+ input.type = 'checkbox';
+ input.name = category + 'Checkbox';
+ input.checked = 'checked';
+ input.id = instance_type + 'Checkbox';
+ input.value = instance_type;
+ input.addEventListener('change', e => this.notifySelectionChanged(e));
+ const label = document.createElement('label');
+ div.appendChild(label);
+ label.innerText = instance_type;
+ label.htmlFor = instance_type + 'Checkbox';
+ return div;
+ }
+}
+
+customElements.define('details-selection', DetailsSelection);
diff --git a/deps/v8/tools/heap-stats/global-timeline.html b/deps/v8/tools/heap-stats/global-timeline.html
new file mode 100644
index 0000000000..788f966735
--- /dev/null
+++ b/deps/v8/tools/heap-stats/global-timeline.html
@@ -0,0 +1,16 @@
+<!-- Copyright 2018 the V8 project authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file. -->
+<template id="global-timeline-template">
+<style>
+#chart {
+ width: 100%;
+ height: 500px;
+}
+</style>
+<div id="container" style="display: none;">
+ <h2>Timeline</h2>
+ <div id="chart"></div>
+</div>
+</template>
+<script type="text/javascript" src="global-timeline.js"></script> \ No newline at end of file
diff --git a/deps/v8/tools/heap-stats/global-timeline.js b/deps/v8/tools/heap-stats/global-timeline.js
new file mode 100644
index 0000000000..0533f21432
--- /dev/null
+++ b/deps/v8/tools/heap-stats/global-timeline.js
@@ -0,0 +1,135 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+const KB = 1024;
+const MB = KB * KB;
+
+const global_timeline_template =
+ document.currentScript.ownerDocument.querySelector(
+ '#global-timeline-template');
+
+class GlobalTimeline extends HTMLElement {
+ constructor() {
+ super();
+ const shadowRoot = this.attachShadow({mode: 'open'});
+ shadowRoot.appendChild(global_timeline_template.content.cloneNode(true));
+ }
+
+ $(id) {
+ return this.shadowRoot.querySelector(id);
+ }
+
+ set data(value) {
+ this._data = value;
+ this.stateChanged();
+ }
+
+ get data() {
+ return this._data;
+ }
+
+ set selection(value) {
+ this._selection = value;
+ this.stateChanged();
+ }
+
+ get selection() {
+ return this._selection;
+ }
+
+ isValid() {
+ return this.data && this.selection;
+ }
+
+ hide() {
+ this.$('#container').style.display = 'none';
+ }
+
+ show() {
+ this.$('#container').style.display = 'block';
+ }
+
+ stateChanged() {
+ if (this.isValid()) {
+ this.drawChart();
+ } else {
+ this.hide();
+ }
+ }
+
+ getCategoryData() {
+ const categories = Object.keys(this.selection.categories)
+ .map(k => this.selection.category_names.get(k));
+ const labels = ['Time', ...categories];
+ const chart_data = [labels];
+ const isolate_data = this.data[this.selection.isolate];
+ Object.keys(isolate_data.gcs).forEach(gc_key => {
+ const gc_data = isolate_data.gcs[gc_key];
+ const data_set = gc_data[this.selection.data_set].instance_type_data;
+ const data = [];
+ data.push(gc_data.time);
+ Object.values(this.selection.categories).forEach(instance_types => {
+ data.push(
+ instance_types
+ .map(instance_type => {
+ return data_set[instance_type].overall;
+ })
+ .reduce((accu, current) => accu + current, 0) /
+ KB);
+ });
+ chart_data.push(data);
+ });
+ return chart_data;
+ }
+
+ getInstanceTypeData() {
+ const categories = Object.keys(this.selection.categories);
+ const instance_types =
+ Object.values(this.selection.categories)
+ .reduce((accu, current) => accu.concat(current), []);
+ const labels = ['Time', ...instance_types];
+ const chart_data = [labels];
+ const isolate_data = this.data[this.selection.isolate];
+ Object.keys(isolate_data.gcs).forEach(gc_key => {
+ const gc_data = isolate_data.gcs[gc_key];
+ const data_set = gc_data[this.selection.data_set].instance_type_data;
+ const data = [];
+ data.push(gc_data.time);
+ instance_types.forEach(instance_type => {
+ data.push(data_set[instance_type].overall / KB);
+ });
+ chart_data.push(data);
+ });
+ return chart_data;
+ }
+
+ drawChart() {
+ console.assert(this.data, 'invalid data');
+ console.assert(this.selection, 'invalid selection');
+
+ const chart_data = (this.selection.merge_categories) ?
+ this.getCategoryData() :
+ this.getInstanceTypeData();
+ const data = google.visualization.arrayToDataTable(chart_data);
+ const options = {
+ isStacked: true,
+ hAxis: {
+ title: 'Time [ms]',
+ },
+ vAxis: {title: 'Memory consumption [KBytes]'},
+ chartArea: {width: '85%', height: '70%'},
+ legend: {position: 'top', maxLines: '1'},
+ pointsVisible: true,
+ pointSize: 5,
+ explorer: {},
+ };
+ const chart = new google.visualization.AreaChart(this.$('#chart'));
+ this.show();
+ chart.draw(data, google.charts.Line.convertOptions(options));
+ }
+}
+
+customElements.define('global-timeline', GlobalTimeline);
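
getCategoryData() above builds one chart row per GC event: the timestamp followed by, for each selected category, the summed `overall` bytes of its instance types converted to KB. A toy model of that aggregation (all values invented for illustration):

    KB = 1024
    gc_event = {
        'time': 10,
        'data': {'JS_OBJECT_TYPE': {'overall': 2048},
                 'MAP_TYPE': {'overall': 1024}},
    }
    selection = {'user': ['JS_OBJECT_TYPE'], 'system': ['MAP_TYPE']}

    row = [gc_event['time']]
    for types in selection.values():
        # Sum the selected instance types per category and convert to KB.
        row.append(sum(gc_event['data'][t]['overall'] for t in types) / KB)
    print(row)  # [10, 2.0, 1.0]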
diff --git a/deps/v8/tools/heap-stats/index.html b/deps/v8/tools/heap-stats/index.html
new file mode 100644
index 0000000000..3c2e62b6d0
--- /dev/null
+++ b/deps/v8/tools/heap-stats/index.html
@@ -0,0 +1,88 @@
+<!DOCTYPE html>
+<!-- Copyright 2018 the V8 project authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file. -->
+
+<html lang="en">
+
+<head>
+ <meta charset="UTF-8">
+ <title>V8 Heap Statistics</title>
+ <link href='https://fonts.googleapis.com/css?family=Roboto' rel='stylesheet' type='text/css'>
+ <script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
+
+ <link rel="import" href="details-selection.html">
+ <link rel="import" href="global-timeline.html">
+ <link rel="import" href="trace-file-reader.html">
+
+ <style type="text/css">
+
+body {
+ font-family: 'Roboto', sans-serif;
+ margin-left: 5%;
+ margin-right: 5%;
+}
+
+ </style>
+ <script type="text/javascript">
+
+'use strict';
+
+google.charts.load('current', {'packages':['line', 'corechart']});
+
+function $(id) { return document.querySelector(id); }
+
+function removeAllChildren(node) {
+ while (node.firstChild) {
+ node.removeChild(node.firstChild);
+ }
+}
+
+let state = Object.create(null);
+
+function globalDataChanged(e) {
+ state.data = e.detail;
+ // Emit one entry with the whole model for debugging purposes.
+ console.log(state.data);
+ state.selection = null;
+ $('#global-timeline').selection = state.selection;
+ $('#global-timeline').data = state.data;
+ $('#type-details').selection = state.selection;
+ $('#type-details').data = state.data;
+ $('#details-selection').data = state.data;
+}
+
+function globalSelectionChangedA(e) {
+ state.selection = e.detail;
+ $('#global-timeline').selection = state.selection;
+ $('#type-details').selection = state.selection;
+}
+
+ </script>
+</head>
+
+<body>
+ <trace-file-reader onchange="globalDataChanged(event)"></trace-file-reader>
+ <h1>V8 Heap Statistics</h1>
+ <p>Visualize object statistics that have been gathered using</p>
+ <ul>
+ <li><code>--trace-gc-object-stats</code> on V8</li>
+ <li>
+ <a
+ href="https://www.chromium.org/developers/how-tos/trace-event-profiling-tool">Chrome's
+ tracing infrastructure</a> collecting data for the category
+ <code>v8.gc_stats</code>. The trace file needs to be unpacked (e.g. using
+ <code>gunzip</code>).
+ </li>
+ </ul>
+ <p>
+ Note that the visualizer needs to run on a web server due to HTML imports
+ requiring <a
+ href="https://en.wikipedia.org/wiki/Cross-origin_resource_sharing">CORS</a>.
+ </p>
+ <details-selection id="details-selection" onchange="globalSelectionChangedA(event)"></details-selection>
+ <global-timeline id="global-timeline"></global-timeline>
+ <type-details id="type-details"></type-details>
+</body>
+
+</html>
diff --git a/deps/v8/tools/heap-stats/trace-file-reader.html b/deps/v8/tools/heap-stats/trace-file-reader.html
new file mode 100644
index 0000000000..98c2ef0c60
--- /dev/null
+++ b/deps/v8/tools/heap-stats/trace-file-reader.html
@@ -0,0 +1,26 @@
+<!-- Copyright 2018 the V8 project authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file. -->
+<template id="trace-file-reader-template">
+<style>
+#fileReader {
+ width: 100%;
+ height: 100px;
+ line-height: 100px;
+ text-align: center;
+ border: solid 1px #000000;
+ border-radius: 5px;
+}
+
+#fileReader > input {
+ display: none;
+}
+</style>
+<div id="fileReader">
+ <span id="label">
+ Drag and drop a trace file into this area, or click to choose from disk.
+ </span>
+ <input id="file" type="file" name="file" />
+</div>
+</template>
+<script type="text/javascript" src="trace-file-reader.js"></script>
diff --git a/deps/v8/tools/heap-stats/trace-file-reader.js b/deps/v8/tools/heap-stats/trace-file-reader.js
new file mode 100644
index 0000000000..59825fe514
--- /dev/null
+++ b/deps/v8/tools/heap-stats/trace-file-reader.js
@@ -0,0 +1,300 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+const trace_file_reader_template =
+ document.currentScript.ownerDocument.querySelector(
+ '#trace-file-reader-template');
+
+class TraceFileReader extends HTMLElement {
+ constructor() {
+ super();
+ const shadowRoot = this.attachShadow({mode: 'open'});
+ shadowRoot.appendChild(trace_file_reader_template.content.cloneNode(true));
+ this.addEventListener('click', e => this.handleClick(e));
+ this.addEventListener('dragover', e => this.handleDragOver(e));
+ this.addEventListener('drop', e => this.handleChange(e));
+ this.$('#file').addEventListener('change', e => this.handleChange(e));
+ }
+
+ $(id) {
+ return this.shadowRoot.querySelector(id);
+ }
+
+ updateLabel(text) {
+ this.$('#label').innerText = text;
+ }
+
+ handleClick(event) {
+ this.$('#file').click();
+ }
+
+ handleChange(event) {
+ // Used for drop and file change.
+ event.preventDefault();
+ var host = event.dataTransfer ? event.dataTransfer : event.target;
+ this.readFile(host.files[0]);
+ }
+
+ handleDragOver(event) {
+ event.preventDefault();
+ }
+
+ connectedCallback() {}
+
+ readFile(file) {
+ if (!file) {
+ this.updateLabel('Failed to load file.');
+ return;
+ }
+
+ const result = new FileReader();
+ result.onload = (e) => {
+ let contents = e.target.result.split('\n');
+ const return_data = (e.target.result.includes('V8.GC_Objects_Stats')) ?
+ this.createModelFromChromeTraceFile(contents) :
+ this.createModelFromV8TraceFile(contents);
+ this.updateLabel('Finished loading \'' + file.name + '\'.');
+ this.dispatchEvent(new CustomEvent(
+ 'change', {bubbles: true, composed: true, detail: return_data}));
+ };
+ result.readAsText(file);
+ }
+
+ createOrUpdateEntryIfNeeded(data, keys, entry) {
+ console.assert(entry.isolate, 'entry should have an isolate');
+ if (!(entry.isolate in keys)) {
+ keys[entry.isolate] = new Set();
+ }
+ if (!(entry.isolate in data)) {
+ data[entry.isolate] = {
+ non_empty_instance_types: new Set(),
+ gcs: {},
+ zonetags: [],
+ samples: {zone: {}},
+ start: null,
+ end: null,
+ data_sets: new Set()
+ };
+ }
+ const data_object = data[entry.isolate];
+ if (('id' in entry) && !(entry.id in data_object.gcs)) {
+ data_object.gcs[entry.id] = {non_empty_instance_types: new Set()};
+ }
+ if ('time' in entry) {
+ if (data_object.end === null || data_object.end < entry.time)
+ data_object.end = entry.time;
+ if (data_object.start === null || data_object.start > entry.time)
+ data_object.start = entry.time;
+ }
+ }
+
+ createDatasetIfNeeded(data, keys, entry, data_set) {
+ if (!(data_set in data[entry.isolate].gcs[entry.id])) {
+ data[entry.isolate].gcs[entry.id][data_set] = {
+ instance_type_data: {},
+ non_empty_instance_types: new Set(),
+ overall: 0
+ };
+ data[entry.isolate].data_sets.add(data_set);
+ }
+ }
+
+ addInstanceTypeData(
+ data, keys, isolate, gc_id, data_set, instance_type, entry) {
+ keys[isolate].add(data_set);
+ data[isolate].gcs[gc_id][data_set].instance_type_data[instance_type] = {
+ overall: entry.overall,
+ count: entry.count,
+ histogram: entry.histogram,
+ over_allocated: entry.over_allocated,
+ over_allocated_histogram: entry.over_allocated_histogram
+ };
+ data[isolate].gcs[gc_id][data_set].overall += entry.overall;
+ if (entry.overall !== 0) {
+ data[isolate].gcs[gc_id][data_set].non_empty_instance_types.add(
+ instance_type);
+ data[isolate].gcs[gc_id].non_empty_instance_types.add(instance_type);
+ data[isolate].non_empty_instance_types.add(instance_type);
+ }
+ }
+
+ extendAndSanitizeModel(data, keys) {
+ const checkNonNegativeProperty = (obj, property) => {
+ console.assert(obj[property] >= 0, 'negative property', obj, property);
+ };
+
+ for (const isolate of Object.keys(data)) {
+ for (const gc of Object.keys(data[isolate].gcs)) {
+ for (const data_set_key of keys[isolate]) {
+ const data_set = data[isolate].gcs[gc][data_set_key];
+ // 1. Create a ranked instance type array that sorts instance
+ // types by memory size (overall).
+ data_set.ranked_instance_types =
+ [...data_set.non_empty_instance_types].sort(function(a, b) {
+ if (data_set.instance_type_data[a].overall >
+ data_set.instance_type_data[b].overall) {
+ return 1;
+ } else if (
+ data_set.instance_type_data[a].overall <
+ data_set.instance_type_data[b].overall) {
+ return -1;
+ }
+ return 0;
+ });
+
+ let known_count = 0;
+ let known_overall = 0;
+ let known_histogram =
+ Array(
+ data_set.instance_type_data.FIXED_ARRAY_TYPE.histogram.length)
+ .fill(0);
+ for (const instance_type in data_set.instance_type_data) {
+ if (!instance_type.startsWith('*FIXED_ARRAY')) continue;
+ const subtype = data_set.instance_type_data[instance_type];
+ known_count += subtype.count;
+ known_overall += subtype.overall;
+ for (let i = 0; i < subtype.histogram.length; i++) {
+ known_histogram[i] += subtype.histogram[i];
+ }
+ }
+
+ const fixed_array_data = data_set.instance_type_data.FIXED_ARRAY_TYPE;
+ const unknown_entry = {
+ count: fixed_array_data.count - known_count,
+ overall: fixed_array_data.overall - known_overall,
+ histogram: fixed_array_data.histogram.map(
+ (value, index) => value - known_histogram[index])
+ };
+
+ // Check for non-negative values.
+ checkNonNegativeProperty(unknown_entry, 'count');
+ checkNonNegativeProperty(unknown_entry, 'overall');
+ for (let i = 0; i < unknown_entry.histogram.length; i++) {
+ checkNonNegativeProperty(unknown_entry.histogram, i);
+ }
+
+ data_set.instance_type_data['*FIXED_ARRAY_UNKNOWN_SUB_TYPE'] =
+ unknown_entry;
+ data_set.non_empty_instance_types.add(
+ '*FIXED_ARRAY_UNKNOWN_SUB_TYPE');
+ }
+ }
+ }
+ }
+
+ createModelFromChromeTraceFile(contents) {
+ console.log('Processing log as chrome trace file.');
+ const data = Object.create(null); // Final data container.
+ const keys = Object.create(null); // Collecting 'keys' per isolate.
+
+ // Pop last line in log as it might be broken.
+ contents.pop();
+ // Remove trailing comma.
+ contents[contents.length - 1] = contents[contents.length - 1].slice(0, -1);
+ // Terminate JSON.
+ const sanitized_contents = [...contents, ']}'].join('');
+ try {
+ const raw_data = JSON.parse(sanitized_contents);
+ const objects_stats_data =
+ raw_data.traceEvents.filter(e => e.name == 'V8.GC_Objects_Stats');
+ objects_stats_data.forEach(trace_data => {
+ const actual_data = trace_data.args;
+ const data_sets = new Set(Object.keys(actual_data));
+ Object.keys(actual_data).forEach(data_set => {
+ const string_entry = actual_data[data_set];
+ try {
+ const entry = JSON.parse(string_entry);
+ this.createOrUpdateEntryIfNeeded(data, keys, entry);
+ this.createDatasetIfNeeded(data, keys, entry, data_set);
+ const isolate = entry.isolate;
+ const time = entry.time;
+ const gc_id = entry.id;
+ data[isolate].gcs[gc_id].time = time;
+ data[isolate].gcs[gc_id][data_set].bucket_sizes =
+ entry.bucket_sizes;
+ for (let [instance_type, value] of Object.entries(
+ entry.type_data)) {
+ // Trace file format uses markers that do not have actual
+ // properties.
+ if (!('overall' in value)) continue;
+ this.addInstanceTypeData(
+ data, keys, isolate, gc_id, data_set, instance_type, value);
+ }
+ } catch (e) {
+ console.log('Unable to parse data set entry', e);
+ }
+ });
+ });
+ } catch (e) {
+ console.log('Unable to parse chrome trace file.', e);
+ }
+ this.extendAndSanitizeModel(data, keys);
+ return data;
+ }
+
+ createModelFromV8TraceFile(contents) {
+ console.log('Processing log as V8 trace file.');
+ contents = contents.map(function(line) {
+ try {
+ // Strip away a potentially present adb logcat prefix.
+ line = line.replace(/^I\/v8\s*\(\d+\):\s+/g, '');
+ return JSON.parse(line);
+ } catch (e) {
+ console.log('Unable to parse line: \'' + line + '\' (' + e + ')');
+ }
+ return null;
+ });
+
+ const data = Object.create(null); // Final data container.
+ const keys = Object.create(null); // Collecting 'keys' per isolate.
+
+ for (var entry of contents) {
+ if (entry === null || entry.type === undefined) {
+ continue;
+ }
+ if (entry.type === 'zone') {
+ this.createOrUpdateEntryIfNeeded(data, keys, entry);
+ const stacktrace = ('stacktrace' in entry) ? entry.stacktrace : [];
+ data[entry.isolate].samples.zone[entry.time] = {
+ allocated: entry.allocated,
+ pooled: entry.pooled,
+ stacktrace: stacktrace
+ };
+ } else if (
+ entry.type === 'zonecreation' || entry.type === 'zonedestruction') {
+ this.createOrUpdateEntryIfNeeded(data, keys, entry);
+ data[entry.isolate].zonetags.push(
+ Object.assign({opening: entry.type === 'zonecreation'}, entry));
+ } else if (entry.type === 'gc_descriptor') {
+ this.createOrUpdateEntryIfNeeded(data, keys, entry);
+ data[entry.isolate].gcs[entry.id].time = entry.time;
+ if ('zone' in entry)
+ data[entry.isolate].gcs[entry.id].malloced = entry.zone;
+ } else if (entry.type === 'instance_type_data') {
+ if (entry.id in data[entry.isolate].gcs) {
+ this.createOrUpdateEntryIfNeeded(data, keys, entry);
+ this.createDatasetIfNeeded(data, keys, entry, entry.key);
+ this.addInstanceTypeData(
+ data, keys, entry.isolate, entry.id, entry.key,
+ entry.instance_type_name, entry);
+ }
+ } else if (entry.type === 'bucket_sizes') {
+ if (entry.id in data[entry.isolate].gcs) {
+ this.createOrUpdateEntryIfNeeded(data, keys, entry);
+ this.createDatasetIfNeeded(data, keys, entry, entry.key);
+ data[entry.isolate].gcs[entry.id][entry.key].bucket_sizes =
+ entry.sizes;
+ }
+ } else {
+ console.log('Unknown entry type: ' + entry.type);
+ }
+ }
+ this.extendAndSanitizeModel(data, keys);
+ return data;
+ }
+}
+
+customElements.define('trace-file-reader', TraceFileReader);
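
createModelFromV8TraceFile() expects one JSON object per line, keyed by a 'type' field ('zone', 'gc_descriptor', 'instance_type_data', 'bucket_sizes', ...), possibly behind an adb logcat prefix. A stripped-down Python sketch of that per-line parse step (the sample line is invented; the prefix regex mirrors the one above):

    import json
    import re

    LOGCAT_PREFIX = re.compile(r'^I/v8\s*\(\d+\):\s+')

    def parse_line(line):
        # Strip a potential adb logcat prefix, then parse the JSON payload;
        # unparseable lines are skipped instead of aborting the whole load.
        try:
            return json.loads(LOGCAT_PREFIX.sub('', line))
        except ValueError:
            return None

    sample = 'I/v8 (1234): {"type": "gc_descriptor", "isolate": "0x1", "id": 0, "time": 42}'
    entry = parse_line(sample)
    print(entry['type'], entry['time'])  # gc_descriptor 42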
diff --git a/deps/v8/tools/js2c.py b/deps/v8/tools/js2c.py
index 105be0c1b6..0107436df6 100755
--- a/deps/v8/tools/js2c.py
+++ b/deps/v8/tools/js2c.py
@@ -50,10 +50,12 @@ def ToCArray(byte_sequence):
return textwrap.fill(joined, 80)
-def RemoveCommentsAndTrailingWhitespace(lines):
+def RemoveCommentsEmptyLinesAndWhitespace(lines):
+ lines = re.sub(r'\n+', '\n', lines) # empty lines
lines = re.sub(r'//.*\n', '\n', lines) # end-of-line comments
lines = re.sub(re.compile(r'/\*.*?\*/', re.DOTALL), '', lines) # comments.
- lines = re.sub(r'\s+\n+', '\n', lines) # trailing whitespace
+ lines = re.sub(r'\s+\n', '\n', lines) # trailing whitespace
+ lines = re.sub(r'\n\s+', '\n', lines) # initial whitespace
return lines
@@ -342,7 +344,7 @@ def BuildFilterChain(macro_filename, message_template_file):
filter_chain.append(lambda l: ExpandConstants(l, message_templates))
filter_chain.extend([
- RemoveCommentsAndTrailingWhitespace,
+ RemoveCommentsEmptyLinesAndWhitespace,
ExpandInlineMacros,
ExpandInlineConstants,
Validate,
@@ -355,7 +357,7 @@ def BuildFilterChain(macro_filename, message_template_file):
return reduce(chain, filter_chain)
def BuildExtraFilterChain():
- return lambda x: RemoveCommentsAndTrailingWhitespace(Validate(x))
+ return lambda x: RemoveCommentsEmptyLinesAndWhitespace(Validate(x))
class Sources:
def __init__(self):
@@ -365,7 +367,7 @@ class Sources:
def IsDebuggerFile(filename):
- return "debug" in filename
+ return os.path.basename(os.path.dirname(filename)) == "debug"
def IsMacroFile(filename):
return filename.endswith("macros.py")
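
The renamed filter now also collapses runs of empty lines and strips leading whitespace. A standalone copy of it applied to a tiny sample (the input string is invented):

    import re

    def RemoveCommentsEmptyLinesAndWhitespace(lines):
        lines = re.sub(r'\n+', '\n', lines)     # empty lines
        lines = re.sub(r'//.*\n', '\n', lines)  # end-of-line comments
        lines = re.sub(re.compile(r'/\*.*?\*/', re.DOTALL), '', lines)  # comments
        lines = re.sub(r'\s+\n', '\n', lines)   # trailing whitespace
        lines = re.sub(r'\n\s+', '\n', lines)   # initial whitespace
        return lines

    src = 'function f() {\n  // comment\n  return 1;  \n}\n\n'
    print(RemoveCommentsEmptyLinesAndWhitespace(src))
    # -> 'function f() {\nreturn 1;\n}\n'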
diff --git a/deps/v8/tools/map-processor b/deps/v8/tools/map-processor
new file mode 100755
index 0000000000..c0713bdf13
--- /dev/null
+++ b/deps/v8/tools/map-processor
@@ -0,0 +1,41 @@
+#!/bin/sh
+
+# Find the name of the log file to process; it must not start with a dash.
+log_file="v8.log"
+for arg in "$@"
+do
+ if ! expr "X${arg}" : "^X-" > /dev/null; then
+ log_file=${arg}
+ fi
+done
+
+tools_path=`cd $(dirname "$0");pwd`
+if [ ! "$D8_PATH" ]; then
+ d8_public=`which d8`
+ if [ -x "$d8_public" ]; then D8_PATH=$(dirname "$d8_public"); fi
+fi
+[ -n "$D8_PATH" ] || D8_PATH=$tools_path/..
+d8_exec=$D8_PATH/d8
+
+if [ ! -x "$d8_exec" ]; then
+ D8_PATH=`pwd`/out/native
+ d8_exec=$D8_PATH/d8
+fi
+
+if [ ! -x "$d8_exec" ]; then
+ d8_exec=`grep -m 1 -o '".*/d8"' $log_file | sed 's/"//g'`
+fi
+
+if [ ! -x "$d8_exec" ]; then
+ echo "d8 shell not found in $D8_PATH"
+ echo "To build, execute 'make native' from the V8 directory"
+ exit 1
+fi
+
+# Suppress noisy stderr output from the d8 invocation.
+cat $log_file | $d8_exec $tools_path/splaytree.js $tools_path/codemap.js \
+ $tools_path/csvparser.js $tools_path/consarray.js \
+ $tools_path/profile.js $tools_path/profile_view.js \
+ $tools_path/logreader.js $tools_path/arguments.js \
+ $tools_path/map-processor.js $tools_path/SourceMap.js \
+ $tools_path/map-processor-driver.js -- "$@" 2>/dev/null
diff --git a/deps/v8/tools/map-processor-driver.js b/deps/v8/tools/map-processor-driver.js
new file mode 100644
index 0000000000..31a4860849
--- /dev/null
+++ b/deps/v8/tools/map-processor-driver.js
@@ -0,0 +1,33 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function processArguments(args) {
+ var processor = new ArgumentsProcessor(args);
+ if (processor.parse()) {
+ return processor.result();
+ } else {
+ processor.printUsageAndExit();
+ }
+}
+
+function initSourceMapSupport() {
+ // Pull dev tools source maps into our name space.
+ SourceMap = WebInspector.SourceMap;
+
+ // Overwrite the load function to load scripts synchronously.
+ SourceMap.load = function(sourceMapURL) {
+ var content = readFile(sourceMapURL);
+ var sourceMapObject = (JSON.parse(content));
+ return new SourceMap(sourceMapURL, sourceMapObject);
+ };
+}
+
+var params = processArguments(arguments);
+var sourceMap = null;
+if (params.sourceMap) {
+ initSourceMapSupport();
+ sourceMap = SourceMap.load(params.sourceMap);
+}
+var mapProcessor = new MapProcessor();
+mapProcessor.processLogFile(params.logFileName);
diff --git a/deps/v8/tools/map-processor.html b/deps/v8/tools/map-processor.html
new file mode 100644
index 0000000000..4029e96343
--- /dev/null
+++ b/deps/v8/tools/map-processor.html
@@ -0,0 +1,1254 @@
+<!DOCTYPE html>
+<html>
+ <!--
+ Copyright 2017 the V8 project authors. All rights reserved. Use of this source
+ code is governed by a BSD-style license that can be found in the LICENSE file.
+ -->
+<head>
+<meta charset="UTF-8">
+<style>
+html, body {
+ font-family: sans-serif;
+ padding: 0px;
+ margin: 0px;
+}
+h1, h2, h3, section {
+ padding-left: 15px;
+}
+#stats table {
+ display: inline-block;
+ padding-right: 50px;
+}
+#stats .transitionTable {
+ max-height: 200px;
+ overflow-y: scroll;
+}
+#timeline {
+ position: relative;
+ height: 300px;
+ overflow-y: hidden;
+ overflow-x: scroll;
+ user-select: none;
+}
+#timelineChunks {
+ height: 250px;
+ position: absolute;
+ margin-right: 100px;
+}
+#timelineCanvas {
+ height: 250px;
+ position: relative;
+ overflow: visible;
+ pointer-events: none;
+}
+.chunk {
+ width: 6px;
+ border: 0px white solid;
+ border-width: 0 2px 0 2px;
+ position: absolute;
+ background-size: 100% 100%;
+ image-rendering: pixelated;
+ bottom: 0px;
+}
+.timestamp {
+ height: 250px;
+ width: 100px;
+ border-left: 1px black dashed;
+ padding-left: 4px;
+ position: absolute;
+ pointer-events: none;
+ font-size: 10px;
+ opacity: 0.5;
+}
+#timelineOverview {
+ width: 100%;
+ height: 50px;
+ position: relative;
+ margin-top: -50px;
+ margin-bottom: 10px;
+ background-size: 100% 100%;
+ border: 1px black solid;
+ border-width: 1px 0 1px 0;
+ overflow: hidden;
+}
+#timelineOverviewIndicator {
+ height: 100%;
+ position: absolute;
+ box-shadow: 0px 2px 20px -5px black inset;
+ top: 0px;
+ cursor: ew-resize;
+}
+#timelineOverviewIndicator .leftMask,
+#timelineOverviewIndicator .rightMask {
+ background-color: rgba(200, 200, 200, 0.5);
+ width: 10000px;
+ height: 100%;
+ position: absolute;
+ top: 0px;
+}
+#timelineOverviewIndicator .leftMask {
+ right: 100%;
+}
+#timelineOverviewIndicator .rightMask {
+ left: 100%;
+}
+#mapDetails {
+ font-family: monospace;
+ white-space: pre;
+}
+#transitionView {
+ overflow-x: scroll;
+ white-space: nowrap;
+ min-height: 50px;
+ max-height: 200px;
+ padding: 50px 0 0 0;
+ margin-top: -25px;
+ width: 100%;
+}
+.map {
+ width: 20px;
+ height: 20px;
+ display: inline-block;
+ border-radius: 50%;
+ background-color: black;
+ border: 4px solid white;
+ font-size: 10px;
+ text-align: center;
+ line-height: 18px;
+ color: white;
+ vertical-align: top;
+ margin-top: -13px;
+ /* raise z-index */
+ position: relative;
+ z-index: 2;
+ cursor: pointer;
+}
+.map.selected {
+ border-color: black;
+}
+.transitions {
+ display: inline-block;
+ margin-left: -15px;
+}
+.transition {
+ min-height: 55px;
+ margin: 0 0 -2px 2px;
+}
+/* gray out deprecated transitions */
+.deprecated > .transitionEdge,
+.deprecated > .map {
+ opacity: 0.5;
+}
+.deprecated > .transition {
+ border-color: rgba(0, 0, 0, 0.5);
+}
+/* Show a border for all but the first transition */
+.transition:nth-of-type(2),
+.transition:nth-last-of-type(n+2) {
+ border-left: 2px solid;
+ margin-left: 0px;
+}
+/* special case for 2 transitions */
+.transition:nth-last-of-type(1) {
+ border-left: none;
+}
+/* topmost transitions are not related */
+#transitionView > .transition {
+ border-left: none;
+}
+/* topmost transition edge needs initial offset to be aligned */
+#transitionView > .transition > .transitionEdge {
+ margin-left: 13px;
+}
+.transitionEdge {
+ height: 2px;
+ width: 80px;
+ display: inline-block;
+ margin: 0 0 2px 0;
+ background-color: black;
+ vertical-align: top;
+ padding-left: 15px;
+}
+.transitionLabel {
+ color: black;
+ transform: rotate(-15deg);
+ transform-origin: top left;
+ margin-top: -10px;
+ font-size: 10px;
+ white-space: normal;
+ word-break: break-all;
+ background-color: rgba(255,255,255,0.5);
+}
+.red {
+ background-color: red;
+}
+.green {
+ background-color: green;
+}
+.yellow {
+ background-color: yellow;
+ color: black;
+}
+.blue {
+ background-color: blue;
+}
+.orange {
+ background-color: orange;
+}
+.violet {
+ background-color: violet;
+ color: black;
+}
+.showSubtransitions {
+ width: 0;
+ height: 0;
+ border-left: 6px solid transparent;
+ border-right: 6px solid transparent;
+ border-top: 10px solid black;
+ cursor: zoom-in;
+ margin: 4px 0 0 4px;
+}
+.showSubtransitions.opened {
+ border-top: none;
+ border-bottom: 10px solid black;
+ cursor: zoom-out;
+}
+#tooltip {
+ position: absolute;
+ width: 10px;
+ height: 10px;
+ background-color: red;
+ pointer-events: none;
+ z-index: 100;
+ display: none;
+}
+</style>
+<script src="./splaytree.js"></script>
+<script src="./codemap.js"></script>
+<script src="./csvparser.js"></script>
+<script src="./consarray.js"></script>
+<script src="./profile.js"></script>
+<script src="./profile_view.js"></script>
+<script src="./logreader.js"></script>
+<script src="./SourceMap.js"></script>
+<script src="./arguments.js"></script>
+<script src="./map-processor.js"></script>
+<script>
+"use strict"
+// =========================================================================
+const kChunkHeight = 250;
+const kChunkWidth = 10;
+
+class State {
+ constructor() {
+ this._nofChunks = 400;
+ this._map = undefined;
+ this._timeline = undefined;
+ this._chunks = undefined;
+ this._view = new View(this);
+ this._navigation = new Navigation(this, this.view);
+ }
+ get timeline() { return this._timeline }
+ set timeline(value) {
+ this._timeline = value;
+ this.updateChunks();
+ this.view.updateTimeline();
+ this.view.updateStats();
+ }
+ get chunks() { return this._chunks }
+ get nofChunks() { return this._nofChunks }
+ set nofChunks(count) {
+ this._nofChunks = count;
+ this.updateChunks();
+ this.view.updateTimeline();
+ }
+ get view() { return this._view }
+ get navigation() { return this._navigation }
+ get map() { return this._map }
+ set map(value) {
+ this._map = value;
+ this._navigation.updateUrl();
+ this.view.updateMapDetails();
+ this.view.redraw();
+ }
+ updateChunks() {
+ this._chunks = this._timeline.chunks(this._nofChunks);
+ }
+ get entries() {
+ if (!this.map) return {};
+ return {
+ map: this.map.id,
+ time: this.map.time
+ }
+ }
+}
+
+// =========================================================================
+// DOM Helper
+function $(id) {
+ return document.getElementById(id)
+}
+
+function removeAllChildren(node) {
+ while (node.lastChild) {
+ node.removeChild(node.lastChild);
+ }
+}
+
+function selectOption(select, match) {
+ let options = select.options;
+ for (let i = 0; i < options.length; i++) {
+ if (match(i, options[i])) {
+ select.selectedIndex = i;
+ return;
+ }
+ }
+}
+
+function div(classes) {
+ let node = document.createElement('div');
+ if (classes !== void 0) {
+ if (typeof classes == "string") {
+ node.classList.add(classes);
+ } else {
+ classes.forEach(cls => node.classList.add(cls));
+ }
+ }
+ return node;
+}
+
+function table(className) {
+ let node = document.createElement("table")
+ if (className) node.classList.add(className)
+ return node;
+}
+function td(text) {
+ let node = document.createElement("td");
+ node.innerText = text;
+ return node;
+}
+function tr() {
+ let node = document.createElement("tr");
+ return node;
+}
+
+function define(prototype, name, fn) {
+ Object.defineProperty(prototype, name, {value:fn, enumerable:false});
+}
+
+define(Array.prototype, "max", function(fn) {
+ if (this.length == 0) return undefined;
+ if (fn == undefined) fn = (each) => each;
+ let max = fn(this[0]);
+ for (let i = 1; i < this.length; i++) {
+ max = Math.max(max, fn(this[i]));
+ }
+ return max;
+})
+define(Array.prototype, "histogram", function(mapFn) {
+ let histogram = [];
+ for (let i = 0; i < this.length; i++) {
+ let value = this[i];
+ let index = Math.round(mapFn(value))
+ let bucket = histogram[index];
+ if (bucket !== undefined) {
+ bucket.push(value);
+ } else {
+ histogram[index] = [value];
+ }
+ }
+ for (let i = 0; i < histogram.length; i++) {
+ histogram[i] = histogram[i] || [];
+ }
+ return histogram;
+});
+
+define(Array.prototype, "first", function() { return this[0] });
+define(Array.prototype, "last", function() { return this[this.length - 1] });
+
+// =========================================================================
+// EventHandlers
+function handleBodyLoad() {
+ let upload = $('uploadInput');
+ upload.onclick = (e) => { e.target.value = null };
+ upload.onchange = (e) => { handleLoadFile(e.target) };
+ upload.focus();
+
+ document.state = new State();
+ $("transitionView").addEventListener("mousemove", e => {
+ let tooltip = $("tooltip");
+ tooltip.style.left = e.pageX + "px";
+ tooltip.style.top = e.pageY + "px";
+ let map = e.target.map;
+ if (map) {
+ $("tooltipContents").innerText = map.description.join("\n");
+ }
+ });
+}
+
+function handleLoadFile(upload) {
+ let files = upload.files;
+ let file = files[0];
+ let reader = new FileReader();
+ reader.onload = function(evt) {
+ handleLoadText(this.result);
+ }
+ reader.readAsText(file);
+}
+
+function handleLoadText(text) {
+ let mapProcessor = new MapProcessor();
+ document.state.timeline = mapProcessor.processString(text);
+}
+
+function handleKeyDown(event) {
+ let nav = document.state.navigation;
+ switch(event.key) {
+ case "ArrowUp":
+ event.preventDefault();
+ if (event.shiftKey) {
+ nav.selectPrevEdge();
+ } else {
+ nav.moveInChunk(-1);
+ }
+ return false;
+ case "ArrowDown":
+ event.preventDefault();
+ if (event.shiftKey) {
+ nav.selectNextEdge();
+ } else {
+ nav.moveInChunk(1);
+ }
+ return false;
+ case "ArrowLeft":
+ nav.moveInChunks(false);
+ break;
+ case "ArrowRight":
+ nav.moveInChunks(true);
+ break;
+ case "+":
+ nav.increaseTimelineResolution();
+ break;
+ case "-":
+ nav.decreaseTimelineResolution();
+ break;
+ }
+};
+document.onkeydown = handleKeyDown;
+
+function handleTimelineIndicatorMove(event) {
+ if (event.buttons == 0) return;
+ let timelineTotalWidth = $("timelineCanvas").offsetWidth;
+ let factor = $("timelineOverview").offsetWidth / timelineTotalWidth;
+ $("timeline").scrollLeft += event.movementX / factor;
+}
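+// The overview strip is a scaled-down projection of the full timeline: with
+// factor = overviewWidth / timelineWidth, a drag of dx pixels on the overview
+// scrolls the timeline by dx / factor pixels.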
+
+// =========================================================================
+
+Object.defineProperty(Edge.prototype, 'getColor', { value:function() {
+ return transitionTypeToColor(this.type);
+}});
+
+class Navigation {
+ constructor(state, view) {
+ this.state = state;
+ this.view = view;
+ }
+ get map() { return this.state.map }
+ set map(value) { this.state.map = value }
+ get chunks() { return this.state.chunks }
+
+ increaseTimelineResolution() {
+ this.state.nofChunks *= 1.5;
+ }
+
+ decreaseTimelineResolution() {
+ this.state.nofChunks /= 1.5;
+ }
+
+ selectNextEdge() {
+ if (!this.map) return;
+ if (this.map.children.length != 1) return;
+ this.map = this.map.children[0].to;
+ }
+
+ selectPrevEdge() {
+ if (!this.map) return;
+ if (!this.map.parent()) return;
+ this.map = this.map.parent();
+ }
+
+ selectDefaultMap() {
+ this.map = this.chunks[0].at(0);
+ }
+ moveInChunks(next) {
+ if (!this.map) return this.selectDefaultMap();
+ let chunkIndex = this.map.chunkIndex(this.chunks);
+ let chunk = this.chunks[chunkIndex];
+ let index = chunk.indexOf(this.map);
+ if (next) {
+ chunk = chunk.next(this.chunks);
+ } else {
+ chunk = chunk.prev(this.chunks);
+ }
+ if (!chunk) return;
+ index = Math.min(index, chunk.size()-1);
+ this.map = chunk.at(index);
+ }
+
+ moveInChunk(delta) {
+ if (!this.map) return this.selectDefaultMap();
+ let chunkIndex = this.map.chunkIndex(this.chunks)
+ let chunk = this.chunks[chunkIndex];
+ let index = chunk.indexOf(this.map) + delta;
+ let map;
+ if (index < 0) {
+ map = chunk.prev(this.chunks).last();
+ } else if (index >= chunk.size()) {
+ map = chunk.next(this.chunks).first()
+ } else {
+ map = chunk.at(index);
+ }
+ this.map = map;
+ }
+
+ updateUrl() {
+ let entries = this.state.entries;
+ let params = new URLSearchParams(entries);
+ window.history.pushState(entries, "", "?" + params.toString());
+ }
+}
+
+class View {
+ constructor(state) {
+ this.state = state;
+    setInterval(() => this.updateOverviewWindow(), 50);
+ this.backgroundCanvas = document.createElement("canvas");
+ this.transitionView = new TransitionView(state, $("transitionView"));
+ this.statsView = new StatsView(state, $("stats"));
+ this.isLocked = false;
+ }
+ get chunks() { return this.state.chunks }
+ get timeline() { return this.state.timeline }
+ get map() { return this.state.map }
+
+ updateStats() {
+ this.statsView.update();
+ }
+
+ updateMapDetails() {
+ let details = "";
+ if (this.map) {
+ details += "ID: " + this.map.id;
+ details += "\n" + this.map.description;
+ }
+ $("mapDetails").innerText = details;
+ this.transitionView.showMap(this.map);
+ }
+
+ updateTimeline() {
+ let chunksNode = $("timelineChunks");
+ removeAllChildren(chunksNode);
+ let chunks = this.chunks;
+ let max = chunks.max(each => each.size());
+ let start = this.timeline.startTime;
+ let end = this.timeline.endTime;
+ let duration = end - start;
+ const timeToPixel = chunks.length * kChunkWidth / duration;
+ let addTimestamp = (time, name) => {
+ let timeNode = div("timestamp");
+ timeNode.innerText = name;
+ timeNode.style.left = ((time-start) * timeToPixel) + "px";
+ chunksNode.appendChild(timeNode);
+ };
+ for (let i = 0; i < chunks.length; i++) {
+ let chunk = chunks[i];
+ let height = (chunk.size() / max * kChunkHeight);
+ chunk.height = height;
+ if (chunk.isEmpty()) continue;
+ let node = div();
+ node.className = "chunk";
+ node.style.left = (i * kChunkWidth) + "px";
+ node.style.height = height + "px";
+ node.chunk = chunk;
+ node.addEventListener("mousemove", e => this.handleChunkMouseMove(e));
+ node.addEventListener("click", e => this.handleChunkClick(e));
+ node.addEventListener("dblclick", e => this.handleChunkDoubleClick(e));
+ this.setTimelineChunkBackground(chunk, node);
+ chunksNode.appendChild(node);
+ chunk.markers.forEach(marker => addTimestamp(marker.time, marker.name));
+ }
+ // Put a time marker roughly every 20 chunks.
+ let expected = duration / chunks.length * 20;
+ let interval = (10 ** Math.floor(Math.log10(expected)));
+ let correction = Math.log10(expected / interval);
+ correction = (correction < 0.33) ? 1 : (correction < 0.75) ? 2.5 : 5;
+ interval *= correction;
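+    // Worked example (illustrative numbers): with duration = 1.3e6 time
+    // units over 400 chunks, expected = 1.3e6 / 400 * 20 = 65000; the nearest
+    // lower power of ten gives interval = 10000, log10(65000 / 10000) ≈ 0.81
+    // selects a correction of 5, and the final marker interval is 50000,
+    // i.e. roughly one marker every 15 chunks.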
+
+ let time = start;
+ while (time < end) {
+ addTimestamp(time, ((time-start) / 1000) + " ms");
+ time += interval;
+ }
+ this.drawOverview();
+ this.drawHistograms();
+ this.redraw();
+ }
+
+ handleChunkMouseMove(event) {
+ if (this.isLocked) return false;
+ let chunk = event.target.chunk;
+ if (!chunk) return;
+ // topmost map (at chunk.height) == map #0.
+ let relativeIndex =
+ Math.round(event.layerY / event.target.offsetHeight * chunk.size());
+ let map = chunk.at(relativeIndex);
+ this.state.map = map;
+ }
+
+ handleChunkClick(event) {
+ this.isLocked = !this.isLocked;
+ }
+
+ handleChunkDoubleClick(event) {
+ this.isLocked = true;
+ let chunk = event.target.chunk;
+ if (!chunk) return;
+ this.transitionView.showMaps(chunk.getUniqueTransitions());
+ }
+
+ setTimelineChunkBackground(chunk, node) {
+ // Render the types of transitions as bar charts
+ const kHeight = chunk.height;
+ const kWidth = 1;
+ this.backgroundCanvas.width = kWidth;
+ this.backgroundCanvas.height = kHeight;
+ let ctx = this.backgroundCanvas.getContext("2d");
+ ctx.clearRect(0, 0, kWidth, kHeight);
+ let y = 0;
+ let total = chunk.size();
+    chunk.getTransitionBreakdown().forEach(([type, count]) => {
+      ctx.fillStyle = transitionTypeToColor(type);
+      let height = count / total * kHeight;
+      ctx.fillRect(0, y, kWidth, height);
+      y += height;
+    });
+
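+    // Rendering into a shared offscreen canvas and applying the result as a
+    // CSS background image keeps the timeline at one lightweight div per
+    // chunk instead of one canvas element each.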
+ let imageData = this.backgroundCanvas.toDataURL("image/png");
+ node.style.backgroundImage = "url(" + imageData + ")";
+ }
+
+ updateOverviewWindow() {
+ let indicator = $("timelineOverviewIndicator");
+ let totalIndicatorWidth = $("timelineOverview").offsetWidth;
+ let div = $("timeline");
+ let timelineTotalWidth = $("timelineCanvas").offsetWidth;
+ let factor = $("timelineOverview").offsetWidth / timelineTotalWidth;
+ let width = div.offsetWidth * factor;
+ let left = div.scrollLeft * factor;
+ indicator.style.width = width + "px";
+ indicator.style.left = left + "px";
+ }
+
+ drawOverview() {
+ const height = 50;
+ const kFactor = 2;
+ let canvas = this.backgroundCanvas;
+ canvas.height = height;
+ canvas.width = window.innerWidth;
+ let ctx = canvas.getContext("2d");
+
+ let chunks = this.state.timeline.chunkSizes(canvas.width * kFactor);
+ let max = chunks.max();
+
+ ctx.clearRect(0, 0, canvas.width, height);
+ ctx.strokeStyle = "black";
+ ctx.fillStyle = "black";
+ ctx.beginPath();
+ ctx.moveTo(0,height);
+ for (let i = 0; i < chunks.length; i++) {
+ ctx.lineTo(i/kFactor, height - chunks[i]/max * height);
+ }
+    ctx.lineTo(chunks.length / kFactor, height);
+ ctx.stroke();
+ ctx.closePath();
+ ctx.fill();
+ let imageData = canvas.toDataURL("image/png");
+ $("timelineOverview").style.backgroundImage = "url(" + imageData + ")";
+ }
+
+ drawHistograms() {
+ $("mapsDepthHistogram").histogram = this.timeline.depthHistogram();
+ $("mapsFanOutHistogram").histogram = this.timeline.fanOutHistogram();
+ }
+
+ drawMapsDepthHistogram() {
+ let canvas = $("mapsDepthCanvas");
+ let histogram = this.timeline.depthHistogram();
+ this.drawHistogram(canvas, histogram, true);
+ }
+
+ drawMapsFanOutHistogram() {
+ let canvas = $("mapsFanOutCanvas");
+ let histogram = this.timeline.fanOutHistogram();
+ this.drawHistogram(canvas, histogram, true, true);
+ }
+
+ drawHistogram(canvas, histogram, logScaleX=false, logScaleY=false) {
+ let ctx = canvas.getContext("2d");
+ let yMax = histogram.max(each => each.length);
+ if (logScaleY) yMax = Math.log(yMax);
+ let xMax = histogram.length;
+ if (logScaleX) xMax = Math.log(xMax);
+ ctx.clearRect(0, 0, canvas.width, canvas.height);
+ ctx.beginPath();
+ ctx.moveTo(0,canvas.height);
+ for (let i = 0; i < histogram.length; i++) {
+ let x = i;
+ if (logScaleX) x = Math.log(x);
+ x = x / xMax * canvas.width;
+ let bucketLength = histogram[i].length;
+ if (logScaleY) bucketLength = Math.log(bucketLength);
+ let y = (1 - bucketLength / yMax) * canvas.height;
+ ctx.lineTo(x, y);
+ }
+ ctx.lineTo(canvas.width, canvas.height);
+    ctx.closePath();
+ ctx.stroke();
+ ctx.fill();
+ }
+
+ redraw() {
+ let canvas= $("timelineCanvas");
+ canvas.width = (this.chunks.length+1) * kChunkWidth;
+ canvas.height = kChunkHeight;
+ let ctx = canvas.getContext("2d");
+ ctx.clearRect(0, 0, canvas.width, kChunkHeight);
+ if (!this.state.map) return;
+ this.drawEdges(ctx);
+ }
+
+ setMapStyle(map, ctx) {
+ ctx.fillStyle = map.edge && map.edge.from ? "black" : "green";
+ }
+
+ setEdgeStyle(edge, ctx) {
+ let color = edge.getColor();
+ ctx.strokeStyle = color;
+ ctx.fillStyle = color;
+ }
+
+ markMap(ctx, map) {
+ let [x, y] = map.position(this.state.chunks);
+ ctx.beginPath();
+ this.setMapStyle(map, ctx);
+ ctx.arc(x, y, 3, 0, 2 * Math.PI);
+ ctx.fill();
+ ctx.beginPath();
+ ctx.fillStyle = "white";
+ ctx.arc(x, y, 2, 0, 2 * Math.PI);
+ ctx.fill();
+ }
+
+ markSelectedMap(ctx, map) {
+ let [x, y] = map.position(this.state.chunks);
+ ctx.beginPath();
+ this.setMapStyle(map, ctx);
+ ctx.arc(x, y, 6, 0, 2 * Math.PI);
+ ctx.stroke();
+ }
+
+ drawEdges(ctx) {
+ // Draw the trace of maps in reverse order to make sure the outgoing
+ // transitions of previous maps aren't drawn over.
+ const kMaxOutgoingEdges = 100;
+ let nofEdges = 0;
+ let stack = [];
+ let current = this.state.map;
+ while (current && nofEdges < kMaxOutgoingEdges) {
+ nofEdges += current.children.length;
+ stack.push(current);
+ current = current.parent();
+ }
+ ctx.save();
+ this.drawOutgoingEdges(ctx, this.state.map, 3);
+ ctx.restore();
+
+ let labelOffset = 15;
+ let xPrev = 0;
+    while (current = stack.pop()) {
+      if (current.edge) {
+        this.setEdgeStyle(current.edge, ctx);
+        let [xTo, yTo] = this.drawEdge(ctx, current.edge, true, labelOffset);
+        if (xTo == xPrev) {
+          labelOffset += 8;
+        } else {
+          labelOffset = 15;
+        }
+        xPrev = xTo;
+      }
+      this.markMap(ctx, current);
+    }
+ // Mark selected map
+ this.markSelectedMap(ctx, this.state.map);
+ }
+
+ drawEdge(ctx, edge, showLabel=true, labelOffset=20) {
+ if (!edge.from || !edge.to) return [-1, -1];
+ let [xFrom, yFrom] = edge.from.position(this.chunks);
+ let [xTo, yTo] = edge.to.position(this.chunks);
+ let sameChunk = xTo == xFrom;
+ if (sameChunk) labelOffset += 8;
+
+ ctx.beginPath();
+ ctx.moveTo(xFrom, yFrom);
+    let offsetX = 20;
+    let midX = xFrom + (xTo - xFrom) / 2;
+ let midY = (yFrom + yTo) / 2 - 100;
+ if (!sameChunk) {
+ ctx.quadraticCurveTo(midX, midY, xTo, yTo);
+ } else {
+ ctx.lineTo(xTo, yTo);
+ }
+ if (!showLabel) {
+ ctx.stroke();
+ } else {
+ let centerX, centerY;
+ if (!sameChunk) {
+ centerX = (xFrom/2 + midX + xTo/2)/2;
+ centerY = (yFrom/2 + midY + yTo/2)/2;
+ } else {
+ centerX = xTo;
+ centerY = yTo;
+ }
+ ctx.moveTo(centerX, centerY);
+ ctx.lineTo(centerX + offsetX, centerY - labelOffset);
+ ctx.stroke();
+ ctx.textAlign = "left";
+      ctx.fillText(edge.toString(), centerX + offsetX + 2, centerY - labelOffset);
+ }
+ return [xTo, yTo];
+ }
+
+ drawOutgoingEdges(ctx, map, max=10, depth=0) {
+ if (!map) return;
+ if (depth >= max) return;
+ ctx.globalAlpha = 0.5 - depth * (0.3/max);
+ ctx.strokeStyle = "#666";
+
+    const limit = Math.min(map.children.length, 100);
+ for (let i = 0; i < limit; i++) {
+ let edge = map.children[i];
+ this.drawEdge(ctx, edge, true);
+ this.drawOutgoingEdges(ctx, edge.to, max, depth+1);
+ }
+ }
+}
+
+
+class TransitionView {
+ constructor(state, node) {
+ this.state = state;
+ this.container = node;
+ this.currentNode = node;
+ this.currentMap = undefined;
+ }
+
+ selectMap(map) {
+ this.currentMap = map;
+ this.state.map = map;
+ }
+
+ showMap(map) {
+ if (this.currentMap === map) return;
+ this.currentMap = map;
+ this._showMaps([map]);
+ }
+
+ showMaps(list, name) {
+ this.state.view.isLocked = true;
+ this._showMaps(list);
+ }
+
+ _showMaps(list, name) {
+    // Hide the container while rebuilding to avoid triggering layouts.
+    this.container.style.display = "none";
+    removeAllChildren(this.container);
+    list.forEach(map => this.addMapAndParentTransitions(map));
+    this.container.style.display = "";
+ }
+
+ addMapAndParentTransitions(map) {
+ if (map === void 0) return;
+ this.currentNode = this.container;
+ let parents = map.getParents();
+ if (parents.length > 0) {
+ this.addTransitionTo(parents.pop());
+ parents.reverse().forEach(each => this.addTransitionTo(each));
+ }
+ let mapNode = this.addSubtransitions(map);
+ // Mark and show the selected map.
+ mapNode.classList.add("selected");
+    if (this.currentMap == map) {
+ setTimeout(() => mapNode.scrollIntoView({
+ behavior: "smooth", block: "nearest", inline: "nearest"
+ }), 1);
+ }
+ }
+
+ addMapNode(map) {
+ let node = div("map");
+ if (map.edge) node.classList.add(map.edge.getColor());
+ node.map = map;
+ node.addEventListener("click", () => this.selectMap(map));
+ if (map.children.length > 1) {
+ node.innerText = map.children.length;
+ let showSubtree = div("showSubtransitions");
+ showSubtree.addEventListener("click", (e) => this.toggleSubtree(e, node));
+ node.appendChild(showSubtree);
+ } else if (map.children.length == 0) {
+ node.innerHTML = "&#x25CF;"
+ }
+ this.currentNode.appendChild(node);
+ return node;
+ }
+
+ addSubtransitions(map) {
+ let mapNode = this.addTransitionTo(map);
+ // Draw outgoing linear transition line.
+ let current = map;
+ while (current.children.length == 1) {
+ current = current.children[0].to;
+ this.addTransitionTo(current);
+ }
+ return mapNode;
+ }
+
+ addTransitionEdge(map) {
+ let classes = ["transitionEdge", map.edge.getColor()];
+ let edge = div(classes);
+ let labelNode = div("transitionLabel");
+ labelNode.innerText = map.edge.toString();
+ edge.appendChild(labelNode);
+ return edge;
+ }
+
+ addTransitionTo(map) {
+    // Resulting DOM structure: transition[ transitions[ transition[...], ...]]
+
+ let transition = div("transition");
+ if (map.isDeprecated()) transition.classList.add("deprecated");
+ if (map.edge) {
+ transition.appendChild(this.addTransitionEdge(map));
+ }
+ let mapNode = this.addMapNode(map);
+ transition.appendChild(mapNode);
+
+ let subtree = div("transitions");
+ transition.appendChild(subtree);
+
+ this.currentNode.appendChild(transition);
+ this.currentNode = subtree;
+
+    return mapNode;
+  }
+
+ toggleSubtree(event, node) {
+ let map = node.map;
+ event.target.classList.toggle("opened");
+ let transitionsNode = node.parentElement.querySelector(".transitions");
+ let subtransitionNodes = transitionsNode.children;
+ if (subtransitionNodes.length <= 1) {
+      // Add subtransitions except the one that's already shown.
+ let visibleTransitionMap = subtransitionNodes.length == 1 ?
+ transitionsNode.querySelector(".map").map : void 0;
+ map.children.forEach(edge => {
+ if (edge.to != visibleTransitionMap) {
+ this.currentNode = transitionsNode;
+ this.addSubtransitions(edge.to);
+ }
+ });
+ } else {
+      // Remove all but the first (currently selected) subtransition.
+ for (let i = subtransitionNodes.length-1; i > 0; i--) {
+ transitionsNode.removeChild(subtransitionNodes[i]);
+ }
+ }
+ }
+}
+
+class StatsView {
+ constructor(state, node) {
+ this.state = state;
+ this.node = node;
+ }
+ get timeline() { return this.state.timeline }
+ get transitionView() { return this.state.view.transitionView; }
+ update() {
+ removeAllChildren(this.node);
+ this.updateGeneralStats();
+ this.updateNamedTransitionsStats();
+ }
+ updateGeneralStats() {
+ let pairs = [
+ ["Maps", e => true],
+ ["Transitions", e => e.edge && e.edge.isTransition()],
+ ["Fast to Slow", e => e.edge && e.edge.isFastToSlow()],
+ ["Slow to Fast", e => e.edge && e.edge.isSlowToFast()],
+ ["Initial Map", e => e.edge && e.edge.isInitial()],
+ ["Replace Descriptors", e => e.edge && e.edge.isReplaceDescriptors()],
+ ["Copy as Prototype", e => e.edge && e.edge.isCopyAsPrototype()],
+ ["Optimize as Prototype", e => e.edge && e.edge.isOptimizeAsPrototype()],
+ ["Deprecated", e => e.isDeprecated()],
+ ];
+
+ let text = "";
+ let tableNode = table();
+ let name, filter;
+ let total = this.timeline.size();
+ pairs.forEach(([name, filter]) => {
+ let row = tr();
+ row.maps = this.timeline.filterUniqueTransitions(filter);
+ row.addEventListener("click",
+ e => this.transitionView.showMaps(e.target.parentNode.maps));
+ row.appendChild(td(name));
+ let count = this.timeline.count(filter);
+ row.appendChild(td(count));
+ let percent = Math.round(count / total * 1000) / 10;
+ row.appendChild(td(percent + "%"));
+ tableNode.appendChild(row);
+ });
+ this.node.appendChild(tableNode);
+  }
+ updateNamedTransitionsStats() {
+ let tableNode = table("transitionTable");
+ let nameMapPairs = Array.from(this.timeline.transitions.entries());
+ nameMapPairs
+ .sort((a,b) => b[1].length - a[1].length)
+ .forEach(([name, maps]) => {
+ let row = tr();
+ row.maps = maps;
+ row.addEventListener("click",
+ e => this.transitionView.showMaps(
+ e.target.parentNode.maps.map(map => map.to)));
+ row.appendChild(td(name));
+ row.appendChild(td(maps.length));
+ tableNode.appendChild(row);
+ });
+ this.node.appendChild(tableNode);
+ }
+}
+
+// =========================================================================
+
+function transitionTypeToColor(type) {
+ switch(type) {
+ case "new": return "green";
+ case "Normalize": return "violet";
+ case "map=SlowToFast": return "orange";
+ case "InitialMap": return "yellow";
+ case "Transition": return "black";
+ case "ReplaceDescriptors": return "red";
+ }
+ return "black";
+}
+
+// ShadowDom elements =========================================================
+customElements.define('x-histogram', class extends HTMLElement {
+ constructor() {
+ super();
+ let shadowRoot = this.attachShadow({mode: 'open'});
+ const t = document.querySelector('#x-histogram-template');
+ const instance = t.content.cloneNode(true);
+ shadowRoot.appendChild(instance);
+ this._histogram = undefined;
+ this.mouseX = 0;
+ this.mouseY = 0;
+ this.canvas.addEventListener('mousemove', event => this.handleCanvasMove(event));
+ }
+ setBoolAttribute(name, value) {
+ if (value) {
+ this.setAttribute(name, "");
+ } else {
+      this.removeAttribute(name);
+ }
+ }
+ static get observedAttributes() {
+ return ['title', 'xlog', 'ylog', 'xlabel', 'ylabel'];
+ }
+ $(query) { return this.shadowRoot.querySelector(query) }
+ get h1() { return this.$("h2") }
+ get canvas() { return this.$("canvas") }
+ get xLabelDiv() { return this.$("#xLabel") }
+ get yLabelDiv() { return this.$("#yLabel") }
+
+ get histogram() {
+ return this._histogram;
+ }
+ set histogram(array) {
+ this._histogram = array;
+ if (this._histogram) {
+ this.yMax = this._histogram.max(each => each.length);
+ this.xMax = this._histogram.length;
+ }
+ this.draw();
+ }
+
+ get title() { return this.getAttribute("title") }
+ set title(string) { this.setAttribute("title", string) }
+ get xLabel() { return this.getAttribute("xlabel") }
+ set xLabel(string) { this.setAttribute("xlabel", string)}
+ get yLabel() { return this.getAttribute("ylabel") }
+ set yLabel(string) { this.setAttribute("ylabel", string)}
+ get xLog() { return this.hasAttribute("xlog") }
+ set xLog(value) { this.setBoolAttribute("xlog", value) }
+ get yLog() { return this.hasAttribute("ylog") }
+ set yLog(value) { this.setBoolAttribute("ylog", value) }
+
+ attributeChangedCallback(name, oldValue, newValue) {
+ if (name == "title") {
+      this.h2.innerText = newValue;
+ return;
+ }
+ if (name == "ylabel") {
+ this.yLabelDiv.innerText = newValue;
+ return;
+ }
+ if (name == "xlabel") {
+ this.xLabelDiv.innerText = newValue;
+ return;
+ }
+ this.draw();
+ }
+
+ handleCanvasMove(event) {
+ this.mouseX = event.offsetX;
+ this.mouseY = event.offsetY;
+ this.draw();
+ }
+ xPosition(i) {
+ let x = i;
+ if (this.xLog) x = Math.log(x);
+ return x / this.xMax * this.canvas.width;
+ }
+ yPosition(i) {
+ let bucketLength = this.histogram[i].length;
+ if (this.yLog) {
+ return (1 - Math.log(bucketLength) / Math.log(this.yMax)) * this.drawHeight + 10;
+ } else {
+ return (1 - bucketLength / this.yMax) * this.drawHeight + 10;
+ }
+ }
+
+ get drawHeight() { return this.canvas.height - 10 }
+
+ draw() {
+ if (!this.histogram) return;
+ let width = this.canvas.width;
+ let height = this.drawHeight;
+ let ctx = this.canvas.getContext("2d");
+ ctx.clearRect(0, 0, this.canvas.width, this.canvas.height);
+ ctx.beginPath();
+ ctx.moveTo(0, height);
+ for (let i = 0; i < this.histogram.length; i++) {
+ ctx.lineTo(this.xPosition(i), this.yPosition(i));
+ }
+ ctx.lineTo(width, height);
+    ctx.closePath();
+ ctx.stroke();
+ ctx.fill();
+ if (!this.mouseX) return;
+ ctx.beginPath();
+    // Map the mouse x-position back to a bucket index (inverse of xPosition
+    // for the linear case).
+    let index = Math.round(this.mouseX / this.canvas.width * this.xMax);
+    let yBucket = this.histogram[index];
+    if (yBucket === undefined) return;
+    let y = this.yPosition(index);
+ ctx.moveTo(0, y);
+ ctx.lineTo(width-40, y);
+ ctx.moveTo(this.mouseX, 0);
+ ctx.lineTo(this.mouseX, height);
+ ctx.stroke();
+ ctx.textAlign = "left";
+ ctx.fillText(yBucket.length, width-30, y);
+ }
+});
+
+</script>
+</head>
+<template id="x-histogram-template">
+ <style>
+ #yLabel {
+ transform: rotate(90deg);
+ }
+ canvas, #yLabel, #info { float: left; }
+ #xLabel { clear: both }
+ </style>
+ <h2></h2>
+ <div id="yLabel"></div>
+ <canvas height=50></canvas>
+ <div id="info">
+ </div>
+ <div id="xLabel"></div>
+</template>
+
+<body onload="handleBodyLoad(event)" onkeypress="handleKeyDown(event)">
+ <h2>Data</h2>
+ <section>
+ <form name="fileForm">
+ <p>
+ <input id="uploadInput" type="file" name="files">
+ </p>
+ </form>
+ </section>
+
+ <h2>Stats</h2>
+ <section id="stats"></section>
+
+ <h2>Timeline</h2>
+ <div id="timeline">
+ <div id=timelineChunks></div>
+ <canvas id="timelineCanvas" ></canvas>
+ </div>
+ <div id="timelineOverview"
+ onmousemove="handleTimelineIndicatorMove(event)" >
+ <div id="timelineOverviewIndicator">
+ <div class="leftMask"></div>
+ <div class="rightMask"></div>
+ </div>
+ </div>
+
+ <h2>Transitions</h2>
+ <section id="transitionView"></section>
+ <br/>
+
+ <h2>Selected Map</h2>
+ <section id="mapDetails"></section>
+
+ <x-histogram id="mapsDepthHistogram"
+ title="Maps Depth" xlabel="depth" ylabel="nof"></x-histogram>
+ <x-histogram id="mapsFanOutHistogram" xlabel="fan-out"
+ title="Maps Fan-out" ylabel="nof"></x-histogram>
+
+ <div id="tooltip">
+ <div id="tooltipContents"></div>
+ </div>
+</body>
+</html>
diff --git a/deps/v8/tools/map-processor.js b/deps/v8/tools/map-processor.js
new file mode 100644
index 0000000000..5b0e46909c
--- /dev/null
+++ b/deps/v8/tools/map-processor.js
@@ -0,0 +1,717 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// ===========================================================================
+class MapProcessor extends LogReader {
+ constructor() {
+ super();
+ this.dispatchTable_ = {
+ 'code-creation': {
+ parsers: [null, parseInt, parseInt, parseInt, parseInt, null, 'var-args'],
+ processor: this.processCodeCreation
+ },
+      'code-move': {
+        parsers: [parseInt, parseInt],
+        processor: this.processCodeMove
+      },
+      'code-delete': {
+        parsers: [parseInt],
+        processor: this.processCodeDelete
+      },
+      'sfi-move': {
+        parsers: [parseInt, parseInt],
+        processor: this.processFunctionMove
+      },
+ 'map-create': {
+ parsers: [parseInt, parseInt, null],
+ processor: this.processMapCreate
+ },
+ 'map': {
+ parsers: [null, parseInt, parseInt, parseInt, parseInt, parseInt,
+ null, null, null
+ ],
+ processor: this.processMap
+ },
+ 'map-details': {
+ parsers: [parseInt, parseInt, null],
+ processor: this.processMapDetails
+ }
+ };
+ this.deserializedEntriesNames_ = [];
+ this.profile_ = new Profile();
+ this.timeline_ = new Timeline();
+ }
+
+  printError(str) {
+    console.error(str);
+    throw str;
+  }
+
+ processString(string) {
+ let end = string.length;
+ let current = 0;
+ let next = 0;
+ let line;
+ let i = 0;
+ let entry;
+ try {
+ while (current < end) {
+ next = string.indexOf("\n", current);
+ if (next === -1) break;
+ i++;
+ line = string.substring(current, next);
+ current = next + 1;
+ this.processLogLine(line);
+ }
+ } catch(e) {
+ console.log("Error occurred during parsing, trying to continue: " + e);
+ }
+ return this.finalize();
+ }
+
+ processLogFile(fileName) {
+    this.collectEntries = true;
+ this.lastLogFileName_ = fileName;
+ let line;
+ while (line = readline()) {
+ this.processLogLine(line);
+ }
+ return this.finalize();
+ }
+
+ finalize() {
+ // TODO(cbruni): print stats;
+ this.timeline_.finalize();
+ return this.timeline_;
+ }
+
+ addEntry(entry) {
+ this.entries.push(entry);
+ }
+
+ /**
+ * Parser for dynamic code optimization state.
+ */
+ parseState(s) {
+ switch (s) {
+ case "":
+ return Profile.CodeState.COMPILED;
+ case "~":
+ return Profile.CodeState.OPTIMIZABLE;
+ case "*":
+ return Profile.CodeState.OPTIMIZED;
+ }
+ throw new Error("unknown code state: " + s);
+ }
+
+ processCodeCreation(
+ type, kind, timestamp, start, size, name, maybe_func) {
+ name = this.deserializedEntriesNames_[start] || name;
+ if (name.startsWith("onComplete")) {
+ console.log(name);
+ }
+ if (maybe_func.length) {
+ let funcAddr = parseInt(maybe_func[0]);
+ let state = this.parseState(maybe_func[1]);
+ this.profile_.addFuncCode(type, name, timestamp, start, size, funcAddr, state);
+ } else {
+ this.profile_.addCode(type, name, timestamp, start, size);
+ }
+ }
+
+ processCodeMove(from, to) {
+ this.profile_.moveCode(from, to);
+ }
+
+ processCodeDelete(start) {
+ this.profile_.deleteCode(start);
+ }
+
+ processFunctionMove(from, to) {
+ this.profile_.moveFunc(from, to);
+ }
+
+ formatPC(pc, line, column) {
+ let entry = this.profile_.findEntry(pc);
+ if (!entry) return "<unknown>"
+ if (entry.type == "Builtin") {
+ return entry.name;
+ }
+ let name = entry.func.getName();
+ let re = /(.*):[0-9]+:[0-9]+$/;
+ let array = re.exec(name);
+ if (!array) {
+ entry = name;
+ } else {
+ entry = entry.getState() + array[1];
+ }
+ return entry + ":" + line + ":" + column;
+ }
+
+ processMap(type, time, from, to, pc, line, column, reason, name) {
+ time = parseInt(time);
+ if (type == "Deprecate") return this.deprecateMap(type, time, from);
+ from = this.getExistingMap(from, time);
+ to = this.getExistingMap(to, time);
+ let edge = new Edge(type, name, reason, time, from, to);
+ edge.filePosition = this.formatPC(pc, line, column);
+ edge.finishSetup();
+ }
+
+ deprecateMap(type, time, id) {
+ this.getExistingMap(id, time).deprecate();
+ }
+
+ processMapCreate(time, id, string) {
+    // map-create events might override existing maps if the addresses get
+    // recycled. Hence we do not check for existing maps.
+ let map = this.createMap(id, time);
+ map.description = string;
+ }
+
+ processMapDetails(time, id, string) {
+    // TODO(cbruni): fix initial map logging.
+ let map = this.getExistingMap(id, time);
+ if (!map.description) {
+ map.description = string;
+ }
+ }
+
+ createMap(id, time) {
+ let map = new V8Map(id, time);
+ this.timeline_.push(map);
+ return map;
+ }
+
+ getExistingMap(id, time) {
+ if (id === 0) return undefined;
+ let map = V8Map.get(id);
+ if (map === undefined) {
+ console.error("No map details provided: id=" + id);
+ // Manually patch in a map to continue running.
+ return this.createMap(id, time);
+    }
+ return map;
+ }
+}
+
+// ===========================================================================
+
+class V8Map {
+ constructor(id, time = -1) {
+ if (!id) throw "Invalid ID";
+ this.id = id;
+ this.time = time;
+ if (!(time > 0)) throw "Invalid time";
+ this.description = "";
+ this.edge = void 0;
+ this.children = [];
+ this.depth = 0;
+ this._isDeprecated = false;
+ this.deprecationTargets = null;
+ V8Map.set(id, this);
+ this.leftId = 0;
+ this.rightId = 0;
+ }
+
+ finalize(id) {
+    // Initialize preorder tree-traversal ids for fast subtree inclusion
+    // checks.
+    if (id <= 0) throw "invalid id";
+    let currentId = id;
+    this.leftId = currentId;
+ this.children.forEach(edge => {
+ let map = edge.to;
+ currentId = map.finalize(currentId + 1);
+ });
+ this.rightId = currentId + 1;
+ return currentId + 1;
+ }
+
+ parent() {
+ if (this.edge === void 0) return void 0;
+ return this.edge.from;
+ }
+
+ isDeprecated() {
+ return this._isDeprecated;
+ }
+
+ deprecate() {
+ this._isDeprecated = true;
+ }
+
+ isRoot() {
+ return this.edge == void 0 || this.edge.from == void 0;
+ }
+
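+  // Nested-interval check over the preorder ids assigned by finalize(): a
+  // node contains another iff its [leftId, rightId] interval strictly
+  // encloses the other's, e.g. (hypothetical ids) a root [1, 8] contains a
+  // descendant [2, 3].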
+ contains(map) {
+ return this.leftId < map.leftId && map.rightId < this.rightId;
+ }
+
+ addEdge(edge) {
+ this.children.push(edge);
+ }
+
+ chunkIndex(chunks) {
+ // Did anybody say O(n)?
+ for (let i = 0; i < chunks.length; i++) {
+ let chunk = chunks[i];
+ if (chunk.isEmpty()) continue;
+ if (chunk.last().time < this.time) continue;
+ return i;
+ }
+ return -1;
+ }
+
+ position(chunks) {
+ let index = this.chunkIndex(chunks);
+ let xFrom = (index + 0.5) * kChunkWidth;
+ let yFrom = kChunkHeight - chunks[index].yOffset(this);
+ return [xFrom, yFrom];
+ }
+
+ transitions() {
+ let transitions = Object.create(null);
+ let current = this;
+ while (current) {
+ let edge = current.edge;
+ if (edge && edge.isTransition()) {
+ transitions[edge.name] = edge;
+ }
+ current = current.parent()
+ }
+ return transitions;
+ }
+
+ getType() {
+ return this.edge === void 0 ? "new" : this.edge.type;
+ }
+
+ getParents() {
+ let parents = [];
+ let current = this.parent();
+ while (current) {
+ parents.push(current);
+ current = current.parent();
+ }
+ return parents;
+ }
+
+ static get(id) {
+ if (!this.cache) return undefined;
+ return this.cache.get(id);
+ }
+
+ static set(id, map) {
+ if (!this.cache) this.cache = new Map();
+ this.cache.set(id, map);
+ }
+}
+
+
+// ===========================================================================
+class Edge {
+ constructor(type, name, reason, time, from, to) {
+ this.type = type;
+ this.name = name;
+ this.reason = reason;
+ this.time = time;
+ this.from = from;
+ this.to = to;
+ this.filePosition = "";
+ }
+
+ finishSetup() {
+ if (this.from) this.from.addEdge(this);
+ if (this.to) {
+ this.to.edge = this;
+ if (this.to === this.from) throw "From and to must be distinct.";
+ if (this.from) {
+ if (this.to.time < this.from.time) {
+ console.error("invalid time order");
+ }
+ let newDepth = this.from.depth + 1;
+ if (this.to.depth > 0 && this.to.depth != newDepth) {
+ console.error("Depth has already been initialized");
+ }
+ this.to.depth = newDepth;
+ }
+ }
+ }
+
+ chunkIndex(chunks) {
+ // Did anybody say O(n)?
+ for (let i = 0; i < chunks.length; i++) {
+ let chunk = chunks[i];
+ if (chunk.isEmpty()) continue;
+ if (chunk.last().time < this.time) continue;
+ return i;
+ }
+ return -1;
+ }
+
+ parentEdge() {
+ if (!this.from) return undefined;
+ return this.from.edge;
+ }
+
+  chainLength() {
+    let length = 0;
+    let prev = this;
+    while (prev) {
+      prev = prev.parentEdge();
+      length++;
+    }
+    return length;
+  }
+
+ isTransition() {
+ return this.type == "Transition"
+ }
+
+ isFastToSlow() {
+ return this.type == "Normalize"
+ }
+
+ isSlowToFast() {
+ return this.type == "SlowToFast"
+ }
+
+ isInitial() {
+ return this.type == "InitialMap"
+ }
+
+ isReplaceDescriptors() {
+ return this.type == "ReplaceDescriptors"
+ }
+
+ isCopyAsPrototype() {
+ return this.reason == "CopyAsPrototype"
+ }
+
+ isOptimizeAsPrototype() {
+ return this.reason == "OptimizeAsPrototype"
+ }
+
+ symbol() {
+ if (this.isTransition()) return "+";
+ if (this.isFastToSlow()) return "⊡";
+ if (this.isSlowToFast()) return "⊛";
+ if (this.isReplaceDescriptors()) {
+ if (this.name) return "+";
+ return "∥";
+ }
+ return "";
+ }
+
+ toString() {
+ let s = this.symbol();
+ if (this.isTransition()) return s + this.name;
+ if (this.isFastToSlow()) return s + this.reason;
+ if (this.isCopyAsPrototype()) return s + "Copy as Prototype";
+ if (this.isOptimizeAsPrototype()) {
+ return s + "Optimize as Prototype";
+ }
+ if (this.isReplaceDescriptors() && this.name) {
+ return this.type + " " + this.symbol() + this.name;
+ }
+ return this.type + " " + (this.reason ? this.reason : "") + " " +
+ (this.name ? this.name : "")
+ }
+}
+
+
+// ===========================================================================
+class Marker {
+ constructor(time, name) {
+ this.time = parseInt(time);
+ this.name = name;
+ }
+}
+
+// ===========================================================================
+class Timeline {
+ constructor() {
+ this.values = [];
+ this.transitions = new Map();
+ this.markers = [];
+ this.startTime = 0;
+ this.endTime = 0;
+ }
+
+ push(map) {
+ let time = map.time;
+ if (!this.isEmpty() && this.last().time > time) {
+ // Invalid insertion order, might happen without --single-process,
+ // finding insertion point.
+ let insertionPoint = this.find(time);
+ this.values.splice(insertionPoint, map);
+ } else {
+ this.values.push(map);
+ }
+ if (time > 0) {
+ this.endTime = Math.max(this.endTime, time);
+ if (this.startTime === 0) {
+ this.startTime = time;
+ } else {
+ this.startTime = Math.min(this.startTime, time);
+ }
+ }
+ }
+
+ addMarker(time, message) {
+ this.markers.push(new Marker(time, message));
+ }
+
+ finalize() {
+ let id = 0;
+ this.forEach(map => {
+ if (map.isRoot()) id = map.finalize(id + 1);
+ if (map.edge && map.edge.name) {
+ let edge = map.edge;
+ let list = this.transitions.get(edge.name);
+ if (list === undefined) {
+ this.transitions.set(edge.name, [edge]);
+ } else {
+ list.push(edge);
+ }
+ }
+ });
+ this.markers.sort((a, b) => b.time - a.time);
+ }
+
+ at(index) {
+ return this.values[index]
+ }
+
+ isEmpty() {
+ return this.size() == 0
+ }
+
+ size() {
+ return this.values.length
+ }
+
+ first() {
+ return this.values.first()
+ }
+
+ last() {
+ return this.values.last()
+ }
+
+ duration() {
+ return this.last().time - this.first().time
+ }
+
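+  // Splits the timeline into `count` equal-duration slices and reports, for
+  // each slice, the [start, end) index range of the maps that fall into it.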
+ forEachChunkSize(count, fn) {
+ const increment = this.duration() / count;
+ let currentTime = this.first().time + increment;
+ let index = 0;
+ for (let i = 0; i < count; i++) {
+ let nextIndex = this.find(currentTime, index);
+ let nextTime = currentTime + increment;
+ fn(index, nextIndex, currentTime, nextTime);
+      index = nextIndex;
+ currentTime = nextTime;
+ }
+ }
+
+ chunkSizes(count) {
+ let chunks = [];
+ this.forEachChunkSize(count, (start, end) => chunks.push(end - start));
+ return chunks;
+ }
+
+ chunks(count) {
+ let chunks = [];
+ this.forEachChunkSize(count, (start, end, startTime, endTime) => {
+ let items = this.values.slice(start, end);
+ let markers = this.markersAt(startTime, endTime);
+ chunks.push(new Chunk(chunks.length, startTime, endTime, items, markers));
+ });
+ return chunks;
+ }
+
+ range(start, end) {
+ const first = this.find(start);
+ if (first < 0) return [];
+ const last = this.find(end, first);
+ return this.values.slice(first, last);
+ }
+
+ find(time, offset = 0) {
+ return this.basicFind(this.values, each => each.time - time, offset);
+ }
+
+ markersAt(startTime, endTime) {
+ let start = this.basicFind(this.markers, each => each.time - startTime);
+ let end = this.basicFind(this.markers, each => each.time - endTime, start);
+ return this.markers.slice(start, end);
+ }
+
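+  // Binary search for the lower insertion point: returns the first index in
+  // [offset, array.length] whose element compares greater than the target.
+  // E.g. with times [2, 4, 4, 7], basicFind(array, each => each.time - 4)
+  // returns 3, the index just past the run of 4s.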
+ basicFind(array, cmp, offset = 0) {
+ let min = offset;
+ let max = array.length;
+ while (min < max) {
+ let mid = min + Math.floor((max - min) / 2);
+ let result = cmp(array[mid]);
+ if (result > 0) {
+ max = mid - 1;
+ } else {
+ min = mid + 1;
+ }
+ }
+ return min;
+ }
+
+ count(filter) {
+ return this.values.reduce((sum, each) => {
+ return sum + (filter(each) ? 1 : 0);
+ }, 0);
+ }
+
+ filter(predicate) {
+ return this.values.filter(predicate);
+ }
+
+ filterUniqueTransitions(filter) {
+ // Returns a list of Maps whose parent is not in the list.
+ return this.values.filter(map => {
+ if (!filter(map)) return false;
+ let parent = map.parent();
+ if (!parent) return true;
+ return !filter(parent);
+ });
+ }
+
+ depthHistogram() {
+ return this.values.histogram(each => each.depth);
+ }
+
+ fanOutHistogram() {
+ return this.values.histogram(each => each.children.length);
+ }
+
+ forEach(fn) {
+ return this.values.forEach(fn)
+ }
+}
+
+
+// ===========================================================================
+class Chunk {
+ constructor(index, start, end, items, markers) {
+ this.index = index;
+ this.start = start;
+ this.end = end;
+ this.items = items;
+    this.markers = markers;
+ this.height = 0;
+ }
+
+ isEmpty() {
+ return this.items.length == 0;
+ }
+
+ last() {
+ return this.at(this.size() - 1);
+ }
+
+ first() {
+ return this.at(0);
+ }
+
+ at(index) {
+ return this.items[index];
+ }
+
+ size() {
+ return this.items.length;
+ }
+
+ yOffset(map) {
+ // items[0] == oldest map, displayed at the top of the chunk
+ // items[n-1] == youngest map, displayed at the bottom of the chunk
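+    // E.g. with 4 items and height 100, index 0 maps to y = 87.5 and
+    // index 3 to y = 12.5.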
+ return (1 - (this.indexOf(map) + 0.5) / this.size()) * this.height;
+ }
+
+ indexOf(map) {
+ return this.items.indexOf(map);
+ }
+
+ has(map) {
+ if (this.isEmpty()) return false;
+ return this.first().time <= map.time && map.time <= this.last().time;
+ }
+
+ next(chunks) {
+ return this.findChunk(chunks, 1);
+ }
+
+ prev(chunks) {
+ return this.findChunk(chunks, -1);
+ }
+
+ findChunk(chunks, delta) {
+ let i = this.index + delta;
+ let chunk = chunks[i];
+ while (chunk && chunk.size() == 0) {
+ i += delta;
+      chunk = chunks[i];
+ }
+ return chunk;
+ }
+
+ getTransitionBreakdown() {
+ return BreakDown(this.items, map => map.getType())
+ }
+
+ getUniqueTransitions() {
+ // Filter out all the maps that have parents within the same chunk.
+ return this.items.filter(map => !map.parent() || !this.has(map.parent()));
+ }
+}
+
+
+// ===========================================================================
+function BreakDown(list, map_fn) {
+ if (map_fn === void 0) {
+ map_fn = each => each;
+ }
+ let breakdown = {__proto__:null};
+ list.forEach(each=> {
+ let type = map_fn(each);
+ let v = breakdown[type];
+ breakdown[type] = (v | 0) + 1
+ });
+ return Object.entries(breakdown)
+ .sort((a,b) => a[1] - b[1]);
+}
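+// Example: BreakDown(["a", "b", "a", "a"]) yields [["b", 1], ["a", 3]]:
+// (type, count) pairs sorted by ascending count.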
+
+
+// ===========================================================================
+class ArgumentsProcessor extends BaseArgumentsProcessor {
+ getArgsDispatch() {
+ return {
+ '--range': ['range', 'auto,auto',
+ 'Specify the range limit as [start],[end]'
+ ],
+ '--source-map': ['sourceMap', null,
+ 'Specify the source map that should be used for output'
+ ]
+ };
+ }
+
+ getDefaultResults() {
+ return {
+ logFileName: 'v8.log',
+ range: 'auto,auto',
+ };
+ }
+}
diff --git a/deps/v8/tools/release/backport_node.py b/deps/v8/tools/node/backport_node.py
index 5523525671..50b0b077fa 100755
--- a/deps/v8/tools/release/backport_node.py
+++ b/deps/v8/tools/node/backport_node.py
@@ -27,12 +27,19 @@ import subprocess
import re
import sys
-from common_includes import *
-
TARGET_SUBDIR = os.path.join("deps", "v8")
VERSION_FILE = os.path.join("include", "v8-version.h")
VERSION_PATTERN = r'(?<=#define V8_PATCH_LEVEL )\d+'
+def FileToText(file_name):
+ with open(file_name) as f:
+ return f.read()
+
+def TextToFile(text, file_name):
+ with open(file_name, "w") as f:
+ f.write(text)
+
+
def Clean(options):
print ">> Cleaning target directory."
subprocess.check_call(["git", "clean", "-fd"],
diff --git a/deps/v8/tools/node/build_gn.py b/deps/v8/tools/node/build_gn.py
new file mode 100755
index 0000000000..8ab2a635ea
--- /dev/null
+++ b/deps/v8/tools/node/build_gn.py
@@ -0,0 +1,82 @@
+#!/usr/bin/env python
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Use this script to build libv8_monolith.a as a dependency for Node.js.
+Required dependencies can be fetched with fetch_deps.py.
+
+Usage: build_gn.py <Debug/Release> <v8-path> <build-path> [<build-flags>]...
+
+Build flags are passed either as "strings" or numeric value. True/false
+are represented as 1/0. E.g.
+
+ v8_promise_internal_field_count=2
+ target_cpu="x64"
+ v8_enable_disassembler=0
+"""
+
+import os
+import subprocess
+import sys
+
+import node_common
+
+GN_ARGS = [
+ "v8_monolithic = true",
+ "is_component_build = false",
+ "v8_use_external_startup_data = false",
+ "use_custom_libcxx = false",
+ "use_sysroot = false",
+]
+
+BUILD_SUBDIR = "gn"
+
+# TODO: make this cross-platform.
+GN_SUBDIR = ["buildtools", "linux64", "gn"]
+
+def Build(v8_path, build_path, depot_tools, is_debug, build_flags):
+ print "Setting GN args."
+ lines = []
+ lines.extend(GN_ARGS)
+ for flag in build_flags:
+ flag = flag.replace("=1", "=true")
+ flag = flag.replace("=0", "=false")
+ flag = flag.replace("target_cpu=ia32", "target_cpu=\"x86\"")
+ lines.append(flag)
+ lines.append("is_debug = %s" % ("true" if is_debug else "false"))
+ with open(os.path.join(build_path, "args.gn"), "w") as args_file:
+ args_file.write("\n".join(lines))
+ gn = os.path.join(v8_path, *GN_SUBDIR)
+ subprocess.check_call([gn, "gen", "-C", build_path], cwd=v8_path)
+ ninja = os.path.join(depot_tools, "ninja")
+ print "Building."
+ subprocess.check_call([ninja, "-v", "-C", build_path, "v8_monolith"],
+ cwd=v8_path)
+
+def Main(v8_path, build_path, is_debug, build_flags):
+ # Verify paths.
+ v8_path = os.path.abspath(v8_path)
+ assert os.path.isdir(v8_path)
+ build_path = os.path.abspath(build_path)
+ build_path = os.path.join(build_path, BUILD_SUBDIR)
+ if not os.path.isdir(build_path):
+ os.makedirs(build_path)
+
+ # Check that we have depot tools.
+ depot_tools = node_common.EnsureDepotTools(v8_path, False)
+
+ # Build with GN.
+ Build(v8_path, build_path, depot_tools, is_debug, build_flags)
+
+if __name__ == "__main__":
+ # TODO: use argparse to parse arguments.
+ build_mode = sys.argv[1]
+ v8_path = sys.argv[2]
+ build_path = sys.argv[3]
+ assert build_mode == "Debug" or build_mode == "Release"
+ is_debug = build_mode == "Debug"
+ # TODO: introduce "--" flag for pass-through flags.
+ build_flags = sys.argv[4:]
+ Main(v8_path, build_path, is_debug, build_flags)
diff --git a/deps/v8/tools/node/fetch_deps.py b/deps/v8/tools/node/fetch_deps.py
new file mode 100755
index 0000000000..a3e6d74917
--- /dev/null
+++ b/deps/v8/tools/node/fetch_deps.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Use this script to fetch all dependencies for V8 to run build_gn.py.
+
+Usage: fetch_deps.py <v8-path>
+"""
+
+import os
+import subprocess
+import sys
+
+import node_common
+
+GCLIENT_SOLUTION = [
+ { "name" : "v8",
+ "url" : "https://chromium.googlesource.com/v8/v8.git",
+ "deps_file" : "DEPS",
+ "managed" : False,
+ "custom_deps" : {
+ # These deps are already part of Node.js.
+ "v8/base/trace_event/common" : None,
+ "v8/testing/gtest" : None,
+ "v8/third_party/jinja2" : None,
+ "v8/third_party/markupsafe" : None,
+ # These deps are unnecessary for building.
+ "v8/test/benchmarks/data" : None,
+ "v8/testing/gmock" : None,
+ "v8/test/mozilla/data" : None,
+ "v8/test/test262/data" : None,
+ "v8/test/test262/harness" : None,
+ "v8/test/wasm-js" : None,
+ "v8/third_party/android_tools" : None,
+ "v8/third_party/catapult" : None,
+ "v8/third_party/colorama/src" : None,
+ "v8/third_party/instrumented_libraries" : None,
+ "v8/tools/gyp" : None,
+ "v8/tools/luci-go" : None,
+ "v8/tools/swarming_client" : None,
+ },
+ "custom_vars": {
+ "build_for_node" : True,
+ },
+ },
+]
+
+def EnsureGit(v8_path):
+ expected_git_dir = os.path.join(v8_path, ".git")
+ actual_git_dir = subprocess.check_output(
+ ["git", "rev-parse", "--absolute-git-dir"], cwd=v8_path).strip()
+ if expected_git_dir == actual_git_dir:
+ print "V8 is tracked stand-alone by git."
+ return False
+ print "Initializing temporary git repository in v8."
+ subprocess.check_call(["git", "init"], cwd=v8_path)
+ subprocess.check_call(["git", "commit", "--allow-empty", "-m", "init"],
+ cwd=v8_path)
+ return True
+
+def FetchDeps(v8_path):
+ # Verify path.
+ v8_path = os.path.abspath(v8_path)
+ assert os.path.isdir(v8_path)
+
+ # Check out depot_tools if necessary.
+ depot_tools = node_common.EnsureDepotTools(v8_path, True)
+
+ temporary_git = EnsureGit(v8_path)
+ try:
+ print "Fetching dependencies."
+ env = os.environ.copy()
+ # gclient needs to have depot_tools in the PATH.
+ env["PATH"] = depot_tools + os.pathsep + env["PATH"]
+ spec = "solutions = %s" % GCLIENT_SOLUTION
+ subprocess.check_call(["gclient", "sync", "--spec", spec],
+ cwd=os.path.join(v8_path, os.path.pardir),
+ env=env)
+ except:
+ raise
+ finally:
+ if temporary_git:
+ node_common.UninitGit(v8_path)
+ # Clean up .gclient_entries file.
+ gclient_entries = os.path.normpath(
+ os.path.join(v8_path, os.pardir, ".gclient_entries"))
+ if os.path.isfile(gclient_entries):
+ os.remove(gclient_entries)
+ # Enable building with GN for configure script.
+ return True
+
+
+if __name__ == "__main__":
+ FetchDeps(sys.argv[1])
diff --git a/deps/v8/tools/node/node_common.py b/deps/v8/tools/node/node_common.py
new file mode 100755
index 0000000000..f7ca3a6a79
--- /dev/null
+++ b/deps/v8/tools/node/node_common.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import shutil
+import stat
+import subprocess
+
+DEPOT_TOOLS_URL = \
+ "https://chromium.googlesource.com/chromium/tools/depot_tools.git"
+
+def EnsureDepotTools(v8_path, fetch_if_not_exist):
+ def _Get(v8_path):
+ depot_tools = os.path.join(v8_path, "_depot_tools")
+ try:
+ gclient_path = os.path.join(depot_tools, "gclient")
+ gclient_check = subprocess.check_output([gclient_path, "--version"])
+ if "gclient.py" in gclient_check:
+ return depot_tools
+ except:
+ pass
+ if fetch_if_not_exist:
+ print "Checking out depot_tools."
+ subprocess.check_call(["git", "clone", DEPOT_TOOLS_URL, depot_tools])
+ return depot_tools
+ return None
+ depot_tools = _Get(v8_path)
+ assert depot_tools is not None
+ print "Using depot tools in %s" % depot_tools
+ return depot_tools
+
+def UninitGit(v8_path):
+ print "Uninitializing temporary git repository"
+ target = os.path.join(v8_path, ".git")
+ if os.path.isdir(target):
+ print ">> Cleaning up %s" % target
+ def OnRmError(func, path, exec_info):
+ # This might happen on Windows
+ os.chmod(path, stat.S_IWRITE)
+ os.unlink(path)
+ shutil.rmtree(target, onerror=OnRmError)
diff --git a/deps/v8/tools/release/test_backport_node.py b/deps/v8/tools/node/test_backport_node.py
index f9c015baf1..3c61a402c4 100755
--- a/deps/v8/tools/release/test_backport_node.py
+++ b/deps/v8/tools/node/test_backport_node.py
@@ -10,7 +10,6 @@ import sys
import tempfile
import unittest
-from common_includes import FileToText
import backport_node
# Base paths.
@@ -65,7 +64,7 @@ class TestUpdateNode(unittest.TestCase):
# Check version.
version_file = os.path.join(node_cwd, "deps", "v8", "include", "v8-version.h")
- self.assertIn('#define V8_PATCH_LEVEL 4322', FileToText(version_file))
+ self.assertIn('#define V8_PATCH_LEVEL 4322', backport_node.FileToText(version_file))
if __name__ == "__main__":
unittest.main()
diff --git a/deps/v8/tools/release/test_update_node.py b/deps/v8/tools/node/test_update_node.py
index bff3d08c2f..1a29b4ea61 100755
--- a/deps/v8/tools/release/test_update_node.py
+++ b/deps/v8/tools/node/test_update_node.py
@@ -32,6 +32,7 @@ EXPECTED_GITIGNORE = """
EXPECTED_GIT_DIFF = """
create mode 100644 deps/v8/base/trace_event/common/common
rename deps/v8/baz/{delete_me => v8_new} (100%)
+ delete mode 100644 deps/v8/include/v8-version.h
rename deps/v8/{delete_me => new/v8_new} (100%)
create mode 100644 deps/v8/third_party/jinja2/jinja2
create mode 100644 deps/v8/third_party/markupsafe/markupsafe
diff --git a/deps/v8/tools/release/testdata/node/deps/v8/.gitignore b/deps/v8/tools/node/testdata/node/deps/v8/.gitignore
index 23c2024827..23c2024827 100644
--- a/deps/v8/tools/release/testdata/node/deps/v8/.gitignore
+++ b/deps/v8/tools/node/testdata/node/deps/v8/.gitignore
diff --git a/deps/v8/tools/release/testdata/node/deps/v8/baz/delete_me b/deps/v8/tools/node/testdata/node/deps/v8/baz/delete_me
index eb1ae458f8..eb1ae458f8 100644
--- a/deps/v8/tools/release/testdata/node/deps/v8/baz/delete_me
+++ b/deps/v8/tools/node/testdata/node/deps/v8/baz/delete_me
diff --git a/deps/v8/tools/release/testdata/node/deps/v8/baz/v8_foo b/deps/v8/tools/node/testdata/node/deps/v8/baz/v8_foo
index eb1ae458f8..eb1ae458f8 100644
--- a/deps/v8/tools/release/testdata/node/deps/v8/baz/v8_foo
+++ b/deps/v8/tools/node/testdata/node/deps/v8/baz/v8_foo
diff --git a/deps/v8/tools/release/testdata/node/deps/v8/delete_me b/deps/v8/tools/node/testdata/node/deps/v8/delete_me
index eb1ae458f8..eb1ae458f8 100644
--- a/deps/v8/tools/release/testdata/node/deps/v8/delete_me
+++ b/deps/v8/tools/node/testdata/node/deps/v8/delete_me
diff --git a/deps/v8/tools/release/testdata/node/deps/v8/include/v8-version.h b/deps/v8/tools/node/testdata/node/deps/v8/include/v8-version.h
index fe8b2712e3..fe8b2712e3 100644
--- a/deps/v8/tools/release/testdata/node/deps/v8/include/v8-version.h
+++ b/deps/v8/tools/node/testdata/node/deps/v8/include/v8-version.h
diff --git a/deps/v8/tools/release/testdata/node/deps/v8/v8_foo b/deps/v8/tools/node/testdata/node/deps/v8/v8_foo
index eb1ae458f8..eb1ae458f8 100644
--- a/deps/v8/tools/release/testdata/node/deps/v8/v8_foo
+++ b/deps/v8/tools/node/testdata/node/deps/v8/v8_foo
diff --git a/deps/v8/tools/release/testdata/v8/.gitignore b/deps/v8/tools/node/testdata/v8/.gitignore
index 855286229f..855286229f 100644
--- a/deps/v8/tools/release/testdata/v8/.gitignore
+++ b/deps/v8/tools/node/testdata/v8/.gitignore
diff --git a/deps/v8/tools/node/testdata/v8/base/trace_event/common/common b/deps/v8/tools/node/testdata/v8/base/trace_event/common/common
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/deps/v8/tools/node/testdata/v8/base/trace_event/common/common
diff --git a/deps/v8/tools/release/testdata/v8/baz/v8_foo b/deps/v8/tools/node/testdata/v8/baz/v8_foo
index eb1ae458f8..eb1ae458f8 100644
--- a/deps/v8/tools/release/testdata/v8/baz/v8_foo
+++ b/deps/v8/tools/node/testdata/v8/baz/v8_foo
diff --git a/deps/v8/tools/release/testdata/v8/baz/v8_new b/deps/v8/tools/node/testdata/v8/baz/v8_new
index eb1ae458f8..eb1ae458f8 100644
--- a/deps/v8/tools/release/testdata/v8/baz/v8_new
+++ b/deps/v8/tools/node/testdata/v8/baz/v8_new
diff --git a/deps/v8/tools/release/testdata/v8/new/v8_new b/deps/v8/tools/node/testdata/v8/new/v8_new
index eb1ae458f8..eb1ae458f8 100644
--- a/deps/v8/tools/release/testdata/v8/new/v8_new
+++ b/deps/v8/tools/node/testdata/v8/new/v8_new
diff --git a/deps/v8/tools/release/testdata/v8/v8_foo b/deps/v8/tools/node/testdata/v8/v8_foo
index eb1ae458f8..eb1ae458f8 100644
--- a/deps/v8/tools/release/testdata/v8/v8_foo
+++ b/deps/v8/tools/node/testdata/v8/v8_foo
diff --git a/deps/v8/tools/release/testdata/v8/v8_new b/deps/v8/tools/node/testdata/v8/v8_new
index eb1ae458f8..eb1ae458f8 100644
--- a/deps/v8/tools/release/testdata/v8/v8_new
+++ b/deps/v8/tools/node/testdata/v8/v8_new
diff --git a/deps/v8/tools/node/update_node.py b/deps/v8/tools/node/update_node.py
new file mode 100755
index 0000000000..ebd953a903
--- /dev/null
+++ b/deps/v8/tools/node/update_node.py
@@ -0,0 +1,167 @@
+#!/usr/bin/env python
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Use this script to update V8 in a Node.js checkout.
+
+Requirements:
+ - Node.js checkout in which V8 should be updated.
+ - V8 checkout at the commit to which Node.js should be updated.
+
+Usage:
+ $ update_node.py <path_to_v8> <path_to_node>
+
+  This will synchronize the content of <path_to_node>/deps/v8 with <path_to_v8>,
+  and a few V8 dependencies required in Node.js. It will also update .gitignore
+  appropriately.
+
+Optional flags:
+ --gclient Run `gclient sync` on the V8 checkout before updating.
+ --commit Create commit with the updated V8 in the Node.js checkout.
+ --with-patch Also include currently staged files in the V8 checkout.
+"""
+
+import argparse
+import os
+import shutil
+import subprocess
+import sys
+import stat
+import node_common
+
+TARGET_SUBDIR = os.path.join("deps", "v8")
+
+SUB_REPOSITORIES = [ ["base", "trace_event", "common"],
+ ["testing", "gtest"],
+ ["third_party", "jinja2"],
+ ["third_party", "markupsafe"] ]
+
+DELETE_FROM_GITIGNORE = [ "/base",
+ "/testing/gtest",
+ "/third_party/jinja2",
+ "/third_party/markupsafe" ]
+
+# Node.js requires only a single header file from gtest to build V8.
+# Both jinja2 and markupsafe are required to generate part of the inspector.
+ADD_TO_GITIGNORE = [ "/testing/gtest/*",
+ "!/testing/gtest/include",
+ "/testing/gtest/include/*",
+ "!/testing/gtest/include/gtest",
+ "/testing/gtest/include/gtest/*",
+ "!/testing/gtest/include/gtest/gtest_prod.h",
+ "!/third_party/jinja2",
+ "!/third_party/markupsafe" ]
+
+def RunGclient(path):
+ assert os.path.isdir(path)
+ print ">> Running gclient sync"
+ subprocess.check_call(["gclient", "sync", "--nohooks"], cwd=path)
+
+def CommitPatch(options):
+ """Makes a dummy commit for the changes in the index.
+
+  On trybots, bot_update applies the patch to the index. We commit it to make
+  the fake git clone fetch it into Node.js. We can leave the commit, as
+  bot_update will ensure a clean state on each run.
+ """
+ print ">> Committing patch"
+ subprocess.check_call(
+ ["git", "-c", "user.name=fake", "-c", "user.email=fake@chromium.org",
+ "commit", "--allow-empty", "-m", "placeholder-commit"],
+ cwd=options.v8_path,
+ )
+
+def UpdateTarget(repository, options):
+ source = os.path.join(options.v8_path, *repository)
+ target = os.path.join(options.node_path, TARGET_SUBDIR, *repository)
+ print ">> Updating target directory %s" % target
+ print ">> from active branch at %s" % source
+ if not os.path.exists(target):
+ os.makedirs(target)
+ # Remove possible remnants of previous incomplete runs.
+ node_common.UninitGit(target)
+
+ git_commands = [
+ ["git", "init"], # initialize target repo
+ ["git", "remote", "add", "origin", source], # point to the source repo
+ ["git", "fetch", "origin", "HEAD"], # sync to the current branch
+ ["git", "reset", "--hard", "FETCH_HEAD"], # reset to the current branch
+ ["git", "clean", "-fd"], # delete removed files
+ ]
+ try:
+ for command in git_commands:
+ subprocess.check_call(command, cwd=target)
+ except:
+ raise
+ finally:
+ node_common.UninitGit(target)
+
+def UpdateGitIgnore(options):
+ file_name = os.path.join(options.node_path, TARGET_SUBDIR, ".gitignore")
+ assert os.path.isfile(file_name)
+ print ">> Updating .gitignore with lines"
+ with open(file_name) as gitignore:
+ content = gitignore.readlines()
+ content = [x.strip() for x in content]
+ for x in DELETE_FROM_GITIGNORE:
+ if x in content:
+ print "- %s" % x
+ content.remove(x)
+ for x in ADD_TO_GITIGNORE:
+ if x not in content:
+ print "+ %s" % x
+ content.append(x)
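+  # Sort while ignoring a leading "!" so that negated patterns stay adjacent
+  # to the entries they whitelist.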
+ content.sort(key=lambda x: x[1:] if x.startswith("!") else x)
+ with open(file_name, "w") as gitignore:
+ for x in content:
+ gitignore.write("%s\n" % x)
+
+def CreateCommit(options):
+ print ">> Creating commit."
+ # Find git hash from source.
+ githash = subprocess.check_output(["git", "rev-parse", "--short", "HEAD"],
+ cwd=options.v8_path).strip()
+ # Create commit at target.
+ git_commands = [
+ ["git", "checkout", "-b", "update_v8_to_%s" % githash], # new branch
+ ["git", "add", "."], # add files
+ ["git", "commit", "-m", "Update V8 to %s" % githash] # new commit
+ ]
+ for command in git_commands:
+ subprocess.check_call(command, cwd=options.node_path)
+
+def ParseOptions(args):
+ parser = argparse.ArgumentParser(description="Update V8 in Node.js")
+ parser.add_argument("v8_path", help="Path to V8 checkout")
+ parser.add_argument("node_path", help="Path to Node.js checkout")
+ parser.add_argument("--gclient", action="store_true", help="Run gclient sync")
+ parser.add_argument("--commit", action="store_true", help="Create commit")
+ parser.add_argument("--with-patch", action="store_true",
+ help="Apply also staged files")
+ options = parser.parse_args(args)
+ assert os.path.isdir(options.v8_path)
+ options.v8_path = os.path.abspath(options.v8_path)
+ assert os.path.isdir(options.node_path)
+ options.node_path = os.path.abspath(options.node_path)
+ return options
+
+def Main(args):
+ options = ParseOptions(args)
+ if options.gclient:
+ RunGclient(options.v8_path)
+ # Commit patch on trybots to main V8 repository.
+ if options.with_patch:
+ CommitPatch(options)
+ # Update main V8 repository.
+ UpdateTarget([""], options)
+ # Patch .gitignore before updating sub-repositories.
+ UpdateGitIgnore(options)
+ for repo in SUB_REPOSITORIES:
+ UpdateTarget(repo, options)
+ if options.commit:
+ CreateCommit(options)
+
+if __name__ == "__main__":
+ Main(sys.argv[1:])
diff --git a/deps/v8/tools/parse-processor b/deps/v8/tools/parse-processor
new file mode 100755
index 0000000000..588f120b4e
--- /dev/null
+++ b/deps/v8/tools/parse-processor
@@ -0,0 +1,41 @@
+#!/bin/sh
+
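+# Usage: parse-processor [d8-flags] [log-file]
+# An argument that does not start with a dash is taken as the log file
+# (defaults to v8.log); all arguments are also forwarded to the driver script.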
+# Find the name of the log file to process; it must not start with a dash.
+log_file="v8.log"
+for arg in "$@"
+do
+ if ! expr "X${arg}" : "^X-" > /dev/null; then
+ log_file=${arg}
+ fi
+done
+
+tools_path=`cd $(dirname "$0");pwd`
+if [ ! "$D8_PATH" ]; then
+ d8_public=`which d8`
+ if [ -x "$d8_public" ]; then D8_PATH=$(dirname "$d8_public"); fi
+fi
+[ -n "$D8_PATH" ] || D8_PATH=$tools_path/..
+d8_exec=$D8_PATH/d8
+
+if [ ! -x "$d8_exec" ]; then
+ D8_PATH=`pwd`/out.gn/optdebug
+ d8_exec=$D8_PATH/d8
+fi
+
+if [ ! -x "$d8_exec" ]; then
+ d8_exec=`grep -m 1 -o '".*/d8"' $log_file | sed 's/"//g'`
+fi
+
+if [ ! -x "$d8_exec" ]; then
+ echo "d8 shell not found in $D8_PATH"
+ echo "To build, execute 'make native' from the V8 directory"
+ exit 1
+fi
+
+# nm spits out 'no symbols found' messages to stderr.
+cat $log_file | $d8_exec --trace-maps --allow-natives-syntax --trace-deopt $tools_path/splaytree.js $tools_path/codemap.js \
+ $tools_path/csvparser.js $tools_path/consarray.js \
+ $tools_path/profile.js $tools_path/profile_view.js \
+ $tools_path/logreader.js $tools_path/arguments.js \
+ $tools_path/parse-processor.js $tools_path/SourceMap.js \
+ $tools_path/parse-processor-driver.js -- $@ 2>/dev/null
diff --git a/deps/v8/tools/parse-processor-driver.js b/deps/v8/tools/parse-processor-driver.js
new file mode 100644
index 0000000000..f8f0c15254
--- /dev/null
+++ b/deps/v8/tools/parse-processor-driver.js
@@ -0,0 +1,33 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function processArguments(args) {
+ var processor = new ArgumentsProcessor(args);
+ if (processor.parse()) {
+ return processor.result();
+ } else {
+ processor.printUsageAndExit();
+ }
+}
+
+function initSourceMapSupport() {
+ // Pull dev tools source maps into our name space.
+ SourceMap = WebInspector.SourceMap;
+
+ // Overwrite the load function to load scripts synchronously.
+ SourceMap.load = function(sourceMapURL) {
+ var content = readFile(sourceMapURL);
+ var sourceMapObject = (JSON.parse(content));
+ return new SourceMap(sourceMapURL, sourceMapObject);
+ };
+}
+
+var params = processArguments(arguments);
+var sourceMap = null;
+if (params.sourceMap) {
+ initSourceMapSupport();
+ sourceMap = SourceMap.load(params.sourceMap);
+}
+var parseProcessor = new ParseProcessor();
+parseProcessor.processLogFile(params.logFileName);
diff --git a/deps/v8/tools/parse-processor.html b/deps/v8/tools/parse-processor.html
new file mode 100644
index 0000000000..e41fffbd5f
--- /dev/null
+++ b/deps/v8/tools/parse-processor.html
@@ -0,0 +1,337 @@
+<html>
+<!--
+Copyright 2016 the V8 project authors. All rights reserved. Use of this source
+code is governed by a BSD-style license that can be found in the LICENSE file.
+-->
+
+<head>
+<style>
+ html {
+ font-family: monospace;
+ }
+
+ .parse {
+ background-color: red;
+ border: 1px red solid;
+ }
+
+ .preparse {
+ background-color: orange;
+ border: 1px orange solid;
+ }
+
+ .resolution {
+ background-color: green;
+ border: 1px green solid;
+ }
+
+ .execution {
+ background-color: black;
+ border-left: 2px black solid;
+ z-index: -1;
+ }
+
+ .script {
+ margin-top: 1em;
+ overflow: visible;
+ clear: both;
+ border-top: 2px black dotted;
+ }
+ .script h3 {
+ height: 20px;
+ margin-bottom: 0.5em;
+ white-space: nowrap;
+ }
+
+ .script-details {
+ float: left;
+ }
+
+ .chart {
+ float: left;
+ margin-right: 2em;
+ }
+
+ .funktion-list {
+ float: left;
+ height: 400px;
+ }
+
+ .funktion-list > ul {
+ height: 80%;
+ overflow-y: scroll;
+ }
+
+
+ .funktion {
+ }
+
+</style>
+<script src="./splaytree.js" type="text/javascript"></script>
+<script src="./codemap.js" type="text/javascript"></script>
+<script src="./csvparser.js" type="text/javascript"></script>
+<script src="./consarray.js" type="text/javascript"></script>
+<script src="./profile.js" type="text/javascript"></script>
+<script src="./profile_view.js" type="text/javascript"></script>
+<script src="./logreader.js" type="text/javascript"></script>
+<script src="./arguments.js" type="text/javascript"></script>
+<script src="./parse-processor.js" type="text/javascript"></script>
+<script src="./SourceMap.js" type="text/javascript"></script>
+<script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
+<script type="text/javascript">
+"use strict";
+google.charts.load('current', {packages: ['corechart']});
+
+function $(query) {
+ return document.querySelector(query);
+}
+
+
+function loadFile() {
+ let files = $('#uploadInput').files;
+
+ let file = files[0];
+ let reader = new FileReader();
+
+ reader.onload = function(evt) {
+ const kTimerName = 'parse log file';
+ console.time(kTimerName);
+ let parseProcessor = new ParseProcessor();
+ parseProcessor.processString(this.result);
+ console.timeEnd(kTimerName);
+ renderParseResults(parseProcessor);
+ document.parseProcessor = parseProcessor;
+  };
+ reader.readAsText(file);
+}
+
+function handleOnLoad() {
+ document.querySelector("#uploadInput").focus();
+}
+
+function createNode(tag, classNames) {
+ let node = document.createElement(tag);
+ if (classNames) {
+ if (Array.isArray(classNames)) {
+ node.classList.add(...classNames);
+ } else {
+ node.className = classNames;
+ }
+ }
+ return node;
+}
+
+function div(...args) {
+ return createNode('div', ...args);
+}
+
+function h1(string) {
+ let node = createNode('h1');
+ node.appendChild(text(string));
+ return node;
+}
+
+function h3(string, ...args) {
+ let node = createNode('h3', ...args);
+ if (string) node.appendChild(text(string));
+ return node;
+}
+
+function a(href, string, ...args) {
+ let link = createNode('a', ...args);
+ if (href.length) link.href = href;
+ if (string) link.appendChild(text(string));
+ return link;
+}
+
+function text(string) {
+ return document.createTextNode(string);
+}
+
+function delay(t) {
+  return new Promise(resolve => setTimeout(resolve, t));
+}
+
+function renderParseResults(parseProcessor) {
+ let result = $('#result');
+  // Clear out all existing result pages.
+ result.innerHTML = '';
+ const start = parseProcessor.firstEvent;
+ const end = parseProcessor.lastEvent;
+ renderScript(result, parseProcessor.totalScript, start, end);
+ // Build up the graphs lazily to keep the page responsive.
+ parseProcessor.scripts.forEach(
+ script => renderScript(result, script, start, end));
+ // Install an intersection observer to lazily load the graphs when the script
+ // div becomes visible for the first time.
+ var io = new IntersectionObserver((entries, observer) => {
+ entries.forEach(entry => {
+ if (entry.intersectionRatio == 0) return;
+ console.assert(!entry.target.querySelector('.graph'));
+ let target = entry.target;
+ appendGraph(target.script, target, start, end);
+ observer.unobserve(entry.target);
+ });
+ }, {});
+ document.querySelectorAll('.script').forEach(div => io.observe(div));
+}
+
+
+const kTimeFactor = 10;
+const kHeight = 20;
+const kFunktionTopOffset = 50;
+
+function renderScript(result, script, start, end) {
+ // Filter out empty scripts.
+ if (script.isEmpty() || script.lastParseEvent == 0) return;
+
+ let scriptDiv = div('script');
+ scriptDiv.script = script;
+
+ let scriptTitle = h3();
+ if (script.file) scriptTitle.appendChild(a(script.file, script.file));
+ let anchor = a("", ' id=' + script.id);
+  anchor.name = "script" + script.id;
+ scriptTitle.appendChild(anchor);
+ scriptDiv.appendChild(scriptTitle);
+ let summary = createNode('pre', 'script-details');
+ summary.appendChild(text(script.summary));
+ scriptDiv.appendChild(summary);
+ result.appendChild(scriptDiv);
+ return scriptDiv;
+}
+
+const kMaxTime = 120 * kSecondsToMillis;
+// Resolution of the graphs
+const kTimeIncrement = 1;
+const kSelectionTimespan = 2;
+const series = [
+// ['firstParseEvent', 'Any Parse Event'],
+ ['parse', 'Parsing'],
+// ['preparse', 'Preparsing'],
+// ['resolution', 'Preparsing with Var. Resolution'],
+ ['lazyCompile', 'Lazy Compilation'],
+ ['compile', 'Eager Compilation'],
+ ['execution', 'First Execution'],
+];
+const metricNames = series.map(each => each[0]);
+
+
+function appendGraph(script, parentNode, start, end) {
+ const timerLabel = 'graph script=' + script.id;
+ // TODO(cbruni): add support for network events
+
+ console.time(timerLabel);
+ let data = new google.visualization.DataTable();
+ data.addColumn('number', 'Time');
+  // The series interleave bytes processed and time spent, and thus use two
+  // different vAxes.
+ let seriesOptions = [];
+ series.forEach(each => {
+ let description = each[1];
+ // Add the bytes column.
+ data.addColumn('number', description + ' Bytes');
+ seriesOptions.push({targetAxisIndex: 0});
+ // Add the time column.
+ data.addColumn('number', description + ' Time');
+ seriesOptions.push({targetAxisIndex: 1, lineDashStyle: [3, 2]});
+ });
+ // The first entry contains the total.
+ seriesOptions[0].type = 'area';
+
+ const maxTime = Math.min(kMaxTime, end);
+ console.time('metrics');
+  let metricValues = script.getAccumulatedTimeMetrics(
+      metricNames, 0, maxTime, kTimeIncrement);
+ console.timeEnd('metrics');
+ console.assert(metricValues[0].length == seriesOptions.length + 1);
+ data.addRows(metricValues);
+
+ let options = {
+ explorer: {
+ actions: ['dragToZoom', 'rightClickToReset'],
+ maxZoomIn: 0.01
+ },
+ hAxis: {
+ format: '#,###.##s'
+ },
+ vAxes: {
+ 0: {title: 'Bytes Touched', format: 'short'},
+ 1: {title: 'Time', format: '#,###ms'}
+ },
+ height: 400,
+ width: 1000,
+ chartArea: {left: '5%', top: '15%', width: "85%", height: "75%"},
+    // The first series should be an area chart (total bytes touched),
+    series: seriesOptions,
+    // everything else is a line.
+ seriesType: 'line'
+ };
+ let graphNode = createNode('div', 'chart');
+ let listNode = createNode('div', 'funktion-list');
+ parentNode.appendChild(graphNode);
+ parentNode.appendChild(listNode);
+ let chart = new google.visualization.ComboChart(graphNode);
+ google.visualization.events.addListener(chart, 'select',
+ () => selectGraphPointHandler(chart, data, script, parentNode));
+ chart.draw(data, options);
+ console.timeEnd(timerLabel);
+}
+
+
+function selectGraphPointHandler(chart, data, script, parentNode) {
+ let selection = chart.getSelection();
+ if (selection.length <= 0) return;
+ // Display a list of funktions with events at the given time.
+ let {row, column} = selection[0];
+  if (row === null || column === null) return;
+ let name = series[((column-1)/2) | 0][0];
+ let time = data.getValue(row, 0);
+ let funktions = script.getFunktionsAtTime(
+ time * kSecondsToMillis, kSelectionTimespan, name);
+ let oldList = parentNode.querySelector('.funktion-list');
+ parentNode.replaceChild(createFunktionList(name, time, funktions), oldList);
+}
+
+function createFunktionList(metric, time, funktions) {
+ let container = createNode('div', 'funktion-list');
+ container.appendChild(h3('Changes of ' + metric + ' at ' +
+ time + 's: ' + funktions.length));
+ let listNode = createNode('ul');
+ funktions.forEach(funktion => {
+ let node = createNode('li', 'funktion');
+ node.funktion = funktion;
+ node.appendChild(text(funktion.toString(false) + " "));
+ let script = funktion.script;
+ if (script) {
+ node.appendChild(a("#script" + script.id, "in script " + script.id));
+ }
+ listNode.appendChild(node);
+ });
+ container.appendChild(listNode);
+ return container;
+}
+
+
+</script>
+</head>
+
+<body onload="handleOnLoad()">
+ <h1>BEHOLD, THIS IS PARSEROR!</h1>
+
+ <h2>Usage</h2>
+ Run your script with <code>--log-function-events</code> and upload <code>v8.log</code> on this page:<br/>
+ <code>/path/to/d8 --log-function-events your_script.js</code>
+
+ <h2>Data</h2>
+ <form name="fileForm">
+ <p>
+ <input id="uploadInput" type="file" name="files" onchange="loadFile();"> trace entries: <span id="count">0</span>
+ </p>
+ </form>
+
+ <h2>Result</h2>
+ <div id="result"></div>
+</body>
+
+</html>
diff --git a/deps/v8/tools/parse-processor.js b/deps/v8/tools/parse-processor.js
new file mode 100644
index 0000000000..30b593a156
--- /dev/null
+++ b/deps/v8/tools/parse-processor.js
@@ -0,0 +1,918 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+"use strict";
+
+
+/**
+ * A thin wrapper around shell's 'read' function showing a file name on error.
+ */
+function readFile(fileName) {
+ try {
+ return read(fileName);
+ } catch (e) {
+ console.log(fileName + ': ' + (e.message || e));
+ throw e;
+ }
+}
+
+// ===========================================================================
+
+// Why this particular format? For an international audience the decimal and
+// thousands separators are easily confused (comma "," and dot "." swap roles
+// between locales). The Swiss format uses "'" as the thousands separator,
+// which avoids most of that confusion.
+var numberFormat = new Intl.NumberFormat('de-CH', {
+ maximumFractionDigits: 2,
+ minimumFractionDigits: 2,
+});
+
+function formatNumber(value) {
+  return numberFormat.format(value);
+}
+
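+// Example: BYTES(1234) yields "1.23 kB" (left-padded to 10 characters); when
+// a total is given, a percentage such as "50%" is appended, e.g.
+// BYTES(500, 1000).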
+function BYTES(bytes, total) {
+  let units = ['B ', 'kB', 'MB', 'GB'];
+  let unitIndex = 0;
+  let value = bytes;
+  while (value > 1000 && unitIndex < units.length - 1) {
+ value /= 1000;
+ unitIndex++;
+ }
+ let result = formatNumber(value).padStart(10) + ' ' + units[unitIndex];
+ if (total !== void 0 && total != 0) {
+ result += PERCENT(bytes, total).padStart(5);
+ }
+ return result;
+}
+
+function PERCENT(value, total) {
+ return Math.round(value / total * 100) + "%";
+}
+
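+// Returns the smallest timestamp in {list}, skipping unset (-1) entries,
+// rounded to the nearest integer.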
+function timestampMin(list) {
+ let result = -1;
+ list.forEach(timestamp => {
+ if (result === -1) {
+ result = timestamp;
+ } else if (timestamp != -1) {
+ result = Math.min(result, timestamp);
+ }
+ });
+ return Math.round(result);
+}
+
+
+// ===========================================================================
+class Script {
+ constructor(file, id) {
+ this.file = file;
+ this.isNative = false;
+ this.id = id;
+ if (id === void 0 || id <= 0) {
+ throw new Error(`Invalid id=${id} for script with file='${file}'`);
+ }
+ this.isEval = false;
+ this.funktions = [];
+ this.metrics = new Map();
+ this.maxNestingLevel = 0;
+
+ this.firstEvent = -1;
+ this.firstParseEvent = -1;
+ this.lastParseEvent = -1;
+ this.executionTimestamp = -1;
+ this.compileTimestamp = -1;
+ this.lastEvent = -1;
+
+ this.compileTime = -0.0;
+
+ this.width = 0;
+ this.bytesTotal = 0;
+ this.ownBytes = -1;
+ this.finalized = false;
+ this.summary = '';
+ this.setFile(file);
+ }
+
+ setFile(name) {
+ this.file = name;
+ this.isNative = name.startsWith('native ');
+ }
+
+ isEmpty() {
+ return this.funktions.length === 0
+ }
+
+ funktionAtPosition(start) {
+ if (start === 0) throw "position 0 is reserved for the script";
+ if (this.finalized) throw 'Finalized script has no source position!';
+ return this.funktions[start];
+ }
+
+ addMissingFunktions(list) {
+ if (this.finalized) throw 'script is finalized!';
+ list.forEach(fn => {
+ if (this.funktions[fn.start] === void 0) {
+ this.addFunktion(fn);
+ }
+ });
+ }
+
+ addFunktion(fn) {
+ if (this.finalized) throw 'script is finalized!';
+ if (fn.start === void 0) throw "Funktion has no start position";
+ if (this.funktions[fn.start] !== void 0) {
+ fn.print();
+ throw "adding same function twice to script";
+ }
+ this.funktions[fn.start] = fn;
+ }
+
+ finalize() {
+ this.finalized = true;
+ // Compact funktions as we no longer need access via start byte position.
+ this.funktions = this.funktions.filter(each => true);
+ let parent = null;
+ let maxNesting = 0;
+ // Iterate over the Funktions in byte position order.
+ this.funktions.forEach(fn => {
+ fn.fromEval = this.isEval;
+ if (parent === null) {
+ parent = fn;
+ } else {
+ // Walk up the nested chain of Funktions to find the parent.
+ while (parent !== null && !fn.isNestedIn(parent)) {
+ parent = parent.parent;
+ }
+ fn.parent = parent;
+ if (parent) {
+ maxNesting = Math.max(maxNesting, parent.addNestedFunktion(fn));
+ }
+ parent = fn;
+ }
+ this.firstParseEvent = this.firstParseEvent === -1 ?
+ fn.getFirstParseEvent() :
+ Math.min(this.firstParseEvent, fn.getFirstParseEvent());
+ this.lastParseEvent =
+ Math.max(this.lastParseEvent, fn.getLastParseEvent());
+ fn.getFirstEvent();
+ if (Number.isNaN(this.lastEvent)) throw "Invalid lastEvent";
+ this.lastEvent = Math.max(this.lastEvent, fn.getLastEvent());
+ if (Number.isNaN(this.lastEvent)) throw "Invalid lastEvent";
+ });
+ this.maxNestingLevel = maxNesting;
+ this.getFirstEvent();
+ }
+
+ print() {
+ console.log(this.toString());
+ }
+
+ toString() {
+ let str = `SCRIPT id=${this.id} file=${this.file}\n` +
+ `functions[${this.funktions.length}]:`;
+ this.funktions.forEach(fn => str += fn.toString());
+ return str;
+ }
+
+ getBytes() {
+ return this.bytesTotal;
+ }
+
+ getOwnBytes() {
+ if (this.ownBytes === -1) {
+ this.ownBytes = this.funktions.reduce(
+        (bytes, each) => bytes - (each.parent === null ? each.getBytes() : 0),
+ this.getBytes());
+ if (this.ownBytes < 0) throw "Own bytes must be positive";
+ }
+ return this.ownBytes;
+ }
+
+ // Also see Funktion.prototype.getMetricBytes
+ getMetricBytes(name) {
+ if (name == 'lazyCompileTimestamp') return this.getOwnBytes();
+ return this.getBytes();
+ }
+
+ getMetricTime(name) {
+ return this[name];
+ }
+
+ forEach(fn) {
+ fn(this);
+ this.funktions.forEach(fn);
+ }
+
+ // Container helper for TotalScript / Script.
+ getScripts() {
+ return [this];
+ }
+
+ calculateMetrics(printSummary) {
+ let log = (str) => this.summary += str + '\n';
+ log("SCRIPT: " + this.id);
+ let all = this.funktions;
+ if (all.length === 0) return;
+
+ let nofFunktions = all.length;
+ let ownBytesSum = list => {
+ return list.reduce((bytes, each) => bytes + each.getOwnBytes(), 0)
+ };
+
+ let info = (name, funktions) => {
+ let ownBytes = ownBytesSum(funktions);
+ let nofPercent = Math.round(funktions.length / nofFunktions * 100);
+ let value = (funktions.length + "").padStart(6) +
+ (nofPercent + "%").padStart(5) +
+ BYTES(ownBytes, this.bytesTotal).padStart(10);
+ log((" - " + name).padEnd(20) + value);
+ this.metrics.set(name + "-bytes", ownBytes);
+ this.metrics.set(name + "-count", funktions.length);
+ this.metrics.set(name + "-count-percent", nofPercent);
+ this.metrics.set(name + "-bytes-percent",
+ Math.round(ownBytes / this.bytesTotal * 100));
+ };
+
+ log(" - file: " + this.file);
+ info("scripts", this.getScripts());
+ info("functions", all);
+ info("toplevel fn", all.filter(each => each.isToplevel()));
+ info("preparsed", all.filter(each => each.preparseTime > 0));
+
+ info("fully parsed", all.filter(each => each.parseTime > 0));
+ // info("fn parsed", all.filter(each => each.parse2Time > 0));
+ // info("resolved", all.filter(each => each.resolutionTime > 0));
+ info("executed", all.filter(each => each.executionTimestamp > 0));
+ info("forEval", all.filter(each => each.fromEval));
+ info("lazy compiled", all.filter(each => each.lazyCompileTimestamp > 0));
+ info("eager compiled", all.filter(each => each.compileTimestamp > 0));
+
+ let parsingCost = new ExecutionCost('parse', all,
+ each => each.parseTime);
+ parsingCost.setMetrics(this.metrics);
+ log(parsingCost.toString())
+
+ let preParsingCost = new ExecutionCost('preparse', all,
+ each => each.preparseTime);
+ preParsingCost.setMetrics(this.metrics);
+ log(preParsingCost.toString())
+
+ let resolutionCost = new ExecutionCost('resolution', all,
+ each => each.resolutionTime);
+ resolutionCost.setMetrics(this.metrics);
+ log(resolutionCost.toString())
+
+ let nesting = new NestingDistribution(all);
+ nesting.setMetrics(this.metrics);
+ log(nesting.toString())
+
+ if (printSummary) console.log(this.summary);
+ }
+
+ getAccumulatedTimeMetrics(metrics, start, end, delta, incremental = false) {
+ // Returns an array of the following format:
+ // [ [start, acc(metric0, start, start), acc(metric1, ...), ...],
+ // [start+delta, acc(metric0, start, start+delta), ...],
+ // [start+delta*2, acc(metric0, start, start+delta*2), ...],
+ // ...
+ // ]
+ const timespan = end - start;
+ const kSteps = Math.ceil(timespan / delta);
+ // To reduce the time spent iterating over the funktions of this script
+ // we iterate once over all funktions and add the metric changes to each
+ // timepoint:
+ // [ [0, 300, ...], [1, 15, ...], [2, 100, ...], [3, 0, ...] ... ]
+ // In a second step we accumulate all values:
+ // [ [0, 300, ...], [1, 315, ...], [2, 415, ...], [3, 415, ...] ... ]
+ //
+ // To limit the number of data points required in the resulting graphs,
+ // only the rows for entries with actual changes are created.
+
+ const metricProperties = ["time"];
+ metrics.forEach(each => {
+ metricProperties.push(each + 'Timestamp');
+ metricProperties.push(each + 'Time');
+ });
+ // Create a packed {rowTemplate} which is copied later-on.
+ let indexToTime = (t) => (start + t * delta) / kSecondsToMillis;
+ let rowTemplate = [indexToTime(0)];
+ for (let i = 1; i < metricProperties.length; i++) rowTemplate.push(0.0);
+ // Create rows with 0-time entry.
+ let rows = new Array(rowTemplate.slice());
+ for (let t = 1; t <= kSteps; t++) rows.push(null);
+ // Create the real metric's property name on the Funktion object.
+ // Add the increments of each Funktion's metric to the result.
+ this.forEach(funktionOrScript => {
+ // Iterate over the Funktion's metric names, position 0 is the time.
+ for (let i = 1; i < metricProperties.length; i += 2) {
+ let property = metricProperties[i];
+ let timestamp = funktionOrScript[property];
+ if (timestamp === void 0) continue;
+ if (timestamp < 0 || end < timestamp) continue;
+ let index = Math.floor(timestamp / delta);
+ let row = rows[index];
+ if (row === null) {
+ // Add a new row if it didn't exist,
+ row = rows[index] = rowTemplate.slice();
+ // .. add the time offset.
+ row[0] = indexToTime(index);
+ }
+ // Add the metric value.
+ row[i] += funktionOrScript.getMetricBytes(property);
+ let timeMetricName = metricProperties[i + 1];
+ row[i + 1] += funktionOrScript.getMetricTime(timeMetricName);
+ }
+ });
+ // Create a packed array again with only the valid entries.
+ // Accumulate the incremental results by adding the metric values from
+ // the previous time window.
+ let previous = rows[0];
+ let result = [previous];
+ for (let t = 1; t < rows.length; t++) {
+ let current = rows[t];
+ if (current === null) {
+ // Ensure a zero data-point after each non-zero point.
+ if (incremental && rows[t - 1] !== null) {
+ let duplicate = rowTemplate.slice();
+ duplicate[0] = indexToTime(t);
+ result.push(duplicate);
+ }
+ continue;
+ }
+ if (!incremental) {
+ // Skip i==0 where the corresponding time value in seconds is.
+ for (let i = 1; i < metricProperties.length; i++) {
+ current[i] += previous[i];
+ }
+ }
+ // Make sure we have a data-point in time right before the current one.
+ if (rows[t - 1] === null) {
+ let duplicate = (incremental ? rowTemplate : previous).slice();
+ duplicate[0] = indexToTime(t - 1);
+ result.push(duplicate);
+ }
+ previous = current;
+ result.push(current);
+ }
+ // Make sure there is an entry at the last position to make sure all graphs
+ // have the same width.
+ const lastIndex = rows.length - 1;
+ if (rows[lastIndex] === null) {
+ let duplicate = previous.slice();
+ duplicate[0] = indexToTime(lastIndex);
+ result.push(duplicate);
+ }
+ return result;
+ }
+
+ getFunktionsAtTime(time, delta, metric) {
+ // Returns a list of Funktions whose metric changed in the
+ // [time-delta, time+delta] range.
+ return this.funktions.filter(
+ funktion => funktion.didMetricChange(time, delta, metric));
+ }
+
+ getFirstEvent() {
+ if (this.firstEvent === -1) {
+      // TODO(cbruni): add support for network request timestamp
+ this.firstEvent = this.firstParseEvent;
+ }
+ return this.firstEvent;
+ }
+}
+
+
+class TotalScript extends Script {
+ constructor() {
+ super('all files', 'all files');
+ this.scripts = [];
+ }
+
+ addAllFunktions(script) {
+ // funktions is indexed by byte offset and as such not packed. Add every
+ // Funktion one by one to keep this.funktions packed.
+ script.funktions.forEach(fn => this.funktions.push(fn));
+ this.scripts.push(script);
+ this.bytesTotal += script.bytesTotal;
+ }
+
+ // Iterate over all Scripts and nested Funktions.
+ forEach(fn) {
+ this.scripts.forEach(script => script.forEach(fn));
+ }
+
+ getScripts() {
+ return this.scripts;
+ }
+}
+
+
+// ===========================================================================
+
+class NestingDistribution {
+ constructor(funktions) {
+ // Stores the nof bytes per function nesting level.
+ this.accumulator = [0, 0, 0, 0, 0];
+ // Max nof bytes encountered at any nesting level.
+ this.max = 0;
+    // Byte-weighted average nesting level.
+ this.avg = 0;
+ this.totalBytes = 0;
+
+ funktions.forEach(each => each.accumulateNestingLevel(this.accumulator));
+ this.max = this.accumulator.reduce((max, each) => Math.max(max, each), 0);
+ this.totalBytes = this.accumulator.reduce((sum, each) => sum + each, 0);
+ for (let i = 0; i < this.accumulator.length; i++) {
+ this.avg += this.accumulator[i] * i;
+ }
+ this.avg /= this.totalBytes;
+ }
+
+ print() {
+ console.log(this.toString())
+ }
+
+ toString() {
+ let ticks = " ▁▂▃▄▅▆▇█";
+ let accString = this.accumulator.reduce((str, each) => {
+ let index = Math.round(each / this.max * (ticks.length - 1));
+ return str + ticks[index];
+ }, '');
+ let percent0 = this.accumulator[0]
+ let percent1 = this.accumulator[1];
+ let percent2plus = this.accumulator.slice(2)
+ .reduce((sum, each) => sum + each, 0);
+ return " - nesting level: " +
+ ' avg=' + formatNumber(this.avg) +
+ ' l0=' + PERCENT(percent0, this.totalBytes) +
+ ' l1=' + PERCENT(percent1, this.totalBytes) +
+ ' l2+=' + PERCENT(percent2plus, this.totalBytes) +
+ ' distribution=[' + accString + ']';
+
+ }
+
+ setMetrics(dict) {}
+}
+
+class ExecutionCost {
+ constructor(prefix, funktions, time_fn) {
+ this.prefix = prefix;
+ // Time spent on executed functions.
+    this.executedCost = 0;
+ // Time spent on not executed functions.
+ this.nonExecutedCost = 0;
+
+ this.executedCost = funktions.reduce((sum, each) => {
+ return sum + (each.hasBeenExecuted() ? time_fn(each) : 0)
+ }, 0);
+ this.nonExecutedCost = funktions.reduce((sum, each) => {
+ return sum + (each.hasBeenExecuted() ? 0 : time_fn(each))
+ }, 0);
+ }
+
+ print() {
+ console.log(this.toString())
+ }
+
+ toString() {
+ return (' - ' + this.prefix + '-time:').padEnd(24) +
+ (" executed=" + formatNumber(this.executedCost) + 'ms').padEnd(20) +
+ " non-executed=" + formatNumber(this.nonExecutedCost) + 'ms';
+ }
+
+ setMetrics(dict) {
+    dict.set('parseMetric', this.executedCost);
+    dict.set('parseMetricNegative', this.nonExecutedCost);
+ }
+}
+
+// ===========================================================================
+const kNoTimeMetrics = {
+ __proto__: null,
+ executionTime: 0,
+ firstEventTimestamp: 0,
+ firstParseEventTimestamp: 0,
+ lastParseTimestamp: 0,
+ lastEventTimestamp: 0
+};
+
+class Funktion {
+ constructor(name, start, end, script) {
+ if (start < 0) throw "invalid start position: " + start;
+ if (end <= 0) throw "invalid end position: " + end;
+ if (end <= start) throw "invalid start end positions";
+
+ this.name = name;
+ this.start = start;
+ this.end = end;
+ this.ownBytes = -1;
+ this.script = script;
+ this.parent = null;
+ this.fromEval = false;
+ this.nested = [];
+ this.nestingLevel = 0;
+
+ this.preparseTimestamp = -1;
+ this.parseTimestamp = -1;
+ this.parse2Timestamp = -1;
+ this.resolutionTimestamp = -1;
+ this.lazyCompileTimestamp = -1;
+ this.compileTimestamp = -1;
+ this.executionTimestamp = -1;
+
+ this.preparseTime = -0.0;
+ this.parseTime = -0.0;
+ this.parse2Time = -0.0;
+ this.resolutionTime = -0.0;
+ this.scopeResolutionTime = -0.0;
+ this.lazyCompileTime = -0.0;
+ this.compileTime = -0.0;
+
+ // Lazily computed properties.
+ this.firstEventTimestamp = -1;
+ this.firstParseEventTimestamp = -1;
+ this.lastParseTimestamp = -1;
+ this.lastEventTimestamp = -1;
+
+ if (script) this.script.addFunktion(this);
+ }
+
+ getMetricBytes(name) {
+ if (name == 'lazyCompileTimestamp') return this.getOwnBytes();
+ return this.getBytes();
+ }
+
+ getMetricTime(name) {
+ if (name in kNoTimeMetrics) return 0;
+ return this[name];
+ }
+
+ getFirstEvent() {
+ if (this.firstEventTimestamp === -1) {
+ this.firstEventTimestamp = timestampMin(
+ [this.parseTimestamp, this.preparseTimestamp,
+ this.resolutionTimestamp, this.executionTimestamp
+ ]);
+ if (!(this.firstEventTimestamp > 0)) {
+ this.firstEventTimestamp = 0;
+ }
+ }
+ return this.firstEventTimestamp;
+ }
+
+ getFirstParseEvent() {
+ if (this.firstParseEventTimestamp === -1) {
+ this.firstParseEventTimestamp = timestampMin(
+ [this.parseTimestamp, this.preparseTimestamp,
+ this.resolutionTimestamp
+ ]);
+ if (!(this.firstParseEventTimestamp > 0)) {
+ this.firstParseEventTimestamp = 0;
+ }
+ }
+ return this.firstParseEventTimestamp;
+ }
+
+ getLastParseEvent() {
+ if (this.lastParseTimestamp === -1) {
+ this.lastParseTimestamp = Math.max(
+ this.preparseTimestamp + this.preparseTime,
+ this.parseTimestamp + this.parseTime,
+ this.resolutionTimestamp + this.resolutionTime);
+ if (!(this.lastParseTimestamp > 0)) {
+ this.lastParseTimestamp = 0;
+ }
+ }
+ return this.lastParseTimestamp;
+ }
+
+ getLastEvent() {
+ if (this.lastEventTimestamp === -1) {
+ this.lastEventTimestamp = Math.max(
+ this.getLastParseEvent(), this.executionTimestamp);
+ if (!(this.lastEventTimestamp > 0)) {
+ this.lastEventTimestamp = 0;
+ }
+ }
+ return this.lastEventTimestamp;
+ }
+
+ isNestedIn(funktion) {
+ if (this.script != funktion.script) throw "Incompatible script";
+ return funktion.start < this.start && this.end <= funktion.end;
+ }
+
+ isToplevel() {
+ return this.parent === null
+ }
+
+ hasBeenExecuted() {
+ return this.executionTimestamp > 0
+ }
+
+ accumulateNestingLevel(accumulator) {
+ let value = accumulator[this.nestingLevel] || 0;
+ accumulator[this.nestingLevel] = value + this.getOwnBytes();
+ }
+
+ addNestedFunktion(child) {
+ if (this.script != child.script) throw "Incompatible script";
+ if (child == null) throw "Nesting non child";
+ this.nested.push(child);
+ if (this.nested.length > 1) {
+ // Make sure the nested children don't overlap and have been inserted in
+ // byte start position order.
+ let last = this.nested[this.nested.length - 2];
+ if (last.end > child.start || last.start > child.start ||
+ last.end > child.end || last.start > child.end) {
+ throw "Wrongly nested child added";
+ }
+ }
+ child.nestingLevel = this.nestingLevel + 1;
+ return child.nestingLevel;
+ }
+
+ getBytes() {
+ return this.end - this.start;
+ }
+
+ getOwnBytes() {
+ if (this.ownBytes === -1) {
+ this.ownBytes = this.nested.reduce(
+ (bytes, each) => bytes - each.getBytes(),
+ this.getBytes());
+ if (this.ownBytes < 0) throw "Own bytes must be positive";
+ }
+ return this.ownBytes;
+ }
+
+ didMetricChange(time, delta, name) {
+ let value = this[name + 'Timestamp'];
+ return (time - delta) <= value && value <= (time + delta);
+ }
+
+ print() {
+ console.log(this.toString());
+ }
+
+ toString(details = true) {
+ let result = 'function' + (this.name ? ' ' + this.name : '') +
+ `() range=${this.start}-${this.end}`;
+ if (details) result += ` script=${this.script ? this.script.id : 'X'}`;
+ return result;
+ }
+}
+
+
+// ===========================================================================
+
+const kTimestampFactor = 1000;
+const kSecondsToMillis = 1000;
+
+function toTimestamp(microseconds) {
+ return microseconds / kTimestampFactor
+}
+
+function startOf(timestamp, time) {
+ let result = toTimestamp(timestamp) - time;
+  if (result < 0) throw "start timestamp cannot be negative";
+ return result;
+}
+
+
+class ParseProcessor extends LogReader {
+ constructor() {
+ super();
+ let config = (processor) => {
+ // {script file},{script id},{start position},{end position},
+ // {time},{timestamp},{function name}
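+      // A hypothetical matching log line (event name first, then the fields
+      // above): parse-function,test.js,13,5,42,0.25,1234567,foo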
+ return {
+ parsers: [null, parseInt, parseInt, parseInt, parseFloat, parseInt, null],
+ processor: processor
+ }
+ };
+
+ this.dispatchTable_ = {
+ 'parse-full': config(this.processFull),
+ 'parse-function': config(this.processFunction),
+ 'parse-script': config(this.processScript),
+ 'parse-eval': config(this.processEval),
+ 'preparse-no-resolution': config(this.processPreparseNoResolution),
+ 'preparse-resolution': config(this.processPreparseResolution),
+ 'first-execution': config(this.processFirstExecution),
+ 'compile-lazy': config(this.processCompileLazy),
+ 'compile': config(this.processCompile)
+ };
+
+ this.idToScript = new Map();
+ this.fileToScript = new Map();
+ this.nameToFunction = new Map();
+ this.scripts = [];
+ this.totalScript = new TotalScript();
+ this.firstEvent = -1;
+ this.lastParseEvent = -1;
+ this.lastEvent = -1;
+ }
+
+ print() {
+ console.log("scripts:");
+ this.idToScript.forEach(script => script.print());
+ }
+
+ processString(string) {
+ let end = string.length;
+ let current = 0;
+ let next = 0;
+ let line;
+ let i = 0;
+ let entry;
+ while (current < end) {
+ next = string.indexOf("\n", current);
+ if (next === -1) break;
+ i++;
+ line = string.substring(current, next);
+ current = next + 1;
+ this.processLogLine(line);
+ }
+ this.postProcess();
+ }
+
+ processLogFile(fileName) {
+    this.collectEntries = true;
+ this.lastLogFileName_ = fileName;
+ var line;
+ while (line = readline()) {
+ this.processLogLine(line);
+ }
+ this.postProcess();
+ }
+
+ postProcess() {
+ this.scripts = Array.from(this.idToScript.values())
+ .filter(each => !each.isNative);
+
+ this.scripts.forEach(script => script.finalize());
+ this.scripts.forEach(script => script.calculateMetrics(false));
+
+ this.firstEvent =
+ timestampMin(this.scripts.map(each => each.firstEvent));
+ this.lastParseEvent = this.scripts.reduce(
+ (max, script) => Math.max(max, script.lastParseEvent), -1);
+ this.lastEvent = this.scripts.reduce(
+ (max, script) => Math.max(max, script.lastEvent), -1);
+
+ this.scripts.forEach(script => this.totalScript.addAllFunktions(script));
+ this.totalScript.calculateMetrics(true);
+ const series = [
+ ['firstParseEvent', 'Any Parse Event'],
+ ['parse', 'Parsing'],
+ ['preparse', 'Preparsing'],
+ ['resolution', 'Preparsing with Var. Resolution'],
+ ['lazyCompile', 'Lazy Compilation'],
+ ['compile', 'Eager Compilation'],
+ ['execution', 'First Execution'],
+ ];
+ let metrics = series.map(each => each[0]);
+ this.totalScript.getAccumulatedTimeMetrics(metrics, 0, this.lastEvent, 10);
+  }
+
+ addEntry(entry) {
+ this.entries.push(entry);
+ }
+
+ lookupScript(file, id) {
+ // During preparsing we only have the temporary ranges and no script yet.
+ let script;
+ if (this.idToScript.has(id)) {
+ script = this.idToScript.get(id);
+ } else {
+ script = new Script(file, id);
+ this.idToScript.set(id, script);
+ }
+ if (file.length > 0 && script.file.length === 0) {
+ script.setFile(file);
+ this.fileToScript.set(file, script);
+ }
+ return script;
+ }
+
+ lookupFunktion(file, scriptId,
+ startPosition, endPosition, time, timestamp, functionName) {
+ let script = this.lookupScript(file, scriptId);
+ let funktion = script.funktionAtPosition(startPosition);
+ if (funktion === void 0) {
+ funktion = new Funktion(functionName, startPosition, endPosition, script);
+ }
+ return funktion;
+ }
+
+ processEval(file, scriptId, startPosition,
+ endPosition, time, timestamp, functionName) {
+ let script = this.lookupScript(file, scriptId);
+ script.isEval = true;
+ }
+
+ processFull(file, scriptId, startPosition,
+ endPosition, time, timestamp, functionName) {
+ let funktion = this.lookupFunktion(...arguments);
+    // TODO(cbruni): this should never happen; emit a different event from the
+ // parser.
+ if (funktion.parseTimestamp > 0) return;
+ funktion.parseTimestamp = startOf(timestamp, time);
+ funktion.parseTime = time;
+ }
+
+ processFunction(file, scriptId, startPosition,
+ endPosition, time, timestamp, functionName) {
+ let funktion = this.lookupFunktion(...arguments);
+ funktion.parseTimestamp = startOf(timestamp, time);
+ funktion.parseTime = time;
+ }
+
+ processScript(file, scriptId, startPosition,
+ endPosition, time, timestamp, functionName) {
+ // TODO timestamp and time
+ let script = this.lookupScript(file, scriptId);
+ let ts = startOf(timestamp, time);
+ script.parseTimestamp = ts;
+ script.firstEventTimestamp = ts;
+ script.firstParseEventTimestamp = ts;
+ script.parseTime = time;
+ }
+
+ processPreparseResolution(file, scriptId,
+ startPosition, endPosition, time, timestamp, functionName) {
+ let funktion = this.lookupFunktion(...arguments);
+    // TODO(cbruni): this should never happen; emit a different event from the
+ // parser.
+ if (funktion.resolutionTimestamp > 0) return;
+ funktion.resolutionTimestamp = startOf(timestamp, time);
+ funktion.resolutionTime = time;
+ }
+
+ processPreparseNoResolution(file, scriptId,
+ startPosition, endPosition, time, timestamp, functionName) {
+ let funktion = this.lookupFunktion(...arguments);
+ funktion.preparseTimestamp = startOf(timestamp, time);
+ funktion.preparseTime = time;
+ }
+
+ processFirstExecution(file, scriptId,
+ startPosition, endPosition, time, timestamp, functionName) {
+ let script = this.lookupScript(file, scriptId);
+ if (startPosition === 0) {
+ // undefined = eval fn execution
+ if (script) {
+ script.executionTimestamp = toTimestamp(timestamp);
+ }
+ } else {
+ let funktion = script.funktionAtPosition(startPosition);
+ if (funktion) {
+ funktion.executionTimestamp = toTimestamp(timestamp);
+ } else if (functionName.length > 0) {
+ // throw new Error("Could not find function: " + functionName);
+ }
+ }
+ }
+
+ processCompileLazy(file, scriptId,
+ startPosition, endPosition, time, timestamp, functionName) {
+ let funktion = this.lookupFunktion(...arguments);
+ funktion.lazyCompileTimestamp = startOf(timestamp, time);
+ funktion.lazyCompileTime = time;
+ }
+
+ processCompile(file, scriptId,
+ startPosition, endPosition, time, timestamp, functionName) {
+ let script = this.lookupScript(file, scriptId);
+ if (startPosition === 0) {
+ script.compileTimestamp = startOf(timestamp, time);
+ script.compileTime = time;
+ script.bytesTotal = endPosition;
+ } else {
+ let funktion = script.funktionAtPosition(startPosition);
+ funktion.compileTimestamp = startOf(timestamp, time);
+ funktion.compileTime = time;
+ }
+ }
+}
+
+
+class ArgumentsProcessor extends BaseArgumentsProcessor {
+ getArgsDispatch() {
+ return {};
+ }
+
+ getDefaultResults() {
+ return {
+ logFileName: 'v8.log',
+ range: 'auto,auto',
+ };
+ }
+}
diff --git a/deps/v8/tools/perf-compare.py b/deps/v8/tools/perf-compare.py
index b7a795b453..75f3c73c6a 100755
--- a/deps/v8/tools/perf-compare.py
+++ b/deps/v8/tools/perf-compare.py
@@ -12,7 +12,6 @@ Examples:
'''
from collections import OrderedDict
-import commands
import json
import math
from argparse import ArgumentParser
diff --git a/deps/v8/tools/perf-to-html.py b/deps/v8/tools/perf-to-html.py
index ac9f53f617..e3979360a7 100755
--- a/deps/v8/tools/perf-to-html.py
+++ b/deps/v8/tools/perf-to-html.py
@@ -12,7 +12,6 @@ from standard input or via the --filename option. Examples:
%prog -f results.json -t "ia32 results" -o results.html
'''
-import commands
import json
import math
from optparse import OptionParser
diff --git a/deps/v8/tools/predictable_wrapper.py b/deps/v8/tools/predictable_wrapper.py
new file mode 100644
index 0000000000..cf7bf00b3f
--- /dev/null
+++ b/deps/v8/tools/predictable_wrapper.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Wrapper script for verify-predictable mode. D8 is expected to be compiled with
+v8_enable_verify_predictable.
+
+The actual test command is expected to be passed to this wrapper as-is, e.g.:
+predictable_wrapper.py path/to/d8 --test --predictable --flag1 --flag2
+
+The command is run up to three times and the printed allocation hash is
+compared. Differences are reported as errors.
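+
+Exit codes: 0 if the allocation hashes of all runs match, 1 on timeout,
+2 if no allocation output was found, and 3 if the hashes differ between runs.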
+"""
+
+import sys
+
+from testrunner.local import command
+
+MAX_TRIES = 3
+
+def main(args):
+ def allocation_str(stdout):
+ for line in reversed((stdout or '').splitlines()):
+ if line.startswith('### Allocations = '):
+ return line
+ return None
+
+ cmd = command.Command(args[0], args[1:])
+
+ previous_allocations = None
+ for run in range(1, MAX_TRIES + 1):
+ print '### Predictable run #%d' % run
+ output = cmd.execute()
+ if output.stdout:
+ print '### Stdout:'
+ print output.stdout
+ if output.stderr:
+ print '### Stderr:'
+ print output.stderr
+ print '### Return code: %s' % output.exit_code
+ if output.HasTimedOut():
+ # If we get a timeout in any run, we are in an unpredictable state. Just
+ # report it as a failure and don't rerun.
+ print '### Test timed out'
+ return 1
+ allocations = allocation_str(output.stdout)
+ if not allocations:
+ print ('### Test had no allocation output. Ensure this is built '
+ 'with v8_enable_verify_predictable and that '
+ '--verify-predictable is passed at the cmd line.')
+ return 2
+ if previous_allocations and previous_allocations != allocations:
+ print '### Allocations differ'
+ return 3
+ if run >= MAX_TRIES:
+ # No difference on the last run -> report a success.
+ return 0
+ previous_allocations = allocations
+ # Unreachable.
+ assert False
+
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv[1:]))
diff --git a/deps/v8/tools/presubmit.py b/deps/v8/tools/presubmit.py
index 2290422459..9ac26ddb16 100755
--- a/deps/v8/tools/presubmit.py
+++ b/deps/v8/tools/presubmit.py
@@ -554,9 +554,15 @@ def CheckDeps(workspace):
def PyTests(workspace):
- test_scripts = join(workspace, 'tools', 'release', 'test_scripts.py')
- return subprocess.call(
- [sys.executable, test_scripts], stdout=subprocess.PIPE) == 0
+ result = True
+ for script in [
+ join(workspace, 'tools', 'release', 'test_scripts.py'),
+ join(workspace, 'tools', 'unittests', 'run_tests_test.py'),
+ ]:
+ print 'Running ' + script
+ result &= subprocess.call(
+ [sys.executable, script], stdout=subprocess.PIPE) == 0
+ return result
def GetOptions():
@@ -573,8 +579,8 @@ def Main():
success = True
print "Running checkdeps..."
success &= CheckDeps(workspace)
- print "Running C++ lint check..."
if not options.no_lint:
+ print "Running C++ lint check..."
success &= CppLintProcessor().RunOnPath(workspace)
print "Running copyright header, trailing whitespaces and " \
"two empty lines between declarations check..."
diff --git a/deps/v8/tools/process-heap-prof.py b/deps/v8/tools/process-heap-prof.py
deleted file mode 100755
index a26cbf1589..0000000000
--- a/deps/v8/tools/process-heap-prof.py
+++ /dev/null
@@ -1,120 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2009 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# This is an utility for converting V8 heap logs into .hp files that can
-# be further processed using 'hp2ps' tool (bundled with GHC and Valgrind)
-# to produce heap usage histograms.
-
-# Sample usage:
-# $ ./shell --log-gc script.js
-# $ tools/process-heap-prof.py v8.log | hp2ps -c > script-heap-graph.ps
-# ('-c' enables color, see hp2ps manual page for more options)
-# or
-# $ tools/process-heap-prof.py --js-cons-profile v8.log | hp2ps -c > script-heap-graph.ps
-# to get JS constructor profile
-
-
-import csv, sys, time, optparse
-
-def ProcessLogFile(filename, options):
- if options.js_cons_profile:
- itemname = 'heap-js-cons-item'
- else:
- itemname = 'heap-sample-item'
-
- first_call_time = None
- sample_time = 0.0
- sampling = False
- try:
- logfile = open(filename, 'rb')
- try:
- logreader = csv.reader(logfile)
-
- print('JOB "v8"')
- print('DATE "%s"' % time.asctime(time.localtime()))
- print('SAMPLE_UNIT "seconds"')
- print('VALUE_UNIT "bytes"')
-
- for row in logreader:
- if row[0] == 'heap-sample-begin' and row[1] == 'Heap':
- sample_time = float(row[3])/1000.0
- if first_call_time == None:
- first_call_time = sample_time
- sample_time -= first_call_time
- print('BEGIN_SAMPLE %.2f' % sample_time)
- sampling = True
- elif row[0] == 'heap-sample-end' and row[1] == 'Heap':
- print('END_SAMPLE %.2f' % sample_time)
- sampling = False
- elif row[0] == itemname and sampling:
- print(row[1]),
- if options.count:
- print('%d' % (int(row[2]))),
- if options.size:
- print('%d' % (int(row[3]))),
- print
- finally:
- logfile.close()
- except:
- sys.exit('can\'t open %s' % filename)
-
-
-def BuildOptions():
- result = optparse.OptionParser()
- result.add_option("--js_cons_profile", help="Constructor profile",
- default=False, action="store_true")
- result.add_option("--size", help="Report object size",
- default=False, action="store_true")
- result.add_option("--count", help="Report object count",
- default=False, action="store_true")
- return result
-
-
-def ProcessOptions(options):
- if not options.size and not options.count:
- options.size = True
- return True
-
-
-def Main():
- parser = BuildOptions()
- (options, args) = parser.parse_args()
- if not ProcessOptions(options):
- parser.print_help()
- sys.exit();
-
- if not args:
- print "Missing logfile"
- sys.exit();
-
- ProcessLogFile(args[0], options)
-
-
-if __name__ == '__main__':
- sys.exit(Main())
diff --git a/deps/v8/tools/run-num-fuzzer.isolate b/deps/v8/tools/run-num-fuzzer.isolate
index 4bd3d8b6c0..d0aca421a7 100644
--- a/deps/v8/tools/run-num-fuzzer.isolate
+++ b/deps/v8/tools/run-num-fuzzer.isolate
@@ -14,6 +14,7 @@
'includes': [
'testrunner/testrunner.isolate',
'../src/d8.isolate',
+ '../test/benchmarks/benchmarks.isolate',
'../test/mjsunit/mjsunit.isolate',
'../test/webkit/webkit.isolate',
],
diff --git a/deps/v8/tools/run_perf.py b/deps/v8/tools/run_perf.py
index 0f1646d9ea..3823eb510c 100755
--- a/deps/v8/tools/run_perf.py
+++ b/deps/v8/tools/run_perf.py
@@ -106,7 +106,7 @@ import re
import subprocess
import sys
-from testrunner.local import commands
+from testrunner.local import command
from testrunner.local import utils
ARCH_GUESS = utils.DefaultArch()
@@ -493,15 +493,23 @@ class RunnableConfig(GraphConfig):
suffix = ["--"] + self.test_flags if self.test_flags else []
return self.flags + (extra_flags or []) + [self.main] + suffix
- def GetCommand(self, shell_dir, extra_flags=None):
+ def GetCommand(self, cmd_prefix, shell_dir, extra_flags=None):
# TODO(machenbach): This requires +.exe if run on windows.
extra_flags = extra_flags or []
- cmd = [os.path.join(shell_dir, self.binary)]
- if self.binary.endswith(".py"):
- cmd = [sys.executable] + cmd
if self.binary != 'd8' and '--prof' in extra_flags:
print "Profiler supported only on a benchmark run with d8"
- return cmd + self.GetCommandFlags(extra_flags=extra_flags)
+
+ if self.process_size:
+ cmd_prefix = ["/usr/bin/time", "--format=MaxMemory: %MKB"] + cmd_prefix
+ if self.binary.endswith('.py'):
+ # Copy cmd_prefix instead of update (+=).
+ cmd_prefix = cmd_prefix + [sys.executable]
+
+ return command.Command(
+ cmd_prefix=cmd_prefix,
+ shell=os.path.join(shell_dir, self.binary),
+ args=self.GetCommandFlags(extra_flags=extra_flags),
+ timeout=self.timeout or 60)
def Run(self, runner, trybot):
"""Iterates over several runs and handles the output for all traces."""
@@ -677,18 +685,9 @@ class DesktopPlatform(Platform):
suffix = ' - secondary' if secondary else ''
shell_dir = self.shell_dir_secondary if secondary else self.shell_dir
title = ">>> %%s (#%d)%s:" % ((count + 1), suffix)
- if runnable.process_size:
- command = ["/usr/bin/time", "--format=MaxMemory: %MKB"]
- else:
- command = []
-
- command += self.command_prefix + runnable.GetCommand(shell_dir,
- self.extra_flags)
+ cmd = runnable.GetCommand(self.command_prefix, shell_dir, self.extra_flags)
try:
- output = commands.Execute(
- command,
- timeout=runnable.timeout,
- )
+ output = cmd.execute()
except OSError as e: # pragma: no cover
print title % "OSError"
print e
diff --git a/deps/v8/tools/testrunner/PRESUBMIT.py b/deps/v8/tools/testrunner/PRESUBMIT.py
new file mode 100644
index 0000000000..7f7596a85d
--- /dev/null
+++ b/deps/v8/tools/testrunner/PRESUBMIT.py
@@ -0,0 +1,8 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+def CheckChangeOnCommit(input_api, output_api):
+ tests = input_api.canned_checks.GetUnitTestsInDirectory(
+ input_api, output_api, '../unittests', whitelist=['run_tests_test.py$'])
+ return input_api.RunTests(tests)
diff --git a/deps/v8/tools/testrunner/base_runner.py b/deps/v8/tools/testrunner/base_runner.py
index b6ef6fb5cd..8fc09eed7b 100644
--- a/deps/v8/tools/testrunner/base_runner.py
+++ b/deps/v8/tools/testrunner/base_runner.py
@@ -3,6 +3,7 @@
# found in the LICENSE file.
+from collections import OrderedDict
import json
import optparse
import os
@@ -16,8 +17,11 @@ sys.path.insert(
os.path.dirname(os.path.abspath(__file__))))
+from local import testsuite
from local import utils
+from testproc.shard import ShardProc
+
BASE_DIR = (
os.path.dirname(
@@ -62,6 +66,16 @@ TEST_MAP = {
"intl",
"unittests",
],
+ # This needs to stay in sync with test/d8_default.isolate.
+ "d8_default": [
+ # TODO(machenbach): uncomment after infra side lands.
+ #"debugger",
+ "mjsunit",
+ "webkit",
+ #"message",
+ #"preparser",
+ #"intl",
+ ],
# This needs to stay in sync with test/optimize_for_size.isolate.
"optimize_for_size": [
"debugger",
@@ -177,16 +191,19 @@ class BuildConfig(object):
class BaseTestRunner(object):
- def __init__(self):
+ def __init__(self, basedir=None):
+ self.basedir = basedir or BASE_DIR
self.outdir = None
self.build_config = None
self.mode_name = None
self.mode_options = None
- def execute(self):
+ def execute(self, sys_args=None):
+ if sys_args is None: # pragma: no cover
+ sys_args = sys.argv[1:]
try:
parser = self._create_parser()
- options, args = self._parse_args(parser)
+ options, args = self._parse_args(parser, sys_args)
self._load_build_config(options)
@@ -197,10 +214,15 @@ class BaseTestRunner(object):
parser.print_help()
raise
+ args = self._parse_test_args(args)
+ suites = self._get_suites(args, options.verbose)
+
self._setup_env()
- return self._do_execute(options, args)
+ return self._do_execute(suites, args, options)
except TestRunnerError:
return 1
+ except KeyboardInterrupt:
+ return 2
def _create_parser(self):
parser = optparse.OptionParser()
@@ -227,15 +249,21 @@ class BaseTestRunner(object):
"directory will be used")
parser.add_option("-v", "--verbose", help="Verbose output",
default=False, action="store_true")
+ parser.add_option("--shard-count",
+ help="Split tests into this number of shards",
+ default=1, type="int")
+ parser.add_option("--shard-run",
+ help="Run this shard from the split up tests.",
+ default=1, type="int")
def _add_parser_options(self, parser):
pass
- def _parse_args(self, parser):
- options, args = parser.parse_args()
+ def _parse_args(self, parser, sys_args):
+ options, args = parser.parse_args(sys_args)
if any(map(lambda v: v and ',' in v,
- [options.arch, options.mode])):
+ [options.arch, options.mode])): # pragma: no cover
print 'Multiple arch/mode are deprecated'
raise TestRunnerError()
@@ -248,7 +276,7 @@ class BaseTestRunner(object):
except TestRunnerError:
pass
- if not self.build_config:
+ if not self.build_config: # pragma: no cover
print 'Failed to load build config'
raise TestRunnerError
@@ -274,14 +302,14 @@ class BaseTestRunner(object):
'%s.%s' % (options.arch, options.mode))
for outdir in outdirs():
- yield os.path.join(BASE_DIR, outdir)
+ yield os.path.join(self.basedir, outdir)
# buildbot option
if options.mode:
- yield os.path.join(BASE_DIR, outdir, options.mode)
+ yield os.path.join(self.basedir, outdir, options.mode)
def _get_gn_outdir(self):
- gn_out_dir = os.path.join(BASE_DIR, DEFAULT_OUT_GN)
+ gn_out_dir = os.path.join(self.basedir, DEFAULT_OUT_GN)
latest_timestamp = -1
latest_config = None
for gn_config in os.listdir(gn_out_dir):
@@ -305,7 +333,7 @@ class BaseTestRunner(object):
with open(build_config_path) as f:
try:
build_config_json = json.load(f)
- except Exception:
+ except Exception: # pragma: no cover
print("%s exists but contains invalid json. Is your build up-to-date?"
% build_config_path)
raise TestRunnerError()
@@ -324,7 +352,7 @@ class BaseTestRunner(object):
build_config_mode = 'debug' if self.build_config.is_debug else 'release'
if options.mode:
- if options.mode not in MODES:
+ if options.mode not in MODES: # pragma: no cover
print '%s mode is invalid' % options.mode
raise TestRunnerError()
if MODES[options.mode].execution_mode != build_config_mode:
@@ -346,7 +374,7 @@ class BaseTestRunner(object):
options.arch, self.build_config.arch))
raise TestRunnerError()
- if options.shell_dir:
+ if options.shell_dir: # pragma: no cover
print('Warning: --shell-dir is deprecated. Searching for executables in '
'build directory (%s) instead.' % self.outdir)
@@ -364,7 +392,7 @@ class BaseTestRunner(object):
def _setup_env(self):
# Use the v8 root as cwd as some test cases use "load" with relative paths.
- os.chdir(BASE_DIR)
+ os.chdir(self.basedir)
# Many tests assume an English interface.
os.environ['LANG'] = 'en_US.UTF-8'
@@ -403,7 +431,7 @@ class BaseTestRunner(object):
if self.build_config.tsan:
suppressions_file = os.path.join(
- BASE_DIR,
+ self.basedir,
'tools',
'sanitizers',
'tsan_suppressions.txt')
@@ -418,7 +446,7 @@ class BaseTestRunner(object):
def _get_external_symbolizer_option(self):
external_symbolizer_path = os.path.join(
- BASE_DIR,
+ self.basedir,
'third_party',
'llvm-build',
'Release+Asserts',
@@ -432,7 +460,84 @@ class BaseTestRunner(object):
return 'external_symbolizer_path=%s' % external_symbolizer_path
+ def _parse_test_args(self, args):
+ if not args:
+ args = self._get_default_suite_names()
+
+ # Expand arguments that name grouped tests. The expanded args must still
+ # reflect the list of suites, since test filters would otherwise break.
+ def expand_test_group(name):
+ return TEST_MAP.get(name, [name])
+
+ return reduce(list.__add__, map(expand_test_group, args), [])
+
+ def _get_suites(self, args, verbose=False):
+ names = self._args_to_suite_names(args)
+ return self._load_suites(names, verbose)
+
+ def _args_to_suite_names(self, args):
+ # Match the suite names given on the command line against the suites
+ # that actually exist on disk under test/.
+ all_names = set(utils.GetSuitePaths(os.path.join(self.basedir, 'test')))
+ args_names = OrderedDict([(arg.split('/')[0], None) for arg in args]) # ordered set
+ return [name for name in args_names if name in all_names]
+
+ def _get_default_suite_names(self):
+ return []
+
+ def _expand_test_group(self, name):
+ return TEST_MAP.get(name, [name])
+
+ def _load_suites(self, names, verbose=False):
+ def load_suite(name):
+ if verbose:
+ print '>>> Loading test suite: %s' % name
+ return testsuite.TestSuite.LoadTestSuite(
+ os.path.join(self.basedir, 'test', name))
+ return map(load_suite, names)
# TODO(majeski): remove options & args parameters
- def _do_execute(self, options, args):
+ def _do_execute(self, suites, args, options):
raise NotImplementedError()
+
+ def _create_shard_proc(self, options):
+ myid, count = self._get_shard_info(options)
+ if count == 1:
+ return None
+ return ShardProc(myid - 1, count)
+
+ def _get_shard_info(self, options):
+ """
+ Returns a pair:
+ (1-based id of the current shard, in [1, number of shards]; number of shards)
+ """
+ # Read gtest shard configuration from environment (e.g. set by swarming).
+ # If none is present, use values passed on the command line.
+ shard_count = int(
+ os.environ.get('GTEST_TOTAL_SHARDS', options.shard_count))
+ shard_run = os.environ.get('GTEST_SHARD_INDEX')
+ if shard_run is not None:
+ # The v8 shard_run starts at 1, while GTEST_SHARD_INDEX starts at 0.
+ shard_run = int(shard_run) + 1
+ else:
+ shard_run = options.shard_run
+
+ if options.shard_count > 1:
+ # Log if a value was passed on the cmd line and it differs from the
+ # environment variables.
+ if options.shard_count != shard_count: # pragma: no cover
+ print("shard_count from cmd line differs from environment variable "
+ "GTEST_TOTAL_SHARDS")
+ if (options.shard_run > 1 and
+ options.shard_run != shard_run): # pragma: no cover
+ print("shard_run from cmd line differs from environment variable "
+ "GTEST_SHARD_INDEX")
+
+ if shard_run < 1 or shard_run > shard_count:
+ # TODO(machenbach): Turn this into an assert. If that's wrong on the
+ # bots, printing will be quite useless. Or refactor this code to make
+ # sure we get a return code != 0 after testing if we got here.
+ print "shard-run not a valid number, should be in [1:shard-count]"
+ print "defaulting back to running all tests"
+ return 1, 1
+
+ return shard_run, shard_count
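
The sharding logic above folds what each fuzzer previously reimplemented into the base runner: GTEST_TOTAL_SHARDS/GTEST_SHARD_INDEX (as set by swarming) take precedence over the --shard-count/--shard-run flags, and an out-of-range shard falls back to running everything. A minimal sketch of the resolution, plus the round-robin filter that ShardProc is assumed to apply (mirroring the deleted _shard_tests helpers further down):

    import os

    def resolve_shard(shard_count_flag=1, shard_run_flag=1):
        # Environment wins over command-line flags.
        count = int(os.environ.get('GTEST_TOTAL_SHARDS', shard_count_flag))
        run = os.environ.get('GTEST_SHARD_INDEX')
        # GTEST_SHARD_INDEX is 0-based; the v8 runner's shard_run is 1-based.
        run = int(run) + 1 if run is not None else shard_run_flag
        if run < 1 or run > count:
            return 1, 1  # invalid shard id: fall back to running all tests
        return run, count

    def shard_tests(tests, run, count):
        # Keep every count-th test, offset by the 1-based shard id.
        return [t for i, t in enumerate(tests) if i % count == run - 1]
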
diff --git a/deps/v8/tools/testrunner/deopt_fuzzer.py b/deps/v8/tools/testrunner/deopt_fuzzer.py
index 75878d442c..5e6b79f5e9 100755
--- a/deps/v8/tools/testrunner/deopt_fuzzer.py
+++ b/deps/v8/tools/testrunner/deopt_fuzzer.py
@@ -26,7 +26,7 @@ from testrunner.local import verbose
from testrunner.objects import context
-DEFAULT_TESTS = ["mjsunit", "webkit"]
+DEFAULT_SUITES = ["mjsunit", "webkit"]
TIMEOUT_DEFAULT = 60
# Double the timeout for these:
@@ -37,8 +37,8 @@ DISTRIBUTION_MODES = ["smooth", "random"]
class DeoptFuzzer(base_runner.BaseTestRunner):
- def __init__(self):
- super(DeoptFuzzer, self).__init__()
+ def __init__(self, *args, **kwargs):
+ super(DeoptFuzzer, self).__init__(*args, **kwargs)
class RandomDistribution:
def __init__(self, seed=None):
@@ -136,12 +136,6 @@ class DeoptFuzzer(base_runner.BaseTestRunner):
" (verbose, dots, color, mono)"),
choices=progress.PROGRESS_INDICATORS.keys(),
default="mono")
- parser.add_option("--shard-count",
- help="Split testsuites into this number of shards",
- default=1, type="int")
- parser.add_option("--shard-run",
- help="Run this shard from the split up tests.",
- default=1, type="int")
parser.add_option("--seed", help="The seed for the random distribution",
type="int")
parser.add_option("-t", "--timeout", help="Timeout in seconds",
@@ -184,47 +178,6 @@ class DeoptFuzzer(base_runner.BaseTestRunner):
options.coverage_lift = 0
return True
- def _shard_tests(self, tests, shard_count, shard_run):
- if shard_count < 2:
- return tests
- if shard_run < 1 or shard_run > shard_count:
- print "shard-run not a valid number, should be in [1:shard-count]"
- print "defaulting back to running all tests"
- return tests
- count = 0
- shard = []
- for test in tests:
- if count % shard_count == shard_run - 1:
- shard.append(test)
- count += 1
- return shard
-
- def _do_execute(self, options, args):
- suite_paths = utils.GetSuitePaths(join(base_runner.BASE_DIR, "test"))
-
- if len(args) == 0:
- suite_paths = [ s for s in suite_paths if s in DEFAULT_TESTS ]
- else:
- args_suites = set()
- for arg in args:
- suite = arg.split(os.path.sep)[0]
- if not suite in args_suites:
- args_suites.add(suite)
- suite_paths = [ s for s in suite_paths if s in args_suites ]
-
- suites = []
- for root in suite_paths:
- suite = testsuite.TestSuite.LoadTestSuite(
- os.path.join(base_runner.BASE_DIR, "test", root))
- if suite:
- suites.append(suite)
-
- try:
- return self._execute(args, options, suites)
- except KeyboardInterrupt:
- return 2
-
-
def _calculate_n_tests(self, m, options):
"""Calculates the number of tests from m deopt points with exponential
coverage.
@@ -235,8 +188,10 @@ class DeoptFuzzer(base_runner.BaseTestRunner):
l = float(options.coverage_lift)
return int(math.pow(m, (m * c + l) / (m + l)))
+ def _get_default_suite_names(self):
+ return DEFAULT_SUITES
- def _execute(self, args, options, suites):
+ def _do_execute(self, suites, args, options):
print(">>> Running tests for %s.%s" % (self.build_config.arch,
self.mode_name))
@@ -264,7 +219,6 @@ class DeoptFuzzer(base_runner.BaseTestRunner):
True, # No sorting of test cases.
0, # Don't rerun failing tests.
0, # No use of a rerun-failing-tests maximum.
- False, # No predictable mode.
False, # No no_harness mode.
False, # Don't use perf data.
False) # Coverage not supported.
@@ -305,16 +259,16 @@ class DeoptFuzzer(base_runner.BaseTestRunner):
if len(args) > 0:
s.FilterTestCasesByArgs(args)
s.FilterTestCasesByStatus(False)
- for t in s.tests:
- t.flags += s.GetStatusfileFlags(t)
test_backup[s] = s.tests
analysis_flags = ["--deopt-every-n-times", "%d" % MAX_DEOPT,
"--print-deopt-stress"]
- s.tests = [t.CopyAddingFlags(t.variant, analysis_flags) for t in s.tests]
+ s.tests = [t.create_variant(t.variant, analysis_flags, 'analysis')
+ for t in s.tests]
num_tests += len(s.tests)
for t in s.tests:
t.id = test_id
+ t.cmd = t.get_command(ctx)
test_id += 1
if num_tests == 0:
@@ -333,7 +287,7 @@ class DeoptFuzzer(base_runner.BaseTestRunner):
for s in suites:
test_results = {}
for t in s.tests:
- for line in t.output.stdout.splitlines():
+ for line in runner.outputs[t].stdout.splitlines():
if line.startswith("=== Stress deopt counter: "):
test_results[t.path] = MAX_DEOPT - int(line.split(" ")[-1])
for t in s.tests:
@@ -357,17 +311,18 @@ class DeoptFuzzer(base_runner.BaseTestRunner):
distribution = dist.Distribute(n_deopt, max_deopt)
if options.verbose:
print "%s %s" % (t.path, distribution)
- for i in distribution:
- fuzzing_flags = ["--deopt-every-n-times", "%d" % i]
- s.tests.append(t.CopyAddingFlags(t.variant, fuzzing_flags))
+ for n, d in enumerate(distribution):
+ fuzzing_flags = ["--deopt-every-n-times", "%d" % d]
+ s.tests.append(t.create_variant(t.variant, fuzzing_flags, n))
num_tests += len(s.tests)
for t in s.tests:
t.id = test_id
+ t.cmd = t.get_command(ctx)
test_id += 1
if num_tests == 0:
print "No tests to run."
- return 0
+ return exit_code
print(">>> Deopt fuzzing phase (%d test cases)" % num_tests)
progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
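
For reference, _calculate_n_tests in the context lines above maps m deopt points to n = m ** ((m*c + l) / (m + l)) test cases, where c is the coverage ratio and l the lift. A quick numeric check (values illustrative only):

    import math

    def n_tests(m, c, l):
        return int(math.pow(m, (m * c + l) / (m + l)))

    assert n_tests(10, 1.0, 0.0) == 10  # full coverage: one test per point
    print(n_tests(10, 0.4, 5.0))        # 10 ** 0.6, i.e. 3 tests
    print(n_tests(100, 0.4, 5.0))       # 100 ** (45/105.), i.e. 7 tests
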
diff --git a/deps/v8/tools/testrunner/gc_fuzzer.py b/deps/v8/tools/testrunner/gc_fuzzer.py
index 4130fff8be..18be227d98 100755
--- a/deps/v8/tools/testrunner/gc_fuzzer.py
+++ b/deps/v8/tools/testrunner/gc_fuzzer.py
@@ -27,7 +27,7 @@ from testrunner.local import verbose
from testrunner.objects import context
-DEFAULT_TESTS = ["mjsunit", "webkit"]
+DEFAULT_SUITES = ["mjsunit", "webkit", "benchmarks"]
TIMEOUT_DEFAULT = 60
# Double the timeout for these:
@@ -36,8 +36,8 @@ SLOW_ARCHS = ["arm",
class GCFuzzer(base_runner.BaseTestRunner):
- def __init__(self):
- super(GCFuzzer, self).__init__()
+ def __init__(self, *args, **kwargs):
+ super(GCFuzzer, self).__init__(*args, **kwargs)
self.fuzzer_rng = None
@@ -64,12 +64,6 @@ class GCFuzzer(base_runner.BaseTestRunner):
" (verbose, dots, color, mono)"),
choices=progress.PROGRESS_INDICATORS.keys(),
default="mono")
- parser.add_option("--shard-count",
- help="Split testsuites into this number of shards",
- default=1, type="int")
- parser.add_option("--shard-run",
- help="Run this shard from the split up tests.",
- default=1, type="int")
parser.add_option("-t", "--timeout", help="Timeout in seconds",
default= -1, type="int")
parser.add_option("--random-seed", default=0,
@@ -102,47 +96,6 @@ class GCFuzzer(base_runner.BaseTestRunner):
self.fuzzer_rng = random.Random(options.fuzzer_random_seed)
return True
- def _shard_tests(self, tests, shard_count, shard_run):
- if shard_count < 2:
- return tests
- if shard_run < 1 or shard_run > shard_count:
- print "shard-run not a valid number, should be in [1:shard-count]"
- print "defaulting back to running all tests"
- return tests
- count = 0
- shard = []
- for test in tests:
- if count % shard_count == shard_run - 1:
- shard.append(test)
- count += 1
- return shard
-
- def _do_execute(self, options, args):
- suite_paths = utils.GetSuitePaths(join(base_runner.BASE_DIR, "test"))
-
- if len(args) == 0:
- suite_paths = [ s for s in suite_paths if s in DEFAULT_TESTS ]
- else:
- args_suites = set()
- for arg in args:
- suite = arg.split(os.path.sep)[0]
- if not suite in args_suites:
- args_suites.add(suite)
- suite_paths = [ s for s in suite_paths if s in args_suites ]
-
- suites = []
- for root in suite_paths:
- suite = testsuite.TestSuite.LoadTestSuite(
- os.path.join(base_runner.BASE_DIR, "test", root))
- if suite:
- suites.append(suite)
-
- try:
- return self._execute(args, options, suites)
- except KeyboardInterrupt:
- return 2
-
-
def _calculate_n_tests(self, m, options):
"""Calculates the number of tests from m points with exponential coverage.
The coverage is expected to be between 0.0 and 1.0.
@@ -152,8 +105,10 @@ class GCFuzzer(base_runner.BaseTestRunner):
l = float(options.coverage_lift)
return int(math.pow(m, (m * c + l) / (m + l)))
+ def _get_default_suite_names(self):
+ return DEFAULT_SUITES
- def _execute(self, args, options, suites):
+ def _do_execute(self, suites, args, options):
print(">>> Running tests for %s.%s" % (self.build_config.arch,
self.mode_name))
@@ -179,7 +134,6 @@ class GCFuzzer(base_runner.BaseTestRunner):
True, # No sorting of test cases.
0, # Don't rerun failing tests.
0, # No use of a rerun-failing-tests maximum.
- False, # No predictable mode.
False, # No no_harness mode.
False, # Don't use perf data.
False) # Coverage not supported.
@@ -193,14 +147,12 @@ class GCFuzzer(base_runner.BaseTestRunner):
print('>>> Collection phase')
for s in suites:
- analysis_flags = [
- # > 100% to not influence default incremental marking, but we need this
- # flag to print reached incremental marking limit.
- '--stress_marking', '1000',
- '--trace_incremental_marking',
- ]
- s.tests = map(lambda t: t.CopyAddingFlags(t.variant, analysis_flags),
+ analysis_flags = ['--fuzzer-gc-analysis']
+ s.tests = map(lambda t: t.create_variant(t.variant, analysis_flags,
+ 'analysis'),
s.tests)
+ for t in s.tests:
+ t.cmd = t.get_command(ctx)
progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
runner = execution.Runner(suites, progress_indicator, ctx)
@@ -211,13 +163,15 @@ class GCFuzzer(base_runner.BaseTestRunner):
for s in suites:
for t in s.tests:
# Skip failed tests.
- if s.HasUnexpectedOutput(t):
+ if t.output_proc.has_unexpected_output(runner.outputs[t]):
print '%s failed, skipping' % t.path
continue
- max_limit = self._get_max_limit_reached(t)
+ max_limit = self._get_max_limit_reached(runner.outputs[t])
if max_limit:
test_results[t.path] = max_limit
+ runner = None
+
if options.dump_results_file:
with file("%s.%d.txt" % (options.dump_results_file, time.time()),
"w") as f:
@@ -237,7 +191,7 @@ class GCFuzzer(base_runner.BaseTestRunner):
if options.verbose:
print ('%s [x%d] (max marking limit=%.02f)' %
(t.path, subtests_count, max_percent))
- for _ in xrange(0, subtests_count):
+ for i in xrange(0, subtests_count):
fuzzer_seed = self._next_fuzzer_seed()
fuzzing_flags = [
'--stress_marking', str(max_percent),
@@ -245,12 +199,14 @@ class GCFuzzer(base_runner.BaseTestRunner):
]
if options.stress_compaction:
fuzzing_flags.append('--stress_compaction_random')
- s.tests.append(t.CopyAddingFlags(t.variant, fuzzing_flags))
+ s.tests.append(t.create_variant(t.variant, fuzzing_flags, i))
+ for t in s.tests:
+ t.cmd = t.get_command(ctx)
num_tests += len(s.tests)
if num_tests == 0:
print "No tests to run."
- return 0
+ return exit_code
print(">>> Fuzzing phase (%d test cases)" % num_tests)
progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
@@ -292,8 +248,6 @@ class GCFuzzer(base_runner.BaseTestRunner):
if len(args) > 0:
s.FilterTestCasesByArgs(args)
s.FilterTestCasesByStatus(False)
- for t in s.tests:
- t.flags += s.GetStatusfileFlags(t)
num_tests += len(s.tests)
for t in s.tests:
@@ -304,31 +258,16 @@ class GCFuzzer(base_runner.BaseTestRunner):
# Parses test stdout and returns what was the highest reached percent of the
# incremental marking limit (0-100).
- # Skips values >=100% since they already trigger incremental marking.
@staticmethod
- def _get_max_limit_reached(test):
- def is_im_line(l):
- return 'IncrementalMarking' in l and '% of the memory limit reached' in l
-
- def line_to_percent(l):
- return filter(lambda part: '%' in part, l.split(' '))[0]
-
- def percent_str_to_float(s):
- return float(s[:-1])
-
- if not (test.output and test.output.stdout):
+ def _get_max_limit_reached(output):
+ if not output.stdout:
return None
- im_lines = filter(is_im_line, test.output.stdout.splitlines())
- percents_str = map(line_to_percent, im_lines)
- percents = map(percent_str_to_float, percents_str)
-
- # Skip >= 100%.
- percents = filter(lambda p: p < 100, percents)
+ for l in reversed(output.stdout.splitlines()):
+ if l.startswith('### Maximum marking limit reached ='):
+ return float(l.split()[6])
- if not percents:
- return None
- return max(percents)
+ return None
def _next_fuzzer_seed(self):
fuzzer_seed = None
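
The rewritten _get_max_limit_reached no longer aggregates IncrementalMarking trace lines; the --fuzzer-gc-analysis flag makes d8 print a single summary line, which is scanned for from the end of stdout. A sketch of that parsing, with an illustrative sample:

    def max_limit_reached(stdout):
        # Scan backwards: the summary line is printed near the end.
        for line in reversed(stdout.splitlines()):
            if line.startswith('### Maximum marking limit reached ='):
                return float(line.split()[6])
        return None

    sample = 'unrelated output\n### Maximum marking limit reached = 73.50\n'
    assert max_limit_reached(sample) == 73.5
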
diff --git a/deps/v8/tools/testrunner/local/command.py b/deps/v8/tools/testrunner/local/command.py
new file mode 100644
index 0000000000..93b1ac9497
--- /dev/null
+++ b/deps/v8/tools/testrunner/local/command.py
@@ -0,0 +1,171 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import os
+import subprocess
+import sys
+import threading
+import time
+
+from ..local import utils
+from ..objects import output
+
+
+SEM_INVALID_VALUE = -1
+SEM_NOGPFAULTERRORBOX = 0x0002 # Microsoft Platform SDK WinBase.h
+
+
+class BaseCommand(object):
+ def __init__(self, shell, args=None, cmd_prefix=None, timeout=60, env=None,
+ verbose=False):
+ assert timeout > 0
+
+ self.shell = shell
+ self.args = args or []
+ self.cmd_prefix = cmd_prefix or []
+ self.timeout = timeout
+ self.env = env or {}
+ self.verbose = verbose
+
+ def execute(self, **additional_popen_kwargs):
+ if self.verbose:
+ print '# %s' % self
+
+ process = self._start_process(**additional_popen_kwargs)
+
+ # Flag, wrapped in a list so the timer callback can mutate it,
+ # recording whether the timeout fired.
+ timeout_occurred = [False]
+ timer = threading.Timer(
+ self.timeout, self._on_timeout, [process, timeout_occurred])
+ timer.start()
+
+ start_time = time.time()
+ stdout, stderr = process.communicate()
+ duration = time.time() - start_time
+
+ timer.cancel()
+
+ return output.Output(
+ process.returncode,
+ timeout_occurred[0],
+ stdout.decode('utf-8', 'replace').encode('utf-8'),
+ stderr.decode('utf-8', 'replace').encode('utf-8'),
+ process.pid,
+ duration
+ )
+
+ def _start_process(self, **additional_popen_kwargs):
+ try:
+ return subprocess.Popen(
+ args=self._get_popen_args(),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env=self._get_env(),
+ **additional_popen_kwargs
+ )
+ except Exception as e:
+ sys.stderr.write('Error executing: %s\n' % self)
+ raise e
+
+ def _get_popen_args(self):
+ return self._to_args_list()
+
+ def _get_env(self):
+ env = os.environ.copy()
+ env.update(self.env)
+ # GTest shard information is read by the V8 test runner. Make sure it
+ # doesn't leak into the execution of gtests we're wrapping. Those might
+ # otherwise apply a second level of sharding and as a result skip tests.
+ env.pop('GTEST_TOTAL_SHARDS', None)
+ env.pop('GTEST_SHARD_INDEX', None)
+ return env
+
+ def _kill_process(self, process):
+ raise NotImplementedError()
+
+ def _on_timeout(self, process, timeout_occurred):
+ timeout_occurred[0] = True
+ try:
+ self._kill_process(process)
+ except OSError:
+ sys.stderr.write('Error: Process %s already ended.\n' % process.pid)
+
+ def __str__(self):
+ return self.to_string()
+
+ def to_string(self, relative=False):
+ def escape(part):
+ # Escape spaces. We may need to escape more characters for this to work
+ # properly.
+ if ' ' in part:
+ return '"%s"' % part
+ return part
+
+ parts = map(escape, self._to_args_list())
+ cmd = ' '.join(parts)
+ if relative:
+ cmd = cmd.replace(os.getcwd() + os.sep, '')
+ return cmd
+
+ def _to_args_list(self):
+ return self.cmd_prefix + [self.shell] + self.args
+
+
+class PosixCommand(BaseCommand):
+ def _kill_process(self, process):
+ process.kill()
+
+
+class WindowsCommand(BaseCommand):
+ def _start_process(self, **kwargs):
+ # Try to change the error mode to avoid dialogs on fatal errors. Don't
+ # touch any existing error mode flags by merging the existing error mode.
+ # See http://blogs.msdn.com/oldnewthing/archive/2004/07/27/198410.aspx.
+ def set_error_mode(mode):
+ prev_error_mode = SEM_INVALID_VALUE
+ try:
+ import ctypes
+ prev_error_mode = (
+ ctypes.windll.kernel32.SetErrorMode(mode)) #@UndefinedVariable
+ except ImportError:
+ pass
+ return prev_error_mode
+
+ error_mode = SEM_NOGPFAULTERRORBOX
+ prev_error_mode = set_error_mode(error_mode)
+ set_error_mode(error_mode | prev_error_mode)
+
+ try:
+ return super(WindowsCommand, self)._start_process(**kwargs)
+ finally:
+ if prev_error_mode != SEM_INVALID_VALUE:
+ set_error_mode(prev_error_mode)
+
+ def _get_popen_args(self):
+ return subprocess.list2cmdline(self._to_args_list())
+
+ def _kill_process(self, process):
+ if self.verbose:
+ print 'Attempting to kill process %d' % process.pid
+ sys.stdout.flush()
+ tk = subprocess.Popen(
+ 'taskkill /T /F /PID %d' % process.pid,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+ stdout, stderr = tk.communicate()
+ if self.verbose:
+ print 'Taskkill results for %d' % process.pid
+ print stdout
+ print stderr
+ print 'Return code: %d' % tk.returncode
+ sys.stdout.flush()
+
+
+# Set the Command class to the OS-specific version.
+if utils.IsWindows():
+ Command = WindowsCommand
+else:
+ Command = PosixCommand
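
The new command module replaces the procedural commands.Execute() with an immutable, pretty-printable command object, as the TODO in the deleted commands.py below had suggested. A rough usage sketch; the shell path and flags are illustrative:

    from testrunner.local.command import Command

    cmd = Command(
        'out/x64.release/d8',               # shell
        args=['--test', 'mjsunit/foo.js'],
        timeout=60,
        env={'LANG': 'en_US.UTF-8'},
        verbose=True,
    )
    out = cmd.execute()                     # returns an output.Output
    if out.HasTimedOut():
        print('timed out after %.1fs' % out.duration)
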
diff --git a/deps/v8/tools/testrunner/local/commands.py b/deps/v8/tools/testrunner/local/commands.py
deleted file mode 100644
index 4afd450d2f..0000000000
--- a/deps/v8/tools/testrunner/local/commands.py
+++ /dev/null
@@ -1,152 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import os
-import subprocess
-import sys
-from threading import Timer
-
-from ..local import utils
-from ..objects import output
-
-
-SEM_INVALID_VALUE = -1
-SEM_NOGPFAULTERRORBOX = 0x0002 # Microsoft Platform SDK WinBase.h
-
-
-def Win32SetErrorMode(mode):
- prev_error_mode = SEM_INVALID_VALUE
- try:
- import ctypes
- prev_error_mode = \
- ctypes.windll.kernel32.SetErrorMode(mode) #@UndefinedVariable
- except ImportError:
- pass
- return prev_error_mode
-
-
-def RunProcess(verbose, timeout, args, additional_env, **rest):
- if verbose: print "#", " ".join(args)
- popen_args = args
- prev_error_mode = SEM_INVALID_VALUE
- if utils.IsWindows():
- popen_args = subprocess.list2cmdline(args)
- # Try to change the error mode to avoid dialogs on fatal errors. Don't
- # touch any existing error mode flags by merging the existing error mode.
- # See http://blogs.msdn.com/oldnewthing/archive/2004/07/27/198410.aspx.
- error_mode = SEM_NOGPFAULTERRORBOX
- prev_error_mode = Win32SetErrorMode(error_mode)
- Win32SetErrorMode(error_mode | prev_error_mode)
-
- env = os.environ.copy()
- env.update(additional_env)
- # GTest shard information is read by the V8 tests runner. Make sure it
- # doesn't leak into the execution of gtests we're wrapping. Those might
- # otherwise apply a second level of sharding and as a result skip tests.
- env.pop('GTEST_TOTAL_SHARDS', None)
- env.pop('GTEST_SHARD_INDEX', None)
-
- try:
- process = subprocess.Popen(
- args=popen_args,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- env=env,
- **rest
- )
- except Exception as e:
- sys.stderr.write("Error executing: %s\n" % popen_args)
- raise e
-
- if (utils.IsWindows() and prev_error_mode != SEM_INVALID_VALUE):
- Win32SetErrorMode(prev_error_mode)
-
- def kill_process(process, timeout_result):
- timeout_result[0] = True
- try:
- if utils.IsWindows():
- if verbose:
- print "Attempting to kill process %d" % process.pid
- sys.stdout.flush()
- tk = subprocess.Popen(
- 'taskkill /T /F /PID %d' % process.pid,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- )
- stdout, stderr = tk.communicate()
- if verbose:
- print "Taskkill results for %d" % process.pid
- print stdout
- print stderr
- print "Return code: %d" % tk.returncode
- sys.stdout.flush()
- else:
- if utils.GuessOS() == "macos":
- # TODO(machenbach): Temporary output for investigating hanging test
- # driver on mac.
- print "Attempting to kill process %d - cmd %s" % (process.pid, args)
- try:
- print subprocess.check_output(
- "ps -e | egrep 'd8|cctest|unittests'", shell=True)
- except Exception:
- pass
- sys.stdout.flush()
- process.kill()
- if utils.GuessOS() == "macos":
- # TODO(machenbach): Temporary output for investigating hanging test
- # driver on mac. This will probably not print much, since kill only
- # sends the signal.
- print "Return code after signalling the kill: %s" % process.returncode
- sys.stdout.flush()
-
- except OSError:
- sys.stderr.write('Error: Process %s already ended.\n' % process.pid)
-
- # Pseudo object to communicate with timer thread.
- timeout_result = [False]
-
- timer = Timer(timeout, kill_process, [process, timeout_result])
- timer.start()
- stdout, stderr = process.communicate()
- timer.cancel()
-
- return output.Output(
- process.returncode,
- timeout_result[0],
- stdout.decode('utf-8', 'replace').encode('utf-8'),
- stderr.decode('utf-8', 'replace').encode('utf-8'),
- process.pid,
- )
-
-
-# TODO(machenbach): Instead of passing args around, we should introduce an
-# immutable Command class (that just represents the command with all flags and
-# is pretty-printable) and a member method for running such a command.
-def Execute(args, verbose=False, timeout=None, env=None):
- args = [ c for c in args if c != "" ]
- return RunProcess(verbose, timeout, args, env or {})
diff --git a/deps/v8/tools/testrunner/local/execution.py b/deps/v8/tools/testrunner/local/execution.py
index 8cc3556cae..d6d0725365 100644
--- a/deps/v8/tools/testrunner/local/execution.py
+++ b/deps/v8/tools/testrunner/local/execution.py
@@ -31,15 +31,14 @@ import os
import re
import shutil
import sys
-import time
+import traceback
-from pool import Pool
-from . import commands
+from . import command
from . import perfdata
from . import statusfile
-from . import testsuite
from . import utils
-from ..objects import output
+from .pool import Pool
+from ..objects import predictable
# Base dir of the v8 checkout.
@@ -48,76 +47,22 @@ BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(
TEST_DIR = os.path.join(BASE_DIR, "test")
-class Instructions(object):
- def __init__(self, command, test_id, timeout, verbose, env):
- self.command = command
- self.id = test_id
- self.timeout = timeout
- self.verbose = verbose
- self.env = env
-
-
# Structure that keeps global information per worker process.
ProcessContext = collections.namedtuple(
- "process_context", ["suites", "context"])
+ 'process_context', ['sancov_dir'])
-def MakeProcessContext(context, suite_names):
- """Generate a process-local context.
+TestJobResult = collections.namedtuple(
+ 'TestJobResult', ['id', 'outproc_result'])
- This reloads all suites per process and stores the global context.
+def MakeProcessContext(sancov_dir):
+ return ProcessContext(sancov_dir)
- Args:
- context: The global context from the test runner.
- suite_names (list of str): Suite names as loaded by the parent process.
- Load the same suites in each subprocess.
- """
- suites = {}
- for root in suite_names:
- # Don't reinitialize global state as this is concurrently called from
- # different processes.
- suite = testsuite.TestSuite.LoadTestSuite(
- os.path.join(TEST_DIR, root), global_init=False)
- if suite:
- suites[suite.name] = suite
- return ProcessContext(suites, context)
-
-
-def GetCommand(test, context):
- d8testflag = []
- shell = test.suite.GetShellForTestCase(test)
- if shell == "d8":
- d8testflag = ["--test"]
- if utils.IsWindows():
- shell += ".exe"
- if context.random_seed:
- d8testflag += ["--random-seed=%s" % context.random_seed]
- files, flags, env = test.suite.GetParametersForTestCase(test, context)
- cmd = (
- context.command_prefix +
- [os.path.abspath(os.path.join(context.shell_dir, shell))] +
- d8testflag +
- files +
- context.extra_flags +
- # Flags from test cases can overwrite extra cmd-line flags.
- flags
- )
- return cmd, env
-
-
-def _GetInstructions(test, context):
- command, env = GetCommand(test, context)
- timeout = context.timeout
- if ("--stress-opt" in test.flags or
- "--stress-opt" in context.mode_flags or
- "--stress-opt" in context.extra_flags):
- timeout *= 4
- if "--noenable-vfp3" in context.extra_flags:
- timeout *= 2
-
- # TODO(majeski): make it slow outcome dependent.
- timeout *= 2
- return Instructions(command, test.id, timeout, context.verbose, env)
+
+# Global function for multiprocessing, because pickling a static method doesn't
+# work on Windows.
+def run_job(job, process_context):
+ return job.run(process_context)
class Job(object):
@@ -126,31 +71,18 @@ class Job(object):
All contained fields will be pickled/unpickled.
"""
- def Run(self, process_context):
- """Executes the job.
-
- Args:
- process_context: Process-local information that is initialized by the
- executing worker.
- """
+ def run(self, process_context):
raise NotImplementedError()
-def SetupProblem(exception, test):
- stderr = ">>> EXCEPTION: %s\n" % exception
- match = re.match(r"^.*No such file or directory: '(.*)'$", str(exception))
- if match:
- # Extra debuging information when files are claimed missing.
- f = match.group(1)
- stderr += ">>> File %s exists? -> %s\n" % (f, os.path.exists(f))
- return test.id, output.Output(1, False, "", stderr, None), 0
-
-
class TestJob(Job):
- def __init__(self, test):
- self.test = test
+ def __init__(self, test_id, cmd, outproc, run_num):
+ self.test_id = test_id
+ self.cmd = cmd
+ self.outproc = outproc
+ self.run_num = run_num
- def _rename_coverage_data(self, output, context):
+ def _rename_coverage_data(self, out, sancov_dir):
"""Rename coverage data.
Rename files with PIDs to files with unique test IDs, because the number
@@ -159,64 +91,53 @@ class TestJob(Job):
42 is the test ID and 1 is the attempt (the same test might be rerun on
failures).
"""
- if context.sancov_dir and output.pid is not None:
- shell = self.test.suite.GetShellForTestCase(self.test)
- sancov_file = os.path.join(
- context.sancov_dir, "%s.%d.sancov" % (shell, output.pid))
+ if sancov_dir and out.pid is not None:
+ # Sancov doesn't run on Windows, so basename is sufficient to get the
+ # shell name.
+ shell = os.path.basename(self.cmd.shell)
+ sancov_file = os.path.join(sancov_dir, "%s.%d.sancov" % (shell, out.pid))
# Some tests are expected to fail and don't produce coverage data.
if os.path.exists(sancov_file):
parts = sancov_file.split(".")
new_sancov_file = ".".join(
parts[:-2] +
- ["test", str(self.test.id), str(self.test.run)] +
+ ["test", str(self.test_id), str(self.run_num)] +
parts[-1:]
)
assert not os.path.exists(new_sancov_file)
os.rename(sancov_file, new_sancov_file)
- def Run(self, process_context):
- try:
- # Retrieve a new suite object on the worker-process side. The original
- # suite object isn't pickled.
- self.test.SetSuiteObject(process_context.suites)
- instr = _GetInstructions(self.test, process_context.context)
- except Exception, e:
- # TODO(majeski): Better exception reporting.
- return SetupProblem(e, self.test)
-
- start_time = time.time()
- output = commands.Execute(instr.command, instr.verbose, instr.timeout,
- instr.env)
- self._rename_coverage_data(output, process_context.context)
- return (instr.id, output, time.time() - start_time)
-
-
-def RunTest(job, process_context):
- return job.Run(process_context)
+ def run(self, context):
+ output = self.cmd.execute()
+ self._rename_coverage_data(output, context.sancov_dir)
+ return TestJobResult(self.test_id, self.outproc.process(output))
class Runner(object):
- def __init__(self, suites, progress_indicator, context):
+ def __init__(self, suites, progress_indicator, context, outproc_factory=None):
self.datapath = os.path.join("out", "testrunner_data")
self.perf_data_manager = perfdata.GetPerfDataManager(
context, self.datapath)
self.perfdata = self.perf_data_manager.GetStore(context.arch, context.mode)
self.perf_failures = False
self.printed_allocations = False
+ self.outproc_factory = outproc_factory or (lambda test: test.output_proc)
self.tests = [t for s in suites for t in s.tests]
+
+ # TODO(majeski): Pass dynamically instead of keeping them in the runner.
+ # Maybe some observer?
+ self.outputs = {t: None for t in self.tests}
+
self.suite_names = [s.name for s in suites]
# Always pre-sort by status file, slowest tests first.
- slow_key = lambda t: statusfile.IsSlow(t.suite.GetStatusFileOutcomes(t))
- self.tests.sort(key=slow_key, reverse=True)
+ self.tests.sort(key=lambda t: t.is_slow, reverse=True)
- # Sort by stored duration of not opted out.
+ # Sort by stored duration if not opted out.
if not context.no_sorting:
- for t in self.tests:
- t.duration = self.perfdata.FetchPerfData(t) or 1.0
- self.tests.sort(key=lambda t: t.duration, reverse=True)
+ self.tests.sort(key=lambda t: self.perfdata.FetchPerfData(t) or 1.0,
+ reverse=True)
self._CommonInit(suites, progress_indicator, context)
@@ -242,7 +163,7 @@ class Runner(object):
print("PerfData exception: %s" % e)
self.perf_failures = True
- def _MaybeRerun(self, pool, test):
+ def _MaybeRerun(self, pool, test, result):
if test.run <= self.context.rerun_failures_count:
# Possibly rerun this test if its run count is below the maximum per
# test. <= as the flag controls reruns not including the first run.
@@ -254,25 +175,25 @@ class Runner(object):
# Don't rerun this if the overall number of rerun tests has been
# reached.
return
- if test.run >= 2 and test.duration > self.context.timeout / 20.0:
+ if (test.run >= 2 and
+ result.output.duration > self.context.timeout / 20.0):
# Rerun slow tests at most once.
return
# Rerun this test.
- test.duration = None
- test.output = None
test.run += 1
- pool.add([TestJob(test)])
+ pool.add([
+ TestJob(test.id, test.cmd, self.outproc_factory(test), test.run)
+ ])
self.remaining += 1
self.total += 1
- def _ProcessTestNormal(self, test, result, pool):
- test.output = result[1]
- test.duration = result[2]
- has_unexpected_output = test.suite.HasUnexpectedOutput(test)
+ def _ProcessTest(self, test, result, pool):
+ self.outputs[test] = result.output
+ has_unexpected_output = result.has_unexpected_output
if has_unexpected_output:
self.failed.append(test)
- if test.output.HasCrashed():
+ if result.output.HasCrashed():
self.crashed += 1
else:
self.succeeded += 1
@@ -280,57 +201,15 @@ class Runner(object):
# For the indicator, everything that happens after the first run is treated
# as unexpected even if it flakily passes in order to include it in the
# output.
- self.indicator.HasRun(test, has_unexpected_output or test.run > 1)
+ self.indicator.HasRun(test, result.output,
+ has_unexpected_output or test.run > 1)
if has_unexpected_output:
# Rerun test failures after the indicator has processed the results.
self._VerbosePrint("Attempting to rerun test after failure.")
- self._MaybeRerun(pool, test)
+ self._MaybeRerun(pool, test, result)
# Update the perf database if the test succeeded.
return not has_unexpected_output
- def _ProcessTestPredictable(self, test, result, pool):
- def HasDifferentAllocations(output1, output2):
- def AllocationStr(stdout):
- for line in reversed((stdout or "").splitlines()):
- if line.startswith("### Allocations = "):
- self.printed_allocations = True
- return line
- return ""
- return (AllocationStr(output1.stdout) != AllocationStr(output2.stdout))
-
- # Always pass the test duration for the database update.
- test.duration = result[2]
- if test.run == 1 and result[1].HasTimedOut():
- # If we get a timeout in the first run, we are already in an
- # unpredictable state. Just report it as a failure and don't rerun.
- test.output = result[1]
- self.remaining -= 1
- self.failed.append(test)
- self.indicator.HasRun(test, True)
- if test.run > 1 and HasDifferentAllocations(test.output, result[1]):
- # From the second run on, check for different allocations. If a
- # difference is found, call the indicator twice to report both tests.
- # All runs of each test are counted as one for the statistic.
- self.remaining -= 1
- self.failed.append(test)
- self.indicator.HasRun(test, True)
- test.output = result[1]
- self.indicator.HasRun(test, True)
- elif test.run >= 3:
- # No difference on the third run -> report a success.
- self.remaining -= 1
- self.succeeded += 1
- test.output = result[1]
- self.indicator.HasRun(test, False)
- else:
- # No difference yet and less than three runs -> add another run and
- # remember the output for comparison.
- test.run += 1
- test.output = result[1]
- pool.add([TestJob(test)])
- # Always update the perf database.
- return True
-
def Run(self, jobs):
self.indicator.Starting()
self._RunInternal(jobs)
@@ -350,50 +229,54 @@ class Runner(object):
assert test.id >= 0
test_map[test.id] = test
try:
- yield [TestJob(test)]
+ yield [
+ TestJob(test.id, test.cmd, self.outproc_factory(test), test.run)
+ ]
except Exception, e:
# If this failed, save the exception and re-raise it later (after
# all other tests have had a chance to run).
- queued_exception[0] = e
+ queued_exception[0] = e, traceback.format_exc()
continue
try:
it = pool.imap_unordered(
- fn=RunTest,
+ fn=run_job,
gen=gen_tests(),
process_context_fn=MakeProcessContext,
- process_context_args=[self.context, self.suite_names],
+ process_context_args=[self.context.sancov_dir],
)
for result in it:
if result.heartbeat:
self.indicator.Heartbeat()
continue
- test = test_map[result.value[0]]
- if self.context.predictable:
- update_perf = self._ProcessTestPredictable(test, result.value, pool)
- else:
- update_perf = self._ProcessTestNormal(test, result.value, pool)
+
+ job_result = result.value
+ test_id = job_result.id
+ outproc_result = job_result.outproc_result
+
+ test = test_map[test_id]
+ update_perf = self._ProcessTest(test, outproc_result, pool)
if update_perf:
- self._RunPerfSafe(lambda: self.perfdata.UpdatePerfData(test))
+ self._RunPerfSafe(lambda: self.perfdata.UpdatePerfData(
+ test, outproc_result.output.duration))
+ except KeyboardInterrupt:
+ raise
+ except:
+ traceback.print_exc()
+ raise
finally:
self._VerbosePrint("Closing process pool.")
pool.terminate()
self._VerbosePrint("Closing database connection.")
- self._RunPerfSafe(lambda: self.perf_data_manager.close())
+ self._RunPerfSafe(self.perf_data_manager.close)
if self.perf_failures:
# Nuke perf data in case of failures. This might not work on windows as
# some files might still be open.
print "Deleting perf test data due to db corruption."
shutil.rmtree(self.datapath)
if queued_exception[0]:
- raise queued_exception[0]
-
- # Make sure that any allocations were printed in predictable mode (if we
- # ran any tests).
- assert (
- not self.total or
- not self.context.predictable or
- self.printed_allocations
- )
+ e, stacktrace = queued_exception[0]
+ print stacktrace
+ raise e
def _VerbosePrint(self, text):
if self.context.verbose:
@@ -403,6 +286,8 @@ class Runner(object):
class BreakNowException(Exception):
def __init__(self, value):
+ super(BreakNowException, self).__init__()
self.value = value
+
def __str__(self):
return repr(self.value)
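
run_job above is deliberately a module-level function: multiprocessing pickles the callable it ships to workers, and on Windows (which cannot fork) bound and static methods do not pickle. A self-contained illustration of the constraint:

    import multiprocessing

    def work(job):        # picklable: a plain module-level function
        return job * 2

    class Worker(object):
        @staticmethod
        def work(job):    # pool.map(Worker.work, ...) breaks on Windows
            return job * 2

    if __name__ == '__main__':
        pool = multiprocessing.Pool(2)
        print(pool.map(work, [1, 2, 3]))  # [2, 4, 6]
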
diff --git a/deps/v8/tools/testrunner/local/junit_output.py b/deps/v8/tools/testrunner/local/junit_output.py
index d2748febd9..52f31ec422 100644
--- a/deps/v8/tools/testrunner/local/junit_output.py
+++ b/deps/v8/tools/testrunner/local/junit_output.py
@@ -34,9 +34,10 @@ class JUnitTestOutput:
self.root = xml.Element("testsuite")
self.root.attrib["name"] = test_suite_name
- def HasRunTest(self, test_name, test_duration, test_failure):
+ def HasRunTest(self, test_name, test_cmd, test_duration, test_failure):
testCaseElement = xml.Element("testcase")
- testCaseElement.attrib["name"] = " ".join(test_name)
+ testCaseElement.attrib["name"] = test_name
+ testCaseElement.attrib["cmd"] = test_cmd
testCaseElement.attrib["time"] = str(round(test_duration, 3))
if len(test_failure):
failureElement = xml.Element("failure")
@@ -44,5 +45,5 @@ class JUnitTestOutput:
testCaseElement.append(failureElement)
self.root.append(testCaseElement)
- def FinishAndWrite(self, file):
- xml.ElementTree(self.root).write(file, "UTF-8")
+ def FinishAndWrite(self, f):
+ xml.ElementTree(self.root).write(f, "UTF-8")
diff --git a/deps/v8/tools/testrunner/local/perfdata.py b/deps/v8/tools/testrunner/local/perfdata.py
index 29ebff773a..4cb618b0be 100644
--- a/deps/v8/tools/testrunner/local/perfdata.py
+++ b/deps/v8/tools/testrunner/local/perfdata.py
@@ -62,22 +62,17 @@ class PerfDataStore(object):
self.database.close()
self.closed = True
- def GetKey(self, test):
- """Computes the key used to access data for the given testcase."""
- flags = "".join(test.flags)
- return str("%s.%s.%s" % (test.suitename(), test.path, flags))
-
def FetchPerfData(self, test):
"""Returns the observed duration for |test| as read from the store."""
- key = self.GetKey(test)
+ key = test.get_id()
if key in self.database:
return self.database[key].avg
return None
- def UpdatePerfData(self, test):
- """Updates the persisted value in the store with test.duration."""
- testkey = self.GetKey(test)
- self.RawUpdatePerfData(testkey, test.duration)
+ def UpdatePerfData(self, test, duration):
+ """Updates the persisted value in the store with duration."""
+ testkey = test.get_id()
+ self.RawUpdatePerfData(testkey, duration)
def RawUpdatePerfData(self, testkey, duration):
with self.lock:
@@ -121,7 +116,7 @@ class PerfDataManager(object):
class NullPerfDataStore(object):
- def UpdatePerfData(self, test):
+ def UpdatePerfData(self, test, duration):
pass
def FetchPerfData(self, test):
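
The perfdata change follows from results no longer living on the TestCase: the store is keyed by test.get_id() and the duration is passed in explicitly. A rough fragment, assuming perf_data_manager, context, test and outproc_result come from the surrounding runner code:

    store = perf_data_manager.GetStore(context.arch, context.mode)
    sort_key = store.FetchPerfData(test) or 1.0   # slowest-first sorting
    store.UpdatePerfData(test, outproc_result.output.duration)
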
diff --git a/deps/v8/tools/testrunner/local/pool.py b/deps/v8/tools/testrunner/local/pool.py
index 99996ee3ce..9199b62d8a 100644
--- a/deps/v8/tools/testrunner/local/pool.py
+++ b/deps/v8/tools/testrunner/local/pool.py
@@ -8,6 +8,21 @@ from multiprocessing import Event, Process, Queue
import traceback
+def setup_testing():
+ """For testing only: Use threading under the hood instead of multiprocessing
+ to make coverage work.
+ """
+ global Queue
+ global Event
+ global Process
+ del Queue
+ del Event
+ del Process
+ from Queue import Queue
+ from threading import Event
+ from threading import Thread as Process
+
+
class NormalResult():
def __init__(self, result):
self.result = result
@@ -120,8 +135,8 @@ class Pool():
self.done,
process_context_fn,
process_context_args))
- self.processes.append(p)
p.start()
+ self.processes.append(p)
self.advance(gen)
while self.count > 0:
@@ -145,6 +160,11 @@ class Pool():
else:
yield MaybeResult.create_result(result.result)
self.advance(gen)
+ except KeyboardInterrupt:
+ raise
+ except Exception as e:
+ traceback.print_exc()
+ print(">>> EXCEPTION: %s" % e)
finally:
self.terminate()
if internal_error:
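
setup_testing above is plain global rebinding: a test calls it before constructing a Pool, and the unchanged Pool code then spawns coverage-traceable threads instead of processes. A self-contained illustration of the pattern:

    from multiprocessing import Process

    def use_threads():
        global Process
        from threading import Thread as Process  # same start()/join() API

    def run(fn):
        p = Process(target=fn)
        p.start()
        p.join()

    use_threads()
    run(lambda: None)  # now executes on a thread, visible to coverage
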
diff --git a/deps/v8/tools/testrunner/local/progress.py b/deps/v8/tools/testrunner/local/progress.py
index e57a6e36c9..f6ebddf2e5 100644
--- a/deps/v8/tools/testrunner/local/progress.py
+++ b/deps/v8/tools/testrunner/local/progress.py
@@ -32,12 +32,9 @@ import os
import sys
import time
-from . import execution
from . import junit_output
from . import statusfile
-
-
-ABS_PATH_PREFIX = os.getcwd() + os.sep
+from ..testproc import progress as progress_proc
class ProgressIndicator(object):
@@ -54,33 +51,26 @@ class ProgressIndicator(object):
def Done(self):
pass
- def HasRun(self, test, has_unexpected_output):
+ def HasRun(self, test, output, has_unexpected_output):
pass
def Heartbeat(self):
pass
def PrintFailureHeader(self, test):
- if test.suite.IsNegativeTest(test):
+ if test.output_proc.negative:
negative_marker = '[negative] '
else:
negative_marker = ''
print "=== %(label)s %(negative)s===" % {
- 'label': test.GetLabel(),
- 'negative': negative_marker
+ 'label': test,
+ 'negative': negative_marker,
}
- def _EscapeCommand(self, test):
- command, _ = execution.GetCommand(test, self.runner.context)
- parts = []
- for part in command:
- if ' ' in part:
- # Escape spaces. We may need to escape more characters for this
- # to work properly.
- parts.append('"%s"' % part)
- else:
- parts.append(part)
- return " ".join(parts)
+ def ToProgressIndicatorProc(self):
+ print ('Warning: %s is not available as a processor' %
+ self.__class__.__name__)
+ return None
class IndicatorNotifier(object):
@@ -91,6 +81,9 @@ class IndicatorNotifier(object):
def Register(self, indicator):
self.indicators.append(indicator)
+ def ToProgressIndicatorProcs(self):
+ return [i.ToProgressIndicatorProc() for i in self.indicators]
+
# Forge all generic event-dispatching methods in IndicatorNotifier, which are
# part of the ProgressIndicator interface.
@@ -116,18 +109,19 @@ class SimpleProgressIndicator(ProgressIndicator):
def Done(self):
print
for failed in self.runner.failed:
+ output = self.runner.outputs[failed]
self.PrintFailureHeader(failed)
- if failed.output.stderr:
+ if output.stderr:
print "--- stderr ---"
- print failed.output.stderr.strip()
- if failed.output.stdout:
+ print output.stderr.strip()
+ if output.stdout:
print "--- stdout ---"
- print failed.output.stdout.strip()
- print "Command: %s" % self._EscapeCommand(failed)
- if failed.output.HasCrashed():
- print "exit code: %d" % failed.output.exit_code
+ print output.stdout.strip()
+ print "Command: %s" % failed.cmd.to_string()
+ if output.HasCrashed():
+ print "exit code: %d" % output.exit_code
print "--- CRASHED ---"
- if failed.output.HasTimedOut():
+ if output.HasTimedOut():
print "--- TIMEOUT ---"
if len(self.runner.failed) == 0:
print "==="
@@ -144,33 +138,36 @@ class SimpleProgressIndicator(ProgressIndicator):
class VerboseProgressIndicator(SimpleProgressIndicator):
- def HasRun(self, test, has_unexpected_output):
+ def HasRun(self, test, output, has_unexpected_output):
if has_unexpected_output:
- if test.output.HasCrashed():
+ if output.HasCrashed():
outcome = 'CRASH'
else:
outcome = 'FAIL'
else:
outcome = 'pass'
- print 'Done running %s: %s' % (test.GetLabel(), outcome)
+ print 'Done running %s: %s' % (test, outcome)
sys.stdout.flush()
def Heartbeat(self):
print 'Still working...'
sys.stdout.flush()
+ def ToProgressIndicatorProc(self):
+ return progress_proc.VerboseProgressIndicator()
+
class DotsProgressIndicator(SimpleProgressIndicator):
- def HasRun(self, test, has_unexpected_output):
+ def HasRun(self, test, output, has_unexpected_output):
total = self.runner.succeeded + len(self.runner.failed)
if (total > 1) and (total % 50 == 1):
sys.stdout.write('\n')
if has_unexpected_output:
- if test.output.HasCrashed():
+ if output.HasCrashed():
sys.stdout.write('C')
sys.stdout.flush()
- elif test.output.HasTimedOut():
+ elif output.HasTimedOut():
sys.stdout.write('T')
sys.stdout.flush()
else:
@@ -180,6 +177,9 @@ class DotsProgressIndicator(SimpleProgressIndicator):
sys.stdout.write('.')
sys.stdout.flush()
+ def ToProgressIndicatorProc(self):
+ return progress_proc.DotsProgressIndicator()
+
class CompactProgressIndicator(ProgressIndicator):
"""Abstract base class for {Color,Monochrome}ProgressIndicator"""
@@ -194,22 +194,22 @@ class CompactProgressIndicator(ProgressIndicator):
self.PrintProgress('Done')
print "" # Line break.
- def HasRun(self, test, has_unexpected_output):
- self.PrintProgress(test.GetLabel())
+ def HasRun(self, test, output, has_unexpected_output):
+ self.PrintProgress(str(test))
if has_unexpected_output:
self.ClearLine(self.last_status_length)
self.PrintFailureHeader(test)
- stdout = test.output.stdout.strip()
+ stdout = output.stdout.strip()
if len(stdout):
print self.templates['stdout'] % stdout
- stderr = test.output.stderr.strip()
+ stderr = output.stderr.strip()
if len(stderr):
print self.templates['stderr'] % stderr
- print "Command: %s" % self._EscapeCommand(test)
- if test.output.HasCrashed():
- print "exit code: %d" % test.output.exit_code
+ print "Command: %s" % test.cmd.to_string()
+ if output.HasCrashed():
+ print "exit code: %d" % output.exit_code
print "--- CRASHED ---"
- if test.output.HasTimedOut():
+ if output.HasTimedOut():
print "--- TIMEOUT ---"
def Truncate(self, string, length):
@@ -254,6 +254,9 @@ class ColorProgressIndicator(CompactProgressIndicator):
def ClearLine(self, last_line_length):
print "\033[1K\r",
+ def ToProgressIndicatorProc(self):
+ return progress_proc.ColorProgressIndicator()
+
class MonochromeProgressIndicator(CompactProgressIndicator):
@@ -269,10 +272,15 @@ class MonochromeProgressIndicator(CompactProgressIndicator):
def ClearLine(self, last_line_length):
print ("\r" + (" " * last_line_length) + "\r"),
+ def ToProgressIndicatorProc(self):
+ return progress_proc.MonochromeProgressIndicator()
-class JUnitTestProgressIndicator(ProgressIndicator):
+class JUnitTestProgressIndicator(ProgressIndicator):
def __init__(self, junitout, junittestsuite):
+ super(JUnitTestProgressIndicator, self).__init__()
+ self.junitout = junitout
+ self.junittestsuite = junittestsuite
self.outputter = junit_output.JUnitTestOutput(junittestsuite)
if junitout:
self.outfile = open(junitout, "w")
@@ -284,29 +292,37 @@ class JUnitTestProgressIndicator(ProgressIndicator):
if self.outfile != sys.stdout:
self.outfile.close()
- def HasRun(self, test, has_unexpected_output):
+ def HasRun(self, test, output, has_unexpected_output):
fail_text = ""
if has_unexpected_output:
- stdout = test.output.stdout.strip()
+ stdout = output.stdout.strip()
if len(stdout):
fail_text += "stdout:\n%s\n" % stdout
- stderr = test.output.stderr.strip()
+ stderr = output.stderr.strip()
if len(stderr):
fail_text += "stderr:\n%s\n" % stderr
- fail_text += "Command: %s" % self._EscapeCommand(test)
- if test.output.HasCrashed():
- fail_text += "exit code: %d\n--- CRASHED ---" % test.output.exit_code
- if test.output.HasTimedOut():
+ fail_text += "Command: %s" % test.cmd.to_string()
+ if output.HasCrashed():
+ fail_text += "exit code: %d\n--- CRASHED ---" % output.exit_code
+ if output.HasTimedOut():
fail_text += "--- TIMEOUT ---"
self.outputter.HasRunTest(
- [test.GetLabel()] + self.runner.context.mode_flags + test.flags,
- test.duration,
- fail_text)
+ test_name=str(test),
+ test_cmd=test.cmd.to_string(relative=True),
+ test_duration=output.duration,
+ test_failure=fail_text)
+
+ def ToProgressIndicatorProc(self):
+ if self.outfile != sys.stdout:
+ self.outfile.close()
+ return progress_proc.JUnitTestProgressIndicator(self.junitout,
+ self.junittestsuite)
class JsonTestProgressIndicator(ProgressIndicator):
def __init__(self, json_test_results, arch, mode, random_seed):
+ super(JsonTestProgressIndicator, self).__init__()
self.json_test_results = json_test_results
self.arch = arch
self.mode = mode
@@ -314,6 +330,10 @@ class JsonTestProgressIndicator(ProgressIndicator):
self.results = []
self.tests = []
+ def ToProgressIndicatorProc(self):
+ return progress_proc.JsonTestProgressIndicator(
+ self.json_test_results, self.arch, self.mode, self.random_seed)
+
def Done(self):
complete_results = []
if os.path.exists(self.json_test_results):
@@ -325,20 +345,19 @@ class JsonTestProgressIndicator(ProgressIndicator):
if self.tests:
# Get duration mean.
duration_mean = (
- sum(t.duration for t in self.tests) / float(len(self.tests)))
+ sum(duration for (_, duration) in self.tests) /
+ float(len(self.tests)))
# Sort tests by duration.
- timed_tests = [t for t in self.tests if t.duration is not None]
- timed_tests.sort(lambda a, b: cmp(b.duration, a.duration))
+ self.tests.sort(key=lambda (_, duration): duration, reverse=True)
slowest_tests = [
{
- "name": test.GetLabel(),
- "flags": test.flags,
- "command": self._EscapeCommand(test).replace(ABS_PATH_PREFIX, ""),
- "duration": test.duration,
- "marked_slow": statusfile.IsSlow(
- test.suite.GetStatusFileOutcomes(test)),
- } for test in timed_tests[:20]
+ "name": str(test),
+ "flags": test.cmd.args,
+ "command": test.cmd.to_string(relative=True),
+ "duration": duration,
+ "marked_slow": test.is_slow,
+ } for (test, duration) in self.tests[:20]
]
complete_results.append({
@@ -353,30 +372,30 @@ class JsonTestProgressIndicator(ProgressIndicator):
with open(self.json_test_results, "w") as f:
f.write(json.dumps(complete_results))
- def HasRun(self, test, has_unexpected_output):
+ def HasRun(self, test, output, has_unexpected_output):
# Buffer all tests for sorting the durations in the end.
- self.tests.append(test)
+ self.tests.append((test, output.duration))
if not has_unexpected_output:
# Omit tests that run as expected. Passing tests of reruns after failures
# will have unexpected_output reported here as well.
return
self.results.append({
- "name": test.GetLabel(),
- "flags": test.flags,
- "command": self._EscapeCommand(test).replace(ABS_PATH_PREFIX, ""),
+ "name": str(test),
+ "flags": test.cmd.args,
+ "command": test.cmd.to_string(relative=True),
"run": test.run,
- "stdout": test.output.stdout,
- "stderr": test.output.stderr,
- "exit_code": test.output.exit_code,
- "result": test.suite.GetOutcome(test),
- "expected": test.suite.GetExpectedOutcomes(test),
- "duration": test.duration,
+ "stdout": output.stdout,
+ "stderr": output.stderr,
+ "exit_code": output.exit_code,
+ "result": test.output_proc.get_outcome(output),
+ "expected": test.expected_outcomes,
+ "duration": output.duration,
# TODO(machenbach): This stores only the global random seed from the
# context and not possible overrides when using random-seed stress.
"random_seed": self.random_seed,
- "target_name": test.suite.GetShellForTestCase(test),
+ "target_name": test.get_shell(),
"variant": test.variant,
})
@@ -384,6 +403,7 @@ class JsonTestProgressIndicator(ProgressIndicator):
class FlakinessTestProgressIndicator(ProgressIndicator):
def __init__(self, json_test_results):
+ super(FlakinessTestProgressIndicator, self).__init__()
self.json_test_results = json_test_results
self.results = {}
self.summary = {
@@ -405,28 +425,23 @@ class FlakinessTestProgressIndicator(ProgressIndicator):
"version": 3,
}, f)
- def HasRun(self, test, has_unexpected_output):
- key = "/".join(
- sorted(flag.lstrip("-")
- for flag in self.runner.context.extra_flags + test.flags) +
- ["test", test.GetLabel()],
- )
- outcome = test.suite.GetOutcome(test)
+ def HasRun(self, test, output, has_unexpected_output):
+ key = test.get_id()
+ outcome = test.output_proc.get_outcome(output)
assert outcome in ["PASS", "FAIL", "CRASH", "TIMEOUT"]
if test.run == 1:
# First run of this test.
- expected_outcomes = test.suite.GetExpectedOutcomes(test)
self.results[key] = {
"actual": outcome,
- "expected": " ".join(expected_outcomes),
- "times": [test.duration],
+ "expected": " ".join(test.expected_outcomes),
+ "times": [output.duration],
}
self.summary[outcome] = self.summary[outcome] + 1
else:
# This is a rerun and a previous result exists.
result = self.results[key]
result["actual"] = "%s %s" % (result["actual"], outcome)
- result["times"].append(test.duration)
+ result["times"].append(output.duration)
PROGRESS_INDICATORS = {
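
With the ToProgressIndicatorProc hooks added above, each legacy indicator can translate itself into a testproc-based processor, and the notifier simply maps the conversion over everything registered (unavailable indicators return None and print a warning). A sketch using the constructors shown above; file name and configuration are illustrative:

    notifier = IndicatorNotifier()
    notifier.Register(VerboseProgressIndicator())
    notifier.Register(JsonTestProgressIndicator(
        'results.json', 'x64', 'release', random_seed=0))
    procs = [p for p in notifier.ToProgressIndicatorProcs() if p is not None]
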
diff --git a/deps/v8/tools/testrunner/local/statusfile.py b/deps/v8/tools/testrunner/local/statusfile.py
index 7caf0711ca..988750d6b4 100644
--- a/deps/v8/tools/testrunner/local/statusfile.py
+++ b/deps/v8/tools/testrunner/local/statusfile.py
@@ -44,14 +44,13 @@ FAIL_SLOPPY = "FAIL_SLOPPY"
# Modifiers
SKIP = "SKIP"
SLOW = "SLOW"
-FAST_VARIANTS = "FAST_VARIANTS"
NO_VARIANTS = "NO_VARIANTS"
ALWAYS = "ALWAYS"
KEYWORDS = {}
-for key in [SKIP, FAIL, PASS, CRASH, SLOW, FAIL_OK, FAST_VARIANTS, NO_VARIANTS,
- FAIL_SLOPPY, ALWAYS]:
+for key in [SKIP, FAIL, PASS, CRASH, SLOW, FAIL_OK, NO_VARIANTS, FAIL_SLOPPY,
+ ALWAYS]:
KEYWORDS[key] = key
# Support arches, modes to be written as keywords instead of strings.
@@ -67,31 +66,73 @@ for var in ["debug", "release", "big", "little",
for var in ALL_VARIANTS:
VARIABLES[var] = var
+class StatusFile(object):
+ def __init__(self, path, variables):
+ """
+ _rules: {variant: {test name: [rule]}}
+ _prefix_rules: {variant: {test name prefix: [rule]}}
+ """
+ with open(path) as f:
+ self._rules, self._prefix_rules = ReadStatusFile(f.read(), variables)
-def DoSkip(outcomes):
- return SKIP in outcomes
+ def get_outcomes(self, testname, variant=None):
+ """Merges variant dependent and independent rules."""
+ outcomes = frozenset()
+ for key in set([variant or '', '']):
+ rules = self._rules.get(key, {})
+ prefix_rules = self._prefix_rules.get(key, {})
-def IsSlow(outcomes):
- return SLOW in outcomes
+ if testname in rules:
+ outcomes |= rules[testname]
+ for prefix in prefix_rules:
+ if testname.startswith(prefix):
+ outcomes |= prefix_rules[prefix]
-def OnlyStandardVariant(outcomes):
- return NO_VARIANTS in outcomes
+ return outcomes
+ def warn_unused_rules(self, tests, check_variant_rules=False):
+ """Finds and prints unused rules in status file.
-def OnlyFastVariants(outcomes):
- return FAST_VARIANTS in outcomes
+ Rule X is unused when it doesn't apply to any tests, which can also mean
+ that all matching tests were skipped by another rule before evaluating X.
+ Args:
+ tests: list of pairs (testname, variant)
+ check_variant_rules: if set variant dependent rules are checked
+ """
-def IsPassOrFail(outcomes):
- return (PASS in outcomes and
- FAIL in outcomes and
- CRASH not in outcomes)
-
-
-def IsFailOk(outcomes):
- return FAIL_OK in outcomes
+ if check_variant_rules:
+ variants = list(ALL_VARIANTS)
+ else:
+ variants = ['']
+ used_rules = set()
+
+ for testname, variant in tests:
+ variant = variant or ''
+
+ if testname in self._rules.get(variant, {}):
+ used_rules.add((testname, variant))
+ if SKIP in self._rules[variant][testname]:
+ continue
+
+ for prefix in self._prefix_rules.get(variant, {}):
+ if testname.startswith(prefix):
+ used_rules.add((prefix, variant))
+ if SKIP in self._prefix_rules[variant][prefix]:
+ break
+
+ for variant in variants:
+ for rule, value in (
+ list(self._rules.get(variant, {}).iteritems()) +
+ list(self._prefix_rules.get(variant, {}).iteritems())):
+ if (rule, variant) not in used_rules:
+ if variant == '':
+ variant_desc = 'variant independent'
+ else:
+ variant_desc = 'variant: %s' % variant
+ print 'Unused rule: %s -> %s (%s)' % (rule, value, variant_desc)
def _JoinsPassAndFail(outcomes1, outcomes2):
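The variant merge in get_outcomes above can be exercised standalone; this is a minimal sketch with made-up rules (real rules come from parsed .status files):

# Standalone sketch of the rule merge in StatusFile.get_outcomes.
_rules = {
    '': {'foo/bar': frozenset(['PASS', 'SLOW'])},
    'stress': {'foo/bar': frozenset(['FAIL'])},
}
_prefix_rules = {'': {'baz/': frozenset(['SKIP'])}}

def get_outcomes(testname, variant=None):
    outcomes = frozenset()
    for key in set([variant or '', '']):
        outcomes |= _rules.get(key, {}).get(testname, frozenset())
        for prefix, rule in _prefix_rules.get(key, {}).items():
            if testname.startswith(prefix):
                outcomes |= rule
    return outcomes

assert get_outcomes('foo/bar', 'stress') == frozenset(['PASS', 'SLOW', 'FAIL'])
assert get_outcomes('baz/qux') == frozenset(['SKIP'])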
diff --git a/deps/v8/tools/testrunner/local/testsuite.py b/deps/v8/tools/testrunner/local/testsuite.py
index 946e89a3fc..6a9e9831ce 100644
--- a/deps/v8/tools/testrunner/local/testsuite.py
+++ b/deps/v8/tools/testrunner/local/testsuite.py
@@ -30,55 +30,65 @@ import fnmatch
import imp
import os
-from . import commands
+from . import command
from . import statusfile
from . import utils
-from ..objects import testcase
-from variants import ALL_VARIANTS, ALL_VARIANT_FLAGS, FAST_VARIANT_FLAGS
+from ..objects.testcase import TestCase
+from variants import ALL_VARIANTS, ALL_VARIANT_FLAGS
-FAST_VARIANTS = set(["default", "turbofan"])
STANDARD_VARIANT = set(["default"])
-class VariantGenerator(object):
+class LegacyVariantsGenerator(object):
def __init__(self, suite, variants):
self.suite = suite
self.all_variants = ALL_VARIANTS & variants
- self.fast_variants = FAST_VARIANTS & variants
self.standard_variant = STANDARD_VARIANT & variants
+ def FilterVariantsByTest(self, test):
+ if test.only_standard_variant:
+ return self.standard_variant
+ return self.all_variants
+
+ def GetFlagSets(self, test, variant):
+ return ALL_VARIANT_FLAGS[variant]
+
+
+class StandardLegacyVariantsGenerator(LegacyVariantsGenerator):
def FilterVariantsByTest(self, testcase):
- result = self.all_variants
- outcomes = testcase.suite.GetStatusFileOutcomes(testcase)
- if outcomes:
- if statusfile.OnlyStandardVariant(outcomes):
- return self.standard_variant
- if statusfile.OnlyFastVariants(outcomes):
- result = self.fast_variants
- return result
-
- def GetFlagSets(self, testcase, variant):
- outcomes = testcase.suite.GetStatusFileOutcomes(testcase)
- if outcomes and statusfile.OnlyFastVariants(outcomes):
- return FAST_VARIANT_FLAGS[variant]
- else:
- return ALL_VARIANT_FLAGS[variant]
+ return self.standard_variant
-class TestSuite(object):
+class VariantsGenerator(object):
+ def __init__(self, variants):
+ self._all_variants = [v for v in variants if v in ALL_VARIANTS]
+ self._standard_variant = [v for v in variants if v in STANDARD_VARIANT]
+
+ def gen(self, test):
+ """Generator producing (variant, flags, procid suffix) tuples."""
+ flags_set = self._get_flags_set(test)
+ for n, variant in enumerate(self._get_variants(test)):
+ yield (variant, flags_set[variant][0], n)
+
+ def _get_flags_set(self, test):
+ return ALL_VARIANT_FLAGS
+ def _get_variants(self, test):
+ if test.only_standard_variant:
+ return self._standard_variant
+ return self._all_variants
+
+
+class TestSuite(object):
@staticmethod
- def LoadTestSuite(root, global_init=True):
+ def LoadTestSuite(root):
name = root.split(os.path.sep)[-1]
f = None
try:
(f, pathname, description) = imp.find_module("testcfg", [root])
module = imp.load_module(name + "_testcfg", f, pathname, description)
return module.GetSuite(name, root)
- except ImportError:
- # Use default if no testcfg is present.
- return GoogleTestSuite(name, root)
finally:
if f:
f.close()
@@ -88,69 +98,40 @@ class TestSuite(object):
self.name = name # string
self.root = root # string containing path
self.tests = None # list of TestCase objects
- self.rules = None # {variant: {test name: [rule]}}
- self.prefix_rules = None # {variant: {test name prefix: [rule]}}
- self.total_duration = None # float, assigned on demand
-
- self._outcomes_cache = dict()
-
- def suffix(self):
- return ".js"
+ self.statusfile = None
def status_file(self):
return "%s/%s.status" % (self.root, self.name)
- # Used in the status file and for stdout printing.
- def CommonTestName(self, testcase):
- if utils.IsWindows():
- return testcase.path.replace("\\", "/")
- else:
- return testcase.path
-
def ListTests(self, context):
raise NotImplementedError
- def _VariantGeneratorFactory(self):
+ def _LegacyVariantsGeneratorFactory(self):
"""The variant generator class to be used."""
- return VariantGenerator
+ return LegacyVariantsGenerator
- def CreateVariantGenerator(self, variants):
+ def CreateLegacyVariantsGenerator(self, variants):
"""Return a generator for the testing variants of this suite.
Args:
variants: List of variant names to be run as specified by the test
runner.
- Returns: An object of type VariantGenerator.
+ Returns: An object of type LegacyVariantsGenerator.
"""
- return self._VariantGeneratorFactory()(self, set(variants))
+ return self._LegacyVariantsGeneratorFactory()(self, set(variants))
- def PrepareSources(self):
- """Called once before multiprocessing for doing file-system operations.
+ def get_variants_gen(self, variants):
+ return self._variants_gen_class()(variants)
- This should not access the network. For network access use the method
- below.
- """
- pass
+ def _variants_gen_class(self):
+ return VariantsGenerator
def ReadStatusFile(self, variables):
- with open(self.status_file()) as f:
- self.rules, self.prefix_rules = (
- statusfile.ReadStatusFile(f.read(), variables))
+ self.statusfile = statusfile.StatusFile(self.status_file(), variables)
def ReadTestCases(self, context):
self.tests = self.ListTests(context)
- def GetStatusfileFlags(self, test):
- """Gets runtime flags from a status file.
-
- Every outcome that starts with "--" is a flag. Status file has to be loaded
- before using this function.
- """
- flags = []
- for outcome in self.GetStatusFileOutcomes(test):
- if outcome.startswith('--'):
- flags.append(outcome)
- return flags
def FilterTestCasesByStatus(self,
slow_tests_mode=None,
@@ -179,58 +160,16 @@ class TestSuite(object):
(mode == 'skip' and pass_fail))
def _compliant(test):
- outcomes = self.GetStatusFileOutcomes(test)
- if statusfile.DoSkip(outcomes):
+ if test.do_skip:
return False
- if _skip_slow(statusfile.IsSlow(outcomes), slow_tests_mode):
+ if _skip_slow(test.is_slow, slow_tests_mode):
return False
- if _skip_pass_fail(statusfile.IsPassOrFail(outcomes),
- pass_fail_tests_mode):
+ if _skip_pass_fail(test.is_pass_or_fail, pass_fail_tests_mode):
return False
return True
self.tests = filter(_compliant, self.tests)
- def WarnUnusedRules(self, check_variant_rules=False):
- """Finds and prints unused rules in status file.
-
- Rule X is unused when it doesn't apply to any tests, which can also mean
- that all matching tests were skipped by another rule before evaluating X.
-
- Status file has to be loaded before using this function.
- """
-
- if check_variant_rules:
- variants = list(ALL_VARIANTS)
- else:
- variants = ['']
- used_rules = set()
-
- for t in self.tests:
- testname = self.CommonTestName(t)
- variant = t.variant or ""
-
- if testname in self.rules.get(variant, {}):
- used_rules.add((testname, variant))
- if statusfile.DoSkip(self.rules[variant][testname]):
- continue
-
- for prefix in self.prefix_rules.get(variant, {}):
- if testname.startswith(prefix):
- used_rules.add((prefix, variant))
- if statusfile.DoSkip(self.prefix_rules[variant][prefix]):
- break
-
- for variant in variants:
- for rule, value in (list(self.rules.get(variant, {}).iteritems()) +
- list(self.prefix_rules.get(variant, {}).iteritems())):
- if (rule, variant) not in used_rules:
- if variant == '':
- variant_desc = 'variant independent'
- else:
- variant_desc = 'variant: %s' % variant
- print('Unused rule: %s -> %s (%s)' % (rule, value, variant_desc))
-
def FilterTestCasesByArgs(self, args):
"""Filter test cases based on command-line arguments.
@@ -256,167 +195,14 @@ class TestSuite(object):
break
self.tests = filtered
- def GetExpectedOutcomes(self, testcase):
- """Gets expected outcomes from status file.
+ def _create_test(self, path, **kwargs):
+ test = self._test_class()(self, path, self._path_to_name(path), **kwargs)
+ return test
- It differs from GetStatusFileOutcomes by selecting only outcomes that can
- be result of test execution.
- Status file has to be loaded before using this function.
- """
- outcomes = self.GetStatusFileOutcomes(testcase)
-
- expected = []
- if (statusfile.FAIL in outcomes or
- statusfile.FAIL_OK in outcomes):
- expected.append(statusfile.FAIL)
-
- if statusfile.CRASH in outcomes:
- expected.append(statusfile.CRASH)
-
- if statusfile.PASS in outcomes:
- expected.append(statusfile.PASS)
-
- return expected or [statusfile.PASS]
-
- def GetStatusFileOutcomes(self, testcase):
- """Gets outcomes from status file.
-
- Merges variant dependent and independent rules. Status file has to be loaded
- before using this function.
- """
- variant = testcase.variant or ''
- testname = self.CommonTestName(testcase)
- cache_key = '%s$%s' % (testname, variant)
-
- if cache_key not in self._outcomes_cache:
- # Load statusfile to get outcomes for the first time.
- assert(self.rules is not None)
- assert(self.prefix_rules is not None)
-
- outcomes = frozenset()
-
- for key in set([variant, '']):
- rules = self.rules.get(key, {})
- prefix_rules = self.prefix_rules.get(key, {})
-
- if testname in rules:
- outcomes |= rules[testname]
-
- for prefix in prefix_rules:
- if testname.startswith(prefix):
- outcomes |= prefix_rules[prefix]
-
- self._outcomes_cache[cache_key] = outcomes
-
- return self._outcomes_cache[cache_key]
-
- def GetShellForTestCase(self, testcase):
- """Returns shell to be executed for this test case."""
- return 'd8'
-
- def GetParametersForTestCase(self, testcase, context):
- """Returns a tuple of (files, flags, env) for this test case."""
+ def _test_class(self):
raise NotImplementedError
- def GetSourceForTest(self, testcase):
- return "(no source available)"
-
- def IsFailureOutput(self, testcase):
- return testcase.output.exit_code != 0
-
- def IsNegativeTest(self, testcase):
- return False
-
- def HasFailed(self, testcase):
- execution_failed = self.IsFailureOutput(testcase)
- if self.IsNegativeTest(testcase):
- return not execution_failed
- else:
- return execution_failed
-
- def GetOutcome(self, testcase):
- if testcase.output.HasCrashed():
- return statusfile.CRASH
- elif testcase.output.HasTimedOut():
- return statusfile.TIMEOUT
- elif self.HasFailed(testcase):
- return statusfile.FAIL
- else:
- return statusfile.PASS
-
- def HasUnexpectedOutput(self, testcase):
- return self.GetOutcome(testcase) not in self.GetExpectedOutcomes(testcase)
-
- def StripOutputForTransmit(self, testcase):
- if not self.HasUnexpectedOutput(testcase):
- testcase.output.stdout = ""
- testcase.output.stderr = ""
-
- def CalculateTotalDuration(self):
- self.total_duration = 0.0
- for t in self.tests:
- self.total_duration += t.duration
- return self.total_duration
-
-
-class StandardVariantGenerator(VariantGenerator):
- def FilterVariantsByTest(self, testcase):
- return self.standard_variant
-
-
-class GoogleTestSuite(TestSuite):
- def __init__(self, name, root):
- super(GoogleTestSuite, self).__init__(name, root)
-
- def ListTests(self, context):
- shell = os.path.abspath(
- os.path.join(context.shell_dir, self.GetShellForTestCase(None)))
+ def _path_to_name(self, path):
if utils.IsWindows():
- shell += ".exe"
-
- output = None
- for i in xrange(3): # Try 3 times in case of errors.
- cmd = (
- context.command_prefix +
- [shell, "--gtest_list_tests"] +
- context.extra_flags
- )
- output = commands.Execute(cmd)
- if output.exit_code == 0:
- break
- print "Test executable failed to list the tests (try %d).\n\nCmd:" % i
- print ' '.join(cmd)
- print "\nStdout:"
- print output.stdout
- print "\nStderr:"
- print output.stderr
- print "\nExit code: %d" % output.exit_code
- else:
- raise Exception("Test executable failed to list the tests.")
-
- tests = []
- test_case = ''
- for line in output.stdout.splitlines():
- test_desc = line.strip().split()[0]
- if test_desc.endswith('.'):
- test_case = test_desc
- elif test_case and test_desc:
- test = testcase.TestCase(self, test_case + test_desc)
- tests.append(test)
- tests.sort(key=lambda t: t.path)
- return tests
-
- def GetParametersForTestCase(self, testcase, context):
- flags = (
- testcase.flags +
- ["--gtest_filter=" + testcase.path] +
- ["--gtest_random_seed=%s" % context.random_seed] +
- ["--gtest_print_time=0"] +
- context.mode_flags)
- return [], flags, {}
-
- def _VariantGeneratorFactory(self):
- return StandardVariantGenerator
-
- def GetShellForTestCase(self, testcase):
- return self.name
+ return path.replace("\\", "/")
+ return path
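The new VariantsGenerator.gen above yields (variant, flags, procid suffix) tuples; a minimal re-implementation of that contract, with a hypothetical flag table, behaves like this:

# Sketch of the VariantsGenerator.gen contract (flag sets are made up).
ALL_VARIANT_FLAGS = {'default': [[]],
                     'stress': [['--stress-opt', '--always-opt']]}

def gen(variants):
    for n, variant in enumerate(variants):
        yield (variant, ALL_VARIANT_FLAGS[variant][0], n)

for variant, flags, suffix in gen(['default', 'stress']):
    print('%s -> %s (procid suffix %d)' % (variant, flags, suffix))
# default -> [] (procid suffix 0)
# stress -> ['--stress-opt', '--always-opt'] (procid suffix 1)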
diff --git a/deps/v8/tools/testrunner/local/testsuite_unittest.py b/deps/v8/tools/testrunner/local/testsuite_unittest.py
index a8483b9fc0..efefe4c533 100755
--- a/deps/v8/tools/testrunner/local/testsuite_unittest.py
+++ b/deps/v8/tools/testrunner/local/testsuite_unittest.py
@@ -19,10 +19,6 @@ from testrunner.objects.testcase import TestCase
class TestSuiteTest(unittest.TestCase):
def test_filter_testcases_by_status_first_pass(self):
suite = TestSuite('foo', 'bar')
- suite.tests = [
- TestCase(suite, 'foo/bar'),
- TestCase(suite, 'baz/bar'),
- ]
suite.rules = {
'': {
'foo/bar': set(['PASS', 'SKIP']),
@@ -34,27 +30,22 @@ class TestSuiteTest(unittest.TestCase):
'baz/': set(['PASS', 'SLOW']),
},
}
+ suite.tests = [
+ TestCase(suite, 'foo/bar', 'foo/bar'),
+ TestCase(suite, 'baz/bar', 'baz/bar'),
+ ]
suite.FilterTestCasesByStatus()
self.assertEquals(
- [TestCase(suite, 'baz/bar')],
+ [TestCase(suite, 'baz/bar', 'baz/bar')],
suite.tests,
)
- outcomes = suite.GetStatusFileOutcomes(suite.tests[0])
+ outcomes = suite.GetStatusFileOutcomes(suite.tests[0].name,
+ suite.tests[0].variant)
self.assertEquals(set(['PASS', 'FAIL', 'SLOW']), outcomes)
def test_filter_testcases_by_status_second_pass(self):
suite = TestSuite('foo', 'bar')
- test1 = TestCase(suite, 'foo/bar')
- test2 = TestCase(suite, 'baz/bar')
-
- suite.tests = [
- test1.CopyAddingFlags(variant='default', flags=[]),
- test1.CopyAddingFlags(variant='stress', flags=['-v']),
- test2.CopyAddingFlags(variant='default', flags=[]),
- test2.CopyAddingFlags(variant='stress', flags=['-v']),
- ]
-
suite.rules = {
'': {
'foo/bar': set(['PREV']),
@@ -78,30 +69,38 @@ class TestSuiteTest(unittest.TestCase):
'foo/': set(['PASS', 'SLOW']),
},
}
+
+ test1 = TestCase(suite, 'foo/bar', 'foo/bar')
+ test2 = TestCase(suite, 'baz/bar', 'baz/bar')
+ suite.tests = [
+ test1.create_variant(variant='default', flags=[]),
+ test1.create_variant(variant='stress', flags=['-v']),
+ test2.create_variant(variant='default', flags=[]),
+ test2.create_variant(variant='stress', flags=['-v']),
+ ]
+
suite.FilterTestCasesByStatus()
self.assertEquals(
[
- TestCase(suite, 'foo/bar', flags=['-v']),
- TestCase(suite, 'baz/bar'),
+ TestCase(suite, 'foo/bar', 'foo/bar').create_variant(None, ['-v']),
+ TestCase(suite, 'baz/bar', 'baz/bar'),
],
suite.tests,
)
self.assertEquals(
set(['PREV', 'PASS', 'SLOW']),
- suite.GetStatusFileOutcomes(suite.tests[0]),
+ suite.GetStatusFileOutcomes(suite.tests[0].name,
+ suite.tests[0].variant),
)
self.assertEquals(
set(['PREV', 'PASS', 'FAIL', 'SLOW']),
- suite.GetStatusFileOutcomes(suite.tests[1]),
+ suite.GetStatusFileOutcomes(suite.tests[1].name,
+ suite.tests[1].variant),
)
def test_fail_ok_outcome(self):
suite = TestSuite('foo', 'bar')
- suite.tests = [
- TestCase(suite, 'foo/bar'),
- TestCase(suite, 'baz/bar'),
- ]
suite.rules = {
'': {
'foo/bar': set(['FAIL_OK']),
@@ -109,10 +108,13 @@ class TestSuiteTest(unittest.TestCase):
},
}
suite.prefix_rules = {}
+ suite.tests = [
+ TestCase(suite, 'foo/bar', 'foo/bar'),
+ TestCase(suite, 'baz/bar', 'baz/bar'),
+ ]
for t in suite.tests:
- expected_outcomes = suite.GetExpectedOutcomes(t)
- self.assertEquals(['FAIL'], expected_outcomes)
+ self.assertEquals(['FAIL'], t.expected_outcomes)
if __name__ == '__main__':
diff --git a/deps/v8/tools/testrunner/local/utils.py b/deps/v8/tools/testrunner/local/utils.py
index 3e79e44afa..bf8c3d9f7e 100644
--- a/deps/v8/tools/testrunner/local/utils.py
+++ b/deps/v8/tools/testrunner/local/utils.py
@@ -26,10 +26,10 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import os
from os.path import exists
from os.path import isdir
from os.path import join
+import os
import platform
import re
import subprocess
diff --git a/deps/v8/tools/testrunner/local/variants.py b/deps/v8/tools/testrunner/local/variants.py
index c8c7ce64a8..f1e9ad301e 100644
--- a/deps/v8/tools/testrunner/local/variants.py
+++ b/deps/v8/tools/testrunner/local/variants.py
@@ -4,38 +4,26 @@
# Use this to run several variants of the tests.
ALL_VARIANT_FLAGS = {
+ "code_serializer": [["--cache=code"]],
"default": [[]],
"future": [["--future"]],
+  # Alias of the exhaustive variants, additionally triggering new test-framework features.
+ "infra_staging": [[]],
"liftoff": [["--liftoff"]],
- "stress": [["--stress-opt", "--always-opt"]],
- # TODO(6792): Write protected code has been temporary added to the below
- # variant until the feature has been enabled (or staged) by default.
- "stress_incremental_marking": [["--stress-incremental-marking", "--write-protect-code-memory"]],
- # No optimization means disable all optimizations. OptimizeFunctionOnNextCall
- # would not force optimization too. It turns into a Nop. Please see
- # https://chromium-review.googlesource.com/c/452620/ for more discussion.
- "nooptimization": [["--noopt"]],
- "stress_background_compile": [["--background-compile", "--stress-background-compile"]],
- "wasm_traps": [["--wasm_trap_handler", "--invoke-weak-callbacks", "--wasm-jit-to-native"]],
-}
-
-# FAST_VARIANTS implies no --always-opt.
-FAST_VARIANT_FLAGS = {
- "default": [[]],
- "future": [["--future"]],
- "liftoff": [["--liftoff"]],
- "stress": [["--stress-opt"]],
- # TODO(6792): Write protected code has been temporary added to the below
- # variant until the feature has been enabled (or staged) by default.
- "stress_incremental_marking": [["--stress-incremental-marking", "--write-protect-code-memory"]],
+ "minor_mc": [["--minor-mc"]],
# No optimization means disable all optimizations. OptimizeFunctionOnNextCall
# would not force optimization too. It turns into a Nop. Please see
# https://chromium-review.googlesource.com/c/452620/ for more discussion.
"nooptimization": [["--noopt"]],
+ "slow_path": [["--force-slow-path"]],
+ "stress": [["--stress-opt", "--always-opt"]],
"stress_background_compile": [["--background-compile", "--stress-background-compile"]],
+ "stress_incremental_marking": [["--stress-incremental-marking"]],
+ # Trigger stress sampling allocation profiler with sample interval = 2^14
+ "stress_sampling": [["--stress-sampling-allocation-profiler=16384"]],
+ "trusted": [["--no-untrusted-code-mitigations"]],
"wasm_traps": [["--wasm_trap_handler", "--invoke-weak-callbacks", "--wasm-jit-to-native"]],
+ "wasm_no_native": [["--no-wasm-jit-to-native"]],
}
-ALL_VARIANTS = set(["default", "future", "liftoff", "stress",
- "stress_incremental_marking", "nooptimization",
- "stress_background_compile", "wasm_traps"])
+ALL_VARIANTS = set(ALL_VARIANT_FLAGS.keys())
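Deriving ALL_VARIANTS from the flag dictionary means the two can no longer drift apart; a quick sanity check against a truncated copy of the table:

# With ALL_VARIANTS = set(ALL_VARIANT_FLAGS.keys()), adding a variant is
# a single-entry change (table truncated for illustration).
ALL_VARIANT_FLAGS = {'default': [[]], 'slow_path': [['--force-slow-path']]}
ALL_VARIANTS = set(ALL_VARIANT_FLAGS.keys())
assert 'slow_path' in ALL_VARIANTS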
diff --git a/deps/v8/tools/testrunner/local/verbose.py b/deps/v8/tools/testrunner/local/verbose.py
index f28398fa42..49e808588c 100644
--- a/deps/v8/tools/testrunner/local/verbose.py
+++ b/deps/v8/tools/testrunner/local/verbose.py
@@ -38,28 +38,30 @@ REPORT_TEMPLATE = (
* %(nocrash)4d tests are expected to be flaky but not crash
* %(pass)4d tests are expected to pass
* %(fail_ok)4d tests are expected to fail that we won't fix
- * %(fail)4d tests are expected to fail that we should fix""")
+ * %(fail)4d tests are expected to fail that we should fix
+ * %(crash)4d tests are expected to crash
+""")
+# TODO(majeski): Turn it into an observer.
def PrintReport(tests):
total = len(tests)
- skipped = nocrash = passes = fail_ok = fail = 0
+ skipped = nocrash = passes = fail_ok = fail = crash = 0
for t in tests:
- outcomes = t.suite.GetStatusFileOutcomes(t)
- if not outcomes:
- passes += 1
- continue
- if statusfile.DoSkip(outcomes):
+ if t.do_skip:
skipped += 1
- continue
- if statusfile.IsPassOrFail(outcomes):
+ elif t.is_pass_or_fail:
nocrash += 1
- if list(outcomes) == [statusfile.PASS]:
- passes += 1
- if statusfile.IsFailOk(outcomes):
+ elif t.is_fail_ok:
fail_ok += 1
- if list(outcomes) == [statusfile.FAIL]:
+ elif t.expected_outcomes == [statusfile.PASS]:
+ passes += 1
+ elif t.expected_outcomes == [statusfile.FAIL]:
fail += 1
+ elif t.expected_outcomes == [statusfile.CRASH]:
+ crash += 1
+ else:
+      assert False # Unreachable. TODO: check this in the outcomes parsing phase.
print REPORT_TEMPLATE % {
"total": total,
@@ -67,18 +69,19 @@ def PrintReport(tests):
"nocrash": nocrash,
"pass": passes,
"fail_ok": fail_ok,
- "fail": fail
+ "fail": fail,
+ "crash": crash,
}
def PrintTestSource(tests):
for test in tests:
- suite = test.suite
- source = suite.GetSourceForTest(test).strip()
- if len(source) > 0:
- print "--- begin source: %s/%s ---" % (suite.name, test.path)
- print source
- print "--- end source: %s/%s ---" % (suite.name, test.path)
+ print "--- begin source: %s ---" % test
+ if test.is_source_available():
+ print test.get_source()
+ else:
+ print '(no source available)'
+ print "--- end source: %s ---" % test
def FormatTime(d):
@@ -86,16 +89,16 @@ def FormatTime(d):
return time.strftime("%M:%S.", time.gmtime(d)) + ("%03i" % millis)
-def PrintTestDurations(suites, overall_time):
+def PrintTestDurations(suites, outputs, overall_time):
# Write the times to stderr to make it easy to separate from the
# test output.
print
sys.stderr.write("--- Total time: %s ---\n" % FormatTime(overall_time))
- timed_tests = [ t for s in suites for t in s.tests
- if t.duration is not None ]
- timed_tests.sort(lambda a, b: cmp(b.duration, a.duration))
+ timed_tests = [(t, outputs[t].duration) for s in suites for t in s.tests
+ if t in outputs]
+ timed_tests.sort(key=lambda (_, duration): duration, reverse=True)
index = 1
- for entry in timed_tests[:20]:
- t = FormatTime(entry.duration)
- sys.stderr.write("%4i (%s) %s\n" % (index, t, entry.GetLabel()))
+ for test, duration in timed_tests[:20]:
+ t = FormatTime(duration)
+ sys.stderr.write("%4i (%s) %s\n" % (index, t, test))
index += 1
diff --git a/deps/v8/tools/testrunner/objects/context.py b/deps/v8/tools/testrunner/objects/context.py
index fb5d717728..a3dd56d2dd 100644
--- a/deps/v8/tools/testrunner/objects/context.py
+++ b/deps/v8/tools/testrunner/objects/context.py
@@ -29,8 +29,8 @@
class Context():
def __init__(self, arch, mode, shell_dir, mode_flags, verbose, timeout,
isolates, command_prefix, extra_flags, noi18n, random_seed,
- no_sorting, rerun_failures_count, rerun_failures_max,
- predictable, no_harness, use_perf_data, sancov_dir):
+ no_sorting, rerun_failures_count, rerun_failures_max, no_harness,
+ use_perf_data, sancov_dir, infra_staging=False):
self.arch = arch
self.mode = mode
self.shell_dir = shell_dir
@@ -45,7 +45,7 @@ class Context():
self.no_sorting = no_sorting
self.rerun_failures_count = rerun_failures_count
self.rerun_failures_max = rerun_failures_max
- self.predictable = predictable
self.no_harness = no_harness
self.use_perf_data = use_perf_data
self.sancov_dir = sancov_dir
+ self.infra_staging = infra_staging
diff --git a/deps/v8/tools/testrunner/objects/output.py b/deps/v8/tools/testrunner/objects/output.py
index 99d6137698..adc33c9f12 100644
--- a/deps/v8/tools/testrunner/objects/output.py
+++ b/deps/v8/tools/testrunner/objects/output.py
@@ -32,12 +32,13 @@ from ..local import utils
class Output(object):
- def __init__(self, exit_code, timed_out, stdout, stderr, pid):
+ def __init__(self, exit_code, timed_out, stdout, stderr, pid, duration):
self.exit_code = exit_code
self.timed_out = timed_out
self.stdout = stdout
self.stderr = stderr
self.pid = pid
+ self.duration = duration
def HasCrashed(self):
if utils.IsWindows():
diff --git a/deps/v8/tools/testrunner/objects/predictable.py b/deps/v8/tools/testrunner/objects/predictable.py
new file mode 100644
index 0000000000..ad93077be9
--- /dev/null
+++ b/deps/v8/tools/testrunner/objects/predictable.py
@@ -0,0 +1,57 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from ..local import statusfile
+from ..outproc import base as outproc_base
+from ..testproc.result import Result
+
+
+# Only check the exit code of the predictable_wrapper in
+# verify-predictable mode. Negative tests are not supported as they
+# usually also don't print allocation hashes. There are two versions of
+# negative tests: one specified by the test, the other specified through
+# the status file (e.g. known bugs).
+
+
+def get_outproc(test):
+ output_proc = test.output_proc
+ if output_proc.negative or statusfile.FAIL in test.expected_outcomes:
+ # TODO(majeski): Skip these tests instead of having special outproc.
+ return NeverUnexpectedOutputOutProc(output_proc)
+ return OutProc(output_proc)
+
+
+class OutProc(outproc_base.BaseOutProc):
+ """Output processor wrapper for predictable mode. It has custom process and
+ has_unexpected_output implementation, but for all other methods it simply
+ calls wrapped output processor.
+ """
+ def __init__(self, _outproc):
+ super(OutProc, self).__init__()
+ self._outproc = _outproc
+
+ def process(self, output):
+ return Result(self.has_unexpected_output(output), output)
+
+ def has_unexpected_output(self, output):
+ return output.exit_code != 0
+
+ def get_outcome(self, output):
+ return self._outproc.get_outcome(output)
+
+ @property
+ def negative(self):
+ return self._outproc.negative
+
+ @property
+ def expected_outcomes(self):
+ return self._outproc.expected_outcomes
+
+
+class NeverUnexpectedOutputOutProc(OutProc):
+ """Output processor wrapper for tests that we will return False for
+ has_unexpected_output in the predictable mode.
+ """
+ def has_unexpected_output(self, output):
+ return False
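The selection in get_outproc above boils down to one predicate; a standalone sketch of the decision, with hypothetical inputs:

# Negative tests and tests expected to FAIL get the wrapper that never
# reports unexpected output; everything else passes iff the predictable
# wrapper exits with code 0.
def chooses_never_unexpected(negative, expected_outcomes):
    return negative or 'FAIL' in expected_outcomes

assert not chooses_never_unexpected(False, ['PASS'])
assert chooses_never_unexpected(True, ['PASS'])
assert chooses_never_unexpected(False, ['FAIL', 'PASS'])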
diff --git a/deps/v8/tools/testrunner/objects/testcase.py b/deps/v8/tools/testrunner/objects/testcase.py
index fd8c27bc59..06db32802c 100644
--- a/deps/v8/tools/testrunner/objects/testcase.py
+++ b/deps/v8/tools/testrunner/objects/testcase.py
@@ -25,45 +25,274 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+import copy
+import os
+import re
+import shlex
+
+from ..outproc import base as outproc
+from ..local import command
+from ..local import statusfile
+from ..local import utils
+
+FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")
+
+
class TestCase(object):
- def __init__(self, suite, path, variant=None, flags=None):
+ def __init__(self, suite, path, name):
self.suite = suite # TestSuite object
+
self.path = path # string, e.g. 'div-mod', 'test-api/foo'
- self.flags = flags or [] # list of strings, flags specific to this test
- self.variant = variant # name of the used testing variant
- self.output = None
+ self.name = name # string that identifies test in the status file
+
+ self.variant = None # name of the used testing variant
+ self.variant_flags = [] # list of strings, flags specific to this test
+
self.id = None # int, used to map result back to TestCase instance
- self.duration = None # assigned during execution
self.run = 1 # The nth time this test is executed.
+ self.cmd = None
+
+ # Fields used by the test processors.
+ self.origin = None # Test that this test is subtest of.
+ self.processor = None # Processor that created this subtest.
+ self.procid = '%s/%s' % (self.suite.name, self.name) # unique id
+    self.keep_output = False # Whether the output of this test should be kept
+
+ self._statusfile_outcomes = None
+ self._expected_outcomes = None # optimization: None == [statusfile.PASS]
+ self._statusfile_flags = None
+ self._prepare_outcomes()
+
+ def create_subtest(self, processor, subtest_id, variant=None, flags=None,
+ keep_output=False):
+ subtest = copy.copy(self)
+ subtest.origin = self
+ subtest.processor = processor
+ subtest.procid += '.%s' % subtest_id
+ subtest.keep_output = keep_output
+ if variant is not None:
+ assert self.variant is None
+ subtest.variant = variant
+ subtest.variant_flags = flags
+ subtest._prepare_outcomes()
+ return subtest
+
+ def create_variant(self, variant, flags, procid_suffix=None):
+ """Makes a shallow copy of the object and updates variant, variant flags and
+ all fields that depend on it, e.g. expected outcomes.
+
+    Args:
+      variant - variant name
+      flags - flags that should be added to the origin test's variant flags
+      procid_suffix - for multiple variants with the same name, set a suffix
+        to keep procid unique.
+ """
+ other = copy.copy(self)
+ if not self.variant_flags:
+ other.variant_flags = flags
+ else:
+ other.variant_flags = self.variant_flags + flags
+ other.variant = variant
+ if procid_suffix:
+ other.procid += '[%s-%s]' % (variant, procid_suffix)
+ else:
+ other.procid += '[%s]' % variant
+
+ other._prepare_outcomes(variant != self.variant)
+
+ return other
+
+ def _prepare_outcomes(self, force_update=True):
+ if force_update or self._statusfile_outcomes is None:
+ def is_flag(outcome):
+ return outcome.startswith('--')
+ def not_flag(outcome):
+ return not is_flag(outcome)
+
+ outcomes = self.suite.statusfile.get_outcomes(self.name, self.variant)
+ self._statusfile_outcomes = filter(not_flag, outcomes)
+ self._statusfile_flags = filter(is_flag, outcomes)
+ self.expected_outcomes = (
+ self._parse_status_file_outcomes(self._statusfile_outcomes))
+
+ def _parse_status_file_outcomes(self, outcomes):
+ if (statusfile.FAIL_SLOPPY in outcomes and
+ '--use-strict' not in self.variant_flags):
+ return outproc.OUTCOMES_FAIL
+
+ expected_outcomes = []
+ if (statusfile.FAIL in outcomes or
+ statusfile.FAIL_OK in outcomes):
+ expected_outcomes.append(statusfile.FAIL)
+ if statusfile.CRASH in outcomes:
+ expected_outcomes.append(statusfile.CRASH)
+
+ # Do not add PASS if there is nothing else. Empty outcomes are converted to
+ # the global [PASS].
+ if expected_outcomes and statusfile.PASS in outcomes:
+ expected_outcomes.append(statusfile.PASS)
+
+ # Avoid creating multiple instances of a list with a single FAIL.
+ if expected_outcomes == outproc.OUTCOMES_FAIL:
+ return outproc.OUTCOMES_FAIL
+ return expected_outcomes or outproc.OUTCOMES_PASS
+
+ @property
+ def do_skip(self):
+ return statusfile.SKIP in self._statusfile_outcomes
+
+ @property
+ def is_slow(self):
+ return statusfile.SLOW in self._statusfile_outcomes
+
+ @property
+ def is_fail_ok(self):
+ return statusfile.FAIL_OK in self._statusfile_outcomes
- def CopyAddingFlags(self, variant, flags):
- return TestCase(self.suite, self.path, variant, self.flags + flags)
+ @property
+ def is_pass_or_fail(self):
+ return (statusfile.PASS in self._statusfile_outcomes and
+ statusfile.FAIL in self._statusfile_outcomes and
+ statusfile.CRASH not in self._statusfile_outcomes)
- def SetSuiteObject(self, suites):
- self.suite = suites[self.suite]
+ @property
+ def only_standard_variant(self):
+ return statusfile.NO_VARIANTS in self._statusfile_outcomes
- def suitename(self):
- return self.suite.name
+ def get_command(self, context):
+ params = self._get_cmd_params(context)
+ env = self._get_cmd_env()
+ shell, shell_flags = self._get_shell_with_flags(context)
+ timeout = self._get_timeout(params, context.timeout)
+ return self._create_cmd(shell, shell_flags + params, env, timeout, context)
- def GetLabel(self):
- return self.suitename() + "/" + self.suite.CommonTestName(self)
+ def _get_cmd_params(self, ctx):
+ """Gets command parameters and combines them in the following order:
+ - files [empty by default]
+ - extra flags (from command line)
+ - user flags (variant/fuzzer flags)
+ - statusfile flags
+ - mode flags (based on chosen mode)
+ - source flags (from source code) [empty by default]
- def __getstate__(self):
- """Representation to pickle test cases.
+ The best way to modify how parameters are created is to only override
+ methods for getting partial parameters.
+ """
+ return (
+ self._get_files_params(ctx) +
+ self._get_extra_flags(ctx) +
+ self._get_variant_flags() +
+ self._get_statusfile_flags() +
+ self._get_mode_flags(ctx) +
+ self._get_source_flags() +
+ self._get_suite_flags(ctx)
+ )
+
+ def _get_cmd_env(self):
+ return {}
+
+ def _get_files_params(self, ctx):
+ return []
+
+ def _get_extra_flags(self, ctx):
+ return ctx.extra_flags
+
+ def _get_variant_flags(self):
+ return self.variant_flags
- The original suite won't be sent beyond process boundaries. Instead
- send the name only and retrieve a process-local suite later.
+ def _get_statusfile_flags(self):
+ """Gets runtime flags from a status file.
+
+ Every outcome that starts with "--" is a flag.
"""
- return dict(self.__dict__, suite=self.suite.name)
+ return self._statusfile_flags
+
+ def _get_mode_flags(self, ctx):
+ return ctx.mode_flags
+
+ def _get_source_flags(self):
+ return []
+
+ def _get_suite_flags(self, ctx):
+ return []
+
+ def _get_shell_with_flags(self, ctx):
+ shell = self.get_shell()
+ shell_flags = []
+ if shell == 'd8':
+ shell_flags.append('--test')
+ if utils.IsWindows():
+ shell += '.exe'
+ if ctx.random_seed:
+ shell_flags.append('--random-seed=%s' % ctx.random_seed)
+ return shell, shell_flags
+
+ def _get_timeout(self, params, timeout):
+ if "--stress-opt" in params:
+ timeout *= 4
+ if "--noenable-vfp3" in params:
+ timeout *= 2
+
+ # TODO(majeski): make it slow outcome dependent.
+ timeout *= 2
+ return timeout
+
+ def get_shell(self):
+ return 'd8'
+
+ def _get_suffix(self):
+ return '.js'
+
+ def _create_cmd(self, shell, params, env, timeout, ctx):
+ return command.Command(
+ cmd_prefix=ctx.command_prefix,
+ shell=os.path.abspath(os.path.join(ctx.shell_dir, shell)),
+ args=params,
+ env=env,
+ timeout=timeout,
+ verbose=ctx.verbose
+ )
+
+ def _parse_source_flags(self, source=None):
+ source = source or self.get_source()
+ flags = []
+ for match in re.findall(FLAGS_PATTERN, source):
+ flags += shlex.split(match.strip())
+ return flags
+
+ def is_source_available(self):
+ return self._get_source_path() is not None
+
+ def get_source(self):
+ with open(self._get_source_path()) as f:
+ return f.read()
+
+ def _get_source_path(self):
+ return None
+
+ @property
+ def output_proc(self):
+ if self.expected_outcomes is outproc.OUTCOMES_PASS:
+ return outproc.DEFAULT
+ return outproc.OutProc(self.expected_outcomes)
def __cmp__(self, other):
# Make sure that test cases are sorted correctly if sorted without
# key function. But using a key function is preferred for speed.
return cmp(
- (self.suite.name, self.path, self.flags),
- (other.suite.name, other.path, other.flags),
+ (self.suite.name, self.name, self.variant_flags),
+ (other.suite.name, other.name, other.variant_flags)
)
+ def __hash__(self):
+ return hash((self.suite.name, self.name, ''.join(self.variant_flags)))
+
def __str__(self):
- return "[%s/%s %s]" % (self.suite.name, self.path, self.flags)
+ return self.suite.name + '/' + self.name
+
+ # TODO(majeski): Rename `id` field or `get_id` function since they're
+ # unrelated.
+ def get_id(self):
+ return '%s/%s %s' % (
+ self.suite.name, self.name, ' '.join(self.variant_flags))
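The ordering documented in _get_cmd_params above is what resolves duplicate flags (in d8, a later flag generally overrides an earlier one); a flattened sketch with hypothetical flag values:

# Hypothetical flattening of TestCase._get_cmd_params in documented order.
params = (
    []                                 # files
    + ['--extra-from-command-line']    # extra flags
    + ['--stress-opt']                 # variant flags
    + ['--flag-from-status-file']      # statusfile flags
    + ['--debug-code']                 # mode flags
    + ['--flag-from-source-comment']   # source flags (// Flags: ...)
    + []                               # suite flags
)
print(' '.join(params))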
diff --git a/deps/v8/tools/testrunner/outproc/__init__.py b/deps/v8/tools/testrunner/outproc/__init__.py
new file mode 100644
index 0000000000..4433538556
--- /dev/null
+++ b/deps/v8/tools/testrunner/outproc/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/deps/v8/tools/testrunner/outproc/base.py b/deps/v8/tools/testrunner/outproc/base.py
new file mode 100644
index 0000000000..9a9db4e81d
--- /dev/null
+++ b/deps/v8/tools/testrunner/outproc/base.py
@@ -0,0 +1,166 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import collections
+import itertools
+
+from ..local import statusfile
+from ..testproc.result import Result
+
+
+OUTCOMES_PASS = [statusfile.PASS]
+OUTCOMES_FAIL = [statusfile.FAIL]
+
+
+class BaseOutProc(object):
+ def process(self, output):
+ return Result(self.has_unexpected_output(output), output)
+
+ def has_unexpected_output(self, output):
+ return self.get_outcome(output) not in self.expected_outcomes
+
+ def get_outcome(self, output):
+ if output.HasCrashed():
+ return statusfile.CRASH
+ elif output.HasTimedOut():
+ return statusfile.TIMEOUT
+ elif self._has_failed(output):
+ return statusfile.FAIL
+ else:
+ return statusfile.PASS
+
+ def _has_failed(self, output):
+ execution_failed = self._is_failure_output(output)
+ if self.negative:
+ return not execution_failed
+ return execution_failed
+
+ def _is_failure_output(self, output):
+ return output.exit_code != 0
+
+ @property
+ def negative(self):
+ return False
+
+ @property
+ def expected_outcomes(self):
+ raise NotImplementedError()
+
+
+class Negative(object):
+ @property
+ def negative(self):
+ return True
+
+
+class PassOutProc(BaseOutProc):
+ """Output processor optimized for positive tests expected to PASS."""
+ def has_unexpected_output(self, output):
+ return self.get_outcome(output) != statusfile.PASS
+
+ @property
+ def expected_outcomes(self):
+ return OUTCOMES_PASS
+
+
+class OutProc(BaseOutProc):
+ """Output processor optimized for positive tests with expected outcomes
+ different than a single PASS.
+ """
+ def __init__(self, expected_outcomes):
+ self._expected_outcomes = expected_outcomes
+
+ @property
+ def expected_outcomes(self):
+ return self._expected_outcomes
+
+ # TODO(majeski): Inherit from PassOutProc in case of OUTCOMES_PASS and remove
+ # custom get/set state.
+ def __getstate__(self):
+ d = self.__dict__
+ if self._expected_outcomes is OUTCOMES_PASS:
+ d = d.copy()
+ del d['_expected_outcomes']
+ return d
+
+ def __setstate__(self, d):
+ if '_expected_outcomes' not in d:
+ d['_expected_outcomes'] = OUTCOMES_PASS
+ self.__dict__.update(d)
+
+
+# TODO(majeski): Override __reduce__ to make it deserialize as one instance.
+DEFAULT = PassOutProc()
+
+
+class ExpectedOutProc(OutProc):
+ """Output processor that has is_failure_output depending on comparing the
+ output with the expected output.
+ """
+ def __init__(self, expected_outcomes, expected_filename):
+ super(ExpectedOutProc, self).__init__(expected_outcomes)
+ self._expected_filename = expected_filename
+
+ def _is_failure_output(self, output):
+ with open(self._expected_filename, 'r') as f:
+ expected_lines = f.readlines()
+
+ for act_iterator in self._act_block_iterator(output):
+ for expected, actual in itertools.izip_longest(
+ self._expected_iterator(expected_lines),
+ act_iterator,
+ fillvalue=''
+ ):
+ if expected != actual:
+ return True
+ return False
+
+ def _act_block_iterator(self, output):
+ """Iterates over blocks of actual output lines."""
+ lines = output.stdout.splitlines()
+ start_index = 0
+ found_eqeq = False
+ for index, line in enumerate(lines):
+ # If a stress test separator is found:
+ if line.startswith('=='):
+ # Iterate over all lines before a separator except the first.
+ if not found_eqeq:
+ found_eqeq = True
+ else:
+ yield self._actual_iterator(lines[start_index:index])
+ # The next block of output lines starts after the separator.
+ start_index = index + 1
+ # Iterate over complete output if no separator was found.
+ if not found_eqeq:
+ yield self._actual_iterator(lines)
+
+ def _actual_iterator(self, lines):
+ return self._iterator(lines, self._ignore_actual_line)
+
+ def _expected_iterator(self, lines):
+ return self._iterator(lines, self._ignore_expected_line)
+
+ def _ignore_actual_line(self, line):
+ """Ignore empty lines, valgrind output, Android output and trace
+ incremental marking output.
+ """
+ if not line:
+ return True
+ return (line.startswith('==') or
+ line.startswith('**') or
+ line.startswith('ANDROID') or
+ line.startswith('###') or
+ # FIXME(machenbach): The test driver shouldn't try to use slow
+ # asserts if they weren't compiled. This fails in optdebug=2.
+ line == 'Warning: unknown flag --enable-slow-asserts.' or
+ line == 'Try --help for options')
+
+ def _ignore_expected_line(self, line):
+ return not line
+
+ def _iterator(self, lines, ignore_predicate):
+ for line in lines:
+ line = line.strip()
+ if not ignore_predicate(line):
+ yield line
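The '==' handling in _act_block_iterator above splits stress-variant output into per-run blocks; a standalone sketch of the splitting, with made-up stdout:

# Blocks are emitted between '==' separators, mirroring
# ExpectedOutProc._act_block_iterator.
def act_blocks(stdout):
    lines = stdout.splitlines()
    start, found_eqeq = 0, False
    for i, line in enumerate(lines):
        if line.startswith('=='):
            if not found_eqeq:
                found_eqeq = True
            else:
                yield lines[start:i]
            start = i + 1
    if not found_eqeq:
        yield lines  # no separator: the whole output is one block

print(list(act_blocks('== run 1\na\nb\n== run 2\nc\n==')))
# [['a', 'b'], ['c']]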
diff --git a/deps/v8/tools/testrunner/outproc/message.py b/deps/v8/tools/testrunner/outproc/message.py
new file mode 100644
index 0000000000..bbfc1cdf7e
--- /dev/null
+++ b/deps/v8/tools/testrunner/outproc/message.py
@@ -0,0 +1,56 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import itertools
+import os
+import re
+
+from . import base
+
+
+class OutProc(base.OutProc):
+ def __init__(self, expected_outcomes, basepath, expected_fail):
+ super(OutProc, self).__init__(expected_outcomes)
+ self._basepath = basepath
+ self._expected_fail = expected_fail
+
+ def _is_failure_output(self, output):
+ fail = output.exit_code != 0
+ if fail != self._expected_fail:
+ return True
+
+ expected_lines = []
+ # Can't use utils.ReadLinesFrom() here because it strips whitespace.
+ with open(self._basepath + '.out') as f:
+ for line in f:
+ if line.startswith("#") or not line.strip():
+ continue
+ expected_lines.append(line)
+ raw_lines = output.stdout.splitlines()
+ actual_lines = [ s for s in raw_lines if not self._ignore_line(s) ]
+ if len(expected_lines) != len(actual_lines):
+ return True
+
+ env = {
+ 'basename': os.path.basename(self._basepath + '.js'),
+ }
+ for (expected, actual) in itertools.izip_longest(
+ expected_lines, actual_lines, fillvalue=''):
+ pattern = re.escape(expected.rstrip() % env)
+ pattern = pattern.replace('\\*', '.*')
+ pattern = pattern.replace('\\{NUMBER\\}', '\d+(?:\.\d*)?')
+ pattern = '^%s$' % pattern
+ if not re.match(pattern, actual):
+ return True
+ return False
+
+ def _ignore_line(self, string):
+ """Ignore empty lines, valgrind output, Android output."""
+ return (
+ not string or
+ not string.strip() or
+ string.startswith("==") or
+ string.startswith("**") or
+ string.startswith("ANDROID")
+ )
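The expected-line matching above expands '*' and '{NUMBER}' placeholders into regular expressions; a standalone check of one comparison, with made-up expected and actual lines:

import re

# Sketch of the placeholder expansion in message.OutProc._is_failure_output.
expected = 'test.js:{NUMBER}: ReferenceError: *'
actual = 'test.js:42: ReferenceError: x is not defined'
pattern = re.escape(expected.rstrip())
pattern = pattern.replace('\\*', '.*')
pattern = pattern.replace('\\{NUMBER\\}', r'\d+(?:\.\d*)?')
assert re.match('^%s$' % pattern, actual)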
diff --git a/deps/v8/tools/testrunner/outproc/mkgrokdump.py b/deps/v8/tools/testrunner/outproc/mkgrokdump.py
new file mode 100644
index 0000000000..8efde1226f
--- /dev/null
+++ b/deps/v8/tools/testrunner/outproc/mkgrokdump.py
@@ -0,0 +1,31 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import difflib
+
+from . import base
+
+
+class OutProc(base.OutProc):
+ def __init__(self, expected_outcomes, expected_path):
+ super(OutProc, self).__init__(expected_outcomes)
+ self._expected_path = expected_path
+
+ def _is_failure_output(self, output):
+ with open(self._expected_path) as f:
+ expected = f.read()
+ expected_lines = expected.splitlines()
+ actual_lines = output.stdout.splitlines()
+ diff = difflib.unified_diff(expected_lines, actual_lines, lineterm="",
+ fromfile="expected_path")
+ diffstring = '\n'.join(diff)
+    if diffstring != "":
+ if "generated from a non-shipping build" in output.stdout:
+ return False
+ if not "generated from a shipping build" in output.stdout:
+ output.stdout = "Unexpected output:\n\n" + output.stdout
+ return True
+ output.stdout = diffstring
+ return True
+ return False
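The check above hinges on difflib.unified_diff yielding nothing for identical inputs, so the joined diff string is empty exactly on success; for reference:

import difflib

# unified_diff yields no lines when the inputs match, so the joined
# string is '' on success (inputs here are made up).
same = difflib.unified_diff(['a', 'b'], ['a', 'b'], lineterm='',
                            fromfile='expected_path')
assert '\n'.join(same) == ''
changed = difflib.unified_diff(['a', 'b'], ['a', 'c'], lineterm='',
                               fromfile='expected_path')
assert '\n'.join(changed) != ''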
diff --git a/deps/v8/tools/testrunner/outproc/mozilla.py b/deps/v8/tools/testrunner/outproc/mozilla.py
new file mode 100644
index 0000000000..1400d0ec54
--- /dev/null
+++ b/deps/v8/tools/testrunner/outproc/mozilla.py
@@ -0,0 +1,33 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from . import base
+
+
+def _is_failure_output(self, output):
+ return (
+ output.exit_code != 0 or
+ 'FAILED!' in output.stdout
+ )
+
+
+class OutProc(base.OutProc):
+ """Optimized for positive tests."""
+OutProc._is_failure_output = _is_failure_output
+
+
+class PassOutProc(base.PassOutProc):
+ """Optimized for positive tests expected to PASS."""
+PassOutProc._is_failure_output = _is_failure_output
+
+
+class NegOutProc(base.Negative, OutProc):
+ pass
+
+class NegPassOutProc(base.Negative, PassOutProc):
+ pass
+
+
+MOZILLA_PASS_DEFAULT = PassOutProc()
+MOZILLA_PASS_NEGATIVE = NegPassOutProc()
diff --git a/deps/v8/tools/testrunner/outproc/test262.py b/deps/v8/tools/testrunner/outproc/test262.py
new file mode 100644
index 0000000000..b5eb5547c3
--- /dev/null
+++ b/deps/v8/tools/testrunner/outproc/test262.py
@@ -0,0 +1,54 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import re
+
+from . import base
+
+
+class ExceptionOutProc(base.OutProc):
+ """Output processor for tests with expected exception."""
+ def __init__(self, expected_outcomes, expected_exception=None):
+ super(ExceptionOutProc, self).__init__(expected_outcomes)
+ self._expected_exception = expected_exception
+
+ def _is_failure_output(self, output):
+ if output.exit_code != 0:
+ return True
+ if self._expected_exception != self._parse_exception(output.stdout):
+ return True
+ return 'FAILED!' in output.stdout
+
+ def _parse_exception(self, string):
+ # somefile:somelinenumber: someerror[: sometext]
+ # somefile might include an optional drive letter on windows e.g. "e:".
+ match = re.search(
+ '^(?:\w:)?[^:]*:[0-9]+: ([^: ]+?)($|: )', string, re.MULTILINE)
+ if match:
+ return match.group(1).strip()
+ else:
+ return None
+
+
+def _is_failure_output(self, output):
+ return (
+ output.exit_code != 0 or
+ 'FAILED!' in output.stdout
+ )
+
+
+class NoExceptionOutProc(base.OutProc):
+ """Output processor optimized for tests without expected exception."""
+NoExceptionOutProc._is_failure_output = _is_failure_output
+
+
+class PassNoExceptionOutProc(base.PassOutProc):
+ """
+ Output processor optimized for tests expected to PASS without expected
+ exception.
+ """
+PassNoExceptionOutProc._is_failure_output = _is_failure_output
+
+
+PASS_NO_EXCEPTION = PassNoExceptionOutProc()
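The parser above pulls the error name out of d8's 'file:line: Error' report; a standalone check with made-up stdout, including a Windows-style drive letter:

import re

# Standalone check of the _parse_exception regex.
def parse_exception(string):
    match = re.search(
        r'^(?:\w:)?[^:]*:[0-9]+: ([^: ]+?)($|: )', string, re.MULTILINE)
    return match.group(1).strip() if match else None

assert parse_exception('test.js:3: SyntaxError: Unexpected token') == 'SyntaxError'
assert parse_exception('e:\\dir\\test.js:7: TypeError') == 'TypeError'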
diff --git a/deps/v8/tools/testrunner/outproc/webkit.py b/deps/v8/tools/testrunner/outproc/webkit.py
new file mode 100644
index 0000000000..290e67dc5a
--- /dev/null
+++ b/deps/v8/tools/testrunner/outproc/webkit.py
@@ -0,0 +1,18 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from . import base
+
+
+class OutProc(base.ExpectedOutProc):
+ def _is_failure_output(self, output):
+ if output.exit_code != 0:
+ return True
+ return super(OutProc, self)._is_failure_output(output)
+
+ def _ignore_expected_line(self, line):
+ return (
+ line.startswith('#') or
+ super(OutProc, self)._ignore_expected_line(line)
+ )
diff --git a/deps/v8/tools/testrunner/standard_runner.py b/deps/v8/tools/testrunner/standard_runner.py
index d838df783c..3be2099252 100755
--- a/deps/v8/tools/testrunner/standard_runner.py
+++ b/deps/v8/tools/testrunner/standard_runner.py
@@ -25,6 +25,15 @@ from testrunner.local import utils
from testrunner.local import verbose
from testrunner.local.variants import ALL_VARIANTS
from testrunner.objects import context
+from testrunner.objects import predictable
+from testrunner.testproc.execution import ExecutionProc
+from testrunner.testproc.filter import StatusFileFilterProc, NameFilterProc
+from testrunner.testproc.loader import LoadProc
+from testrunner.testproc.progress import (VerboseProgressIndicator,
+ ResultsTracker,
+ TestsCounter)
+from testrunner.testproc.rerun import RerunProc
+from testrunner.testproc.variant import VariantProc
TIMEOUT_DEFAULT = 60
@@ -48,7 +57,7 @@ VARIANT_ALIASES = {
# Shortcut for the two above ("more" first - it has the longer running tests).
"exhaustive": MORE_VARIANTS + VARIANTS,
# Additional variants, run on a subset of bots.
- "extra": ["future", "liftoff"],
+ "extra": ["future", "liftoff", "trusted"],
}
GC_STRESS_FLAGS = ["--gc-interval=500", "--stress-compaction",
@@ -66,14 +75,20 @@ SLOW_ARCHS = ["arm",
"s390x",
"arm64"]
+PREDICTABLE_WRAPPER = os.path.join(
+ base_runner.BASE_DIR, 'tools', 'predictable_wrapper.py')
+
class StandardTestRunner(base_runner.BaseTestRunner):
- def __init__(self):
- super(StandardTestRunner, self).__init__()
+ def __init__(self, *args, **kwargs):
+ super(StandardTestRunner, self).__init__(*args, **kwargs)
self.sancov_dir = None
- def _do_execute(self, options, args):
+ def _get_default_suite_names(self):
+ return ['default']
+
+ def _do_execute(self, suites, args, options):
if options.swarming:
# Swarming doesn't print how isolated commands are called. Lets make
# this less cryptic by printing it ourselves.
@@ -89,42 +104,7 @@ class StandardTestRunner(base_runner.BaseTestRunner):
except Exception:
pass
- suite_paths = utils.GetSuitePaths(join(base_runner.BASE_DIR, "test"))
-
- # Use default tests if no test configuration was provided at the cmd line.
- if len(args) == 0:
- args = ["default"]
-
- # Expand arguments with grouped tests. The args should reflect the list
- # of suites as otherwise filters would break.
- def ExpandTestGroups(name):
- if name in base_runner.TEST_MAP:
- return [suite for suite in base_runner.TEST_MAP[name]]
- else:
- return [name]
- args = reduce(lambda x, y: x + y,
- [ExpandTestGroups(arg) for arg in args],
- [])
-
- args_suites = OrderedDict() # Used as set
- for arg in args:
- args_suites[arg.split('/')[0]] = True
- suite_paths = [ s for s in args_suites if s in suite_paths ]
-
- suites = []
- for root in suite_paths:
- suite = testsuite.TestSuite.LoadTestSuite(
- os.path.join(base_runner.BASE_DIR, "test", root))
- if suite:
- suites.append(suite)
-
- for s in suites:
- s.PrepareSources()
-
- try:
- return self._execute(args, options, suites)
- except KeyboardInterrupt:
- return 2
+ return self._execute(args, options, suites)
def _add_parser_options(self, parser):
parser.add_option("--sancov-dir",
@@ -154,6 +134,8 @@ class StandardTestRunner(base_runner.BaseTestRunner):
parser.add_option("--extra-flags",
help="Additional flags to pass to each test command",
action="append", default=[])
+ parser.add_option("--infra-staging", help="Use new test runner features",
+ default=False, action="store_true")
parser.add_option("--isolates", help="Whether to test isolates",
default=False, action="store_true")
parser.add_option("-j", help="The number of parallel tasks to run",
@@ -200,12 +182,6 @@ class StandardTestRunner(base_runner.BaseTestRunner):
parser.add_option("--rerun-failures-max",
help="Maximum number of failing test cases to rerun.",
default=100, type="int")
- parser.add_option("--shard-count",
- help="Split testsuites into this number of shards",
- default=1, type="int")
- parser.add_option("--shard-run",
- help="Run this shard from the split up tests.",
- default=1, type="int")
parser.add_option("--dont-skip-slow-simulator-tests",
help="Don't skip more slow tests when using a"
" simulator.",
@@ -253,13 +229,13 @@ class StandardTestRunner(base_runner.BaseTestRunner):
if options.novfp3:
options.extra_flags.append("--noenable-vfp3")
- if options.no_variants:
+ if options.no_variants: # pragma: no cover
print ("Option --no-variants is deprecated. "
"Pass --variants=default instead.")
assert not options.variants
options.variants = "default"
- if options.exhaustive_variants:
+ if options.exhaustive_variants: # pragma: no cover
# TODO(machenbach): Switch infra to --variants=exhaustive after M65.
print ("Option --exhaustive-variants is deprecated. "
"Pass --variants=exhaustive instead.")
@@ -280,6 +256,9 @@ class StandardTestRunner(base_runner.BaseTestRunner):
options.extra_flags.append("--predictable")
options.extra_flags.append("--verify_predictable")
options.extra_flags.append("--no-inline-new")
+ # Add predictable wrapper to command prefix.
+ options.command_prefix = (
+ [sys.executable, PREDICTABLE_WRAPPER] + options.command_prefix)
# TODO(machenbach): Figure out how to test a bigger subset of variants on
# msan.
@@ -295,6 +274,10 @@ class StandardTestRunner(base_runner.BaseTestRunner):
# Use developer defaults if no variant was specified.
options.variants = options.variants or "dev"
+ if options.variants == "infra_staging":
+ options.variants = "exhaustive"
+ options.infra_staging = True
+
# Resolve variant aliases and dedupe.
# TODO(machenbach): Don't mutate global variable. Rather pass mutated
# version as local variable.
@@ -308,7 +291,7 @@ class StandardTestRunner(base_runner.BaseTestRunner):
print "All variants must be in %s" % str(ALL_VARIANTS)
raise base_runner.TestRunnerError()
- def CheckTestMode(name, option):
+ def CheckTestMode(name, option): # pragma: no cover
if not option in ["run", "skip", "dontcare"]:
print "Unknown %s mode %s" % (name, option)
raise base_runner.TestRunnerError()
@@ -317,6 +300,8 @@ class StandardTestRunner(base_runner.BaseTestRunner):
if self.build_config.no_i18n:
base_runner.TEST_MAP["bot_default"].remove("intl")
base_runner.TEST_MAP["default"].remove("intl")
+ # TODO(machenbach): uncomment after infra side lands.
+ # base_runner.TEST_MAP["d8_default"].remove("intl")
def _setup_env(self):
super(StandardTestRunner, self)._setup_env()
@@ -366,10 +351,10 @@ class StandardTestRunner(base_runner.BaseTestRunner):
options.no_sorting,
options.rerun_failures_count,
options.rerun_failures_max,
- self.build_config.predictable,
options.no_harness,
use_perf_data=not options.swarming,
- sancov_dir=self.sancov_dir)
+ sancov_dir=self.sancov_dir,
+ infra_staging=options.infra_staging)
# TODO(all): Combine "simulator" and "simulator_run".
# TODO(machenbach): In GN we can derive simulator run from
@@ -405,6 +390,31 @@ class StandardTestRunner(base_runner.BaseTestRunner):
"tsan": self.build_config.tsan,
"ubsan_vptr": self.build_config.ubsan_vptr,
}
+
+ progress_indicator = progress.IndicatorNotifier()
+ progress_indicator.Register(
+ progress.PROGRESS_INDICATORS[options.progress]())
+ if options.junitout: # pragma: no cover
+ progress_indicator.Register(progress.JUnitTestProgressIndicator(
+ options.junitout, options.junittestsuite))
+ if options.json_test_results:
+ progress_indicator.Register(progress.JsonTestProgressIndicator(
+ options.json_test_results,
+ self.build_config.arch,
+ self.mode_options.execution_mode,
+ ctx.random_seed))
+ if options.flakiness_results: # pragma: no cover
+ progress_indicator.Register(progress.FlakinessTestProgressIndicator(
+ options.flakiness_results))
+
+ if options.infra_staging:
+ for s in suites:
+ s.ReadStatusFile(variables)
+ s.ReadTestCases(ctx)
+
+ return self._run_test_procs(suites, args, options, progress_indicator,
+ ctx)
+
all_tests = []
num_tests = 0
for s in suites:
@@ -417,14 +427,15 @@ class StandardTestRunner(base_runner.BaseTestRunner):
# First filtering by status applying the generic rules (tests without
# variants)
if options.warn_unused:
- s.WarnUnusedRules(check_variant_rules=False)
+ tests = [(t.name, t.variant) for t in s.tests]
+ s.statusfile.warn_unused_rules(tests, check_variant_rules=False)
s.FilterTestCasesByStatus(options.slow_tests, options.pass_fail_tests)
if options.cat:
verbose.PrintTestSource(s.tests)
continue
- variant_gen = s.CreateVariantGenerator(VARIANTS)
- variant_tests = [ t.CopyAddingFlags(v, flags)
+ variant_gen = s.CreateLegacyVariantsGenerator(VARIANTS)
+ variant_tests = [ t.create_variant(v, flags)
for t in s.tests
for v in variant_gen.FilterVariantsByTest(t)
for flags in variant_gen.GetFlagSets(t, v) ]
@@ -440,22 +451,24 @@ class StandardTestRunner(base_runner.BaseTestRunner):
else:
yield ["--random-seed=%d" % self._random_seed()]
s.tests = [
- t.CopyAddingFlags(t.variant, flags)
+ t.create_variant(t.variant, flags, 'seed-stress-%d' % n)
for t in variant_tests
- for flags in iter_seed_flags()
+ for n, flags in enumerate(iter_seed_flags())
]
else:
s.tests = variant_tests
# Second filtering by status applying also the variant-dependent rules.
if options.warn_unused:
- s.WarnUnusedRules(check_variant_rules=True)
+ tests = [(t.name, t.variant) for t in s.tests]
+ s.statusfile.warn_unused_rules(tests, check_variant_rules=True)
+
s.FilterTestCasesByStatus(options.slow_tests, options.pass_fail_tests)
+ s.tests = self._shard_tests(s.tests, options)
for t in s.tests:
- t.flags += s.GetStatusfileFlags(t)
+ t.cmd = t.get_command(ctx)
- s.tests = self._shard_tests(s.tests, options)
num_tests += len(s.tests)
if options.cat:
@@ -466,28 +479,19 @@ class StandardTestRunner(base_runner.BaseTestRunner):
# Run the tests.
start_time = time.time()
- progress_indicator = progress.IndicatorNotifier()
- progress_indicator.Register(
- progress.PROGRESS_INDICATORS[options.progress]())
- if options.junitout:
- progress_indicator.Register(progress.JUnitTestProgressIndicator(
- options.junitout, options.junittestsuite))
- if options.json_test_results:
- progress_indicator.Register(progress.JsonTestProgressIndicator(
- options.json_test_results,
- self.build_config.arch,
- self.mode_options.execution_mode,
- ctx.random_seed))
- if options.flakiness_results:
- progress_indicator.Register(progress.FlakinessTestProgressIndicator(
- options.flakiness_results))
- runner = execution.Runner(suites, progress_indicator, ctx)
+ if self.build_config.predictable:
+ outproc_factory = predictable.get_outproc
+ else:
+ outproc_factory = None
+
+ runner = execution.Runner(suites, progress_indicator, ctx,
+ outproc_factory)
exit_code = runner.Run(options.j)
overall_duration = time.time() - start_time
if options.time:
- verbose.PrintTestDurations(suites, overall_duration)
+ verbose.PrintTestDurations(suites, runner.outputs, overall_duration)
if num_tests == 0:
print("Warning: no tests were run!")
@@ -503,8 +507,7 @@ class StandardTestRunner(base_runner.BaseTestRunner):
print "Merging sancov files."
subprocess.check_call([
sys.executable,
- join(
- base_runner.BASE_DIR, "tools", "sanitizers", "sancov_merger.py"),
+ join(self.basedir, "tools", "sanitizers", "sancov_merger.py"),
"--coverage-dir=%s" % self.sancov_dir])
except:
print >> sys.stderr, "Error: Merging sancov files failed."
@@ -513,33 +516,10 @@ class StandardTestRunner(base_runner.BaseTestRunner):
return exit_code
def _shard_tests(self, tests, options):
- # Read gtest shard configuration from environment (e.g. set by swarming).
- # If none is present, use values passed on the command line.
- shard_count = int(
- os.environ.get('GTEST_TOTAL_SHARDS', options.shard_count))
- shard_run = os.environ.get('GTEST_SHARD_INDEX')
- if shard_run is not None:
- # The v8 shard_run starts at 1, while GTEST_SHARD_INDEX starts at 0.
- shard_run = int(shard_run) + 1
- else:
- shard_run = options.shard_run
-
- if options.shard_count > 1:
- # Log if a value was passed on the cmd line and it differs from the
- # environment variables.
- if options.shard_count != shard_count:
- print("shard_count from cmd line differs from environment variable "
- "GTEST_TOTAL_SHARDS")
- if options.shard_run > 1 and options.shard_run != shard_run:
- print("shard_run from cmd line differs from environment variable "
- "GTEST_SHARD_INDEX")
+ shard_run, shard_count = self._get_shard_info(options)
if shard_count < 2:
return tests
- if shard_run < 1 or shard_run > shard_count:
- print "shard-run not a valid number, should be in [1:shard-count]"
- print "defaulting back to running all tests"
- return tests
count = 0
shard = []
for test in tests:
@@ -548,6 +528,72 @@ class StandardTestRunner(base_runner.BaseTestRunner):
count += 1
return shard
+ def _run_test_procs(self, suites, args, options, progress_indicator,
+ context):
+ jobs = options.j
+
+ print '>>> Running with test processors'
+ loader = LoadProc()
+ tests_counter = TestsCounter()
+ results = ResultsTracker()
+ indicators = progress_indicator.ToProgressIndicatorProcs()
+ execproc = ExecutionProc(jobs, context)
+
+ procs = [
+ loader,
+ NameFilterProc(args) if args else None,
+ StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
+ self._create_shard_proc(options),
+ tests_counter,
+ VariantProc(VARIANTS),
+ StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
+ ] + indicators + [
+ results,
+ self._create_rerun_proc(context),
+ execproc,
+ ]
+
+ procs = filter(None, procs)
+
+ for i in xrange(0, len(procs) - 1):
+ procs[i].connect_to(procs[i + 1])
+
+ tests = [t for s in suites for t in s.tests]
+ tests.sort(key=lambda t: t.is_slow, reverse=True)
+
+ loader.setup()
+ loader.load_tests(tests)
+
+ print '>>> Running %d base tests' % tests_counter.total
+ tests_counter.remove_from_chain()
+
+ execproc.start()
+
+ for indicator in indicators:
+ indicator.finished()
+
+ print '>>> %d tests ran' % results.total
+
+ exit_code = 0
+ if results.failed:
+ exit_code = 1
+ if results.remaining:
+ exit_code = 2
+
+ if exit_code == 1 and options.json_test_results:
+ print("Force exit code 0 after failures. Json test results file "
+ "generated with failure information.")
+ exit_code = 0
+ return exit_code
+
+ def _create_rerun_proc(self, ctx):
+ if not ctx.rerun_failures_count:
+ return None
+ return RerunProc(ctx.rerun_failures_count,
+ ctx.rerun_failures_max)
+
+
if __name__ == '__main__':
sys.exit(StandardTestRunner().execute())
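
The shard bookkeeping that used to live inline in _shard_tests (see the
removed lines above) is now encapsulated in a _get_shard_info helper. A
condensed, standalone sketch of that logic, reconstructed from the removed
code and assuming the same gtest-style environment conventions:

import os

def get_shard_info(cmdline_run, cmdline_count):
  # Shard configuration from the environment (e.g. set by swarming) wins
  # over values passed on the command line.
  count = int(os.environ.get('GTEST_TOTAL_SHARDS', cmdline_count))
  env_run = os.environ.get('GTEST_SHARD_INDEX')
  if env_run is not None:
    # GTEST_SHARD_INDEX is 0-based; the v8 shard_run is 1-based.
    run = int(env_run) + 1
  else:
    run = cmdline_run
  if run < 1 or run > count:
    # Out-of-range shard: fall back to running all tests.
    return 1, 1
  return run, count
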
diff --git a/deps/v8/tools/testrunner/testproc/__init__.py b/deps/v8/tools/testrunner/testproc/__init__.py
new file mode 100644
index 0000000000..4433538556
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/deps/v8/tools/testrunner/testproc/base.py b/deps/v8/tools/testrunner/testproc/base.py
new file mode 100644
index 0000000000..1a87dbed55
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/base.py
@@ -0,0 +1,207 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from .result import SKIPPED
+
+
+"""
+Pipeline
+
+Test processors are chained together and communicate with each other by
+calling previous/next processor in the chain.
+ ----next_test()----> ----next_test()---->
+Proc1 Proc2 Proc3
+ <---result_for()---- <---result_for()----
+
+For every next_test there is exactly one result_for call.
+If processor ignores the test it has to return SkippedResult.
+If it created multiple subtests for one test and wants to pass all of them to
+the previous processor it can enclose them in GroupedResult.
+
+
+Subtests
+
+When test processor needs to modify the test or create some variants of the
+test it creates subtests and sends them to the next processor.
+Each subtest has:
+- procid - globally unique id that should contain id of the parent test and
+ some suffix given by test processor, e.g. its name + subtest type.
+- processor - which created it
+- origin - pointer to the parent (sub)test
+"""
+
+
+DROP_RESULT = 0
+DROP_OUTPUT = 1
+DROP_PASS_OUTPUT = 2
+DROP_PASS_STDOUT = 3
+
+def get_reduce_result_function(requirement):
+ if requirement == DROP_RESULT:
+ return lambda _: None
+
+ if requirement == DROP_OUTPUT:
+ def f(result):
+ result.output = None
+ return result
+ return f
+
+ if requirement == DROP_PASS_OUTPUT:
+ def f(result):
+ if not result.has_unexpected_output:
+ result.output = None
+ return result
+ return f
+
+ if requirement == DROP_PASS_STDOUT:
+ def f(result):
+ if not result.has_unexpected_output:
+ result.output.stdout = None
+ result.output.stderr = None
+ return result
+ return f
+
+
+class TestProc(object):
+ def __init__(self):
+ self._prev_proc = None
+ self._next_proc = None
+ self._requirement = DROP_RESULT
+ self._prev_requirement = None
+ self._reduce_result = lambda result: result
+
+ def connect_to(self, next_proc):
+ """Puts `next_proc` after itself in the chain."""
+ next_proc._prev_proc = self
+ self._next_proc = next_proc
+
+ def remove_from_chain(self):
+ if self._prev_proc:
+ self._prev_proc._next_proc = self._next_proc
+ if self._next_proc:
+ self._next_proc._prev_proc = self._prev_proc
+
+ def setup(self, requirement=DROP_RESULT):
+ """
+ Method called by the previous processor or by the pipeline creator to let
+ the processors know which part of the result can be dropped.
+ """
+ self._prev_requirement = requirement
+ if self._next_proc:
+ self._next_proc.setup(max(requirement, self._requirement))
+ if self._prev_requirement < self._requirement:
+ self._reduce_result = get_reduce_result_function(self._prev_requirement)
+
+ def next_test(self, test):
+ """
+ Method called by the previous processor whenever it produces a new test.
+ This method shouldn't be called by anyone except the previous processor.
+ """
+ raise NotImplementedError()
+
+ def result_for(self, test, result):
+ """
+ Method called by the next processor whenever it has a result for some test.
+ This method shouldn't be called by anyone except the next processor.
+ """
+ raise NotImplementedError()
+
+ def heartbeat(self):
+ if self._prev_proc:
+ self._prev_proc.heartbeat()
+
+ ### Communication
+
+ def _send_test(self, test):
+ """Helper method for sending test to the next processor."""
+ self._next_proc.next_test(test)
+
+ def _send_result(self, test, result):
+ """Helper method for sending result to the previous processor."""
+ result = self._reduce_result(result)
+ self._prev_proc.result_for(test, result)
+
+
+class TestProcObserver(TestProc):
+ """Processor used for observing the data."""
+ def __init__(self):
+ super(TestProcObserver, self).__init__()
+
+ def next_test(self, test):
+ self._on_next_test(test)
+ self._send_test(test)
+
+ def result_for(self, test, result):
+ self._on_result_for(test, result)
+ self._send_result(test, result)
+
+ def heartbeat(self):
+ self._on_heartbeat()
+ super(TestProcObserver, self).heartbeat()
+
+ def _on_next_test(self, test):
+ """Method called after receiving test from previous processor but before
+ sending it to the next one."""
+ pass
+
+ def _on_result_for(self, test, result):
+ """Method called after receiving result from next processor but before
+ sending it to the previous one."""
+ pass
+
+ def _on_heartbeat(self):
+ pass
+
+
+class TestProcProducer(TestProc):
+ """Processor for creating subtests."""
+
+ def __init__(self, name):
+ super(TestProcProducer, self).__init__()
+ self._name = name
+
+ def next_test(self, test):
+ self._next_test(test)
+
+ def result_for(self, subtest, result):
+ self._result_for(subtest.origin, subtest, result)
+
+ ### Implementation
+ def _next_test(self, test):
+ raise NotImplementedError()
+
+ def _result_for(self, test, subtest, result):
+ """
+ result_for method extended with `subtest` parameter.
+
+ Args:
+ test: test used by current processor to create the subtest.
+ subtest: test for which the `result` is.
+ result: subtest execution result created by the output processor.
+ """
+ raise NotImplementedError()
+
+ ### Managing subtests
+ def _create_subtest(self, test, subtest_id, **kwargs):
+ """Creates subtest with subtest id <processor name>-`subtest_id`."""
+ return test.create_subtest(self, '%s-%s' % (self._name, subtest_id),
+ **kwargs)
+
+
+class TestProcFilter(TestProc):
+ """Processor for filtering tests."""
+
+ def next_test(self, test):
+ if self._filter(test):
+ self._send_result(test, SKIPPED)
+ else:
+ self._send_test(test)
+
+ def result_for(self, test, result):
+ self._send_result(test, result)
+
+ def _filter(self, test):
+ """Returns whether test should be filtered out."""
+ raise NotImplementedError()
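
To make the pipeline contract above concrete, here is a minimal sketch of
wiring a chain by hand. RootProc, EchoProc and SinkProc are hypothetical and
exist only for illustration; the import assumes the tools directory is on
sys.path:

from testrunner.testproc import base

class RootProc(base.TestProc):
  """Head of the chain: injects tests and receives final results."""
  def run(self, tests):
    for t in tests:
      self._send_test(t)
  def result_for(self, test, result):
    print 'final result for %s: %r' % (test, result)

class EchoProc(base.TestProcObserver):
  """Observer: watches tests and results pass through."""
  def _on_next_test(self, test):
    print 'forwarding %s' % test
  def _on_result_for(self, test, result):
    print 'result came back for %s' % test

class SinkProc(base.TestProc):
  """Tail of the chain: answers every test immediately."""
  def next_test(self, test):
    self._send_result(test, None)

procs = [RootProc(), EchoProc(), SinkProc()]
for prev_proc, next_proc in zip(procs, procs[1:]):
  prev_proc.connect_to(next_proc)
procs[0].setup()          # propagates result requirements down the chain
procs[0].run(['test-1'])  # exactly one result_for per next_test
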
diff --git a/deps/v8/tools/testrunner/testproc/execution.py b/deps/v8/tools/testrunner/testproc/execution.py
new file mode 100644
index 0000000000..021b02af3e
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/execution.py
@@ -0,0 +1,92 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import collections
+import traceback
+
+from . import base
+from ..local import pool
+
+
+# Global function for multiprocessing, because pickling a static method doesn't
+# work on Windows.
+def run_job(job, process_context):
+ return job.run(process_context)
+
+
+def create_process_context(requirement):
+ return ProcessContext(base.get_reduce_result_function(requirement))
+
+
+JobResult = collections.namedtuple('JobResult', ['id', 'result'])
+ProcessContext = collections.namedtuple('ProcessContext', ['reduce_result_f'])
+
+
+class Job(object):
+ def __init__(self, test_id, cmd, outproc, keep_output):
+ self.test_id = test_id
+ self.cmd = cmd
+ self.outproc = outproc
+ self.keep_output = keep_output
+
+ def run(self, process_ctx):
+ output = self.cmd.execute()
+ result = self.outproc.process(output)
+ if not self.keep_output:
+ result = process_ctx.reduce_result_f(result)
+ return JobResult(self.test_id, result)
+
+
+class ExecutionProc(base.TestProc):
+ """Last processor in the chain. Instead of passing tests further it creates
+ commands and output processors, executes them in multiple worker processes and
+ sends results to the previous processor.
+ """
+
+ def __init__(self, jobs, context):
+ super(ExecutionProc, self).__init__()
+ self._pool = pool.Pool(jobs)
+ self._context = context
+ self._tests = {}
+
+ def connect_to(self, next_proc):
+ assert False, 'ExecutionProc cannot be connected to anything'
+
+ def start(self):
+ try:
+ it = self._pool.imap_unordered(
+ fn=run_job,
+ gen=[],
+ process_context_fn=create_process_context,
+ process_context_args=[self._prev_requirement],
+ )
+ for pool_result in it:
+ if pool_result.heartbeat:
+ continue
+
+ job_result = pool_result.value
+ test_id, result = job_result
+
+ test, result.cmd = self._tests[test_id]
+ del self._tests[test_id]
+ self._send_result(test, result)
+ except KeyboardInterrupt:
+ raise
+ except:
+ traceback.print_exc()
+ raise
+ finally:
+ self._pool.terminate()
+
+ def next_test(self, test):
+ test_id = test.procid
+ cmd = test.get_command(self._context)
+ self._tests[test_id] = test, cmd
+
+ # TODO(majeski): Needs factory for outproc as in local/execution.py
+ outproc = test.output_proc
+ self._pool.add([Job(test_id, cmd, outproc, test.keep_output)])
+
+ def result_for(self, test, result):
+ assert False, 'ExecutionProc cannot receive results'
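
The module-level run_job above exists because of a multiprocessing
constraint: the callable handed to worker processes must be picklable, and
pickle resolves functions by their module-level name, a lookup that bound and
static methods fail (notably on Windows, where workers cannot inherit state
via fork). A tiny standalone illustration with the stdlib pool:

import multiprocessing

def work(x):
  # Module-level function: picklable, so it can be shipped to workers.
  return x * x

if __name__ == '__main__':
  pool = multiprocessing.Pool(2)
  print pool.map(work, [1, 2, 3])  # [1, 4, 9]
  pool.close()
  pool.join()
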
diff --git a/deps/v8/tools/testrunner/testproc/filter.py b/deps/v8/tools/testrunner/testproc/filter.py
new file mode 100644
index 0000000000..5081997751
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/filter.py
@@ -0,0 +1,83 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from collections import defaultdict
+import fnmatch
+
+from . import base
+
+
+class StatusFileFilterProc(base.TestProcFilter):
+ """Filters tests by outcomes from status file.
+
+ The status file has to be loaded before using this processor.
+
+ Args:
+ slow_tests_mode: What to do with slow tests.
+ pass_fail_tests_mode: What to do with pass or fail tests.
+
+ Mode options:
+ None (default): don't skip
+ "skip": skip if slow/pass_fail
+ "run": skip if not slow/pass_fail
+ """
+
+ def __init__(self, slow_tests_mode, pass_fail_tests_mode):
+ super(StatusFileFilterProc, self).__init__()
+ self._slow_tests_mode = slow_tests_mode
+ self._pass_fail_tests_mode = pass_fail_tests_mode
+
+ def _filter(self, test):
+ return (
+ test.do_skip or
+ self._skip_slow(test.is_slow) or
+ self._skip_pass_fail(test.is_pass_or_fail)
+ )
+
+ def _skip_slow(self, is_slow):
+ return (
+ (self._slow_tests_mode == 'run' and not is_slow) or
+ (self._slow_tests_mode == 'skip' and is_slow)
+ )
+
+ def _skip_pass_fail(self, is_pass_fail):
+ return (
+ (self._pass_fail_tests_mode == 'run' and not is_pass_fail) or
+ (self._pass_fail_tests_mode == 'skip' and is_pass_fail)
+ )
+
+
+class NameFilterProc(base.TestProcFilter):
+ """Filters tests based on command-line arguments.
+
+ Each arg can be a glob: asterisks in any position of the name
+ represent zero or more characters. Without asterisks, only exact matches
+ are used, with the exception of a bare test-suite name given as an argument.
+ """
+ def __init__(self, args):
+ super(NameFilterProc, self).__init__()
+
+ self._globs = defaultdict(list)
+ for a in args:
+ argpath = a.split('/')
+ suitename = argpath[0]
+ path = '/'.join(argpath[1:]) or '*'
+ self._globs[suitename].append(path)
+
+ for s, globs in self._globs.iteritems():
+ if not globs or '*' in globs:
+ self._globs[s] = []
+
+ def _filter(self, test):
+ globs = self._globs.get(test.suite.name)
+ if globs is None:
+ return True
+
+ if not globs:
+ return False
+
+ for g in globs:
+ if fnmatch.fnmatch(test.path, g):
+ return False
+ return True
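
As a rough illustration of the matching rules NameFilterProc implements
(standalone, with hypothetical suite and test names): a bare suite name keeps
the whole suite, a glob selects paths within it, and unmentioned suites are
filtered out entirely.

import fnmatch

args = ['mjsunit/regress/*', 'cctest']
globs = {'mjsunit': ['regress/*'], 'cctest': []}  # parsed form of args

def keep(suite, path):
  suite_globs = globs.get(suite)
  if suite_globs is None:  # suite never mentioned: filter out
    return False
  if not suite_globs:      # bare suite name: keep everything
    return True
  return any(fnmatch.fnmatch(path, g) for g in suite_globs)

print keep('mjsunit', 'regress/regress-123')  # True
print keep('mjsunit', 'array-sort')           # False
print keep('cctest', 'test-heap/Foo')         # True
print keep('unittests', 'FooTest')            # False
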
diff --git a/deps/v8/tools/testrunner/testproc/loader.py b/deps/v8/tools/testrunner/testproc/loader.py
new file mode 100644
index 0000000000..0a3d0df1b3
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/loader.py
@@ -0,0 +1,27 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from . import base
+
+
+class LoadProc(base.TestProc):
+ """First processor in the chain that passes all tests to the next processor.
+ """
+
+ def load_tests(self, tests):
+ loaded = set()
+ for test in tests:
+ if test.procid in loaded:
+ print 'Warning: %s already obtained' % test.procid
+ continue
+
+ loaded.add(test.procid)
+ self._send_test(test)
+
+ def next_test(self, test):
+ assert False, 'Nothing can be connected to the LoadProc'
+
+ def result_for(self, test, result):
+ # Ignore all results.
+ pass
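
The deduplication in load_tests keys on procid; the same idea as a standalone
generator (illustrative only, not part of this commit):

def first_occurrence(tests, seen=None):
  seen = set() if seen is None else seen
  for t in tests:
    if t in seen:
      print 'Warning: %s already obtained' % t
      continue
    seen.add(t)
    yield t

print list(first_occurrence(['a', 'b', 'a']))  # warns once, yields ['a', 'b']
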
diff --git a/deps/v8/tools/testrunner/testproc/progress.py b/deps/v8/tools/testrunner/testproc/progress.py
new file mode 100644
index 0000000000..78514f7252
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/progress.py
@@ -0,0 +1,385 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+import os
+import sys
+import time
+
+from . import base
+from ..local import junit_output
+
+
+def print_failure_header(test):
+ if test.output_proc.negative:
+ negative_marker = '[negative] '
+ else:
+ negative_marker = ''
+ print "=== %(label)s %(negative)s===" % {
+ 'label': test,
+ 'negative': negative_marker,
+ }
+
+
+class TestsCounter(base.TestProcObserver):
+ def __init__(self):
+ super(TestsCounter, self).__init__()
+ self.total = 0
+
+ def _on_next_test(self, test):
+ self.total += 1
+
+
+class ResultsTracker(base.TestProcObserver):
+ def __init__(self):
+ super(ResultsTracker, self).__init__()
+ self._requirement = base.DROP_OUTPUT
+
+ self.failed = 0
+ self.remaining = 0
+ self.total = 0
+
+ def _on_next_test(self, test):
+ self.total += 1
+ self.remaining += 1
+
+ def _on_result_for(self, test, result):
+ self.remaining -= 1
+ if result.has_unexpected_output:
+ self.failed += 1
+
+
+class ProgressIndicator(base.TestProcObserver):
+ def finished(self):
+ pass
+
+
+class SimpleProgressIndicator(ProgressIndicator):
+ def __init__(self):
+ super(SimpleProgressIndicator, self).__init__()
+ self._requirement = base.DROP_PASS_OUTPUT
+
+ self._failed = []
+ self._total = 0
+
+ def _on_next_test(self, test):
+ self._total += 1
+
+ def _on_result_for(self, test, result):
+ # TODO(majeski): Support for dummy/grouped results
+ if result.has_unexpected_output:
+ self._failed.append((test, result))
+
+ def finished(self):
+ crashed = 0
+ print
+ for test, result in self._failed:
+ print_failure_header(test)
+ if result.output.stderr:
+ print "--- stderr ---"
+ print result.output.stderr.strip()
+ if result.output.stdout:
+ print "--- stdout ---"
+ print result.output.stdout.strip()
+ print "Command: %s" % result.cmd.to_string()
+ if result.output.HasCrashed():
+ print "exit code: %d" % result.output.exit_code
+ print "--- CRASHED ---"
+ crashed += 1
+ if result.output.HasTimedOut():
+ print "--- TIMEOUT ---"
+ if len(self._failed) == 0:
+ print "==="
+ print "=== All tests succeeded"
+ print "==="
+ else:
+ print
+ print "==="
+ print "=== %i tests failed" % len(self._failed)
+ if crashed > 0:
+ print "=== %i tests CRASHED" % crashed
+ print "==="
+
+
+class VerboseProgressIndicator(SimpleProgressIndicator):
+ def _on_result_for(self, test, result):
+ super(VerboseProgressIndicator, self)._on_result_for(test, result)
+ # TODO(majeski): Support for dummy/grouped results
+ if result.has_unexpected_output:
+ if result.output.HasCrashed():
+ outcome = 'CRASH'
+ else:
+ outcome = 'FAIL'
+ else:
+ outcome = 'pass'
+ print 'Done running %s: %s' % (test, outcome)
+ sys.stdout.flush()
+
+ def _on_heartbeat(self):
+ print 'Still working...'
+ sys.stdout.flush()
+
+
+class DotsProgressIndicator(SimpleProgressIndicator):
+ def __init__(self):
+ super(DotsProgressIndicator, self).__init__()
+ self._count = 0
+
+ def _on_result_for(self, test, result):
+ # TODO(majeski): Support for dummy/grouped results
+ self._count += 1
+ if self._count > 1 and self._count % 50 == 1:
+ sys.stdout.write('\n')
+ if result.has_unexpected_output:
+ if result.output.HasCrashed():
+ sys.stdout.write('C')
+ sys.stdout.flush()
+ elif result.output.HasTimedOut():
+ sys.stdout.write('T')
+ sys.stdout.flush()
+ else:
+ sys.stdout.write('F')
+ sys.stdout.flush()
+ else:
+ sys.stdout.write('.')
+ sys.stdout.flush()
+
+
+class CompactProgressIndicator(ProgressIndicator):
+ def __init__(self, templates):
+ super(CompactProgressIndicator, self).__init__()
+ self._requirement = base.DROP_PASS_OUTPUT
+
+ self._templates = templates
+ self._last_status_length = 0
+ self._start_time = time.time()
+
+ self._total = 0
+ self._passed = 0
+ self._failed = 0
+
+ def _on_next_test(self, test):
+ self._total += 1
+
+ def _on_result_for(self, test, result):
+ # TODO(majeski): Support for dummy/grouped results
+ if result.has_unexpected_output:
+ self._failed += 1
+ else:
+ self._passed += 1
+
+ self._print_progress(str(test))
+ if result.has_unexpected_output:
+ output = result.output
+ stdout = output.stdout.strip()
+ stderr = output.stderr.strip()
+
+ self._clear_line(self._last_status_length)
+ print_failure_header(test)
+ if len(stdout):
+ print self._templates['stdout'] % stdout
+ if len(stderr):
+ print self._templates['stderr'] % stderr
+ print "Command: %s" % result.cmd
+ if output.HasCrashed():
+ print "exit code: %d" % output.exit_code
+ print "--- CRASHED ---"
+ if output.HasTimedOut():
+ print "--- TIMEOUT ---"
+
+ def finished(self):
+ self._print_progress('Done')
+ print
+
+ def _print_progress(self, name):
+ self._clear_line(self._last_status_length)
+ elapsed = time.time() - self._start_time
+ if not self._total:
+ progress = 0
+ else:
+ progress = (self._passed + self._failed) * 100 // self._total
+ status = self._templates['status_line'] % {
+ 'passed': self._passed,
+ 'progress': progress,
+ 'failed': self._failed,
+ 'test': name,
+ 'mins': int(elapsed) / 60,
+ 'secs': int(elapsed) % 60
+ }
+ status = self._truncate(status, 78)
+ self._last_status_length = len(status)
+ print status,
+ sys.stdout.flush()
+
+ def _truncate(self, string, length):
+ if length and len(string) > (length - 3):
+ return string[:(length - 3)] + "..."
+ else:
+ return string
+
+ def _clear_line(self, last_length):
+ raise NotImplementedError()
+
+
+class ColorProgressIndicator(CompactProgressIndicator):
+ def __init__(self):
+ templates = {
+ 'status_line': ("[%(mins)02i:%(secs)02i|"
+ "\033[34m%%%(progress) 4d\033[0m|"
+ "\033[32m+%(passed) 4d\033[0m|"
+ "\033[31m-%(failed) 4d\033[0m]: %(test)s"),
+ 'stdout': "\033[1m%s\033[0m",
+ 'stderr': "\033[31m%s\033[0m",
+ }
+ super(ColorProgressIndicator, self).__init__(templates)
+
+ def _clear_line(self, last_length):
+ print "\033[1K\r",
+
+
+class MonochromeProgressIndicator(CompactProgressIndicator):
+ def __init__(self):
+ templates = {
+ 'status_line': ("[%(mins)02i:%(secs)02i|%%%(progress) 4d|"
+ "+%(passed) 4d|-%(failed) 4d]: %(test)s"),
+ 'stdout': '%s',
+ 'stderr': '%s',
+ }
+ super(MonochromeProgressIndicator, self).__init__(templates)
+
+ def _clear_line(self, last_length):
+ print ("\r" + (" " * last_length) + "\r"),
+
+
+class JUnitTestProgressIndicator(ProgressIndicator):
+ def __init__(self, junitout, junittestsuite):
+ super(JUnitTestProgressIndicator, self).__init__()
+ self._requirement = base.DROP_PASS_STDOUT
+
+ self.outputter = junit_output.JUnitTestOutput(junittestsuite)
+ if junitout:
+ self.outfile = open(junitout, "w")
+ else:
+ self.outfile = sys.stdout
+
+ def _on_result_for(self, test, result):
+ # TODO(majeski): Support for dummy/grouped results
+ fail_text = ""
+ output = result.output
+ if result.has_unexpected_output:
+ stdout = output.stdout.strip()
+ if len(stdout):
+ fail_text += "stdout:\n%s\n" % stdout
+ stderr = output.stderr.strip()
+ if len(stderr):
+ fail_text += "stderr:\n%s\n" % stderr
+ fail_text += "Command: %s" % result.cmd.to_string()
+ if output.HasCrashed():
+ fail_text += "exit code: %d\n--- CRASHED ---" % output.exit_code
+ if output.HasTimedOut():
+ fail_text += "--- TIMEOUT ---"
+ self.outputter.HasRunTest(
+ test_name=str(test),
+ test_cmd=result.cmd.to_string(relative=True),
+ test_duration=output.duration,
+ test_failure=fail_text)
+
+ def finished(self):
+ self.outputter.FinishAndWrite(self.outfile)
+ if self.outfile != sys.stdout:
+ self.outfile.close()
+
+
+class JsonTestProgressIndicator(ProgressIndicator):
+ def __init__(self, json_test_results, arch, mode, random_seed):
+ super(JsonTestProgressIndicator, self).__init__()
+ # We want to drop stdout/err for all passed tests on the first try, but we
+ # need to get outputs for all runs after the first one. To accommodate that,
+ # reruns are set to keep the result no matter what the requirement says,
+ # i.e. keep_output is set to True in the RerunProc.
+ self._requirement = base.DROP_PASS_STDOUT
+
+ self.json_test_results = json_test_results
+ self.arch = arch
+ self.mode = mode
+ self.random_seed = random_seed
+ self.results = []
+ self.tests = []
+
+ def _on_result_for(self, test, result):
+ if result.is_rerun:
+ self.process_results(test, result.results)
+ else:
+ self.process_results(test, [result])
+
+ def process_results(self, test, results):
+ for run, result in enumerate(results):
+ # TODO(majeski): Support for dummy/grouped results
+ output = result.output
+ # Buffer all tests for sorting the durations in the end.
+ # TODO(machenbach): Running average + buffer only slowest 20 tests.
+ self.tests.append((test, output.duration, result.cmd))
+
+ # Omit tests that run as expected on the first try.
+ # Everything that happens after the first run is included in the output
+ # even if it flakily passes.
+ if not result.has_unexpected_output and run == 0:
+ continue
+
+ self.results.append({
+ "name": str(test),
+ "flags": result.cmd.args,
+ "command": result.cmd.to_string(relative=True),
+ "run": run + 1,
+ "stdout": output.stdout,
+ "stderr": output.stderr,
+ "exit_code": output.exit_code,
+ "result": test.output_proc.get_outcome(output),
+ "expected": test.expected_outcomes,
+ "duration": output.duration,
+
+ # TODO(machenbach): This stores only the global random seed from the
+ # context and not possible overrides when using random-seed stress.
+ "random_seed": self.random_seed,
+ "target_name": test.get_shell(),
+ "variant": test.variant,
+ })
+
+ def finished(self):
+ complete_results = []
+ if os.path.exists(self.json_test_results):
+ with open(self.json_test_results, "r") as f:
+ # Buildbot might start out with an empty file.
+ complete_results = json.loads(f.read() or "[]")
+
+ duration_mean = None
+ if self.tests:
+ # Get duration mean.
+ duration_mean = (
+ sum(duration for (_, duration, cmd) in self.tests) /
+ float(len(self.tests)))
+
+ # Sort tests by duration.
+ self.tests.sort(key=lambda (_, duration, cmd): duration, reverse=True)
+ slowest_tests = [
+ {
+ "name": str(test),
+ "flags": cmd.args,
+ "command": cmd.to_string(relative=True),
+ "duration": duration,
+ "marked_slow": test.is_slow,
+ } for (test, duration, cmd) in self.tests[:20]
+ ]
+
+ complete_results.append({
+ "arch": self.arch,
+ "mode": self.mode,
+ "results": self.results,
+ "slowest_tests": slowest_tests,
+ "duration_mean": duration_mean,
+ "test_total": len(self.tests),
+ })
+
+ with open(self.json_test_results, "w") as f:
+ f.write(json.dumps(complete_results))
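
All of the indicators above build on the same two observer hooks. A minimal
custom indicator (hypothetical, not part of the patch) only needs to declare
how much of each result it wants to keep and override those hooks; finished()
is what the runner calls on each indicator at the end:

from testrunner.testproc import base

class FailureListIndicator(base.TestProcObserver):
  def __init__(self):
    super(FailureListIndicator, self).__init__()
    # Keep output only for failures; passed-test output can be dropped.
    self._requirement = base.DROP_PASS_OUTPUT
    self._failed = []

  def _on_result_for(self, test, result):
    if result.has_unexpected_output:
      self._failed.append(str(test))

  def finished(self):
    for name in self._failed:
      print 'FAILED: %s' % name
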
diff --git a/deps/v8/tools/testrunner/testproc/rerun.py b/deps/v8/tools/testrunner/testproc/rerun.py
new file mode 100644
index 0000000000..7f96e0260c
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/rerun.py
@@ -0,0 +1,59 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import collections
+
+from . import base
+from .result import RerunResult
+
+
+class RerunProc(base.TestProcProducer):
+ def __init__(self, rerun_max, rerun_max_total=None):
+ super(RerunProc, self).__init__('Rerun')
+ self._requirement = base.DROP_OUTPUT
+
+ self._rerun = {}
+ self._results = collections.defaultdict(list)
+ self._rerun_max = rerun_max
+ self._rerun_total_left = rerun_max_total
+
+ def _next_test(self, test):
+ self._send_next_subtest(test)
+
+ def _result_for(self, test, subtest, result):
+ # First result
+ if subtest.procid[-2:] == '-1':
+ # Passed, no reruns
+ if not result.has_unexpected_output:
+ self._send_result(test, result)
+ return
+
+ self._rerun[test.procid] = 0
+
+ results = self._results[test.procid]
+ results.append(result)
+
+ if self._needs_rerun(test, result):
+ self._rerun[test.procid] += 1
+ if self._rerun_total_left is not None:
+ self._rerun_total_left -= 1
+ self._send_next_subtest(test, self._rerun[test.procid])
+ else:
+ result = RerunResult.create(results)
+ self._finalize_test(test)
+ self._send_result(test, result)
+
+ def _needs_rerun(self, test, result):
+ # TODO(majeski): Limit reruns count for slow tests.
+ return ((self._rerun_total_left is None or self._rerun_total_left > 0) and
+ self._rerun[test.procid] < self._rerun_max and
+ result.has_unexpected_output)
+
+ def _send_next_subtest(self, test, run=0):
+ subtest = self._create_subtest(test, str(run + 1), keep_output=(run != 0))
+ self._send_test(subtest)
+
+ def _finalize_test(self, test):
+ del self._rerun[test.procid]
+ del self._results[test.procid]
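
The retry budget in _needs_rerun combines a per-test cap with an optional
shared total. A standalone sketch of the same decision (illustrative only):

def needs_rerun(reruns_so_far, rerun_max, total_left, failed):
  return ((total_left is None or total_left > 0) and
          reruns_so_far < rerun_max and
          failed)

# A test that keeps failing is retried at most rerun_max times, or until
# the shared budget is exhausted, whichever comes first.
reruns, total_left = 0, 4
while needs_rerun(reruns, 3, total_left, failed=True):
  reruns += 1
  total_left -= 1
print reruns  # 3
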
diff --git a/deps/v8/tools/testrunner/testproc/result.py b/deps/v8/tools/testrunner/testproc/result.py
new file mode 100644
index 0000000000..c817fc06ec
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/result.py
@@ -0,0 +1,97 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+class ResultBase(object):
+ @property
+ def is_skipped(self):
+ return False
+
+ @property
+ def is_grouped(self):
+ return False
+
+ @property
+ def is_rerun(self):
+ return False
+
+
+class Result(ResultBase):
+ """Result created by the output processor."""
+
+ def __init__(self, has_unexpected_output, output, cmd=None):
+ self.has_unexpected_output = has_unexpected_output
+ self.output = output
+ self.cmd = cmd
+
+
+class GroupedResult(ResultBase):
+ """Result consisting of multiple results. It can be used by processors that
+ create multiple subtests for each test and want to pass all results back.
+ """
+
+ @staticmethod
+ def create(results):
+ """Create grouped result from the list of results. It filters out skipped
+ results. If all results are skipped results it returns skipped result.
+
+ Args:
+ results: list of pairs (test, result)
+ """
+ results = [(t, r) for (t, r) in results if not r.is_skipped]
+ if not results:
+ return SKIPPED
+ return GroupedResult(results)
+
+ def __init__(self, results):
+ self.results = results
+
+ @property
+ def is_grouped(self):
+ return True
+
+
+class SkippedResult(ResultBase):
+ """Result without any meaningful value. Used primarily to inform the test
+ processor that its test wasn't executed.
+ """
+
+ @property
+ def is_skipped(self):
+ return True
+
+
+SKIPPED = SkippedResult()
+
+
+class RerunResult(Result):
+ """Result generated from several reruns of the same test. It's a subclass of
+ Result since the result of rerun is result of the last run. In addition to
+ normal result it contains results of all reruns.
+ """
+ @staticmethod
+ def create(results):
+ """Create RerunResult based on list of results. List cannot be empty. If it
+ has only one element it's returned as a result.
+ """
+ assert results
+
+ if len(results) == 1:
+ return results[0]
+ return RerunResult(results)
+
+ def __init__(self, results):
+ """Has unexpected output and the output itself of the RerunResult equals to
+ the last result in the passed list.
+ """
+ assert results
+
+ last = results[-1]
+ super(RerunResult, self).__init__(last.has_unexpected_output, last.output,
+ last.cmd)
+ self.results = results
+
+ @property
+ def is_rerun(self):
+ return True
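
A quick illustration of the contracts above (assuming the module imports as
shown): a single-element rerun collapses to the plain result, while multiple
runs report the outcome of the last run.

from testrunner.testproc.result import Result, RerunResult

first = Result(has_unexpected_output=True, output='fail-output')
second = Result(has_unexpected_output=False, output='pass-output')

assert RerunResult.create([first]) is first
combined = RerunResult.create([first, second])
assert combined.is_rerun
assert not combined.has_unexpected_output  # taken from the last run
assert combined.results == [first, second]
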
diff --git a/deps/v8/tools/testrunner/testproc/shard.py b/deps/v8/tools/testrunner/testproc/shard.py
new file mode 100644
index 0000000000..1caac9fee6
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/shard.py
@@ -0,0 +1,30 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from . import base
+
+
+class ShardProc(base.TestProcFilter):
+ """Processor distributing tests between shards.
+ It simply passes every n-th test. To be deterministic it has to be placed
+ before all processors that generate tests dynamically.
+ """
+ def __init__(self, myid, shards_count):
+ """
+ Args:
+ myid: id of the shard within [0; shards_count - 1]
+ shards_count: number of shards
+ """
+ super(ShardProc, self).__init__()
+
+ assert myid >= 0 and myid < shards_count
+
+ self._myid = myid
+ self._shards_count = shards_count
+ self._last = 0
+
+ def _filter(self, test):
+ res = self._last != self._myid
+ self._last = (self._last + 1) % self._shards_count
+ return res
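
Because ShardProc filters round-robin, shard i of n simply takes the tests at
positions i, i+n, i+2n, ... in the incoming order. A standalone equivalent:

tests = ['t%d' % i for i in range(7)]
shards_count = 3

for myid in range(shards_count):
  mine = [t for i, t in enumerate(tests) if i % shards_count == myid]
  print 'shard %d: %s' % (myid, mine)
# shard 0: ['t0', 't3', 't6']
# shard 1: ['t1', 't4']
# shard 2: ['t2', 't5']
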
diff --git a/deps/v8/tools/testrunner/testproc/variant.py b/deps/v8/tools/testrunner/testproc/variant.py
new file mode 100644
index 0000000000..dba1af91fc
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/variant.py
@@ -0,0 +1,68 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from . import base
+from ..local.variants import ALL_VARIANTS, ALL_VARIANT_FLAGS
+from .result import GroupedResult
+
+
+STANDARD_VARIANT = set(["default"])
+
+
+class VariantProc(base.TestProcProducer):
+ """Processor creating variants.
+
+ For each test it keeps a generator that yields the variant, flags and procid
+ suffix. It produces variants one at a time, waiting for the result of one
+ variant before creating the next variant of the same test.
+ It maintains the order of the variants passed to __init__.
+
+ There are cases in which a particular variant of a test is not valid. To
+ ignore such subtests, a StatusFileFilterProc should be placed somewhere
+ after the VariantProc.
+ """
+
+ def __init__(self, variants):
+ super(VariantProc, self).__init__('VariantProc')
+ self._next_variant = {}
+ self._variant_gens = {}
+ self._variants = variants
+
+ def setup(self, requirement=base.DROP_RESULT):
+ super(VariantProc, self).setup(requirement)
+
+ # VariantProc is optimized for dropping the result and should be placed
+ # in the chain at a point where that is possible.
+ assert requirement == base.DROP_RESULT
+
+ def _next_test(self, test):
+ gen = self._variants_gen(test)
+ self._next_variant[test.procid] = gen
+ self._try_send_new_subtest(test, gen)
+
+ def _result_for(self, test, subtest, result):
+ gen = self._next_variant[test.procid]
+ self._try_send_new_subtest(test, gen)
+
+ def _try_send_new_subtest(self, test, variants_gen):
+ for variant, flags, suffix in variants_gen:
+ subtest = self._create_subtest(test, '%s-%s' % (variant, suffix),
+ variant=variant, flags=flags)
+ self._send_test(subtest)
+ return
+
+ del self._next_variant[test.procid]
+ self._send_result(test, None)
+
+ def _variants_gen(self, test):
+ """Generator producing (variant, flags, procid suffix) tuples."""
+ return self._get_variants_gen(test).gen(test)
+
+ def _get_variants_gen(self, test):
+ key = test.suite.name
+ variants_gen = self._variant_gens.get(key)
+ if not variants_gen:
+ variants_gen = test.suite.get_variants_gen(self._variants)
+ self._variant_gens[key] = variants_gen
+ return variants_gen
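
The generator VariantProc consumes yields (variant, flags, procid suffix)
tuples. A toy version of the suite-provided generator (hypothetical; real
suites build theirs from ALL_VARIANT_FLAGS and may use different suffixes):

TOY_VARIANT_FLAGS = {
  'default': [[]],
  'stress': [['--stress-opt', '--always-opt']],
}

def toy_variants_gen(variants):
  def gen(test):
    for variant in variants:
      for flags in TOY_VARIANT_FLAGS[variant]:
        yield variant, flags, variant  # suffix scheme simplified
  return gen

for variant, flags, suffix in toy_variants_gen(['default', 'stress'])('t'):
  print variant, flags, suffix
# default [] default
# stress ['--stress-opt', '--always-opt'] stress
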
diff --git a/deps/v8/tools/turbolizer/code-view.js b/deps/v8/tools/turbolizer/code-view.js
index 8165210c31..7f9728a83a 100644
--- a/deps/v8/tools/turbolizer/code-view.js
+++ b/deps/v8/tools/turbolizer/code-view.js
@@ -164,8 +164,6 @@ class CodeView extends View {
}
}
}
-
- view.resizeToParent();
}
deleteContent() {}
diff --git a/deps/v8/tools/turbolizer/graph-view.js b/deps/v8/tools/turbolizer/graph-view.js
index 8de050f3e6..cdbc40c8f0 100644
--- a/deps/v8/tools/turbolizer/graph-view.js
+++ b/deps/v8/tools/turbolizer/graph-view.js
@@ -706,6 +706,7 @@ class GraphView extends View {
.on("mousedown", function(d){
graph.pathMouseDown.call(graph, d3.select(this), d);
})
+ .attr("adjacentToHover", "false");
// Set the correct styles on all of the paths
visibleEdges.classed('value', function(e) {
@@ -740,7 +741,8 @@ class GraphView extends View {
var newGs = graph.visibleNodes.enter()
.append("g");
- newGs.classed("control", function(n) { return n.isControl(); })
+ newGs.classed("turbonode", function(n) { return true; })
+ .classed("control", function(n) { return n.isControl(); })
.classed("live", function(n) { return n.isLive(); })
.classed("dead", function(n) { return !n.isLive(); })
.classed("javascript", function(n) { return n.isJavaScript(); })
@@ -754,6 +756,34 @@ class GraphView extends View {
.on("mouseup", function(d){
graph.nodeMouseUp.call(graph, d3.select(this), d);
})
+ .on('mouseover', function(d){
+ var nodeSelection = d3.select(this);
+ let node = graph.nodeMap[d.id];
+ let adjInputEdges = graph.visibleEdges.filter(e => { return e.target === node; });
+ let adjOutputEdges = graph.visibleEdges.filter(e => { return e.source === node; });
+ adjInputEdges.attr('relToHover', "input");
+ adjOutputEdges.attr('relToHover', "output");
+ let adjInputNodes = adjInputEdges.data().map(e => e.source);
+ graph.visibleNodes.data(adjInputNodes, function(d) {
+ return d.id;
+ }).attr('relToHover', "input");
+ let adjOutputNodes = adjOutputEdges.data().map(e => e.target);
+ graph.visibleNodes.data(adjOutputNodes, function(d) {
+ return d.id;
+ }).attr('relToHover', "output");
+ graph.updateGraphVisibility();
+ })
+ .on('mouseout', function(d){
+ var nodeSelection = d3.select(this);
+ let node = graph.nodeMap[d.id];
+ let adjEdges = graph.visibleEdges.filter(e => { return e.target === node || e.source === node; });
+ adjEdges.attr('relToHover', "none");
+ let adjNodes = adjEdges.data().map(e => e.target).concat(adjEdges.data().map(e => e.source));
+ let nodes = graph.visibleNodes.data(adjNodes, function(d) {
+ return d.id;
+ }).attr('relToHover', "none");
+ graph.updateGraphVisibility();
+ })
.call(graph.drag);
newGs.append("rect")
diff --git a/deps/v8/tools/turbolizer/index.html b/deps/v8/tools/turbolizer/index.html
index 4066fd8010..552e83783a 100644
--- a/deps/v8/tools/turbolizer/index.html
+++ b/deps/v8/tools/turbolizer/index.html
@@ -4,13 +4,14 @@
<title>Turbolizer</title>
<link rel="stylesheet" href="turbo-visualizer.css" />
</head>
- <body width="100%">
+ <body>
<div id="left">
<div id='source-text'>
<pre id='source-text-pre'\>
</div>
</div>
- <div id="middle">
+ <div class="resizer-left"></div>
+ <div id="middle" class="resizable-pane">
<div id="graph-toolbox-anchor">
<span id="graph-toolbox">
<input id="layout" type="image" title="layout graph" src="layout-icon.png"
@@ -47,12 +48,13 @@
<pre id="schedule-text-pre" class='prettyprint prettyprinted'>
<ul id="schedule-list" class='nolinenums noindent'>
</ul>
- </pre>
+ </pre>
</div>
<div id='text-placeholder' width="0px" height="0px" style="position: absolute; top:100000px;" ><svg><text text-anchor="right">
<tspan white-space="inherit" id="text-measure"/>
</text></svg></div>
</div>
+ <div class="resizer-right"></div>
<div id="right">
<div id='disassembly'>
<pre id='disassembly-text-pre' class='prettyprint prettyprinted'>
@@ -63,15 +65,15 @@
</div>
<div id="source-collapse" class="collapse-pane">
<input id="source-expand" type="image" title="show source"
- src="right-arrow.png" class="button-input-invisible">
+ src="right-arrow.png" class="button-input invisible">
<input id="source-shrink" type="image" title="hide source"
src="left-arrow.png" class="button-input">
</div>
<div id="disassembly-collapse" class="collapse-pane">
<input id="disassembly-expand" type="image" title="show disassembly"
- src="left-arrow.png" class="button-input">
- <input id="disassembly-shrink" type="image" title="hide disassembly"
- src="right-arrow.png" class="button-input-invisible">
+ src="left-arrow.png" class="button-input invisible">
+ <input id="disassembly-shrink" type="image" title="hide disassembly"
+ src="right-arrow.png" class="button-input">
</div>
<script src="https://cdn.rawgit.com/google/code-prettify/master/loader/run_prettify.js"></script>
<script src="http://d3js.org/d3.v3.min.js" charset="utf-8"></script>
diff --git a/deps/v8/tools/turbolizer/turbo-visualizer.css b/deps/v8/tools/turbolizer/turbo-visualizer.css
index 69a6ccabb5..7fd9c4852a 100644
--- a/deps/v8/tools/turbolizer/turbo-visualizer.css
+++ b/deps/v8/tools/turbolizer/turbo-visualizer.css
@@ -41,10 +41,8 @@
outline: none;
}
-.button-input-invisible {
- vertical-align: middle;
- width: 0px;
- visibility: hidden;
+.invisible {
+ display: none;
}
@@ -57,9 +55,12 @@
!important
}
+
body {
margin: 0;
padding: 0;
+ height: 100vh;
+ width: 100vw;
overflow:hidden;
-webkit-touch-callout: none;
-webkit-user-select: none;
@@ -69,8 +70,8 @@ body {
user-select: none;
}
-p {
- text-align: center;
+p {
+ text-align: center;
overflow: overlay;
position: relative;
}
@@ -97,21 +98,47 @@ div.scrollable {
overflow-y: scroll; overflow-x: hidden;
}
-g.control rect {
+g.turbonode[relToHover="input"] rect {
+ stroke: #67e62c;
+ stroke-width: 16px;
+}
+
+g.turbonode[relToHover="output"] rect {
+ stroke: #d23b14;
+ stroke-width: 16px;
+}
+
+path[relToHover="input"] {
+ stroke: #67e62c;
+ stroke-width: 16px;
+}
+
+path[relToHover="output"] {
+ stroke: #d23b14;
+ stroke-width: 16px;
+}
+
+
+g.turbonode:hover rect {
+ stroke: #000000;
+ stroke-width: 7px;
+}
+
+g.control rect {
fill: #EFCC00;
stroke: #080808;
stroke-width: 5px;
}
-g.javascript rect {
+g.javascript rect {
fill: #DD7E6B;
}
-g.simplified rect {
+g.simplified rect {
fill: #3C78D8;
}
-g.machine rect {
+g.machine rect {
fill: #6AA84F;
}
@@ -156,47 +183,14 @@ circle.halfFilledBubbleStyle:hover {
stroke-width: 3px;
}
-path.effect {
+path {
fill: none;
stroke: #080808;
stroke-width: 4px;
cursor: default;
}
-path.effect:hover {
- stroke-width: 6px;
-}
-
-path.control {
- fill: none;
- stroke: #080808;
- stroke-width: 4px;
- cursor: default;
-}
-
-path.control:hover {
- stroke-width: 6px;
-}
-
-path.value {
- fill: none;
- stroke: #888888;
- stroke-width: 4px;
- cursor: default;
-}
-
-path.value:hover {
- stroke-width: 6px;
-}
-
-path.frame-state {
- fill: none;
- stroke: #080808;
- stroke-width: 4px;
- cursor: default;
-}
-
-path.frame-state:hover{
+path:hover {
stroke-width: 6px;
}
@@ -246,33 +240,20 @@ span.linkable-text:hover {
font-weight: bold;
}
+
#left {
float: left; height: 100%; background-color: #FFFFFF;
- -webkit-transition: all 1s ease-in-out;
- -moz-transition: all 1s ease-in-out;
- -o-transition: all 1s ease-in-out;
- transition: all .3s ease-in-out;
- transition-property: width;
}
#middle {
- float:left; height: 100%; background-color: #F8F8F8;
- -webkit-transition: all 1s ease-in-out;
- -moz-transition: all 1s ease-in-out;
- -o-transition: all 1s ease-in-out;
- transition: all .3s ease-in-out;
- transition-property: width;
+ float:left; height: 100%; background-color: #F8F8F8;
}
#right {
- float: right; background-color: #FFFFFF;
- -webkit-transition: all 1s ease-in-out;
- -moz-transition: all 1s ease-in-out;
- -o-transition: all 1s ease-in-out;
- transition: all .3s ease-in-out;
- transition-property: width;
+ float: right; background-color: #FFFFFF;
}
+
#disassembly-collapse {
right: 0;
}
@@ -288,7 +269,7 @@ span.linkable-text:hover {
#graph-toolbox {
position: relative;
top: 1em;
- left: 0.7em;
+ left: 25px;
border: 2px solid #eee8d5;
border-radius: 5px;
padding: 0.7em;
@@ -337,4 +318,44 @@ tspan {
text {
dominant-baseline: text-before-edge;
+}
+
+.resizer-left {
+ position:absolute;
+ width: 4px;
+ height:100%;
+ background: #a0a0a0;
+ cursor: pointer;
+}
+
+.resizer-left.snapped {
+ width: 12px;
+}
+
+.resizer-left:hover {
+ background: orange;
+}
+
+.resizer-left.dragged {
+ background: orange;
+}
+
+.resizer-right {
+ position:absolute;
+ width: 4px;
+ height:100%;
+ background: #a0a0a0;
+ cursor: pointer;
+}
+
+.resizer-right.snapped {
+ width: 12px;
+}
+
+.resizer-right:hover {
+ background: orange;
+}
+
+.resizer-right.dragged {
+ background: orange;
} \ No newline at end of file
diff --git a/deps/v8/tools/turbolizer/turbo-visualizer.js b/deps/v8/tools/turbolizer/turbo-visualizer.js
index 280caf01db..c04384810b 100644
--- a/deps/v8/tools/turbolizer/turbo-visualizer.js
+++ b/deps/v8/tools/turbolizer/turbo-visualizer.js
@@ -1,99 +1,191 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
+// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-document.onload = (function(d3){
- "use strict";
- var jsonObj;
- var sourceExpandClassList = document.getElementById(SOURCE_EXPAND_ID).classList;
- var sourceCollapseClassList = document.getElementById(SOURCE_COLLAPSE_ID).classList;
- var sourceExpanded = sourceCollapseClassList.contains(COLLAPSE_PANE_BUTTON_VISIBLE);
- var disassemblyExpandClassList = document.getElementById(DISASSEMBLY_EXPAND_ID).classList;
- var disassemblyCollapseClassList = document.getElementById(DISASSEMBLY_COLLAPSE_ID).classList;
- var disassemblyExpanded = disassemblyCollapseClassList.contains(COLLAPSE_PANE_BUTTON_VISIBLE);
- var svg = null;
- var graph = null;
- var schedule = null;
- var empty = null;
- var currentPhaseView = null;
- var disassemblyView = null;
- var sourceView = null;
- var selectionBroker = null;
+class Snapper {
- function updatePanes() {
- if (sourceExpanded) {
- if (disassemblyExpanded) {
- d3.select("#" + SOURCE_PANE_ID).style(WIDTH, "30%");
- d3.select("#" + INTERMEDIATE_PANE_ID).style(WIDTH, "40%");
- d3.select("#" + GENERATED_PANE_ID).style(WIDTH, "30%");
- } else {
- d3.select("#" + SOURCE_PANE_ID).style(WIDTH, "50%");
- d3.select("#" + INTERMEDIATE_PANE_ID).style(WIDTH, "50%");
- d3.select("#" + GENERATED_PANE_ID).style(WIDTH, "0%");
- }
- } else {
- if (disassemblyExpanded) {
- d3.select("#" + SOURCE_PANE_ID).style(WIDTH, "0%");
- d3.select("#" + INTERMEDIATE_PANE_ID).style(WIDTH, "50%");
- d3.select("#" + GENERATED_PANE_ID).style(WIDTH, "50%");
- } else {
- d3.select("#" + SOURCE_PANE_ID).style(WIDTH, "0%");
- d3.select("#" + INTERMEDIATE_PANE_ID).style(WIDTH, "100%");
- d3.select("#" + GENERATED_PANE_ID).style(WIDTH, "0%");
- }
- }
+ constructor(resizer) {
+ let snapper = this;
+ snapper.resizer = resizer;
+ snapper.sourceExpand = d3.select("#" + SOURCE_EXPAND_ID);
+ snapper.sourceCollapse = d3.select("#" + SOURCE_COLLAPSE_ID);
+ snapper.disassemblyExpand = d3.select("#" + DISASSEMBLY_EXPAND_ID);
+ snapper.disassemblyCollapse = d3.select("#" + DISASSEMBLY_COLLAPSE_ID);
+
+ d3.select("#source-collapse").on("click", function(){
+ resizer.snapper.toggleSourceExpanded();
+ });
+ d3.select("#disassembly-collapse").on("click", function(){
+ resizer.snapper.toggleDisassemblyExpanded();
+ });
}
- function getLastExpandedState(type, default_state) {
+ getLastExpandedState(type, default_state) {
var state = window.sessionStorage.getItem("expandedState-"+type);
if (state === null) return default_state;
return state === 'true';
}
- function setLastExpandedState(type, state) {
+ setLastExpandedState(type, state) {
window.sessionStorage.setItem("expandedState-"+type, state);
}
- function toggleSourceExpanded() {
- setSourceExpanded(!sourceExpanded);
+ toggleSourceExpanded() {
+ this.setSourceExpanded(!this.sourceExpand.classed("invisible"));
+ }
+
+ sourceExpandUpdate(newState) {
+ this.setLastExpandedState("source", newState);
+ this.sourceExpand.classed("invisible", newState);
+ this.sourceCollapse.classed("invisible", !newState);
}
- function setSourceExpanded(newState) {
- sourceExpanded = newState;
- setLastExpandedState("source", newState);
- updatePanes();
+ setSourceExpanded(newState) {
+ if (this.sourceExpand.classed("invisible") === newState) return;
+ this.sourceExpandUpdate(newState);
+ let resizer = this.resizer;
if (newState) {
- sourceCollapseClassList.add(COLLAPSE_PANE_BUTTON_VISIBLE);
- sourceCollapseClassList.remove(COLLAPSE_PANE_BUTTON_INVISIBLE);
- sourceExpandClassList.add(COLLAPSE_PANE_BUTTON_INVISIBLE);
- sourceExpandClassList.remove(COLLAPSE_PANE_BUTTON_VISIBLE);
+ resizer.sep_left = resizer.sep_left_snap;
+ resizer.sep_left_snap = 0;
} else {
- sourceCollapseClassList.add(COLLAPSE_PANE_BUTTON_INVISIBLE);
- sourceCollapseClassList.remove(COLLAPSE_PANE_BUTTON_VISIBLE);
- sourceExpandClassList.add(COLLAPSE_PANE_BUTTON_VISIBLE);
- sourceExpandClassList.remove(COLLAPSE_PANE_BUTTON_INVISIBLE);
+ resizer.sep_left_snap = resizer.sep_left;
+ resizer.sep_left = 0;
}
+ resizer.updatePanes();
}
- function toggleDisassemblyExpanded() {
- setDisassemblyExpanded(!disassemblyExpanded);
+ toggleDisassemblyExpanded() {
+ this.setDisassemblyExpanded(!this.disassemblyExpand.classed("invisible"));
}
- function setDisassemblyExpanded(newState) {
- disassemblyExpanded = newState;
- setLastExpandedState("disassembly", newState);
- updatePanes();
+ disassemblyExpandUpdate(newState) {
+ this.setLastExpandedState("disassembly", newState);
+ this.disassemblyExpand.classed("invisible", newState);
+ this.disassemblyCollapse.classed("invisible", !newState);
+ }
+
+ setDisassemblyExpanded(newState) {
+ console.log(newState)
+ if (this.disassemblyExpand.classed("invisible") === newState) return;
+ this.disassemblyExpandUpdate(newState);
+ let resizer = this.resizer;
if (newState) {
- disassemblyCollapseClassList.add(COLLAPSE_PANE_BUTTON_VISIBLE);
- disassemblyCollapseClassList.remove(COLLAPSE_PANE_BUTTON_INVISIBLE);
- disassemblyExpandClassList.add(COLLAPSE_PANE_BUTTON_INVISIBLE);
- disassemblyExpandClassList.remove(COLLAPSE_PANE_BUTTON_VISIBLE);
+ resizer.sep_right = resizer.sep_right_snap;
+ resizer.sep_right_snap = resizer.client_width;
+ console.log("set expand")
} else {
- disassemblyCollapseClassList.add(COLLAPSE_PANE_BUTTON_INVISIBLE);
- disassemblyCollapseClassList.remove(COLLAPSE_PANE_BUTTON_VISIBLE);
- disassemblyExpandClassList.add(COLLAPSE_PANE_BUTTON_VISIBLE);
- disassemblyExpandClassList.remove(COLLAPSE_PANE_BUTTON_INVISIBLE);
+ resizer.sep_right_snap = resizer.sep_right;
+ resizer.sep_right = resizer.client_width;
+ console.log("set collapse")
}
+ resizer.updatePanes();
+ }
+
+ panesUpdated() {
+ this.sourceExpandUpdate(this.resizer.sep_left > this.resizer.dead_width);
+ this.disassemblyExpandUpdate(this.resizer.sep_right <
+ (this.resizer.client_width - this.resizer.dead_width));
+ }
+}
+
+class Resizer {
+ constructor(panes_updated_callback, dead_width) {
+ let resizer = this;
+ resizer.snapper = new Snapper(resizer)
+ resizer.panes_updated_callback = panes_updated_callback;
+ resizer.dead_width = dead_width
+ resizer.client_width = d3.select("body").node().getBoundingClientRect().width;
+ resizer.left = d3.select("#" + SOURCE_PANE_ID);
+ resizer.middle = d3.select("#" + INTERMEDIATE_PANE_ID);
+ resizer.right = d3.select("#" + GENERATED_PANE_ID);
+ resizer.resizer_left = d3.select('.resizer-left');
+ resizer.resizer_right = d3.select('.resizer-right');
+ resizer.sep_left = resizer.client_width/3;
+ resizer.sep_right = resizer.client_width/3*2;
+ resizer.sep_left_snap = 0;
+ resizer.sep_right_snap = 0;
+ // Offset to prevent resizers from sliding slightly over one another.
+ resizer.sep_width_offset = 7;
+
+ let dragResizeLeft = d3.behavior.drag()
+ .on('drag', function() {
+ let x = d3.mouse(this.parentElement)[0];
+ resizer.sep_left = Math.min(Math.max(0,x), resizer.sep_right-resizer.sep_width_offset);
+ resizer.updatePanes();
+ })
+ .on('dragstart', function() {
+ resizer.resizer_left.classed("dragged", true);
+ let x = d3.mouse(this.parentElement)[0];
+ if (x > dead_width) {
+ resizer.sep_left_snap = resizer.sep_left;
+ }
+ })
+ .on('dragend', function() {
+ resizer.resizer_left.classed("dragged", false);
+ });
+ resizer.resizer_left.call(dragResizeLeft);
+
+ let dragResizeRight = d3.behavior.drag()
+ .on('drag', function() {
+ let x = d3.mouse(this.parentElement)[0];
+ resizer.sep_right = Math.max(resizer.sep_left+resizer.sep_width_offset, Math.min(x, resizer.client_width));
+ resizer.updatePanes();
+ })
+ .on('dragstart', function() {
+ resizer.resizer_right.classed("dragged", true);
+ let x = d3.mouse(this.parentElement)[0];
+ if (x < (resizer.client_width-dead_width)) {
+ resizer.sep_right_snap = resizer.sep_right;
+ }
+ })
+ .on('dragend', function() {
+ resizer.resizer_right.classed("dragged", false);
+ });
+ resizer.resizer_right.call(dragResizeRight);
+ window.onresize = function(){
+ resizer.updateWidths();
+ /*fitPanesToParents();*/
+ resizer.updatePanes();
+ };
+ }
+
+ updatePanes() {
+ let left_snapped = this.sep_left === 0;
+ let right_snapped = this.sep_right >= this.client_width - 1;
+ this.resizer_left.classed("snapped", left_snapped);
+ this.resizer_right.classed("snapped", right_snapped);
+ this.left.style('width', this.sep_left + 'px');
+ this.middle.style('width', (this.sep_right-this.sep_left) + 'px');
+ this.right.style('width', (this.client_width - this.sep_right) + 'px');
+ this.resizer_left.style('left', this.sep_left + 'px');
+ this.resizer_right.style('right', (this.client_width - this.sep_right - 1) + 'px');
+
+ this.snapper.panesUpdated();
+ this.panes_updated_callback();
+ }
+
+ updateWidths() {
+ this.client_width = d3.select("body").node().getBoundingClientRect().width;
+ this.sep_right = Math.min(this.sep_right, this.client_width);
+ this.sep_left = Math.min(Math.max(0, this.sep_left), this.sep_right);
+ }
+}
+
+document.onload = (function(d3){
+ "use strict";
+ var jsonObj;
+ var svg = null;
+ var graph = null;
+ var schedule = null;
+ var empty = null;
+ var currentPhaseView = null;
+ var disassemblyView = null;
+ var sourceView = null;
+ var selectionBroker = null;
+ let resizer = new Resizer(panesUpdatedCallback, 100);
+
+ function panesUpdatedCallback() {
+ graph.fitGraphViewToWindow();
}
function hideCurrentPhase() {
@@ -128,8 +220,6 @@ document.onload = (function(d3){
d3.select("#right").classed("scrollable", false);
graph.fitGraphViewToWindow();
- disassemblyView.resizeToParent();
- sourceView.resizeToParent();
d3.select("#left").classed("scrollable", true);
d3.select("#right").classed("scrollable", true);
@@ -138,21 +228,6 @@ document.onload = (function(d3){
selectionBroker = new SelectionBroker();
function initializeHandlers(g) {
- d3.select("#source-collapse").on("click", function(){
- toggleSourceExpanded(true);
- setTimeout(function(){
- g.fitGraphViewToWindow();
- }, 300);
- });
- d3.select("#disassembly-collapse").on("click", function(){
- toggleDisassemblyExpanded();
- setTimeout(function(){
- g.fitGraphViewToWindow();
- }, 300);
- });
- window.onresize = function(){
- fitPanesToParents();
- };
d3.select("#hidden-file-upload").on("change", function() {
if (window.File && window.FileReader && window.FileList) {
var uploadFile = this.files[0];
@@ -238,9 +313,11 @@ document.onload = (function(d3){
initializeHandlers(graph);
- setSourceExpanded(getLastExpandedState("source", true));
- setDisassemblyExpanded(getLastExpandedState("disassembly", false));
+ resizer.snapper.setSourceExpanded(resizer.snapper.getLastExpandedState("source", true));
+ resizer.snapper.setDisassemblyExpanded(resizer.snapper.getLastExpandedState("disassembly", false));
displayPhaseView(empty, null);
fitPanesToParents();
+ resizer.updatePanes();
+
})(window.d3);
diff --git a/deps/v8/tools/turbolizer/view.js b/deps/v8/tools/turbolizer/view.js
index 1ce1056a7f..a7c1f1e417 100644
--- a/deps/v8/tools/turbolizer/view.js
+++ b/deps/v8/tools/turbolizer/view.js
@@ -18,21 +18,9 @@ class View {
show(data, rememberedSelection) {
this.parentNode.appendChild(this.divElement[0][0]);
this.initializeContent(data, rememberedSelection);
- this.resizeToParent();
this.divElement.attr(VISIBILITY, 'visible');
}
- resizeToParent() {
- var view = this;
- var documentElement = document.documentElement;
- var y;
- if (this.parentNode.clientHeight)
- y = Math.max(this.parentNode.clientHeight, documentElement.clientHeight);
- else
- y = documentElement.clientHeight;
- this.parentNode.style.height = y + 'px';
- }
-
hide() {
this.divElement.attr(VISIBILITY, 'hidden');
this.deleteContent();
diff --git a/deps/v8/tools/unittests/PRESUBMIT.py b/deps/v8/tools/unittests/PRESUBMIT.py
new file mode 100644
index 0000000000..d428813e13
--- /dev/null
+++ b/deps/v8/tools/unittests/PRESUBMIT.py
@@ -0,0 +1,9 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+def CheckChangeOnCommit(input_api, output_api):
+ # TODO(machenbach): Run all unittests.
+ tests = input_api.canned_checks.GetUnitTestsInDirectory(
+ input_api, output_api, '.', whitelist=['run_tests_test.py$'])
+ return input_api.RunTests(tests)
diff --git a/deps/v8/tools/unittests/predictable_wrapper_test.py b/deps/v8/tools/unittests/predictable_wrapper_test.py
new file mode 100755
index 0000000000..c085fb8879
--- /dev/null
+++ b/deps/v8/tools/unittests/predictable_wrapper_test.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import subprocess
+import sys
+import tempfile
+import unittest
+
+TOOLS_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+
+PREDICTABLE_WRAPPER = os.path.join(
+ TOOLS_DIR, 'predictable_wrapper.py')
+
+PREDICTABLE_MOCKED = os.path.join(
+ TOOLS_DIR, 'unittests', 'testdata', 'predictable_mocked.py')
+
+def call_wrapper(mode):
+ """Call the predictable wrapper under test with a mocked file to test.
+
+ Instead of d8, we use python and a python mock script. The mock script
+ expects two arguments: a mode (one of 'equal', 'differ' or 'missing') and
+ a path to a temporary file for simulating non-determinism.
+ """
+ fd, state_file = tempfile.mkstemp()
+ os.close(fd)
+ try:
+ args = [
+ sys.executable,
+ PREDICTABLE_WRAPPER,
+ sys.executable,
+ PREDICTABLE_MOCKED,
+ mode,
+ state_file,
+ ]
+ proc = subprocess.Popen(args, stdout=subprocess.PIPE)
+ proc.communicate()
+ return proc.returncode
+ finally:
+ os.unlink(state_file)
+
+
+class PredictableTest(unittest.TestCase):
+ def testEqualAllocationOutput(self):
+ self.assertEqual(0, call_wrapper('equal'))
+
+ def testNoAllocationOutput(self):
+ self.assertEqual(2, call_wrapper('missing'))
+
+ def testDifferentAllocationOutput(self):
+ self.assertEqual(3, call_wrapper('differ'))
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/deps/v8/tools/unittests/run_perf_test.py b/deps/v8/tools/unittests/run_perf_test.py
index fd1e36531a..07dd515406 100644..100755
--- a/deps/v8/tools/unittests/run_perf_test.py
+++ b/deps/v8/tools/unittests/run_perf_test.py
@@ -94,8 +94,8 @@ class PerfTest(unittest.TestCase):
include=([os.path.join(cls.base, "run_perf.py")]))
cls._cov.start()
import run_perf
- from testrunner.local import commands
- global commands
+ from testrunner.local import command
+ global command
global run_perf
@classmethod
@@ -125,9 +125,14 @@ class PerfTest(unittest.TestCase):
stderr=None,
timed_out=kwargs.get("timed_out", False))
for arg in args[1]]
- def execute(*args, **kwargs):
- return test_outputs.pop()
- commands.Execute = MagicMock(side_effect=execute)
+ def create_cmd(*args, **kwargs):
+ cmd = MagicMock()
+ def execute(*args, **kwargs):
+ return test_outputs.pop()
+ cmd.execute = MagicMock(side_effect=execute)
+ return cmd
+
+ command.Command = MagicMock(side_effect=create_cmd)
# Check that d8 is called from the correct cwd for each test run.
dirs = [path.join(TEST_WORKSPACE, arg) for arg in args[0]]
@@ -164,18 +169,23 @@ class PerfTest(unittest.TestCase):
self.assertEquals(errors, self._LoadResults()["errors"])
def _VerifyMock(self, binary, *args, **kwargs):
- arg = [path.join(path.dirname(self.base), binary)]
- arg += args
- commands.Execute.assert_called_with(
- arg, timeout=kwargs.get("timeout", 60))
+ shell = path.join(path.dirname(self.base), binary)
+ command.Command.assert_called_with(
+ cmd_prefix=[],
+ shell=shell,
+ args=list(args),
+ timeout=kwargs.get('timeout', 60))
def _VerifyMockMultiple(self, *args, **kwargs):
- expected = []
- for arg in args:
- a = [path.join(path.dirname(self.base), arg[0])]
- a += arg[1:]
- expected.append(((a,), {"timeout": kwargs.get("timeout", 60)}))
- self.assertEquals(expected, commands.Execute.call_args_list)
+ self.assertEquals(len(args), len(command.Command.call_args_list))
+ for arg, actual in zip(args, command.Command.call_args_list):
+ expected = {
+ 'cmd_prefix': [],
+ 'shell': path.join(path.dirname(self.base), arg[0]),
+ 'args': list(arg[1:]),
+ 'timeout': kwargs.get('timeout', 60)
+ }
+ self.assertEquals((expected, ), actual)
def testOneRun(self):
self._WriteTestInput(V8_JSON)
diff --git a/deps/v8/tools/unittests/run_tests_test.py b/deps/v8/tools/unittests/run_tests_test.py
new file mode 100755
index 0000000000..f4ff3fe1f7
--- /dev/null
+++ b/deps/v8/tools/unittests/run_tests_test.py
@@ -0,0 +1,667 @@
+#!/usr/bin/env python
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Global system tests for V8 test runners and fuzzers.
+
+This hooks up the framework under tools/testrunner, testing high-level
+scenarios with different test suite extensions and build configurations.
+"""
+
+# TODO(machenbach): Mock out util.GuessOS to make these tests really platform
+# independent.
+# TODO(machenbach): Move coverage recording to a global test entry point to
+# include other unittest suites in the coverage report.
+# TODO(machenbach): Coverage data from multiprocessing doesn't work.
+# TODO(majeski): Add some tests for the fuzzers.
+
+import collections
+import contextlib
+import json
+import os
+import shutil
+import subprocess
+import sys
+import tempfile
+import unittest
+
+from cStringIO import StringIO
+
+TOOLS_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+TEST_DATA_ROOT = os.path.join(TOOLS_ROOT, 'unittests', 'testdata')
+RUN_TESTS_PY = os.path.join(TOOLS_ROOT, 'run-tests.py')
+
+Result = collections.namedtuple(
+ 'Result', ['stdout', 'stderr', 'returncode'])
+
+Result.__str__ = lambda self: (
+ '\nReturncode: %s\nStdout:\n%s\nStderr:\n%s\n' %
+ (self.returncode, self.stdout, self.stderr))
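+# The custom __str__ keeps failures readable when a Result is passed as the
+# msg argument of the assert* calls below.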
+
+
+@contextlib.contextmanager
+def temp_dir():
+ """Wrapper making a temporary directory available."""
+ path = None
+ try:
+ path = tempfile.mkdtemp('v8_test_')
+ yield path
+ finally:
+ if path:
+ shutil.rmtree(path)
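+# Illustrative usage: `with temp_dir() as path:` hands out a fresh directory
+# and removes it again on exit, even if the body raises.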
+
+
+@contextlib.contextmanager
+def temp_base(baseroot='testroot1'):
+ """Wrapper that sets up a temporary V8 test root.
+
+ Args:
+ baseroot: The folder with the test root blueprint. Relevant files will be
+ copied to the temporary test root, to guarantee a fresh setup with no
+ dirty state.
+ """
+ basedir = os.path.join(TEST_DATA_ROOT, baseroot)
+ with temp_dir() as tempbase:
+ builddir = os.path.join(tempbase, 'out', 'Release')
+ testroot = os.path.join(tempbase, 'test')
+ os.makedirs(builddir)
+ shutil.copy(os.path.join(basedir, 'v8_build_config.json'), builddir)
+ shutil.copy(os.path.join(basedir, 'd8_mocked.py'), builddir)
+
+ for suite in os.listdir(os.path.join(basedir, 'test')):
+ os.makedirs(os.path.join(testroot, suite))
+ for entry in os.listdir(os.path.join(basedir, 'test', suite)):
+ shutil.copy(
+ os.path.join(basedir, 'test', suite, entry),
+ os.path.join(testroot, suite))
+ yield tempbase
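+# Sketch of the layout produced above:
+#   <tempbase>/out/Release/{v8_build_config.json, d8_mocked.py}
+#   <tempbase>/test/<suite>/<entries copied from the blueprint>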
+
+
+@contextlib.contextmanager
+def capture():
+ """Wrapper that replaces system stdout/stderr an provides the streams."""
+ oldout = sys.stdout
+ olderr = sys.stderr
+ try:
+ stdout = StringIO()
+ stderr = StringIO()
+ sys.stdout = stdout
+ sys.stderr = stderr
+ yield stdout, stderr
+ finally:
+ sys.stdout = oldout
+ sys.stderr = olderr
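+# Illustrative usage (Python 2, as used throughout this file):
+#   with capture() as (stdout, stderr):
+#     print 'hi'                     # lands in the StringIO buffer
+#   assert stdout.getvalue() == 'hi\n'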
+
+
+def run_tests(basedir, *args, **kwargs):
+ """Executes the test runner with captured output."""
+ with capture() as (stdout, stderr):
+ sys_args = ['--command-prefix', sys.executable] + list(args)
+ if kwargs.get('infra_staging', False):
+ sys_args.append('--infra-staging')
+ code = standard_runner.StandardTestRunner(
+ basedir=basedir).execute(sys_args)
+ return Result(stdout.getvalue(), stderr.getvalue(), code)
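+# e.g. run_tests(basedir, '--mode=Release', 'sweet/bananas') drives the real
+# StandardTestRunner against the fake test root and returns a Result holding
+# the captured stdout/stderr and the runner's exit code.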
+
+
+def override_build_config(basedir, **kwargs):
+ """Override the build config with new values provided as kwargs."""
+ path = os.path.join(basedir, 'out', 'Release', 'v8_build_config.json')
+ with open(path) as f:
+ config = json.load(f)
+ config.update(kwargs)
+ with open(path, 'w') as f:
+ json.dump(config, f)
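+# e.g. override_build_config(basedir, is_debug=True) rewrites the JSON in
+# place; a subsequent run_tests() call then sees the modified configuration.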
+
+
+class SystemTest(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ # Try to set up python coverage and run without it if not available.
+ cls._cov = None
+ try:
+ import coverage
+ if int(coverage.__version__.split('.')[0]) < 4:
+ cls._cov = None
+ print 'Python coverage version >= 4 required.'
+ raise ImportError()
+ cls._cov = coverage.Coverage(
+ source=([os.path.join(TOOLS_ROOT, 'testrunner')]),
+ omit=['*unittest*', '*__init__.py'],
+ )
+ cls._cov.exclude('raise NotImplementedError')
+ cls._cov.exclude('if __name__ == .__main__.:')
+ cls._cov.exclude('except TestRunnerError:')
+ cls._cov.exclude('except KeyboardInterrupt:')
+ cls._cov.exclude('if options.verbose:')
+ cls._cov.exclude('if verbose:')
+ cls._cov.exclude('pass')
+ cls._cov.exclude('assert False')
+ cls._cov.start()
+ except ImportError:
+ print 'Running without python coverage.'
+ sys.path.append(TOOLS_ROOT)
+ global standard_runner
+ from testrunner import standard_runner
+ from testrunner.local import pool
+ pool.setup_testing()
+
+ @classmethod
+ def tearDownClass(cls):
+ if cls._cov:
+ cls._cov.stop()
+ print ''
+ print cls._cov.report(show_missing=True)
+
+ def testPass(self):
+ """Test running only passing tests in two variants.
+
+ Also test printing durations.
+ """
+ with temp_base() as basedir:
+ result = run_tests(
+ basedir,
+ '--mode=Release',
+ '--progress=verbose',
+ '--variants=default,stress',
+ '--time',
+ 'sweet/bananas',
+ 'sweet/raspberries',
+ )
+ self.assertIn('Running 4 tests', result.stdout, result)
+ self.assertIn('Done running sweet/bananas: pass', result.stdout, result)
+ self.assertIn('Total time:', result.stderr, result)
+ self.assertIn('sweet/bananas', result.stderr, result)
+ self.assertEqual(0, result.returncode, result)
+
+ def testShardedProc(self):
+ with temp_base() as basedir:
+ for shard in [1, 2]:
+ result = run_tests(
+ basedir,
+ '--mode=Release',
+ '--progress=verbose',
+ '--variants=default,stress',
+ '--shard-count=2',
+ '--shard-run=%d' % shard,
+ 'sweet/bananas',
+ 'sweet/raspberries',
+ infra_staging=True,
+ )
+ # One of the shards gets one variant of each test.
+ self.assertIn('Running 1 base tests', result.stdout, result)
+ self.assertIn('2 tests ran', result.stdout, result)
+ if shard == 1:
+ self.assertIn('Done running sweet/bananas', result.stdout, result)
+ else:
+ self.assertIn('Done running sweet/raspberries', result.stdout, result)
+ self.assertEqual(0, result.returncode, result)
+
+ def testSharded(self):
+ """Test running a particular shard."""
+ with temp_base() as basedir:
+ for shard in [1, 2]:
+ result = run_tests(
+ basedir,
+ '--mode=Release',
+ '--progress=verbose',
+ '--variants=default,stress',
+ '--shard-count=2',
+ '--shard-run=%d' % shard,
+ 'sweet/bananas',
+ 'sweet/raspberries',
+ )
+ # One of the shards gets one variant of each test.
+ self.assertIn('Running 2 tests', result.stdout, result)
+ self.assertIn('Done running sweet/bananas', result.stdout, result)
+ self.assertIn('Done running sweet/raspberries', result.stdout, result)
+ self.assertEqual(0, result.returncode, result)
+
+ def testFailProc(self):
+ self.testFail(infra_staging=True)
+
+ def testFail(self, infra_staging=False):
+ """Test running only failing tests in two variants."""
+ with temp_base() as basedir:
+ result = run_tests(
+ basedir,
+ '--mode=Release',
+ '--progress=verbose',
+ '--variants=default,stress',
+ 'sweet/strawberries',
+ infra_staging=infra_staging,
+ )
+ if not infra_staging:
+ self.assertIn('Running 2 tests', result.stdout, result)
+ else:
+ self.assertIn('Running 1 base tests', result.stdout, result)
+ self.assertIn('2 tests ran', result.stdout, result)
+ self.assertIn('Done running sweet/strawberries: FAIL', result.stdout, result)
+ self.assertEqual(1, result.returncode, result)
+
+ def check_cleaned_json_output(self, expected_results_name, actual_json):
+ # Check relevant properties of the json output.
+ with open(actual_json) as f:
+ json_output = json.load(f)[0]
+ pretty_json = json.dumps(json_output, indent=2, sort_keys=True)
+
+ # Replace duration in actual output as it's non-deterministic. Also
+ # replace the python executable prefix as it has a different absolute
+ # path depending on where this runs.
+ def replace_variable_data(data):
+ data['duration'] = 1
+ data['command'] = ' '.join(
+ ['/usr/bin/python'] + data['command'].split()[1:])
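+ # e.g. '/abs/path/to/python out/Release/d8_mocked.py ...' becomes
+ # '/usr/bin/python out/Release/d8_mocked.py ...'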
+ for data in json_output['slowest_tests']:
+ replace_variable_data(data)
+ for data in json_output['results']:
+ replace_variable_data(data)
+ json_output['duration_mean'] = 1
+
+ with open(os.path.join(TEST_DATA_ROOT, expected_results_name)) as f:
+ expected_test_results = json.load(f)
+
+ msg = None # Set to pretty_json for bootstrapping.
+ self.assertDictEqual(json_output, expected_test_results, msg)
+
+ def testFailWithRerunAndJSONProc(self):
+ self.testFailWithRerunAndJSON(infra_staging=True)
+
+ def testFailWithRerunAndJSON(self, infra_staging=False):
+ """Test re-running a failing test and output to json."""
+ with temp_base() as basedir:
+ json_path = os.path.join(basedir, 'out.json')
+ result = run_tests(
+ basedir,
+ '--mode=Release',
+ '--progress=verbose',
+ '--variants=default',
+ '--rerun-failures-count=2',
+ '--random-seed=123',
+ '--json-test-results', json_path,
+ 'sweet/strawberries',
+ infra_staging=infra_staging,
+ )
+ if not infra_staging:
+ self.assertIn('Running 1 tests', result.stdout, result)
+ else:
+ self.assertIn('Running 1 base tests', result.stdout, result)
+ self.assertIn('1 tests ran', result.stdout, result)
+ self.assertIn('Done running sweet/strawberries: FAIL', result.stdout, result)
+ if not infra_staging:
+ # We run one test, which fails and gets re-run twice.
+ self.assertIn('3 tests failed', result.stdout, result)
+ else:
+ # With test processors we don't count reruns as separate failures.
+ # TODO(majeski): fix it?
+ self.assertIn('1 tests failed', result.stdout, result)
+ self.assertEqual(0, result.returncode, result)
+
+ # TODO(majeski): Previously we only reported the variant flags in the
+ # flags field of the test result.
+ # After recent changes we report all flags, including the file names.
+ # This is redundant to the command. Needs investigation.
+ self.check_cleaned_json_output('expected_test_results1.json', json_path)
+
+ def testFlakeWithRerunAndJSONProc(self):
+ self.testFlakeWithRerunAndJSON(infra_staging=True)
+
+ def testFlakeWithRerunAndJSON(self, infra_staging=False):
+ """Test re-running a failing test and output to json."""
+ with temp_base(baseroot='testroot2') as basedir:
+ json_path = os.path.join(basedir, 'out.json')
+ result = run_tests(
+ basedir,
+ '--mode=Release',
+ '--progress=verbose',
+ '--variants=default',
+ '--rerun-failures-count=2',
+ '--random-seed=123',
+ '--json-test-results', json_path,
+ 'sweet',
+ infra_staging=infra_staging,
+ )
+ if not infra_staging:
+ self.assertIn('Running 1 tests', result.stdout, result)
+ self.assertIn(
+ 'Done running sweet/bananaflakes: FAIL', result.stdout, result)
+ self.assertIn('1 tests failed', result.stdout, result)
+ else:
+ self.assertIn('Running 1 base tests', result.stdout, result)
+ self.assertIn(
+ 'Done running sweet/bananaflakes: pass', result.stdout, result)
+ self.assertIn('All tests succeeded', result.stdout, result)
+ self.assertEqual(0, result.returncode, result)
+ self.check_cleaned_json_output('expected_test_results2.json', json_path)
+
+ def testAutoDetect(self):
+ """Fake a build with several auto-detected options.
+
+ Using all those options at once doesn't really make much sense. This is
+ merely for getting coverage.
+ """
+ with temp_base() as basedir:
+ override_build_config(
+ basedir, dcheck_always_on=True, is_asan=True, is_cfi=True,
+ is_msan=True, is_tsan=True, is_ubsan_vptr=True, target_cpu='x86',
+ v8_enable_i18n_support=False, v8_target_cpu='x86',
+ v8_use_snapshot=False)
+ result = run_tests(
+ basedir,
+ '--mode=Release',
+ '--progress=verbose',
+ '--variants=default',
+ 'sweet/bananas',
+ )
+ expect_text = (
+ '>>> Autodetected:\n'
+ 'asan\n'
+ 'cfi_vptr\n'
+ 'dcheck_always_on\n'
+ 'msan\n'
+ 'no_i18n\n'
+ 'no_snap\n'
+ 'tsan\n'
+ 'ubsan_vptr\n'
+ '>>> Running tests for ia32.release')
+ self.assertIn(expect_text, result.stdout, result)
+ self.assertEqual(0, result.returncode, result)
+ # TODO(machenbach): Test some more implications of the auto-detected
+ # options, e.g. that the right env variables are set.
+
+ def testSkipsProc(self):
+ self.testSkips(infra_staging=True)
+
+ def testSkips(self, infra_staging=False):
+ """Test skipping tests in status file for a specific variant."""
+ with temp_base() as basedir:
+ result = run_tests(
+ basedir,
+ '--mode=Release',
+ '--progress=verbose',
+ '--variants=nooptimization',
+ 'sweet/strawberries',
+ infra_staging=infra_staging,
+ )
+ if not infra_staging:
+ self.assertIn('Running 0 tests', result.stdout, result)
+ else:
+ self.assertIn('Running 1 base tests', result.stdout, result)
+ self.assertIn('0 tests ran', result.stdout, result)
+ self.assertEqual(0, result.returncode, result)
+
+ def testDefaultProc(self):
+ self.testDefault(infra_staging=True)
+
+ def testDefault(self, infra_staging=False):
+ """Test using default test suites, though no tests are run since they don't
+ exist in a test setting.
+ """
+ with temp_base() as basedir:
+ result = run_tests(
+ basedir,
+ '--mode=Release',
+ infra_staging=infra_staging,
+ )
+ if not infra_staging:
+ self.assertIn('Warning: no tests were run!', result.stdout, result)
+ else:
+ self.assertIn('Running 0 base tests', result.stdout, result)
+ self.assertIn('0 tests ran', result.stdout, result)
+ self.assertEqual(0, result.returncode, result)
+
+ def testNoBuildConfig(self):
+ """Test failing run when build config is not found."""
+ with temp_base() as basedir:
+ result = run_tests(basedir)
+ self.assertIn('Failed to load build config', result.stdout, result)
+ self.assertEqual(1, result.returncode, result)
+
+ def testGNOption(self):
+ """Test using gn option, but no gn build folder is found."""
+ with temp_base() as basedir:
+ # TODO(machenbach): This should fail gracefully.
+ with self.assertRaises(OSError):
+ run_tests(basedir, '--gn')
+
+ def testInconsistentMode(self):
+ """Test failing run when attempting to wrongly override the mode."""
+ with temp_base() as basedir:
+ override_build_config(basedir, is_debug=True)
+ result = run_tests(basedir, '--mode=Release')
+ self.assertIn('execution mode (release) for release is inconsistent '
+ 'with build config (debug)', result.stdout, result)
+ self.assertEqual(1, result.returncode, result)
+
+ def testInconsistentArch(self):
+ """Test failing run when attempting to wrongly override the arch."""
+ with temp_base() as basedir:
+ result = run_tests(basedir, '--mode=Release', '--arch=ia32')
+ self.assertIn(
+ '--arch value (ia32) inconsistent with build config (x64).',
+ result.stdout, result)
+ self.assertEqual(1, result.returncode, result)
+
+ def testWrongVariant(self):
+ """Test using a bogus variant."""
+ with temp_base() as basedir:
+ result = run_tests(basedir, '--mode=Release', '--variants=meh')
+ self.assertEqual(1, result.returncode, result)
+
+ def testModeFromBuildConfig(self):
+ """Test auto-detection of mode from build config."""
+ with temp_base() as basedir:
+ result = run_tests(basedir, '--outdir=out/Release', 'sweet/bananas')
+ self.assertIn('Running tests for x64.release', result.stdout, result)
+ self.assertEqual(0, result.returncode, result)
+
+ def testReport(self):
+ """Test the report feature.
+
+ This also exercises various paths in statusfile logic.
+ """
+ with temp_base() as basedir:
+ result = run_tests(
+ basedir,
+ '--mode=Release',
+ '--variants=default',
+ 'sweet',
+ '--report',
+ )
+ self.assertIn(
+ '3 tests are expected to fail that we should fix',
+ result.stdout, result)
+ self.assertEqual(1, result.returncode, result)
+
+ def testWarnUnusedRules(self):
+ """Test the unused-rules feature."""
+ with temp_base() as basedir:
+ result = run_tests(
+ basedir,
+ '--mode=Release',
+ '--variants=default,nooptimization',
+ 'sweet',
+ '--warn-unused',
+ )
+ self.assertIn('Unused rule: carrots', result.stdout, result)
+ self.assertIn('Unused rule: regress/', result.stdout, result)
+ self.assertEqual(1, result.returncode, result)
+
+ def testCatNoSources(self):
+ """Test printing sources, but the suite's tests have none available."""
+ with temp_base() as basedir:
+ result = run_tests(
+ basedir,
+ '--mode=Release',
+ '--variants=default',
+ 'sweet/bananas',
+ '--cat',
+ )
+ self.assertIn('begin source: sweet/bananas', result.stdout, result)
+ self.assertIn('(no source available)', result.stdout, result)
+ self.assertEqual(0, result.returncode, result)
+
+ def testPredictableProc(self):
+ self.testPredictable(infra_staging=True)
+
+ def testPredictable(self, infra_staging=False):
+ """Test running a test in verify-predictable mode.
+
+ The test will fail because of missing allocation output. We verify this,
+ and that the predictable flags are passed and printed after the failure.
+ """
+ with temp_base() as basedir:
+ override_build_config(basedir, v8_enable_verify_predictable=True)
+ result = run_tests(
+ basedir,
+ '--mode=Release',
+ '--progress=verbose',
+ '--variants=default',
+ 'sweet/bananas',
+ infra_staging=infra_staging,
+ )
+ if not infra_staging:
+ self.assertIn('Running 1 tests', result.stdout, result)
+ else:
+ self.assertIn('Running 1 base tests', result.stdout, result)
+ self.assertIn('1 tests ran', result.stdout, result)
+ self.assertIn('Done running sweet/bananas: FAIL', result.stdout, result)
+ self.assertIn('Test had no allocation output', result.stdout, result)
+ self.assertIn('--predictable --verify_predictable', result.stdout, result)
+ self.assertEqual(1, result.returncode, result)
+
+ def testSlowArch(self):
+ """Test timeout factor manipulation on slow architecture."""
+ with temp_base() as basedir:
+ override_build_config(basedir, v8_target_cpu='arm64')
+ result = run_tests(
+ basedir,
+ '--mode=Release',
+ '--progress=verbose',
+ '--variants=default',
+ 'sweet/bananas',
+ )
+ # TODO(machenbach): We don't have a way for testing if the correct
+ # timeout was used.
+ self.assertEqual(0, result.returncode, result)
+
+ def testRandomSeedStressWithDefault(self):
+ """Test using random-seed-stress feature has the right number of tests."""
+ with temp_base() as basedir:
+ result = run_tests(
+ basedir,
+ '--mode=Release',
+ '--progress=verbose',
+ '--variants=default',
+ '--random-seed-stress-count=2',
+ 'sweet/bananas',
+ )
+ self.assertIn('Running 2 tests', result.stdout, result)
+ self.assertEqual(0, result.returncode, result)
+
+ def testRandomSeedStressWithSeed(self):
+ """Test using random-seed-stress feature passing a random seed."""
+ with temp_base() as basedir:
+ result = run_tests(
+ basedir,
+ '--mode=Release',
+ '--progress=verbose',
+ '--variants=default',
+ '--random-seed-stress-count=2',
+ '--random-seed=123',
+ 'sweet/strawberries',
+ )
+ self.assertIn('Running 2 tests', result.stdout, result)
+ # We use a failing test so that the command is printed and we can verify
+ # that the right random seed was passed.
+ self.assertIn('--random-seed=123', result.stdout, result)
+ self.assertEqual(1, result.returncode, result)
+
+ def testSpecificVariants(self):
+ """Test using NO_VARIANTS modifiers in status files skips the desire tests.
+
+ The test runner cmd line configures 4 tests to run (2 tests * 2 variants),
+ but the status file applies a modifier to each, skipping one of the
+ variants.
+ """
+ with temp_base() as basedir:
+ override_build_config(basedir, v8_use_snapshot=False)
+ result = run_tests(
+ basedir,
+ '--mode=Release',
+ '--progress=verbose',
+ '--variants=default,stress',
+ 'sweet/bananas',
+ 'sweet/raspberries',
+ )
+ # Both tests are either marked as running in only default or only
+ # slow variant.
+ self.assertIn('Running 2 tests', result.stdout, result)
+ self.assertEqual(0, result.returncode, result)
+
+ def testStatusFilePresubmit(self):
+ """Test that the fake status file is well-formed."""
+ with temp_base() as basedir:
+ from testrunner.local import statusfile
+ self.assertTrue(statusfile.PresubmitCheck(
+ os.path.join(basedir, 'test', 'sweet', 'sweet.status')))
+
+ def testDotsProgressProc(self):
+ self.testDotsProgress(infra_staging=True)
+
+ def testDotsProgress(self, infra_staging=False):
+ with temp_base() as basedir:
+ result = run_tests(
+ basedir,
+ '--mode=Release',
+ '--progress=dots',
+ 'sweet/cherries',
+ 'sweet/bananas',
+ '--no-sorting', '-j1', # make results order deterministic
+ infra_staging=infra_staging,
+ )
+ if not infra_staging:
+ self.assertIn('Running 2 tests', result.stdout, result)
+ else:
+ self.assertIn('Running 2 base tests', result.stdout, result)
+ self.assertIn('2 tests ran', result.stdout, result)
+ self.assertIn('F.', result.stdout, result)
+ self.assertEqual(1, result.returncode, result)
+
+ def testMonoProgressProc(self):
+ self._testCompactProgress('mono', True)
+
+ def testMonoProgress(self):
+ self._testCompactProgress('mono', False)
+
+ def testColorProgressProc(self):
+ self._testCompactProgress('color', True)
+
+ def testColorProgress(self):
+ self._testCompactProgress('color', False)
+
+ def _testCompactProgress(self, name, infra_staging):
+ with temp_base() as basedir:
+ result = run_tests(
+ basedir,
+ '--mode=Release',
+ '--progress=%s' % name,
+ 'sweet/cherries',
+ 'sweet/bananas',
+ infra_staging=infra_staging,
+ )
+ if name == 'color':
+ expected = ('\033[34m% 100\033[0m|'
+ '\033[32m+ 1\033[0m|'
+ '\033[31m- 1\033[0m]: Done')
+ else:
+ expected = '% 100|+ 1|- 1]: Done'
+ self.assertIn(expected, result.stdout)
+ self.assertIn('sweet/cherries', result.stdout)
+ self.assertIn('sweet/bananas', result.stdout)
+ self.assertEqual(1, result.returncode, result)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/deps/v8/tools/unittests/testdata/expected_test_results1.json b/deps/v8/tools/unittests/testdata/expected_test_results1.json
new file mode 100644
index 0000000000..172b87a5d6
--- /dev/null
+++ b/deps/v8/tools/unittests/testdata/expected_test_results1.json
@@ -0,0 +1,107 @@
+{
+ "arch": "x64",
+ "duration_mean": 1,
+ "mode": "release",
+ "results": [
+ {
+ "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort",
+ "duration": 1,
+ "exit_code": 1,
+ "expected": [
+ "PASS"
+ ],
+ "flags": [
+ "--random-seed=123",
+ "strawberries",
+ "--nohard-abort"
+ ],
+ "name": "sweet/strawberries",
+ "random_seed": 123,
+ "result": "FAIL",
+ "run": 1,
+ "stderr": "",
+ "stdout": "--random-seed=123 strawberries --nohard-abort\n",
+ "target_name": "d8_mocked.py",
+ "variant": "default"
+ },
+ {
+ "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort",
+ "duration": 1,
+ "exit_code": 1,
+ "expected": [
+ "PASS"
+ ],
+ "flags": [
+ "--random-seed=123",
+ "strawberries",
+ "--nohard-abort"
+ ],
+ "name": "sweet/strawberries",
+ "random_seed": 123,
+ "result": "FAIL",
+ "run": 2,
+ "stderr": "",
+ "stdout": "--random-seed=123 strawberries --nohard-abort\n",
+ "target_name": "d8_mocked.py",
+ "variant": "default"
+ },
+ {
+ "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort",
+ "duration": 1,
+ "exit_code": 1,
+ "expected": [
+ "PASS"
+ ],
+ "flags": [
+ "--random-seed=123",
+ "strawberries",
+ "--nohard-abort"
+ ],
+ "name": "sweet/strawberries",
+ "random_seed": 123,
+ "result": "FAIL",
+ "run": 3,
+ "stderr": "",
+ "stdout": "--random-seed=123 strawberries --nohard-abort\n",
+ "target_name": "d8_mocked.py",
+ "variant": "default"
+ }
+ ],
+ "slowest_tests": [
+ {
+ "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort",
+ "duration": 1,
+ "flags": [
+ "--random-seed=123",
+ "strawberries",
+ "--nohard-abort"
+ ],
+ "marked_slow": true,
+ "name": "sweet/strawberries"
+ },
+ {
+ "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort",
+ "duration": 1,
+ "flags": [
+ "--random-seed=123",
+ "strawberries",
+ "--nohard-abort"
+ ],
+ "marked_slow": true,
+ "name": "sweet/strawberries"
+ },
+ {
+ "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort",
+ "duration": 1,
+ "flags": [
+ "--random-seed=123",
+ "strawberries",
+ "--nohard-abort"
+ ],
+ "marked_slow": true,
+ "name": "sweet/strawberries"
+ }
+ ],
+ "test_total": 3
+}
+
diff --git a/deps/v8/tools/unittests/testdata/expected_test_results2.json b/deps/v8/tools/unittests/testdata/expected_test_results2.json
new file mode 100644
index 0000000000..7fcfe47f71
--- /dev/null
+++ b/deps/v8/tools/unittests/testdata/expected_test_results2.json
@@ -0,0 +1,74 @@
+{
+ "arch": "x64",
+ "duration_mean": 1,
+ "mode": "release",
+ "results": [
+ {
+ "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 bananaflakes --nohard-abort",
+ "duration": 1,
+ "exit_code": 1,
+ "expected": [
+ "PASS"
+ ],
+ "flags": [
+ "--random-seed=123",
+ "bananaflakes",
+ "--nohard-abort"
+ ],
+ "name": "sweet/bananaflakes",
+ "random_seed": 123,
+ "result": "FAIL",
+ "run": 1,
+ "stderr": "",
+ "stdout": "--random-seed=123 bananaflakes --nohard-abort\n",
+ "target_name": "d8_mocked.py",
+ "variant": "default"
+ },
+ {
+ "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 bananaflakes --nohard-abort",
+ "duration": 1,
+ "exit_code": 0,
+ "expected": [
+ "PASS"
+ ],
+ "flags": [
+ "--random-seed=123",
+ "bananaflakes",
+ "--nohard-abort"
+ ],
+ "name": "sweet/bananaflakes",
+ "random_seed": 123,
+ "result": "PASS",
+ "run": 2,
+ "stderr": "",
+ "stdout": "--random-seed=123 bananaflakes --nohard-abort\n",
+ "target_name": "d8_mocked.py",
+ "variant": "default"
+ }
+ ],
+ "slowest_tests": [
+ {
+ "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 bananaflakes --nohard-abort",
+ "duration": 1,
+ "flags": [
+ "--random-seed=123",
+ "bananaflakes",
+ "--nohard-abort"
+ ],
+ "marked_slow": false,
+ "name": "sweet/bananaflakes"
+ },
+ {
+ "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 bananaflakes --nohard-abort",
+ "duration": 1,
+ "flags": [
+ "--random-seed=123",
+ "bananaflakes",
+ "--nohard-abort"
+ ],
+ "marked_slow": false,
+ "name": "sweet/bananaflakes"
+ }
+ ],
+ "test_total": 2
+}
diff --git a/deps/v8/tools/unittests/testdata/predictable_mocked.py b/deps/v8/tools/unittests/testdata/predictable_mocked.py
new file mode 100644
index 0000000000..cc332c2c46
--- /dev/null
+++ b/deps/v8/tools/unittests/testdata/predictable_mocked.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+
+assert len(sys.argv) == 3
+
+if sys.argv[1] == 'equal':
+ # 1. Scenario: print equal allocation hashes.
+ print '### Allocations = 9497, hash = 0xc322c6b0'
+elif sys.argv[1] == 'differ':
+ # 2. Scenario: print different allocation hashes. This prints a different
+ # hash on the second run, based on the content of a semaphore file. This
+ # file is expected to be empty in the beginning.
+ with open(sys.argv[2]) as f:
+ if f.read():
+ print '### Allocations = 9497, hash = 0xc322c6b0'
+ else:
+ print '### Allocations = 9497, hash = 0xc322c6b1'
+ with open(sys.argv[2], 'w') as f:
+ f.write('something')
+else:
+ # 3. Scenario: missing allocation hashes. Don't print anything.
+ assert sys.argv[1] == 'missing'
+
+sys.exit(0)
diff --git a/deps/v8/tools/unittests/testdata/testroot1/d8_mocked.py b/deps/v8/tools/unittests/testdata/testroot1/d8_mocked.py
new file mode 100644
index 0000000000..c7ca55a571
--- /dev/null
+++ b/deps/v8/tools/unittests/testdata/testroot1/d8_mocked.py
@@ -0,0 +1,16 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Dummy d8 replacement. Just passes all tests, except if 'berries' is in args.
+"""
+
+import sys
+
+args = ' '.join(sys.argv[1:])
+print args
+# Let all berries fail.
+if 'berries' in args:
+ sys.exit(1)
+sys.exit(0)
diff --git a/deps/v8/tools/unittests/testdata/testroot1/test/sweet/sweet.status b/deps/v8/tools/unittests/testdata/testroot1/test/sweet/sweet.status
new file mode 100644
index 0000000000..74214631dc
--- /dev/null
+++ b/deps/v8/tools/unittests/testdata/testroot1/test/sweet/sweet.status
@@ -0,0 +1,35 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+[
+[ALWAYS, {
+ 'raspberries': FAIL,
+ 'strawberries': [PASS, ['mode == release', SLOW], ['mode == debug', NO_VARIANTS]],
+
+ # Both cherries and apples are to test how PASS and FAIL from different
+ # sections are merged.
+ 'cherries': [PASS, SLOW],
+ 'apples': [FAIL],
+
+ # Unused rule.
+ 'carrots': [PASS, FAIL],
+}],
+
+['variant == nooptimization', {
+ 'strawberries': [SKIP],
+}],
+
+['arch == x64', {
+ 'cherries': [FAIL],
+ 'apples': [PASS, SLOW],
+
+ # Unused rule.
+ 'regress/*': [CRASH],
+}],
+
+['no_snap', {
+ 'bananas': [PASS, NO_VARIANTS],
+ 'raspberries': [FAIL, NO_VARIANTS],
+}],
+]
diff --git a/deps/v8/tools/unittests/testdata/testroot1/test/sweet/testcfg.py b/deps/v8/tools/unittests/testdata/testroot1/test/sweet/testcfg.py
new file mode 100644
index 0000000000..115471ac72
--- /dev/null
+++ b/deps/v8/tools/unittests/testdata/testroot1/test/sweet/testcfg.py
@@ -0,0 +1,31 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Dummy test suite extension with some fruity tests.
+"""
+
+from testrunner.local import testsuite
+from testrunner.objects import testcase
+
+class TestSuite(testsuite.TestSuite):
+ def ListTests(self, context):
+ return map(
+ self._create_test,
+ ['bananas', 'apples', 'cherries', 'strawberries', 'raspberries'],
+ )
+
+ def _test_class(self):
+ return TestCase
+
+
+class TestCase(testcase.TestCase):
+ def get_shell(self):
+ return 'd8_mocked.py'
+
+ def _get_files_params(self, ctx):
+ return [self.name]
+
+def GetSuite(name, root):
+ return TestSuite(name, root)
diff --git a/deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json b/deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json
new file mode 100644
index 0000000000..c5e3ee35f1
--- /dev/null
+++ b/deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json
@@ -0,0 +1,18 @@
+{
+ "current_cpu": "x64",
+ "dcheck_always_on": false,
+ "is_asan": false,
+ "is_cfi": false,
+ "is_component_build": false,
+ "is_debug": false,
+ "is_gcov_coverage": false,
+ "is_ubsan_vptr": false,
+ "is_msan": false,
+ "is_tsan": false,
+ "target_cpu": "x64",
+ "v8_current_cpu": "x64",
+ "v8_enable_i18n_support": true,
+ "v8_enable_verify_predictable": false,
+ "v8_target_cpu": "x64",
+ "v8_use_snapshot": true
+}
diff --git a/deps/v8/tools/unittests/testdata/testroot2/d8_mocked.py b/deps/v8/tools/unittests/testdata/testroot2/d8_mocked.py
new file mode 100644
index 0000000000..e66e299bc6
--- /dev/null
+++ b/deps/v8/tools/unittests/testdata/testroot2/d8_mocked.py
@@ -0,0 +1,29 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Dummy d8 replacement for flaky tests.
+"""
+
+import os
+import sys
+
+PATH = os.path.dirname(os.path.abspath(__file__))
+
+print ' '.join(sys.argv[1:])
+
+# Test files ending in 'flakes' should first fail then pass. We store state in
+# a file side by side with the executable. No clean-up required as all tests
+# run in a temp test root. Restriction: Only one variant is supported for now.
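+# e.g. 'bananaflakes' exits 1 on its first run (dropping a state file next to
+# this script) and exits 0 on every later run within the same test root.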
+for arg in sys.argv[1:]:
+ if arg.endswith('flakes'):
+ flake_state = os.path.join(PATH, arg)
+ if os.path.exists(flake_state):
+ sys.exit(0)
+ else:
+ with open(flake_state, 'w') as f:
+ f.write('something')
+ sys.exit(1)
+
+sys.exit(0)
diff --git a/deps/v8/tools/unittests/testdata/testroot2/test/sweet/sweet.status b/deps/v8/tools/unittests/testdata/testroot2/test/sweet/sweet.status
new file mode 100644
index 0000000000..9ad8c81948
--- /dev/null
+++ b/deps/v8/tools/unittests/testdata/testroot2/test/sweet/sweet.status
@@ -0,0 +1,6 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+[
+]
diff --git a/deps/v8/tools/unittests/testdata/testroot2/test/sweet/testcfg.py b/deps/v8/tools/unittests/testdata/testroot2/test/sweet/testcfg.py
new file mode 100644
index 0000000000..9407769b35
--- /dev/null
+++ b/deps/v8/tools/unittests/testdata/testroot2/test/sweet/testcfg.py
@@ -0,0 +1,31 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Dummy test suite extension with some flaky fruity tests.
+"""
+
+from testrunner.local import testsuite
+from testrunner.objects import testcase
+
+class TestSuite(testsuite.TestSuite):
+ def ListTests(self, context):
+ return map(
+ self._create_test,
+ ['bananaflakes'],
+ )
+
+ def _test_class(self):
+ return TestCase
+
+
+class TestCase(testcase.TestCase):
+ def get_shell(self):
+ return 'd8_mocked.py'
+
+ def _get_files_params(self, ctx):
+ return [self.name]
+
+def GetSuite(name, root):
+ return TestSuite(name, root)
diff --git a/deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json b/deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json
new file mode 100644
index 0000000000..c5e3ee35f1
--- /dev/null
+++ b/deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json
@@ -0,0 +1,18 @@
+{
+ "current_cpu": "x64",
+ "dcheck_always_on": false,
+ "is_asan": false,
+ "is_cfi": false,
+ "is_component_build": false,
+ "is_debug": false,
+ "is_gcov_coverage": false,
+ "is_ubsan_vptr": false,
+ "is_msan": false,
+ "is_tsan": false,
+ "target_cpu": "x64",
+ "v8_current_cpu": "x64",
+ "v8_enable_i18n_support": true,
+ "v8_enable_verify_predictable": false,
+ "v8_target_cpu": "x64",
+ "v8_use_snapshot": true
+}
diff --git a/deps/v8/tools/v8-rolls.sh b/deps/v8/tools/v8-rolls.sh
deleted file mode 100755
index 590e05c1f9..0000000000
--- a/deps/v8/tools/v8-rolls.sh
+++ /dev/null
@@ -1,120 +0,0 @@
-#!/bin/bash
-# Copyright 2013 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-########## Global variable definitions
-
-DEPS_STRING='"v8_revision":'
-INFO=tools/v8-info.sh
-
-V8="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
-
-########## Function definitions
-
-usage() {
-cat << EOF
-usage: $0 OPTIONS
-
-Run in chromium/src to get information about V8 rolls.
-
-OPTIONS:
- -h Show this message.
- -n Number of rolls to print information about.
- -s Chromium git hash to start printing V8 information about.
-EOF
-}
-
-v8_line() {
- git show $1:DEPS | grep -n $DEPS_STRING | cut -d":" -f1
-}
-
-v8_info() {
- git blame -L$(v8_line $1),+1 $1 DEPS | grep $DEPS_STRING
-}
-
-v8_svn() {
- sed -e 's/^.*"\([0-9]\+\)",$/\1/'
-}
-
-v8_roll() {
- cut -d" " -f1
-}
-
-find_rev() {
- git svn find-rev $1
-}
-
-msg() {
- msg=$(git log --format="%h %ci %ce" -1 $1)
- h=$(echo $msg | cut -d" " -f1)
- d=$(echo $msg | cut -d" " -f2)
- t=$(echo $msg | cut -d" " -f3)
- a=$(echo $msg | cut -d" " -f5)
- a1=$(echo $a | cut -d"@" -f1)
- a2=$(echo $a | cut -d"@" -f2)
- echo $h $d $t $a1@$a2
-}
-
-v8_revision() {
- cd $V8
- $INFO -v $1
-}
-
-rolls() {
- roll=$2
- for i in $(seq 1 $1); do
- info=$(v8_info $roll)
- roll=$(echo $info | v8_roll $roll)
- trunk=$(echo $info | v8_svn $roll)
- echo "$(v8_revision $trunk) $trunk $(find_rev $roll) $(msg $roll)"
- roll=$roll^1
- done
-}
-
-########## Option parsing
-
-REVISIONS=1
-START=HEAD
-
-while getopts ":hn:s:" OPTION ; do
- case $OPTION in
- h) usage
- exit 0
- ;;
- n) REVISIONS=$OPTARG
- ;;
- s) START=$OPTARG
- ;;
- ?) echo "Illegal option: -$OPTARG"
- usage
- exit 1
- ;;
- esac
-done
-
-rolls $REVISIONS $START
diff --git a/deps/v8/tools/v8heapconst.py b/deps/v8/tools/v8heapconst.py
index d5765a6a04..c96741a9a1 100644
--- a/deps/v8/tools/v8heapconst.py
+++ b/deps/v8/tools/v8heapconst.py
@@ -51,113 +51,115 @@ INSTANCE_TYPES = {
147: "FIXED_UINT8_CLAMPED_ARRAY_TYPE",
148: "FIXED_DOUBLE_ARRAY_TYPE",
149: "FILLER_TYPE",
- 150: "ACCESSOR_INFO_TYPE",
- 151: "ACCESSOR_PAIR_TYPE",
- 152: "ACCESS_CHECK_INFO_TYPE",
- 153: "INTERCEPTOR_INFO_TYPE",
- 154: "FUNCTION_TEMPLATE_INFO_TYPE",
- 155: "OBJECT_TEMPLATE_INFO_TYPE",
- 156: "ALLOCATION_SITE_TYPE",
- 157: "ALLOCATION_MEMENTO_TYPE",
- 158: "SCRIPT_TYPE",
- 159: "ALIASED_ARGUMENTS_ENTRY_TYPE",
- 160: "PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE",
- 161: "PROMISE_REACTION_JOB_INFO_TYPE",
- 162: "DEBUG_INFO_TYPE",
- 163: "STACK_FRAME_INFO_TYPE",
- 164: "PROTOTYPE_INFO_TYPE",
- 165: "TUPLE2_TYPE",
- 166: "TUPLE3_TYPE",
- 167: "CONTEXT_EXTENSION_TYPE",
- 168: "MODULE_TYPE",
- 169: "MODULE_INFO_ENTRY_TYPE",
- 170: "ASYNC_GENERATOR_REQUEST_TYPE",
+ 150: "ACCESS_CHECK_INFO_TYPE",
+ 151: "ACCESSOR_INFO_TYPE",
+ 152: "ACCESSOR_PAIR_TYPE",
+ 153: "ALIASED_ARGUMENTS_ENTRY_TYPE",
+ 154: "ALLOCATION_MEMENTO_TYPE",
+ 155: "ALLOCATION_SITE_TYPE",
+ 156: "ASYNC_GENERATOR_REQUEST_TYPE",
+ 157: "CONTEXT_EXTENSION_TYPE",
+ 158: "DEBUG_INFO_TYPE",
+ 159: "FUNCTION_TEMPLATE_INFO_TYPE",
+ 160: "INTERCEPTOR_INFO_TYPE",
+ 161: "MODULE_INFO_ENTRY_TYPE",
+ 162: "MODULE_TYPE",
+ 163: "OBJECT_TEMPLATE_INFO_TYPE",
+ 164: "PROMISE_REACTION_JOB_INFO_TYPE",
+ 165: "PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE",
+ 166: "PROTOTYPE_INFO_TYPE",
+ 167: "SCRIPT_TYPE",
+ 168: "STACK_FRAME_INFO_TYPE",
+ 169: "TUPLE2_TYPE",
+ 170: "TUPLE3_TYPE",
171: "FIXED_ARRAY_TYPE",
- 172: "HASH_TABLE_TYPE",
- 173: "DESCRIPTOR_ARRAY_TYPE",
+ 172: "DESCRIPTOR_ARRAY_TYPE",
+ 173: "HASH_TABLE_TYPE",
174: "TRANSITION_ARRAY_TYPE",
- 175: "FEEDBACK_VECTOR_TYPE",
- 176: "PROPERTY_ARRAY_TYPE",
- 177: "SHARED_FUNCTION_INFO_TYPE",
- 178: "CELL_TYPE",
- 179: "WEAK_CELL_TYPE",
+ 175: "CELL_TYPE",
+ 176: "CODE_DATA_CONTAINER_TYPE",
+ 177: "FEEDBACK_VECTOR_TYPE",
+ 178: "LOAD_HANDLER_TYPE",
+ 179: "PROPERTY_ARRAY_TYPE",
180: "PROPERTY_CELL_TYPE",
- 181: "SMALL_ORDERED_HASH_MAP_TYPE",
- 182: "SMALL_ORDERED_HASH_SET_TYPE",
- 183: "CODE_DATA_CONTAINER_TYPE",
- 184: "JS_PROXY_TYPE",
- 185: "JS_GLOBAL_OBJECT_TYPE",
- 186: "JS_GLOBAL_PROXY_TYPE",
- 187: "JS_MODULE_NAMESPACE_TYPE",
- 188: "JS_SPECIAL_API_OBJECT_TYPE",
- 189: "JS_VALUE_TYPE",
- 190: "JS_MESSAGE_OBJECT_TYPE",
- 191: "JS_DATE_TYPE",
- 192: "JS_API_OBJECT_TYPE",
- 193: "JS_OBJECT_TYPE",
- 194: "JS_ARGUMENTS_TYPE",
- 195: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
- 196: "JS_GENERATOR_OBJECT_TYPE",
- 197: "JS_ASYNC_GENERATOR_OBJECT_TYPE",
- 198: "JS_ARRAY_TYPE",
- 199: "JS_ARRAY_BUFFER_TYPE",
- 200: "JS_TYPED_ARRAY_TYPE",
- 201: "JS_DATA_VIEW_TYPE",
- 202: "JS_SET_TYPE",
- 203: "JS_MAP_TYPE",
- 204: "JS_SET_KEY_VALUE_ITERATOR_TYPE",
- 205: "JS_SET_VALUE_ITERATOR_TYPE",
- 206: "JS_MAP_KEY_ITERATOR_TYPE",
- 207: "JS_MAP_KEY_VALUE_ITERATOR_TYPE",
- 208: "JS_MAP_VALUE_ITERATOR_TYPE",
- 209: "JS_WEAK_MAP_TYPE",
- 210: "JS_WEAK_SET_TYPE",
- 211: "JS_PROMISE_TYPE",
- 212: "JS_REGEXP_TYPE",
- 213: "JS_ERROR_TYPE",
- 214: "JS_ASYNC_FROM_SYNC_ITERATOR_TYPE",
- 215: "JS_STRING_ITERATOR_TYPE",
- 216: "JS_TYPED_ARRAY_KEY_ITERATOR_TYPE",
- 217: "JS_FAST_ARRAY_KEY_ITERATOR_TYPE",
- 218: "JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE",
- 219: "JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 220: "JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 221: "JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 222: "JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 223: "JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 224: "JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 225: "JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 226: "JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 227: "JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 228: "JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 229: "JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 230: "JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 231: "JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 232: "JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 233: "JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 234: "JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 235: "JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE",
- 236: "JS_INT8_ARRAY_VALUE_ITERATOR_TYPE",
- 237: "JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE",
- 238: "JS_INT16_ARRAY_VALUE_ITERATOR_TYPE",
- 239: "JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE",
- 240: "JS_INT32_ARRAY_VALUE_ITERATOR_TYPE",
- 241: "JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE",
- 242: "JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE",
- 243: "JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE",
- 244: "JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE",
- 245: "JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE",
- 246: "JS_FAST_ARRAY_VALUE_ITERATOR_TYPE",
- 247: "JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE",
- 248: "JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE",
- 249: "JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE",
- 250: "JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE",
- 251: "WASM_INSTANCE_TYPE",
- 252: "WASM_MEMORY_TYPE",
- 253: "WASM_MODULE_TYPE",
- 254: "WASM_TABLE_TYPE",
- 255: "JS_BOUND_FUNCTION_TYPE",
- 256: "JS_FUNCTION_TYPE",
+ 181: "SHARED_FUNCTION_INFO_TYPE",
+ 182: "SMALL_ORDERED_HASH_MAP_TYPE",
+ 183: "SMALL_ORDERED_HASH_SET_TYPE",
+ 184: "STORE_HANDLER_TYPE",
+ 185: "WEAK_CELL_TYPE",
+ 1024: "JS_PROXY_TYPE",
+ 1025: "JS_GLOBAL_OBJECT_TYPE",
+ 1026: "JS_GLOBAL_PROXY_TYPE",
+ 1027: "JS_MODULE_NAMESPACE_TYPE",
+ 1040: "JS_SPECIAL_API_OBJECT_TYPE",
+ 1041: "JS_VALUE_TYPE",
+ 1056: "JS_API_OBJECT_TYPE",
+ 1057: "JS_OBJECT_TYPE",
+ 1058: "JS_ARGUMENTS_TYPE",
+ 1059: "JS_ARRAY_BUFFER_TYPE",
+ 1060: "JS_ARRAY_TYPE",
+ 1061: "JS_ASYNC_FROM_SYNC_ITERATOR_TYPE",
+ 1062: "JS_ASYNC_GENERATOR_OBJECT_TYPE",
+ 1063: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
+ 1064: "JS_DATE_TYPE",
+ 1065: "JS_ERROR_TYPE",
+ 1066: "JS_GENERATOR_OBJECT_TYPE",
+ 1067: "JS_MAP_TYPE",
+ 1068: "JS_MAP_KEY_ITERATOR_TYPE",
+ 1069: "JS_MAP_KEY_VALUE_ITERATOR_TYPE",
+ 1070: "JS_MAP_VALUE_ITERATOR_TYPE",
+ 1071: "JS_MESSAGE_OBJECT_TYPE",
+ 1072: "JS_PROMISE_TYPE",
+ 1073: "JS_REGEXP_TYPE",
+ 1074: "JS_SET_TYPE",
+ 1075: "JS_SET_KEY_VALUE_ITERATOR_TYPE",
+ 1076: "JS_SET_VALUE_ITERATOR_TYPE",
+ 1077: "JS_STRING_ITERATOR_TYPE",
+ 1078: "JS_WEAK_MAP_TYPE",
+ 1079: "JS_WEAK_SET_TYPE",
+ 1080: "JS_TYPED_ARRAY_TYPE",
+ 1081: "JS_DATA_VIEW_TYPE",
+ 1082: "JS_TYPED_ARRAY_KEY_ITERATOR_TYPE",
+ 1083: "JS_FAST_ARRAY_KEY_ITERATOR_TYPE",
+ 1084: "JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE",
+ 1085: "JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1086: "JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1087: "JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1088: "JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1089: "JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1090: "JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1091: "JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1092: "JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1093: "JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1094: "JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1095: "JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1096: "JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1097: "JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1098: "JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1099: "JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1100: "JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1101: "JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE",
+ 1102: "JS_INT8_ARRAY_VALUE_ITERATOR_TYPE",
+ 1103: "JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE",
+ 1104: "JS_INT16_ARRAY_VALUE_ITERATOR_TYPE",
+ 1105: "JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE",
+ 1106: "JS_INT32_ARRAY_VALUE_ITERATOR_TYPE",
+ 1107: "JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE",
+ 1108: "JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE",
+ 1109: "JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE",
+ 1110: "JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE",
+ 1111: "JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE",
+ 1112: "JS_FAST_ARRAY_VALUE_ITERATOR_TYPE",
+ 1113: "JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE",
+ 1114: "JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE",
+ 1115: "JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE",
+ 1116: "JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE",
+ 1117: "WASM_INSTANCE_TYPE",
+ 1118: "WASM_MEMORY_TYPE",
+ 1119: "WASM_MODULE_TYPE",
+ 1120: "WASM_TABLE_TYPE",
+ 1121: "JS_BOUND_FUNCTION_TYPE",
+ 1122: "JS_FUNCTION_TYPE",
}
# List of known V8 maps.
@@ -165,7 +167,7 @@ KNOWN_MAPS = {
0x02201: (138, "FreeSpaceMap"),
0x02251: (132, "MetaMap"),
0x022a1: (131, "NullMap"),
- 0x022f1: (173, "DescriptorArrayMap"),
+ 0x022f1: (172, "DescriptorArrayMap"),
0x02341: (171, "FixedArrayMap"),
0x02391: (149, "OnePointerFillerMap"),
0x023e1: (149, "TwoPointerFillerMap"),
@@ -177,19 +179,19 @@ KNOWN_MAPS = {
0x025c1: (131, "BooleanMap"),
0x02611: (136, "ByteArrayMap"),
0x02661: (171, "FixedCOWArrayMap"),
- 0x026b1: (172, "HashTableMap"),
+ 0x026b1: (173, "HashTableMap"),
0x02701: (128, "SymbolMap"),
0x02751: (72, "OneByteStringMap"),
0x027a1: (171, "ScopeInfoMap"),
- 0x027f1: (177, "SharedFunctionInfoMap"),
+ 0x027f1: (181, "SharedFunctionInfoMap"),
0x02841: (133, "CodeMap"),
0x02891: (171, "FunctionContextMap"),
- 0x028e1: (178, "CellMap"),
- 0x02931: (179, "WeakCellMap"),
+ 0x028e1: (175, "CellMap"),
+ 0x02931: (185, "WeakCellMap"),
0x02981: (180, "GlobalPropertyCellMap"),
0x029d1: (135, "ForeignMap"),
0x02a21: (174, "TransitionArrayMap"),
- 0x02a71: (175, "FeedbackVectorMap"),
+ 0x02a71: (177, "FeedbackVectorMap"),
0x02ac1: (131, "ArgumentsMarkerMap"),
0x02b11: (131, "ExceptionMap"),
0x02b61: (131, "TerminationExceptionMap"),
@@ -204,79 +206,80 @@ KNOWN_MAPS = {
0x02e31: (171, "WithContextMap"),
0x02e81: (171, "DebugEvaluateContextMap"),
0x02ed1: (171, "ScriptContextTableMap"),
- 0x02f21: (148, "FixedDoubleArrayMap"),
- 0x02f71: (134, "MutableHeapNumberMap"),
- 0x02fc1: (172, "OrderedHashMapMap"),
- 0x03011: (172, "OrderedHashSetMap"),
- 0x03061: (172, "NameDictionaryMap"),
- 0x030b1: (172, "GlobalDictionaryMap"),
- 0x03101: (172, "NumberDictionaryMap"),
- 0x03151: (172, "StringTableMap"),
- 0x031a1: (172, "WeakHashTableMap"),
- 0x031f1: (171, "SloppyArgumentsElementsMap"),
- 0x03241: (181, "SmallOrderedHashMapMap"),
- 0x03291: (182, "SmallOrderedHashSetMap"),
- 0x032e1: (183, "CodeDataContainerMap"),
- 0x03331: (190, "JSMessageObjectMap"),
- 0x03381: (193, "ExternalMap"),
- 0x033d1: (137, "BytecodeArrayMap"),
- 0x03421: (171, "ModuleInfoMap"),
- 0x03471: (178, "NoClosuresCellMap"),
- 0x034c1: (178, "OneClosureCellMap"),
- 0x03511: (178, "ManyClosuresCellMap"),
- 0x03561: (176, "PropertyArrayMap"),
- 0x035b1: (130, "BigIntMap"),
- 0x03601: (106, "NativeSourceStringMap"),
- 0x03651: (64, "StringMap"),
- 0x036a1: (73, "ConsOneByteStringMap"),
- 0x036f1: (65, "ConsStringMap"),
- 0x03741: (77, "ThinOneByteStringMap"),
- 0x03791: (69, "ThinStringMap"),
- 0x037e1: (67, "SlicedStringMap"),
- 0x03831: (75, "SlicedOneByteStringMap"),
- 0x03881: (66, "ExternalStringMap"),
- 0x038d1: (82, "ExternalStringWithOneByteDataMap"),
- 0x03921: (74, "ExternalOneByteStringMap"),
- 0x03971: (98, "ShortExternalStringMap"),
- 0x039c1: (114, "ShortExternalStringWithOneByteDataMap"),
- 0x03a11: (0, "InternalizedStringMap"),
- 0x03a61: (2, "ExternalInternalizedStringMap"),
- 0x03ab1: (18, "ExternalInternalizedStringWithOneByteDataMap"),
- 0x03b01: (10, "ExternalOneByteInternalizedStringMap"),
- 0x03b51: (34, "ShortExternalInternalizedStringMap"),
- 0x03ba1: (50, "ShortExternalInternalizedStringWithOneByteDataMap"),
- 0x03bf1: (42, "ShortExternalOneByteInternalizedStringMap"),
- 0x03c41: (106, "ShortExternalOneByteStringMap"),
- 0x03c91: (140, "FixedUint8ArrayMap"),
- 0x03ce1: (139, "FixedInt8ArrayMap"),
- 0x03d31: (142, "FixedUint16ArrayMap"),
- 0x03d81: (141, "FixedInt16ArrayMap"),
- 0x03dd1: (144, "FixedUint32ArrayMap"),
- 0x03e21: (143, "FixedInt32ArrayMap"),
- 0x03e71: (145, "FixedFloat32ArrayMap"),
- 0x03ec1: (146, "FixedFloat64ArrayMap"),
- 0x03f11: (147, "FixedUint8ClampedArrayMap"),
- 0x03f61: (165, "Tuple2Map"),
- 0x03fb1: (158, "ScriptMap"),
- 0x04001: (153, "InterceptorInfoMap"),
- 0x04051: (150, "AccessorInfoMap"),
- 0x040a1: (151, "AccessorPairMap"),
- 0x040f1: (152, "AccessCheckInfoMap"),
- 0x04141: (154, "FunctionTemplateInfoMap"),
- 0x04191: (155, "ObjectTemplateInfoMap"),
- 0x041e1: (156, "AllocationSiteMap"),
- 0x04231: (157, "AllocationMementoMap"),
- 0x04281: (159, "AliasedArgumentsEntryMap"),
- 0x042d1: (160, "PromiseResolveThenableJobInfoMap"),
- 0x04321: (161, "PromiseReactionJobInfoMap"),
- 0x04371: (162, "DebugInfoMap"),
- 0x043c1: (163, "StackFrameInfoMap"),
- 0x04411: (164, "PrototypeInfoMap"),
- 0x04461: (166, "Tuple3Map"),
- 0x044b1: (167, "ContextExtensionMap"),
- 0x04501: (168, "ModuleMap"),
- 0x04551: (169, "ModuleInfoEntryMap"),
- 0x045a1: (170, "AsyncGeneratorRequestMap"),
+ 0x02f21: (171, "ArrayListMap"),
+ 0x02f71: (148, "FixedDoubleArrayMap"),
+ 0x02fc1: (134, "MutableHeapNumberMap"),
+ 0x03011: (173, "OrderedHashMapMap"),
+ 0x03061: (173, "OrderedHashSetMap"),
+ 0x030b1: (173, "NameDictionaryMap"),
+ 0x03101: (173, "GlobalDictionaryMap"),
+ 0x03151: (173, "NumberDictionaryMap"),
+ 0x031a1: (173, "StringTableMap"),
+ 0x031f1: (173, "WeakHashTableMap"),
+ 0x03241: (171, "SloppyArgumentsElementsMap"),
+ 0x03291: (182, "SmallOrderedHashMapMap"),
+ 0x032e1: (183, "SmallOrderedHashSetMap"),
+ 0x03331: (176, "CodeDataContainerMap"),
+ 0x03381: (1071, "JSMessageObjectMap"),
+ 0x033d1: (1057, "ExternalMap"),
+ 0x03421: (137, "BytecodeArrayMap"),
+ 0x03471: (171, "ModuleInfoMap"),
+ 0x034c1: (175, "NoClosuresCellMap"),
+ 0x03511: (175, "OneClosureCellMap"),
+ 0x03561: (175, "ManyClosuresCellMap"),
+ 0x035b1: (179, "PropertyArrayMap"),
+ 0x03601: (130, "BigIntMap"),
+ 0x03651: (106, "NativeSourceStringMap"),
+ 0x036a1: (64, "StringMap"),
+ 0x036f1: (73, "ConsOneByteStringMap"),
+ 0x03741: (65, "ConsStringMap"),
+ 0x03791: (77, "ThinOneByteStringMap"),
+ 0x037e1: (69, "ThinStringMap"),
+ 0x03831: (67, "SlicedStringMap"),
+ 0x03881: (75, "SlicedOneByteStringMap"),
+ 0x038d1: (66, "ExternalStringMap"),
+ 0x03921: (82, "ExternalStringWithOneByteDataMap"),
+ 0x03971: (74, "ExternalOneByteStringMap"),
+ 0x039c1: (98, "ShortExternalStringMap"),
+ 0x03a11: (114, "ShortExternalStringWithOneByteDataMap"),
+ 0x03a61: (0, "InternalizedStringMap"),
+ 0x03ab1: (2, "ExternalInternalizedStringMap"),
+ 0x03b01: (18, "ExternalInternalizedStringWithOneByteDataMap"),
+ 0x03b51: (10, "ExternalOneByteInternalizedStringMap"),
+ 0x03ba1: (34, "ShortExternalInternalizedStringMap"),
+ 0x03bf1: (50, "ShortExternalInternalizedStringWithOneByteDataMap"),
+ 0x03c41: (42, "ShortExternalOneByteInternalizedStringMap"),
+ 0x03c91: (106, "ShortExternalOneByteStringMap"),
+ 0x03ce1: (140, "FixedUint8ArrayMap"),
+ 0x03d31: (139, "FixedInt8ArrayMap"),
+ 0x03d81: (142, "FixedUint16ArrayMap"),
+ 0x03dd1: (141, "FixedInt16ArrayMap"),
+ 0x03e21: (144, "FixedUint32ArrayMap"),
+ 0x03e71: (143, "FixedInt32ArrayMap"),
+ 0x03ec1: (145, "FixedFloat32ArrayMap"),
+ 0x03f11: (146, "FixedFloat64ArrayMap"),
+ 0x03f61: (147, "FixedUint8ClampedArrayMap"),
+ 0x03fb1: (169, "Tuple2Map"),
+ 0x04001: (167, "ScriptMap"),
+ 0x04051: (160, "InterceptorInfoMap"),
+ 0x040a1: (151, "AccessorInfoMap"),
+ 0x040f1: (150, "AccessCheckInfoMap"),
+ 0x04141: (152, "AccessorPairMap"),
+ 0x04191: (153, "AliasedArgumentsEntryMap"),
+ 0x041e1: (154, "AllocationMementoMap"),
+ 0x04231: (155, "AllocationSiteMap"),
+ 0x04281: (156, "AsyncGeneratorRequestMap"),
+ 0x042d1: (157, "ContextExtensionMap"),
+ 0x04321: (158, "DebugInfoMap"),
+ 0x04371: (159, "FunctionTemplateInfoMap"),
+ 0x043c1: (161, "ModuleInfoEntryMap"),
+ 0x04411: (162, "ModuleMap"),
+ 0x04461: (163, "ObjectTemplateInfoMap"),
+ 0x044b1: (164, "PromiseReactionJobInfoMap"),
+ 0x04501: (165, "PromiseResolveThenableJobInfoMap"),
+ 0x04551: (166, "PrototypeInfoMap"),
+ 0x045a1: (168, "StackFrameInfoMap"),
+ 0x045f1: (170, "Tuple3Map"),
}
# List of known V8 objects.
@@ -349,6 +352,7 @@ FRAME_MARKERS = (
"ARGUMENTS_ADAPTOR",
"BUILTIN",
"BUILTIN_EXIT",
+ "NATIVE",
)
# This set of constants is generated from a shipping build.
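For orientation, a minimal sketch (not part of this commit) of how a table like KNOWN_MAPS is typically consumed: V8's heap-inspection scripts (e.g. grokdump.py) resolve a raw map offset to an (instance type, name) pair. The describe_map helper below is hypothetical; only the two KNOWN_MAPS entries are taken from the regenerated table above.

# Hypothetical helper (not part of v8heapconst.py): resolve a known map
# offset to a readable description using the regenerated KNOWN_MAPS table.
KNOWN_MAPS = {
    0x02f21: (171, "ArrayListMap"),  # entries taken from the table above
    0x045f1: (170, "Tuple3Map"),
}

def describe_map(offset):
    entry = KNOWN_MAPS.get(offset)
    if entry is None:
        return "unknown map at 0x%05x" % offset
    instance_type, name = entry
    return "%s (instance type %d)" % (name, instance_type)

print(describe_map(0x02f21))  # ArrayListMap (instance type 171)
print(describe_map(0x99999))  # unknown map at 0x99999

Offsets missing from the table fall through to an "unknown map" string, which mirrors how such tools degrade when the generated constants go stale against a newer V8 build.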
diff --git a/deps/v8/tools/wasm/update-wasm-spec-tests.sh b/deps/v8/tools/wasm/update-wasm-spec-tests.sh
index ffdef0d820..c4d18a3333 100755
--- a/deps/v8/tools/wasm/update-wasm-spec-tests.sh
+++ b/deps/v8/tools/wasm/update-wasm-spec-tests.sh
@@ -3,15 +3,27 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+# Exit immediately if a command exits with a non-zero status.
set -e
+# Treat unset variables as an error when substituting.
+set -u
+
+# The return value of a pipeline is the status of the last command to exit
+# with a non-zero status, or zero if no command exited with a non-zero status.
+set -o pipefail
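+# Illustrative example (hypothetical, not part of the actual script): under
+# pipefail, `false | cat` makes the whole pipeline exit with status 1, so
+# `set -e` aborts the script instead of continuing past the failure.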
+
TOOLS_WASM_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
V8_DIR="${TOOLS_WASM_DIR}/../.."
+SPEC_TEST_DIR=${V8_DIR}/test/wasm-spec-tests
cd ${V8_DIR}
-mkdir -p ./test/wasm-spec-tests/tests/
-rm -rf ./test/wasm-spec-tests/tests/*
+rm -rf ${SPEC_TEST_DIR}/tests
+mkdir ${SPEC_TEST_DIR}/tests
+
+rm -rf ${SPEC_TEST_DIR}/tmp
+mkdir ${SPEC_TEST_DIR}/tmp
./tools/dev/gm.py x64.release d8
@@ -20,11 +32,14 @@ make clean all
cd ${V8_DIR}/test/wasm-js/test/core
-./run.py --wasm ${V8_DIR}/test/wasm-js/interpreter/wasm --js ${V8_DIR}/out/x64.release/d8
-cp ${V8_DIR}/test/wasm-js/test/core/output/*.js ${V8_DIR}/test/wasm-spec-tests/tests
+./run.py --wasm ${V8_DIR}/test/wasm-js/interpreter/wasm --js ${V8_DIR}/out/x64.release/d8 --out ${SPEC_TEST_DIR}/tmp
+cp ${SPEC_TEST_DIR}/tmp/*.js ${SPEC_TEST_DIR}/tests/
+rm -rf ${SPEC_TEST_DIR}/tmp
-cd ${V8_DIR}/test/wasm-spec-tests
+cd ${SPEC_TEST_DIR}
+echo
+echo "The following files will get uploaded:"
+ls tests
+echo
upload_to_google_storage.py -a -b v8-wasm-spec-tests tests
-
-
diff --git a/deps/v8/tools/whitespace.txt b/deps/v8/tools/whitespace.txt
index a39b5f1e45..ed5e51f96a 100644
--- a/deps/v8/tools/whitespace.txt
+++ b/deps/v8/tools/whitespace.txt
@@ -5,7 +5,8 @@ Try to write something funny. And please don't add trailing whitespace.
A Smi balks into a war and says:
"I'm so deoptimized today!"
The doubles heard this and started to unbox.
-The Smi looked at them when a crazy v8-autoroll account showed up......
-The autoroller bought a round of Himbeerbrause. Suddenly.....
-The bartender starts to shake the bottles...............
+The Smi looked at them when a crazy v8-autoroll account showed up...
+The autoroller bought a round of Himbeerbrause. Suddenly...
+The bartender starts to shake the bottles.......................
+.
.